Columns: content (string, 35 to 762k chars) · sha1 (string, 40 chars) · id (int64, 0 to 3.66M)
def ctf_to_pickle(trace_directory: str, target: Pickler) -> int:
    """
    Load CTF trace, convert events, and dump to a pickle file.

    :param trace_directory: the trace directory
    :param target: the target file to write to
    :return: the number of events written
    """
    ctf_events = get_trace_ctf_events(trace_directory)

    count = 0
    count_written = 0

    for event in ctf_events:
        count += 1
        pod = event_to_dict(event)
        target.dump(pod)
        count_written += 1

    return count_written
e317be9d5577c8f85e02945d9ae95e63be9e76ef
7,700
def list_lines(lines):
    """Returns the list of trimmed lines.

    @param lines Multi-line string
    """
    return list(filter(None, (x.strip() for x in lines.splitlines())))
293610d17e1fe8a27ab6bb5c35a349059e0179f3
7,701
from functools import reduce


def _histogram_2d_vectorized(
    *args, bins=None, weights=None, density=False, right=False, block_size=None
):
    """Calculate the histogram independently on each row of a 2D array"""
    N_inputs = len(args)
    a0 = args[0]

    # consistency checks for inputs
    for a, b in zip(args, bins):
        assert a.ndim == 2
        assert b.ndim == 1
        assert a.shape == a0.shape
    if weights is not None:
        assert weights.shape == a0.shape

    nrows, ncols = a0.shape
    nbins = [len(b) for b in bins]
    hist_shapes = [nb + 1 for nb in nbins]

    # a marginally faster implementation would be to use searchsorted,
    # like numpy histogram itself does
    # https://github.com/numpy/numpy/blob/9c98662ee2f7daca3f9fae9d5144a9a8d3cabe8c/numpy/lib/histograms.py#L864-L882
    # for now we stick with `digitize` because it's easy to understand how it works

    # Add small increment to the last bin edge to make the final bin right-edge inclusive
    # Note, this is the approach taken by sklearn, e.g.
    # https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py#L592
    # but a better approach would be to use something like _search_sorted_inclusive() in
    # numpy histogram. This is an additional motivation for moving to searchsorted
    bins = [np.concatenate((b[:-1], b[-1:] + 1e-8)) for b in bins]

    # the maximum possible value of digitize is nbins
    # for right=False:
    #   - 0 corresponds to a < b[0]
    #   - i corresponds to bins[i-1] <= a < b[i]
    #   - nbins corresponds to a >= b[-1]
    each_bin_indices = [digitize(a, b) for a, b in zip(args, bins)]
    # product of the bins gives the joint distribution
    if N_inputs > 1:
        bin_indices = ravel_multi_index(each_bin_indices, hist_shapes)
    else:
        bin_indices = each_bin_indices[0]
    # total number of unique bin indices
    N = reduce(lambda x, y: x * y, hist_shapes)

    bin_counts = _dispatch_bincount(
        bin_indices, weights, N, hist_shapes, block_size=block_size
    )

    # just throw out everything outside of the bins, as np.histogram does
    # TODO: make this optional?
    slices = (slice(None),) + (N_inputs * (slice(1, -1),))
    bin_counts = bin_counts[slices]

    return bin_counts
be363475cecc2d12486c4b8d23976cfa909ca333
7,702
def carbon_offset_cost(kWh):
    """
    Donation to Cool Earth (in USD) needed to offset carbon emissions.
    """
    return KG_CO2_PER_KWH * USD_PER_KG_CO2 * kWh
6bbb9cfd3c058d4148fe3286defe75ade0fddb62
7,703
from typing import List
from typing import Tuple
from typing import Union

import time


def run(
    duration: int, runtime_mode: str, connection_mode: str
) -> List[Tuple[str, Union[int, float]]]:
    """Test memory usage."""
    # pylint: disable=import-outside-toplevel,unused-import
    # import manually due to some lazy imports in decision_maker
    resources = Resources()

    if connection_mode not in CONNECTION_MODES:
        raise ValueError(
            f"bad connection mode {connection_mode}. valid is one of {list(CONNECTION_MODES.keys())}"
        )

    base_cls = CONNECTION_MODES[connection_mode]
    conn_cls = type("conn_cls", (TestConnectionMixIn, base_cls), {})
    connection = conn_cls.make()  # type: ignore # pylint: disable=no-member
    resources.add_connection(connection)
    agent = make_agent(runtime_mode=runtime_mode, resources=resources)
    agent.resources.add_skill(make_skill(agent, handlers={"test": TestHandler}))
    t = Thread(target=agent.start, daemon=True)
    t.start()
    wait_for_condition(lambda: agent.is_running, timeout=5)

    connection.enable()
    time.sleep(duration)
    connection.disable()
    time.sleep(0.2)  # possible race condition in stop?
    agent.stop()
    t.join(5)

    latency = mean(
        map(
            lambda x: x[1] - x[0],
            zip(
                connection.sends,
                connection.recvs,
            ),
        )
    )
    total_amount = len(connection.recvs)
    rate = total_amount / duration

    return [
        ("envelopes received", len(connection.recvs)),
        ("envelopes sent", len(connection.sends)),
        ("latency(ms)", 10**6 * latency),
        ("rate(envelopes/second)", rate),
    ]
677bdb5cb73cfc4ccc38d813bf875d506905512e
7,704
def tf_fermion_massmatrix(t_A3, t_potential, tc_masses_factor):
    """Computes the spin-1/2 mass matrix from the A3-tensor."""
    # The extra factor 2.0 relative to https://arxiv.org/abs/1906.00207
    # makes the fermion masses align with the way particle states are
    # grouped into SUSY multiplets in appendix (B.2) of:
    # https://arxiv.org/abs/1909.10969
    return mu.tfc128(2.0) * tf.einsum(
        'ij,ik->jk', t_A3, tf.math.conj(t_A3)) * (
            tc_masses_factor / tf.cast(t_potential, tf.complex128))
934c606fd55f93bdfa91a1e4d23fb7b6b5df8703
7,705
def filter_nsa_catalog_to_approximate_sky_area(nsa, bricks, visualise=False):
    """
    DECALS is only in a well-defined portion of sky (which depends on the data release version).
    Filter the NSA catalog so that it only includes galaxies in that approximate area.
    This saves time matching later.

    Args:
        nsa (astropy.Table): NSA catalog of SDSS galaxies
        bricks (astropy.Table): catalog of DECALS imaging bricks
        visualise (bool): if True, plot and save sky footprint of NSA catalog

    Returns:
        (astropy.Table) NSA catalog filtered to galaxies within the approximate sky area imaged by DECALS
    """
    if visualise:
        fig, ((ul, ur), (ll, lr)) = plt.subplots(2, 2)
        ul.hist(bricks['dec'])
        ul.set_title('brick dec')
        ur.hist(nsa['dec'])
        ur.set_title('nsa dec')
        ll.hist(bricks['ra'])
        ll.set_title('brick ra')
        lr.hist(nsa['ra'])
        lr.set_title('nsa ra')
        plt.tight_layout()
        plt.savefig('nsa_catalog_sky_coverage.png')

    brick_maxdec = max(bricks['dec2'])
    brick_mindec = min(bricks['dec1'])

    # ra spans 0 through 360, do not filter
    declim = (nsa['dec'] >= brick_mindec) & (nsa['dec'] <= brick_maxdec)  # approximately -25 to +30 degrees

    nsa_in_decals_area = nsa[declim]

    return nsa_in_decals_area
3f961cab16a58e7323f1f0730497beaf15f5db18
7,706
from typing import Dict
from typing import Union
from typing import List
from typing import Optional
from typing import Any
from typing import Tuple


def apply_variants(variants: Dict[Union[str, List[str]], int],
                   parameters: Optional[Dict[Any, Any]] = None,
                   variant=DEFAULT_VARIANT_VARIANTS) -> Tuple[PetriNet, Marking, Marking]:
    """
    Apply the chosen IM algorithm to a dictionary/list/set of variants obtaining a Petri net
    along with an initial and final marking

    Parameters
    -----------
    variants
        Dictionary/list/set of variants in the log
    variant
        Variant of the algorithm to apply, possible values:
        - Variants.IMd
    parameters
        Parameters of the algorithm, including:
            Parameters.ACTIVITY_KEY -> attribute of the log to use as activity name
            (default concept:name)

    Returns
    -----------
    net
        Petri net
    initial_marking
        Initial marking
    final_marking
        Final marking
    """
    return exec_utils.get_variant(variant).apply_variants(variants, parameters=parameters)
13a2466c1c7921fe5f6ccbf4fe819e2ac19ee87f
7,707
def query_update(request: HttpRequest, **kwargs: str) -> str:
    """Update the query string with new values."""
    updated = request.GET.copy()
    for key, value in kwargs.items():
        updated[key] = value
    return updated.urlencode()
43d60853f53fec4e696c2c6010b6e3b3db0da389
7,708
def get_user_info(user_id):
    """
    Fetches User Info Based On User ID
    :param user_id:
    :return: user
    """
    user = session.query(User).filter_by(id=user_id).one_or_none()
    return user
e1134f9305bd6df1b650bc3362c0e85f6dc10ccf
7,709
def gatk_version(request) -> GATKVersion:
    """Given a version number, return a GATKVersion."""
    return GATKVersion(request.param)
ec05d5f34f45454bb7c0b8c562851c3691d01ace
7,710
def load_objs(name_obj_dat, sim, obj_ids, auto_sleep=True):
    """
    - name_obj_dat: List[(str, List[
          transformation as a 4x4 list of lists of floats,
          int representing the motion type
      ])]
    """
    static_obj_ids = []
    for i, (name, obj_dat) in enumerate(name_obj_dat):
        if len(obj_ids) == 0:
            obj_id = add_obj(name, sim)
        else:
            obj_id = obj_ids[i]

        trans = obj_dat[0]
        obj_type = obj_dat[1]

        use_trans = mn.Matrix4(trans)
        sim.set_transformation(use_trans, obj_id)
        sim.set_linear_velocity(mn.Vector3(0, 0, 0), obj_id)
        sim.set_angular_velocity(mn.Vector3(0, 0, 0), obj_id)
        sim.set_object_motion_type(MotionType(obj_type), obj_id)
        static_obj_ids.append(obj_id)

    if len(obj_ids) != 0:
        return obj_ids
    return static_obj_ids
899670f7ff63ef124dd51575ff59560b27b6e974
7,711
def get_mod_metadata(module: Module):
    """
    Get descriptions for produced dependencies.
    """
    meta = {}
    has_meta = hasattr(module, 'prod_meta')
    for prod in module.produces:
        prod = prod.replace('?', '').replace('!', '')
        if not has_meta:
            meta[prod] = '<no description>'
            continue
        prod_meta = module.prod_meta.get(prod)
        meta[prod] = prod_meta if prod_meta else '<no description>'
    return meta
b0000c555cc22f5d81f31241bc3eaa3aee7d99ad
7,712
def register_module():
    """Registers this module in the registry."""

    dashboard.dashboard.DashboardRegistry.add_analytics_section(
        dashboard.analytics.QuestionScoreHandler)

    global_handlers = []
    for path, handler_class in mapreduce_main.create_handlers_map():
        # The mapreduce and pipeline libraries are pretty casual about
        # mixing up their UI support in with their functional paths.
        # Here, we separate things and give them different prefixes
        # so that the only-admin-access patterns we define in app.yaml
        # can be reasonably clean.
        if path.startswith('.*/pipeline'):
            if 'pipeline/rpc/' in path or path == '.*/pipeline(/.+)':
                path = path.replace('.*/pipeline', '/mapreduce/ui/pipeline')
            else:
                path = path.replace('.*/pipeline', '/mapreduce/worker/pipeline')
        else:
            if '_callback' in path:
                path = path.replace('.*', '/mapreduce/worker', 1)
            elif '/list_configs' in path:
                # This needs mapreduce.yaml, which we don't distribute.  Not
                # having this prevents part of the mapreduce UI front page
                # from loading, but we don't care, because we don't want
                # people using the M/R front page to relaunch jobs anyhow.
                continue
            else:
                path = path.replace('.*', '/mapreduce/ui', 1)

        # The UI needs to be guarded by a config so that casual users aren't
        # exposed to the internals, but advanced users can investigate issues.
        if '/ui/' in path or path.endswith('/ui'):
            if (hasattr(handler_class, 'dispatch') and
                    not hasattr(handler_class, 'real_dispatch')):
                handler_class.real_dispatch = handler_class.dispatch
                handler_class.dispatch = ui_access_wrapper
            global_handlers.append((path, handler_class))

        # Wrap worker handlers with check that request really is coming
        # from task queue.
        else:
            if (hasattr(handler_class, 'dispatch') and
                    not hasattr(handler_class, 'real_dispatch')):
                handler_class.real_dispatch = handler_class.dispatch
                handler_class.dispatch = authorization_wrapper
            global_handlers.append((path, handler_class))

    # Tell map/reduce internals that this is now the base path to use.
    mapreduce_parameters.config.BASE_PATH = '/mapreduce/worker'

    global custom_module
    custom_module = custom_modules.Module(
        MODULE_NAME,
        'Provides support for analysis jobs based on map/reduce',
        global_handlers, [])
    return custom_module
7e711f6e67e7a9bcd118dc304bd99073b25a8049
7,713
import warnings


def theta_b(wlen, d, n=1):
    """return the Bragg angle, $\theta_{B}$, (deg) for a given wavelength
    (\AA$^{-1}$) and d-spacing (\AA)"""
    if not (d == 0):
        try:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                _thb = np.rad2deg(np.arcsin(((wlen * n) / (2 * d))))
                return _thb
        except Exception:
            return 0
    else:
        return 0
89080b455744bab1e94aa47eb53a3a2935985d32
7,714
def replace_newlines(s, replacement=' / ', newlines=(u"\n", u"\r")):
    """
    Used by the status message display on the buddy list to replace
    newline characters.
    """
    # turn all carriage returns to newlines
    for newline in newlines[1:]:
        s = s.replace(newline, newlines[0])

    # while there are pairs of newlines, turn them into one
    while s.find(newlines[0] * 2) != -1:
        s = s.replace(newlines[0] * 2, newlines[0])

    # replace newlines with the newline_replacement above
    return s.strip().replace(newlines[0], replacement)
d7b42ad67a3732c1ecac5bbfd7b9920b0215aa13
7,715
import sys

import requests
import pprint


def get_user_data(prs, client_id, client_secret):
    """Get user data from PR data."""
    users = {}

    for owner, repo, number, pr in prs:
        username = pr.username

        # Initialize the User if needed
        if username not in users:
            print(pr.user_url, file=sys.stderr)
            payload = {
                'client_id': client_id,
                'client_secret': client_secret
            }
            resp = requests.get(pr.user_url, params=payload)

            # Abort if the return is an error
            out = resp.json()
            if 'message' in out:
                pprint.pprint(out, stream=sys.stderr)
                raise Exception(resp.text)

            user = User(out)
            users[username] = user

        users[username].add_pr(pr)

    return users
3486f159afd9b3b4bd0e7242bdf63e62e1e873d6
7,716
def default_bucket_name():
    """Obtain the default Google Storage bucket name for this application.

    Returns:
      A string that is the name of the default bucket.
    """
    return files._default_gs_bucket_name()
01cad1b881217849ff55af6f1b67da624b584810
7,717
def LineGaussSeidel_i(Uo, Beta):
    """Return the numerical solution of dependent variable in the model eq.

    This routine uses the Line-Gauss Seidel method along constant i
    direction (parallel to y-axis) to obtain the solution of the
    Poisson's equation.

    Call signature:
        LineGaussSeidel_i(Uo, Beta)

    Parameters
    ----------
    Uo : 2D array
        The dependent variable obtained from the previous iteration
        level, n.
    Beta : float
        Coefficient in the Poissons finite difference approximation.
        Beta = dX/dY

    Returns
    -------
    U : 2D array
        The dependent variable calculated at time level (n+1) within the
        entire domain.
    """
    shapeU = Uo.shape  # Obtain Dimension
    if len(shapeU) == 1:
        raise DimensionError("1D", "POISSONS")

    # Proceed to numerical solution
    U = Uo.copy()  # Initialize U

    iMax, jMax = shapeU
    B2 = Beta*Beta

    A = [B2 for j in range(jMax)]
    B = [-2.0*(1.0 + B2) for j in range(jMax)]
    C = [B2 for j in range(jMax)]
    D = [0 for j in range(jMax)]
    UU = [0 for j in range(jMax)]
    # NOTE that in the POISSON'S SOLVERS formulation, the dependent variable U
    # is used on RHS of discretized eqn instead of Uo as in other MODELS,
    # which is due to the formulation requirement to use values of dependent
    # variable from advanced time steps (k+1) at points (i-1,j) or (i,j-1).
    for i in range(1, iMax-1):
        UU[0] = U[i][0]  # Convert U to 1-D array for Tridiagonal solver
        UU[-1] = U[i][jMax-1]
        for j in range(1, jMax-1):
            D[j] = -(U[i+1][j] + U[i-1][j])
        UU = TridiagonalSolver(jMax, A, B, C, D, UU)
        for j in range(1, jMax-1):
            U[i][j] = UU[j]

    return U
2fd2fda54169bc0f1e686781b26823a8f1a29b49
7,718
def add_padding_to_grid(
    component,
    grid_size=127,
    x=10,
    y=10,
    bottom_padding=5,
    layers=[pp.LAYER.PADDING],
    suffix="p",
):
    """ returns component with a padding layer on each side;
    matches a minimum size
    """
    c = pp.Component(name=f"{component.name}_{suffix}")
    c << component
    c.ports = component.ports

    if c.size_info.height < grid_size:
        y_padding = grid_size - c.size_info.height
    else:
        n_grids = np.ceil(c.size_info.height / grid_size)
        y_padding = n_grids * grid_size - c.size_info.height

    if c.size_info.width < grid_size:
        x_padding = grid_size - c.size_info.width
    else:
        n_grids = np.ceil(c.size_info.width / grid_size)
        x_padding = n_grids * grid_size - c.size_info.width

    x_padding -= x
    y_padding -= y

    points = [
        [c.xmin - x_padding / 2, c.ymin - bottom_padding],
        [c.xmax + x_padding / 2, c.ymin - bottom_padding],
        [c.xmax + x_padding / 2, c.ymax + y_padding - bottom_padding],
        [c.xmin - x_padding / 2, c.ymax + y_padding - bottom_padding],
    ]

    for layer in layers:
        c.add_polygon(points, layer=layer)
    return c
5f886f7f5cec874eda10675580954ad46cbb2200
7,719
def _find_role(oneandone_conn, role):
    """
    Given a name, validates that the role exists
    whether it is a proper ID or a name.
    Returns the role if one was found, else None.
    """
    for _role in oneandone_conn.list_roles(per_page=1000):
        if role in (_role['id'], _role['name']):
            return _role
b8e2e93b13c9595e40dd61b2e9bbda1f89f23cca
7,720
def load_vgg(sess, vgg_path):
    """
    Load Pretrained VGG Model into TensorFlow.
    :param sess: TensorFlow Session
    :param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
    :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
    """
    # TODO: Implement function
    # (DONE) Use tf.saved_model.loader.load to load the model and weights
    vgg_tag = 'vgg16'
    vgg_input_tensor_name = 'image_input:0'
    vgg_keep_prob_tensor_name = 'keep_prob:0'
    vgg_layer3_out_tensor_name = 'layer3_out:0'
    vgg_layer4_out_tensor_name = 'layer4_out:0'
    vgg_layer7_out_tensor_name = 'layer7_out:0'

    tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)

    graph = tf.get_default_graph()
    w1 = graph.get_tensor_by_name(vgg_input_tensor_name)
    # w1.set_shape((None, 160, 576, 3))
    keep = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
    layer3_out = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
    layer4_out = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
    layer7_out = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)

    if DEBUG_MODEL:
        print("VGG:")
        print("\tInput:   ", w1.shape)
        print("\tKeep:    ", keep.shape)
        print("\tLayer 3: ", layer3_out.shape)
        print("\tLayer 4: ", layer4_out.shape)
        print("\tLayer 7: ", layer7_out.shape)

    return w1, keep, layer3_out, layer4_out, layer7_out
b3d577a9c46859ea85c5029e2837777a9bdb0e7d
7,721
def created_median_mask(disparity_map, valid_depth_mask, rect=None):
    """Generate a mask so that unwanted regions of the rectangle get mask value 0
    and wanted regions get mask value 1."""
    if rect is not None:
        x, y, w, h = rect
        disparity_map = disparity_map[y:y + h, x:x + w]
        valid_depth_mask = valid_depth_mask[y:y + h, x:x + w]

    # Get the median disparity
    median = np.median(disparity_map)

    # A pixel is treated as noise when its valid disparity differs from the
    # median disparity by 12 or more; the value 12 was chosen empirically.
    return np.where((valid_depth_mask == 0) | (abs(disparity_map - median) < 12),
                    1.0, 0.0)
e57a990d250564c4e8d2b59aa27522115c9791e2
7,722
import torch


def l2_loss(pred_traj, pred_traj_gt, mode='sum'):
    """
    Input:
    - pred_traj: Tensor of shape (seq_len, batch, 2). Predicted trajectory.
    - pred_traj_gt: Tensor of shape (seq_len, batch, 2). Ground truth
      predictions.
    - mode: Can be one of sum, average, raw
    Output:
    - loss: l2 loss depending on mode
    """
    seq_len, batch, _ = pred_traj.size()
    loss = (pred_traj_gt.permute(1, 0, 2) - pred_traj.permute(1, 0, 2))**2
    if mode == 'sum':
        return torch.sum(loss)
    elif mode == 'raw':
        return loss.sum(dim=2).sum(dim=1)
f9e98e30d4299c79a93de6905c65dcb23da65ac1
7,723
def findpeer(port=None, os=None):
    """Args: port (defaults to any port)

    Finds a socket, which is connected to the specified port.
    Leaves socket in ESI."""
    if os == 'linux':
        code = """
findpeer:
    push -1
    push SYS_socketcall_getpeername
    mov ebp, esp
    pop ebx
    pop esi
.loop:
    push SYS_socketcall
    pop eax
    inc esi
    lea ecx, [esp-32]
    push 4
    pushad
    int 0x80
"""
        if port == None:
            return code + """
    test eax, eax
    popad
    pop edx
    jnz .loop
"""
        else:
            return code + """
    popad
    pop edx
    shr eax, 16
    cmp ax, 0x%04x
    jne .loop
""" % htons(int(port))
    elif os == 'freebsd':
        code = """
findpeer:
    push -1
    pop esi
    push SYS_getpeername
    pop eax
    mov ebp, esp
    pushad
.loop:
    inc esi
    pushad
    int 0x80
"""
        if port == None:
            return code + """
    test eax, eax
    popad
    jnz .loop
"""
        else:
            return code + """
    popad
    cmp word [ebp+2], 0x%04x
    jne .loop
""" % htons(int(port))
    else:
        bug('OS was neither linux nor freebsd')
89c9616d935629cf362a6638847af522183f4a10
7,724
import requests
import re


def api_wowlight_version_check(version: str) -> bool:
    """
    Checks incoming wow-lite wallet version, returns False when the version
    is too old and needs to be upgraded.
    :param version:
    :return: bool
    """
    url = "https://raw.githubusercontent.com/wownero/wow-lite-wallet/master/src/renderer/components/Landing/LandingPage.vue"
    try:
        resp = requests.get(url, headers={"User-Agent": "Mozilla 5.0"})
        resp.raise_for_status()
        content = resp.content.decode()
    except:
        return True  # default to true

    # parse latest version
    current = next(re.finditer(r"wowlite\?version=(\d+.\d+.\d+)", content), None)
    if not current:
        return False

    return version == current.group(1)
470f8580df357c206b595c1145e04e33fd897058
7,725
import math


def fruit_growth(jth: int, last_24_canopy_t):
    """
    Equation 9.38
    fruit_growth_rate_j = POTENTIAL_FRUIT_DRY_WEIGHT
        * math.exp(-math.exp(-curve_steepness*(days_after_fruit_set - fruit_development_time)))
    Returns: fruit growth rate [mg {CH2O} fruit^-1 d^-1]
    """
    fruit_development_rate = fruit_development(last_24_canopy_t)
    Fruit_Growth_Period = 1/(fruit_development_rate*86400)
    fruit_development_time = -93.4 + 548.0 * Fruit_Growth_Period
    curve_steepness = 1/(2.44 + 403.0 * fruit_development_time)
    days_after_fruit_set = ((jth-1)+0.5)*Fruit_Growth_Period/FRUIT_DEVELOPMENT_STAGES_NUM
    return POTENTIAL_FRUIT_DRY_WEIGHT*math.exp(-math.exp(-curve_steepness*(days_after_fruit_set - fruit_development_time)))
78d1ee0e9ea8d364b6282466fb9ee27dc9cbb602
7,726
from typing import Callable


def _pickled_cache_s(filepath: str) -> Callable[[Callable], Callable]:
    """Store the last result of the function call in a pickled file (string version)

    Args:
        filepath (str): The path of the file to read/write

    Returns:
        Callable[[Callable], Callable]: function decorator. The decorated
        function will also have an attribute function 'forced', that calls
        the function forcing cache overwriting"""
    return _pickled_cache_m(lambda *args, **kwargs: filepath)
b95703fc90275ba06d3816b442d07b14e4854eaf
7,727
import datetime


def home(request):
    """Index page view

    :param request: HTTP request
    :return: index page render
    """
    today = datetime.date.today()
    return render(request, 'taskbuster/index.html',
                  {'today': today, 'now': now()})
cccfa91a728ce4f5dd482bbbd9418ec94f102844
7,728
from typing import Tuple
from typing import Iterable


def get_trials_for_drug(
    drug: Tuple[str, str], *, client: Neo4jClient
) -> Iterable[Node]:
    """Return the trials for the given drug.

    Parameters
    ----------
    client :
        The Neo4j client.
    drug :
        The drug to query.

    Returns
    -------
    :
        The trials for the given drug.
    """
    return client.get_targets(
        drug,
        relation="tested_in",
        source_type="BioEntity",
        target_type="ClinicalTrial",
    )
64641e52468d46a3b4071d58cbdbff3167ff3fa6
7,729
from typing import List

import torch


def convert_features_to_dataset(all_features: List[InputFeaturesTC],
                                dataset_type: str = 'pytorch'
                                ) -> TensorDataset:
    """Converts a list of features into a dataset.

    Args:
        all_features (:obj:`list` of :obj:`InputFeatureTC`): the list of
            ``InputFeatureTC`` originating from a list of ``InputExampleTC``
            that will constitute the dataset.
        dataset_type (str): the type of dataset, currently only `pytorch` is
            supported.

    Returns:
        A pytorch TensorDataset.

    Raises:
        ValueError if `dataset_type` is not supported.
    """
    if dataset_type == 'pytorch':
        all_input_ids = torch.tensor([x.input_ids for x in all_features],
                                     dtype=torch.long)
        all_attention_mask = torch.tensor([x.attention_mask for x in all_features],
                                          dtype=torch.long)
        all_token_type_ids = torch.tensor([x.token_type_ids for x in all_features],
                                          dtype=torch.long)
        all_label_ids = torch.tensor([x.label_ids for x in all_features],
                                     dtype=torch.long)
        # Create Tensor dataset
        dataset = TensorDataset(all_input_ids, all_attention_mask,
                                all_token_type_ids, all_label_ids)
    else:
        raise ValueError(f'Invalid return dataset type: {dataset_type}')

    return dataset
88e892effbc60569d35d8f14e1a8032837d409e0
7,730
import requests
from bs4 import BeautifulSoup


def soup_from_name(username):
    """ Grabs bs4 object from html page """
    # html_source = urlopen('https://www.instagram.com/' + str(username) + '/')
    url = 'https://www.instagram.com/' + str(username) + '/'
    headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0)"
               "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"}
    html_source = requests.get(url, headers=headers).text
    return BeautifulSoup(html_source, 'html.parser')

    # react-root > section > main > div > div.Nd_Rl._2z6nI > article > div._4Kbb_ > div > h2
    # print(soup.body.span.section.main.div.div.article.div.div.h2)
442c6e9fa036fef59b82246462bf0e992384fd15
7,731
def SectionMenu(rating_key, title=None, base_title=None, section_title=None,
                ignore_options=True, section_items_key="all"):
    """
    displays the contents of a section
    :param section_items_key:
    :param rating_key:
    :param title:
    :param base_title:
    :param section_title:
    :param ignore_options:
    :return:
    """
    items = get_all_items(key=section_items_key, value=rating_key, base="library/sections")
    kind, deeper = get_items_info(items)

    title = unicode(title)
    section_title = title
    title = base_title + " > " + title

    oc = SubFolderObjectContainer(title2=title, no_cache=True, no_history=True)

    if ignore_options:
        add_ignore_options(oc, "sections", title=section_title, rating_key=rating_key,
                           callback_menu=IgnoreMenu)

    return dig_tree(oc, items, MetadataMenu,
                    pass_kwargs={"base_title": title, "display_items": deeper,
                                 "previous_item_type": "section",
                                 "previous_rating_key": rating_key})
3ba91e054de81c4d8eb32d2feaeb9ab99125683e
7,732
def round_floats_for_json(obj, ndigits=2, key_ndigits=None):
    """
    Tries to round all floats in obj in order to reduce json size.

    ndigits is the default number of digits to round to;
    key_ndigits allows you to override this for specific dictionary keys,
    though there is no concept of nested keys.

    It converts numpy arrays and iterables to lists, so it should only be
    used when serializing to json
    """
    if key_ndigits is None:
        key_ndigits = {}
    if isinstance(obj, np.floating):
        obj = float(obj)
    elif isinstance(obj, np.ndarray):
        obj = obj.tolist()

    if isinstance(obj, float):
        obj = round(obj, ndigits)
    elif isinstance(obj, dict):
        new_obj = {}
        for k, v in obj.items():
            this_ndigits = key_ndigits.get(k, ndigits)
            new_obj[k] = round_floats_for_json(v, this_ndigits, key_ndigits)
        return new_obj
    elif isinstance(obj, str):
        return obj
    else:
        try:
            return [round_floats_for_json(x, ndigits, key_ndigits) for x in obj]
        except TypeError:
            pass
    return obj
8143a3a063e45b6a501ca2de6f1bb5dd1b64e843
7,733
def read_shared(function_name, verb, request, local_variables=None):
    """all the shared code for each of these read functions"""
    command = function_name.split('_')[1]  # assumes fn name is query_<command>
    command_args, verb_args = create_filters(function_name, command, request,
                                             local_variables)

    verb = cleanup_verb(verb)
    columns = local_variables.get('columns', None)
    format = local_variables.get('format', None)

    ret, svc_inst = run_command_verb(
        command, verb, command_args, verb_args, columns, format)

    return ret
c60feac9c16cfcd2d503032826aedced20d2959d
7,734
import time

import requests


def http_put_request(
    portia_config: dict,
    endpoint: str,
    payload: dict,
    params: dict = None,
    optional_headers: dict = None
) -> object:
    """Makes an HTTP PUT request.

    Arguments:
        portia_config {dict} -- Portia's configuration arguments
        endpoint {str} -- endpoint to make the request to
        payload {dict} -- payload to send to the service

    Keyword Arguments:
        params {dict} -- params to send to the service (default: {None})
        optional_headers {dict} -- dictionary with other headers (default: {None})

    Returns:
        object -- response object
    """
    headers = {
        'Authorization': 'Bearer {0}'.format(portia_config.get('authorization'))
    }
    if optional_headers is not None:
        headers = {**headers, **optional_headers}

    start = time.time()
    response = requests.put(
        '{0}{1}'.format(portia_config.get('baseurl'), endpoint),
        headers=headers,
        params=params,
        json=payload
    )
    end = time.time()

    if portia_config.get('debug') == True:
        print(
            '[portia-debug]: status: {0} | {1:.4f} sec. | {2}'.format(
                response.status_code, end - start, response.url)
        )

    return response
3b56ffd5eb029a2141184cdf3a218779c657e073
7,735
def construct_SN_default_rows(timestamps, ants, nif, gain=1.0):
    """ Construct list of ants dicts for each timestamp with REAL, IMAG, WEIGHT = gains """
    default_nif = [gain] * nif
    rows = []
    for ts in timestamps:
        rows += [{'TIME': [ts],
                  'TIME INTERVAL': [0.1],
                  'ANTENNA NO.': [antn],
                  'REAL1': default_nif,
                  'REAL2': default_nif,
                  'IMAG1': default_nif,
                  'IMAG2': default_nif,
                  'WEIGHT 1': default_nif,
                  'WEIGHT 2': default_nif}
                 for antn in ants]
    return rows
b81e45d2d5299042b3332a2386a0fd4d2d6d59d7
7,736
import aiohttp
import pytest


async def test_disable(aresponses):
    """Test disabling AdGuard Home query log."""
    async def response_handler(request):
        data = await request.json()
        assert data == {"enabled": False, "interval": 1}
        return aresponses.Response(status=200)

    aresponses.add(
        "example.com:3000",
        "/control/querylog_info",
        "GET",
        aresponses.Response(
            status=200,
            headers={"Content-Type": "application/json"},
            text='{"interval": 1}',
        ),
    )
    aresponses.add(
        "example.com:3000", "/control/querylog_config", "POST", response_handler
    )
    aresponses.add(
        "example.com:3000",
        "/control/querylog_info",
        "GET",
        aresponses.Response(
            status=200,
            headers={"Content-Type": "application/json"},
            text='{"interval": 1}',
        ),
    )
    aresponses.add(
        "example.com:3000",
        "/control/querylog_config",
        "POST",
        aresponses.Response(status=500),
    )

    async with aiohttp.ClientSession() as session:
        adguard = AdGuardHome("example.com", session=session)
        await adguard.querylog.disable()
        with pytest.raises(AdGuardHomeError):
            await adguard.querylog.disable()
a9c211d5bf9a0c2842ae835215718f4f81430c69
7,737
import logging

import torch


def load_checkpoint(path: str,
                    device: torch.device = None,
                    logger: logging.Logger = None) -> MoleculeModel:
    """
    Loads a model checkpoint.

    :param path: Path where checkpoint is saved.
    :param device: Device where the model will be moved.
    :param logger: A logger for recording output.
    :return: The loaded :class:`~chemprop.models.model.MoleculeModel`.
    """
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print

    # Load model and args
    state = torch.load(path, map_location=lambda storage, loc: storage)
    args = TrainArgs()
    args.from_dict(vars(state['args']), skip_unsettable=True)
    loaded_state_dict = state['state_dict']

    if device is not None:
        args.device = device

    if args.quantileregression != 'None':
        model = PB_MoleculeModel(args)
    else:
        model = MoleculeModel(args)
    model_state_dict = model.state_dict()

    # Skip missing parameters and parameters of mismatched size
    pretrained_state_dict = {}
    for param_name in loaded_state_dict.keys():
        if param_name not in model_state_dict:
            info(f'Warning: Pretrained parameter "{param_name}" cannot be found in model parameters.')
        elif model_state_dict[param_name].shape != loaded_state_dict[param_name].shape:
            info(f'Warning: Pretrained parameter "{param_name}" '
                 f'of shape {loaded_state_dict[param_name].shape} does not match corresponding '
                 f'model parameter of shape {model_state_dict[param_name].shape}.')
        else:
            debug(f'Loading pretrained parameter "{param_name}".')
            pretrained_state_dict[param_name] = loaded_state_dict[param_name]

    # Load pretrained weights
    model_state_dict.update(pretrained_state_dict)
    model.load_state_dict(model_state_dict)

    if args.cuda:
        debug('Moving model to cuda')
    model = model.to(args.device)

    return model
b01786d969d952728ef0806e4e0dbb35ccc15b36
7,738
def kron_diag(*lts):
    """Compute diagonal of a KroneckerProductLazyTensor from the diagonals of the constituting tensors"""
    lead_diag = lts[0].diag()
    if len(lts) == 1:  # base case:
        return lead_diag
    trail_diag = kron_diag(*lts[1:])
    diag = lead_diag.unsqueeze(-2) * trail_diag.unsqueeze(-1)
    return diag.transpose(-1, -2).reshape(*diag.shape[:-2], -1)
d57bb679dede93ababb2d164cfc85132acef60db
7,739
import time


def makeBundleObj(config_fname, getPackage, getPackageLength):
    """Given a description of a thandy bundle in config_fname,
       return a new unsigned bundle object.  getPackage must be a function
       returning a package object for every package the bundle requires
       when given the package's name as input.  getPackageLength must be
       a function returning the length of the package file.
    """
    packages = []
    def ShortGloss(lang, val): packages[-1]['gloss'][lang] = val
    def LongGloss(lang, val): packages[-1]['longgloss'][lang] = val
    def Package(name, order, version=None, path=None, optional=False):
        packages.append({'name' : name,
                         'version' : version,
                         'path' : path,
                         'order' : order,
                         'optional' : optional,
                         'gloss' : {},
                         'longgloss' : {} })
    preload = {'ShortGloss' : ShortGloss, 'LongGloss' : LongGloss,
               'Package' : Package}
    r = readConfigFile(config_fname,
                       ['name',
                        'os',
                        'version',
                        'location',
                        ], ['arch'], preload)

    result = { '_type' : "Bundle",
               'at' : formatTime(time.time()),
               'name' : r['name'],
               'os' : r['os'],
               'version' : r['version'],
               'location' : r['location'],
               'packages' : packages }
    if r.has_key('arch'):
        result['arch'] = r['arch']

    for p in packages:
        try:
            pkginfo = getPackage(p['name'])
        except KeyError:
            raise thandy.FormatException("No such package as %s"%p['name'])

        p['hash'] = formatHash(getDigest(pkginfo))
        p['length'] = getPackageLength(p['name'])
        if p['path'] == None:
            p['path'] = pkginfo['location']
        if p['version'] == None:
            p['version'] = pkginfo['version']

    BUNDLE_SCHEMA.checkMatch(result)

    return result
4579cab99a2e1f7f52bc49bfd12c001aee06de21
7,740
import re


def find_English_term(term: list) -> tuple:
    """
    Find the English and numbers from a term list and remove the English and numbers from the term
    :param term: the term list
    :return term: the term list with the English and numbers removed
    :return Eng_terms: the removed English
    """
    temp_terms = []
    Eng_terms = []
    for i in range(len(term)):
        string = term[i]
        result = re.findall(r'[a-zA-Z0-9]+', string)
        for j in result:
            temp_terms.append(j)
            term[i] = re.sub(pattern=j, repl='', string=term[i])
    temp_terms = set(temp_terms)
    for k in temp_terms:
        Eng_terms.append(k)
    return term, Eng_terms
69507970eb226d2379bb11e121bc224b1ce741ad
7,741
def listplaylists(context):
    """
    *musicpd.org, stored playlists section:*

        ``listplaylists``

        Prints a list of the playlist directory.

        After each playlist name the server sends its last modification
        time as attribute ``Last-Modified`` in ISO 8601 format. To avoid
        problems due to clock differences between clients and the server,
        clients should not compare this value with their local clock.

    Output format::

        playlist: a
        Last-Modified: 2010-02-06T02:10:25Z
        playlist: b
        Last-Modified: 2010-02-06T02:11:08Z
    """
    result = []
    for playlist in context.backend.stored_playlists.playlists.get():
        result.append((u'playlist', playlist.name))
        last_modified = (playlist.last_modified or dt.datetime.now()).isoformat()
        # Remove microseconds
        last_modified = last_modified.split('.')[0]
        # Add time zone information
        # TODO Convert to UTC before adding Z
        last_modified = last_modified + 'Z'
        result.append((u'Last-Modified', last_modified))
    return result
1ede54286a0acb6b52fe919f7e3867b948bbf5cc
7,742
def add_target_variable(df: pd.DataFrame) -> pd.DataFrame:
    """Add column with the target variable to the given dataframe."""
    return df.assign(y=df.rent + df.admin_fee)
236f16bab38d36625173640d5223f9fed48f34fe
7,743
def get_address_from_public_key(public_key):
    """ Get bytes from public key object and call method that expect bytes

        :param public_key: Public key object
        :type public_key: ec.EllipticCurvePublicKey

        :return: address in bytes
        :rtype: bytes
    """
    public_key_bytes = get_public_key_bytes_compressed(public_key)
    return get_address_from_public_key_bytes(public_key_bytes)
775701261e07b9153807d9b5ae08f02050ecc51e
7,744
import re
import math


def read_ORIGEN_gamma_spectrum(output_filename, cooling_time_string):
    """
    Function for reading a gamma spectrum from an ORIGEN output file.
    """
    # Too long text may cause problems, so check for it.
    if len(cooling_time_string) >= 10:
        print("The cooling time could not be found in the input, the header text \"" +
              cooling_time_string + "\" is too long.")
        return 0, 0

    found_spectrum = False
    bin_count = [0]
    bin_edges = [0]

    f = open(output_filename, 'r')
    ORIGEN = f.read()
    if len(ORIGEN) < 1:
        # Did not read anything, or read an empty file. Return empty arrays
        print("Failed to open ORIGEN output file " + output_filename)
        return bin_edges, bin_count

    # get the gamma spectra from the output
    # The header we are looking for starts with this string, and ends with a total row,
    # the data we want is in between.
    spectrumpattern = re.compile("gamma spectra, photons\/sec\/basis(.*?)\s*totals", re.DOTALL)
    if re.search(spectrumpattern, ORIGEN):
        spectrum_list = re.findall(spectrumpattern, ORIGEN)
    else:
        # Failed to find any gamma spectrum, return empty arrays
        print("Failed to find a gamma spectrum in ORIGEN output file " + output_filename)
        return bin_edges, bin_count

    for spectrum in spectrum_list:
        spectrum_textlines = spectrum.splitlines()
        # Get the spectrum table header, search for cooling_time_string in the header
        headers = spectrum_textlines[3]
        # after removing the 23 first characters, each column header should start with a space,
        # followed by possibly more spaces for right-alignment, and then the cooling time string.
        # Each such header is 10 characters long.
        header_columns = headers[23:]
        # Column headers are padded with spaces at the beginning to be 10 characters wide.
        header_string = cooling_time_string.strip()
        while len(header_string) < 10:
            header_string = ' ' + header_string
        if header_columns.find(header_string) != -1:
            column = math.ceil(header_columns.find(header_string)/10)
            found_spectrum = True
            # allocate memory
            bin_count = [0] * (len(spectrum_textlines)-4)
            bin_edges = [0] * (len(spectrum_textlines)-3)
            # Table should start at row 4.
            for i in range(4, len(spectrum_textlines)):
                # read the gamma spectrum
                line = spectrum_textlines[i].strip()
                split_line = line.split(" ")
                # The split lines should have the following format:
                # <line number> <low bin edge> <hyphen> <high bin edge>
                # <first cooling time bin count> <second cooling time bin count> <third...>
                bin_count[i-4] = float(split_line[column + 3])
                bin_edges[i-4] = float(split_line[1])
            # Final upper bin edge.
            bin_edges[len(spectrum_textlines)-4] = float(split_line[3])

    if found_spectrum == False:
        # Did not find the requested spectra in the file, return empty arrays.
        print("Unable to find a gamma spectrum with cooling time " + cooling_time_string +
              " in ORIGEN output file " + output_filename)
        bin_count = [0]
        bin_edges = [0]
        return bin_edges, bin_count
    else:
        # Found the requested gamma spectrum, return it.
        # If several are found, this will return the last one, which is typically the one of interest.
        return bin_edges, bin_count
1e722bea88e9947f7c297a07bb4f0c5cb5ec4419
7,745
def process_cases(list_):
    """Process cases and determine whether group flag or empty line."""
    # Get information
    is_empty = (len(list_) == 0)

    if not is_empty:
        is_group = list_[0].isupper()
        is_comment = list_[0][0] == '#'
    else:
        is_group = False
        is_comment = False

    # Finishing
    return is_empty, is_group, is_comment
5a0dda6873417cfcd813efe30b64c9e0a71b9b11
7,746
def updateNestedDicts(d1, d2):
    """Updates two dictionaries, assuming they have the same entries"""
    finalDict = createDictionary()
    for key in d1:
        # print(key)
        newDict = updateDicts(d1[key], d2[key])
        finalDict[key] = newDict
    return finalDict
29fa5218cb4bca67f8e358aebf742025dd541789
7,747
def page_not_found(e):
    """error handler for page not found"""
    flash(e.description, 'danger')
    return render_template('main/404.html'), 404
a64941bca6bd9e90d35286e3d2474c2841ecb112
7,748
def get_cifar10_raw_data():
    """
    Gets raw CIFAR10 data from http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz.

    Returns:
        X_train: CIFAR10 train data in numpy array with shape (50000, 32, 32, 3).
        Y_train: CIFAR10 train labels in numpy array with shape (50000, ).
        X_test: CIFAR10 test data in numpy array with shape (10000, 32, 32, 3).
        Y_test: CIFAR10 test labels in numpy array with shape (10000, ).
    """
    X_train, Y_train, X_test, Y_test = load_cifar10(CIFAR10_FOLDER)

    return X_train, Y_train, X_test, Y_test
1606c8aa00729a6fb9dca8fbd5663e78d5c93503
7,749
from typing import Union
from pathlib import Path
import os
import random


def random_first_file(rootpath: Union[str, Path]) -> Path:
    """Return a random file from a directory tree, walking depth-first.

    Args:
        rootpath (Union[str, Path]): root path to search from

    Returns:
        Path: a path to the file
    """
    iterator = os.walk(rootpath)
    e = next(iterator)
    while not e[2]:  # keep walking until a directory containing files is found
        e = next(iterator)
    return Path(e[0]) / e[2][random.randint(0, len(e[2]) - 1)]
0df13bc9ba946ee21be2a9b9f9ffc564e8a4442f
7,750
from typing import OrderedDict def MovieMaker(images, dpath, site, scheck, coords, bandlist, datelist, bands): """ Function to build the movie """ failed = 0 while failed <2: spath = dpath + "UoL/FIREFLIES/VideoExports/%s" % coords["name"] # for bands in bandcombo: print("\n starting %s at:" % bands, pd.Timestamp.now()) # ========== Create a single dataarray for the raster images =========== sets = OrderedDict() if type(bands) == str: imstack = images[bands] sets[bands] = xr.concat(imstack, dim="time") fnout = "%s/LANDSAT_5_7_8_%s_%s.mp4" % (spath, coords["name"], bands) elif type(bands) == list: bndnm = "multi_" + "_".join(bands) for bnd in bands: imstack = images[bnd] sets[bnd] = xr.concat(imstack, dim="time") fnout = "%s/LANDSAT_5_7_8_%s_%s.mp4" % (spath, coords["name"], bndnm) else: ipdb.set_trace() # ========== Loop over each frame of the video ========== nx = [] def frame_maker(index): # ========== Pull the infomation from the pandas part of the loop ========== indx = int(index) info = datelist.iloc[int(indx)] #rowinfo[1] # # ========== Check the dates i'm exporting ========== # nx.append(frame.time.values) # ========== create and internal subplot ========== def _subplotmaker(ax, bnds, spt): # ========== Get the data for the frame ========== frame = sets[bnds].isel(time=int(indx)) # ========== Set the colors ========== # if bnds == "NRG": color = "blue" # else: # color = "purple" # ========== Grab the data ========== frame.plot.imshow(ax=ax, rgb="band")# , transform=ccrs.PlateCarree()) ## =========== Setup the annimation =========== ax.set_title(spt) ax.scatter(coords.lon, coords.lat, 5, c=color, marker='+')#, transform=ccrs.PlateCarree()) # ========== Set up the box ========== blonO = np.min([coords["lonb_COP_min"], coords["lonb_MOD_min"]]) blatO = np.min([coords["latb_COP_min"], coords["latb_MOD_min"]]) blonM = np.max([coords["lonb_COP_max"], coords["lonb_MOD_max"]]) blatM = np.max([coords["latb_COP_max"], coords["latb_MOD_max"]]) rect = mpl.patches.Rectangle( (blonO,blatO), blonM-blonO, blatM-blatO,linewidth=2,edgecolor=color,facecolor='none') ax.add_patch(rect) # +++++ change the number od ticks ax.xaxis.set_major_locator(plt.MaxNLocator(5)) # ========== Build the plots ========== if type(bands) == str: # Set up the figure fig, axs = plt.subplots(1, figsize=(11,10)) # create the title spt = "%s %s %s frame %d" % (bands, info.satellite, info.date.split(" ")[0], datelist.iloc[indx]["index"]) # make the figure _subplotmaker(axs, bands, spt) plt.axis('scaled') else: # Set up the figure fig, axs = plt.subplots(1,len(bands), sharey=True, figsize=(20,8),) # +++++ Loop over the figure combo +++++ for ax, bnds, in zip(axs, bands): # make the figure _subplotmaker(ax, bnds, bnds) ax.set_aspect('equal') # Get rid of the excess lats for ax in axs.flat: ax.label_outer() # ========== Change parms for the entire plot ========= fig.suptitle("%s %s - Frame%03d" % ( info.satellite, pd.Timestamp(info.date).strftime('%d-%m-%Y'), datelist.iloc[indx]["index"])) # ipdb.set_trace() plt.axis('scaled') # +++++ Make the images bigger by eleminating space +++++ fig.subplots_adjust(left=0.1, right=0.9, top=1, bottom=0, wspace=0, hspace=0) #top = 1, bottom = 1, right = 1, left = 1, plt.tight_layout() plt.margins(0,0) return mplfig_to_npimage(fig) # ========== Eposrt the videos ========== mov = mpe.VideoClip(frame_maker, duration=int(datelist.shape[0])) # plays the clip (and its mask and sound) twice faster # newclip = clip.fl_time(lambda: 2*t, apply_to=['mask','audio']) # fnout = 
"%s/LANDSAT_5_7_8_%s_complete.txt" % (spath, coords["name"]) print("Starting Write of the data at:", pd.Timestamp.now()) try: mov.write_videofile(fnout, fps=1) return except Exception as ex: warn.warn(str(ex)) print("Movie making failed. This will need to be redone") failed +=1 raise ValueError
3daba6917329e1be8d58c36bfe709488db34d430
7,751
def TypeUrlToMessage(type_url):
    """Returns a message instance corresponding to a given type URL."""
    if not type_url.startswith(TYPE_URL_PREFIX):
        raise ValueError("Type URL has to start with a prefix %s: %s" %
                         (TYPE_URL_PREFIX, type_url))

    full_name = type_url[len(TYPE_URL_PREFIX):]
    try:
        return symbol_database.Default().GetSymbol(full_name)()
    except KeyError as e:
        raise ProtobufTypeNotFound(e.message)
03727fd60bdebed6b47768f2ec489c68b0a8a45b
7,752
def encode_sentence(tokenized_sentence, max_word_len):
    """
    Encode sentence as one-hot tensor of shape
    [None, MAX_WORD_LENGTH, CHARSET_SIZE].
    """
    encoded_sentence = []
    sentence_len = len(tokenized_sentence)
    for word in tokenized_sentence:
        # Encode every word as matrix of shape [MAX_WORD_LENGTH,
        # CHARSET_SIZE] where each valid character gets encoded as one-hot
        # row vector of word matrix.
        encoded_word = np.zeros([max_word_len, len(CHARSET)])
        for char, encoded_char in zip(word, encoded_word):
            if char in CHARSET:
                encoded_char[ENCODER[char]] = 1.0
        encoded_sentence.append(encoded_word)
    return np.array(encoded_sentence), sentence_len
77cadac1b4d29976883cc4d8f7540992b997c381
7,753
def footnote_ref(key, index):
    """Renders a footnote

    :returns: list of `urwid Text markup
        <http://urwid.org/manual/displayattributes.html#text-markup>`_
        tuples.
    """
    return render_no_change(key)
52182e90a73f2b0fb4499b919b3ebf71b562dcbf
7,754
def mconcat(*args):
    """
    Apply monoidal concat operation in arguments.

    This function infers the monoid from value, hence it requires at least
    one argument to operate.
    """
    values = args[0] if len(args) == 1 else args
    instance = semigroup[type(values[0])]
    return instance(*values)
0c939ab0da77843b96c11dcf523557351a602a65
7,755
def parallelMeasurements(filename='CCD204_05325-03-02_Hopkinson_EPER_data_200kHz_one-output-mode_1.6e10-50MeV.txt',
                         datafolder='/Users/sammy/EUCLID/CTItesting/data/',
                         gain1=1.17, limit=105, returnScale=False):
    """

    :param filename:
    :param datafolder:
    :param gain1:
    :param limit:

    :return:
    """
    tmp = np.loadtxt(datafolder + filename, usecols=(0, 5))  # 5 = 152.55K
    ind = tmp[:, 0]
    values = tmp[:, 1]
    values *= gain1

    if returnScale:
        return ind, values
    else:
        values = values[ind > -5.]
        values = np.abs(values[:limit])
        return values
e40dedd715d76a52729c218623a90b53123f4c27
7,756
import os


def _parse_env(name, default=None, dtype=None):
    """Parse input variable from `os.environ`.

    Parameters
    ----------
    name : str
        Name of the variable to parse from env.
    default : any, optional
        Set default value of variable. If None (default), parameter is
        considered required and so must be defined in environment.
        Otherwise, RuntimeError will be raised.
    dtype : type or None, optional
        Expected dtype of the variable. If None (default), variable will be
        parsed as a string. Other accepted values are: float, int, bool, str.
    """
    try:
        val = os.environ[name]
    except KeyError:
        if default is not None:  # Let's use the default value if var not in env
            return default
        raise RuntimeError("variable {:} not specified".format(name))

    # Parse var from env using the specified dtype
    if dtype is None or dtype == str:
        return str(val)
    if dtype == int or dtype == float or dtype == bool:
        return dtype(val)
    else:
        raise TypeError(
            "accepted dtypes are int, float, bool, str (or None)")
ab947506899ffefafc37197877bcb51ea9bc78da
7,757
from typing import Union


def get_all_urls(the_json: str) -> list:
    """
    Extract all URLs and title from Bookmark files

    Args:
        the_json (str): All Bookmarks read from file

    Returns:
        list(tuple): List of tuples with Bookmarks url and title
    """

    def extract_data(data: dict):
        if isinstance(data, dict) and data.get('type') == 'url':
            urls.append({'name': data.get('name'), 'url': data.get('url')})
        if isinstance(data, dict) and data.get('type') == 'folder':
            the_children = data.get('children')
            get_container(the_children)

    def get_container(o: Union[list, dict]):
        if isinstance(o, list):
            for i in o:
                extract_data(i)
        if isinstance(o, dict):
            for k, i in o.items():
                extract_data(i)

    urls = list()
    get_container(the_json)
    s_list_dict = sorted(urls, key=lambda k: k['name'], reverse=False)
    ret_list = [(l.get('name'), l.get('url')) for l in s_list_dict]
    return ret_list
3a76a42fd303e603709c7703fbe877bb47a64a5f
7,758
from typing import Callable
from typing import Iterable


def _goertzel(
    block_size: int, sample_rate: float, freq: float
) -> Callable[[Iterable[float]], float]:
    """
    Goertzel algorithm
    info: https://www.ti.com/lit/an/spra066/spra066.pdf
    """
    k = round(block_size * (freq / sample_rate))
    omega = (2 * pi * k) / block_size
    cos_omega = 2 * cos(omega)

    def _filter(samples: Iterable[float]) -> float:
        s_0 = 0
        s_1 = 0
        s_2 = 0
        for x_n in samples:
            s_0 = x_n + cos_omega * s_1 - s_2
            s_2 = s_1
            s_1 = s_0
        return s_0 - exp(-1.0 * omega) * s_1

    return _filter
4e9a039435ccc63cfa1506730c89c915f8cc14c4
7,759
def rotate_xyz(x, y, z, angles=None, inverse=False):
    """
    Rotate a set of vectors pointing in the direction x,y,z

    angles is a list of longitude and latitude angles to rotate by.
    First the longitude rotation is applied (about z axis), then the
    latitude angle (about y axis).
    """
    if angles is None:
        return x, y, z

    xyz = np.array([x, y, z])

    for dphi, dlon, dlat in angles:
        dphi *= c
        dlon *= c
        dlat *= c
        m0 = np.array([[1, 0, 0],
                       [0, np.cos(dphi), np.sin(dphi)],
                       [0, -np.sin(dphi), np.cos(dphi)]])

        m1 = np.array([[np.cos(dlon), -np.sin(dlon), 0],
                       [np.sin(dlon), np.cos(dlon), 0],
                       [0, 0, 1]])

        m2 = np.array([[np.cos(dlat), 0, -np.sin(dlat)],
                       [0, 1, 0],
                       [np.sin(dlat), 0, np.cos(dlat)]])

        m = np.dot(np.dot(m1, m2), m0)

    if inverse:
        m = np.linalg.inv(m)

    xyz2 = np.dot(m, xyz)

    return xyz2
803668619f1ad46f0a48db88f2aba05800f85487
7,760
def indented_open(Filename, Indentation=3):
    """Opens a file but indents all the lines in it. In fact, a temporary
       file is created with all lines of the original file indented. The
       filehandle returned points to the temporary file."""

    IndentString = " " * Indentation

    try:
        fh = open(Filename, "rb")
    except:
        print "%s:error: indented opening of file '%s' " % (this_name, Filename)
        sys.exit(-1)

    new_content = ""
    for line in fh.readlines():
        new_content += IndentString + line
    fh.close()

    tmp_filename = Filename + ".tmp"

    if tmp_filename not in temporary_files:
        temporary_files.append(copy(tmp_filename))

    fh = open(tmp_filename, "wb")
    fh.write(new_content)
    fh.close()
    fh = open(tmp_filename)
    return fh
26ba2213c5e9c8fd7932c92f4a162e68e642a01e
7,761
def gan_loss(
        gan_model: tfgan.GANModel,
        generator_loss_fn=tfgan.losses.modified_generator_loss,
        discriminator_loss_fn=tfgan.losses.modified_discriminator_loss,
        gradient_penalty_weight=None,
        gradient_penalty_epsilon=1e-10,
        gradient_penalty_target=1.0,
        feature_matching=False,
        add_summaries=False):
    """
    Create a GAN loss set, with support for feature matching.

    Args:
        gan_model: the model
        feature_matching: Whether to add a feature matching loss to the
            encoder and generator.
    """
    gan_loss = tfgan.gan_loss(
        gan_model,
        generator_loss_fn=generator_loss_fn,
        discriminator_loss_fn=discriminator_loss_fn,
        gradient_penalty_weight=gradient_penalty_weight,
        gradient_penalty_target=1.0,
        add_summaries=add_summaries)

    if feature_matching:
        fm_loss = feature_matching_loss(scope=gan_model.discriminator_scope.name)
        if add_summaries:
            tf.summary.scalar("feature_matching_loss", fm_loss)

        # or combine the original adversarial loss with FM
        gen_loss = gan_loss.generator_loss + fm_loss
        disc_loss = gan_loss.discriminator_loss
        gan_loss = tfgan.GANLoss(gen_loss, disc_loss)

    return gan_loss
dfa1639e049737f943a70ea1d5dcdfe9463b0102
7,762
def list_subjects():
    """
    List all subjects
    """
    check_admin()

    subjects = Subject.query.all()

    return render_template('admin/subjects/subjects.html',
                           subjects=subjects, title="Subjects")
aef910bbae3d25a573b23646ca849a2b790be680
7,763
async def async_setup(opp, config):
    """Set up the Tibber component."""
    opp.data[DATA_OPP_CONFIG] = config

    if DOMAIN not in config:
        return True

    opp.async_create_task(
        opp.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data=config[DOMAIN],
        )
    )

    return True
4186491c248d862186a93954ed47e301c4526aea
7,764
def beautifyValue(v):
    """
    Converts an object to a better version for printing, in particular:
        - if the object converts to float, then its float value is used
        - if the object can be rounded to int, then the int value is preferred

    Parameters
    ----------
    v : object
        the object to try to beautify

    Returns
    -------
    object or float or int
        the beautified value
    """
    try:
        v = float(v)
        if v.is_integer():
            return int(v)
        return v
    except:
        if type(v) == np.str_:
            v = v.replace('\n', '').replace(' ', '_')
        return v
aa0a8881989cbfde7a7b5f506c4f45b844df0753
7,765
def english_to_french(english_text):
    """A function written using the IBM API to translate from English to French."""
    translation = LT.translate(text=english_text, model_id='en-fr').get_result()
    french_text = translation['translations'][0]['translation']
    return french_text
0f93fe02f8f0898b0d62c6ce4880b9eae4303459
7,766
def get_responsibilities():
    """Returns a list of the rooms in the approver's responsibility."""
    email = get_jwt_identity()

    # Checks if the reader is an approver
    approver = Approver.query.filter_by(email=email).first()
    if not approver:
        return bad_request("This user does not have the approver role!")

    room_list = get_responsibilites_helper(approver)

    return ok({"responsibilities": room_list})
22ca15c30c5dc5bf5c528e2e19b96af8ab8f2d53
7,767
import datasets


def get_test_loader(dataset):
    """
    Get test dataloader of source domain or target domain
    :return: dataloader
    """
    if dataset == 'MNIST':
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
            transforms.Normalize(mean=params.dataset_mean, std=params.dataset_std)
        ])
        data = datasets.MNIST(root=params.mnist_path, train=False,
                              transform=transform, download=True)
        dataloader = DataLoader(dataset=data, batch_size=1, shuffle=False)
    elif dataset == 'MNIST_M':
        transform = transforms.Compose([
            # transforms.RandomCrop((28)),
            transforms.CenterCrop((28)),
            transforms.ToTensor(),
            transforms.Normalize(mean=params.dataset_mean, std=params.dataset_std)
        ])
        data = datasets.ImageFolder(root=params.mnistm_path + '/test', transform=transform)
        dataloader = DataLoader(dataset=data, batch_size=1, shuffle=False)
    elif dataset == 'MNIST_M_5':
        transform = transforms.Compose([
            # transforms.RandomCrop((28)),
            transforms.CenterCrop((28)),
            transforms.ToTensor(),
            transforms.Normalize(mean=params.dataset_mean, std=params.dataset_std)
        ])
        data = datasets.ImageFolder(root=params.mnistm_5_path + '/test', transform=transform)
        dataloader = DataLoader(dataset=data, batch_size=1, shuffle=False)
    elif dataset == 'SVHN':
        transform = transforms.Compose([
            transforms.CenterCrop((28)),
            transforms.ToTensor(),
            transforms.Normalize(mean=params.dataset_mean, std=params.dataset_std)
        ])
        data = datasets.SVHN(root=params.svhn_path, split='test',
                             transform=transform, download=True)
        dataloader = DataLoader(dataset=data, batch_size=1, shuffle=False)
    # elif dataset == 'SynDig':
    #     transform = transforms.Compose([
    #         transforms.CenterCrop((28)),
    #         transforms.ToTensor(),
    #         transforms.Normalize(mean=params.dataset_mean, std=params.dataset_std)
    #     ])
    #
    #     data = SynDig.SynDig(root=params.syndig_path, split='test',
    #                          transform=transform, download=False)
    #
    #     dataloader = DataLoader(dataset=data, batch_size=1, shuffle=False)
    elif dataset == 'dslr':
        transform = transforms.Compose([
            transforms.RandomCrop((224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=params.dataset_mean, std=params.dataset_std)
        ])
        data = datasets.ImageFolder(params.dslr_path + '/test', transform=transform)
        dataloader = DataLoader(dataset=data, batch_size=params.batch_size, shuffle=True)
    elif dataset == "art":
        transform = transforms.Compose([
            ResizeImage(256),
            transforms.RandomCrop((224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        data = datasets.ImageFolder(params.art_path + '/test', transform=transform)
        dataloader = DataLoader(dataset=data, batch_size=params.batch_size, shuffle=True)
    elif dataset == "clipart":
        transform = transforms.Compose([
            ResizeImage(256),
            transforms.RandomCrop((224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        data = datasets.ImageFolder(params.clipart_path + '/test', transform=transform)
        dataloader = DataLoader(dataset=data, batch_size=params.batch_size, shuffle=True)
    else:
        raise Exception('There is no dataset named {}'.format(str(dataset)))

    return dataloader
52e1570a7911bf9234e76c27b2156b1a7f358164
7,768
from typing import Optional
from datetime import datetime


def get_wem_facility_intervals(from_date: Optional[datetime] = None) -> WEMFacilityIntervalSet:
    """Obtains WEM facility intervals from NEM web.

    Will default to most recent date

    @TODO not yet smart enough to know if it should check current or archive
    """
    content = wem_downloader(_AEMO_WEM_SCADA_URL, from_date)

    _models = parse_wem_facility_intervals(content)

    wem_set = WEMFacilityIntervalSet(
        crawled_at=datetime.now(),
        live=False,
        source_url=_AEMO_WEM_SCADA_URL,
        intervals=_models,
    )

    return wem_set
7e9140a2ffa1690dc8ac1eb346d8b42a1664b4cd
7,769
import http.client
import os
import typing
import urllib.parse
import urllib.request

import __main__


def push(
    message,
    user: str = None,
    api_token: str = None,
    device: str = None,
    title: str = None,
    url: str = None,
    url_title: str = None,
    priority: str = None,
    timestamp: str = None,
    sound: str = None,
) -> typing.Union[http.client.HTTPResponse, typing.BinaryIO]:
    """Pushes the notification.

    API Reference: https://pushover.net/api

    Args:
        message: Your message
        user: The user/group key (not e-mail address) of your user (or you),
            viewable when logged into our dashboard (often referred to as
            USER_KEY in our documentation and code examples)
        api_token: Your application's API token
        device: Your user's device name to send the message directly to that
            device, rather than all of the user's devices
        title: Your message's title, otherwise your app's name is used
        url: A supplementary URL to show with your message
        url_title: A title for your supplementary URL, otherwise just the URL
            is shown
        priority: Send as -1 to always send as a quiet notification, 1 to
            display as high-priority and bypass the user's quiet hours, or 2
            to also require confirmation from the user
        timestamp: A Unix timestamp of your message's date and time to display
            to the user, rather than the time your message is received by our API
        sound: The name of one of the sounds supported by device clients to
            override the user's default sound choice

    Returns:
        HTTP response from API call
    """
    if user is None or api_token is None:
        user, api_token = get_credentials()

    api_url = "https://api.pushover.net/1/messages.json"

    if title is None:
        if getattr(__main__, "__file__", None):
            title = os.path.basename(__main__.__file__)
        else:
            title = "n8scripts"

    payload_dict = {
        "token": api_token,
        "user": user,
        "message": message,
        "device": device,
        "title": title,
        "url": url,
        "url_title": url_title,
        "priority": priority,
        "timestamp": timestamp,
        "sound": sound,
    }
    payload = urllib.parse.urlencode({k: v for k, v in payload_dict.items() if v})

    with urllib.request.urlopen(api_url, data=payload.encode()) as resp:
        return resp
3166be0bae5d21313cecaedf2fa8cf11c7bab0c5
7,770
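A short usage sketch for push(); the user key and token below are placeholders, and get_credentials() is the project-local fallback used when they are omitted.

# Send a simple notification with explicit (placeholder) credentials.
resp = push(
    "Backup finished",
    user="USER_KEY_PLACEHOLDER",
    api_token="APP_TOKEN_PLACEHOLDER",
    priority="1",
)
print(resp.status)  # 200 on success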
def _get_static_predicate(pred):
  """Helper function for statically evaluating predicates in `cond`."""
  if pred in {0, 1}:  # Accept 1/0 as valid boolean values
    pred_value = bool(pred)
  elif isinstance(pred, bool):
    pred_value = pred
  elif isinstance(pred, tf.Tensor):
    pred_value = tf.get_static_value(pred)

    # TODO(jamieas): remove the dependency on `pywrap_tensorflow`.
    # pylint: disable=protected-access
    if pred_value is None:
      pred_value = c_api.TF_TryEvaluateConstant_wrapper(pred.graph._c_graph,
                                                        pred._as_tf_output())
    # pylint: enable=protected-access
  else:
    raise TypeError("`pred` must be a Tensor, or a Python bool, or 1 or 0. "
                    "Found instead: %s" % pred)

  return pred_value
deb27bc9eb409d7557a74e268a67bbd671afa0a2
7,771
def add_init_or_construct(template, variable_slot, new_data, scope, add_location=-1):
    """Add init or construct statement."""
    if isinstance(new_data, list):
        template[variable_slot][scope].extend(new_data)
        return template

    if add_location < 0:
        template[variable_slot][scope].append(new_data)
    else:
        template[variable_slot][scope].insert(add_location, new_data)
    return template
125bc4e34dff837372dbbdc70c69a08a1e83e176
7,772
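A small illustration of how the template structure above is mutated; the slot and scope names are made up for the example.

template = {"init": {"body": ["self.x = 1"]}}
add_init_or_construct(template, "init", "self.y = 2", "body")                   # append
add_init_or_construct(template, "init", "self.z = 0", "body", add_location=0)   # insert at front
add_init_or_construct(template, "init", ["self.a = 3", "self.b = 4"], "body")   # extend with a list
print(template["init"]["body"])
# ['self.z = 0', 'self.x = 1', 'self.y = 2', 'self.a = 3', 'self.b = 4']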
import numpy as np


def im2col_indices(x, field_height, field_width, padding=1, stride=1):
    """ An implementation of im2col based on some fancy indexing """
    # Zero-pad the input
    p = padding
    x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')

    k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding, stride)

    cols = x_padded[:, k, i, j]
    C = x.shape[1]
    cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
    return cols
b2f3f24b3a03ea70efbf5f41cbbfd61fe2bf8cbd
7,773
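im2col_indices relies on a companion get_im2col_indices helper that is not part of this snippet; below is a sketch of that helper in the style of the CS231n assignments (the real project may define it differently), followed by a shape check.

import numpy as np

def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
    # Build the (channel, row, col) index arrays used for the fancy indexing above.
    N, C, H, W = x_shape
    out_height = (H + 2 * padding - field_height) // stride + 1
    out_width = (W + 2 * padding - field_width) // stride + 1
    i0 = np.tile(np.repeat(np.arange(field_height), field_width), C)
    i1 = stride * np.repeat(np.arange(out_height), out_width)
    j0 = np.tile(np.arange(field_width), field_height * C)
    j1 = stride * np.tile(np.arange(out_width), out_height)
    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
    j = j0.reshape(-1, 1) + j1.reshape(1, -1)
    k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
    return (k, i, j)

x = np.random.randn(2, 3, 8, 8)
cols = im2col_indices(x, 3, 3, padding=1, stride=1)
print(cols.shape)  # (3*3*3, 2*8*8) = (27, 128)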
import os


def get_subdirs(dir):
    """Get the sub-directories of a given directory."""
    return [os.path.join(dir, entry) for entry in os.listdir(dir)
            if os.path.isdir(os.path.join(dir, entry))]
64b204b1c2878e454910b3d27a326d16a585477a
7,774
import numpy as np


def nms_1d(src, win_size, file_duration):
    """1D Non maximum suppression.

    src: vector of length N
    """
    pos = []
    src_cnt = 0
    max_ind = 0
    ii = 0
    ee = 0
    width = src.shape[0] - 1
    while ii <= width:

        if max_ind < (ii - win_size):
            max_ind = ii - win_size

        ee = np.minimum(ii + win_size, width)

        while max_ind <= ee:
            src_cnt += 1
            if src[int(max_ind)] > src[int(ii)]:
                break
            max_ind += 1

        if max_ind > ee:
            pos.append(ii)
            max_ind = ii + 1
            ii += win_size

        ii += 1

    pos = np.asarray(pos).astype(int)  # np.int is removed in recent NumPy; the builtin int is equivalent
    val = src[pos]

    # remove peaks near the end
    inds = (pos + win_size) < src.shape[0]
    pos = pos[inds]
    val = val[inds]

    # set output to between 0 and 1, then put it in the correct time range
    pos = pos / float(src.shape[0])
    pos = pos * file_duration

    return pos, val
70f2e15ce4044095d74d02fd87e779a2d3b206c2
7,775
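A quick check of the non-maximum suppression above on a synthetic signal; the window size and file duration are arbitrary.

import numpy as np

rng = np.random.RandomState(0)
src = rng.rand(1000)
src[100] = src[500] = src[900] = 5.0  # three obvious peaks
pos, val = nms_1d(src, win_size=50, file_duration=10.0)
print(pos, val)  # peak times in seconds and their heights; the three spikes should dominate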
import numpy as np
import torch
from torchvision import utils as vutils


def tensor_text_to_canvas(image, text=None, col=8, scale=False):
    """
    :param image: Tensor / numpy in shape of (N, C, H, W)
    :param text: [str, ] * N
    :param col:
    :return: uint8 numpy of (H, W, C), in scale [0, 255]
    """
    if scale:
        image = image / 2 + 0.5
    if torch.is_tensor(image):
        image = image.cpu().detach().numpy()

    image = write_text_on_image(image, text)  # numpy (N, C, H, W) in scale [0, 1]
    image = vutils.make_grid(torch.from_numpy(image), nrow=col)  # (C, H, W)
    image = image.numpy().transpose([1, 2, 0])
    image = np.clip(255 * image, 0, 255).astype(np.uint8)
    return image
5c37d9b3e72d5df14d71fa88aff429081c1f5469
7,776
import six


def is_sequence(input):
    """Return a bool indicating whether input is a sequence.

    Parameters
    ----------
    input
        The input object.

    Returns
    -------
    bool
        ``True`` if input is a sequence otherwise ``False``.

    """
    return (isinstance(input, six.collections_abc.Sequence) and
            not isinstance(input, six.string_types))
1b11275843adaf32618a09d77ec6053039085b54
7,777
import os
import re


def extract_filename(path):
    """Parse out the file name from a file path

    Parameters
    ----------
    path : string
        input path to parse filename from

    Returns
    -------
    file_name : string
        file name (last part of path), empty string if none found
    """
    # get last group of a path
    if path:
        file_name = os.path.basename(path)
        file_name = re.match(r".*?\s*(\S+\.[^ \s,]+)\s*", file_name)
        if file_name:
            return file_name.group(1)
    return ''
cb30ea5177b5e563ab37056f2dd50ca11d0929e5
7,778
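A few hypothetical calls showing the intended behaviour of extract_filename:

print(extract_filename("/var/log/archive/report-2021.tar.gz"))  # report-2021.tar.gz
print(extract_filename("notes.txt"))                            # notes.txt
print(extract_filename(""))                                     # '' (empty path)
print(extract_filename("/var/log/"))                            # '' (no file name with an extension)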
from django.db.models.fields.related_descriptors import (
    ForwardManyToOneDescriptor,
    ForwardOneToOneDescriptor,
    ManyToManyDescriptor,
    ReverseManyToOneDescriptor,
    ReverseOneToOneDescriptor,
)


def auto_prefetch_relationship(name, prepare_related_queryset=noop, to_attr=None):
    """
    Given the name of a relationship, return a prepare function which introspects the
    relationship to discover its type and generates the correct set of `select_related`
    and `include_fields` calls to apply to efficiently load it. A queryset function may
    also be passed, which will be applied to the related queryset.

    This is by far the most complicated part of the entire library. The reason it's so
    complicated is because Django's related object descriptors are inconsistent: each
    type has a slightly different way of accessing its related queryset, the name of
    the field on the other side of the relationship, etc.
    """

    def prepare(queryset):
        related_descriptor = getattr(queryset.model, name)

        if type(related_descriptor) in (
            ForwardOneToOneDescriptor,
            ForwardManyToOneDescriptor,
        ):
            return prefetch_forward_relationship(
                name,
                related_descriptor.field.related_model.objects.all(),
                prepare_related_queryset,
                to_attr,
            )(queryset)
        if type(related_descriptor) is ReverseOneToOneDescriptor:
            return prefetch_reverse_relationship(
                name,
                related_descriptor.related.field.name,
                related_descriptor.related.field.model.objects.all(),
                prepare_related_queryset,
                to_attr,
            )(queryset)
        if type(related_descriptor) is ReverseManyToOneDescriptor:
            return prefetch_reverse_relationship(
                name,
                related_descriptor.rel.field.name,
                related_descriptor.rel.field.model.objects.all(),
                prepare_related_queryset,
                to_attr,
            )(queryset)
        if type(related_descriptor) is ManyToManyDescriptor:
            field = related_descriptor.rel.field
            if related_descriptor.reverse:
                related_queryset = field.model.objects.all()
            else:
                related_queryset = field.target_field.model.objects.all()

            return prefetch_many_to_many_relationship(
                name,
                related_queryset,
                prepare_related_queryset,
                to_attr,
            )(queryset)

    return prepare
baac4ed7215c89311badb22bafe8365fd7be2263
7,779
def no_conjugate_member(magic_flag):
    """should not raise E1101 on something.conjugate"""
    if magic_flag:
        something = 1.0
    else:
        something = 1.0j
    if isinstance(something, float):
        return something
    return something.conjugate()
5e32d31aa907ac9de2bd153bbe61354207262409
7,780
def sub_ntt(f_ntt, g_ntt):
    """Subtraction of two polynomials (NTT representation)."""
    return sub_zq(f_ntt, g_ntt)
b2e538a00bb4b46e52258080ad9007358c82bc71
7,781
import numpy as np


def is_pareto_efficient(costs):
    """
    Find the pareto-efficient points given an array of costs.

    Parameters
    ----------
    costs : np.ndarray
        Array of shape (n_points, n_costs).

    Returns
    -------
    is_efficient_mask : np.ndarray (dtype:bool)
        Array of which elements in costs are pareto-efficient.
    """
    is_efficient = np.arange(costs.shape[0])
    n_points = costs.shape[0]
    next_point_index = 0  # Next index in the is_efficient array to search for
    while next_point_index < len(costs):
        nondominated_point_mask = np.any(costs < costs[next_point_index], axis=1)
        nondominated_point_mask[next_point_index] = True
        is_efficient = is_efficient[nondominated_point_mask]  # Remove dominated points
        costs = costs[nondominated_point_mask]
        next_point_index = np.sum(nondominated_point_mask[:next_point_index]) + 1
    is_efficient_mask = np.zeros(n_points, dtype=bool)
    is_efficient_mask[is_efficient] = True
    return is_efficient_mask
c7564cab171b833b84bf16a24242666f05022eb2
7,782
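A small sanity check for the Pareto filter above (costs are minimized, so a point is kept unless another point is lower in every dimension):

import numpy as np

costs = np.array([
    [1.0, 2.0],
    [2.0, 1.0],
    [1.5, 1.5],
    [3.0, 3.0],   # dominated by [1.5, 1.5]
])
print(is_pareto_efficient(costs))  # expected: [ True  True  True False]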
def merge_dict(base, delta, merge_lists=False, skip_empty=False, no_dupes=True, new_only=False):
    """
    Recursively merges two dictionaries, including dictionaries within dictionaries.

    Args:
        base: Target for merge
        delta: Dictionary to merge into base
        merge_lists: if a list is found merge contents instead of replacing
        skip_empty: if an item in delta is empty, don't overwrite base
        no_dupes: when merging lists deduplicate entries
        new_only: only merge keys not yet in base
    """
    for k, d in delta.items():
        b = base.get(k)
        if isinstance(d, dict) and isinstance(b, dict):
            merge_dict(b, d, merge_lists, skip_empty, no_dupes, new_only)
        else:
            if new_only and k in base:
                continue
            if skip_empty and not d:
                # don't replace if the new entry is empty
                pass
            elif all((isinstance(b, list), isinstance(d, list), merge_lists)):
                if no_dupes:
                    base[k] += [item for item in d if item not in base[k]]
                else:
                    base[k] += d
            else:
                base[k] = d
    return base
74b9d29a3137826319f10dbf6f86e65015c53659
7,783
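A brief example of the recursive merge behaviour, including the list-merging and skip_empty options:

base = {"name": "alpha", "tags": ["a"], "cfg": {"retries": 3, "debug": False}}
delta = {"name": "", "tags": ["a", "b"], "cfg": {"debug": True}}
merge_dict(base, delta, merge_lists=True, skip_empty=True)
print(base)
# {'name': 'alpha', 'tags': ['a', 'b'], 'cfg': {'retries': 3, 'debug': True}}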
def redirect_or_error(opt, key, override=''):
    """
    Tests if a redirect URL is available and redirects, or raises a
    MissingRequiredSetting exception.
    """
    r = (override or opt)
    if r:
        return redirect(r)
    raise MissingRequiredSetting('%s.%s' % (options.KEY_DATA_DICT, key))
2dbe71b8332b79c242108cc133fc51bf195fac8a
7,784
import numpy as np


def stdev(df):
    """Calculate the standard deviation of the difference between the 'rate' and 'w1_rate' columns."""
    return np.std(df['rate'] - df['w1_rate'])
8f7d49548dac617855c9232af6aed7ec04e9b64c
7,785
from flask import abort, jsonify, request
from flask_login import current_user


def add_to_cart(listing_id):
    """Adds listing to cart with specified quantity"""
    listing = Listing.query.filter_by(id=listing_id, available=True).first()
    if not listing:
        abort(404)
    if not request.json:
        abort(400)
    if ('quantity' not in request.json or
            type(request.json['quantity']) is not int):
        abort(400)

    cart_item = CartItem.query.filter_by(
        merchant_id=current_user.id,
        listing_id=listing_id
    ).first()

    new_quantity = request.json['quantity']
    is_currently_incart = cart_item is not None

    if new_quantity == 0 and is_currently_incart:
        db.session.delete(cart_item)
    elif new_quantity != 0 and is_currently_incart:
        cart_item.quantity = new_quantity
    elif new_quantity != 0 and not is_currently_incart:
        db.session.add(
            CartItem(
                merchant_id=current_user.id,
                listing_id=listing_id,
                quantity=new_quantity
            )
        )
    db.session.commit()
    name = Listing.query.filter_by(id=listing_id).first().name
    return jsonify({'quantity': new_quantity, 'name': name})
ee21468f432374c81f4fe8aada92a6ff757d8d38
7,786
import pandas as pd


def get_zip_code_prefixes(df_geolocation: pd.DataFrame) -> pd.DataFrame:
    """ Gets the first one, two, three and four digits of the zip codes. """
    df = df_geolocation.copy()
    df['geolocation_zip_code_prefix_1_digits'] = df['geolocation_zip_code_prefix'].str[0:1]
    df['geolocation_zip_code_prefix_2_digits'] = df['geolocation_zip_code_prefix'].str[0:2]
    df['geolocation_zip_code_prefix_3_digits'] = df['geolocation_zip_code_prefix'].str[0:3]
    df['geolocation_zip_code_prefix_4_digits'] = df['geolocation_zip_code_prefix'].str[0:4]
    return df
8ba9ae223ba76871363b6c3ed452f157b8a848b0
7,787
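A tiny example with the kind of geolocation frame the function expects; the zip code column must already be of string dtype for the .str slicing to work:

import pandas as pd

df_geo = pd.DataFrame({"geolocation_zip_code_prefix": ["01037", "22770"]})
out = get_zip_code_prefixes(df_geo)
print(out["geolocation_zip_code_prefix_3_digits"].tolist())  # ['010', '227']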
def elina_scalar_infty(scalar):
    """
    Return -1 if an ElinaScalar is -infinity, 0 if it is finite and 1 if it is +infinity.

    Parameters
    -----------
    scalar : ElinaScalarPtr
        Pointer to the ElinaScalar that needs to be tested for infinity.

    Returns
    -------
    result : c_int
        Integer stating the result of the testing.

    """
    result = None
    try:
        elina_scalar_infty_c = elina_auxiliary_api.elina_scalar_infty
        elina_scalar_infty_c.restype = c_int
        elina_scalar_infty_c.argtypes = [ElinaScalarPtr]
        result = elina_scalar_infty_c(scalar)
    except:
        print('Problem with loading/calling "elina_scalar_infty" from "libelinaux.so"')
        print('Make sure you are passing ElinaScalarPtr to the function')

    return result
73d5dd7e552e94ce11f739386f225c3b2dbad741
7,788
import os


def _package_dmg(paths, dist, config):
    """Packages a Chrome application bundle into a DMG.

    Args:
        paths: A |model.Paths| object.
        dist: The |model.Distribution| for which the product was customized.
        config: The |config.CodeSignConfig| object.

    Returns:
        A path to the produced DMG file.
    """
    packaging_dir = paths.packaging_dir(config)

    if dist.channel_customize:
        dsstore_file = 'chrome_{}_dmg_dsstore'.format(dist.channel)
        icon_file = 'chrome_{}_dmg_icon.icns'.format(dist.channel)
    else:
        dsstore_file = 'chrome_dmg_dsstore'
        icon_file = 'chrome_dmg_icon.icns'

    dmg_path = os.path.join(paths.output,
                            '{}.dmg'.format(config.packaging_basename))
    app_path = os.path.join(paths.work, config.app_dir)

    # A locally-created empty directory is more trustworthy than /var/empty.
    empty_dir = os.path.join(paths.work, 'empty')
    commands.make_dir(empty_dir)

    # Make the disk image. Don't include any customized name fragments in
    # --volname because the .DS_Store expects the volume name to be constant.
    # Don't put a name on the /Applications symbolic link because the same disk
    # image is used for all languages.
    # yapf: disable
    pkg_dmg = [
        os.path.join(packaging_dir, 'pkg-dmg'),

        '--verbosity', '0',
        '--tempdir', paths.work,

        '--source', empty_dir,
        '--target', dmg_path,

        '--format', 'UDBZ',
        '--volname', config.app_product,
        '--copy', '{}:/'.format(app_path),
        '--symlink', '/Applications:/ ',
    ]
    # yapf: enable

    if dist.inflation_kilobytes:
        pkg_dmg += [
            '--copy',
            '{}/inflation.bin:/.background/inflation.bin'.format(packaging_dir)
        ]

    if config.is_chrome_branded():
        # yapf: disable
        pkg_dmg += [
            '--icon', os.path.join(packaging_dir, icon_file),
            '--copy',
            '{}/keystone_install.sh:/.keystone_install'.format(packaging_dir),
            '--mkdir', '.background',
            '--copy',
            '{}/chrome_dmg_background.png:/.background/background.png'.format(
                packaging_dir),
            '--copy', '{}/{}:/.DS_Store'.format(packaging_dir, dsstore_file),
        ]
        # yapf: enable

    commands.run_command(pkg_dmg)

    return dmg_path
83d98361fa6317489d14243829fcec8570de5cba
7,789
import math

import numpy as np


def get_target_grid(return_type, **kwargs):
    """
    Function: get polar or cartesian coordinates of targets
    Inputs:
    - return_type: str. "cart" for cartesian coordinates; "polar" for polar coordinates.
    - kwargs: additional params.
      - rel_points: dictionary. relative length for target positions and heel positions
    Outputs:
    - if returning cartesian coordinates: numpy array. x and y coordinates of targets in cartesian coordinates.
    - if returning polar coordinates: dictionary {type('c', 'l', 'h'): numpy array}. polar coordinates of target centers ('c')/lower bounds ('l')/upper bounds ('h')
    """

    ### unravel params.
    if('rel_points' in kwargs.keys()):
        rel_points = kwargs['rel_points']

    ### calculate ideal grid
    #### before standardization
    ##### distance: normal
    dT0T2 = dT0T5 = dT2T4 = dT4T5 = 1
    dT0T4 = dT2T3 = (dT0T5 ** 2 + dT4T5 ** 2 - 2*dT4T5*dT0T5*math.cos(math.radians(100)))**0.5
    dT2T5 = dT3T7 = (dT0T5 ** 2 + dT4T5 ** 2 - 2*dT0T2*dT0T5*math.cos(math.radians(80)))**0.5
    dT0T3 = dT0T7 = ((dT2T5/2) ** 2 + (dT2T3*1.5) ** 2) ** 0.5

    ##### angles: normal
    aT0T2 = math.radians(80)/2
    aT0T5 = - math.radians(80)/2
    aT0T3 = math.acos((dT0T3 ** 2 + dT0T7 ** 2 - dT3T7 ** 2)/(2*dT0T3*dT0T7))/2
    aT0T7 = - aT0T3
    aT0T4 = 0

    ##### target coordinates
    T0 = np.array((0, 0))
    T2 = np.array((aT0T2, dT0T2))
    T3 = np.array((aT0T3, dT0T3))
    T4 = np.array((aT0T4, dT0T4))
    T5 = np.array((aT0T5, dT0T2))
    T7 = np.array((aT0T7, dT0T7))

    target_grid_polar = np.stack((T0, T2, T3, T4, T5, T7), axis=0)
    target_grid_cart = np.zeros((6, 2))
    for i in range(6):
        target_grid_cart[i, :] = polar_to_cartesian(target_grid_polar[i, 1], target_grid_polar[i, 0])

    ##### heel coordinates
    alpha = 0.2354
    a = 0.2957
    b = 0.5
    r_heels_cart = np.zeros((6, 2))
    r_heels_polar = np.zeros((6, 2))
    for n in range(1, 7):
        phi_n = -(alpha + (n-1)*(np.pi - 2*alpha)/5)
        x = a*np.cos(phi_n)
        y = b*np.sin(phi_n)
        r, theta = cartesian_to_polar(-y, x)
        r_heels_cart[n-1, :] = [-y, x]
        r_heels_polar[n-1, :] = [theta, r]

    ##### intersect
    c = my_help.line_intersection((r_heels_cart[2, :], target_grid_cart[2, :]),
                                  (r_heels_cart[3, :], target_grid_cart[5, :]))

    #### after standardization
    dTiC = np.zeros((6, 1))
    for i in range(1, 6):
        dTiC[i] = np.linalg.norm(target_grid_cart[i, :] - c)
    dTiC = dTiC/dTiC[3]

    aTiCT4 = np.zeros((6, 1))
    for i in range(1, 6):
        aTiCT4[i] = my_int.inner_angle(target_grid_cart[i, :] - c, target_grid_cart[3, :] - c, True)
        if(i in [4, 5]):
            aTiCT4[i] = - aTiCT4[i]

    ### calculate output values
    if(return_type == 'cart'):
        grid_cart = np.zeros((6, 2))
        for i in range(1, 6):
            grid_cart[i, 0], grid_cart[i, 1] = polar_to_cartesian(dTiC[i][0], aTiCT4[i][0])
        return grid_cart

    elif(return_type == 'polar'):
        target_grid_polar = {}
        for t in ['c', 'l', 'h']:
            T0 = np.array((aTiCT4[0], -rel_points[f'T0{t}']))
            T2 = np.array((aTiCT4[1], rel_points[f'T2{t}']))
            T3 = np.array((aTiCT4[2], rel_points[f'T3{t}']))
            T4 = np.array((aTiCT4[3], rel_points[f'T4{t}']))
            T5 = np.array((aTiCT4[4], rel_points[f'T5{t}']))
            T3_ = np.array((aTiCT4[5], rel_points[f'T7{t}']))
            C0 = np.array((aTiCT4[0], rel_points['center']))
            target_grid_polar[t] = np.stack((T0, T2, T3, T4, T5, T3_, C0), axis=0)

        return target_grid_polar
d69bd81912502d0dde04fce0dc4a57201810f9df
7,790
import datetime


def s2_filename_to_md(filename):
    """
    This function converts the S2 filename into a small dict of metadata

    :param filename:
    :return: dict
    """
    basename = system.basename(filename)
    metadata = dict()
    splits = basename.split("_")
    if len(splits) < 4:
        raise Exception("{} might not be a S2 product".format(filename))
    metadata["tile"] = splits[3]
    datestr = splits[1]
    metadata["date"] = datetime.datetime.strptime(datestr[:-1], '%Y%m%d-%H%M%S-%f')
    return metadata
86c40009f915fd250091a68d52f79f5701f64270
7,791
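A hypothetical call with a THEIA-style Sentinel-2 product name, which has the underscore-separated layout (sensor, date-time, level, tile, ...) the parser assumes; it also assumes the project's system.basename helper is importable.

md = s2_filename_to_md("SENTINEL2A_20200321-105911-024_L2A_T31TCJ_C_V2-2")
print(md["tile"])  # 'T31TCJ'
print(md["date"])  # datetime parsed from the second field, e.g. 2020-03-21 10:59:11.020000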
import argparse
import os


def parse_command_line_args():
    """Parses command line arguments."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '--service_account_json',
        default=os.environ.get("GOOGLE_APPLICATION_CREDENTIALS"),
        help='Path to service account JSON file.')
    parser.add_argument(
        '--project_id',
        default=os.environ.get("GOOGLE_CLOUD_PROJECT"),
        help='GCP project name')
    parser.add_argument(
        '--cloud_region', default='us-central1', help='GCP region')
    parser.add_argument(
        '--dataset_id', default=None, help='Name of dataset')
    parser.add_argument(
        '--hl7v2_store_id', default=None, help='Name of HL7v2 store')
    parser.add_argument(
        '--hl7v2_message_file',
        default=None,
        help='A file containing a base64-encoded HL7v2 message')
    parser.add_argument(
        '--hl7v2_message_id',
        default=None,
        help='The identifier for the message returned by the server')
    parser.add_argument(
        '--label_key',
        default=None,
        help='Arbitrary label key to apply to the message')
    parser.add_argument(
        '--label_value',
        default=None,
        help='Arbitrary label value to apply to the message')

    command = parser.add_subparsers(dest='command')

    command.add_parser(
        'create-hl7v2-message', help=create_hl7v2_message.__doc__)
    command.add_parser(
        'delete-hl7v2-message', help=delete_hl7v2_message.__doc__)
    command.add_parser('get-hl7v2-message', help=get_hl7v2_message.__doc__)
    command.add_parser(
        'ingest-hl7v2-message', help=ingest_hl7v2_message.__doc__)
    command.add_parser('list-hl7v2-messages', help=list_hl7v2_messages.__doc__)
    command.add_parser(
        'patch-hl7v2-message', help=patch_hl7v2_message.__doc__)

    return parser.parse_args()
8f5fba52d9f1a988007469aba4ca322f1a51a46e
7,792
from typing import Optional

import pulumi


def get_rest_api(id: Optional[str] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRestApiResult:
    """
    Resource Type definition for AWS::ApiGateway::RestApi
    """
    __args__ = dict()
    __args__['id'] = id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws-native:apigateway:getRestApi', __args__, opts=opts, typ=GetRestApiResult).value

    return AwaitableGetRestApiResult(
        api_key_source_type=__ret__.api_key_source_type,
        binary_media_types=__ret__.binary_media_types,
        body=__ret__.body,
        body_s3_location=__ret__.body_s3_location,
        clone_from=__ret__.clone_from,
        description=__ret__.description,
        disable_execute_api_endpoint=__ret__.disable_execute_api_endpoint,
        endpoint_configuration=__ret__.endpoint_configuration,
        fail_on_warnings=__ret__.fail_on_warnings,
        id=__ret__.id,
        minimum_compression_size=__ret__.minimum_compression_size,
        mode=__ret__.mode,
        name=__ret__.name,
        parameters=__ret__.parameters,
        policy=__ret__.policy,
        root_resource_id=__ret__.root_resource_id,
        tags=__ret__.tags)
86edad14cb2ccd8b5da5316db8ac9b66a25dbddd
7,793
import requests
from requests.exceptions import ConnectionError, Timeout


def _uframe_post_instrument_driver_set(reference_designator, command, data):
    """
    Execute set parameters for instrument driver using command and data; return uframe response.
    (POST)
    """
    debug = False
    try:
        uframe_url, timeout, timeout_read = get_c2_uframe_info()
        if 'CAMDS' in reference_designator:
            timeout = 10
            timeout_read = 200
        url = "/".join([uframe_url, reference_designator, command])
        if debug:
            print('\n debug -- (_uframe_post_instrument_driver_set) url: ', url)
        response = requests.post(url, data=data, timeout=(timeout, timeout_read), headers=_post_headers())
        return response
    except ConnectionError:
        message = 'ConnectionError for instrument driver set command.'
        raise Exception(message)
    except Timeout:
        message = 'Timeout for instrument driver set command.'
        raise Exception(message)
    except Exception:
        raise
aba81bb3720f78935fed561133de21716c316e95
7,794
def regroup_if_changed(group, op_list, name=None):
    """Creates a new group for op_list if it has changed.

    Args:
        group: The current group. It is returned if op_list is unchanged.
        op_list: The list of operations to check.
        name: The name to use if a new group is created.

    Returns:
        Either group or a new group (or if op_list is empty then no_op).
    """
    has_deltas = isinstance(op_list, sequence_with_deltas.SequenceWithDeltas)
    if (group is None or len(group.control_inputs) != len(op_list) or
            (has_deltas and op_list.has_changed())):
        if has_deltas:
            op_list.mark()
        if op_list:
            return tf.group(*op_list, name=name)
        else:
            return tf.no_op(name=name)
    else:
        return group
f6a811e34ac79d2563906c4971fa23b7316a0976
7,795
def spike_train_order_profile(*args, **kwargs):
    """ Computes the spike train order profile :math:`E(t)` of the given
    spike trains. Returns the profile as a DiscreteFunction object.

    Valid call structures::

      spike_train_order_profile(st1, st2)       # returns the bi-variate profile

      spike_train_order_profile(st1, st2, st3)  # multi-variate profile of 3
                                                # spike trains

      spike_trains = [st1, st2, st3, st4]       # list of spike trains
      spike_train_order_profile(spike_trains)   # profile of the list of spike trains

      spike_train_order_profile(spike_trains, indices=[0, 1])  # use only the spike trains
                                                               # given by the indices

    Additional arguments:
    :param max_tau: Upper bound for coincidence window, `default=None`.
    :param indices: list of indices defining which spike trains to use,
                    if None all given spike trains are used (default=None)

    :returns: The spike train order profile :math:`E(t)`
    :rtype: :class:`.DiscreteFunction`
    """
    if len(args) == 1:
        return spike_train_order_profile_multi(args[0], **kwargs)
    elif len(args) == 2:
        return spike_train_order_profile_bi(args[0], args[1], **kwargs)
    else:
        return spike_train_order_profile_multi(args, **kwargs)
ab57e5de52c0064ad691501d131e66ed5b230093
7,796
def home():
    """Home page."""
    form = LoginForm(request.form)
    with open("POSCAR", "r") as samplefile:
        sample_input = samplefile.read()
    inputs = InputForm()
    current_app.logger.info("Hello from the home page!")
    # Handle logging in
    if request.method == "POST":
        if form.validate_on_submit():
            login_user(form.user)
            flash("You are logged in.", "success")
            redirect_url = request.args.get("next") or url_for("user.members")
            return redirect(redirect_url)
        else:
            flash_errors(form)
    return render_template("public/home.html", form=form, inputs=inputs)
0494bc54040677d6ae09992280afe8922141a93a
7,797
def isUniqueSeq(objlist):
    """Check that list contains items only once"""
    return len(set(objlist)) == len(objlist)
4522c43967615dd54e261a229b05c742676c7f99
7,798
import torch


def compute_kld(confidences: torch.Tensor, reduction="mean") -> torch.Tensor:
    """
    Args:
        confidences (Tensor): a tensor of shape [N, M, K] of predicted confidences from ensembles.
        reduction (str): specifies the reduction to apply to the output.
            - none: no reduction will be applied,
            - mean: the sum of the output will be divided by the number of elements in the output.

    Returns:
        kld (Tensor): KL divergences for given confidences from ensembles.
            - a tensor of shape [N,] when reduction is "none",
            - a tensor of shape [,] when reduction is "mean".
    """
    assert reduction in [
        "none",
        "mean",
    ], f"Unknown reduction = \"{reduction}\""

    kld = torch.zeros(confidences.size(0), device=confidences.device)  # [N,]

    ensemble_size = confidences.size(1)
    if ensemble_size > 1:
        pairs = []
        for i in range(ensemble_size):
            for j in range(ensemble_size):
                pairs.append((i, j))

        for (i, j) in pairs:
            if i == j:
                continue
            kld += torch.nn.functional.kl_div(
                confidences[:, i, :].log(),
                confidences[:, j, :],
                reduction="none",
                log_target=False,
            ).sum(1)  # [N,]

        kld = kld / (ensemble_size * (ensemble_size - 1))

    if reduction == "mean":
        kld = kld.mean()  # [,]

    return kld
4fb57a18fdfae56dc04a2502c4cd21590bc31c93
7,799
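A quick end-to-end check of compute_kld with random ensemble predictions (N=8 samples, M=4 ensemble members, K=10 classes):

import torch

confidences = torch.softmax(torch.randn(8, 4, 10), dim=-1)       # [N, M, K], rows sum to 1
print(compute_kld(confidences).item())                            # scalar mean pairwise KL
print(compute_kld(confidences, reduction="none").shape)           # torch.Size([8])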