Dataset columns:
  content: string (lengths 35 to 762k)
  sha1: string (length 40)
  id: int64 (0 to 3.66M)
import logging
import os

logger = logging.getLogger(__name__)  # `logger` was undefined in the original snippet


def get_scenario():
    """
    Get scenario
    """
    try:
        scenario = os.environ['DEPLOY_SCENARIO']
    except KeyError:
        logger.error("Impossible to retrieve the scenario")
        scenario = "Unknown_scenario"
    return scenario
dc8ce4bb231ff9174f59f27a5a2dee618d6d4647
6,000
from typing import Optional
from typing import Callable

import requests


def make_request(
    endpoint: str,
    method: str = "get",
    data: Optional[dict] = None,
    timeout: int = 15
) -> Response:
    """Makes a request to the given endpoint and maps the response to a Response class"""
    method = method.lower()
    request_method: Callable = getattr(requests, method)

    if method not in SAFE_METHODS and data is None:
        raise ValueError("Data must be provided for POST, PUT and PATCH requests.")

    r: RequestsResponse
    if method not in SAFE_METHODS:
        r = request_method(endpoint, json=data, timeout=timeout)
    else:
        r = request_method(endpoint, timeout=timeout)

    return Response(status_code=r.status_code, data=r.json())
8dd88583f61e5c42689461dd6d316297d910f197
6,001
import socket


def _is_rpc_timeout(e):
    """ check whether an exception is an individual rpc timeout. """
    # connection caused socket timeout is being re-raised as
    # ThriftConnectionTimeoutError now
    return isinstance(e, socket.timeout)
ec832bec086b59698eed12b18b7a37e5eb541329
6,002
import tensorflow as tf


def fake_quantize_with_min_max(inputs, f_min, f_max, bit_width, quant_zero=True):
    """The fake quantization operation kernel.

    Args:
      inputs: a tensor containing values to be quantized.
      f_min: the minimum input value
      f_max: the maximum input value
      bit_width: the bit width
      quant_zero: whether to quantize the zero point
    Returns:
      a tensor containing quantized values.
    """

    @tf.function
    def forward(inputs, f_min, f_max, bit_width, quant_zero):
        with tf.name_scope("FakeQuantizeWithMinMax"):
            float_bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width")
            bound = tf.math.pow(2.0, float_bit_width - 1)
            q_min = tf.math.negative(bound, name="q_min")
            q_max = tf.math.subtract(bound, 1, name="q_max")
            scale = get_scale(f_min, f_max, q_min, q_max)
            if quant_zero:
                q_zero_point, new_f_min, new_f_max = quantize_zero_point(
                    scale, f_min, f_max, q_min, q_max)
            shift = new_f_min if quant_zero else f_min
            quantized = quantize(inputs, scale, shift, q_min, q_max)
            dequantized = dequantize(quantized, scale, shift, q_min, q_max)
        return dequantized

    @tf.function
    def grad_fn(dy):
        float_bit_width = tf.cast(bit_width, dtype=tf.float32, name="bit_width")
        bound = tf.math.pow(2.0, float_bit_width - 1)
        q_min = tf.math.negative(bound, name="q_min")
        q_max = tf.math.subtract(bound, 1, name="q_max")
        scale = get_scale(f_min, f_max, q_min, q_max)
        if quant_zero:
            q_zero_point, new_f_min, new_f_max = quantize_zero_point(
                scale, f_min, f_max, q_min, q_max)
            between_min_max = (inputs >= new_f_min) & (inputs <= new_f_max)
            below_min = (inputs <= new_f_min)
            above_max = (inputs >= new_f_max)
        else:
            between_min_max = (inputs >= f_min) & (inputs <= f_max)
            below_min = (inputs <= f_min)
            above_max = (inputs >= f_max)

        ones = tf.ones_like(dy)
        zeros = tf.zeros_like(dy)
        grad_wrt_inputs = dy * tf.where(between_min_max, ones, zeros)
        grad_wrt_f_min = tf.reduce_sum(dy * tf.where(below_min, ones, zeros))
        grad_wrt_f_max = tf.reduce_sum(dy * tf.where(above_max, ones, zeros))
        return grad_wrt_inputs, grad_wrt_f_min, grad_wrt_f_max, None

    results = forward(inputs, f_min, f_max, bit_width, quant_zero)
    return results, grad_fn
2034dbe02d50ce0317dc4dbb6f2ed59137e671d2
6,003
from math import pi  # the original module presumably supplied `pi` (math or numpy)


def lorentzian(coordinates, center, fwhm):
    """
    Unit integral Lorentzian function.

    Parameters
    ----------
    coordinates : array-like
        Can be either a list of ndarrays, as a meshgrid coordinates list,
        or a single ndarray for 1D computation
    center : array-like
        Center of the lorentzian. Should be the same shape as
        `coordinates.ndim`.
    fwhm : float
        Full-width at half-max of the function.

    Returns
    -------
    out : ndarray
        Lorentzian function of unit integral.

    Notes
    -----
    The functional form of the Lorentzian is given by:

    .. math::

        L(x) = \\frac{1}{\\pi} \\frac{(\\gamma/2)}{(x-c)^2 + (\\gamma/2)^2}

    where :math:`\\gamma` is the full-width at half-maximum, and
    :math:`c` is the center.

    For n dimensions, the functional form of the Lorentzian is given by:

    .. math::

        L(x_1, ..., x_n) = \\frac{1}{n \\pi} \\frac{(\\gamma/2)}{(\\sum_i{(x_i - c_i)^2} + (\\gamma/2)^2)^{\\frac{1+n}{2}}}

    Example
    -------
    >>> import numpy as np
    >>> from skued import lorentzian
    >>>
    >>> span = np.arange(-10, 10, 0.1)
    >>> xx, yy = np.meshgrid(span, span)
    >>> center = [0, 0]
    >>> l = lorentzian(coordinates=[xx, yy], center=[0, 0], fwhm=1)
    >>> l.shape == xx.shape  # True
    >>> np.sum(l) * 0.1**2   # Integral should be unity (spacing = 0.1)
    """
    width = 0.5 * fwhm

    # 1D is a special case, as coordinates are not given as a list of arrays
    if not isinstance(coordinates, (list, tuple)):  # iterable but not ndarray
        return (width / pi) / ((coordinates - center) ** 2 + width ** 2)

    dim = len(coordinates)
    core = width / (
        (sum([(x - c) ** 2 for x, c in zip(coordinates, center)]) + width ** 2)
    ) ** ((dim + 1) / 2)
    factor = 1 / (dim * pi)
    return factor * core
8631ef30f0fd50ac516f279cd130d6d9b099d953
6,004
def _to_gzip_base64(self, **kwargs):
    """ Reads the file as text, then turns to gzip+base64"""
    data = self.read_text(**kwargs)
    return Base.b64_gzip_encode(data)
bb3e01bcac5e551d862629e79f4c54827ca3783c
6,005
import traceback


def get_recipe_data(published=False, complete_data=False):
    """Return published or unpublished recipe data."""
    try:
        Changed = User.alias()
        recipes = recipemodel.Recipe.select(
            recipemodel.Recipe,
            storedmodel.Stored,
            pw.fn.group_concat(tagmodel.Tag.tagname).alias("taglist")
        ).where(
            recipemodel.Recipe.published == published
        ).join(
            storedmodel.Stored, pw.JOIN.LEFT_OUTER,
            on=(storedmodel.Stored.recipeID == recipemodel.Recipe.id)
        ).join(
            tagmodel.RecipeTags, pw.JOIN.LEFT_OUTER,
            on=(tagmodel.RecipeTags.recipeID == recipemodel.Recipe.id)
        ).join(
            tagmodel.Tag, pw.JOIN.LEFT_OUTER,
            on=(tagmodel.Tag.id == tagmodel.RecipeTags.tagID)
        ).group_by(
            recipemodel.Recipe.id)

        if complete_data:
            # Load in User table
            recipes = recipes.select(
                User, Changed,
                recipemodel.Recipe,
                storedmodel.Stored,
                pw.fn.group_concat(tagmodel.Tag.tagname).alias("taglist")
            ).switch(
                recipemodel.Recipe
            ).join(
                User, pw.JOIN.LEFT_OUTER,
                on=(User.id == recipemodel.Recipe.created_by).alias("a")
            ).switch(
                recipemodel.Recipe
            ).join(
                Changed, pw.JOIN.LEFT_OUTER,
                on=(Changed.id == recipemodel.Recipe.changed_by).alias("b"))

        data = recipemodel.get_recipes(recipes, complete_data=complete_data)
        return utils.success_response(msg="Data loaded", data=data, hits=len(data))

    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return utils.error_response(f"Failed to load data: {e}")
35afc8247912bd6814b5f4e76716f39d9f244e90
6,006
def html_anchor_navigation(base_dir, experiment_dir, modules):
    """Build header of an experiment with links to all modules used for rendering.

    :param base_dir: parent folder in which to look for experiment folders
    :param experiment_dir: experiment folder
    :param modules: list of all loaded modules
    :return: str
    """
    return "\n".join((
        """<header class="w3-container w3-dark-grey">
             <h5><a href='#'>{folder}</a></h5>
           </header>""".format(folder=experiment_dir),
        "\n".join("""
           <div style='white-space: nowrap;'>
             <div class="show toggle-cookie padding-right" data-toggle='toggle-{id}-all' data-class-off='no-show'>&nbsp;</div>
             <a class='' href='#{module_title}'>{module_title}</a>
           </div>""".format(
            folder=experiment_dir,
            module_title=module.title,
            id=module.id) for module in modules),
        "<hr />"
    ))
1fea16c0aae2f73be713271de5f003e608cee7e9
6,007
# `M` is the project's message-type enum (PLAY, OKAY, GAME, ...); the original
# `from re import M` wrongly shadowed it with the re.MULTILINE flag.

def connect_and_play(player, name, channel, host, port, logfilename=None,
                     out_function=None, print_state=True,
                     use_debugboard=False, use_colour=False,
                     use_unicode=False):
    """
    Connect to and coordinate a game with a server, return a string describing
    the result.
    """
    # Configure behaviour of this function depending on parameters:
    out = out_function if out_function else (lambda *_, **__: None)  # no-op
    if print_state:
        def display_state(players_str, game):
            out("displaying game info:")
            out(players_str, depth=1)
            out(game, depth=1)
    else:
        def display_state(players, game):
            pass

    # Set up a connection with the server
    out("connecting to battleground", depth=-1)
    out("attempting to connect to the server...")
    server = Server.from_address(host, port)
    out("connection established!")

    # Wait for some matching players
    out("looking for a game", depth=-1)
    channel_str = f"channel '{channel}'" if channel else "open channel"
    out(f"submitting game request as '{name}' in {channel_str}...")
    server.send(M.PLAY, name=name, channel=channel)
    server.recv(M.OKAY)
    out("game request submitted.")
    out(f"waiting for opponents in {channel_str}...")
    out("(press ^C to stop waiting)")
    # (wait through some OKAY-OKAY msg exchanges until a GAME message comes---
    # the server is asking if we are still here waiting, or have disconnected)
    gamemsg = server.recv(M.OKAY | M.GAME)
    while gamemsg['mtype'] is not M.GAME:
        server.send(M.OKAY)
        gamemsg = server.recv(M.OKAY | M.GAME)
    # when we get a game message, it's time to play!
    out("setting up game", depth=-1, clear=True)
    out("opponents found!")
    out("white player:", gamemsg['white'])
    out("black player:", gamemsg['black'])

    # Initialise the player
    out("initialising player", depth=-1)
    out("waiting for colour assignment...")
    initmsg = server.recv(M.INIT)
    out("playing as", initmsg['colour'], depth=1)
    out("initialising your player class...")
    player.init(initmsg['colour'])
    out("ready to play!")
    server.send(M.OKAY)

    # Set up a new game and display the initial state and players
    out("game start", depth=-1)
    players_str = format_players_str(gamemsg, player.colour)
    game = Game(logfilename=logfilename, debugboard=use_debugboard,
                colourboard=use_colour, unicodeboard=use_unicode)
    display_state(players_str, game)

    # Now wait for messages from the server and respond accordingly
    while True:
        msg = server.recv(M.TURN | M.UPD8 | M.OVER | M.ERRO)
        if msg['mtype'] is M.TURN:
            # it's our turn!
            out("your turn!", depth=-1, clear=True)
            display_state(players_str, game)
            # decide on action and submit it to server
            action = player.action()
            server.send(M.ACTN, action=action)
        elif msg['mtype'] is M.UPD8:
            # someone made a move!
            colour = msg['colour']
            action = msg['action']
            # update our local state,
            out("receiving update", depth=-1, clear=True)
            game.update(colour, action)
            display_state(players_str, game)
            player.update(colour, action)
            # then notify server we are ready to continue:
            server.send(M.OKAY)
        elif msg['mtype'] is M.OVER:
            # the game ended!
            return msg['result']
        elif msg['mtype'] is M.ERRO:
            # seems like the server encountered an error, but not
            # with our connection
            raise ServerEncounteredError(msg['reason'])
2b8a9fe1e0e2edeb888e57d7ffd9cc59e3bd4e4e
6,008
import typing
from decimal import Decimal


def _to_int_and_fraction(d: Decimal) -> typing.Tuple[int, str]:
    """convert absolute decimal value into integer and fractional (<1) parts"""
    t = d.as_tuple()
    stringified = ''.join(map(str, t.digits))
    fraction = ''
    if t.exponent < 0:
        int_, fraction = stringified[:t.exponent], stringified[t.exponent:]
        fraction = fraction.rjust(-t.exponent, '0')
    else:
        int_ = stringified + t.exponent * '0'
    return int(int_ or 0), fraction
d1f83df06ae42cdc3e6b7c0582397ee3a79ff99b
6,009
import json


def json_to_obj(my_class_instance):
    """
    Takes a JSON representation as input and returns an instance of the
    MyClass class.

    >>> a = MyClass('me', 'my_surname', True)
    >>> json_dict = get_json(a)
    >>> b = json_to_obj(json_dict)
    <__main__.MyClass object at 0x7fd8e9634510>
    """
    some_dict = json.loads(my_class_instance)
    return MyClass(**some_dict)
1f881e609f1c895173f4c27ebdaf413a336a4b8f
6,010
import exifread


def all_tags(path) -> {str: str}:
    """Return the Exif tags of the file at `path`."""
    with open(path, "rb") as file:
        tags = exifread.process_file(file, details=False)
    return tags
29132ad176ba68d7026ebb78d9fed6170833255e
6,011
def static(request):
    """
    Backport django.core.context_processors.static to Django 1.2.
    """
    return {'STATIC_URL': djangoSettings.STATIC_URL}
cf74daed50e7e15f15fbe6592f36a523e388e11e
6,012
def genBoard():
    """
    Generates an empty board.

    >>> genBoard()
    ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
    """
    # Empty board
    empty = ["A", "B", "C", "D", "E", "F", "G", "H", "I"]
    # Return it
    return empty
c47e766a0c897d3a1c589a560288fb52969c04a3
6,013
def _partition_at_level(dendrogram, level):
    """Return the partition of the nodes at the given level

    A dendrogram is a tree and each level is a partition of the graph nodes.
    Level 0 is the first partition, which contains the smallest
    snapshot_affiliations, and the best is len(dendrogram) - 1.
    The higher the level is, the bigger are the snapshot_affiliations
    """
    partition = dendrogram[0].copy()
    for index in range(1, level + 1):
        for node, community in partition.items():
            partition[node] = dendrogram[index][community]
    return partition
b179127076c386480c31a18a0956eb30d5f4ef2a
6,014
import logging
import collections
import sys


def filter_multi_copy_clusters(idx):
    """
    {cluster_id : {taxonomy : {genomeid : [gene_uuid,...]}}}
    """
    logging.info('Filtering out multi-copy genes...')
    clust_cnt = collections.defaultdict(dict)
    to_remove = []
    for cluster_id, v in idx.items():
        per_genome_copy = {}
        for tax, vv in v.items():
            for genome_id, x in vv.items():
                per_genome_copy[genome_id] = len(set(x['gene_ids']))
        # any multi-copy?
        if any([x > 1 for x in per_genome_copy.values()]):
            to_remove.append(cluster_id)
    for cluster_id in to_remove:
        idx.pop(cluster_id, None)
    # status
    logging.info('  Number of multi-copy clusters removed: {}'.format(len(to_remove)))
    logging.info('  Number of single-copy clusters remaining: {}'.format(len(idx.keys())))
    if len(idx.keys()) < 1:
        logging.info('Exiting due to a lack of clusters')
        sys.exit(0)
    metadata_summary(idx)
    return idx
0c4122fb6119f827bd7b61debc12701064ec7d34
6,015
import numpy as np


def generate_ab_data():
    """
    Generate data for a second order reaction A + B -> P

        d[A]/dt = -k[A][B]
        d[B]/dt = -k[A][B]
        d[P]/dt = k[A][B]

        [P] = ([B]0 - [A]0 h(t)) / (1 - h(t))

        where h(t) = ([B]0 / [A]0) e^(kt ([B]0 - [A]0))

    Data printed in a .csv file
    """
    times = np.linspace(0, 10, num=100)   # s
    a0 = 0.6                              # mol dm^-3
    b0 = 0.5                              # mol dm^-3
    k = 1.7                               # mol^-1 dm^3 s^-1

    with open('ab.csv', 'w') as data_file:
        print('Data for A + B -> P where v = k[A][B]', file=data_file)

        for i, t in enumerate(times):
            h = (b0 / a0) * np.exp(k * t * (b0 - a0))
            p = (b0 - a0 * h) / (1.0 - h)
            a = a0 - p
            b = b0 - p

            # Time, [A], [B], [P]
            print(f'{t:.6f},{a:.6f},{b:.6f},{p:.6f}', file=data_file)

    return None
d36521953129b5e002d3a3d2bcf929322c75470c
6,016
import typing


def autocomplete(segment: str, line: str, parts: typing.List[str]):
    """
    :param segment:
    :param line:
    :param parts:
    :return:
    """
    if parts[-1].startswith('-'):
        return autocompletion.match_flags(
            segment=segment,
            value=parts[-1],
            shorts=['f', 'a', 'd'],
            longs=['force', 'append', 'directory']
        )

    if len(parts) == 1:
        return autocompletion.match_path(segment, parts[0])

    return []
8d929e96684d8d1c3ad492424821d27c4d1a2e66
6,017
import numpy as np


def elast_tri3(coord, params):
    """Triangular element with 3 nodes

    Parameters
    ----------
    coord : ndarray
        Coordinates for the nodes of the element (3, 2).
    params : tuple
        Material parameters in the following order:

        young : float
            Young modulus (>0).
        poisson : float
            Poisson coefficient (-1, 0.5).
        dens : float, optional
            Density (>0).

    Returns
    -------
    stiff_mat : ndarray
        Local stiffness matrix for the element (6, 6).
    mass_mat : ndarray
        Local mass matrix for the element (6, 6).

    Examples
    --------
    >>> coord = np.array([
    ...         [0, 0],
    ...         [1, 0],
    ...         [0, 1]])
    >>> params = [8/3, 1/3]
    >>> stiff, mass = elast_tri3(coord, params)
    >>> stiff_ex = 1/2 * np.array([
    ...             [4, 2, -3, -1, -1, -1],
    ...             [2, 4, -1, -1, -1, -3],
    ...             [-3, -1, 3, 0, 0, 1],
    ...             [-1, -1, 0, 1, 1, 0],
    ...             [-1, -1, 0, 1, 1, 0],
    ...             [-1, -3, 1, 0, 0, 3]])
    >>> np.allclose(stiff, stiff_ex)
    True
    """
    stiff_mat = np.zeros([6, 6])
    mass_mat = np.zeros([6, 6])
    C = fem.umat(params[:2])
    if len(params) == 2:
        dens = 1
    else:
        dens = params[-1]
    gpts, gwts = gau.gauss_tri(order=2)
    for cont in range(gpts.shape[0]):
        r, s = gpts[cont, :]
        H, B, det = fem.elast_diff_2d(r, s, coord, fem.shape_tri3)
        factor = det * gwts[cont]
        stiff_mat += 0.5 * factor * (B.T @ C @ B)
        mass_mat += 0.5 * dens * factor * (H.T @ H)
    return stiff_mat, mass_mat
5a0381bb7961b811650cc57af7317737995dd866
6,018
import torch


def make_offgrid_patches_xcenter_xincrement(n_increments: int, n_centers: int,
                                            min_l: float, patch_dim: float,
                                            device):
    """
    for each random point in the image and for each increment, make a square patch

    return: I x C x P x P x 2
    """
    patches_xcenter = make_offgrid_patches_xcenter(n_centers, min_l, patch_dim, device)  # C x P x P x 2
    increments = min_l * torch.arange(0, n_increments, device=patches_xcenter.device)

    # expand patches for each increment
    size = (n_increments, *patches_xcenter.shape)
    patches_xcenter_xincrement = patches_xcenter.unsqueeze(0).expand(size)
    assert torch.allclose(patches_xcenter_xincrement[0, :, :], patches_xcenter)
    assert torch.allclose(patches_xcenter_xincrement[1, :, :], patches_xcenter)

    patches_xcenter_xincrement = patches_xcenter_xincrement + increments[:, None, None, None, None]

    # some checks
    assert len(patches_xcenter_xincrement.shape) == 5
    assert patches_xcenter_xincrement.shape[-1] == 2
    assert patches_xcenter_xincrement.shape[0] == n_increments
    assert patches_xcenter_xincrement.shape[1] == n_centers
    assert patches_xcenter_xincrement.shape[2] == patches_xcenter_xincrement.shape[3] == patch_dim * 2
    return patches_xcenter_xincrement
dc7fe393e6bee691f9c6ae399c52668ef98372c4
6,019
import xml.etree.ElementTree as ET

import pandas as pd


def load_gazes_from_xml(filepath: str) -> pd.DataFrame:
    """loads data from the gaze XML file output by itrace.
    Returns the responses as a pandas DataFrame

    Parameters
    ----------
    filepath : str
        path to XML

    Returns
    -------
    pd.DataFrame
        Gazes contained in the xml file
    """
    root = ET.parse(filepath)
    return pd.DataFrame(list(map(lambda e: e.attrib, root.findall("./gazes/response"))))
b1fd17eace5ea253ce82617f5e8c9238a78d925a
6,020
import numpy as np


def axis_rotation(points, angle, inplace=False, deg=True, axis='z'):
    """Rotate points angle (in deg) about an axis."""
    axis = axis.lower()

    # Copy original array if not inplace
    if not inplace:
        points = points.copy()

    # Convert angle to radians
    if deg:
        angle *= np.pi / 180

    if axis == 'x':
        y = points[:, 1] * np.cos(angle) - points[:, 2] * np.sin(angle)
        z = points[:, 1] * np.sin(angle) + points[:, 2] * np.cos(angle)
        points[:, 1] = y
        points[:, 2] = z
    elif axis == 'y':
        x = points[:, 0] * np.cos(angle) + points[:, 2] * np.sin(angle)
        z = -points[:, 0] * np.sin(angle) + points[:, 2] * np.cos(angle)
        points[:, 0] = x
        points[:, 2] = z
    elif axis == 'z':
        x = points[:, 0] * np.cos(angle) - points[:, 1] * np.sin(angle)
        y = points[:, 0] * np.sin(angle) + points[:, 1] * np.cos(angle)
        points[:, 0] = x
        points[:, 1] = y
    else:
        raise ValueError('invalid axis. Must be either "x", "y", or "z"')

    if not inplace:
        return points
dccb663a9d8d4f6551bde2d6d26868a181c3c0a7
6,021
async def unhandled_exception(request: Request, exc: UnhandledException):
    """Return a JSON error response for an unhandled exception."""
    return JSONResponse(
        status_code=400,
        content={"message": "Something bad happened."
                            f" Internal Error: {exc.message!r}"},
    )
bff466190f5804def1416ee6221dccb3739c7dec
6,022
def register_view(request):
    """Render HTML page"""
    form = CreateUserForm()
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            form.save()
            user = form.cleaned_data.get('username')
            messages.success(request, "Account was created for " + user)
            return redirect('loginPage')

    context = {'form': form}
    return render(request, 'register.html', {'data': context})
c129a561c31c442ca091cfe38cb2f7a27f94a25d
6,023
def luv2rgb(luv, *, channel_axis=-1):
    """Luv to RGB color space conversion.

    Parameters
    ----------
    luv : (..., 3, ...) array_like
        The image in CIE Luv format. By default, the final dimension denotes
        channels.

    Returns
    -------
    out : (..., 3, ...) ndarray
        The image in RGB format. Same dimensions as input.

    Raises
    ------
    ValueError
        If `luv` is not at least 2-D with shape (..., 3, ...).

    Notes
    -----
    This function uses luv2xyz and xyz2rgb.
    """
    return xyz2rgb(luv2xyz(luv))
bac8e7155f2249135158786d39c1f2d95af22fc8
6,024
def deposit_fetcher(record_uuid, data):
    """Fetch a deposit identifier.

    :param record_uuid: Record UUID.
    :param data: Record content.
    :returns: A :class:`invenio_pidstore.fetchers.FetchedPID` that contains
        data['_deposit']['id'] as pid_value.
    """
    return FetchedPID(
        provider=DepositProvider,
        pid_type=DepositProvider.pid_type,
        pid_value=str(data['_deposit']['id']),
    )
c4505eff50473204c5615991f401a45cc53779a1
6,025
import time


def make_filename():
    """This function creates a unique filename."""
    unique_filename = time.strftime("%Y%m%d-%H%M%S")
    #unique_filename = str(uuid.uuid1())
    #unique_filename = str(uuid.uuid1().hex[0:7])
    save_name = 'capture_ferhat_{}.png'.format(unique_filename)
    return save_name
bf16b642884381d795148e045de2387d0acaf23d
6,026
import re
from collections import defaultdict


def compareLists(sentenceList, majorCharacters):
    """
    Compares the list of sentences with the character names and returns
    sentences that include names.
    """
    characterSentences = defaultdict(list)
    for sentence in sentenceList:
        for name in majorCharacters:
            if re.search(r"\b(?=\w)%s\b(?!\w)" % re.escape(name),
                         sentence, re.IGNORECASE):
                characterSentences[name].append(sentence)
    return characterSentences
4b41da794ff936a3769fe67580b989e0de343ee7
6,027
import re


def is_live_site(url):
    """Ensure that the tool is not used on the production Isaac website.

    Use of this tool or any part of it on Isaac Physics and related websites
    is a violation of our terms of use: https://isaacphysics.org/terms
    """
    if re.search(r"http(s)?://isaac(physics|chemistry|maths|biology|science)\.org", url):
        return True
    else:
        return False
407624a049e92740eb82753d941780a446b1facf
6,028
def score_false(e, sel):
    """Return scores for internal-terminal nodes"""
    return e * (~sel).sum()
077cd38c6d1186e2d70fd8a93f44249b0cef2885
6,029
# Standard library imports
import os
import tkinter as tk
import tkinter.font as TkFont

# Local imports
from .BiblioSys import DISPLAYS, GUI_DISP


def Select_multi_items(list_item,
                       mode='multiple',
                       fact=2,
                       win_widthmm=80,
                       win_heightmm=100,
                       font_size=16):
    """interactive selection of items among the list list_item

    Args:
        list_item (list): list of items used for the selection

    Returns:
        val (list): list of selected items without duplicate
    """
    global val

    def selected_item():
        global val
        val = [listbox.get(i) for i in listbox.curselection()]
        if os.name == 'nt':
            window.destroy()

    # Getting the ppi of the selected prime display.
    ppi = DISPLAYS[GUI_DISP]['ppi']

    # Setting the window title
    if mode == 'single':
        title = 'Single item selection'
    else:
        title = 'Multiple items selection'

    # Creating the gui window
    window = tk.Tk()

    # Setting the window geometry parameters
    font_title = TkFont.Font(family='arial', size=font_size, weight='bold')
    title_widthmm, _ = _str_size_mm(title, font_title, ppi)
    win_widthmm = max(title_widthmm * fact, win_widthmm)
    win_widthpx = str(_mm_to_px(win_widthmm, ppi))
    win_heightpx = str(_mm_to_px(win_heightmm, ppi))
    #win_heightpx = '500'
    win_xpx = str(int(DISPLAYS[GUI_DISP]['x']) + 50)
    win_ypx = str(int(DISPLAYS[GUI_DISP]['y']) + 50)
    window.geometry(f'{win_widthpx}x{win_heightpx}+{win_xpx}+{win_ypx}')
    window.attributes("-topmost", True)
    window.title(title)

    yscrollbar = tk.Scrollbar(window)
    yscrollbar.pack(side=tk.RIGHT, fill=tk.Y)

    selectmode = tk.MULTIPLE
    if mode == 'single':
        selectmode = tk.SINGLE
    listbox = tk.Listbox(window, width=40, height=10,
                         selectmode=selectmode,
                         yscrollcommand=yscrollbar.set)

    x = list_item
    for idx, item in enumerate(x):
        listbox.insert(idx, item)
        listbox.itemconfig(idx, bg="white" if idx % 2 == 0 else "white")

    btn = tk.Button(window, text='OK', command=selected_item)
    btn.pack(side='bottom')
    listbox.pack(padx=10, pady=10, expand=tk.YES, fill="both")
    yscrollbar.config(command=listbox.yview)
    window.mainloop()
    return val
ba0eacf5d05e0a51ef5a2415ef94a39d395afbdf
6,030
import logging

import numpy


def retrieveXS(filePath, evMin=None, evMax=None):
    """Open an ENDF file and return the scattering XS"""
    logging.info('Retrieving scattering cross sections from file {}'
                 .format(filePath))
    energies = []
    crossSections = []
    with open(filePath) as fp:
        line = fp.readline()
        while line[0] == '#':
            line = fp.readline()
        while line != '' and '#END' not in line:
            ev, xs = [float(xx) for xx in line.split()[:2]]
            energies.append(ev)
            crossSections.append(xs)
            line = fp.readline()
    logging.info('Done')
    energies = numpy.array(energies)
    crossSections = numpy.array(crossSections)

    bounds = energies.min(), energies.max()
    if evMin is None:
        evMin = bounds[0]
    else:
        if bounds[0] > evMin:
            logging.warning('Could not find requested minimum energy '
                            '{:.4E} eV in cross section file {}. '
                            'Using minimum found: {:.4E} eV'
                            .format(evMin, filePath, bounds[0]))
            evMin = bounds[0]
    indices = numpy.where(energies >= evMin)
    energies = energies[indices]
    crossSections = crossSections[indices]

    if evMax is None:
        evMax = bounds[1]
    else:
        if bounds[1] < evMax:
            logging.warning('Could not find requested maximum energy '
                            '{:.4E} eV in cross section file {}. '
                            'Using maximum found: {:.4E} eV'
                            .format(evMax, filePath, bounds[1]))
            evMax = bounds[1]
    indices = numpy.where(energies <= evMax)
    energies = energies[indices]
    crossSections = crossSections[indices]

    return energies, crossSections
388986facd75540983870f1f7e0a6f51b6034271
6,031
import string


def _parse_java_simple_date_format(fmt):
    """
    Split a SimpleDateFormat into literal strings and format codes with counts.

    Examples
    --------
    >>> _parse_java_simple_date_format("'Date:' EEEEE, MMM dd, ''yy")
    ['Date: ', ('E', 5), ', ', ('M', 3), ' ', ('d', 2), ", '", ('y', 2)]
    """
    out = []
    quoted = False
    prev_c = None
    prev_count = 0
    literal_text = ''
    k = 0
    while k < len(fmt):
        c = fmt[k]
        k += 1
        if not quoted and c == "'" and k < len(fmt) and fmt[k] == "'":
            # Repeated single quote.
            if prev_c is not None:
                out.append((prev_c, prev_count))
                prev_c = None
                prev_count = 0
            literal_text += c
            k += 1
            continue
        if c == "'":
            if not quoted:
                if prev_c is not None:
                    out.append((prev_c, prev_count))
                    prev_c = None
                    prev_count = 0
            if literal_text:
                out.append(literal_text)
                literal_text = ''
            quoted = not quoted
            continue
        if quoted:
            literal_text += c
            continue
        if c not in string.ascii_letters:
            if prev_c is not None:
                out.append((prev_c, prev_count))
                prev_c = None
                prev_count = 0
            literal_text += c
            continue
        if c not in 'GyMdhHmsSEDFwWakKzZ':
            raise ValueError(f"unknown format character {c}")
        if literal_text != '':
            out.append(literal_text)
            literal_text = ''
        if prev_c is not None and c != prev_c:
            out.append((prev_c, prev_count))
            prev_count = 0
        prev_c = c
        prev_count += 1
    else:
        if quoted:
            raise ValueError("missing closing quote; input ends "
                             f"with '{literal_text}")
        if literal_text != '':
            out.append(literal_text)
        elif prev_c is not None:
            out.append((prev_c, prev_count))
    return out
3fe42e4fc96ee96c665c3c240cb00756c8534c84
6,032
import logging


def rekey_by_sample(ht):
    """Re-key table by sample id to make subsequent ht.filter(ht.S == sample_id) steps 100x faster"""
    ht = ht.key_by(ht.locus)
    ht = ht.transmute(
        ref=ht.alleles[0],
        alt=ht.alleles[1],
        het_or_hom_or_hemi=ht.samples.het_or_hom_or_hemi,
        #GQ=ht.samples.GQ,
        HL=ht.samples.HL,
        S=ht.samples.S,
    )
    ht = ht.key_by(ht.S)
    ht = ht.transmute(
        chrom=ht.locus.contig.replace("chr", ""),
        pos=ht.locus.position
    )
    logging.info("Schema after re-key by sample:")
    ht.describe()
    return ht
3e879e6268017de31d432706dab9e672e85673aa
6,033
import collections

import numpy as np
import patsy


def _sample_prior_fixed_model(formula_like, data=None, a_tau=1.0, b_tau=1.0,
                              nu_sq=1.0, n_iter=2000,
                              generate_prior_predictive=False,
                              random_state=None):
    """Sample from prior for a fixed model."""
    rng = check_random_state(random_state)

    y, X = patsy.dmatrices(formula_like, data=data)
    y, X = _check_design_matrices(y, X)

    outcome_names = y.design_info.column_names
    coef_names = [rdu.get_default_coefficient_name(n)
                  for n in X.design_info.column_names]
    n_coefs = len(coef_names)

    beta, tau_sq, lp = _sample_parameters_conjugate_priors(
        n_coefs, a_tau=a_tau, b_tau=b_tau, nu_sq=nu_sq,
        size=n_iter, random_state=rng)

    chains = collections.OrderedDict({'tau_sq': tau_sq})
    for j, t in enumerate(coef_names):
        chains[t] = beta[:, j]
    chains['lp__'] = lp

    outcome_chains = None
    if generate_prior_predictive:
        sampled_outcomes, _ = _sample_outcomes(
            X, beta, tau_sq, random_state=rng)

        outcome_chains = collections.OrderedDict(
            {n: sampled_outcomes[..., i]
             for i, n in enumerate(outcome_names)})

    args = {'random_state': random_state, 'n_iter': n_iter}
    results = {'chains': chains,
               'args': args,
               'acceptance': 1.0,
               'accept_stat': np.ones((n_iter,), dtype=float),
               'mean_lp__': np.mean(chains['lp__'])}

    prior_predictive = None
    if generate_prior_predictive:
        prior_predictive = {
            'chains': outcome_chains,
            'args': args,
            'acceptance': 1.0,
            'accept_stat': np.ones((n_iter,), dtype=float)
        }

    return results, prior_predictive
1e57cd3f8812e28a8d178199d3dd9c6a23614dc0
6,034
from typing import Any


async def validate_input(
    hass: core.HomeAssistant, data: dict[str, Any]
) -> dict[str, str]:
    """Validate the user input allows us to connect.

    Data has the keys from STEP_USER_DATA_SCHEMA with values provided by the user.
    """
    zeroconf_instance = await zeroconf.async_get_instance(hass)
    async_client = get_async_client(hass)
    device = Device(data[CONF_IP_ADDRESS], zeroconf_instance=zeroconf_instance)
    await device.async_connect(session_instance=async_client)
    await device.async_disconnect()
    return {
        SERIAL_NUMBER: str(device.serial_number),
        TITLE: device.hostname.split(".")[0],
    }
5ececb6dfc84e232d413b2ada6c6076a75420b49
6,035
def closeWindow(plotterInstance=None):
    """Close the current or the input rendering window."""
    if not plotterInstance:
        plotterInstance = settings.plotter_instance
        if not plotterInstance:
            return
    if plotterInstance.interactor:
        plotterInstance.interactor.ExitCallback()
    plotterInstance.closeWindow()
    return plotterInstance
af3df7fa07069413c59f498529f4d21a9b88e9f4
6,036
def _format_stages_summary(stage_results):
    """
    stage_results (list of (tuples of
      (success:boolean, stage_name:string, status_msg:string)))

    returns a string of a report, one line per stage.
    Something like:
        Stage: <stage x> :: SUCCESS
        Stage: <stage y> :: FAILED
        Stage: <stage z> :: SUCCESS
    """
    # find the longest stage name to pad report lines
    max_name_len = 0
    for entry in stage_results:
        _, stage_name, _ = entry
        name_len = len(stage_name)
        if name_len > max_name_len:
            max_name_len = name_len

    summary = ""
    for entry in stage_results:
        _, stage_name, status_msg = entry
        summary += 'Stage: ' + stage_name.ljust(max_name_len) + ":: "
        summary += status_msg + '\n'

    return summary
2f5c757342e98ab258bdeaf7ffdc0c5d6d4668ca
6,037
import json


def pack(envelope, pack_info):
    """Pack envelope into a byte buffer.

    Parameters
    ----------
    envelope : data structure
    pack_info : packing information

    Returns
    -------
    packet : bytes
    """
    ptype = pack_info.ptype
    packer = packers[ptype]
    payload = packer.pack(envelope)
    hdr = dict(packer=packer.kind, ver=packer.version, nbytes=len(payload))
    hdr_buf = json.dumps(hdr).encode()
    packet = hdr_buf + partition + payload
    return packet
5202e9eef7fc658157798d7f0d64820b1dfa3ac3
6,038
import tempfile


def tmpnam_s():
    """Implementation of POSIX tmpnam() in scalar context"""
    ntf = tempfile.NamedTemporaryFile(delete=False)
    result = ntf.name
    ntf.close()
    return result
a8c193a0e1ed6cd386dda9e0c084805cbed5f189
6,039
def timezone_lookup():
    """Force a timezone lookup right now"""
    TZPP = NSBundle.bundleWithPath_("/System/Library/PreferencePanes/"
                                    "DateAndTime.prefPane/Contents/"
                                    "Resources/TimeZone.prefPane")
    TimeZonePref = TZPP.classNamed_('TimeZonePref')
    ATZAdminPrefererences = TZPP.classNamed_('ATZAdminPrefererences')
    atzap = ATZAdminPrefererences.defaultPreferences()
    pref = TimeZonePref.alloc().init()
    atzap.addObserver_forKeyPath_options_context_(pref, "enabled", 0, 0)
    result = pref._startAutoTimeZoneDaemon_(0x1)
    # If this is not set to 1 then AutoTimezone still isn't enabled.
    # This additional preference check makes this script work with 10.12
    if pref.isTimeZoneAutomatic() != 1:
        return False
    return True
a78a7f32f02e4f6d33b91bb68c1330d531b0208e
6,040
import networkx as nx
from operator import itemgetter


def rollingCPM(dynNetSN: DynGraphSN, k=3):
    """
    This method is based on Palla et al[1]. It first computes overlapping
    snapshot_communities in each snapshot based on the clique percolation
    algorithm, and then matches snapshot_communities in successive steps
    using a method based on the union graph.

    [1] Palla, G., Barabási, A. L., & Vicsek, T. (2007).
    Quantifying social group evolution.
    Nature, 446(7136), 664.

    :param dynNetSN: a dynamic network (DynGraphSN)
    :param k: the size of cliques used as snapshot_communities building blocks
    :return: DynCommunitiesSN
    """
    DynCom = DynCommunitiesSN()
    old_communities = None
    old_graph = nx.Graph()

    graphs = dynNetSN.snapshots()

    for (date, graph) in graphs.items():
        communitiesAtT = list(_get_percolated_cliques(graph, k))  # the percolated cliques (snapshot_affiliations) as a list of sets of nodes
        for c in communitiesAtT:
            DynCom.add_community(date, c)

        if old_communities is None:  # if first snapshot
            old_graph = graph
            dateOld = date
            old_communities = communitiesAtT
        else:
            if len(communitiesAtT) > 0:  # if there is at least one community
                union_graph = nx.compose(old_graph, graph)  # create the union graph of the current and the previous
                communities_union = list(_get_percolated_cliques(union_graph, k))  # get the snapshot_affiliations of the union graph

                jaccardBeforeAndUnion = _included(old_communities, communities_union)  # we only care if the value is above 0
                jaccardUnionAndAfter = _included(communitiesAtT, communities_union)  # we only care if the value is above 0

                for c in jaccardBeforeAndUnion:  # for each community in the union graph
                    matched = []
                    born = []
                    killed = []

                    allJaccards = set()
                    for oldC in jaccardBeforeAndUnion[c]:
                        for newC in jaccardUnionAndAfter[c]:
                            allJaccards.add(((oldC, newC), _singleJaccard(oldC, newC)))  # compute jaccard between candidates before and after
                    allJaccards = sorted(allJaccards, key=itemgetter(1), reverse=True)
                    sortedMatches = [k[0] for k in allJaccards]

                    oldCToMatch = dict(jaccardBeforeAndUnion[c])  # get all coms before
                    newCToMatch = dict(jaccardUnionAndAfter[c])  # get all new coms
                    while len(sortedMatches) > 0:  # as long as there are couples of unmatched snapshot_affiliations
                        matchedKeys = sortedMatches[0]  # pair of snapshot_affiliations of highest jaccard
                        matched.append(matchedKeys)  # this pair will be matched
                        del oldCToMatch[matchedKeys[0]]  # delete chosen com from possible to match
                        del newCToMatch[matchedKeys[1]]
                        sortedMatches = [k for k in sortedMatches if len(set(matchedKeys) & set(k)) == 0]  # keep only pairs of unmatched snapshot_affiliations

                    if len(oldCToMatch) > 0:
                        killed.append(list(oldCToMatch.keys())[0])
                    if len(newCToMatch) > 0:
                        born.append(list(newCToMatch.keys())[0])

                    for aMatch in matched:
                        DynCom.events.add_event((dateOld, DynCom._com_ID(dateOld, aMatch[0])),
                                                (date, DynCom._com_ID(date, aMatch[1])),
                                                dateOld, date, "continue")

                    for kil in killed:  # these are actual merges (unmatched snapshot_affiliations are "merged" to new ones)
                        for com in jaccardUnionAndAfter[c]:
                            DynCom.events.add_event((dateOld, DynCom._com_ID(dateOld, kil)),
                                                    (date, DynCom._com_ID(date, com)),
                                                    dateOld, date, "merged")

                    for b in born:  # these are actual splits (new snapshot_affiliations are "split" from old ones)
                        for com in jaccardBeforeAndUnion[c]:
                            DynCom.events.add_event((dateOld, DynCom._com_ID(dateOld, com)),
                                                    (date, DynCom._com_ID(date, b)),
                                                    dateOld, date, "split")

            old_graph = graph
            dateOld = date
            old_communities = communitiesAtT

    print(DynCom.snapshots)
    print(DynCom.events.nodes)
    DynCom._relabel_coms_from_continue_events()
    return DynCom
b4050544cd8a98346f436c75e5c3eeeb9a64c030
6,041
import numpy as np


def penalty_eqn(s_m, Dt):
    """
    Description:
        Simple function for calculating the penalty for late submission of a project.

    Args:
        :in (1): maximum possible score
        :in (2): difference between the date of deadline and the date of
                 assignment of the project (in hours)
        :out (1): rounded result of the calculation
    """
    # offset equal to one tenth of the maximum score
    delta_p = s_m / 10
    # main equation of penalty for late submission
    p_s = abs((Dt / 24) * np.exp(0.5)) + delta_p

    return round(s_m - p_s)
694a2b77c1612d7036c46768ee834043a1af3902
6,042
import re


def stop(name=None, id=None):
    """
    Stop (terminate) the VM identified by the given id or name.
    When both a name and id are provided, the id is ignored.

    name:
        Name of the defined VM.

    id:
        VM id.

    CLI Example:

    .. code-block:: bash

        salt '*' vmctl.stop name=alpine
    """
    ret = {}
    cmd = ["vmctl", "stop"]
    if not (name or id):
        raise SaltInvocationError('Must provide either "name" or "id"')
    elif name:
        cmd.append(name)
    else:
        cmd.append(id)

    result = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
    if result["retcode"] == 0:
        if re.match("^vmctl: sent request to terminate vm.*", result["stderr"]):
            ret["changes"] = True
        else:
            ret["changes"] = False
    else:
        raise CommandExecutionError(
            "Problem encountered running vmctl",
            info={"errors": [result["stderr"]], "changes": ret},
        )

    return ret
3dbb2771f7407f3a28a9249551268b9ba23d906e
6,043
import numpy as np


def ky_att(xs, b, Mach, k0, Att=-20):
    """
    Returns the spanwise gust wavenumber 'ky_att' with response at 'xs'
    attenuated by 'Att' decibels

    Parameters
    ----------
    xs : float
        Chordwise coordinate of reference point, defined in interval (-b, +b].

    b : float
        Airfoil semi chord.

    Mach : float
        Mean flow Mach number.

    k0 : float
        Acoustic wavenumber 'k0'. Can be obtained from the temporal frequency
        'f' [in Hz] and the speed of sound 'c0' [in m/s] as 'k0 = 2*pi*f/c0'.

    Att : float, optional
        Level of attenuation of the surface pressure at point 'xs', in
        decibels. Defaults to -20 dB.

    Returns
    -------
    ky_att : float
        Subcritical gust spanwise wavenumber 'ky_att' such that the aerofoil
        response at point 'xs' is 'Att' dB reduced.
    """
    beta = np.sqrt(1 - Mach**2)

    # critical gust spanwise wavenumber
    ky_crit = k0 / beta

    term1 = -(beta**2) * np.log(10**(Att / 20)) / (k0 * (xs + b))

    return ky_crit * np.sqrt(term1**2 + 1)
78d62081d0849d035953a694bbb7a0fcf956f76b
6,044
from typing import Optional


def has_multiline_items(strings: Optional[Strings]) -> bool:
    """Check whether one of the items in the list has multiple lines."""
    return any(is_multiline(item) for item in strings) if strings else False
75dd6ce7d7152a200ff12c53104ff839a21d28f4
6,045
import inspect
from typing import Optional
from typing import Tuple


def eval_ctx(
    layer: int = 0,
    globals_: Optional[DictStrAny] = None,
    locals_: Optional[DictStrAny] = None
) -> Tuple[DictStrAny, DictStrAny]:
    """Get the global and local variables of a calling context.

    Args:
        layer (int, optional): Number of stack layers up. Defaults to 0.
        globals_ (Optional[DictStrAny], optional): Global variables. Defaults to None.
        locals_ (Optional[DictStrAny], optional): Local variables. Defaults to None.

    Returns:
        Tuple[DictStrAny, DictStrAny]: Dictionaries of global and local variables.
    """
    frame = inspect.stack()[layer + 1].frame  # add the current frame
    global_dict, local_dict = frame.f_globals, frame.f_locals
    global_dict.update(globals_ or {})
    local_dict.update(locals_ or {})
    return global_dict, local_dict
81b782596bcc29f1be4432cc1b95230ac952bf2b
6,046
import os
import re


def find_ports(device):
    """
    Find the port chain a device is plugged on.

    This is done by searching sysfs for a device that matches the device
    bus/address combination.

    Useful when the underlying usb lib does not return device.port_number for
    whatever reason.
    """
    bus_id = device.bus
    dev_id = device.address
    for dirent in os.listdir(USB_SYS_PREFIX):
        matches = re.match(USB_PORTS_STR + '$', dirent)
        if matches:
            bus_str = readattr(dirent, 'busnum')
            if bus_str:
                busnum = float(bus_str)
            else:
                busnum = None
            dev_str = readattr(dirent, 'devnum')
            if dev_str:
                devnum = float(dev_str)
            else:
                devnum = None
            if busnum == bus_id and devnum == dev_id:
                return str(matches.groups()[1])
aec31745b0d28ea58803242faca43258e8a78dd6
6,047
def extract_vcalendar(allriscontainer):
    """Return a list of committee meetings extracted from html content."""
    vcalendar = {
        'vevents': findall_events(allriscontainer),
    }

    if vcalendar.get('vevents'):
        base_url = allriscontainer.base_url
        vcalendar['url'] = find_calendar_url(base_url)
        vcalendar['uid'] = find_calendar_uid(base_url)
        vcalendar['borough'] = find_calendar_borough(base_url)
        vcalendar['committee'] = find_calendar_committee(allriscontainer)
        vcalendar['name'] = '{}: {}'.format(
            vcalendar['borough'], vcalendar['committee']
        )

    return vcalendar
f792ae3d8826d37b2fba874524ec78ac502fb1f0
6,048
import tensorflow as tf


def rnn_helper(inp,
               length,
               cell_type=None,
               direction="forward",
               name=None,
               reuse=None,
               *args,
               **kwargs):
    """Adds ops for a recurrent neural network layer.

    This function calls an actual implementation of a recurrent neural network
    based on `cell_type`.

    There are three modes depending on the value of `direction`:

      forward: Adds a forward RNN.
      backward: Adds a backward RNN.
      bidirectional: Adds both forward and backward RNNs and creates a
                     bidirectional RNN.

    Args:
      inp: A 3-D tensor of shape [`batch_size`, `max_length`, `feature_dim`].
      length: A 1-D tensor of shape [`batch_size`] and type int64. Each element
        represents the length of the corresponding sequence in `inp`.
      cell_type: Cell type of RNN. Currently can only be "lstm".
      direction: One of "forward", "backward", "bidirectional".
      name: Name of the op.
      *args: Other arguments to the layer.
      **kwargs: Keyword arguments to the layer.

    Returns:
      A 3-D tensor of shape [`batch_size`, `max_length`, `num_nodes`].
    """
    assert cell_type is not None
    rnn_func = None
    if cell_type == "lstm":
        rnn_func = lstm_layer
    assert rnn_func is not None
    assert direction in ["forward", "backward", "bidirectional"]

    with tf.variable_scope(name, reuse=reuse):
        if direction in ["forward", "bidirectional"]:
            forward = rnn_func(
                inp=inp,
                length=length,
                backward=False,
                name="forward",
                reuse=reuse,
                *args,
                **kwargs)
            if isinstance(forward, tuple):
                # lstm_layer returns a tuple (output, memory). We only need the
                # first element.
                forward = forward[0]
        if direction in ["backward", "bidirectional"]:
            backward = rnn_func(
                inp=inp,
                length=length,
                backward=True,
                name="backward",
                reuse=reuse,
                *args,
                **kwargs)
            if isinstance(backward, tuple):
                # lstm_layer returns a tuple (output, memory). We only need the
                # first element.
                backward = backward[0]

        if direction == "forward":
            out = forward
        elif direction == "backward":
            out = backward
        else:
            out = tf.concat(axis=2, values=[forward, backward])

    return out
d6d457a10bd921560a76bc54a083271c82b144ec
6,049
import numpy as np


def get_data(dataset):
    """
    :return: encodings array of (2048, n)
             labels list of (n)
    """
    query = "SELECT * FROM embeddings WHERE label IS NOT NULL"
    cursor, connection = db_actions.connect(dataset)
    cursor.execute(query)
    result_list = cursor.fetchall()

    encodings = np.zeros((2048, len(result_list)))
    labels = []
    for i in range(len(result_list)):
        encodings[:, i] = result_list[i][0]
        labels.append(result_list[i][1].encode())
    encodings = np.nan_to_num(encodings)
    labels = [x.decode('utf-8') for x in labels]
    return encodings.astype('float32'), labels
9f23631c6e263f99bab976e1225adbb448323783
6,050
def read_hdr(name, order='C'):
    """Read hdr file."""
    # get dims from .hdr
    with open(name + ".hdr", "r") as h:
        h.readline()  # skip line
        l = h.readline()
    dims = [int(i) for i in l.split()]
    if order == 'C':
        dims.reverse()
    return dims
57daadfdf2342e1e7ef221cc94f2e8f70c504944
6,051
import evdev


def IsTouchDevice(dev):
    """Check if a device is a touch device.

    Args:
        dev: evdev.InputDevice

    Returns:
        True if dev is a touch device.
    """
    keycaps = dev.capabilities().get(evdev.ecodes.EV_KEY, [])
    return evdev.ecodes.BTN_TOUCH in keycaps
6fd36c4921f3ee4bf37c6ce8bcaf435680fc82d5
6,052
def load_users():
    """
    Loads users csv

    :return:
    """
    with open(USERS, "r") as file:
        # creates dictionary to separate csv values to make it easy to iterate
        # between them. the hash() function is used to identify the values in
        # the csv, as they have their individual hash keys, and as the csv is
        # immutable it'll be the same throughout
        users = {}
        for user in file:
            user = user.strip().split(",")
            user_tuple = create_user(*user[:5], int(user[5]))
            users[hash(user_tuple)] = user_tuple
    return users
255745d36b5b995dfd9a8c0b13a154a87ab6f25e
6,053
import re
import sys
from subprocess import check_call, check_output


def main(argv):
    """
    Push specified revision as a specified bookmark to repo.
    """
    args = parse_arguments(argv)
    pulled = check_output([args.mercurial_binary, 'pull', '-B',
                           args.bookmark, args.repo]).decode('ascii')
    print(pulled)
    # search, not match: "adding changesets" appears mid-output of `hg pull`
    if re.search("adding changesets", pulled):
        print("Unseen changes found on bookmark", args.bookmark,
              "you should probably rebase first", file=sys.stderr)
    check_call([args.mercurial_binary, 'bookmark', '-f', '-r',
                args.rev, args.bookmark])
    check_call([args.mercurial_binary, 'push', '-B', args.bookmark, args.repo])
    return 0
83bb61ab9a1a1fcd138782b268d78a1f63164131
6,054
import community
import networkx as nx
import numpy as np


def clustering_consistency_check(G):
    """
    Check consistency of a community detection algorithm by running it
    a number of times.
    """
    Hun = G.to_undirected()
    Hun = nx.convert_node_labels_to_integers(Hun, label_attribute='skeletonname')
    WHa = np.zeros((len(Hun.nodes()), len(Hun.nodes())))
    for i in range(100):
        partition = community.best_partition(Hun, randomize=None, resolution=1.0)
        for com in set(partition.values()):
            list_nodes = [nodes for nodes in partition.keys()
                          if partition[nodes] == com]
            list_nodes = np.array(list_nodes)
            WHa[np.ix_(list_nodes, list_nodes)] += 1
        print('Iteration:', i)
    return WHa
917bb7a23b651821389edbcc62c81fbe4baf3d08
6,055
import pandas as pd
from sklearn.preprocessing import normalize


def l2_normalize_rows(frame):
    """
    L_2-normalize the rows of this DataFrame, so their lengths in Euclidean
    distance are all 1. This enables cosine similarities to be computed as
    dot-products between these rows.

    Rows of zeroes will be normalized to zeroes, and frames with no rows will
    be returned as-is.
    """
    if frame.shape[0] == 0:
        return frame
    index = frame.index
    return pd.DataFrame(
        data=normalize(frame, norm='l2', copy=False, axis=1),
        index=index
    )
889c2f4473fdab4661fecdceb778aae1bb62652d
6,056
import socket


def canonical_ipv4_address(ip_addr):
    """Return the IPv4 address in a canonical format"""
    return socket.inet_ntoa(socket.inet_aton(ip_addr))
edacc70ccc3eef12030c4c597c257775d3ed5fa4
6,057
def _build_dynatree(site, expanded):
    """Returns a dynatree hash representation of our pages and menu
    hierarchy."""
    subtree = _pages_subtree(site.doc_root, site.default_language, True, 1,
                             expanded)
    subtree['activate'] = True
    pages_node = {
        'title': 'Pages',
        'key': 'system:pages',
        'expand': True,
        'icon': 'fatcow/folders_explorer.png',
        'children': [subtree, ],
    }

    language = site.default_language
    menus = []
    for menu in Menu.objects.filter(site=site):
        items = []
        for item in menu.first_level.all():
            items.append(_menuitem_subtree(item, language, True, 1, expanded))

        menus.append({
            'title': menu.name,
            'key': 'menu:%d' % menu.id,
            'expand': True,
            'icon': 'fatcow/folders.png',
            'children': items,
        })

    menus_node = {
        'title': 'Menus',
        'key': 'system:menus',
        'expand': True,
        'icon': 'fatcow/folders_explorer.png',
        'children': menus,
    }

    tags = []
    for tag in Tag.objects.filter(site=site):
        title = tag.display_text(language)
        if not title:
            title = '<i>None</i>'

        tags.append({
            'title': title,
            'key': 'tag:%d' % tag.id,
            'icon': 'fatcow/document_tag.png',
            'expand': False,
        })

    tags_node = {
        'title': 'Tags',
        'key': 'system:tags',
        'expand': False,
        'icon': 'fatcow/folders_explorer.png',
        'children': tags,
    }

    tree = [pages_node, menus_node, tags_node]
    return tree
38dd222ed5cde6b4d6bff4a632c6150666580b92
6,058
import logging


def check_tie_condition(board):
    """
    tie = if no empty cells and no win
    """
    logging.debug('check_tie_condition()')

    # is the board full and no wins
    empty_cells = board.count('-')
    logging.debug(f'Number of empty cells {empty_cells}')
    tie = (empty_cells == 0)
    return tie
81325de769d401d1dd11dcf60f490bb76653b6e9
6,059
def aggregator(df, groupbycols):
    """
    Aggregates flowbyactivity or flowbysector df by given groupbycols

    :param df: Either flowbyactivity or flowbysector
    :param groupbycols: Either flowbyactivity or flowbysector columns
    :return:
    """
    # tmp replace null values with empty cells
    df = replace_NoneType_with_empty_cells(df)

    # drop columns with flowamount = 0
    df = df[df['FlowAmount'] != 0]

    # list of column headers, that if exist in df, should be aggregated
    # using the weighted avg fxn
    possible_column_headers = ('Spread', 'Min', 'Max', 'DataReliability',
                               'TemporalCorrelation', 'GeographicalCorrelation',
                               'TechnologicalCorrelation', 'DataCollection')

    # list of column headers that do exist in the df being aggregated
    column_headers = [e for e in possible_column_headers
                      if e in df.columns.values.tolist()]

    df_dfg = df.groupby(groupbycols).agg({'FlowAmount': ['sum']})

    # run through other columns creating weighted average
    for e in column_headers:
        df_dfg[e] = weighted_average(df, e, 'FlowAmount', groupbycols)

    df_dfg = df_dfg.reset_index()
    df_dfg.columns = df_dfg.columns.droplevel(level=1)

    # if datatypes are strings, ensure that Null values remain NoneType
    df_dfg = replace_strings_with_NoneType(df_dfg)

    return df_dfg
f8333087efc4a48d70aa6e3d727f73a7d03c8252
6,060
def unpack(X):
    """ Unpack a comma separated list of values into a flat list """
    return flatten([x.split(",") for x in list(X)])
1033fd5bdcd292a130c08a8f9819bf66a38fccac
6,061
def doize(tock=0.0, **opts):
    """
    Decorator that returns Doist compatible decorated generator function.

    Usage:
        @doize
        def f():
            pass

    Parameters:
        tock is default tock attribute of doized f
        opts is dictionary of remaining parameters that becomes .opts attribute
            of doized f
    """
    def decorator(f):
        # must create copy not wrapper so inspect.isgeneratorfunction works
        # result of decoration
        g = helping.copy_func(f)
        g.tock = tock  # default tock attributes
        g.done = None  # default done state
        g.opts = dict(opts)  # default opts attribute
        return g
    return decorator
0c4a4220546b8c0cbc980c10de0476c9fc6c7995
6,062
import codecs
import logging

import yaml


def get_env(path):
    """
    Read the environment file from given path.

    :param path: Path to the environment file.
    :return: the environment (loaded yaml)
    """
    with codecs.open(path, 'r', 'UTF-8') as env_file:
        conf_string = env_file.read()
    # safe_load: plain yaml.load without a Loader is deprecated and unsafe
    env = yaml.safe_load(conf_string)
    logging.debug('env: %s', env)
    return env
92118337c73a2d01df27242145687619dc7571a7
6,063
def make_chained_transformation(tran_fns, *args, **kwargs):
    """Returns a dataset transformation function that applies a list of
    transformations sequentially.

    Args:
        tran_fns (list): A list of dataset transformations.
        *args: Extra arguments for each of the transformation functions.
        **kwargs: Extra keyword arguments for each of the transformation
            functions.

    Returns:
        A transformation function to be used in
        :tf_main:`tf.data.Dataset.map <data/Dataset#map>`.
    """
    def _chained_fn(data):
        for tran_fns_i in tran_fns:
            data = tran_fns_i(data, *args, **kwargs)
        return data

    return _chained_fn
5f24e030df74a0617e633ca8f8d4a3954674b001
6,064
import tensorflow as tf


def configure_optimizer(learning_rate):
    """Configures the optimizer used for training.

    Args:
      learning_rate: A scalar or `Tensor` learning rate.

    Returns:
      An instance of an optimizer.

    Raises:
      ValueError: if FLAGS.optimizer is not recognized.
    """
    if FLAGS.optimizer == 'adadelta':
        optimizer = tf.train.AdadeltaOptimizer(
            learning_rate,
            rho=FLAGS.adadelta_rho,
            epsilon=FLAGS.opt_epsilon)
    elif FLAGS.optimizer == 'adagrad':
        optimizer = tf.train.AdagradOptimizer(
            learning_rate,
            initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
    elif FLAGS.optimizer == 'adam':
        optimizer = tf.train.AdamOptimizer(
            learning_rate,
            beta1=FLAGS.adam_beta1,
            beta2=FLAGS.adam_beta2,
            epsilon=FLAGS.opt_epsilon)
    elif FLAGS.optimizer == 'ftrl':
        optimizer = tf.train.FtrlOptimizer(
            learning_rate,
            learning_rate_power=FLAGS.ftrl_learning_rate_power,
            initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
            l1_regularization_strength=FLAGS.ftrl_l1,
            l2_regularization_strength=FLAGS.ftrl_l2)
    elif FLAGS.optimizer == 'momentum':
        optimizer = tf.train.MomentumOptimizer(
            learning_rate,
            momentum=FLAGS.momentum,
            name='Momentum')
    elif FLAGS.optimizer == 'rmsprop':
        optimizer = tf.train.RMSPropOptimizer(
            learning_rate,
            decay=FLAGS.rmsprop_decay,
            momentum=FLAGS.rmsprop_momentum,
            epsilon=FLAGS.opt_epsilon)
    elif FLAGS.optimizer == 'sgd':
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    elif FLAGS.optimizer == "adamweightdecay":
        optimizer = AdamWeightDecayOptimizer(
            learning_rate=learning_rate,
            weight_decay_rate=0.01,
            beta_1=FLAGS.adam_beta1,
            beta_2=FLAGS.adam_beta2,
            epsilon=FLAGS.opt_epsilon,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
    else:
        raise ValueError('Optimizer [%s] was not recognized' % FLAGS.optimizer)
    return optimizer
bf7dd03c4133675d58428a054cc16e7be41e88b4
6,065
import functools

import jax


def train_and_evaluate(config, workdir):
    """Runs a training and evaluation loop.

    Args:
      config: Configuration to use.
      workdir: Working directory for checkpoints and TF summaries. If this
        contains checkpoint training will be resumed from the latest checkpoint.

    Returns:
      Training state.
    """
    rng = jax.random.PRNGKey(config.seed)
    rng, data_rng = jax.random.split(rng)

    # Make sure config defines num_epochs and num_train_steps appropriately.
    utils.check_epochs_and_steps(config)

    # Check that perturbed-topk is selection method.
    assert config.selection_method == "perturbed-topk", (
        "ntsnet only supports perturbed-topk as selection method. Got: {}".format(
            config.selection_method))

    train_preprocessing_fn, eval_preprocessing_fn = data.parse_preprocessing_strings(
        config.get("train_preprocess_str", ""),
        config.get("eval_preprocess_str", ""))

    assert config.batch_size % jax.local_device_count() == 0, (
        f"Batch size ({config.batch_size}) should be divisible by number of "
        f"devices ({jax.local_device_count()}).")

    per_device_batch_size = config.batch_size // jax.local_device_count()
    train_ds, eval_ds, num_classes = data.get_dataset(
        config.dataset,
        per_device_batch_size,
        data_rng,
        train_preprocessing_fn=train_preprocessing_fn,
        eval_preprocessing_fn=eval_preprocessing_fn,
        **config.get("data", {}))

    module = AttentionNet.partial(config=config, num_classes=num_classes)

    optimizer = create_optimizer(config)

    loss_fn = functools.partial(ntsnet_loss, config=config)
    train_metrics_dict = {
        "train_loss": loss_fn,
        "train_loss_raw": cross_entropy_raw_logits,
        "train_loss_concat": cross_entropy_concat_logits,
        "train_loss_part": cross_entropy_part_logits,
        "train_accuracy": accuracy,
        "train_rpn_scores_entropy": rpn_scores_entropy,
    }
    eval_metrics_dict = {
        "eval_loss": loss_fn,
        "eval_loss_raw": cross_entropy_raw_logits,
        "eval_loss_concat": cross_entropy_concat_logits,
        "eval_loss_part": cross_entropy_part_logits,
        "eval_accuracy": accuracy,
        "eval_rpn_scores_entropy": rpn_scores_entropy,
    }

    # Enables relevant statistics aggregator.
    stats_aggregators = []

    def add_image_prefix(image_aggregator):
        def aggregator(stats):
            d = image_aggregator(stats)
            return {f"image_{k}": v for k, v in d.items()}
        return aggregator

    if config.get("log_images", True):
        @add_image_prefix
        def plot_patches(stats):
            d = {
                "part_imgs": (stats["part_imgs"] + 1.0) / 2.0,
                "x": (stats["x"] + 1.0) / 2.0
            }
            for i, sc in enumerate(stats["scores"]):
                d[f"scores_{i}"] = sc
            return d

        stats_aggregators.append(plot_patches)

    stats_aggregators.append(lambda x: {"sigma": x["sigma"]})

    state = classification_lib.training_loop(
        module=module,
        rng=rng,
        train_ds=train_ds,
        eval_ds=eval_ds,
        loss_fn=loss_fn,
        optimizer=optimizer,
        train_metrics_dict=train_metrics_dict,
        eval_metrics_dict=eval_metrics_dict,
        stats_aggregators=stats_aggregators,
        config=config,
        workdir=workdir)
    return state
87f1dba561563acc0033663a30f105fe4056d235
6,066
def increment(i, k):
    r"""
    this is a helper function for a summation of the type
    :math:`\sum_{0 \leq k \leq i}`, where i and k are multi-indices.

    Parameters
    ----------
    i: numpy.ndarray
        integer array, i.size = N
    k: numpy.ndarray
        integer array, k.size = N

    Returns
    -------
    changes k on return

    Example
    -------
    k = [1,0,1]
    i = [2,0,2]

    increment(i, k)  # changes k to [1,0,2]
    increment(i, k)  # changes k to [2,0,0]
    increment(i, k)  # changes k to [2,0,1]
    """
    carryover = 1
    if len(k) != len(i):
        raise ValueError('size of i and k do not match up')
    for n in range(len(k))[::-1]:
        if i[n] == 0:
            continue
        tmp = k[n] + carryover
        # print('tmp=', tmp)
        carryover = tmp // (i[n] + 1)
        # print('carryover=', carryover)
        k[n] = tmp % (i[n] + 1)
        if carryover == 0:
            break
    return k
1ac8ef592376fbfa0d04cdd4b1c6b29ad3ed9fbd
6,067
import chex
import haiku as hk


def sample_lopt(key: chex.PRNGKey) -> cfgobject.CFGObject:
    """Sample a small lopt model."""
    lf = cfgobject.LogFeature
    rng = hk.PRNGSequence(key)

    task_family_cfg = para_image_mlp.sample_image_mlp(next(rng))

    lopt_name = parametric_utils.choice(
        next(rng),
        [
            "LearnableAdam", "LearnableSGDM", "LearnableSGD", "MLPLOpt",
            "AdafacMLPLOpt"
        ])
    kwargs = {}
    if lopt_name in ["MLPLOpt", "AdafacMLPLOpt"]:
        kwargs["hidden_size"] = lf(parametric_utils.log_int(next(rng), 2, 512))
        kwargs["hidden_layers"] = parametric_utils.log_int(next(rng), 1, 4)
        kwargs["exp_mult"] = lf(parametric_utils.log_float(next(rng), 1e-5, 1))
        kwargs["step_mult"] = lf(parametric_utils.log_float(next(rng), 1e-5, 1))
    lopt_cfg = cfgobject.CFGObject(lopt_name, kwargs)

    num_steps = lf(parametric_utils.log_int(next(rng), 1, 100))
    outer_bs = lf(parametric_utils.log_int(next(rng), 1, 8))
    return cfgobject.CFGObject(
        "ParametricLOpt", {
            "lopt": lopt_cfg,
            "task_family": task_family_cfg,
            "num_steps": num_steps,
            "outer_batch_size": outer_bs,
        })
b52a7640532ed8ce7760474edbd9832d93e7bdc3
6,068
import numpy
import time


def gen_df_groupby_usecase(method_name, groupby_params=None, method_params=''):
    """Generate df groupby method use case"""
    groupby_params = {} if groupby_params is None else groupby_params
    groupby_params = get_groupby_params(**groupby_params)
    func_text = groupby_usecase_tmpl.format(**{
        'method_name': method_name,
        'groupby_params': groupby_params,
        'method_params': method_params
    })

    global_vars = {'np': numpy, 'time': time}
    loc_vars = {}
    exec(func_text, global_vars, loc_vars)
    _df_groupby_usecase = loc_vars[f'df_groupby_{method_name}_usecase']

    return _df_groupby_usecase
3a4f5745744299db354c17198d3175ad8b7ce4e4
6,069
import os


def hydra_breakpoints(in_bam, pair_stats):
    """Detect structural variation breakpoints with hydra.
    """
    in_bed = convert_bam_to_bed(in_bam)
    if os.path.getsize(in_bed) > 0:
        pair_bed = pair_discordants(in_bed, pair_stats)
        dedup_bed = dedup_discordants(pair_bed)
        return run_hydra(dedup_bed, pair_stats)
    else:
        return None
8df6dc1e4b8649cf9059c9871955fc7e24ff01b6
6,070
import csv


def merge_csvfiles(options):
    """Think of this as a 'join' across options.mergefiles on equal values
    of the column options.timestamp.

    This function takes each file in options.mergefiles, reads them, and
    combines their columns in options.output. The only common column should
    be options.timestamp. The results are undefined if the mergefiles share
    other column names.

    Args:
      options.mergefiles - list of csv filenames
      options.output - filename of merged csv file from this operation
    Returns:
      bool - True if success
    Raises:
      AssertionError - if merging encounters an error.
    """
    records = {}
    all_header_names = []
    records_list = []

    # collect all header fields from mergefiles
    for filename in options.mergefiles:
        records = read_csvfile(filename, True)
        records_list.append(records)
        all_header_names += records.fieldnames

    all_header_names = sorted(set(all_header_names))  # eliminate duplicate header names

    output_fd = open(options.output, 'w')
    writer = csv.DictWriter(output_fd, all_header_names)
    writer.writeheader()

    try:
        # read all values until StopIteration is reached.
        while True:
            # Python 3: use next(iterator); the original used records.next()
            merge_list = [next(records) for records in records_list]
            merge_dict = merge_rows(merge_list, options)
            writer.writerow(merge_dict)
    except StopIteration:
        pass

    output_fd.close()
    return True
171b448c2b49584ce5a601f7d8789d7198fdf935
6,071
from dash import html  # assumed: Dash's html components (the stdlib `html` module has no Div)


def row_component(cards):
    """
    Creates a horizontal row used to contain cards.

    The card and row_component work together to create a layout that
    stretches and shrinks when the user changes the size of the window,
    or accesses the dashboard from a mobile device.

    See https://developer.mozilla.org/en-US/docs/Learn/CSS/CSS_layout for
    more information.
    """
    return html.Div(
        cards,
        className="govuk-list card-container",
        style={"alignItems": "stretch"},
    )
baa9f86bcac786a94802d003b1abcc75686e08d8
6,072
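A hedged usage sketch for `row_component` above, assuming a standard Dash app; the card contents are placeholders.

from dash import Dash, html

app = Dash(__name__)
app.layout = row_component([
    html.Div("Card A"),
    html.Div("Card B"),
])
# app.run(debug=True)  # serves the two cards side by side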
from lxml import etree as ETREE  # assumed binding for the ETREE alias used below


def recCopyElement(oldelement):
    """Generates a copy of an xml element and recursively of all
    child elements.

    :param oldelement: an instance of lxml.etree._Element
    :returns: a copy of the "oldelement"

    .. warning:: doesn't copy ``.text`` or ``.tail`` of xml elements
    """
    newelement = ETREE.Element(oldelement.tag, oldelement.attrib)
    if len(oldelement.getchildren()) > 0:
        for childelement in oldelement.getchildren():
            newelement.append(recCopyElement(childelement))
    return newelement
981f0c5ccdeacc1d82ebbde2de6f51298e82fa14
6,073
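A quick demonstration of `recCopyElement` above: mutating the copy leaves the original tree untouched (remember that .text and .tail are not copied).

from lxml import etree as ETREE

root = ETREE.fromstring('<a x="1"><b/><c/></a>')
dup = recCopyElement(root)
dup.set('x', '2')
print(ETREE.tostring(root))  # b'<a x="1"><b/><c/></a>'
print(ETREE.tostring(dup))   # b'<a x="2"><b/><c/></a>'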
def NameExpansionIterator(command_name,
                          debug,
                          logger,
                          gsutil_api,
                          url_strs,
                          recursion_requested,
                          all_versions=False,
                          cmd_supports_recursion=True,
                          project_id=None,
                          ignore_symlinks=False,
                          continue_on_error=False,
                          bucket_listing_fields=None):
  """Static factory function for instantiating _NameExpansionIterator.

  This wraps the resulting iterator in a PluralityCheckableIterator and
  checks that it is non-empty. Also, allows url_strs to be either an array
  or an iterator.

  Args:
    command_name: name of command being run.
    debug: Debug level to pass to underlying iterators (range 0..3).
    logger: logging.Logger object.
    gsutil_api: Cloud storage interface. Settable for testing/mocking.
    url_strs: Iterable URL strings needing expansion.
    recursion_requested: True if -r specified on command-line. If so,
        listings will be flattened so mapped-to results contain objects
        spanning subdirectories.
    all_versions: Bool indicating whether to iterate over all object versions.
    cmd_supports_recursion: Bool indicating whether this command supports a
        '-r' flag. Useful for printing helpful error messages.
    project_id: Project id to use for the current command.
    ignore_symlinks: If True, ignore symlinks during iteration.
    continue_on_error: If true, yield no-match exceptions encountered during
        iteration instead of raising them.
    bucket_listing_fields: Iterable fields to include in expanded results.
        Ex. ['name', 'acl']. Underlying iterator is responsible for
        converting these to list-style format ['items/name', 'items/acl'].
        If this is None, only the object name is included in the result.

  Raises:
    CommandException if underlying iterator is empty.

  Returns:
    Name expansion iterator instance.

    For example semantics, see comments in NameExpansionIterator.__init__.
  """
  url_strs = PluralityCheckableIterator(url_strs)
  name_expansion_iterator = _NameExpansionIterator(
      command_name,
      debug,
      logger,
      gsutil_api,
      url_strs,
      recursion_requested,
      all_versions=all_versions,
      cmd_supports_recursion=cmd_supports_recursion,
      project_id=project_id,
      ignore_symlinks=ignore_symlinks,
      continue_on_error=continue_on_error,
      bucket_listing_fields=bucket_listing_fields)
  name_expansion_iterator = PluralityCheckableIterator(name_expansion_iterator)
  if name_expansion_iterator.IsEmpty():
    raise CommandException(NO_URLS_MATCHED_GENERIC)
  return name_expansion_iterator
d84575c5e26e489853f3ead760af60cc15c7a84c
6,074
import hashlib


def KETAMA(key):
    """
    MD5-based hashing algorithm used in a consistent hashing scheme to
    compensate for servers added to or removed from the memcached pool.
    """
    d = hashlib.md5(key).digest()
    c = _signed_int32  # external helper: wraps a value to a signed 32-bit int
    # Indexing a bytes object yields ints in Python 3; the original wrapped
    # each byte in ord() for Python 2 str digests.
    h = c((d[3] & 0xff) << 24) | c((d[2] & 0xff) << 16) | \
        c((d[1] & 0xff) << 8) | c(d[0] & 0xff)
    return h
6baec2ea79a166389625b19c56cbcd3734e819b7
6,075
import calendar


def add_months(dt, months):
    """Add (or, with a negative ``months``, subtract) a number of months
    from a date, clamping the day to the length of the target month.
    """
    month = dt.month - 1 + months
    year = dt.year + month // 12  # integer division; `/` would yield a float year
    month = month % 12 + 1
    day = min(dt.day, calendar.monthrange(year, month)[1])
    return dt.replace(year=year, month=month, day=day)
5770c1b61e53fc692f3b13efef203d2f5d544b80
6,076
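Two worked examples for `add_months` above, showing the day clamping at month ends and a negative offset.

import datetime

print(add_months(datetime.date(2020, 1, 31), 1))   # 2020-02-29 (day clamped, leap year)
print(add_months(datetime.date(2021, 3, 15), -3))  # 2020-12-15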
import numpy as np


def _decomposer_interp(fp, x=None, xp=None):
    """Do the actual interpolation for multiprocessing"""
    return np.interp(x, xp, fp)
eef6debf668c62f4d817a0b3697019d0bd4007c9
6,077
import tensorflow as tf
from nn4omtf import utils
import numpy as np


def create_nn(x, x_shape, is_training):
    """
    Args:
        x: input hits array
        x_shape: input tensor shape for single event
        is_training: placeholder for indicating train or valid/test phase

    Note: Only code in `create_nn` function scope will be extracted and
    saved in the model directory. It's important to provide all necessary
    imports within.
    """
    arr = [0, 5, 10, 15, 20, 25, 30]
    out_sz = 2 * len(arr) + 1
    in_sz = np.prod(x_shape)
    hidden_layers = [128, 64, 64]
    x = tf.reshape(x, [-1, in_sz])
    for sz in hidden_layers:
        # Pass is_training to setup batch normalization on these layers
        x = utils.mk_fc_layer(x, sz, act_fn=tf.nn.relu, is_training=is_training)
    logits = utils.mk_fc_layer(x, out_sz, is_training=is_training)
    return logits, arr
8c7a4ce128e434e964b951ca6fe65722c9936be9
6,078
import os
from subprocess import call  # assumed: the original imported `call` from
                             # unittest.mock, which would make the removal
                             # below a silent no-op


def create_new_case(case_dir):
    """Creates new case directory"""
    # Check that the specified case directory does not already exist
    if os.path.exists(case_dir):
        call(["rm", "-r", "snappy"])
        # raise RuntimeError(
        #     'Refusing to write to existing path: {}'.format(case_dir)
        # )

    # Create the case
    return Case(case_dir)
b9aba60caa0862e09037b16712f35ff5ca993143
6,079
def generate_outlier_bounds_iqr(df, column, multiplier=1.5):
    """
    Takes in a dataframe, the column name, and can specify a multiplier
    (default=1.5). Returns the upper and lower bounds for the values in
    that column that signify outliers.
    """
    q1 = df[column].quantile(.25)
    q3 = df[column].quantile(.75)
    iqr = q3 - q1

    upper = q3 + (multiplier * iqr)
    lower = q1 - (multiplier * iqr)

    return upper, lower
7f096d5f5cf2417cbc161713715a39560efd140a
6,080
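A short sketch of `generate_outlier_bounds_iqr` above on a toy pandas column; with the default multiplier the value 100 falls outside the fences.

import pandas as pd

df = pd.DataFrame({'value': [1, 2, 3, 4, 100]})
upper, lower = generate_outlier_bounds_iqr(df, 'value')
print(upper, lower)                                       # 7.0 -1.0
print(df[(df['value'] > upper) | (df['value'] < lower)])  # the row holding 100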
import random


def generate_data(Type):
    """
    Randomly generate the data payload contained in a CAN frame.

    :param Type: the type of data to generate
    :return: a random data sequence of length 8, e.g.
             ['88', '77', '55', '44', '22', '11', '33', '44']
    """
    data = []
    if Type == 1:
        # Feedback frame: voltage info for battery cells 1-24
        standard_vol = 35
        offset = random.randint(0, 15)
        max_vol = standard_vol + offset
        min_vol = standard_vol - offset // 2
        data.append('44')
        data.append(str(max_vol))
        data.append('44')
        data.append(str(min_vol))
        offset = random.randint(0, 15)
        max_vol = standard_vol + offset
        min_vol = standard_vol - offset // 2
        data.append('44')
        data.append(str(max_vol))
        data.append('44')
        data.append(str(min_vol))
    elif Type == 2:
        # Feedback frame: temperature info for battery cells 1-8
        standard_temp = 45
        offset = random.randint(0, 20)
        max_temp = standard_temp + offset
        min_temp = standard_temp - offset - 5
        data.append(str(max_temp))
        data.append('6c')
        data.append(str(min_temp))
        data.append('6c')
        offset = random.randint(0, 20)
        max_temp = standard_temp + offset
        min_temp = standard_temp - offset - 5
        data.append(str(max_temp))
        data.append('6c')
        data.append(str(min_temp))
        data.append('6c')
    elif Type == 3:
        # Feedback frame: highest/lowest cell voltage and temperature info
        standard_vol = 35
        standard_temp = 45
        vol_offset = random.randint(0, 15)
        temp_offset = random.randint(0, 20)
        max_temp = standard_temp + temp_offset
        min_temp = standard_temp - temp_offset - 5
        max_vol = standard_vol + vol_offset
        min_vol = standard_vol - vol_offset // 2
        data.append('44')
        data.append(str(max_vol))
        data.append('44')
        data.append(str(min_vol))
        data.append(str(max_temp))
        data.append('5c')
        data.append(str(min_temp))
        data.append('5c')
    elif Type == 4:
        # Periodic frame: system voltage info
        standard_vol = 55
        offset = random.randint(0, 10)
        # `+` assumed here; the original `standard_vol * offset * 10` could
        # make max_vol zero and smaller than min_vol
        max_vol = standard_vol + offset * 10
        min_vol = standard_vol - offset - 5
        data.append('c5')
        data.append(str(max_vol))
        data.append('f2')
        data.append(str(min_vol))
        data.append('ed')
        for i in range(3):
            data.append(str(standard_vol + 5 * i))
    elif Type == 5:
        pass
    else:
        pass
    return data
3a920be4b7ef5c5c3e258b3e3c79bc028004179a
6,081
def counting_sort(array):
    """
    Sorting function using the counting sort algorithm.

    ARG array = list (array) of non-negative integers
    """
    # counter list has one slot for every value from 0 up to the maximum
    maximum = max(array)
    counter = [0] * (maximum + 1)
    for i in range(len(array)):
        counter[array[i]] += 1
    # turn the counts into cumulative counts (prefix sums)
    for i in range(1, maximum + 1):
        counter[i] = counter[i] + counter[i - 1]

    result = [0] * len(array)
    for i in range(len(array)):
        result[counter[array[i]] - 1] = array[i]
        counter[array[i]] -= 1
    return result
986e2f9277fa71dcd9897ac409653009c651c49f
6,082
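A one-line check of `counting_sort` above; note it assumes non-negative integers, since the values index directly into the counter list.

print(counting_sort([4, 2, 2, 8, 3, 3, 1]))  # [1, 2, 2, 3, 3, 4, 8]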
import math
from PIL import ImageColor


def indexedcolor(i, num, npersat=15, lightness=60):
    """Returns an rgb color triplet for a given index, with a finite max
    'num'. Thus if you need 10 colors and want to get color #5, you would
    call this with (5, 10). The colors are "repeatable".
    """
    nsats = int(math.ceil(num / float(npersat)))
    sat = 100 - int((i // npersat) * (100 / nsats))
    l = lightness
    nhues = int(math.ceil(num / float(nsats)))
    hue = (i % nhues) * (360 // nhues)
    return ImageColor.getrgb('hsl(%d,%d%%,%d%%)' % (hue, sat, l))
418a875bc8ae50ce21f9667f46718863ba0f55e3
6,083
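A small sketch using `indexedcolor` above to build a repeatable palette; the exact RGB values depend on Pillow's HSL parsing.

palette = [indexedcolor(i, 10) for i in range(10)]
print(palette[5])  # an (r, g, b) tuple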
def make_customer_satisfaction(branch_index='A'):
    """Create average customer satisfaction heat map"""
    customer_satisfaction = make_heat_map(branch_index, 'mean(Rating)',
                                          'Average Satisfaction')
    return customer_satisfaction
b891b74a8942da7c212ba7112ffb865deb52aec2
6,084
import pefile


def extract_infos(fpath):
    """Extract information about a file"""
    try:
        pe = pefile.PE(fpath)
    except pefile.PEFormatError:
        return {}
    res = {}
    res['Machine'] = pe.FILE_HEADER.Machine
    res['SizeOfOptionalHeader'] = pe.FILE_HEADER.SizeOfOptionalHeader
    res['Characteristics'] = pe.FILE_HEADER.Characteristics
    res['MajorLinkerVersion'] = pe.OPTIONAL_HEADER.MajorLinkerVersion
    res['MinorLinkerVersion'] = pe.OPTIONAL_HEADER.MinorLinkerVersion
    res['SizeOfCode'] = pe.OPTIONAL_HEADER.SizeOfCode
    res['SizeOfInitializedData'] = pe.OPTIONAL_HEADER.SizeOfInitializedData
    res['SizeOfUninitializedData'] = pe.OPTIONAL_HEADER.SizeOfUninitializedData
    res['AddressOfEntryPoint'] = pe.OPTIONAL_HEADER.AddressOfEntryPoint
    res['BaseOfCode'] = pe.OPTIONAL_HEADER.BaseOfCode
    try:
        res['BaseOfData'] = pe.OPTIONAL_HEADER.BaseOfData
    except AttributeError:
        res['BaseOfData'] = 0
    res['ImageBase'] = pe.OPTIONAL_HEADER.ImageBase
    res['SectionAlignment'] = pe.OPTIONAL_HEADER.SectionAlignment
    res['FileAlignment'] = pe.OPTIONAL_HEADER.FileAlignment
    res['MajorOperatingSystemVersion'] = pe.OPTIONAL_HEADER.MajorOperatingSystemVersion
    res['MinorOperatingSystemVersion'] = pe.OPTIONAL_HEADER.MinorOperatingSystemVersion
    res['MajorImageVersion'] = pe.OPTIONAL_HEADER.MajorImageVersion
    res['MinorImageVersion'] = pe.OPTIONAL_HEADER.MinorImageVersion
    res['MajorSubsystemVersion'] = pe.OPTIONAL_HEADER.MajorSubsystemVersion
    res['MinorSubsystemVersion'] = pe.OPTIONAL_HEADER.MinorSubsystemVersion
    res['SizeOfImage'] = pe.OPTIONAL_HEADER.SizeOfImage
    res['SizeOfHeaders'] = pe.OPTIONAL_HEADER.SizeOfHeaders
    res['CheckSum'] = pe.OPTIONAL_HEADER.CheckSum
    res['Subsystem'] = pe.OPTIONAL_HEADER.Subsystem
    res['DllCharacteristics'] = pe.OPTIONAL_HEADER.DllCharacteristics
    res['SizeOfStackReserve'] = pe.OPTIONAL_HEADER.SizeOfStackReserve
    res['SizeOfStackCommit'] = pe.OPTIONAL_HEADER.SizeOfStackCommit
    res['SizeOfHeapReserve'] = pe.OPTIONAL_HEADER.SizeOfHeapReserve
    res['SizeOfHeapCommit'] = pe.OPTIONAL_HEADER.SizeOfHeapCommit
    res['LoaderFlags'] = pe.OPTIONAL_HEADER.LoaderFlags
    res['NumberOfRvaAndSizes'] = pe.OPTIONAL_HEADER.NumberOfRvaAndSizes

    # Sections
    res['SectionsNb'] = len(pe.sections)
    entropy = list(map(lambda x: x.get_entropy(), pe.sections))
    res['SectionsMeanEntropy'] = sum(entropy) / float(len(entropy))
    res['SectionsMinEntropy'] = min(entropy)
    res['SectionsMaxEntropy'] = max(entropy)
    raw_sizes = list(map(lambda x: x.SizeOfRawData, pe.sections))
    res['SectionsMeanRawsize'] = sum(raw_sizes) / float(len(raw_sizes))
    res['SectionsMinRawsize'] = min(raw_sizes)
    res['SectionsMaxRawsize'] = max(raw_sizes)
    virtual_sizes = list(map(lambda x: x.Misc_VirtualSize, pe.sections))
    res['SectionsMeanVirtualsize'] = sum(virtual_sizes) / float(len(virtual_sizes))
    res['SectionsMinVirtualsize'] = min(virtual_sizes)
    res['SectionMaxVirtualsize'] = max(virtual_sizes)

    # Imports
    try:
        res['ImportsNbDLL'] = len(pe.DIRECTORY_ENTRY_IMPORT)
        imports = sum([x.imports for x in pe.DIRECTORY_ENTRY_IMPORT], [])
        res['ImportsNb'] = len(imports)
        res['ImportsNbOrdinal'] = len(
            list(filter(lambda x: x.name is None, imports)))
    except AttributeError:
        res['ImportsNbDLL'] = 0
        res['ImportsNb'] = 0
        res['ImportsNbOrdinal'] = 0

    # Exports
    try:
        res['ExportNb'] = len(pe.DIRECTORY_ENTRY_EXPORT.symbols)
    except AttributeError:
        # No export
        res['ExportNb'] = 0

    # Resources (get_resources is a helper defined elsewhere in this module)
    resources = get_resources(pe)
    res['ResourcesNb'] = len(resources)
    if len(resources) > 0:
        entropy = list(map(lambda x: x[0], resources))
        res['ResourcesMeanEntropy'] = sum(entropy) / float(len(entropy))
        res['ResourcesMinEntropy'] = min(entropy)
        res['ResourcesMaxEntropy'] = max(entropy)
        sizes = list(map(lambda x: x[1], resources))
        res['ResourcesMeanSize'] = sum(sizes) / float(len(sizes))
        res['ResourcesMinSize'] = min(sizes)
        res['ResourcesMaxSize'] = max(sizes)
    else:
        res['ResourcesNb'] = 0
        res['ResourcesMeanEntropy'] = 0
        res['ResourcesMinEntropy'] = 0
        res['ResourcesMaxEntropy'] = 0
        res['ResourcesMeanSize'] = 0
        res['ResourcesMinSize'] = 0
        res['ResourcesMaxSize'] = 0

    # Load configuration size
    try:
        res['LoadConfigurationSize'] = pe.DIRECTORY_ENTRY_LOAD_CONFIG.struct.Size
    except AttributeError:
        res['LoadConfigurationSize'] = 0

    # Version information size (get_version_info is a helper defined elsewhere)
    try:
        version_infos = get_version_info(pe)
        res['VersionInformationSize'] = len(version_infos.keys())
    except AttributeError:
        res['VersionInformationSize'] = 0
    return res
f7f3cbef72f7b9d05c25e2aabde33c7a814d05bd
6,085
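A hedged usage sketch for `extract_infos` above; the path is a placeholder for any PE binary you have on hand.

features = extract_infos('sample.exe')  # hypothetical path
if features:
    print(features['Machine'], features['SectionsNb'], features['ImportsNb'])
else:
    print('not a valid PE file')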
def calibrate_eye_in_hand(calibration_inputs):
    """Perform eye-in-hand calibration.

    Args:
        calibration_inputs: List of HandEyeInput

    Returns:
        A HandEyeOutput instance containing the eye-in-hand transform
    """
    return HandEyeOutput(
        _zivid.calibration.calibrate_eye_in_hand(
            [
                calibration_input._HandEyeInput__impl  # pylint: disable=protected-access
                for calibration_input in calibration_inputs
            ]
        )
    )
d8bc7b8cfe821809c441d3151297edf7f8267803
6,086
from typing import Optional

import numpy as np


def get_intersect(A: np.ndarray, B: np.ndarray, C: np.ndarray,
                  D: np.ndarray) -> Optional[np.ndarray]:
    """
    Get the intersection of segments [A, B] and [C, D].
    Return None if the segments don't cross.

    :param A: Point of the first segment
    :param B: Point of the first segment
    :param C: Point of the second segment
    :param D: Point of the second segment
    :return: The intersection if any, otherwise None.
    """
    det = (B[0] - A[0]) * (C[1] - D[1]) - (C[0] - D[0]) * (B[1] - A[1])
    if det == 0:
        # Parallel
        return None
    t1 = ((C[0] - A[0]) * (C[1] - D[1]) - (C[0] - D[0]) * (C[1] - A[1])) / det
    t2 = ((B[0] - A[0]) * (C[1] - A[1]) - (C[0] - A[0]) * (B[1] - A[1])) / det
    if t1 > 1 or t1 < 0 or t2 > 1 or t2 < 0:
        # the lines cross outside the segments
        return None
    xi = A[0] + t1 * (B[0] - A[0])
    yi = A[1] + t1 * (B[1] - A[1])
    return np.array([xi, yi])
1c3fab6d189f218e9f5f7e6648a46a9e53683366
6,087
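Two quick checks of `get_intersect` above: the crossing diagonals of a 2x2 square meet at (1, 1), and parallel segments return None.

import numpy as np

A, B = np.array([0., 0.]), np.array([2., 2.])
C, D = np.array([0., 2.]), np.array([2., 0.])
print(get_intersect(A, B, C, D))                    # [1. 1.]
print(get_intersect(A, B, A + [0, 1], B + [0, 1]))  # None (parallel)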
from typing import Callable

import numpy as np
from scipy import ndimage


def _make_vector_laplace_scipy_nd(bcs: Boundaries) -> Callable:
    """make a vector Laplacian using the scipy module

    This only supports uniform discretizations.

    Args:
        bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
            |Arg_boundary_conditions|

    Returns:
        A function that can be applied to an array of values
    """
    scaling = bcs._uniform_discretization**-2
    args = bcs._scipy_border_mode
    dim = bcs.grid.dim
    shape_out = (dim,) + bcs.grid.shape

    def vector_laplace(arr, out=None):
        """apply vector Laplacian operator to array `arr`"""
        if out is None:
            out = np.empty(shape_out)
        for i in range(dim):
            ndimage.laplace(arr[i], output=out[i], **args)
        return out * scaling

    return vector_laplace
3cda36d53755c84fcb47259ade64752610aeffbe
6,088
def dot_to_dict(values):
    """Convert dot notation to a dict. For example: ["token.pos", "token._.xyz"]
    become {"token": {"pos": True, "_": {"xyz": True}}}.

    values (iterable): The values to convert.
    RETURNS (dict): The converted values.
    """
    result = {}
    for value in values:
        path = result
        parts = value.lower().split(".")
        for i, item in enumerate(parts):
            is_last = i == len(parts) - 1
            path = path.setdefault(item, True if is_last else {})
    return result
a2c56a01b179d27eabc728d6ff2ec979885d5feb
6,089
from matplotlib.patches import ArrowStyle, ConnectionStyle, FancyArrowPatch


def _draw_edges(G, pos, nodes, ax):
    """Draw the edges of a (small) networkx graph.

    Params:
        G (nx.classes.*) a networkx graph.
        pos (dict) returned by nx.layout methods.
        nodes (dict) of Circle patches.
        ax (AxesSubplot) mpl axe.
    Return:
        (dict) of FancyArrowPatch edges keyed by (source, target).
    """
    pointer = ArrowStyle.Fancy(head_width=10, head_length=15)
    curved_edge = ConnectionStyle('arc3', rad=.2)
    arrow_kwargs = {'arrowstyle': pointer,
                    'antialiased': True,
                    'connectionstyle': curved_edge,
                    'edgecolor': None,
                    'facecolor': None,
                    'linewidth': None}

    edges = {}
    for i, (a, b, attr) in enumerate(G.edges.data()):
        arrow_kwargs['edgecolor'] = attr['color']
        arrow_kwargs['facecolor'] = attr['color']
        arrow_kwargs['linewidth'] = 1.0
        edge = FancyArrowPatch(pos[a], pos[b],
                               patchA=nodes[a], patchB=nodes[b],
                               shrinkA=5, shrinkB=5,
                               **arrow_kwargs)
        ax.add_patch(edge)
        edges[(a, b)] = edge
    return edges
28a207a190a7066656518de7c8e8626b2f534146
6,090
import numpy as np


def benjamini_hochberg_stepup(p_vals):
    """
    Given a list of p-values, apply FDR correction and return the q values.
    """
    # sort the p-values, but keep the original index of each one
    index = [i[0] for i in sorted(enumerate(p_vals), key=lambda x: x[1])]
    p_vals = sorted(p_vals)
    q_vals = [None] * len(p_vals)  # initialize an empty list
    prev_q = 0

    # BH Step Up begins here.
    for i, p in enumerate(p_vals):
        q = len(p_vals) / (i + 1) * p  # calculate the q-value for the current point
        q = min(q, 1)                  # if q > 1, make it == 1
        q = max(q, prev_q)             # preserve monotonicity
        q_vals[i] = q                  # store the q-value
        prev_q = q                     # update the previous q-value

    # prevent the lowest q-value from going to zero
    q_vals = np.asarray(q_vals)  # the element-wise comparison and fancy
                                 # indexing below fail on a plain list
    if np.sum(q_vals == 0) > 0:
        # set the min q-value to 10x less than the smallest non-zero value
        q_vals[np.where(q_vals == 0)] = np.min(q_vals[np.where(q_vals != 0)]) / 10

    # return q_vals and the index so we can match up each q-value to its index
    return q_vals, index
7cff2e8d28cda37c4271935ef2e6fb48441137c3
6,091
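A worked example for `benjamini_hochberg_stepup` above: with four p-values, the first three all end up at q = (4/3) * 0.03 = 0.04 after the monotonicity pass.

q_vals, order = benjamini_hochberg_stepup([0.01, 0.02, 0.03, 0.5])
print(q_vals)  # [0.04 0.04 0.04 0.5 ]
print(order)   # [0, 1, 2, 3] -- the input was already sorted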
import numpy as np


def remove_transcription_site(rna, foci, nuc_mask, ndim):
    """Distinguish RNA molecules detected in a transcription site from the rest.

    A transcription site is defined as a foci detected within the nucleus.

    Parameters
    ----------
    rna : np.ndarray, np.int64
        Coordinates of the detected RNAs with shape (nb_spots, 4) or
        (nb_spots, 3). One coordinate per dimension (zyx or yx coordinates)
        plus the index of the foci assigned to the RNA. If no foci was
        assigned, value is -1.
    foci : np.ndarray, np.int64
        Array with shape (nb_foci, 5) or (nb_foci, 4). One coordinate per
        dimension for the foci centroid (zyx or yx coordinates), the number
        of RNAs detected in the foci and its index.
    nuc_mask : np.ndarray, bool
        Binary mask of the nuclei region with shape (y, x).
    ndim : int
        Number of spatial dimensions to consider (2 or 3).

    Returns
    -------
    rna_out_ts : np.ndarray, np.int64
        Coordinates of the detected RNAs with shape (nb_spots, 4) or
        (nb_spots, 3). One coordinate per dimension (zyx or yx coordinates)
        plus the index of the foci assigned to the RNA. If no foci was
        assigned, value is -1. RNAs from transcription sites are removed.
    foci : np.ndarray, np.int64
        Array with shape (nb_foci, 5) or (nb_foci, 4). One coordinate per
        dimension for the foci centroid (zyx or yx coordinates), the number
        of RNAs detected in the foci and its index.
    ts : np.ndarray, np.int64
        Array with shape (nb_ts, 5) or (nb_ts, 4). One coordinate per
        dimension for the transcription site centroid (zyx or yx
        coordinates), the number of RNAs detected in the transcription site
        and its index.
    """
    # check parameters
    check_array(rna, ndim=2, dtype=np.int64)

    # discriminate foci from transcription sites
    ts, foci = identify_objects_in_region(nuc_mask, foci, ndim)

    # filter out rna from transcription sites
    rna_in_ts = ts[:, ndim + 1]
    mask_rna_in_ts = np.isin(rna[:, ndim], rna_in_ts)
    rna_out_ts = rna[~mask_rna_in_ts]

    return rna_out_ts, foci, ts
3f6fe083cb85dbf2f7bc237e750be57f13398889
6,092
def hexagonal_numbers(length: int) -> list[int]:
    """
    :param length: max number of elements
    :type length: int
    :return: Hexagonal numbers as a list

    Tests:
    >>> hexagonal_numbers(10)
    [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]
    >>> hexagonal_numbers(5)
    [0, 1, 6, 15, 28]
    >>> hexagonal_numbers(0)
    Traceback (most recent call last):
        ...
    ValueError: Length must be a positive integer.
    """
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
632e60505cb17536a17b20305a51656261e469f5
6,093
def get_free_remote_port(node: Node) -> int:
    """Returns a free remote port.

    Uses a Python snippet to determine a free port by binding a socket to
    port 0 and immediately releasing it.

    :param node: Node to find a port on.
    """
    output = node.run("python -c 'import socket; s=socket.socket();"
                      " s.bind((str(), 0)); print(s.getsockname()[1]);"
                      " s.close()'")
    return int(output)
4cdb0f62909abae1af8470611f63fcc9f5495095
6,094
from typing import List, Tuple

from tqdm import tqdm  # the original `import tqdm` would make `tqdm(file)` fail


def read_conll_data(data_file_path: str) -> Tuple[List[Sentence], List[DependencyTree]]:
    """
    Reads Sentences and Trees from a CONLL formatted data file.

    Parameters
    ----------
    data_file_path : ``str``
        Path to data to be read.
    """
    sentences: List[Sentence] = []
    trees: List[DependencyTree] = []
    with open(data_file_path, 'r') as file:
        sentence_tokens = []
        tree = DependencyTree()
        for line in tqdm(file):
            line = line.strip()
            array = line.split('\t')
            if len(array) < 10:
                if sentence_tokens:
                    trees.append(tree)
                    sentences.append(sentence_tokens)
                tree = DependencyTree()
                sentence_tokens = []
            else:
                word = array[1]
                pos = array[4]
                head = int(array[6])
                dep_type = array[7]
                token = Token(word=word, pos=pos, head=head, dep_type=dep_type)
                sentence_tokens.append(token)
                tree.add(head, dep_type)
    if not sentences:
        raise Exception(f"No sentences read from {data_file_path}. "
                        f"Make sure you have not replaced tabs with spaces "
                        f"in conll formatted file by mistake.")
    return sentences, trees
6bee76277fb6a15d03c5c80a5d083920a4412222
6,095
from typing import Optional


def get_algo_meta(name: str) -> Optional[AlgoMeta]:
    """
    Get meta information of a built-in or registered algorithm by name.
    Return None if not found.

    Note: `name` is the algorithm's name string; the original annotated it
    as AlgoMeta, but it is compared against ``algo.name`` below.
    """
    for algo in get_all_algo_meta():
        if algo.name == name:
            return algo
    return None
3a568356d56d26192a1e38be6ec5dd57b52a9bba
6,096
def do_eval(sess, input_ids, input_mask, segment_ids, label_ids, is_training,
            loss, probabilities, vaildX, vaildY, num_labels, batch_size, cls_id):
    """Evaluation of the model using validation data."""
    num_eval = 1000
    vaildX = vaildX[0:num_eval]
    vaildY = vaildY[0:num_eval]
    number_examples = len(vaildX)
    eval_loss, eval_counter, eval_f1_score, eval_p, eval_r = 0.0, 0, 0.0, 0.0, 0.0
    label_dict = init_label_dict(num_labels)
    print("do_eval.number_examples:", number_examples)
    f1_score_micro_sklearn_total = 0.0
    # batch_size=1  # TODO
    for start, end in zip(range(0, number_examples, batch_size),
                          range(batch_size, number_examples, batch_size)):
        input_mask_, segment_ids_, input_ids_ = get_input_mask_segment_ids(
            vaildX[start:end], cls_id)
        feed_dict = {input_ids: input_ids_, input_mask: input_mask_,
                     segment_ids: segment_ids_,
                     label_ids: vaildY[start:end], is_training: False}
        curr_eval_loss, prob = sess.run([loss, probabilities], feed_dict)
        target_labels = get_target_label_short_batch(vaildY[start:end])
        predict_labels = get_label_using_logits_batch(prob)
        if start % 100 == 0:
            print("prob.shape:", prob.shape, ";prob:", prob)
            print("predict_labels:", predict_labels)
        label_dict = compute_confuse_matrix_batch(target_labels, predict_labels,
                                                  label_dict, name='bert')
        eval_loss, eval_counter = eval_loss + curr_eval_loss, eval_counter + 1

    # label_dict is a dict; key is an accusation, value is (TP, FP, FN)
    f1_micro, f1_macro = compute_micro_macro(label_dict)
    f1_score_result = (f1_micro + f1_macro) / 2.0
    return eval_loss / float(eval_counter + 0.00001), f1_score_result, f1_micro, f1_macro
f3059d0dbbf00c0d0a93273dbf3f1335d2feefeb
6,097
import numpy as np


def read_gbt_target(sdfitsfile, objectname, verbose=False):
    """Given an object name, get all observations of that object as an 'obsblock'."""
    bintable = _get_bintable(sdfitsfile)

    whobject = bintable.data['OBJECT'] == objectname
    if verbose:
        print("Number of individual scans for Object %s: %i" %
              (objectname, whobject.sum()))

    calON = bintable.data['CAL'] == 'T'
    # HACK: apparently bintable.data can sometimes treat itself as scalar...
    if np.isscalar(calON):
        calON = np.array([(val in ['T', True]) for val in bintable.data['CAL']])

    n_nods = np.unique(bintable.data['PROCSIZE'])

    blocks = {}
    for sampler in np.unique(bintable.data[whobject]['SAMPLER']):
        whsampler = bintable.data['SAMPLER'] == sampler
        nods = np.unique(bintable.data['PROCSEQN'][whsampler * whobject])
        for nod in nods:
            whnod = bintable.data['PROCSEQN'] == nod
            for onoff in ('ON', 'OFF'):
                # calON for the ON phase, its complement for the OFF phase
                # (boolean XOR; the original used `-`, which numpy no longer
                # allows on boolean arrays)
                calOK = calON ^ (onoff == 'OFF')
                whOK = (whobject * whsampler * calOK * whnod)
                if whOK.sum() == 0:
                    continue
                if verbose:
                    print("Number of spectra for sampler %s, nod %i, cal%s: %i" %
                          (sampler, nod, onoff, whOK.sum()))
                crvals = bintable.data[whOK]['CRVAL1']
                if len(crvals) > 1:
                    maxdiff = np.diff(crvals).max()
                else:
                    maxdiff = 0
                freqres = np.max(bintable.data[whOK]['FREQRES'])
                if maxdiff < freqres:
                    splist = [read_gbt_scan(bintable, ii) for ii in np.where(whOK)[0]]
                    blocks[sampler + onoff + str(nod)] = pyspeckit.ObsBlock(splist, force=True)
                    blocks[sampler + onoff + str(nod)]._arithmetic_threshold = \
                        np.diff(blocks[sampler + onoff + str(nod)].xarr).min() / 5.
                else:
                    print("Maximum frequency difference > frequency resolution: %f > %f" %
                          (maxdiff, freqres))

    return blocks
1215fdccee50f0ab5d135a5cccf0d02da09410e2
6,098
def regression_target(label_name=None, weight_column_name=None, target_dimension=1):
  """Creates a _TargetColumn for linear regression.

  Args:
    label_name: String, name of the key in label dict. Can be null if label
      is a tensor (single headed models).
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training.
      It will be multiplied by the loss of the example.
    target_dimension: dimension of the target for multilabels.

  Returns:
    An instance of _TargetColumn
  """
  return _RegressionTargetColumn(loss_fn=_mean_squared_loss,
                                 label_name=label_name,
                                 weight_column_name=weight_column_name,
                                 target_dimension=target_dimension)
064954b58b57caeb654ed30f31b9560ab01d7c42
6,099