Dataset columns: content (string, length 35–762k) · sha1 (string, length 40) · id (int64, 0–3.66M)
def get_duplicated_members(first_name, last_name):
    """Check whether members with the same name already exist.

    :param first_name:
    :param last_name:
    :return:
    """
    first_name = first_name.strip() if first_name else None
    last_name = last_name.strip() if last_name else None
    queryset = models.Member.objects.filter(
        first_name=first_name,
        last_name=last_name,
    )
    return queryset
f7160d1a710b123e62a5e0ebf0d4e973303f4c2b
9,200
import logging


def get_oauth_id():
    """Returns user email ID if OAUTH token present, or None."""
    try:
        user_email = oauth.get_current_user(SCOPE).email()
    except oauth.Error as e:
        user_email = None
        logging.error('OAuth failure: {}'.format(e))
    return user_email
d16a1785cf3cfd12f57ab3bbc1fd5318bc634dd2
9,201
import re


def check_for_publication(form, formsets, user_data):
    """
    Run additional validation across form fields for status LILACS-Express and LILACS
    """
    valid = valid_descriptor = valid_url = True
    # regex match starts with S (Serial) and ends with (as) analytic
    regex_sas = r"^S.*as$"
    Sas_record = re.search(regex_sas, form.document_type)

    status = form.cleaned_data.get('status')
    user_role = user_data['service_role'].get('LILDBI')

    # for LILACS status and not Serie Source is required at least one primary descriptor
    if status == 1 and form.document_type != 'S':
        valid_descriptor = check_descriptor(form, formsets['descriptor'])

    # for LILACS indexed check url/fulltext/page
    if form.is_LILACS and status != -1:
        # for journal article (Sas record) check for electronic_address OR fulltext file #159
        if Sas_record:
            valid_url = check_url_or_attachment(form, formsets['attachment'])
        elif form.document_type != 'S' and form.document_type != 'Mc':
            # for other types of analytic records check for page or electronic_address #160
            valid_url = check_url_or_page(form, formsets['attachment'])

    if not valid_descriptor or not valid_url:
        valid = False

    return valid
c36364c75f97c7eb299611471df1ff29e9837bfd
9,202
def _generate_var_name(prefix, field_name):
    """
    Generate the environment variable name, given a prefix
    and the configuration field name.

    Examples:
        >>> _generate_var_name("", "some_var")
        "SOME_VAR"
        >>> _generate_var_name("my_app", "some_var")
        "MY_APP_SOME_VAR"

    :param prefix: the prefix to be used, can be empty
    :param field_name: the name of the field from which the variable is derived
    """
    return (
        "_".join((prefix, field_name)).upper()
        if prefix
        else field_name.upper()
    )
9065d1deb76789582e68df779ec2c961a7d4aedc
9,203
def VelocityPostProcessingChooser(transport):
    """
    pick acceptable velocity postprocessing based on input
    """
    tryNew = True
    velocityPostProcessor = None
    if transport.conservativeFlux is not None:
        if (transport.mesh.parallelPartitioningType == 0 and
                transport.mesh.nLayersOfOverlap == 0):  # element-based partition
            logEvent("Cannot specify conservative flux if partitioned by element with no element overlaps")
            exit()
        ppcomps = []
        pptypes = {}
        for ci in list(transport.conservativeFlux.keys()):
            if (transport.conservativeFlux[ci] == 'p1-nc' and
                    isinstance(transport.u[ci].femSpace, FemTools.NC_AffineLinearOnSimplexWithNodalBasis)):
                ppcomps.append(ci)
                pptypes[ci] = 'p1-nc'  # end p1-nc for comp ci
            elif 'pwl' in transport.conservativeFlux[ci]:
                ppcomps.append(ci)
                pptypes[ci] = transport.conservativeFlux[ci]
            elif transport.conservativeFlux[ci] in ['point-eval', 'dg-point-eval', 'point-eval-gwvd']:  # tjp addin for gwvd
                ppcomps.append(ci)
                pptypes[ci] = transport.conservativeFlux[ci]
            elif transport.conservativeFlux[ci] == 'pwc':
                ppcomps.append(ci)
                pptypes[ci] = 'pwc'
            elif 'sun-' in transport.conservativeFlux[ci]:
                ppcomps.append(ci)
                pptypes[ci] = transport.conservativeFlux[ci]
            elif transport.conservativeFlux[ci] in ['dg', 'dg-bdm']:
                ppcomps.append(ci)
                pptypes[ci] = transport.conservativeFlux[ci]
            else:
                logEvent("Unrecognized conservative flux", transport.conservativeFlux[ci])
        # for ci
        if tryNew:
            velocityPostProcessor = AggregateVelocityPostProcessor(pptypes, transport)
        else:
            velocityPostProcessor = VelocityPostProcessor_Original(pptypes, transport, ppcomps)
    # conservative flux specified
    return velocityPostProcessor
44484d2b1f35ac865d9b5a1d53a62f4234bea4ee
9,204
import networkx as nx


def get_node_hierarchical_structure(graph: nx.Graph, node: str, hop: int):
    """
    explore hierarchical neighborhoods of node
    """
    layers = [[node]]
    curLayer = {node}
    visited = {node}
    for _ in range(hop):
        if len(curLayer) == 0:
            break
        nextLayer = set()
        for neighbor in curLayer:
            for next_hop_neighbor in nx.neighbors(graph, neighbor):
                if next_hop_neighbor not in visited:
                    nextLayer.add(next_hop_neighbor)
                    visited.add(next_hop_neighbor)
        curLayer = nextLayer
        layers.append(list(nextLayer))

    return layers
132db2de60459ea41a142ae17e5ad08fb325692c
9,205
def svn_utf_cstring_from_utf8_string(*args):
    """svn_utf_cstring_from_utf8_string(svn_string_t src, apr_pool_t pool) -> svn_error_t"""
    return _core.svn_utf_cstring_from_utf8_string(*args)
0651604821f3164f6b4847397a7a484bd0a51568
9,206
import numpy as np


def fitness_sum(element):
    """
    Test fitness function.
    """
    return np.sum(element)
c2a6881864e4a31ed0ffe18c276573a7bdd6a867
9,207
def Stepk(k, basetree=[]): # XXX. make sure basetree is passed as expected. """Try to solve the puzzle using assumptions. k --> The step number. (1st step is solving exactly, 2nd step is solving using 1 assumption, 3rd step is solving using 2 assumptions and so on.) Note: The assumption level of this step will be k-1. basetree --> list of parent assumption levels. It helps in getting the tree structure of (nested) assumptions. Example- basetree = [3,2] --> This means that this Stepk function has been called (recursively) from another Stepk function (with k = 3) which was itself called from another Stepk function (with k = 4). ============== Return value: ============== 1 - puzzle was solved in this step. 0 - puzzle was not solved in this step. """ # Note: If the puzzle being solved does not have a unique solution and # the parameter k is large (say 5 or more) then this function will give # one of the many possible solutions. # But whichever solution it gives, it will be definately correct! print "Puzzle complete?" if isPuzzleComplete(): print "> Complete!" return 1 else: print "> Not yet!" assumptionleveltree = basetree + [k - 1] print "\n(New Assumption Level.\nAssumption Tree: %s\n" \ "Saving puzzle...)\n" % assumptionleveltree initialpuzzle, initiallabelrestrictionscount = SavePuzzle() for row in xrange(9): for col in xrange(9): # substitute for sudokucellswithonly2possibilities if (not (IsCellEmpty(row, col) and (lenLabelsPermissible(row, col) == 3))): continue # ==3 becoz 1st is a '' _labels = GetPermissibleLabels(row, col, 2) for i in (0, 1): # iterate through the permissible labels. # XXX. improve this if i == 0: otherlabel = _labels[1] else: otherlabel = _labels[0] print "Assuming %s in cell (%d,%d)\n[Other can be %s]\n" \ % (_labels[i], row + 1, col + 1, otherlabel) setSudokuCellLabel(row, col, _labels[i]) if k != 2: print "(Entering into nested\nassumption...)\n" SolveUptoSteps(k - 1, assumptionleveltree) if k != 2: print "(Exiting from nested\nassumption...)\n" print "Puzzle complete?" if isPuzzleComplete(): # This means that the assumption taken above was # correct and the puzzle got solved. Hence, return 1. print "> Complete!" \ # add this later.. (Assumption Level Tree: %s) return 1 else: print "> Not yet!\n\nAssumption correct?" if isPuzzleCorrect(): # This means that the puzzle is incompletely filled # and it cannot be decided from this point whether # the assumption taken above is correct or # incorrect. print "Maybe. Can't say anything\nas of now."\ " Assumption was\n%s in (%d,%d)\n" \ % (_labels[i], row + 1, col + 1) # caching if i == 0: # This is caching, for speeding up the solve # process. If 'label' is the 1st of the 2 # permissible labels then save the solution, it # might be possible that the 2nd of the 2 # permissible options is definitely incorrect, # (and consequently this assumption is correct) # so we will need this solution! # (better to save it, rather than finding it # again later.) print "Saving the above puzzle.\n" \ "Will be useful if other\n" \ "assumption (on same cell)\n"\ "is definitely incorrect.\n" temppuzzle, templabelrestrictionscount = \ SavePuzzle() # As it cannot be decided standing at this point # whether the above assumption is correct or # incorrect, revert to initial conditions and try # the other options! 
print "Reverting to this puzzle\n"\ "(saved at the beginning \n"\ "of this assumption) -" LoadPuzzle(initialpuzzle, initiallabelrestrictionscount) PrintPuzzle() else: # This means that puzzle is incorrectly filled, so # it is sure that the above asumption is definately # incorrect, so the other among the 2 permissible # labels is definately correct. print "Definately incorrect!\n" \ "[%s in cell (%d,%d)]\n" \ % (_labels[i], row + 1, col + 1) # decide whether label is the 1st of the permissible # the 1st labels or the 2nd one. if i == 1: # This means that the assumption we took # (2nd of the 2 permissible labels) is # incorrect, & as this assumption is incorrect, # the 1st of the 2 assumptions is definately # correct. Moreover, the puzzle solution to # the 1st permissible label is already saved in # temppuzzle, so just load it. print "Hence previous assumption\n" \ "was correct - \n" \ "[%s in cell (%d,%d)]\n" \ "Revert to the its\n" \ "solution puzzle. \n" \ "(Good, I had saved it!\n" \ "Saved my time!)" \ % (otherlabel, row + 1, col + 1) PrintPuzzle() LoadPuzzle(temppuzzle, templabelrestrictionscount) else: print "Hence, defintely correct-\n" \ "[%s in cell (%d,%d)]\n" \ % (otherlabel, row + 1, col + 1) # This means that 2nd of the 2 permissible # labels is correct, so revert to the puzzle # that was at the beginning of the outermost # for loop and then set the 2nd of the # 2 permissible labels. LoadPuzzle(initialpuzzle, initiallabelrestrictionscount) setSudokuCellLabel(row, col, _labels[1]) # Delete all the variables defined at this point, # as this function will be going into a recursive # loop from here on, and this data, unnecessarily, # will form a stack. del initialpuzzle del initiallabelrestrictionscount del row del col del _labels del i del otherlabel # Now, the puzzle solution has moved one step # ahead, so try to solve it further using the # "less complex", "previous" steps. if k != 2: print "(Entering into nested\nassumption...)\n" SolveUptoSteps(k - 1, assumptionleveltree) if k != 2: print "(Exiting from nested\nassumption...)\n" # Finally, repeat this step again to solve the # puzzle further. (it is quite possile that in the # previous step itself, the puzzle might have got # solved. If so, it will just enter this function # (in recursion) and return from the very # 1st check) return(Stepk(k, basetree)) # If this part is getting executed means this function did not help # in solving the puzzle any further. print "Didn't get anything from\nthis Assumption Level.\n" \ "Assumption Tree: %s\n" % assumptionleveltree return 0
cbf28b995deee1ff3432c46d3e48cf9b0c8fd31a
9,208
def load_handler(path, *args, **kwargs):
    """
    Given a path to a handler, return an instance of that handler.
    E.g.::
        >>> load_handler('anthill.framework.core.files.uploadhandler.TemporaryFileUploadHandler', request)
        <TemporaryFileUploadHandler object at 0x...>
    """
    return import_string(path)(*args, **kwargs)
26a9b3ebaa0ab2362a9a1ab977281c25334e0d9c
9,209
def validate_twilio_request():
    """Ensure a request is coming from Twilio by checking the signature."""
    validator = RequestValidator(current_app.config['TWILIO_AUTH_TOKEN'])
    if 'X-Twilio-Signature' not in request.headers:
        return False
    signature = request.headers['X-Twilio-Signature']
    if 'SmsSid' in request.form:
        url = url_for('check_raffle', _external=True)
    else:
        return False
    return validator.validate(url, request.form, signature.encode('UTF-8'))
bb35e83223ac8530a6da8fed581ba5cbc8afe47e
9,210
def paper_selection(text=[], keywords=[]):
    """
    Calculates the similarity between keywords or phrases and a set of texts, so several
    texts and keywords can be compared at once to see which text best matches the given
    keywords. A plot is also generated showing the scores of all papers and keywords.

    :param text: a list of texts to compare with the keywords
    :param keywords: the keywords used to compare the individual texts
    :return:
    """
    df = PaperSelection.paper_importance(text, keywords)
    fig = PaperSelection.plot_paper_selection(df)
    return df, fig
ac6d16ac183f081ef193bf43e782019c38c04106
9,211
import os


def list_subpackages(package_trail, verbose=False):
    """ package_trails = list_subpackages(package_trail)
        returns a list of package trails

        Inputs:
            package_trail : a list of dependant package names, as strings
            example: os.path -> ['os','path']

        Outputs:
            package_trails : a list of package trails
            can be processed with >>> map( '.'.join, package_trails )
    """

    # error checking
    if isinstance(package_trail, str):
        package_trail = [package_trail]
    elif not isinstance(package_trail, (list, tuple)):
        raise Exception('%s is not iterable' % package_trail)

    # print current package
    if verbose:
        print('.'.join(package_trail))

    # get absolute path for package
    package_dir = os.path.abspath(os.path.join(*package_trail))

    # find all packages
    packages = [
        p for p in os.listdir(package_dir)
        if (os.path.isdir(os.path.join(package_dir, p)) and               # package is a directory
            os.path.isfile(os.path.join(package_dir, p, '__init__.py')))  # and has __init__.py
    ]

    # append package trail
    packages = [package_trail + [p] for p in packages]

    # recursion, check for sub packages
    packages = [subpackage
                for package in packages
                for subpackage in list_subpackages(package, verbose)]

    # include this package trail
    package_trails = [package_trail] + packages

    # done!
    return package_trails
b1aebf9a87041da92ac5e2eee9d4e668ab88839d
9,212
import numpy as np


def _get_out_of_bounds_window(radius, padding_value):
    """Return a window full of padding_value."""
    return padding_value * np.ones((2 * radius + 1, 2 * radius + 1), dtype=int)
0dacf7d63f5e0be21deb92f02fe3b76bd201b5ec
9,213
import sys
import io


def open_fw(file_name, encoding=ENCODING, encode=True):
    """Open file for writing respecting Python version and OS differences.

    Sets newline to Linux line endings on Python 3.
    When encode=False does not set encoding on nix and Python 3 to keep as bytes.
    """
    if sys.version_info >= (3, 0, 0):
        if encode:
            file_obj = io.open(file_name, "w", newline="", encoding=encoding)
        else:
            file_obj = io.open(file_name, "w", newline="")
    else:
        file_obj = io.open(file_name, "wb")
    return file_obj
296bde81f1af70d861be641ec698deba58958915
9,214
import logging


def covid_API_request(
        location: str = "Exeter",
        location_type: str = "ltla") -> dict[str]:
    """Requests current COVID data from the Cov19API for a given area.

    Uses the Cov19API to request the most recent COVID data for a given
    area. Returns data as a list of comma separated strings.

    Args:
        location: The requested COVID data location.
        location_type: The type of area requested ("nation" or "ltla").

    Returns:
        A dictionary containing a csv file containing COVID information
        for an area, indexed by the area's name.
    """
    requested_area = ["areaType=" + location_type, "areaName=" + location]

    requested_data = {
        "areaCode": "areaCode",
        "areaName": "areaName",
        "areaType": "areaType",
        "date": "date",
        "cumDailyNsoDeathsByDeathDate": "cumDailyNsoDeathsByDeathDate",
        "hospitalCases": "hospitalCases",
        "newCasesBySpecimenDate": "newCasesBySpecimenDate"
    }

    logging.info("Requesting COVID data for %s...", location)
    api = Cov19API(filters=requested_area, structure=requested_data)
    data = api.get_csv()
    covid_data[location] = data.split("\n")[:-1]
    logging.info("COVID data for %s updated.", location)
    return covid_data
5b931e3d30f51ff64fc206cf5d30f7fd925d2b78
9,215
def resize(img, height, width, is_flow, mask=None): """Resize an image or flow field to a new resolution. In case a mask (per pixel {0,1} flag) is passed a weighted resizing is performed to account for missing flow entries in the sparse flow field. The weighting is based on the resized mask, which determines the 'amount of valid flow vectors' that contributed to each individual resized flow vector. Hence, multiplying by the reciprocal cancels out the effect of considering non valid flow vectors. Args: img: tf.tensor, image or flow field to be resized of shape [b, h, w, c] height: int, heigh of new resolution width: int, width of new resolution is_flow: bool, flag for scaling flow accordingly mask: tf.tensor, mask (optional) per pixel {0,1} flag Returns: Resized and potentially scaled image or flow field (and mask). """ def _resize(image, mask=None): # _, orig_height, orig_width, _ = img.shape.as_list() orig_height = tf.shape(input=image)[1] orig_width = tf.shape(input=image)[2] if mask is not None: # multiply with mask, to ensure non-valid locations are zero image = tf.math.multiply(image, mask) # resize image img_resized = tf.compat.v2.image.resize( image, (int(height), int(width)), antialias=True) # resize mask (will serve as normalization weights) mask_resized = tf.compat.v2.image.resize( mask, (int(height), int(width)), antialias=True) # normalize sparse flow field and mask img_resized = tf.math.multiply( img_resized, tf.math.reciprocal_no_nan(mask_resized)) mask_resized = tf.math.multiply( mask_resized, tf.math.reciprocal_no_nan(mask_resized)) else: # normal resize without anti-alaising img_resized = tf.compat.v2.image.resize(image, (tf.cast(height, tf.int32), tf.cast(width, tf.int32))) if is_flow: # If image is a flow image, scale flow values to be consistent with the # new image size. scaling = tf.reshape([ float(height) / tf.cast(orig_height, tf.float32), float(width) / tf.cast(orig_width, tf.float32) ], [1, 1, 1, 2]) img_resized *= scaling if mask is not None: return img_resized, mask_resized return img_resized # Apply resizing at the right shape. shape = img.shape.as_list() if img.shape.rank == 3: if mask is not None: img_resized, mask_resized = _resize(img[None], mask[None]) return img_resized[0], mask_resized[0] else: return _resize(img[None])[0] if img.shape.rank == 4: # Input at the right shape. return _resize(img, mask) if img.shape.rank > 4: # Reshape input to [b, h, w, c], resize and reshape back. outer_shape = tf.shape(input=img)[:-3] required_shape = tf.concat([[-1], tf.shape(input=img)[-3:]], axis=0) img_flattened = tf.reshape(img, required_shape) if mask is not None: mask_flattened = tf.reshape(mask, required_shape) img_resized, mask_resized = _resize(img_flattened, mask_flattened) else: img_resized = _resize(img_flattened) final_shape = tf.concat( [outer_shape, tf.shape(input=img_resized)[-3:]], axis=0) result_img = tf.reshape(img_resized, final_shape) if mask is not None: final_mask_shape = tf.concat( [outer_shape, tf.shape(input=mask_resized)[-3:]], axis=0) result_mask = tf.reshape(mask_resized, final_mask_shape) return result_img, result_mask return result_img else: raise ValueError('Cannot resize an image of shape', shape)
9d0543a88382028522ae469fc773dcebc006b5c3
9,216
def num_decodings2(enc_mes):
    """
    :type enc_mes: str
    :rtype: int
    """
    if not enc_mes or enc_mes.startswith('0'):
        return 0
    stack = [1, 1]
    for i in range(1, len(enc_mes)):
        if enc_mes[i] == '0':
            if enc_mes[i-1] == '0' or enc_mes[i-1] > '2':
                # only '10', '20' is valid
                return 0
            stack.append(stack[-2])
        elif 9 < int(enc_mes[i-1:i+1]) < 27:
            # '01 - 09' is not allowed
            stack.append(stack[-2] + stack[-1])
        else:
            # other cases, e.g. '01', '09', '27'
            stack.append(stack[-1])
    return stack[-1]
ae4ff7181e34003dcc7ec264ed2727bc716708a5
9,217
def spot2Cmyk(spot, default=None):
    """Answers the CMYK value of spot color. If the value does not exist,
    answer default of black. Note that this is a double conversion:
    spot-->rgb-->cmyk

    >>> '%0.2f, %0.2f, %0.2f, %0.2f' % spot2Cmyk(300)
    '0.78, 0.33, 0.00, 0.22'
    >>> # Nonexistent spot colors map to default or black.
    >>> spot2Cmyk(10000000)
    (0, 0, 0, 1)
    """
    return rgb2Cmyk(spot2Rgb(spot, default=default))
307c8e934cdac2f5fb857e8f8f122c9862adab6d
9,218
import re def clean(text): """ Removes irrelevant parts from :param: text. """ # Collect spans spans = [] # Drop HTML comments for m in comment.finditer(text): spans.append((m.start(), m.end())) # Drop self-closing tags for pattern in selfClosing_tag_patterns: for m in pattern.finditer(text): spans.append((m.start(), m.end())) # Drop ignored tags # for left, right in options.ignored_tag_patterns: # for m in left.finditer(text): # spans.append((m.start(), m.end())) # for m in right.finditer(text): # spans.append((m.start(), m.end())) # Bulk remove all spans text = dropSpans(spans, text) # Drop discarded elements # for tag in options.discardElements: # text = dropNested(text, r'<\s*%s\b[^>/]*>' % tag, r'<\s*/\s*%s>' % tag) # Expand placeholders for pattern, placeholder in placeholder_tag_patterns: index = 1 for match in pattern.finditer(text): text = text.replace(match.group(), '%s_%d' % (placeholder, index)) index += 1 text = text.replace('<<', '«').replace('>>', '»') ############################################# # Cleanup text text = text.replace('\t', ' ') text = spaces.sub(' ', text) text = dots.sub('...', text) text = re.sub(' (,:\.\)\]»)', r'\1', text) text = re.sub('\([^a-zA-Z\d]*\)', '', text) text = re.sub('(\[\(«) ', r'\1', text) text = re.sub(r'\n\W+?\n', '\n', text, flags=re.U) # lines with only punctuations text = text.replace(',,', ',').replace(',.', '.') text = text.replace(' , ', ', ') if keep_tables: # the following regular expressions are used to remove the wikiml chartacters around table strucutures # yet keep the content. The order here is imporant so we remove certain markup like {| and then # then the future html attributes such as 'style'. Finally we drop the remaining '|-' that delimits cells. text = re.sub(r'!(?:\s)?style=\"[a-z]+:(?:\d+)%;\"', r'', text) text = re.sub(r'!(?:\s)?style="[a-z]+:(?:\d+)%;[a-z]+:(?:#)?(?:[0-9a-z]+)?"', r'', text) text = text.replace('|-', '') text = text.replace('|', '') text = text.replace('(; ', '(') text = text.strip() return text
0e942e36035d2129ca0be814268e7c6e4552435e
9,219
from dateutil import tz from datetime import datetime import numpy def get_offset(t0,t1,zone,station,gps): """ Determine UTC to local Local offset to be applied. Parameters ---------- t0 : datetime Starting timestamp t1 : datetime End timestamp zone : str Define timing zone, either Local or UTC city : str City where the sensor is located Return ------ offset : datetime Offset time to match time in targeted filename """ # Identifying the time zone utc_zone = tz.gettz('UTC') # Format input timestamp into UTC time utc_epoch = t0.replace(tzinfo=utc_zone) # Get time in local California time local_epoch = utc_epoch.astimezone(tz.gettz('America/Los_Angeles')) # Calculate offset between UTC and PST timestamps utc2pst = datetime.utcoffset(local_epoch).total_seconds() # Consider UTC to PST offset if requested time is before fix date utc2pst = utc2pst if t0<datetime(2017,12,7) else 0 # Look-up table to identify station's location over time locations = numpy.array([[1,datetime(2015,11,1),datetime(2017,12,3),tz.gettz('America/Los_Angeles')], [1,datetime(2017,12,3),datetime.max ,tz.gettz('America/New_York') ], [2,datetime(2015,11,1),datetime.max ,tz.gettz('America/Los_Angeles')], [3,datetime(2015,11,1),datetime(2017,10,6),tz.gettz('America/Los_Angeles')], [3,datetime(2017,10,6),datetime.max ,tz.gettz('America/New_York') ], [4,datetime(2015,11,1),datetime(2017,12,3),tz.gettz('America/Los_Angeles')], [4,datetime(2017,12,3),datetime.max ,tz.gettz('America/New_York') ]]) # Identify the location for requested data for n,start,end,loc in locations: if n==station and start<t0<end: local_zone = loc # Identifying the time zone utc_zone = tz.gettz('UTC') # Format input timestamp into UTC time utc_epoch = t0.replace(tzinfo=utc_zone) # Get time in local California time local_epoch = utc_epoch.astimezone(local_zone) # Calculate offset between Local and UTC timestamps utc2local = datetime.utcoffset(local_epoch).total_seconds() # Check if first version of timing data if t1<datetime(2016,6,10): # Calculate offset between provided UTC to local timestamps offset = -utc2local if zone=='UTC' else 0 # Check if second version of timing data if t0>datetime(2016,6,10): # Calculate offset between provided local to UTC timestamps offset = -utc2local if zone=='Local' and gps=='on' else 0 return utc2local,offset,utc2pst
f6ed5f50528a67735097abf17d3039008a61b547
9,220
def my_render_template(html, **arguments):
    """Call render_template with comparison_types as one of the arguments.

    :param string html: name of the template
    :param **arguments: other arguments to be passed while rendering template
    """
    arguments.setdefault(
        'comparison_types', ComparisonType.get_cache(g.db_session)
    )
    return render_template(html, **arguments)
0a639a9dd8cef8c0cc659444d32138acf9a43e41
9,221
def find_or_create_qualification(qualification_name, description,
                                 must_be_owned=True):
    """Query amazon to find the existing qualification name, return the Id.

    If it exists and must_be_owned is true but we don't own it, this prints
    an error and returns None. If it doesn't exist, the qualification is
    created.
    """
    qual_id = find_qualification(
        qualification_name,
        must_be_owned=must_be_owned
    )
    if qual_id is False:
        return None
    if qual_id is not None:
        return qual_id

    # Create the qualification, as it doesn't exist yet
    client = boto3.client(
        service_name='mturk',
        region_name='us-east-1',
        endpoint_url='https://mturk-requester-sandbox.us-east-1.amazonaws.com'
    )
    response = client.create_qualification_type(
        Name=qualification_name,
        Description=description,
        QualificationTypeStatus='Active',
    )
    return response['QualificationType']['QualificationTypeId']
92855fbaee2c1f5d190b2c4cd67078b07c6f4e51
9,222
import torch


def hard_to_soft(Y_h, k):
    """Converts a 1D tensor of hard labels into a 2D tensor of soft labels

    Source: MeTaL from HazyResearch,
    https://github.com/HazyResearch/metal/blob/master/metal/utils.py

    Args:
        Y_h: an [n], or [n,1] tensor of hard (int) labels in {1,...,k}
        k: the largest possible label in Y_h

    Returns:
        Y_s: a torch.FloatTensor of shape [n, k] where Y_s[i, j-1] is the
            soft label for item i and label j
    """
    Y_h = Y_h.clone()
    if Y_h.dim() > 1:
        Y_h = Y_h.squeeze()
    assert Y_h.dim() == 1
    assert (Y_h >= 0).all()
    assert (Y_h < k).all()
    n = Y_h.shape[0]
    Y_s = torch.zeros((n, k), dtype=Y_h.dtype, device=Y_h.device)
    for i, j in enumerate(Y_h):
        Y_s[i, int(j)] = 1.0
    return Y_s
d31c6749569e19cbbdd91c59e66982497190163d
9,223
def find_negations(doc, neg_comma=True, neg_modals=True, debug=False): """ Takes as input a list of words and returns the positions (indices) of the words that are in the context of a negation. :param list doc: a list of words (strings) :param bool neg_comma: if True, the negation context ends on a comma :param bool neg_modals: if True, include negation modals in the set of negation words :param bool debug: if True, print the text color coded by context :return set: a set of the word positions inside a negation """ doc_context = [] append = doc_context.append negation_stopset = neg_puncts | {","} if neg_comma else set() negation_startset = negation_words | negation_modals if neg_modals else set() # status == "normal" means outside of parentheses # status == "parentheses" means inside parentheses # status[XXX] == True means that the context XXX is negated # status[XXX] == False means that the context XXX is affirmative status = {"normal": False, "parentheses": False} # pointer to the current context current = "normal" for i, tok in enumerate(doc): if tok in negation_startset: status[current] = True if debug: cprint(tok, 'red', attrs=['bold'], end=' ') continue if tok in negation_stopset | contrast_words: if debug: if status[current]: cprint(tok, 'green', attrs=['bold'], end=' ') else: print(tok, end=" ") status[current] = False continue if tok == "(": current = "parentheses" if debug: cprint(tok, 'green', attrs=['bold'], end=' ') continue if tok == ")": status[ "parentheses"] = False # in order to be false the next time it goes in to a parentheses current = "normal" if debug: cprint(tok, 'green', attrs=['bold'], end=' ') continue if debug: if status[current]: cprint(tok, 'magenta', end=' ') else: print(tok, end=" ") if status[current]: append(i) if debug: print() # input("press to continue...") return set(doc_context)
7b609949c3f2ea22887147e6bb13ad41de71bba3
9,224
from typing import List from sys import getrecursionlimit from sys import setrecursionlimit def int_pow(base: int, power: int, modulus: int=None, safe: bool=True): """ Calculate `base` raised to `power`, optionally mod `modulus` The python standard library offers the same functionality, and this function exists only as a proof of Concept. This function only aims to support positive integer operands. the `safe` parameter only applies to modular exponentiation. for values with a large hamming weight, the recursion limit can be hit quite easily, as one round of recursion is needed for every set bit. If `safe` is set to true, the recursion depth is adjusted accordingly during the computation, then restored. --------------------------------------------------------------- Benchmark compared to native python pow(): pow(a, b, c) 10k times using random pool of a, b, c { [2, 99999999999999999999999999999999999999999999999999999]: 702 ms ± 5.44 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) int_pow(a, b, c) 10k times using same pool: 1.31 s ± 2.81 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) """ if base < 0 or power < 0 or (modulus and modulus < 0): raise ValueError("Invalid operand. Only positive integer operands allowed.") def pow_nomod(base: int, power: int): """Calculate `base` raised to `power`.""" # Keep a copy base_ = base for _ in range(power - 1): base *= base_ return base if not modulus: return pow_nomod(base, power) # Here the fun part comes. # There exists an optimization for modular exponentiation which # allows for a much faster computation than (base**power) % modulus. # the identity `(a * b) mod n = (a mod n) * (b mod n) mod n` aids us here. # We start by splitting the power up in a sum of powers of two. n = 0 po2 = [] while power >> n: # if the bit is set, we have a match: if power & (1 << n): po2.append(n) n += 1 # We can now represent our evaluation as an expression of the form: # (base**(2**a_0) * base**(2**a_1) * ... * base**(2**a_2) ) % modulus # which we can calculate quite fast using the identity below # Take the highest power of two and evaluate it using our identity. # We can fill the cache with the results of all the lower powers, mod n. highest = po2[-1] # cache for `base` raised to powers of two, modulus `n`. # the indices shall denote the power. cache = [None] * (highest + 1) result = cache[0] = base % modulus # base**1 # modulus # Square, then reduce modulo `modulus` for cycle in range(highest): result *= result result %= modulus cache[cycle + 1] = result def product_mod_n(args: List[int], n: int): """ Calculate (base**(2**a_0) * base**(2**a_1) * ... * base**(2**a_k)) mod n, with every `a` in cache. """ # BEWARE: this function can easily exceed python max recursion depth (of 1000). # for values with a large hamming weight, adjust the recursion depth limit accordingly. # Identity: (a * b) mod n = (a mod n) * (b mod n) mod n # this can be applied recursively with relative ease. # Recursion ending condition: if len(args) == 1: return cache[args[0]] # return (cache[args.pop()]) * (product_mod_n(args, n)) % n if safe: # Make sure we won't hit the recursion limit old = getrecursionlimit() setrecursionlimit(999999999) result = product_mod_n(po2, modulus) setrecursionlimit(old) return result else: return product_mod_n(po2, modulus)
d578449c45a0b10d19a521b95866b8cc0025df43
9,225
import decimal


def truncate_decimal_places(value: decimal.Decimal, places: int = 1) -> float:
    """
    Truncate a float (i.e. round towards zero) to a given number of decimal places.

    NB: Takes a decimal but returns a float!

    >>> truncate_decimal_places(12.364, 1)
    12.3

    >>> round_decimal_places(-12.364, 1)
    -12.3  # -12.3 is bigger than -12.4

    >>> round_decimal_places(12.364, 0)
    12.0  # rounding to 0 returns float with no decimal part
    """
    if places == 0:
        quantize_string = "1"
    else:
        quantize_string = "0." + ((places - 1) * "0") + "1"

    exponent = decimal.Decimal(quantize_string)
    decimal_result = value.quantize(exponent, rounding=decimal.ROUND_DOWN)
    return float(decimal_result)
11b924a5e4f6560674b1f7378f6a4001a3265a97
9,226
def site_url(self, url):
    """
    Return the fully qualified URL for the given URL fragment.
    """
    try:
        # In Django < 1.9, `live_server_url` is decorated as a `property`, but
        # we need to access it on the class.
        base_url = self.testclass.live_server_url.__get__(self.testclass)
    except AttributeError:
        # Django 1.9 updates `live_server_url` to be a `classproperty`.
        base_url = self.testclass.live_server_url
    return urljoin(base_url, url)
4f82cc766d0144fb11e897e7c9ceba57a6881f23
9,227
from math import floor


def myisinteger(num: int) -> bool:
    """
    Checks if num is an integer
    """
    val = 1 if num == floor(num) else 0
    return val
a8a1980fb35429d300cb262629f1d20774202f95
9,228
import pandas as pd


def _get_timeunit(min_time: pd.Timestamp, max_time: pd.Timestamp, dflt: int) -> str:
    """Auxiliary function to find an appropriate time unit. Will find the
    time unit such that the number of time units is closest to dflt."""

    dt_secs = {
        "year": 60 * 60 * 24 * 365,
        "quarter": 60 * 60 * 24 * 91,
        "month": 60 * 60 * 24 * 30,
        "week": 60 * 60 * 24 * 7,
        "day": 60 * 60 * 24,
        "hour": 60 * 60,
        "minute": 60,
        "second": 1,
    }

    time_rng_secs = (max_time - min_time).total_seconds()
    prev_bin_cnt, prev_unit = 0, "year"
    for unit, secs_in_unit in dt_secs.items():
        cur_bin_cnt = time_rng_secs / secs_in_unit
        if abs(prev_bin_cnt - dflt) < abs(cur_bin_cnt - dflt):
            return prev_unit
        prev_bin_cnt = cur_bin_cnt
        prev_unit = unit

    return prev_unit
96b1a036bdb64b9c684ed8ac9123cf788ddc189d
9,229
import pathlib
import sys


def get_resource_path(relative_path):
    """
    relative_path = "data/beach.jpg"
    relative_path = pathlib.Path("data") / "beach.jpg"
    relative_path = os.path.join("data", "beach.jpg")
    """
    rel_path = pathlib.Path(relative_path)
    dev_base_path = pathlib.Path(__file__).resolve().parent.parent
    base_path = getattr(sys, "_MEIPASS", dev_base_path)
    return base_path / rel_path
becad13eb95d988b49ea7ef141e9c3436379af6e
9,230
import numpy as np


def data_sample(df, x, y, group_number, quantile):
    """
    Grouped point-selection method.

    x: grouping variable
    y: value variable
    """
    group_width = (np.max(df[x]) - np.min(df[x])) / group_number  # group width
    x_group = np.arange(np.min(df[x]), np.max(df[x]), group_width)  # group boundaries along x

    # Pick the configured quantile points from each group, only for groups that contain data
    if len(quantile) == 3:
        data_x = np.array([])
        data_y = np.array([])
        for i in x_group:
            if len(df[(df[x] >= i) & (df[x] < i + group_width)]) > 0:
                temp_y = np.array(df[(df[x] >= i) & (df[x] < i + group_width)][y].quantile(quantile))
                temp_x = np.array([(i + group_width / 4), (i + group_width / 2), (i + 3 * group_width / 4)])
                data_x = np.concatenate([data_x, temp_x], axis=0)
                data_y = np.concatenate([data_y, temp_y], axis=0)
    elif len(quantile) == 1:
        data_x = []
        data_y = []
        for i in x_group:
            if len(df[(df[x] >= i) & (df[x] < i + group_width)]) > 0:
                temp_y = float(df[(df[x] >= i) & (df[x] < i + group_width)][y].quantile(quantile))
                temp_x = float(i + group_width / 2)
                data_x.append(temp_x)
                data_y.append(temp_y)

    return data_x, data_y
9be1ec948f9d427f7b6136b0c4f6bf5622be5843
9,231
def index(request):
    """ Shows all challenges related to the current user """
    profile = request.user.get_profile()
    chall_user = profile.get_extension(ChallengeUser)

    challs = ChallengeGame.get_active(chall_user)
    played = ChallengeGame.get_played(chall_user)[:10]

    if not chall_user.is_eligible():
        messages.error(request, _('Your race can\'t play. Go home'))

    return render_to_response('challenge/index.html',
                              {'challenges': challs,
                               'played': played,
                               'challuser': chall_user,
                               'challenge': ChallengeGame},
                              context_instance=RequestContext(request))
b649f74777eedd1093e884f71949cd43c7a215ad
9,232
def hansen(threshold, geojson, begin, end, logger):
    """For a given threshold and geometry return a dictionary of ha area.

    The threshold is used to identify which band of loss and tree to select.
    asset_id should be 'projects/wri-datalab/HansenComposite_14-15'.

    Methods used to identify data:

    Gain band is a binary (0 = 0, 255 = 1) of locations where tree cover
    increased over the data collection period. Calculate area of gain by
    converting 255 values to 1, and then using a trick to convert this to
    pixel area (1 * pixelArea()). Finally, we sum the areas over a given
    polygon using a reducer, and convert from square meters to hectares.

    Tree_X bands show percentage canopy cover of forest; if missing, no
    trees are present. Therefore, to count the tree area of a given canopy
    cover, select the band, convert it to binary (0 = no tree cover,
    1 = tree cover), and identify pixel area via a trick, multiplying all
    1 vals by image.pixelArea. Then, sum the values over a region. Finally,
    divide the result (meters squared) by 10,000 to convert to hectares.
    """
    asset_id = 'projects/wri-datalab/HansenComposite_14-15'
    d = {}
    begin = int(begin.split('-')[0][2:])
    end = int(end.split('-')[0][2:])
    region = get_region(geojson)
    reduce_args = {'reducer': ee.Reducer.sum().unweighted(),
                   'geometry': region,
                   'bestEffort': True,
                   'scale': 90}
    gfw_data = ee.Image(asset_id)
    loss_band = 'loss_{0}'.format(threshold)
    cover_band = 'tree_{0}'.format(threshold)

    # Identify 2000 forest cover at given threshold
    tree_area = gfw_data.select(cover_band).gt(0).multiply(
        ee.Image.pixelArea()).reduceRegion(**reduce_args).getInfo()
    d['tree-extent'] = squaremeters_to_ha(tree_area[cover_band])

    # Identify tree gain over data collection period
    gain = gfw_data.select('gain').divide(255.0).multiply(
        ee.Image.pixelArea()).reduceRegion(**reduce_args).getInfo()
    d['gain'] = squaremeters_to_ha(gain['gain'])

    # Identify area lost from begin year up until end year
    tmp_img = gfw_data.select(loss_band)
    loss_area_img = tmp_img.gte(begin).And(tmp_img.lte(end)).multiply(ee.Image.pixelArea())
    loss_total = loss_area_img.reduceRegion(**reduce_args).getInfo()
    d['loss'] = squaremeters_to_ha(loss_total[loss_band])

    return d
f7d43c8a0d5c8869232d53b2f625c2568de3a1b0
9,233
import numpy as np
import cv2


def rectangluarMask(image):
    """
    Takes an image as input and creates a rectangular mask (image sized)
    centered on the canvas.
    """
    mask = np.zeros(image.shape[:2], dtype='uint8')
    (cX, cY) = (image.shape[1] // 2, image.shape[0] // 2)
    cv2.rectangle(mask, (cX - 75, cY - 75), (cX + 75, cY + 75), 255, -1)
    # cv2.imshow('Rectangle Mask', mask)
    # cv2.waitKey(0)

    return mask
df5ae1e31eb259bc02ff75282d2dea2b4a7f547b
9,234
from datetime import date


def get_artist_listen_for_change_streams(artist: Artist = None):
    """
    Computation steps:
    1. Define start and end dates
    2. Create stream filters for the current artist
    3. Aggregate the streams from the Model
    4. Return just the number (maybe a dict idk)
    """
    # Validate argument data types
    if not isinstance(artist, Artist):
        raise TypeError("Param 'artist' must be an Artist object")

    # 1
    start_date = date(year=2020, month=6, day=22)
    end_date = date(year=2020, month=6, day=28)

    # 2
    stream_song_filter = Q(song__uploaded_by=artist)
    stream_time_filter = Q(timestamp__gte=start_date, timestamp__lte=end_date)

    # 3
    streams = Stream.objects.filter(stream_song_filter, stream_time_filter)
    stream_count = streams.aggregate(num_streams=Count('id'))

    return stream_count
c0620809e7ebf10138e3c6c93520787c30efa4f9
9,235
def flip_dict(d):
    """Returns a dict with values and keys reversed.

    Args:
        d: The dict to flip the values and keys of.

    Returns:
        A dict whose keys are the values of the original dict, and whose
        values are the corresponding keys.
    """
    return {v: k for k, v in d.items()}
c9c960209663639613739979c0dc4066a63c44cb
9,236
import subprocess


def branch_exists(branch: str) -> bool:
    """
    Check if the branch exists in the current Git repo.
    """
    try:
        subprocess.check_call(
            ["git", "rev-parse", "--quiet", "--verify", branch],
            stdout=subprocess.DEVNULL,
        )
        return True
    except subprocess.CalledProcessError:
        return False
352adba56d824fff29bf5c91788a1154bea64f1b
9,237
def has_sample(args):
    """Returns if some kind of sample id is given in args.
    """
    return args.sample or args.samples or args.sample_tag
c2ae87acb11232d7f56cb9e09eb8509720669058
9,238
import sys


def get_search_selection(config: models.Config) -> models.Config:
    """Gets search criteria for search mode"""
    search_selection: models.SearchSelection = models.SearchSelection()

    print('\nPlease select what system you want to search')
    print('Press Enter to do a general site wide search')
    helpers.print_console_list()

    while True:
        user_input: str = sys.stdin.readline()
        try:
            if user_input == '\n':
                search_selection.System = 'general'
                config.Query.SearchSelections = search_selection
                break
            if not (int(user_input) > 17 or int(user_input) < 0):
                search_selection.System = \
                    helpers.get_selection_from_num(int(user_input))
                config.Query.SearchSelections = search_selection
                break
            else:
                print('Not a selection')
                print('Please select a value from the list')
        except ValueError:
            print('Please select a value from the list')
            continue

    print('Input what rom you want to search for')
    search_selection.Query = sys.stdin.readline()
    return config
9e84a8aad45b53df9312d2e0ae03fa94b9496ff9
9,239
from typing import Callable
from typing import Any
from typing import Type
from collections import namedtuple
import inspect


def make_key_type(func: Callable[..., Any]) -> Type[CallKey]:
    """Construct a type representing a function's signature."""
    sig = inspect.signature(func)

    # make a format string that unpacks and names the parameters nicely
    repr_fmt = (
        (
            func.__name__
            if "<locals>" in func.__qualname__
            else func.__module__ + "." + func.__qualname__
        )
        + "("
        + ", ".join(name + "={!r}" for name in sig.parameters.keys())
        + ")"
    )

    # patch the repr so it looks pretty
    def _repr(self: Any) -> str:
        return repr_fmt.format(*self[:-1])

    key_type = type(
        func.__name__,
        (
            namedtuple(
                func.__name__,
                tuple(sig.parameters.keys()) + ("func__",),
                defaults=tuple(p.default for p in sig.parameters.values()) + (func,),
                module=func.__module__,
            ),
            CallKey,
        ),
        {
            "__repr__": _repr,
            "__func__": func,
            "__module__": func.__module__,
            "__signature__": sig,
            "from_call": classmethod(_from_call),
        },
    )

    return key_type
9f6ab0a5ac20fcc69518f24669035a6b7c6246b6
9,240
def gen_string(prop=None):
    """ Generate String value

    :param prop: dict
    Examples:
        {'minLength': 10, 'maxLength': 154}
        {'pattern': '^\\d+\\w*$'}
    """
    if not prop:
        prop = {}

    min_length = prop.get("minLength", 1)
    max_length = prop.get("maxLength", 1024)
    pattern = prop.get("pattern", None)

    if pattern:
        if min_length or max_length:
            # TODO implement pattern with min/max length
            raise NotImplementedError
        return Xeger().xeger(pattern)

    return random_string(strlen=randint(min_length, max_length))
6a7a51712b2f8a47711e76901d5f425226f9e2ef
9,241
import numpy as np


def standardize_data(data, eps=None):
    """
    Standardize each image so it has zero mean and unit standard deviation (z-score)
    Inputs:
        data: [np.ndarray] unnormalized data
    Outputs:
        data: [np.ndarray] normalized data
    """
    if eps is None:
        eps = 1.0 / np.sqrt(data[0, ...].size)
    data, orig_shape = reshape_data(data, flatten=True)[:2]  # Adds channel dimension if it's missing
    num_examples = data.shape[0]
    data_axis = tuple(range(data.ndim)[1:])  # standardize each example individually
    data_mean = np.mean(data, axis=data_axis, keepdims=True)
    data_true_std = np.std(data, axis=data_axis, keepdims=True)
    data_std = np.where(data_true_std >= eps, data_true_std, eps * np.ones_like(data_true_std))
    for idx in range(data.shape[0]):  # TODO: Broadcasting should work here
        data[idx, ...] = (data[idx, ...] - data_mean[idx]) / data_std[idx]
    if data.shape != orig_shape:
        data = reshape_data(data, out_shape=orig_shape)[0]
    return data, data_mean, data_std
3267ed737bfb35f08daa91040d87698e157d6ccf
9,242
def _get_transmission(self, d, E='config'):
    """Calculate the transmission after thickness d (in m) of material at energy E (in eV)."""
    return np.exp(-d * 1e6 / self.absorption_length(E))
ac11a97e424390e40544f16b7259bbf9ace30dcb
9,243
def calculate_density( input_layer, field=None, cell_size=None, cell_size_units="Meters", radius=None, radius_units=None, bounding_polygon_layer=None, area_units=None, classification_type="EqualInterval", num_classes=10, output_name=None, context=None, gis=None, estimate=False, future=False): """ .. image:: _static/images/cal_density_standard/calculate_density.png The calculate_density function creates a density map from point or line features by spreading known quantities of some phenomenon (represented as attributes of the points or lines) across the map. The result is a layer of areas classified from least dense to most dense. For point input, each point should represent the location of some event or incident, and the result layer represents a count of the incident per unit area. A higher density value in a new location means that there are more points near that location. In many cases, the result layer can be interpreted as a risk surface for future events. For example, if the input points represent locations of lightning strikes, the result layer can be interpreted as a risk surface for future lightning strikes. For line input, the line density surface represents the total amount of line that is near each location. The units of the calculated density values are the length of line per unit area. For example, if the lines represent rivers, the result layer will represent the total length of rivers that are within the search radius. This result can be used to identify areas that are hospitable to grazing animals. ========================= ========================================================= **Argument** **Description** ------------------------- --------------------------------------------------------- input_layer Required layer. The point or line features from which to calculate density. See :ref:`Feature Input<FeatureInput>`. ------------------------- --------------------------------------------------------- field Optional string. A numeric field name specifying the number of incidents at each location. For example, if you have points that represent cities, you can use a field representing the population of the city as the count field, and the resulting population density layer will calculate larger population densities near cities with larger populations. If not specified, each location will be assumed to represent a single count. ------------------------- --------------------------------------------------------- cell_size Optional float. This value is used to create a mesh of points where density values are calculated. The default is approximately 1/1000th of the smaller of the width and height of the analysis extent as defined in the context parameter. The smaller the value, the smoother the polygon boundaries will be. Conversely, with larger values, the polygon boundaries will be more coarse and jagged. ------------------------- --------------------------------------------------------- cell_size_units Optional string. The units of the cell_size value. Choice list: ['Miles', 'Feet', 'Kilometers', 'Meters'] ------------------------- --------------------------------------------------------- radius Optional float. A distance specifying how far to search to find point or line features when calculating density values. ------------------------- --------------------------------------------------------- radius_units Optional string. The units of the radius parameter. 
If no distance is provided, a default will be calculated that is based on the locations of the input features and the values in the count field (if a count field is provided). Choice list: ['Miles', 'Feet', 'Kilometers', 'Meters'] ------------------------- --------------------------------------------------------- bounding_polygon_layer Optional layer. A layer specifying the polygon(s) where you want densities to be calculated. For example, if you are interpolating densities of fish within a lake, you can use the boundary of the lake in this parameter and the output will only draw within the boundary of the lake. See :ref:`Feature Input<FeatureInput>`. ------------------------- --------------------------------------------------------- area_units Optional string. The units of the calculated density values. Choice list: ['areaUnits', 'SquareMiles'] ------------------------- --------------------------------------------------------- classification_type Optional string. Determines how density values will be classified into polygons. Choice list: ['EqualInterval', 'GeometricInterval', 'NaturalBreaks', 'EqualArea', 'StandardDeviation'] * EqualInterval - Polygons are created such that the range of density values is equal for each area. * GeometricInterval - Polygons are based on class intervals that have a geometric series. This method ensures that each class range has approximately the same number of values within each class and that the change between intervals is consistent. * NaturalBreaks - Class intervals for polygons are based on natural groupings of the data. Class break values are identified that best group similar values and that maximize the differences between classes. * EqualArea - Polygons are created such that the size of each area is equal. For example, if the result has more high density values than low density values, more polygons will be created for high densities. * StandardDeviation - Polygons are created based upon the standard deviation of the predicted density values. ------------------------- --------------------------------------------------------- num_classes Optional int. This value is used to divide the range of predicted values into distinct classes. The range of values in each class is determined by the classification_type parameter. ------------------------- --------------------------------------------------------- output_name Optional string. Additional properties such as output feature service name. ------------------------- --------------------------------------------------------- context Optional string. Additional settings such as processing extent and output spatial reference. For calculate_density, there are two settings. #. Extent (extent)-a bounding box that defines the analysis area. Only those points in the input_layer that intersect the bounding box will be analyzed. #. Output Spatial Reference (outSR) the output features will be projected into the output spatial reference. ------------------------- --------------------------------------------------------- gis Optional, the GIS on which this tool runs. If not specified, the active GIS is used. ------------------------- --------------------------------------------------------- estimate Optional Boolean. Is true, the number of credits needed to run the operation will be returned as a float. ------------------------- --------------------------------------------------------- future Optional boolean. If True, the result will be a GPJob object and results will be returned asynchronously. 
========================= ========================================================= :returns: result_layer : feature layer Item if output_name is specified, else Feature Collection. .. code-block:: python USAGE EXAMPLE: To create a layer that shows density of collisions within 2 miles. The density is classified based upon the standard deviation. The range of density values is divided into 5 classes. collision_density = calculate_density(input_layer=collisions, radius=2, radius_units='Miles', bounding_polygon_layer=zoning_lyr, area_units='SquareMiles', classification_type='StandardDeviation', num_classes=5, output_name='density_of_incidents') """ gis = _arcgis.env.active_gis if gis is None else gis return gis._tools.featureanalysis.calculate_density( input_layer, field, cell_size, cell_size_units, radius, radius_units, bounding_polygon_layer, area_units, classification_type, num_classes, output_name, context, estimate=estimate, future=future)
271d1d50cd362f8e660de4ac93cef8a6cb43d967
9,244
def reverse(operation):
    """Decorator that negates the wrapped operation with sqlalchemy.not_."""
    def decorated(*args, **kwargs):
        return sqlalchemy.not_(operation(*args, **kwargs))
    return decorated
3a77ed5e0db081bd67ccbc1c90731f46001288f2
9,245
def disable_static_generator(view_func):
    """Decorator which prevents caching the response from a view on disk

    Flags the view with a ``disable_static_generator`` attribute so
    staticgenerator won't ever save its response on the filesystem.

    Example::

        @disable_static_generator
        def myview(request):
            # ...
    """
    # We could just do view_func.disable_static_generator = True, but
    # decorators are nicer if they don't have side-effects, so we return a
    # new function.
    def wrapped_view(*args, **kwargs):
        return view_func(*args, **kwargs)
    wrapped_view.disable_static_generator = True
    return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
5ad9dff33b1340d909467dcada90a43c1cc7618d
9,246
from tqdm import tqdm
import pandas as pd


def create_lengths(text):
    """Create a data frame of the sentence lengths from a text"""
    lengths = []
    for sentence in tqdm(text):
        lengths.append(len(sentence))
    return pd.DataFrame(lengths, columns=['counts'])
6a239563b19d1d2b2f72ae3d425e94f7b28a0d62
9,247
def basic_auth_string(key, value):
    """Returns basic auth string from key and value"""
    key_pass = b":".join((_to_bytes(key), _to_bytes(value)))
    token = b64encode(key_pass).decode()
    return f"Basic {token}"
3de47ff05251792d0f5e782af4d7c30d83dfd860
9,248
from .reader import get_file_hashes import traceback import traceback import os import argparse import sys import yaml def main(argv = None): """ Main function for the ``amplimap`` executable. This function: - parses command line arguments - reads, merges and checks each of these config files, if they exist: + ``config_default.yaml`` in the amplimap package + ``/etc/amplimap/VERSION/config.yaml`` (where VERSION is the amplimap version) + ``$AMPLIMAP_CONFIG`` + ``config.yaml`` in the working directory - checks for an existing analysis directory (and compares the amplimap version used to create it) - adds its own parent directory to the config file (to be inserted back into the python path inside Snakemake) - creates an analysis directory - writes ``config_used.yaml`` to the new analysis directory - creates a ``cluster_log`` directory (if running in cluster mode) - launches Snakemake, using the amplimap Snakefile, ``config_used.yaml`` as the config file and cluster parameters as specified in the command line arguments and config. """ try: basedir = os.path.dirname(os.path.realpath(__file__)) # parse the arguments, which will be available as properties of args (e.g. args.probe) parser = argparse.ArgumentParser( description = "amplimap v{} - amplicon mapping and analysis pipeline".format(__version__), formatter_class = argparse.ArgumentDefaultsHelpFormatter) # specify parameters parser.add_argument("-v", "--version", help="print version and exit", action="store_true") parser.add_argument("--basedir", help="print basedir and exit", action="store_true") parser.add_argument("--print-config", help="print configuration (including global and local settings) and exit", action="store_true") parser.add_argument("-r", "--run", help="actually run (will perform a dry run otherwise)", action="store_true") parser.add_argument("--resume", help="resume analysis in existing analysis directory", action="store_true") parser.add_argument("--cluster", help="specify a cluster type defined in your configuration files to run jobs on cluster.") parser.add_argument("--skip-file-check", help="skip check for changes in input files when resuming (not recommended)", action="store_true") parser.add_argument("--unlock", help="unlock working directory (Snakemake parameter)", action="store_true") parser.add_argument("--working-directory", help="path to the working directory", default=".") parser.add_argument("--ncores", help="number of local cores to run in parallel (only applies if --cluster is NOT set)", default=1, type=int) parser.add_argument("--njobs", help="number of cluster jobs to run in parallel (only applies if --cluster is set)", default=10, type=int) parser.add_argument("--latency-wait", help="How long to wait for output files to appear after job completes. Increase this if you get errors about missing output files. (Snakemake parameter)", default=5, type=int) parser.add_argument("--snakemake-args", help="For debugging: Extra arguments to the snakemake function (comma-separated key=value pairs - eg. 'printreason=True')") parser.add_argument("--debug", help="debug mode", action="store_true") # parser.add_argument("--debug-dag", help="debug DAG", action="store_true") parser.add_argument("TARGET", help="targets to run (eg. 
pileups variants coverages)", nargs="*") if argv is None: args = parser.parse_args() else: args = parser.parse_args(argv) if args.debug: print('Incoming argv: {}'.format(str(argv))) print('Targets: {}'.format(str(args.TARGET))) if args.version: print('{} {}'.format(__title__, __version__)) return 0 if args.basedir: print(basedir) return 0 # read base config to know which parameters etc are allowed default_config = read_config_file(args.print_config, os.path.join(basedir, 'config_default.yaml')) if not default_config: raise Exception('config_default.yaml file missing') # add undocumented config keys to make sure these don't raise an error for key in ['include_gbrowse_links', 'include_exon_distance', 'include_score']: if not key in default_config['annotate']: default_config['annotate'][key] = False # override with data from /etc/amplimap, if exists etc_config = read_config_file(args.print_config, '/etc/amplimap/%s/config.yaml' % __version__) # override with data from $AMPLIMAP_CONFIG, if exists env_config = {} try: env_config = read_config_file(args.print_config, os.environ['AMPLIMAP_CONFIG']) except KeyError: pass # read local config local_config = read_config_file(args.print_config, os.path.join(args.working_directory, 'config.yaml')) if not local_config: if args.print_config: sys.stderr.write('No local config.yaml found, using default configuration.\n') # merge configs together config = default_config for my_config in [etc_config, env_config, local_config]: # check that all settings actually exist differences = check_config_keys(default_config, my_config) # allow custom tools allowed_tools = set(default_config['tools'].keys()) if 'tools' in my_config: allowed_tools.update(my_config['tools'].keys()) differences = [ d for d in differences if not (len(d) == 2 and d[0] in ['modules'] and d[1] in allowed_tools) and d[0] != 'tools' ] if len(differences) > 0: sys.stderr.write('Your configuration file(s) contain unknown or invalid settings:\n') for diff in differences: sys.stderr.write('\t- {}\n'.format(':'.join(diff))) sys.stderr.write('Please check their spelling and location and try again.\n') return 1 snakemake.utils.update_config(config, my_config) # check basic config aligners = ['naive', 'bwa', 'bowtie2', 'star'] # allowed values for the aligner # add custom tools for tool_name, tool_config in config['tools'].items(): if 'align_command' in tool_config: aligners.append(tool_name) if not config['align']['aligner'] in aligners: raise Exception('align: aligner must be one of {}'.format(','.join(aligners))) callers = ['gatk', 'platypus', 'wecall'] # allowed values for the variant caller # add custom tools for tool_name, tool_config in config['tools'].items(): if 'call_command' in tool_config: callers.append(tool_name) if not config['variants']['caller'] in callers: raise Exception('variants: caller must be one of {}'.format(','.join(callers))) if config['parse_reads']['quality_trim_threshold'] != False: if not isinstance(config['parse_reads']['quality_trim_threshold'], float): raise Exception('quality_trim_threshold must be a decimal number') if not config['parse_reads']['quality_trim_threshold'] > 0 and config['parse_reads']['quality_trim_threshold'] < 1: raise Exception('quality_trim_threshold must be either "false" or above 0 and below 1') if not (config['general']['umi_min_consensus_percentage'] >= 0 and config['general']['umi_min_consensus_percentage'] <= 100): raise Exception('umi_min_consensus_percentage must be between 0 and 100') if not (config['parse_reads']['min_percentage_good'] >= 
0 and config['parse_reads']['min_percentage_good'] <= 100): raise Exception('min_percentage_good must be between 0 and 100') if not (config['parse_reads']['umi_one'] >= 0 and config['parse_reads']['umi_two'] >= 0): raise Exception('umi_one and umi_two must be 0 or greater') if config['annotate']['annovar']['protocols'].count(',') != config['annotate']['annovar']['operations'].count(','): raise Exception('The number of comma-separated protocols and operations under `annotate: annovar:` must match') # if we don't have UMIs (either on reads or as bam tag) we definitely have to ignore them # this makes it possible to "auto-detect" whether we need to ignore_umis or not if (config['parse_reads']['umi_one'] + config['parse_reads']['umi_two'] == 0) and config['general']['umi_tag_name'] == "": config['general']['ignore_umis'] = True # check we have proper paths if not config['general']['genome_name'] in config['paths'] or not isinstance(config['paths'][config['general']['genome_name']], dict): raise Exception('Could not find list of paths for genome_name: "{}". Please add the paths to your default configuration or your local config.yaml file.'.format(config['general']['genome_name'])) for name, path in config['paths'][config['general']['genome_name']].items(): if path.startswith('/PATH/TO/'): raise Exception('Path for {} reference is set to {}, which is probably incorrect. Please set the correct path in your default configuration or your local config.yaml file, or leave it empty.'.format( name, path)) if args.print_config: yaml.dump(config, sys.stdout, default_flow_style=False) return 0 # do some basic checks assert os.path.isdir(args.working_directory), 'working directory does not exist' # check for one (and only one) input directory input_directory = None input_directory_count = 0 input_directories = ['reads_in', 'unmapped_bams_in', 'mapped_bams_in', 'bams_in'] for input_name in input_directories: if os.path.isdir(os.path.join(args.working_directory, input_name)): input_directory_count += 1 input_directory = input_name if input_directory_count < 1: raise Exception( 'An input directory (one of: %s) needs to exist. Please see the documentation for the appropriate directory to use and place your sequencing data there.' % (', '.join(input_directories)) ) elif input_directory_count > 1: raise Exception( 'More than one of the possible input directories (%s) exists. Please only provide a single input directory with all your data.' % (', '.join(input_directories)) ) if input_directory in ['unmapped_bams_in', 'mapped_bams_in']: if not config['general']['use_raw_reads']: raise Exception( 'general: use_raw_reads needs to be set to true when using %s for input.' 
% (input_directory) ) # check input files sys.stderr.write('Checking input files...\n') if os.path.isfile(os.path.join(args.working_directory, 'probes.csv')): read_new_probe_design(os.path.join(args.working_directory, 'probes.csv'), reference_type = 'genome') if os.path.isfile(os.path.join(args.working_directory, 'probes_mipgen.csv')): process_probe_design(read_and_convert_mipgen_probes(os.path.join(args.working_directory, 'probes_mipgen.csv'))) if os.path.isfile(os.path.join(args.working_directory, 'picked_mips.txt')): process_probe_design(read_and_convert_mipgen_probes(os.path.join(args.working_directory, 'picked_mips.txt'), sep='\t')) if os.path.isfile(os.path.join(args.working_directory, 'probes_heatseq.tsv')): process_probe_design(read_and_convert_heatseq_probes(os.path.join(args.working_directory, 'probes_heatseq.tsv'))) if os.path.isfile(os.path.join(args.working_directory, 'targets.csv')): # note: this will fail on overlapping targets read_targets(os.path.join(args.working_directory, 'targets.csv'), check_overlaps=True, reference_type = 'genome', file_type = 'csv') if os.path.isfile(os.path.join(args.working_directory, 'targets.bed')): # note: this will fail on overlapping targets read_targets(os.path.join(args.working_directory, 'targets.bed'), check_overlaps=True, reference_type = 'genome', file_type = 'bed') if os.path.isfile(os.path.join(args.working_directory, 'snps.txt')): read_snps_txt(os.path.join(args.working_directory, 'snps.txt'), reference_type = 'genome') # this will be used to (very hackily) make sure amplimap can be imported as amplimap.xxx # by adding the parent dir to the top of sys.path in the Snakefile config['general']['amplimap_parent_dir'] = os.path.dirname(basedir) # check if analysis dir exists already analysis_dir = os.path.join(args.working_directory, 'analysis') configfile = os.path.join(analysis_dir, 'config_used.yaml') used_versions_path = os.path.join(analysis_dir, 'versions.yaml') # the analysis dir may exist just because we did a dry run, but once the versions exist we actually executed snakemake! if os.path.exists(analysis_dir) and os.path.exists(used_versions_path): if not args.resume: raise Exception('An analysis directory already exists. Please rename it or set --resume to reuse it and possibly overwrite existing files.') else: # check version if os.path.isfile(used_versions_path): with open(used_versions_path, 'r') as used_versions_file: used_versions = yaml.safe_load(used_versions_file.read()) if used_versions['_amplimap'] != str(__version__): sys.stderr.write('This analysis was performed with {} {} but this is {} {}!\n\n'.format(__title__, used_versions['_amplimap'], __title__, __version__)) sys.stderr.write('Please use the correct version of {} or start a new analysis.\n'.format(__title__)) return 1 else: sys.stderr.write('{} version checked.\n'.format(__title__)) # check used config file if os.path.isfile(configfile): with open(configfile, 'r') as used_config_file: used_config = yaml.safe_load(used_config_file.read()) differences = compare_config_dicts(config, used_config) if len(differences) > 0: sys.stderr.write('config_used.yaml in analysis directory differs from current config.yaml in working directory! 
Please rename or delete the old analysis directory to restart analysis with the new configuration.\n') sys.stderr.write('Different settings:\n') for diff in differences: sys.stderr.write('\t- {}\n'.format(':'.join(diff))) return 1 else: sys.stderr.write('Config files checked.\n') # check hashes of input files if not args.skip_file_check: used_file_hashes_path = os.path.join(analysis_dir, 'file_hashes.yaml') if os.path.isfile(used_file_hashes_path): with open(used_file_hashes_path, 'r') as used_file_hashes_file: used_file_hashes = yaml.safe_load(used_file_hashes_file.read()) for fn, current_hash in get_file_hashes(args.working_directory).items(): if used_file_hashes[fn] != current_hash: sys.stderr.write('File {} seems to have changed since the last run!\n\n'.format(fn)) sys.stderr.write('To ensure consistent results, you should rename or delete the old analysis directory and start a new analysis.\n') sys.stderr.write('To ignore this error, add the --skip-file-check parameter.\n') return 1 sys.stderr.write('Input files checked.\n') else: sys.stderr.write('Warning: Skipping input file check.\n') # ensure analysis dir exists now os.makedirs(analysis_dir, exist_ok=True) # write config to analysis directory, and then use that for snakemake with open(configfile, 'w') as f: yaml.dump(config, f, default_flow_style=False) # set up cluster commands cluster_command_nosync = None cluster_command_sync = None if args.cluster: if args.cluster in config['clusters'] and isinstance(config['clusters'][args.cluster], dict): if 'command_sync' in config['clusters'][args.cluster]: cluster_command_sync = config['clusters'][args.cluster]['command_sync'] elif 'command_nosync' in config['clusters'][args.cluster]: cluster_command_nosync = config['clusters'][args.cluster]['command_nosync'] else: raise Exception('Invalid cluster configuration -- need either command_sync or command_nosync for: {}'.format(args.cluster)) else: raise Exception('Cluster type not found in config: {}'.format(args.cluster)) sys.stderr.write('Running in cluster mode {} with {} parallel jobs\n'.format(args.cluster, args.njobs)) sys.stderr.write('cluster_command_nosync={}\n'.format(cluster_command_nosync)) sys.stderr.write('cluster_command_sync={}\n'.format(cluster_command_sync)) # make sure cluster log directory exists (this assumed the cluster command is using this as a parameter) cluster_logs = os.path.join(args.working_directory, 'cluster_log') os.makedirs(cluster_logs, exist_ok=True) sys.stderr.write('Will write cluster logs to: {}\n'.format(cluster_logs)) else: sys.stderr.write('Running locally with {} cores\n'.format(args.ncores)) extra_snakemake_args = {} if args.snakemake_args: extra_snakemake_args = { kv[0]: (True if kv[1].lower() == 'true' else False if kv[1].lower() == 'false' else kv[1]) for kv in [ x.split('=') for x in args.snakemake_args.split(',') ] } sys.stderr.write('Using extra Snakemake arguments: {}\n'.format(str(extra_snakemake_args))) success = snakemake.snakemake( snakefile = os.path.join(basedir, "Snakefile"), configfile = configfile, cores = args.ncores, # ignored if cluster nodes = args.njobs, # ignored if not cluster workdir = args.working_directory, targets = args.TARGET, dryrun = not args.run, cluster = cluster_command_nosync, cluster_sync = cluster_command_sync, jobname = "{}.{{rulename}}.{{jobid}}.sh".format(__title__), unlock = args.unlock, latency_wait = args.latency_wait, **extra_snakemake_args ) sys.stderr.write('\n===============================================\n\n') if success: if args.unlock: 
sys.stderr.write('Unlocked working directory. Run without --unlock to start.\n') elif not args.run: sys.stderr.write('{} {} dry run successful. Set --run to run!\n'.format(__title__, __version__)) else: sys.stderr.write('{} {} finished!\n'.format(__title__, __version__)) return 0 else: if args.cluster: sys.stderr.write('{} {} failed! Please see output above or the cluster log files for details.\n'.format(__title__, __version__)) sys.stderr.write('\nFor details on how to find the correct cluster log file for a failed job, see: https://amplimap.readthedocs.io/en/latest/usage.html#cluster-log-files\n') sys.stderr.write('You can also try to run amplimap without the cluster parameter to see the error message.\n') else: sys.stderr.write('{} {} failed! Please see output above for details.\n'.format(__title__, __version__)) return 1 except AmplimapReaderException as e: if args.debug: traceback.print_exc() sys.stderr.write(str(e)) sys.stderr.write('{} {} failed!\n'.format(__title__, __version__)) return 2 except Exception as e: if args.debug: traceback.print_exc() sys.stderr.write('\nERROR: {}\n\n'.format(e)) sys.stderr.write('{} {} failed!\n'.format(__title__, __version__)) return 1
ce59067d09976d9e16014acf395151e1ac42aa00
9,249
import functools


def measureit(_func=None, *, output: Output = None, number: int = 1):
    """
    Measure the energy consumption of monitored devices during the execution of the decorated function
    (if multiple runs it will measure the mean energy)

    :param output: output instance that will receive the power consummation data
    :param number: number of iteration in the loop in case you need multiple runs or the code is too fast to be measured
    """
    def decorator_measure_energy(func):
        @functools.wraps(func)
        def wrapper_measure(*args, **kwargs):
            sensor = Measurement(func.__name__, output)
            sensor.begin()
            for i in range(number):
                val = func(*args, **kwargs)
            sensor.end()
            sensor._results = sensor._results / number
            sensor.export()
            return val
        return wrapper_measure

    if _func is None:
        # to ensure the working system when you call it with parameters or without parameters
        return decorator_measure_energy
    else:
        return decorator_measure_energy(_func)
bb28c7423f5d2a479de052554f68d6c99494180d
9,250
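A minimal usage sketch for the decorator above; `Measurement` and `Output` are assumed to be provided by the same energy-measurement package as the record, and the decorated function is hypothetical.

# Hypothetical usage of measureit with the default (None) output.
@measureit(number=10)
def matrix_sum():
    return sum(i * i for i in range(10_000))

result = matrix_sum()  # runs the body 10 times and exports the mean energy reading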
def csv_template(n_types, n_type_covariates, initialize_coeffs=True):
    """Creates a template for the parameter specification.

    Parameters
    ----------
    n_types : int, optional
        Number of types in the model. Default is one.
    n_type_covariates : int, optional
        Number of covariates to predict type probabilities. Can be two or three.
    initialize_coeffs : bool, optional
        Whether coefficients are initialized with values or not. Default is ``True``.

    """
    template = _base_template()

    if n_types > 1:
        to_concat = [
            template,
            _type_prob_template(n_types, n_type_covariates),
            _type_shift_template(n_types),
        ]
        template = pd.concat(to_concat, axis=0, sort=False)

    if initialize_coeffs is False:
        template["value"] = np.nan

    return template
d211373b1939242600b0c5c15a30b16f58eab229
9,251
import time import os import scipy def get_surround(ppath, recordings, istate, win, signal_type, recalc_highres=False, tstart=0, tend=-1, ma_thr=20, ma_state=3, flatten_tnrem=False, nsr_seg=2, perc_overlap=0.95, null=False, null_win=[0.5,0.5], p_iso=0, pcluster=0, clus_event='waves', psave=False): """ Collect raw signal surrounding events @Params ppath - base folder recordings - list of recordings istate - brain state(s) to analyze win: time window (s) to collect data relative to the event signal_type: specifies the type of data to collect 'EEG', 'EEG2' --> raw hippocampal or prefrontal EEG 'SP', 'SP2' --> hippocampal or prefrontal SP 'SP_NORM', 'SP2_NORM' --> norm. hippocampal or prefrontal SP 'SP_CALC', 'SP2_CALC' --> calculate each SP using surrounding EEG 'SP_CALC_NORM', 'SP2_CALC_NORM' --> normalize calculated SP by whole SP mean 'LFP' --> processed LFP signal recalc_highres - if True, recalculate high-resolution spectrogram from EEG, using $nsr_seg and $perc_overlap params tstart, tend - time (s) into recording to start and stop collecting data ma_thr, ma_state - max duration and brain state for microarousals flatten_tnrem - brain state for transition sleep nsr_seg, perc_overlap - set FFT bin size (s) and overlap (%) for spectrogram calculation null - if True, also collect data surrounding randomized control points in $istate null_win - if > 0, qualifying "null" points must be free of P-waves and laser pulses in surrounding $null_win interval (s) if = 0, "null" points are randomly selected from all state indices p_iso, pcluster, clus_event - see SOMETHING ELSE psave - optional string specifying a filename to save the data (if False, data is not saved) @Returns p_signal - dictionary with brain states as keys, and sub-dictionaries as values Sub-dictionaries have mouse recordings as keys, with lists of 2D or 3D signals as values Signals (SPs, EEGs, or LFPs) represent the time window ($win s) surrounding each P-wave null_signal - dictionary structured as above, but containing signals surrounding each randomly selected control point data.shape - tuple with shape of the data from one trial """ START = time.perf_counter() # clean data inputs if not isinstance(recordings, list): recordings = [recordings] if not isinstance(istate, list): istate = [istate] if len(istate) == 0: istate = ['total'] brstate = 'total' p_signal = {s:{rec:[] for rec in recordings} for s in istate} # signal surrounding P-waves null_signal = {s:{rec:[] for rec in recordings} for s in istate} # signal surrounding randomized time points for rec in recordings: print('Getting P-waves for ' + rec + ' ...') p_signal = {s:[] for s in istate} # signal surrounding P-waves null_signal = {s:[] for s in istate} # signal surrounding randomized time points # load sampling rate sr = sleepy.get_snr(ppath, rec) nbin = int(np.round(sr) * 2.5) dt = (1.0 / sr) * nbin iwin1, iwin2 = get_iwins(win, sr) # load EEG and EEG2 EEG = so.loadmat(os.path.join(ppath, rec, 'EEG.mat'), squeeze_me=True)['EEG'] if os.path.exists(os.path.join(ppath, rec, 'EEG2.mat')): EEG2 = so.loadmat(os.path.join(ppath, rec, 'EEG2.mat'), squeeze_me=True)['EEG2'] # adjust Intan idx to properly translate to SP idx spi_adjust = np.linspace(-sr, sr, len(EEG)) # load or calculate entire high-res spectrogram # SP calculated using EEG2 if ('SP2' in signal_type) and (signal_type != 'SP2_CALC'): SP, f, t, sp_dt, sp_nbin, _ = AS.highres_spectrogram(ppath, rec, nsr_seg=nsr_seg, perc_overlap=perc_overlap, recalc_highres=recalc_highres, mode='EEG2') #sp_nbin = len(EEG) / 
SP.shape[1] sp_win1 = int(round(iwin1/sp_nbin)) sp_win2 = int(round(iwin2/sp_nbin)) # SP calculated using EEG elif ('SP' in signal_type) and (signal_type != 'SP_CALC'): SP, f, t, sp_dt, sp_nbin, _ = AS.highres_spectrogram(ppath, rec, nsr_seg=nsr_seg, perc_overlap=perc_overlap, recalc_highres=recalc_highres, mode='EEG') sp_win1 = int(round(iwin1/sp_nbin)) sp_win2 = int(round(iwin2/sp_nbin)) # calculate SP mean if '_NORM' in signal_type: SP_mean = SP.mean(axis=1) SP_norm = np.divide(SP, np.repeat([SP_mean], SP.shape[1], axis=0).T) # normalize entire spectrogram # load and adjust brain state annotation M, _ = sleepy.load_stateidx(ppath, rec) M = AS.adjust_brainstate(M, dt, ma_thr=ma_thr, ma_state=ma_state, flatten_tnrem=flatten_tnrem) # load LFP and P-wave indices LFP, p_idx = load_pwaves(ppath, rec) # isolate single or clustered P-waves if p_iso and pcluster: print('ERROR: cannot accept both p_iso and pcluster arguments') return elif p_iso: p_idx = get_p_iso(p_idx, sr, win=p_iso) elif pcluster: p_idx = get_pclusters(p_idx, sr, win=pcluster, return_event=clus_event) # define start and end points of analysis istart = int(np.round(tstart*sr)) if tend == -1: iend = len(EEG) - 1 else: iend = int(np.round(tend*sr)) for pi in p_idx: if pi >= iwin1 and pi + iwin2 < len(EEG) and istart <= pi <= iend: if istate[0] != 'total': brstate = int(M[int(pi/nbin)]) # get data of desired signal type if signal_type == 'EEG': data = EEG[pi-iwin1 : pi+iwin2] elif signal_type == 'EEG2': data = EEG2[pi-iwin1 : pi+iwin2] # calculate SP from EEG or EEG2 elif 'CALC' in signal_type: if 'SP2' in signal_type: tmp = EEG2[pi-iwin1 : pi+iwin2] elif 'SP' in signal_type: tmp = EEG[pi-iwin1 : pi+iwin2] f, t, data = scipy.signal.spectrogram(tmp, fs=sr, window='hanning', nperseg=int(nsr_seg*sr), noverlap=int(nsr_seg*sr*perc_overlap)) # normalize calculated SP based on entire recording if 'NORM' in signal_type: data = np.divide(data, np.repeat([SP_mean], data.shape[1], axis=0).T) # if not calculating, get SP or SP_NORM from whole recording calculation elif 'SP' in signal_type: spi = int(round((pi + spi_adjust[pi])/sp_nbin)) if 'NORM' in signal_type: data = SP_norm[:, spi-sp_win1 : spi+sp_win2] else: data = SP[:, spi-sp_win1 : spi+sp_win2] elif signal_type == 'LFP': data = LFP[pi-iwin1 : pi+iwin2] else: print(signal_type + ' IS AN INVALID SIGNAL TYPE') return # collect data in relevant dictionary if brstate in istate: p_signal[brstate].append(data) # collect signals surrounding random control time points if null: null_iwin1, null_iwin2 = get_iwins(null_win, sr) # sample "null" REM epochs with no P-waves/laser pulses if null_win != 0: # find all points that don't qualify as "null" not_null_idx = np.zeros((10000000)) for i, pi in enumerate(p_idx): p_win = np.arange(pi-null_iwin1, pi+null_iwin2) not_null_idx[i*len(p_win) : i*len(p_win)+len(p_win)] = p_win # get rid of trailing zeros (computational efficiency) not_null_idx = np.trim_zeros(not_null_idx, 'b') for s in istate: if istate[0] != 'total': # get array of all possible indices in state s sseq = sleepy.get_sequences(np.where(M==s)[0]) sseq_idx = [np.arange(seq[0]*nbin, seq[-1]*nbin+nbin) for seq in sseq] sseq_idx = np.array((list(chain.from_iterable(sseq_idx)))) sseq_idx = [sidx for sidx in sseq_idx if sidx > iwin1 and sidx < len(EEG)-iwin2 and istart < sidx < iend] else: sseq_idx = np.arange(iwin1, len(EEG)-iwin2) # keep only state indices that are not next to a P-wave/laser pulse if null_win != 0: sseq_idx = np.setdiff1d(sseq_idx, not_null_idx) # randomly select from all state 
indices else: sseq_idx = np.array((sseq_idx)) np.random.seed(0) # select number of random indices matching the number of P-waves r_idx = np.random.randint(low=0, high=len(sseq_idx), size=len(p_signal[s])) null_idx = sseq_idx[r_idx] for ni in null_idx: # get data of desired signal type if signal_type == 'EEG': data = EEG[ni-iwin1 : ni+iwin2] elif signal_type == 'EEG2': data = EEG2[ni-iwin1 : ni+iwin2] # calculate SP from EEG or EEG2 elif 'CALC' in signal_type: if 'SP2' in signal_type: tmp = EEG2[ni-iwin1 : ni+iwin2] elif 'SP' in signal_type: tmp = EEG[ni-iwin1 : ni+iwin2] f, t, data = scipy.signal.spectrogram(tmp, fs=sr, window='hanning', nperseg=int(nsr_seg * sr), noverlap=int(nsr_seg * sr * perc_overlap)) # normalize calculated SP based on entire recording if 'NORM' in signal_type: data = np.divide(data, np.repeat([SP_mean], data.shape[1], axis=0).T) # if not calculating, get SP or SP_NORM from whole recording calculation elif 'SP' in signal_type: spi = int(round((ni + spi_adjust[ni])/sp_nbin)) if 'NORM' in signal_type: data = SP_norm[:, spi-sp_win1 : spi+sp_win2] else: data = SP[:, spi-sp_win1 : spi+sp_win2] elif signal_type == 'LFP': data = LFP[ni-iwin1 : ni+iwin2] else: print(signal_type + ' IS AN INVALID SIGNAL TYPE') return # collect data in null dictionary null_signal[s].append(data) # save tmp files to free up more room for computation for s in istate: so.savemat(f'TMP_{rec}_pwaves_{s}.mat', {'data':p_signal[s]}) so.savemat(f'TMP_{rec}_null_{s}.mat', {'data':null_signal[s]}) if psave: print('\n Assembling data dictionaries and saving .mat files ...\n') else: print('\n Assembling data dictionaries ...\n') # collect data from all recordings for each state from tmp files p_signal = {s:{rec:0 for rec in recordings} for s in istate} null_signal = {s:{rec:0 for rec in recordings} for s in istate} # assemble final data dictionaries for s in istate: for rec in recordings: p_signal[s][rec] = so.loadmat(f'TMP_{rec}_pwaves_{s}.mat')['data'] null_signal[s][rec] = so.loadmat(f'TMP_{rec}_null_{s}.mat')['data'] # remove temporary files for rec in recordings: os.remove(f'TMP_{rec}_pwaves_{s}.mat') os.remove(f'TMP_{rec}_null_{s}.mat') # save files if psave: for s in istate: filename = psave if isinstance(psave, str) else f'Surround_{signal_type}' so.savemat(os.path.join(ppath, f'{filename}_pwaves_{s}.mat'), p_signal[s]) so.savemat(os.path.join(ppath, f'{filename}_null_{s}.mat'), null_signal[s]) so.savemat(os.path.join(ppath, f'{filename}_data_shape.mat'), {'data_shape':data.shape}) END = time.perf_counter() print(f'COMPUTING TIME --> {END-START:0.2f} seconds ({len(recordings)} recordings, {len(istate)} brainstates, signal type = {signal_type})') return p_signal, null_signal, data.shape
adee4efc854002570f6a6151e3f86967b94a1bf5
9,252
import os


def load_quantized_bert_base(batch_size=1, seq_len=384):
    """
    Load the quantized bert-base model from TLCBench, possibly downloading it from github
    and caching the converted int8 QNN module to disk.

    In addition to returing the relay module and its parameters, it also returns input name
    and shape information, which can be used at the deployment time as follows:

    ```
    mod, params, input_info = load_quantized_bert_base()
    ...
    runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))

    for name, shape in input_info:
        arr = np.random.uniform(1, 10, size=shape).astype("int64")
        runtime.set_input(name, arr)

    runtime.run()
    ```
    """
    url = "https://github.com/tlc-pack/TLCBench/raw/main/models/bert-base-qat.onnx"
    log.info("Downloading quantized bert-base model.")
    onnx_path = download_testdata(url, "bert-base-qat.onnx", module="tlcbench")
    data_dir = os.path.dirname(onnx_path)
    json_path = os.path.join(data_dir, "bert_base_int8_b%d_s%d.json" % (batch_size, seq_len))
    params_path = os.path.join(data_dir, "bert_base_int8_b%d_s%d.params" % (batch_size, seq_len))

    # Input names and order encoded in the ONNX model
    input_info = [
        ("input_ids", (batch_size, seq_len)),
        ("segment_ids", (batch_size, seq_len)),
        ("input_mask", (batch_size, seq_len)),
    ]

    if not os.path.exists(json_path) or not os.path.exists(params_path):
        convert_to_qnn(onnx_path, json_path, params_path, input_info)

    def deserialize():
        try:
            return deserialize_relay(json_path, params_path)
        except TVMError:
            # A serialized Relay json file may become invalid after TVM bump
            # Update the serialized model and try loading again
            convert_to_qnn(onnx_path, json_path, params_path, input_info)
            return deserialize_relay(json_path, params_path)

    mod, params = deserialize()

    return mod, params, input_info
3f7a533f7424bd4727c963c6c8dc8c2c46b2d9b0
9,253
from . import setup as jssetup


def setup(app):
    """A temporary setup function so that we can use it for
    backwards compatability.

    This should be removed after a deprecation cycle.
    """
    # To avoid circular imports we'll lazily import
    js.logger.warning(
        (
            "`jupyter-sphinx` was initialized with the "
            "`jupyter_sphinx.execute` sub-module. Replace this with "
            "`jupyter_sphinx`. Initializing with "
            "`jupyter_sphinx.execute` will be removed in "
            "version 0.3"
        )
    )
    out = jssetup(app)
    return out
16a64701d3b77a1d58126df458d4a3016be1e366
9,254
import torch


def hinge_loss(logit, target, margin, reduce='sum'):
    """
    Args:
        logit (torch.Tensor): (N, C, d_1, d_2, ..., d_K)
        target (torch.Tensor): (N, d_1, d_2, ..., d_K)
        margin (float):
    """
    target = target.unsqueeze(1)
    tgt_logit = torch.gather(logit, dim=1, index=target)
    loss = logit - tgt_logit + margin
    loss = torch.masked_fill(loss, loss < 0, 0)
    loss = torch.scatter(loss, dim=1, index=target, value=0)
    reduce_fn = REDUCE_FN_MAPPINGS[reduce]
    return reduce_fn(loss)
0eb499d4164b37dee657ad0e0a5c1480324434bc
9,255
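A short usage sketch for the loss above. The record does not show REDUCE_FN_MAPPINGS, so the mapping below is an assumption about what it contains.

import torch

# Assumed helper from the surrounding module.
REDUCE_FN_MAPPINGS = {'sum': torch.sum, 'mean': torch.mean}

logit = torch.randn(4, 10)            # (N, C)
target = torch.randint(0, 10, (4,))   # (N,)
loss = hinge_loss(logit, target, margin=1.0, reduce='sum')
print(loss)  # scalar; zero only if every target logit beats the others by the margin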
import subprocess


def setTimeSync(state):
    """
    Set the state of host/guest time synchronization using vmware-toolbox-cmd.
    Returns None on success and an error message on failure.
    """
    # Translate the boolean to a string for vmware-toolbox-cmd
    if state:
        setStr = 'enable'
    else:
        setStr = 'disable'

    try:
        # Run the command to set the status of host/guest time sync
        retval = subprocess.Popen(['vmware-toolbox-cmd', 'timesync', setStr]).wait()
    except OSError as e:
        msg = "Unable to execute the 'vmware-toolbox-cmd' command: %s" % str(e)
        log.exception(msg)
        return msg

    if retval != 0:
        msg = 'Setting the state of host/guest time synchronization failed, '\
              'exit code: %d' % retval
        log.info(msg)
        return msg
    else:
        log.info("Successfully set status of host/guest time synchronization "\
                 "to: '%s'", setStr)
        return None
74369a46b357175615b28c4b9bcb57f3e2d501fe
9,256
import threading


def do_cleanup(ips, args):
    """
    :param ips:
    :param args:
    :return: None
    """
    def _cleanup_single_node(ip):
        def _rpc(cmd):
            return rpc(ip, cmd, args.user, args.password, args.key,
                       suppress_output=not args.verbose)

        # TODO: (Make this more targeted)
        # Stop services.
        _rpc('sudo service cassandra stop')
        _rpc('sudo service dsc stop')
        _rpc('sudo service dse stop')
        _rpc('sudo service datastax-agent stop')
        _rpc('sudo /etc/init.d/cassandra stop')
        _rpc('sudo /etc/init.d/dsc stop')
        _rpc('sudo /etc/init.d/dse stop')
        _rpc('sudo /etc/init.d/datastax-agent stop')

        # Uninstall packages.
        _rpc('sudo yum remove -y \'*cassandra*\' \'*dsc*\' \'*dse*\' \'*datastax*\'')

        # Cleanup install folders.
        _rpc('sudo rm -rf /var/lib/cassandra/*')
        _rpc('sudo rm -rf /var/log/{cassandra,hadoop,hive,pig}/*')
        _rpc('sudo rm -rf /etc/{cassandra,dsc,dse}/*')
        _rpc('sudo rm -rf /usr/share/{dse,dse-demos}')
        _rpc('sudo rm -rf /etc/default/{cassandra,dsc,dse}')

    # Start a progress bar if not in verbose mode.
    if not args.verbose:
        e = threading.Event()
        bar_thread = threading.Thread(target=progress_bar,
                                      args=('Performing pre-install cleanup.', e))
        bar_thread.setDaemon(True)
        bar_thread.start()

    # Spawn threads to run instructions on all nodes at once
    threads = []
    for pub, priv in ips:
        t = threading.Thread(target=_cleanup_single_node, args=(pub,))
        t.setDaemon(True)
        t.start()
        threads.append(t)

    # Wait for all threads to complete
    for t in threads:
        t.join()

    # Terminate the progress bar if not in verbose mode.
    if not args.verbose:
        e.set()
        bar_thread.join()
7623608a3a73a2196c9b2636a066a2aa85c6b659
9,257
import argparse


def getargopts():
    """Parse command line arguments."""
    opts = argparse.ArgumentParser()
    opts.add_argument('--port', type=int,
                      help="Port to listen to (default 8888)", default=8888)
    opts.add_argument('srcbase', help="Base source directory.")
    opts.add_argument('targetbase', help="Base target directory.")
    args = opts.parse_args()
    return args.srcbase, args.targetbase, args.port
ef565edbde535c1078e63b50ff5ba18a17cf4efb
9,258
def roc(model, image, mask, ignore=None, sky=None, n_mask=1, seed=1,
        thresholds=np.linspace(0.001, 0.999, 500), dilate=False, rad=1):
    """ evaluate model on test set with the ROC curve

    :param model: deepCR object
    :param image: np.ndarray((N, W, H)) image array
    :param mask: np.ndarray((N, W, H)) CR mask array
    :param ignore: np.ndarray((N, W, H)) bad pixel array incl. saturation, etc.
    :param thresholds: np.ndarray(N) FPR grid on which to evaluate ROC curves
    :return: np.ndarray(N), np.ndarray(N): TPR and FPR
    """
    kernel = None
    if dilate:
        kernel = disk(rad)
    if type(image) == np.ndarray and len(image.shape) == 3:
        data = dataset(image, mask, ignore)
    elif type(image[0]) == str:
        data = DatasetSim(image, mask, sky=sky, n_mask=n_mask, seed=seed)
    else:
        raise TypeError('Input must be numpy data arrays or list of file paths!')
    (tpr, fpr), (tpr_dilate, fpr_dilate) = _roc(model, data, thresholds=thresholds, dilate=kernel)
    if dilate:
        return (tpr, fpr), (tpr_dilate, fpr_dilate)
    else:
        return tpr, fpr
741381b707e4c732202c0cfdac512b13483f533f
9,259
def parse_testconfig(conffile):
    """Parses the config file for the whole testsuite."""
    repo_path, drop_caches, tests_dir, testlog_dir = '', '', '', ''
    basebranch, baserev, repo_prof_path, repo_gprof_path = '', '', None, None
    fileopen = open(conffile, 'r')
    for line in fileopen:
        line = line.split('#')[0]  # Discard comments
        if line == '' or line == '\n':
            continue  # Discard lines with comments only and empty lines
        opt, args = line.split(' ', 1)  # Get arguments
        if opt == 'MOSES_REPO_PATH:':
            repo_path = args.replace('\n', '')
        elif opt == 'DROP_CACHES_COMM:':
            drop_caches = args.replace('\n', '')
        elif opt == 'TEST_DIR:':
            tests_dir = args.replace('\n', '')
        elif opt == 'TEST_LOG_DIR:':
            testlog_dir = args.replace('\n', '')
        elif opt == 'BASEBRANCH:':
            basebranch = args.replace('\n', '')
        elif opt == 'BASEREV:':
            baserev = args.replace('\n', '')
        elif opt == 'MOSES_PROFILER_REPO:':  # Optional
            repo_prof_path = args.replace('\n', '')
        elif opt == 'MOSES_GOOGLE_PROFILER_REPO:':  # Optional
            repo_gprof_path = args.replace('\n', '')
        else:
            raise ValueError('Unrecognized option ' + opt)
    config = Configuration(repo_path, drop_caches, tests_dir, testlog_dir,
                           basebranch, baserev, repo_prof_path, repo_gprof_path)
    fileopen.close()
    return config
a01e30a0355eac229018c7736e7d9903f59402ed
9,260
def get_filtered_df(df, vocab_file):
    """
    Return a data frame with only the words present in the vocab file.
    """
    if vocab_file:
        vocab = open(vocab_file).readlines()
        vocab = [v.strip() for v in vocab]

        # Get the set of words.
        words = pd.Series(df.word.values.ravel()).unique()
        set_words = set(words)

        # Find the words common to data frame and vocab
        common_set_words = set_words & set(vocab)

        # Filter the dataframe
        df_filtered = df[df.word.isin(common_set_words)]

        return df_filtered
    else:
        return df
7fbfcfd92adc2b55ad3024b6e31ced743fa9ac50
9,261
def pkcs5_pad(data):
    """
    Pad data using PKCS5
    """
    pad = KEYCZAR_AES_BLOCK_SIZE - len(data) % KEYCZAR_AES_BLOCK_SIZE
    data = data + pad * chr(pad).encode("utf-8")
    return data
c4bb6f28284fe8d5d14f8efcede6858959f1b4cc
9,262
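A worked example of the padding rule, assuming KEYCZAR_AES_BLOCK_SIZE is 16 (the AES block size in bytes); the constant is defined elsewhere in the original module.

KEYCZAR_AES_BLOCK_SIZE = 16  # assumed block size in bytes

padded = pkcs5_pad(b"hello")        # 5 bytes -> pad value 11 (0x0b)
print(len(padded))                  # 16
print(padded[-1])                   # 11
padded_full = pkcs5_pad(b"A" * 16)  # already aligned -> a full extra block of 0x10
print(len(padded_full))             # 32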
import pickle


def gatherData(data, neat, gen, hyp, fileName, savePop=False):
    """Collects run data, saves it to disk, and exports pickled population

    Args:
      data       - (DataGatherer)  - collected run data
      neat       - (Neat)          - neat algorithm container
        .pop     - [Ind]           - list of individuals in population
        .species - (Species)       - current species
      gen        - (ind)           - current generation
      hyp        - (dict)          - algorithm hyperparameters
      savePop    - (bool)          - save current population to disk?

    Return:
      data - (DataGatherer) - updated run data
    """
    data.gatherData(neat.pop, neat.species)
    if (gen % hyp['save_mod']) == 0:
        data = checkBest(data, hyp)
        data.save(gen)

    if savePop is True:  # Get a sample pop to play with in notebooks
        pref = 'log/' + fileName
        with open(pref + '_pop.obj', 'wb') as fp:
            pickle.dump(neat.pop, fp)

    return data
56c8a01b2e07280dc17a5fa3d76331e39e112d8d
9,263
def impute_bad_concentration_fits(c_lgtc, c_lgtc_min=0.1):
    """Overwrite bad concentration parameter fit values."""
    c_lgtc = np.where(c_lgtc < c_lgtc_min, c_lgtc_min, c_lgtc)
    return c_lgtc
88f85003a2030ea34cfe72de85c1061981f86957
9,264
from re import DEBUG


def _build_sub_nics(all_nics):
    """
    Aggregate all sub nics into their sub groups.
    I.E. All nic\.X.\.*\.Y nics go into a list where all Y's are the same.

    :param all_nics: All nics to consider.
    :type all_nics: list
    """
    sub_nics = {}
    for nic in all_nics['nics']:
        possible_sub_nic = get_nic_sub_number.match(nic.key)
        if not possible_sub_nic:
            log("System {0} and NIC {1} not in valid format. "
                "Skipping.".format(nic.obj, nic.key), DEBUG)
            continue
        sub_nic_number = possible_sub_nic.group(1)
        if sub_nic_number in sub_nics:
            sub_nics[sub_nic_number].append(nic)
        else:
            sub_nics[sub_nic_number] = [nic]
    return sub_nics
4285205790cb9b2e7ae2b646ea34feb4e22a395a
9,265
import numpy
import math


def pearsonr(a0, a1):
    """Pearson r, product-moment correlation coefficient, of two samples.

    Covariance divided by product of standard deviations.
    https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient#For_a_sample
    """
    n = len(a0)
    assert n == len(a1)
    if n == 0:
        # No data, so no notion of correlation.
        return float('NaN')
    a0 = numpy.array(a0)
    a1 = numpy.array(a1)
    m0 = numpy.mean(a0)
    m1 = numpy.mean(a1)
    num = numpy.sum((a0 - m0)*(a1 - m1))
    den0_sq = numpy.sum((a0 - m0)**2)
    den1_sq = numpy.sum((a1 - m1)**2)
    den = math.sqrt(den0_sq*den1_sq)
    if den == 0.0:
        # No variation in at least one column, so no notion of
        # correlation.
        return float('NaN')
    r = num / den
    # Clamp r in [-1, +1] in case of floating-point error.
    r = min(r, +1.0)
    r = max(r, -1.0)
    return r
64135ebc840bb1673ece1aec24f22c960f89af20
9,266
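A quick check of the function on small samples, covering the perfectly correlated, anti-correlated, and degenerate (constant) cases.

print(pearsonr([1, 2, 3, 4], [2, 4, 6, 8]))   # 1.0 (perfect positive correlation)
print(pearsonr([1, 2, 3, 4], [8, 6, 4, 2]))   # -1.0 (perfect negative correlation)
print(pearsonr([1, 2, 3], [5, 5, 5]))         # nan (no variation in one sample)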
def __column(matrix, i):
    """Returns columns from a bidimensional Python list (a list of lists)"""
    return [row[i] for row in matrix]
f455245eb8bbda90f185479afc85eecfb481c70c
9,267
import math


def datamask(fmri_data, mask_data):
    """
    filter the data by a ROI mask

    Parameters:
        fmri_data : array
            The fMRI data.
            The shape of fmri_data is [nx, ny, nz]. nx, ny, nz represent the size of the fMRI data.
        mask_data : array
            The mask data.
            The shape of mask_data is [nx, ny, nz]. nx, ny, nz represent the size of the fMRI data.

    Returns
    -------
    newfmri_data : array
        The new fMRI data.
        The shape of newfmri_data is [nx, ny, nz]. nx, ny, nz represent the size of the fMRI data.
    """
    nx, ny, nz = fmri_data.shape
    newfmri_data = np.full([nx, ny, nz], np.nan)
    for i in range(nx):
        for j in range(ny):
            for k in range(nz):
                if (mask_data[i, j, k] != 0) and (math.isnan(mask_data[i, j, k]) is False):
                    newfmri_data[i, j, k] = fmri_data[i, j, k]

    return newfmri_data
235c676636b5cff42fba4da539ad83ed7c4f999a
9,268
def make_global_batch_norm_tests(options):
    """Make a set of tests to do batch_norm_with_global_normalization."""

    test_parameters = [{
        "dtype": [tf.float32],
        "input_shape": [[1, 1, 6, 2], [3, 4, 5, 4]],
        "epsilon": [0.1, 0.0001],
        "scale_after": [True, False],
    }]

    def build_graph(parameters):
        """Build the global batch norm testing graph."""
        input_shape = parameters["input_shape"]
        scale_shape = input_shape[3]

        scale = create_tensor_data(parameters["dtype"], scale_shape)
        offset = create_tensor_data(parameters["dtype"], scale_shape)
        mean = create_tensor_data(parameters["dtype"], scale_shape)
        variance = create_tensor_data(parameters["dtype"], scale_shape)

        x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
        x_norm = tf.nn.batch_norm_with_global_normalization(
            x, mean, variance, scale, offset, parameters["epsilon"],
            parameters["scale_after"])

        input_tensor = tf.placeholder(
            dtype=parameters["dtype"], name="input",
            shape=parameters["input_shape"])
        out = tf.add(input_tensor, x_norm)
        return [input_tensor], [out]

    def build_inputs(parameters, sess, inputs, outputs):
        input_value = create_tensor_data(parameters["dtype"],
                                         parameters["input_shape"])
        return [input_value], sess.run(
            outputs, feed_dict=dict(zip(inputs, [input_value])))

    make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
eb68bd9cdd09c98471939ea88b8ea60b9772ab90
9,269
import copy import os def write_stats_file(run_output_dict): """Writes a dummy PolyChord format .stats file for tests functions for processing stats files. This is written to: base_dir/file_root.stats Also returns the data in the file as a dict for comparison. Parameters ---------- run_output_dict: dict Output information to write to .stats file. Must contain file_root and base_dir. If other settings are not specified, default values are used. Returns ------- output: dict The expected output of nestcheck.process_polychord_stats(file_root, base_dir) """ mandatory_keys = ['file_root', 'base_dir'] for key in mandatory_keys: assert key in run_output_dict, key + ' not in run_output_dict' default_output = {'logZ': 0.0, 'logZerr': 0.0, 'logZs': [0.0], 'logZerrs': [0.0], 'ncluster': 1, 'nposterior': 0, 'nequals': 0, 'ndead': 0, 'nlike': 0, 'nlive': 0, 'avnlike': 0.0, 'avnlikeslice': 0.0, 'param_means': [0.0, 0.0, 0.0], 'param_mean_errs': [0.0, 0.0, 0.0]} allowed_keys = set(mandatory_keys) | set(default_output.keys()) assert set(run_output_dict.keys()).issubset(allowed_keys), ( 'Input dict contains unexpected keys: {}'.format( set(run_output_dict.keys()) - allowed_keys)) output = copy.deepcopy(run_output_dict) for key, value in default_output.items(): if key not in output: output[key] = value # Make a PolyChord format .stats file corresponding to output file_lines = [ 'Evidence estimates:', '===================', (' - The evidence Z is a log-normally distributed, with location and ' 'scale parameters mu and sigma.'), ' - We denote this as log(Z) = mu +/- sigma.', '', 'Global evidence:', '----------------', '', 'log(Z) = {0} +/- {1}'.format( output['logZ'], output['logZerr']), '', '', 'Local evidences:', '----------------', ''] for i, (lz, lzerr) in enumerate(zip(output['logZs'], output['logZerrs'])): file_lines.append('log(Z_ {0}) = {1} +/- {2}'.format( str(i + 1).rjust(2), lz, lzerr)) file_lines += [ '', '', 'Run-time information:', '---------------------', '', ' ncluster: 0 / 1', ' nposterior: {0}'.format(output['nposterior']), ' nequals: {0}'.format(output['nequals']), ' ndead: {0}'.format(output['ndead']), ' nlive: {0}'.format(output['nlive']), ' nlike: {0}'.format(output['nlike']), ' <nlike>: {0} ( {1} per slice )'.format( output['avnlike'], output['avnlikeslice']), '', '', 'Dim No. Mean Sigma'] for i, (mean, meanerr) in enumerate(zip(output['param_means'], output['param_mean_errs'])): file_lines.append('{0} {1} +/- {2}'.format( str(i + 1).ljust(3), mean, meanerr)) file_path = os.path.join(output['base_dir'], output['file_root'] + '.stats') with open(file_path, 'w') as stats_file: stats_file.writelines('{}\n'.format(line) for line in file_lines) return output
5a3d7b81d8315fd39d5348f9140a001b020c7584
9,270
import json


def dict_serialize(seqlen_dist_dict):
    """
    dict->str
    Turns {1:'a',2:'b'}->"[[1,'a'],[2,'b']]"
    Why? Because this format plays nice with shell script that runs xlmr_bench.
    Avoids curly braces and spaces that makes shell script str input unhappy.
    """
    seqlen_dist_lst = list(seqlen_dist_dict.items())
    seqlen_dist_str = json.dumps(seqlen_dist_lst)
    seqlen_dist_str = seqlen_dist_str.replace(" ", "")  # remove spaces
    return seqlen_dist_str
a61c51debff922d128fbb26bbe2121063511d4c4
9,271
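The serialized string can be recovered with json.loads, since it is plain JSON without spaces; for example:

import json

s = dict_serialize({1: 'a', 2: 'b'})
print(s)                    # [[1,"a"],[2,"b"]]
print(dict(json.loads(s)))  # {1: 'a', 2: 'b'}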
def pi_cdecimal():
    """cdecimal"""
    D = C.Decimal
    lasts, t, s, n, na, d, da = D(0), D(3), D(3), D(1), D(0), D(0), D(24)
    while s != lasts:
        lasts = s
        n, na = n+na, na+8
        d, da = d+da, da+32
        t = (t * n) / d
        s += t
    return s
384bedfc4ca9ba2f869e062581eddc917f9a0104
9,272
def e_list(a_list: AList) -> set[E]:
    """Unique elements in adjacency list."""
    return set(e for n in a_list for nb in a_list[n] for e in a_list[n][nb])
a59f6170b08faf94d05059f2e77c68f3290acf88
9,273
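For illustration, assuming the AList type alias is a nested mapping of the form node -> neighbour -> set of edge labels (the aliases themselves are not shown in the record):

# Hypothetical nested adjacency list: node -> neighbour -> set of edge labels.
a_list = {
    'a': {'b': {1, 2}},
    'b': {'a': {1, 2}, 'c': {3}},
    'c': {'b': {3}},
}
print(e_list(a_list))  # {1, 2, 3}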
import os import logging def load_complete_state(options, cwd, subdir, skip_update): """Loads a CompleteState. This includes data from .isolate and .isolated.state files. Never reads the .isolated file. Arguments: options: Options instance generated with process_isolate_options. For either options.isolate and options.isolated, if the value is set, it is an absolute path. cwd: base directory to be used when loading the .isolate file. subdir: optional argument to only process file in the subdirectory, relative to CompleteState.root_dir. skip_update: Skip trying to load the .isolate file and processing the dependencies. It is useful when not needed, like when tracing. """ assert not options.isolate or os.path.isabs(options.isolate) assert not options.isolated or os.path.isabs(options.isolated) cwd = file_path.get_native_path_case(unicode(cwd)) if options.isolated: # Load the previous state if it was present. Namely, "foo.isolated.state". # Note: this call doesn't load the .isolate file. complete_state = CompleteState.load_files(options.isolated) else: # Constructs a dummy object that cannot be saved. Useful for temporary # commands like 'run'. There is no directory containing a .isolated file so # specify the current working directory as a valid directory. complete_state = CompleteState(None, SavedState(os.getcwd())) if not options.isolate: if not complete_state.saved_state.isolate_file: if not skip_update: raise ExecutionError('A .isolate file is required.') isolate = None else: isolate = complete_state.saved_state.isolate_filepath else: isolate = options.isolate if complete_state.saved_state.isolate_file: rel_isolate = file_path.safe_relpath( options.isolate, complete_state.saved_state.isolated_basedir) if rel_isolate != complete_state.saved_state.isolate_file: # This happens if the .isolate file was moved for example. In this case, # discard the saved state. logging.warning( '--isolated %s != %s as saved in %s. Discarding saved state', rel_isolate, complete_state.saved_state.isolate_file, isolatedfile_to_state(options.isolated)) complete_state = CompleteState( options.isolated, SavedState(complete_state.saved_state.isolated_basedir)) if not skip_update: # Then load the .isolate and expands directories. complete_state.load_isolate( cwd, isolate, options.path_variables, options.config_variables, options.extra_variables, options.blacklist, options.ignore_broken_items, options.collapse_symlinks) # Regenerate complete_state.saved_state.files. if subdir: subdir = unicode(subdir) # This is tricky here. If it is a path, take it from the root_dir. If # it is a variable, it must be keyed from the directory containing the # .isolate file. So translate all variables first. translated_path_variables = dict( (k, os.path.normpath(os.path.join(complete_state.saved_state.relative_cwd, v))) for k, v in complete_state.saved_state.path_variables.iteritems()) subdir = isolate_format.eval_variables(subdir, translated_path_variables) subdir = subdir.replace('/', os.path.sep) if not skip_update: complete_state.files_to_metadata(subdir, options.collapse_symlinks) return complete_state
4f33fefd254519e2a7c3e8c98859e70b553be015
9,274
def GDAL_QUERY(filename, sql, data={}):
    """ GDAL_QUERY """
    res = []
    sql = sformat(sql, data)
    ds = ogr.OpenShared(filename)
    if ds:
        try:
            layer = ds.ExecuteSQL(sql)
            definition = layer.GetLayerDefn()
            n = definition.GetFieldCount()
            for feature in layer:
                row = {}
                for i in range(n):
                    fieldname = definition.GetFieldDefn(i).GetName()
                    row[fieldname] = feature.GetField(fieldname)
                res += [row]
        except Exception as ex:
            # Python 3 exception/print syntax (the original used Python 2 forms).
            print("GDAL_QUERY Exception:", ex)
    return res
33e455ef64bf0d168f9d2c03c9ba630a2d9729c3
9,275
def get_event_bpt_hea():
    """
    Get hardware address for BREAKPOINT event

    @return: hardware address
    """
    ev = ida_dbg.get_debug_event()
    assert ev, "Could not retrieve debug event"
    return ida_idd.get_event_bpt_hea(ev)
ecbb928b48788055328f45ed796a76f8ee7e7fa8
9,276
import torch def solve_maxent_ce(payoffs, steps=1000000, lams=None, lr=None): """Calculates the maximum-entropy correlated equilibrium as defined in Ortiz et al. (2007). payoffs (torch.Tensor): Joint payoff tensor. steps (int, optional): Number of SGD steps to use in calculations (default: 1000000). lams (torch.Tensor): Initialization logits (default: auto-initialied). lr (float): SGD learning rate (default: auto-computed). Ortiz et al., "Maximum entropy correlated equilibria", 2007, http://proceedings.mlr.press/v2/ortiz07a/ortiz07a.pdf """ n = payoffs.size(0) action_counts = tuple(payoffs.shape[1:]) if lr is None: tot = 0.0 for i in range(n): ac = action_counts[i] payoff_permuted = payoffs[i].transpose(0, i) gain_mat = payoff_permuted.view(ac, 1, -1) - payoff_permuted.view(1, ac, -1) tot += torch.abs(gain_mat).sum(dim=0).max().item() lr = 0.9 / tot if lams is None: lams = [(lr * payoffs.new_ones((i, i))) for i in action_counts] for i in range(n): rac = torch.arange(action_counts[i]) lams[i][rac, rac] = 0.0 for _ in range(steps): log_policy = _lams_to_log_policy(lams, payoffs) policy = torch.exp(log_policy) pos_regrets = _get_regret(policy, payoffs, positive=True) neg_regrets = _get_regret(policy, payoffs, positive=False) eps = 0.5 ** 125 for i in range(n): ac = action_counts[i] rac = torch.arange(ac) chg = ((pos_regrets[i] + eps) / (pos_regrets[i] + neg_regrets[i] + 2 * eps)) - 0.5 chg[rac, rac] = 0.0 lams[i].add_(lr, chg) lams[i].clamp_(min=0.0) return policy
0004b6bbdd5347987c069a68d5baf9a707c85b0c
9,277
def d_latlon(p1, p2):
    """
    Compute the distance between two points. The original file used a more
    complex and more expensive algorithm; a relatively simpler algorithm is
    used here instead, without losing much precision.
    """
    lon_diff, lat_diff = p1 - p2
    lon_diff *= cos((p1[1] + p2[1]) * 0.00872664625997165)
    return sqrt(lat_diff * lat_diff + lon_diff * lon_diff) * earth_radians
53fb2c89f5df196f3ae0fd5ccc67b082b246a580
9,278
def _path_list_creator(path, file_prefix_name, number_of_digits_zfill, file_suffix_name):
    """Creates a list of paths where the files have a predefined prefix,
    an incremental number and a predefined suffix on their name,
    respectively. Eg.: img01.zdf

    Args:
        path: a path that leads to the files directory
        file_prefix_name: a string that comes before the number
        number_of_digits_zfill: a number of digits in the number
        file_suffix_name: a string that comes after the number

    Returns:
        list_of_paths: list of appended paths
    """
    num = 1
    list_of_paths = []

    while True:
        file_path = path / f"{file_prefix_name}{str(num).zfill(number_of_digits_zfill)}{file_suffix_name}"
        list_of_paths.append(file_path)

        next_file_path = path / f"{file_prefix_name}{str(num+1).zfill(number_of_digits_zfill)}{file_suffix_name}"
        if not next_file_path.exists():
            return list_of_paths

        num = num + 1
4850edbbf544284b0736ee52188bd53119c50fdf
9,279
import sys import os from warnings import warn def determine_paths(env): """ Fill the 'CUDA_TOOLKIT_PATH' into environment if it is not there. @return: the paths. @rtype: tuple """ home = os.environ.get('HOME', '') programfiles = os.environ.get('PROGRAMFILES', '') homedrive = os.environ.get('HOMEDRIVE', '') # find CUDA Toolkit path and set CUDA_TOOLKIT_PATH. cudaToolkitPath = os.environ.get('CUDA_TOOLKIT_PATH', '') if not cudaToolkitPath: paths = [ '/'.join([home, 'NVIDIA_CUDA_TOOLKIT']), '/'.join([home, 'Apps', 'NVIDIA_CUDA_TOOLKIT']), '/'.join([home, 'Apps', 'CudaToolkit']), '/'.join([home, 'Apps', 'CudaTK']), '/'.join(['/usr', 'local', 'NVIDIA_CUDA_TOOLKIT']), '/'.join(['/usr', 'local', 'CUDA_TOOLKIT']), '/'.join(['/usr', 'local', 'cuda_toolkit']), '/'.join(['/usr', 'local', 'CUDA']), '/'.join(['/usr', 'local', 'cuda']), '/'.join(['/Developer', 'NVIDIA CUDA TOOLKIT']), '/'.join(['/Developer', 'CUDA TOOLKIT']), '/'.join(['/Developer', 'CUDA']), '/'.join([programfiles, 'NVIDIA Corporation', 'NVIDIA CUDA TOOLKIT']), '/'.join([programfiles, 'NVIDIA Corporation', 'NVIDIA CUDA']), '/'.join([programfiles, 'NVIDIA Corporation', 'CUDA TOOLKIT']), '/'.join([programfiles, 'NVIDIA Corporation', 'CUDA']), '/'.join([programfiles, 'NVIDIA', 'NVIDIA CUDA TOOLKIT']), '/'.join([programfiles, 'NVIDIA', 'NVIDIA CUDA']), '/'.join([programfiles, 'NVIDIA', 'CUDA TOOLKIT']), '/'.join([programfiles, 'NVIDIA', 'CUDA']), '/'.join([programfiles, 'CUDA TOOLKIT']), '/'.join([programfiles, 'CUDA']), '/'.join([homedrive, 'CUDA TOOLKIT']), '/'.join([homedrive, 'CUDA']), ] cudaToolkitPath = find_paths(paths) if cudaToolkitPath: sys.stdout.write( 'scons: CUDA Toolkit found in %s\n' % cudaToolkitPath) else: warn('Cannot find the CUDA Toolkit path. ' 'Please set it to CUDA_TOOLKIT_PATH environment variable.') env['CUDA_TOOLKIT_PATH'] = cudaToolkitPath return cudaToolkitPath
6ff582d08ce449a57f79da2eba390459b9e2b799
9,280
def crosscorrelation(array1, array2, std1, std2, **kwargs):
    """ Compute crosscorrelation. """
    _ = std1, std2, kwargs
    xp = cp.get_array_module(array1) if CUPY_AVAILABLE else np
    window = array1.shape[-1]
    pad_width = [(0, 0)] * (array2.ndim - 1) + [(window//2, window - window//2)]
    padded = xp.pad(array2, pad_width=tuple(pad_width))

    accumulator = Accumulator('argmax')
    for i in range(window):
        corrs = (array1 * padded[..., i:i+window]).sum(axis=-1)
        accumulator.update(corrs)
    return accumulator.get(final=True).astype(float) - window//2
b24e3577d2a8d28444a4eefd1ef1d80924f08aaf
9,281
def score_max_depths(graph, max_depths):
    """
    In order to assess the quality of the approximate partitioning method
    we've developed, we will run it with different values for max_depth
    and see how it affects the norm_cut score of the resulting partitions.
    Recall that smaller norm_cut scores correspond to better partitions.

    Params:
      graph........a networkx Graph
      max_depths...a list of ints for the max_depth values to be passed
                   to calls to partition_girvan_newman

    Returns:
      A list of (int, float) tuples representing the max_depth and the
      norm_cut value obtained by the partitions returned by
      partition_girvan_newman. See Log.txt for an example.
    """
    ###TODO
    result = []
    for n in max_depths:
        components = partition_girvan_newman(graph, n)
        result.append((n, norm_cut(components[0], components[1], graph)))

    return result
35abf18b9bb3299262dd25923c3dc7cc832a90fc
9,282
def build_multi(mapping, inserts, key_residues, pdbfnames, chains): """Superimpose multiple structures onto a reference, showing equivalent selected residues in each. To reduce clutter, only show residues deviating from the reference side chain by at least `threshold` Angstroms RMS. """ # TODO - ensure Pymol's automatic struct colors aren't clobbered pml_names = [get_pml_name(pfn, cid) for pfn, cid in zip(pdbfnames, chains)] ref_pml_name, ref_chn = (pml_names[0], chains[0]) outs = [mk_intro_multi(pdbfnames, pml_names, chains), HR, mk_struct(ref_pml_name, 'RefStruct_'+ref_pml_name, chain=ref_chn, color='smudge', transparency=0.7)] if inserts: outs.extend(make_inserts(inserts, 'RefStruct_'+ref_pml_name, ref_chn, 'gray70')) if key_residues: # Side chains for the reference PDB outs.extend(make_residues(mapping, key_residues, ref_pml_name, ref_chn)) for eqv_pml_name, eqv_chn in zip(pml_names[1:], chains[1:]): outs.append(mk_struct(eqv_pml_name, 'EqvStruct_'+eqv_pml_name, chain=eqv_chn, color='slate', transparency=0.7)) # Side chains for the other PDBs if inserts: outs.extend(make_inserts(inserts, 'EqvStruct_'+eqv_pml_name, eqv_chn, 'marine')) if key_residues: # Generate PyMOL script lines outs.extend(make_residues(mapping, key_residues, eqv_pml_name, eqv_chn)) outs.extend([HR, mk_outro()]) return '\n'.join(outs) # just the script
7ab41587b08d30960ca28e651c71e7e03d323df9
9,283
import re


def tamper(payload, **kwargs):
    """
    Replaces instances of UNION with -.1UNION

    Requirement:
        * MySQL

    Notes:
        * Reference: https://raw.githubusercontent.com/y0unge/Notes/master/SQL%20Injection%20WAF%20Bypassing%20shortcut.pdf

    >>> tamper('1 UNION ALL SELECT')
    '1-.1UNION ALL SELECT'
    >>> tamper('1" UNION ALL SELECT')
    '1"-.1UNION ALL SELECT'
    """
    return re.sub(r"(?i)\s+(UNION )", r"-.1\g<1>", payload) if payload else payload
cbf4fc5b81bc7760aafe6cf65fa498945285e5bb
9,284
def svn_wc_transmit_prop_deltas(*args):
    """
    svn_wc_transmit_prop_deltas(char path, svn_wc_adm_access_t adm_access,
        svn_wc_entry_t entry, svn_delta_editor_t editor, void baton,
        apr_pool_t pool) -> svn_error_t
    """
    return _wc.svn_wc_transmit_prop_deltas(*args)
d92cff618027f3bc763491c7122bdf5187b6ba15
9,285
from typing import Mapping
from typing import Container
from typing import Set
from typing import Sequence


def _make_immutable(obj):
    """Recursively convert a container and objects inside of it into immutable data types."""
    if isinstance(obj, (text_type, binary_type)):
        return obj
    elif isinstance(obj, Mapping):
        temp_dict = {}
        for key, value in obj.items():
            if isinstance(value, Container):
                temp_dict[key] = _make_immutable(value)
            else:
                temp_dict[key] = value
        return ImmutableDict(temp_dict)
    elif isinstance(obj, Set):
        temp_set = set()
        for value in obj:
            if isinstance(value, Container):
                temp_set.add(_make_immutable(value))
            else:
                temp_set.add(value)
        return frozenset(temp_set)
    elif isinstance(obj, Sequence):
        temp_sequence = []
        for value in obj:
            if isinstance(value, Container):
                temp_sequence.append(_make_immutable(value))
            else:
                temp_sequence.append(value)
        return tuple(temp_sequence)

    return obj
1f7b51c7b0c5d16dfd9fb0eb10e1ca9410287f85
9,286
def get_source_tokens_tensor(src_tokens):
    """
    To enable integration with PyText, src_tokens should be able to support
    more features than just token embeddings. Hence when dictionary features
    are passed from PyText it will be passed as a tuple
    (token_embeddings, dict_feat, ..). Thus, in this case where we need the
    source tokens tensor (eg to calculate batch size = source_tokens_tensor.size(0)),
    we get the first element on the tuple which is always guaranteed
    to be source tokens and do the necessary operation.
    eg : bsz, _ = get_source_tokens_tensor(source_tokens)[0].size(0)
    """
    if type(src_tokens) is tuple:
        return src_tokens[0]
    else:
        return src_tokens
cf20ceeba82c595dc62b267794ca758360e0386b
9,287
def merge_config_and_args(config, args):
    """
    Creates a configuration dictionary based upon command line arguments.

    Parameters
    ----------
    config : dict
        configurations loaded from the config file
    args : object
        arguments and their values which could be
        passed in the command line.

    Returns
    -------
    dict
        updated configuration dictionary
        with arguments passed in command line.
    """
    arg_dict = vars(args)
    stripped_dict = {
        k: v for k, v in arg_dict.items() if (v is not None)
    }
    return {**config, **stripped_dict}
3935cfc525fb99b9513a608ef0e5e8fd7de708f3
9,288
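A brief usage sketch with an argparse-style namespace; the field names below are illustrative, not taken from the original project.

from argparse import Namespace

config = {'epochs': 10, 'lr': 0.001, 'device': 'cpu'}
args = Namespace(epochs=20, lr=None, device=None)  # only epochs was set on the CLI
print(merge_config_and_args(config, args))
# {'epochs': 20, 'lr': 0.001, 'device': 'cpu'}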
def contemp2pottemp(salt, tcon, tpot0=None, **rootkwargs): """Calculate conservative temp -> potential temp. Calculate the potential temperature from the absolute salinity and conservative temperature. Applies either Newton's method or Halley's method. See `aux.rootfinder` for details on implementation and control arguments. Arguments: salt (float or array): Absolute salinity in g kg-1. tcon (float or array): Conservative temperature in degrees Celsius. tpot0 (float or array, optional): Initial estimate of potential temperature in degrees Celsius. If None (default) then the conservative temperature is used. rootkwargs (dict, optional): Additional arguments for the root finder; see `aux.rootfinder` for available arguments and defaults. Returns: tpot (float or array): Potential temperature in degrees Celsius. """ # Set initial guess if tpot0 is None: tpot0 = tcon # Set up a function for the rootfinder update = rootkwargs.get('update', 'newton') if update == 'newton': dtpmax = 2 elif update == 'halley': dtpmax = 3 else: raise ValueError( 'The update method must be either "newton" or "halley"') y0 = CSEA*tcon args = (salt,) def derfun(tpot, salt): # Calculate Gibbs function *with adjusted coefficients* (g0s, *__) = gibbs0(salt, tpot, dtpmax, orig=False) tabs = TCELS + tpot hs = [g0s[0]-tabs*g0s[1], -tabs*g0s[2]] if dtpmax > 2: hs.append(-g0s[2] - tabs*g0s[3]) return hs # Apply the root-finding method tpot = aux.rootfinder( derfun, y0, tpot0, TMIN, CSEA*TMIN, args, **rootkwargs) return tpot
fd627f1561e21daaa18f9d84c0fc12d5ab87e7e5
9,289
import random
import string


def get_random_string(length: int) -> str:
    """
    With combination of lower and upper case
    """
    return ''.join(random.choice(string.ascii_letters) for i in range(length))
b9d0c760e92603a4fe1f625615b96a1c2265f22a
9,290
import errno def _write_callback(connection_id, data_buffer, data_length_pointer): """ Callback called by Secure Transport to actually write to the socket :param connection_id: An integer identifing the connection :param data_buffer: A char pointer FFI type containing the data to write :param data_length_pointer: A size_t pointer FFI type of the amount of data to write. Will be overwritten with the amount of data actually written on return. :return: An integer status code of the result - 0 for success """ try: self = _connection_refs.get(connection_id) if not self: socket = _socket_refs.get(connection_id) else: socket = self._socket if not self and not socket: return 0 data_length = deref(data_length_pointer) data = bytes_from_buffer(data_buffer, data_length) if self and not self._done_handshake: self._client_hello += data error = None try: sent = socket.send(data) except (socket_.error) as e: error = e.errno if error is not None and error != errno.EAGAIN: if error == errno.ECONNRESET or error == errno.EPIPE: return SecurityConst.errSSLClosedNoNotify return SecurityConst.errSSLClosedAbort if sent != data_length: pointer_set(data_length_pointer, sent) return SecurityConst.errSSLWouldBlock return 0 except (KeyboardInterrupt) as e: self._exception = e return SecurityConst.errSSLPeerUserCancelled
4daa1130c18b28abe92b5a550d1aac734f74d3dc
9,291
import functools
import pickle


def cache(**kwargs):
    """
    Cache decorator.

    Should be called with `@cache(ttl_sec=123, transform=transform_response)`

    Arguments:
        ttl_sec: optional, number
            The time in seconds to cache the response if status code < 400
        transform: optional, func
            The transform function of the wrapped function to convert the
            function response to a request response

    Usage Notes:
        If the wrapped function returns a tuple, the transform function
        will not be run on the response. The first item of the tuple must
        be serializable.

        If the wrapped function returns a single response, the transform
        function must be passed to the decorator. The wrapped function's
        response must be serializable.

    Decorators in Python are just higher-order functions that accept a
    function as a single parameter, and return a function that wraps the
    input function. In this case, because we need to pass kwargs into our
    decorator function, we need an additional layer of wrapping; the
    outermost function accepts the kwargs, and when called, returns the
    decorating function `outer_wrap`, which in turn returns the wrapped
    input function, `inner_wrap`.

    @functools.wraps simply ensures that if Python introspects
    `inner_wrap`, it refers to `func` rather than `inner_wrap`.
    """
    ttl_sec = kwargs["ttl_sec"] if "ttl_sec" in kwargs else default_ttl_sec
    transform = kwargs["transform"] if "transform" in kwargs else None
    redis = redis_connection.get_redis()

    def outer_wrap(func):
        @functools.wraps(func)
        def inner_wrap(*args, **kwargs):
            has_user_id = 'user_id' in request.args and request.args['user_id'] is not None
            key = extract_key(request.path, request.args.items())
            if not has_user_id:
                cached_resp = redis.get(key)
                if cached_resp:
                    logger.info(f"Redis Cache - hit {key}")
                    try:
                        deserialized = pickle.loads(cached_resp)
                        if transform is not None:
                            return transform(deserialized)
                        return deserialized, 200
                    except Exception as e:
                        logger.warning(f"Unable to deserialize cached response: {e}")

                logger.info(f"Redis Cache - miss {key}")

            response = func(*args, **kwargs)

            if len(response) == 2:
                resp, status_code = response
                if status_code < 400:
                    serialized = pickle.dumps(resp)
                    redis.set(key, serialized, ttl_sec)
                return resp, status_code

            serialized = pickle.dumps(response)
            redis.set(key, serialized, ttl_sec)
            return transform(response)

        return inner_wrap

    return outer_wrap
10be4de3f0c6125fb502e2b3598bce18eff52375
9,292
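A hedged usage sketch for the decorator above, assuming a Flask app (the decorator reads flask's `request`); the blueprint, route, and track data are illustrative only.

from flask import Blueprint

bp = Blueprint("tracks", __name__)


def transform_response(data):
    # Convert the raw/cached payload into a (response, status) pair.
    return {"data": data}, 200


@bp.route("/tracks", methods=["GET"])
@cache(ttl_sec=60, transform=transform_response)
def get_tracks():
    # Returns a single serializable object (not a 2-tuple), so `transform`
    # is required, per the usage notes in the decorator's docstring.
    return ["track-1", "track-2", "track-3"]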
def RegisterTensorTransformer(name):
    """Registers a tensor transformer under the given name."""

    def decorator(obj):
        TENSOR_TRANSFORMER_REGISTRY[name] = obj
        obj.name = name
        return obj

    return decorator
e033e09ff5172175328c02638a07e9b0ae112615
9,293
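A small registration sketch for the decorator above; the registry dict is referenced but not shown in the snippet, so it is declared here as an assumption, and the transformer itself is illustrative.

import numpy as np

TENSOR_TRANSFORMER_REGISTRY = {}  # assumed module-level registry


@RegisterTensorTransformer("normalize")
class NormalizeTransformer:
    def __call__(self, tensor):
        tensor = np.asarray(tensor, dtype=float)
        return (tensor - tensor.mean()) / (tensor.std() + 1e-8)


# Look the class up by its registered name and use it.
transformer = TENSOR_TRANSFORMER_REGISTRY["normalize"]()
assert NormalizeTransformer.name == "normalize"
print(transformer([1.0, 2.0, 3.0]))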
import os
import configparser


def read_token():
    """Reads and returns the authentication token.

    It tries to read the token from an already existing config file first.
    If there is no token it will get one from the putio api and store it
    in a config file.

    Location of the config file::

        ~/.putiodown

    :returns: putio authentication token
    :rtype: str
    """
    home = os.path.expanduser('~')
    config_file = os.path.join(home, '.putiodown')
    config = configparser.ConfigParser()

    if os.path.exists(config_file):
        config.read(config_file)
        token = config.get('putiodown', 'token')
    else:
        # get_token() is defined elsewhere in the package and obtains a
        # token from the put.io API, as described in the docstring.
        token = get_token()
        config['putiodown'] = {}
        config['putiodown']['token'] = token
        with open(config_file, 'w') as f:
            config.write(f)

    return token
9358ddbc530804e4efaa976881f33bd8848b21e0
9,294
from base64 import b64encode
from copy import copy
from http.server import BaseHTTPRequestHandler
from io import BytesIO
from pathlib import Path
from urllib.parse import parse_qs, unquote, urlparse
import shutil
import threading


def handler_factory(
    jinja_template_rendered: BytesIO,
    base_dir: Path,
    events: list = None,
    username: str = "thqm",
    password: str = None,
    oneshot: bool = False,
    allow_custom_events: bool = False,
):
    """Create a HTTPHandler class with the desired properties.

    Events should appear following the url parameter 'event', controlling
    the server is done through the 'command' url parameter.

    Args:
        jinja_template_rendered: BytesIO object of the rendered template.
        base_dir: directory containing the static/ and templates/ folders.
        events: allowed events.
        username: basic auth username.
        password: basic auth password.
        oneshot: stop server after first click.
        allow_custom_events: the server will echo the event regardless of
            it being in the events list.

    Returns:
        HTTPHandler class.
    """

    class HTTPHandler(BaseHTTPRequestHandler):

        extensions_map = {
            ".html": "text/html",
            "": "application/octet-stream",  # Default
            ".css": "text/css",
            ".js": "text/javascript",
            ".png": "image/png",
            ".jpg": "image/jpeg",
            ".jpeg": "image/jpeg",
            ".svg": "image/svg+xml",
        }

        def __init__(self, *args, **kwargs):
            if events is None:
                self.events = []
            else:
                self.events = events
            self.require_login = password is not None
            self._auth = b64encode(f"{username}:{password}".encode()).decode()
            super().__init__(*args, **kwargs)

        def _do_GET(self):
            f_obj = self.send_head()
            if f_obj:
                self.copyfile(f_obj, self.wfile)
                f_obj.close()

        def do_GET(self):
            """Serve a GET request."""
            if self.require_login:
                if self.headers.get("Authorization") == "Basic " + self._auth:
                    self._do_GET()
                else:
                    self.do_HEADAUTH()
            else:
                self._do_GET()

        def do_HEAD(self):
            """Serve a HEAD request."""
            f_obj = self.send_head()
            if f_obj:
                f_obj.close()

        def do_HEADAUTH(self):
            """Handle the authentication in the header."""
            self.send_response(401)
            self.send_header("WWW-Authenticate", 'Basic realm="thqm"')
            self.send_header("Content-type", "text/html")
            self.end_headers()

        def reset(self):
            """Redirect to /."""
            self.send_response(302)
            self.send_header("Location", "/")
            self.end_headers()

        def send_head(self):
            """Common code for GET and HEAD commands.

            This sends the response code and MIME headers.

            Return value is either a file object (which has to be copied
            to the outputfile by the caller unless the command was HEAD,
            and must be closed by the caller under all circumstances), or
            None, in which case the caller has nothing further to do.
            """
            parsed_path = urlparse(self.path)
            if parsed_path.query:
                query = parse_qs(parsed_path.query)
                if "event" in query:
                    event = query["event"][0]
                    if allow_custom_events or event in self.events:
                        # `echo` is provided by the surrounding package (not shown here).
                        echo(event)
                        if oneshot:
                            self.shutdown()
                        else:
                            self.reset()
                if "command" in query:
                    command = query["command"][0]
                    if command == "shutdown":
                        self.shutdown()

            path = unquote(parsed_path.path)
            f_obj = None
            ctype = None
            if path == "/":
                # if main page
                f_obj = copy(jinja_template_rendered)
                ctype = "text/html"
            else:
                try:
                    f_obj = open(base_dir / path[1:], "rb")
                except IOError:
                    pass

            if f_obj is not None:
                if not ctype:
                    ctype = self.guess_type(path)
                self.send_response(200)
                self.send_header("Content-type", ctype)
                self.end_headers()
            return f_obj

        @staticmethod
        def translate_path(path: str) -> str:
            """Cleanup path."""
            # abandon query parameters
            path = path.split("?", 1)[0]
            path = path.split("#", 1)[0]
            # remove first /
            return unquote(path)[1:]

        @staticmethod
        def get_query(path: str) -> str:
            """Get the first query parameter."""
            paths = path.split("?", 1)
            if len(paths) > 1:
                return paths[1]
            return ""

        def shutdown(self):
            """Shutdown the server."""
            killer = threading.Thread(target=self.server.shutdown)
            killer.start()

        @staticmethod
        def copyfile(source, outputfile):
            """Copy all data between two file objects.

            The SOURCE argument is a file object open for reading (or
            anything with a read() method) and the DESTINATION argument is
            a file object open for writing (or anything with a write()
            method).

            The only reason for overriding this would be to change the
            block size or perhaps to replace newlines by CRLF -- note
            however that the default server uses this to copy binary data
            as well.
            """
            shutil.copyfileobj(source, outputfile)

        def guess_type(self, path: str) -> str:
            """Guess the type of a file.

            Argument is a PATH (a filename).

            Return value is a string of the form type/subtype, usable for
            a MIME Content-type header.

            The default implementation looks the file's extension up in
            the table self.extensions_map, using application/octet-stream
            as a default; however it would be permissible (if slow) to
            look inside the data to make a better guess.
            """
            ext = Path(path).suffix.lower()
            return self.extensions_map.get(ext, self.extensions_map[""])

        def log_message(self, *args, **kwargs):
            """Disable all prints."""
            return

    return HTTPHandler
119f1ecd6ba6b3172087f85091360f5e5c0c909d
9,295
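A hedged sketch of serving the handler produced by the factory above with the standard library's HTTPServer; the rendered template bytes, port, and event names are placeholders, not values from the source.

from http.server import HTTPServer
from io import BytesIO
from pathlib import Path

rendered = BytesIO(b"<html><body>thqm</body></html>")  # stand-in for the rendered jinja template
handler_cls = handler_factory(
    rendered,
    base_dir=Path("."),
    events=["play", "pause"],
    password="secret",  # enables basic auth with the default username "thqm"
)

server = HTTPServer(("0.0.0.0", 8901), handler_cls)
server.serve_forever()  # blocks; a GET /?command=shutdown stops it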
def add():
    """This is a temporary function to allow users to easily add tracks,
    mainly for testing."""
    form = SQLFORM(db.memo)
    if form.process().accepted:
        redirect(URL('default', 'index'))
    return dict(form=form)
9ae3f5f707b880667790fa1396e25999188a6c68
9,296
from pathlib import Path
from typing import Callable, Dict, List, Optional
from uuid import UUID

from inmanta_plugins.terraform.helpers.const import (
    TERRAFORM_RESOURCE_STATE_PARAMETER,
)


async def test_update_failed(
    project: Project,
    server: Server,
    client: Client,
    environment: str,
    agent_factory: Callable[
        [UUID, Optional[str], Optional[Dict[str, str]], bool, List[str]], Agent
    ],
    function_temp_dir: str,
    cache_agent_dir: str,
):
    """
    This test creates a file, then updates it by moving it to a forbidden
    location. The update should fail, but the param containing the state
    should be updated anyway, showing the current file state, which is null.
    """
    file_path_object = Path(function_temp_dir) / Path("test-file.txt")

    provider = LocalProvider()
    local_file = LocalFile(
        "my file", str(file_path_object), "my original content", provider
    )

    await agent_factory(
        environment=environment,
        hostname="node1",
        agent_map={provider.agent: "localhost"},
        code_loader=False,
        agent_names=[provider.agent],
    )

    def model(purged: bool = False) -> str:
        m = (
            "\nimport terraform\n\n"
            + provider.model_instance("provider")
            + "\n"
            + local_file.model_instance("file", purged)
        )
        LOGGER.info(m)
        return m

    assert not file_path_object.exists()

    # Create
    create_model = model()
    project.compile(create_model)

    resource: Resource = project.get_resource(
        local_file.resource_type, resource_name="my file"
    )
    assert resource is not None
    resource_id = Id.resource_str(resource.id)

    async def get_param_short() -> Optional[str]:
        return await get_param(
            environment=environment,
            client=client,
            param_id=TERRAFORM_RESOURCE_STATE_PARAMETER,
            resource_id=resource_id,
        )

    assert (
        await get_param_short() is None
    ), "There shouldn't be any state set at this point for this resource"

    assert (
        await deploy_model(project, create_model, client, environment)
        == VersionState.success
    )

    assert await get_param_short() is not None, "A state should have been set by now"

    # Update
    forbidden_path_object = Path("/dev/test-file.txt")
    local_file.path = str(forbidden_path_object)

    update_model = model()
    assert (
        await deploy_model(project, update_model, client, environment)
        == VersionState.failed
    )

    param = await get_param_short()
    assert param is not None, "The state should still be there"
    assert param == "null", "The state should be empty as the new file couldn't deploy"

    # Delete
    delete_model = model(True)
    assert (
        await deploy_model(project, delete_model, client, environment)
        == VersionState.success
    )

    assert (
        await get_param_short() is None
    ), "The state should have been removed, but wasn't"
969fb6136ecf2fd1adc3651aaba6d5b44e795e70
9,297
def is_valid_mac(mac):
    """
    Validate mac address

    :param mac:
    :return: boolean
    """
    res = False
    if isinstance(mac, str):
        if mac:
            res = mac_address.match(mac.lower()) is not None
    return res
2d89a6afbe76d99d6d7cf3e1bfa2e9954c5f2a20
9,298
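The snippet above relies on a module-level `mac_address` pattern that is not shown; the definition below is an assumption for illustration, not taken from the source, and the assertions show the expected behaviour.

import re

# Assumed pattern: six pairs of hex digits separated by ':' or '-',
# matched against the lowercased input as is_valid_mac does.
mac_address = re.compile(r"^([0-9a-f]{2}[:-]){5}[0-9a-f]{2}$")

assert is_valid_mac("00:1A:2b:3C:4d:5E")
assert not is_valid_mac("00:1A:2b:3C:4d")  # only five octets
assert not is_valid_mac(42)                # non-string input is rejected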
def get_model(tokenizer, lstm_units):
    """
    Constructs the model,
    Embedding vectors => LSTM => 2 output Fully-Connected neurons with softmax activation
    """
    # get the GloVe embedding vectors
    embedding_matrix = get_embedding_vectors(tokenizer)
    model = Sequential()
    model.add(Embedding(len(tokenizer.word_index) + 1,
                        EMBEDDING_SIZE,
                        weights=[embedding_matrix],
                        trainable=False,
                        input_length=SEQUENCE_LENGTH))
    model.add(LSTM(lstm_units, recurrent_dropout=0.2))
    model.add(Dropout(0.3))
    model.add(Dense(2, activation="softmax"))
    # compile with the rmsprop optimizer,
    # as well as precision and recall metrics
    model.compile(optimizer="rmsprop", loss="categorical_crossentropy",
                  metrics=["accuracy", keras_metrics.precision(), keras_metrics.recall()])
    model.summary()
    return model
fd8352081898b4fcffe122a7058d0069caa7ab21
9,299
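A hedged training sketch around the snippet above, assuming tf.keras preprocessing utilities and the module-level SEQUENCE_LENGTH, EMBEDDING_SIZE, and get_embedding_vectors used inside get_model; the texts, labels, and class ordering are illustrative only.

import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

texts = ["free entry win a prize now", "are we still meeting for lunch"]
labels = np.array([[0, 1], [1, 0]])  # one-hot [ham, spam]; ordering is an assumption

tokenizer = Tokenizer()
tokenizer.fit_on_texts(texts)
X = pad_sequences(tokenizer.texts_to_sequences(texts), maxlen=SEQUENCE_LENGTH)

model = get_model(tokenizer, lstm_units=128)
model.fit(X, labels, epochs=1, batch_size=2)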