Dataset schema: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M). Each record below lists a code snippet (content), its SHA-1 hash (sha1), and a numeric identifier (id).
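The sha1 column appears to pair each content cell with a 40-character hash. Below is a minimal sketch of how a row could be checked, assuming the hash is the SHA-1 hex digest of the UTF-8 bytes of the content string (an inference from the column width, not something the dump documents); the record layout and example values are hypothetical.

import hashlib

def sha1_of_content(content: str) -> str:
    # Hex digest of the raw UTF-8 bytes of a record's content field.
    # Assumption: this is how the dataset's sha1 column was produced.
    return hashlib.sha1(content.encode("utf-8")).hexdigest()

def verify_record(record: dict) -> bool:
    # record is assumed to mirror the three columns above:
    # {"content": str, "sha1": str, "id": int}
    return sha1_of_content(record["content"]) == record["sha1"]

# Hypothetical record shaped like the rows below; values are illustrative only.
example = {"content": "def identity(x):\n    return x\n", "id": 0}
example["sha1"] = sha1_of_content(example["content"])
print(verify_record(example))  # True for this self-consistent example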
def errorString(node, error): """ Format error messages for node errors returned by checkLinkoStructure. inputs: node - the node for the error. error - a (backset, foreset) tuple, where backset is the set of missing backlinks and foreset is the set of missing forelinks. returns: string string - the error string message. """ back, fore = error[0], error[1] if len(back) == 0: back = 'None' if len(fore) == 0: fore = 'None' return ('Node {0}: missing backlinks {1},' ' missing forelinks {2}').format(node, back, fore)
df87b7838ed84fe4e6b95002357f616c96d04ad0
14,182
def deep_update(target, source): """ Deep merge two dicts """ if isinstance(source, dict): for key, item in source.items(): if key in target: target[key] = deep_update(target[key], item) else: target[key] = source[key] return target
5db0c6fa31f3d4408a359d90dbf6e50dfdc12cdc
14,183
import hashlib def md5_hash_file(path): """ Return a md5 hashdigest for a file or None if path could not be read. """ hasher = hashlib.md5() try: with open(path, 'rb') as afile: buf = afile.read() hasher.update(buf) return hasher.hexdigest() except IOError: # This may happen if path has been deleted return None
514cafcffa0ae56d54f43508ece642d25b4be442
14,184
def Constant(value): """ Produce an object suitable for use as a source in the 'connect' function that evaluates to the given 'value' :param value: Constant value to provide to a connected target :return: Output instance port of an instance of a Block that produces the given constant when evaluated """ global _constantCounter blockName = "Constant" + str(_constantCounter) constBlock = defineBlock(blockName) defineOutputs(constBlock, "out") defineBlockOutputBehaviour(constBlock.out, lambda: value) setMetaData(constBlock.out, "Sensation-Producing", False) inst = createInstance(blockName, "constant" + str(_constantCounter)) _constantCounter += 1 return inst.out
1763d657e3396286516e6669e57b7ee297463b14
14,185
def _Backward3a_T_Ps(P, s): """Backward equation for region 3a, T=f(P,s) Parameters ---------- P : float Pressure [MPa] s : float Specific entropy [kJ/kgK] Returns ------- T : float Temperature [K] References ---------- IAPWS, Revised Supplementary Release on Backward Equations for the Functions T(p,h), v(p,h) and T(p,s), v(p,s) for Region 3 of the IAPWS Industrial Formulation 1997 for the Thermodynamic Properties of Water and Steam, http://www.iapws.org/relguide/Supp-Tv%28ph,ps%293-2014.pdf, Eq 6 Examples -------- >>> _Backward3a_T_Ps(20,3.8) 628.2959869 >>> _Backward3a_T_Ps(100,4) 705.6880237 """ I = [-12, -12, -10, -10, -10, -10, -8, -8, -8, -8, -6, -6, -6, -5, -5, -5, -4, -4, -4, -2, -2, -1, -1, 0, 0, 0, 1, 2, 2, 3, 8, 8, 10] J = [28, 32, 4, 10, 12, 14, 5, 7, 8, 28, 2, 6, 32, 0, 14, 32, 6, 10, 36, 1, 4, 1, 6, 0, 1, 4, 0, 0, 3, 2, 0, 1, 2] n = [0.150042008263875e10, -0.159397258480424e12, 0.502181140217975e-3, -0.672057767855466e2, 0.145058545404456e4, -0.823889534888890e4, -0.154852214233853, 0.112305046746695e2, -0.297000213482822e2, 0.438565132635495e11, 0.137837838635464e-2, -0.297478527157462e1, 0.971777947349413e13, -0.571527767052398e-4, 0.288307949778420e5, -0.744428289262703e14, 0.128017324848921e2, -0.368275545889071e3, 0.664768904779177e16, 0.449359251958880e-1, -0.422897836099655e1, -0.240614376434179, -0.474341365254924e1, 0.724093999126110, 0.923874349695897, 0.399043655281015e1, 0.384066651868009e-1, -0.359344365571848e-2, -0.735196448821653, 0.188367048396131, 0.141064266818704e-3, -0.257418501496337e-2, 0.123220024851555e-2] Pr = P/100 sigma = s/4.4 suma = 0 for i, j, ni in zip(I, J, n): suma += ni * (Pr+0.240)**i * (sigma-0.703)**j return 760*suma
cb0b9b55106cf771e95505c00043e5772faaef40
14,186
import re def expandvars(s): """Expand environment variables of form %var%. Unknown variables are left unchanged. """ global _env_rx if '%' not in s: return s if _env_rx is None: _env_rx = re.compile(r'%([^|<>=^%]+)%') return _env_rx.sub(_substenv, s)
ede7861831ea9d9e74422eb3a92a13ba4d1937f2
14,187
def make_map_counts(events, ref_geom, pointing, offset_max): """Build a WcsNDMap (space - energy) with events from an EventList. The energy of the events is used for the non-spatial axis. Parameters ---------- events : `~gammapy.data.EventList` Event list ref_geom : `~gammapy.maps.WcsGeom` Reference WcsGeom object used to define geometry (space - energy) offset_max : `~astropy.coordinates.Angle` Maximum field of view offset. Returns ------- cntmap : `~gammapy.maps.WcsNDMap` Count cube (3D) in true energy bins """ count_map = WcsNDMap(ref_geom) fill_map_counts(count_map, events) # Compute and apply FOV offset mask offset_map = make_separation_map(ref_geom, pointing) offset_mask = offset_map.data >= offset_max count_map.data[:, offset_mask] = 0 return count_map
7a22340c8f3909d6ca559361290a4608a0321de1
14,188
def stats_aggregate(): """ RESTful CRUD Controller """ return crud_controller()
4a8439139257f39e0d2a34b576e9a9bd98cded5c
14,189
def format_dB(num): """ Returns a human-readable dB string. The raw value is in tenths of a dB, so it is divided by 10 to recover the first decimal digit. """ num /= 10 return f'{num:3.1f} dB'
13d6313834333ee2ea432cf08470b6ce1efe1ad6
14,190
def _check_index_dtype(k): """ Check the dtype of the index. Parameters ---------- k: slice or array_like Index into an array Examples -------- >>> _check_index_dtype(0) dtype('int64') >>> _check_index_dtype(np.datetime64(0, 'ms')) dtype('<M8[ms]') >>> _check_index_dtype(slice(5, 8)) dtype('int64') """ if not isinstance(k, slice): if hasattr(k, "__len__") and len(k) == 0: return np.intp return np.asarray(k).dtype arr = [v for v in (k.start, k.stop, k.step) if v is not None] return _check_index_dtype(arr)
f9f7bac24f7ceba57978d7e1aed7c4e052c79f35
14,191
def _wrapper_for_precessing_snr(args): """Wrapper function for _precessing_snr for a pool of workers Parameters ---------- args: tuple All args passed to _precessing_snr """ return _precessing_snr(*args)
4d64d7e658ecfeed6206abd0827f37805c3ecd0c
14,192
import logging def put_file_store(store_name, store, block_on_existing=None, user=None): # noqa: E501 """Create/update store # noqa: E501 :param store_name: Name of the store :type store_name: str :param store: Store information :type store: dict | bytes :rtype: FileStore """ if connexion.request.is_json: store = SwaggerFileStore.from_dict(connexion.request.get_json()) # noqa: E501 if store_name != store.name: return Error(code=400, message="URL and body names don't match"), 400 session = Database.get_session() try: # Check the store q = session.query(FileStore).filter(FileStore.name == store_name) # type: Query # Create new store or use existing model = None if q.first(): # Existing store if block_on_existing: return Error(code=1000, message="Already exists."), 400 model = q.first() else: model = FileStore() session.add(model) model.from_swagger_model(store, user=user) session.commit() q = session.query(FileStore).filter(FileStore.uid == model.uid) return q.first().to_swagger_model(user=user), 200 except Exception as e: logging.exception("File store put failed") session.rollback() return Error(code=500, message="Exception occurred"), 500
2e799c7fc2f394c925562c8600b83a1149586ad2
14,194
from typing import Dict from typing import Set from typing import Tuple def assign_sections(region_table: RegionTable, sections: Dict[str, int]): """Assign memory sections. This is a packing problem and therefore reasonably complex. A simplistic algorithm is used here which may not always be optimal if user assigned addresses are used for some sections. """ used_space: Set[Tuple[int, int]] = set() def in_used_space(start, end): return start > 0xfff or end > 0xfff or any( map(lambda x: (start >= x[0] and start <= x[1]) or (end >= x[0] and end <= x[1]), used_space)) def find_free_space(size): for _, end in used_space: start_to_try = end + 1 end_to_try = end + size if not in_used_space(start_to_try, end_to_try): return start_to_try, end_to_try raise AssemblyError("ran out of free space") for name, item in region_table.items(): if in_used_space(item.start, item.end): raise AssemblyError("region {} assigned in used space, memory is likely full".format(name)) used_space.add((item.start, item.end)) for section_name, section_size in sections.items(): section_start, section_end = find_free_space(section_size) used_space.add((section_start, section_end)) region_table[section_name] = Region(type="user", start=section_start, end=section_end, count=0)
6ad4af4c67be9e9d07a4464bfc1d3c529a5afd4b
14,195
def humanize_arrow_date( date ): """ Date is internal UTC ISO format string. Output should be "today", "yesterday", "in 5 days", etc. Arrow will try to humanize down to the minute, so we need to catch 'today' as a special case. """ try: then = arrow.get(date).to('local') now = arrow.utcnow().to('local') if then.date() == now.date(): human = "Today" else: human = then.humanize(now) if human == "in a day": human = "Tomorrow" except: human = date return human
511e5f7a85a5906d78ed9b252076b1f0e8ea02d9
14,196
def getCourseTeeHoles(request, courseId, courseTeeId): """ Getter function for list of courses and tees """ resultList = list(Tee.objects.filter(course_tee_id=courseTeeId).values('id', 'yardage', 'par', 'handicap', 'hole__id', 'hole__name', 'hole__number')) return JsonResponse({'data' : resultList})
7abac65449503d2309cf8cae49b7e555488333f9
14,197
def create_zone_ajax(request): """ This view tries to create a new zone and returns an JSON with either 'success' = True or 'success' = False and some errors. """ qd = request.POST.copy() # See if the domain exists. # Fail if it already exists or if it's under a delegated domain. root_domain = qd.get('root_domain', None) primary = qd.get('soa_primary', None) contact = qd.get('soa_contact', None) # Find all the NS entries nss = [] number_re = re.compile('nameserver_(\d+)') # parse nameserver bits from POST request. # compile some tuples that look like: # (<server_fqdn>, <ttl>, [<view_name>,..]) for k, server in request.POST.iteritems(): if k.startswith('nameserver_'): n = number_re.search(k) if not n: continue ns_number = n.groups()[0] views = [] if qd.get('private_view_{0}'.format(ns_number), 'off') == 'on': views.append('private') if qd.get('public_view_{0}'.format(ns_number), 'off') == 'on': views.append('public') ttl = qd.get('ttl_{0}'.format(ns_number)) if ttl and ttl.isdigit(): ttl = int(ttl) else: ttl = None nss.append( (server, ttl, views) ) try: with transaction.commit_on_success(): domain = _create_zone(root_domain, primary, contact, nss) except (ValueError, ValidationError), e: return HttpResponse(json.dumps({ 'success': False, 'error': str(e) }), status=400) return HttpResponse(json.dumps({ 'success': True, 'success_url': '/en-US/core/search/#q=zone=:{0}'.format( domain.name ) }))
05a0e8a3edc38bd2821057c131760b2c06fc452c
14,200
def main_plot(): """The view for rendering the scatter chart""" img = get_main_image() return send_file(img, mimetype='image/png', cache_timeout=0)
a285cd3bc9a54b96d4fa52d9cc8b13c1bd070cd2
14,201
def Ising2dT(beta = 0.4, h = 0, isSym = False): """ T = Ising2dT(J,h). ------------------------- Set up the initial tensor for 2d classical Ising model on a square lattice. Argument: J is defined to be beta * J = J / kT, and h is defined to be beta*h = h / kT, where J and h are conventional coupling constants. Return: a rank 4 tensor T[i,j,k,l]. Each index of the tensor represents physical classical spin, and the tensor T represents the Boltzmann weight for interaction on one plaquettes. """ pars = {"model":"ising", "dtype":"float64", "J":1, "H":h, "beta":beta, "symmetry_tensors":isSym} T0 = get_initial_tensor(pars) return T0
416fabb06a1e8aa0f57456d22c2a89fc4da869c6
14,202
def __grid_count(self): """Get number of grids in the case""" try: return self.__case_stub.GetGridCount(self.__request()).count except grpc.RpcError as exception: if exception.code() == grpc.StatusCode.NOT_FOUND: return 0 return 0
e6237a092b4714d787eb9d145f4e972deeaafb69
14,203
import math def timer(method): """ Decorator to time a function. :param method: Method to time. :type method: function """ def wrapper(*args, **kwargs): """ Start clock, do function with args, print rounded elapsed time. """ starttime = compat.perf_clock() method(*args, **kwargs) endtime = compat.perf_clock() - starttime endtime_proper = math.ceil(endtime * 100) / 100 # rounding mins, secs = divmod(endtime_proper, 60) hrs, mins = divmod(mins, 60) print("COMPLETED IN {0:02d}:{1:02d}:{2:02d}".format(int(hrs), int(mins), int(secs))) return wrapper
10105d77a32ce62500bdb86c7fbb772f03b4eff9
14,205
def plot_map_from_nc(path_nc, out_path, var_name, xaxis_min=0.0, xaxis_max=1.1, xaxis_step=0.1, annotate_date=False, yr=0, date=-1, xlabel='', title='', tme_name='time', show_plot=False, any_time_data=True, format='%.2f', land_bg=True, cmap=plt.cm.RdBu, grid=False, fill_mask=False): """ Plot var_name variable from netCDF file \b Args: path_nc: Name of netCDF file including path out_path: Output directory path + file name var_name: Name of variable in netCDF file to plot on map Returns: Nothing, side-effect: save an image """ logger.info('Plotting ' + var_name + ' in ' + path_nc) # Read netCDF file and get time dimension nc = util.open_or_die(path_nc, 'r', format='NETCDF4') lon = nc.variables['lon'][:] lat = nc.variables['lat'][:] if any_time_data: ts = nc.variables[tme_name][:] # time-series if date == -1: # Plot either the last year {len(ts)-1} or whatever year the user wants plot_yr = len(ts) - 1 else: plot_yr = date - ts[0] # Draw empty basemap m = Basemap(projection='robin', resolution='c', lat_0=0, lon_0=0) # m.drawcoastlines() # m.drawcountries() # Find x,y of map projection grid. lons, lats = np.meshgrid(lon, lat) x, y = m(lons, lats) if fill_mask: nc_vars = np.ma.filled(nc.variables[var_name], fill_value=np.nan) else: nc_vars = np.array(nc.variables[var_name]) # Plot # Get data for the last year from the netCDF file array if any_time_data: mask_data = maskoceans(lons, lats, nc_vars[int(plot_yr), :, :]) else: mask_data = maskoceans(lons, lats, nc_vars[:, :]) m.etopo() if land_bg: m.drawlsmask(land_color='white', ocean_color='none', lakes=True) # land_color = (0, 0, 0, 0) for transparent else: m.drawlsmask(land_color=(0, 0, 0, 0), ocean_color='none', lakes=True) cs = m.contourf(x, y, mask_data, np.arange(xaxis_min, xaxis_max, xaxis_step), cmap=cmap) if annotate_date: plt.annotate(str(yr), xy=(0.45, 0.1), xycoords='axes fraction', size=20) if grid: # where labels intersect = [left, right, top, bottom] m.drawmeridians(np.arange(-180, 180, 60), labels=[0,0,1,0], labelstyle='+/-', linewidth=0.5) m.drawparallels([-40, 0, 40], labels=[1, 0, 0, 0], labelstyle='+/-', linewidth=0.5) # Add colorbar cb = m.colorbar(cs, "bottom", size="3%", pad='2%', extend='both', drawedges=False, spacing='proportional', format=format) cb.set_label(xlabel) plt.title(title, y=1.08) plt.tight_layout() if not show_plot: plt.savefig(out_path, dpi=constants.DPI) plt.close() else: plt.show() nc.close() return out_path
7e688fd8e5baae173afc711f47633b3037b03e7d
14,206
def f5_add_policy_method_command(client: Client, policy_md5: str, new_method_name: str, act_as_method: str) -> CommandResults: """ Add allowed method to a certain policy. Args: client (Client): f5 client. policy_md5 (str): MD5 hash of the policy. new_method_name (str): Display name of the new method. act_as_method(str): functionality of the new method. default is GET. """ result = client.add_policy_method(policy_md5, new_method_name, act_as_method) outputs, headers = build_output(OBJECT_FIELDS, result) readable_output = tableToMarkdown('f5 data for adding policy methods:', outputs, headers, removeNull=True) command_results = CommandResults( outputs_prefix='f5.PolicyMethods', outputs_key_field='id', readable_output=readable_output, outputs=remove_empty_elements(outputs), raw_response=result ) return command_results
0b7297de004913eeeb5dd962a6cd62fee6f3458a
14,207
def test_set(sc, idfModel, numFeatures, test_file = "data/test_clean.csv" ): """ Input : IDF model obtained in the training phase number of retained features in the tweet-term structure Output : normalized tweet-term format test set """ test_text = sc.textFile(test_file) test_df = test_text.map(lambda x : (0,x)).toDF(["nothing" , "sentence"]) tokenizer_test = Tokenizer(inputCol="sentence", outputCol="words") wordsData_test = tokenizer_test.transform(test_df) hashingTF_test = HashingTF(inputCol="words", outputCol="rawFeatures", numFeatures=numFeatures) featurizedData_test = hashingTF_test.transform(wordsData_test) rescaledData_test = idfModel.transform(featurizedData_test) rescaled_test_df = rescaledData_test.select("features") return rescaled_test_df
7e68e536f3f40761e7885d784c392b2e9a6ca428
14,209
def is_object_based_ckpt(ckpt_path: str) -> bool: """Returns true if `ckpt_path` points to an object-based checkpoint.""" var_names = [var[0] for var in tf.train.list_variables(ckpt_path)] return '_CHECKPOINTABLE_OBJECT_GRAPH' in var_names
043069aae83845be44a3248ce0b95096e86d4b8f
14,210
from typing import Any def gather(first_step: str = PATH, *, filename: str = FILE, stamp: bool = True) -> dict[str, dict[str, Any]]: """Walk the steps on the path to read the trees of configuration.""" user = USER if filename == FILE else filename.split('.')[0] trees = [(where, tree) for where, tree in walk_the_path(first_step, filename=filename) if tree is not None] return {f'{user}_{steps:{PAD}}': dict(tree, **{LABEL: where}) if stamp else dict(tree) for steps, (where, tree) in enumerate(reversed(trees))}
c290b7bffcf3cb2b022ab1e4bbef68e6ebf4da3c
14,213
def _extractKernelVersion(kernel): """ Extract version string from raw kernel binary. @param bytes kernel Raw kernel binary. @return string Version string if found. """ try: versionOffset = kernel.index(b'Linux version') for i in range(versionOffset, versionOffset+1024): if kernel[i]==0x00: return kernel[versionOffset:i] return None except (ValueError, IndexError): return None
f32e995a4a16376b26b0e1d5af826f2f0e71df87
14,214
def get_vdw_rad(atomic_num): """Function to get the user defined atomic radius""" atomic_rad_dict = {6: 1.7, 7: 1.55, 8: 1.52, 9: 1.47} if atomic_num in atomic_rad_dict: return atomic_rad_dict[atomic_num] else: return float(Chem.GetPeriodicTable().GetRvdw(atomic_num))
98bd3e346afce37458c4ab1ea298e50af1121c21
14,215
def input_literal(term, prompt): """Get console input of literal values and structures.""" while True: input_string = read_line(term, prompt) if input_string: break return eval_literal(input_string)
93611e823a59bc61002cc80b481525ac5c91354e
14,216
from typing import List from typing import Union from typing import Mapping from typing import Pattern def _parse_string( value_expr: str, target_expr: str, ref_parts: List[str], a_type: Union[mapry.String, mapry.Path], pattern_uids: Mapping[Pattern[str], int], auto_id: mapry.go.generate.AutoID) -> str: """ Generate the code to parse a string. The code parses the JSONable ``value_expr`` into the ``target_expr``. :param value_expr: Go expression of the value :param target_expr: Go expression of where to store the parsed value :param ref_parts: Go expression of reference path segments to the value :param a_type: mapry definition of the value type :param pattern_uids: uniquely identified patterns :param auto_id: generator of unique identifiers :return: generated code """ uid = auto_id.next_identifier() return _PARSE_STRING_TPL.render( uid=uid, value_expr=value_expr, ref_parts=ref_parts, target_expr=target_expr, a_type=a_type, pattern_uids=pattern_uids).rstrip("\n")
bcb653ea8d02ea88569d67fedd5d1e83893a1519
14,217
from datetime import datetime def get_dots_case_json(casedoc, anchor_date=None): """ Return JSON-ready array of the DOTS block for given patient. Pulling properties from PATIENT document. Patient document trumps casedoc in this use case. """ if anchor_date is None: anchor_date = datetime.now(tz=timezone(settings.TIME_ZONE)) enddate = anchor_date ret = { 'regimens': [ # non art is 0 int(getattr(casedoc, CASE_NONART_REGIMEN_PROP, None) or 0), # art is 1 int(getattr(casedoc, CASE_ART_REGIMEN_PROP, None) or 0), ], 'regimen_labels': [ list(casedoc.nonart_labels), list(casedoc.art_labels) ], 'days': [], # dmyung - hack to have query_observations timezone # be relative specific to the eastern seaboard 'anchor': anchor_date.strftime("%d %b %Y"), } observations = query_observations( casedoc._id, enddate-timedelta(days=DOT_DAYS_INTERVAL), enddate) for delta in range(DOT_DAYS_INTERVAL): obs_date = enddate - timedelta(days=delta) day_arr = filter_obs_for_day(obs_date.date(), observations) day_data = DOTDay.merge_from_observations(day_arr) ret['days'].append(day_data.to_case_json(casedoc, ret['regimen_labels'])) ret['days'].reverse() return ret
4f9e6febcdc7e66f855411d601b69b4aad6955f3
14,218
import time def sleeping_func(arg, secs=10, result_queue=None): """This methods illustrates how the workers can be used.""" time.sleep(secs) if result_queue is not None: result_queue.put(arg) else: return arg
c15dfac46f9b47fcc82ff539116ecc683a593b9c
14,220
def make_coll(db_auth, db_user, db_pass, mongo_server_ip='127.0.0.1'): """ Function to establish a connection to a local MonoDB instance. Parameters ---------- coll_name: String. Name of MongoDB collection to retrieve. db_auth: String. MongoDB database that should be used for user authentication. db_user: String. Username for MongoDB authentication. db_user: String. Password for MongoDB authentication. Returns ------- collection: pymongo.collection.Collection. Collection within MongoDB that holds the scraped news stories. """ connection = MongoClient(mongo_server_ip) if db_auth: connection[db_auth].authenticate(db_user, db_pass) db = connection.event_scrape collection = db['stories'] return collection
eb4297e76c5c0a4bf344430eba26d4ed6e68128c
14,222
def sms_send(recipient): """ Attempt to send SMS message using Twilio's API. If this fails, use the Summit API to send the SMS message. """ body = request.get_data() try: message = send_sms_through_provider('Twilio', recipient, body) except TwilioRestException: message = send_sms_through_provider('Summit', recipient, body) return jsonify({ message.id_key: getattr(message, message.id_key), 'from': message.from_, 'to': message.to, 'body': message.body, })
15f6049af35970ccbefc3e75ba726281ed2d3329
14,223
from typing import Dict from typing import Optional def cat_to_sub_cat( dp: Image, categories_dict_names_as_key: Dict[str, str], cat_to_sub_cat_dict: Optional[Dict[str, str]] = None ) -> Image: """ Replace some category with its affiliated sub category of CategoryAnnotations. Suppose your category name is 'foo' and comes along with sub_category_annotations 'foo_1' and 'foo_2' then this adapter will replace 'foo' with 'foo_1' or 'foo_2', respectively. :param dp: Image datapoint :param categories_dict_names_as_key: A dict of all possible categories and their ids :param cat_to_sub_cat_dict: e.g. {"foo": "sub_cat_1", "bak":"sub_cat_2"} :return: Image with updated Annotations """ if cat_to_sub_cat_dict is None: return dp categories_dict = categories_dict_names_as_key for ann in dp.get_annotation_iter(category_names=list(cat_to_sub_cat_dict.keys())): sub_cat_type = cat_to_sub_cat_dict.get(ann.category_name, "") sub_cat = ann.get_sub_category(sub_cat_type) if sub_cat: ann.category_name = sub_cat.category_name ann.category_id = categories_dict[ann.category_name] return dp
f2c7dbb95e1a47e4a6775db3857a5f37c9c6b5a8
14,224
def index_to_str(idx): """ Generates a string representation from an index array. :param idx: The NumPy boolean index array. :return: The string representation of the array. """ num_chars = int(idx.shape[0] / 6 + 0.5) s = "" for i in range(num_chars): b = i * 6 six = idx[b:b+6] c = 0 for j in range(six.shape[0]): c = c * 2 + int(six[j]) s = s + chr(c+32) return s
7f7d49ca31bd70e5f19addaa4913a2cf14382e2d
14,225
def ArclinkStatusLine_ClassName(): """ArclinkStatusLine_ClassName() -> char const *""" return _DataModel.ArclinkStatusLine_ClassName()
4589b3c8bae93b28f5c17b8d432813ac504e58e6
14,226
import re def build_sfdisk_partition_line(table_type, dev_path, size, details): """Build sfdisk partition line using passed details, returns str.""" line = f'{dev_path} : size={size}' dest_type = '' source_filesystem = str(details.get('fstype', '')).upper() source_table_type = '' source_type = details.get('parttype', '') # Set dest type if re.match(r'^0x\w+$', source_type): # Both source and dest are MBR source_table_type = 'MBR' if table_type == 'MBR': dest_type = source_type.replace('0x', '').lower() elif re.match(r'^\w{8}-\w{4}-\w{4}-\w{4}-\w{12}$', source_type): # Source is a GPT type source_table_type = 'GPT' if table_type == 'GPT': dest_type = source_type.upper() if not dest_type: # Assuming changing table types, set based on FS if source_filesystem in cfg.ddrescue.PARTITION_TYPES.get(table_type, {}): dest_type = cfg.ddrescue.PARTITION_TYPES[table_type][source_filesystem] line += f', type={dest_type}' # Safety Check if not dest_type: std.print_error(f'Failed to determine partition type for: {dev_path}') raise std.GenericAbort() # Add extra details if details.get('partlabel', ''): line += f', name="{details["partlabel"]}"' if details.get('partuuid', '') and source_table_type == table_type: # Only add UUID if source/dest table types match line += f', uuid={details["partuuid"].upper()}' # Done return line
8ef87f9c4db06382d5788ab846ae5b8cf1c7d2f4
14,227
def get_allocation_window(allocation, default_start_date=_get_zero_date_utc(), default_end_date=_get_current_date_utc()): """ Returns a tuple containing the allocation windows start and end date """ if not allocation.start_date: window_start_date = default_start_date else: window_start_date = allocation.start_date if not allocation.end_date: window_end_date = default_end_date else: window_end_date = allocation.end_date return window_start_date, window_end_date
7367eb11eac50829de27315155b934297f6bc684
14,229
def IDFromUID(s,code=''): """ Create an ID object from the given string UID. This can raise an Error in case the string does not map to a valid UID. code is used in the verification process if given. """ id = _EmptyClass() id.__class__ = ID id.set_uid(s,code) return id
5e37d90313517e11bc914fb57320406653da3e3a
14,231
def ordered_pair_accuracy(labels, predictions, weights=None, name=None): """Computes the percentage of correctedly ordered pair. For any pair of examples, we compare their orders determined by `labels` and `predictions`. They are correctly ordered if the two orders are compatible. That is, labels l_i > l_j and predictions s_i > s_j and the weight for this pair is the weight from the l_i. Args: labels: A `Tensor` of the same shape as `predictions`. predictions: A `Tensor` with shape [batch_size, list_size]. Each value is the ranking score of the corresponding example. weights: A `Tensor` of the same shape of predictions or [batch_size, 1]. The former case is per-example and the latter case is per-list. name: A string used as the name for this metric. Returns: A metric for the accuracy or ordered pairs. """ with ops.name_scope(name, 'ordered_pair_accuracy', (labels, predictions, weights)): clean_labels, predictions, weights, _ = _prepare_and_validate_params( labels, predictions, weights) label_valid = math_ops.equal(clean_labels, labels) valid_pair = math_ops.logical_and( array_ops.expand_dims(label_valid, 2), array_ops.expand_dims(label_valid, 1)) pair_label_diff = array_ops.expand_dims( clean_labels, 2) - array_ops.expand_dims(clean_labels, 1) pair_pred_diff = array_ops.expand_dims( predictions, 2) - array_ops.expand_dims(predictions, 1) # Correct pairs are represented twice in the above pair difference tensors. # We only take one copy for each pair. correct_pairs = math_ops.to_float(pair_label_diff > 0) * math_ops.to_float( pair_pred_diff > 0) pair_weights = math_ops.to_float( pair_label_diff > 0) * array_ops.expand_dims( weights, 2) * math_ops.to_float(valid_pair) return math_ops.reduce_mean(correct_pairs * pair_weights)
5e6c5e0bc480822149a04b5efaffe2474d1a8394
14,232
def samp(*args, **kwargs): """ The HTML <samp> element is an element intended to identify sample output from a computer program. It is usually displayed in the browser's default monotype font (such as Lucida Console). """ return el('samp', *args, **kwargs)
eaf9e69413b3ccafc1f0fed9549efb89b7fb5715
14,233
def knownTypes(): """Returns all known resource types""" return loader.typeToExtension.keys()+['WorldModel','MultiPath','Point','Rotation','Matrix3','ContactPoint']
d332a3344e43bc8f2026eed6feff137fdb2b9b2e
14,237
def args_for_blocking_web_whatsapp_com_http(): """ Returns arguments for blocking web.whatsapp.com over http """ return ["-iptables-reset-keyword", "Host: web.whatsapp.com"]
a15a8ebc087467ec1a8e6817366f93df7b0a181b
14,238
def zeta_vector(): """The :func:`zeta` vector. :func:`zeta_vector` returns :math:`\zeta` parameters calculated by formula (5) on page 17 in `the technical paper`_, which is .. math:: \\bf \zeta= W^{-1}(p-\mu) """ return np.linalg.inv(W_matrix()) @ (m_vector() - mu_vector())
7650ad5fb443344e82f6e4bd9fd2cba697e7f768
14,239
def get_fourier_col_name(k, col_name, function_name="sin", seas_name=None): """Returns column name corresponding to a particular fourier term, as returned by fourier_series_fcn :param k: int fourier term :param col_name: str column in the dataframe used to generate fourier series :param function_name: str sin or cos :param seas_name: strcols_interact appended to new column names added for fourier terms :return: str column name in DataFrame returned by fourier_series_fcn """ # patsy doesn't allow "." in formula term. Replace "." with "_" rather than quoting "Q()" all fourier terms name = f"{function_name}{k:.0f}_{col_name}" if seas_name is not None: name = f"{name}_{seas_name}" return name
5c15b52728d0333c9c7df59030d6ead66473c823
14,240
import uuid def unique_filename(): """Creates a UUID-based unique filename""" return str(uuid.uuid1())
ee0d9090a4c5f8a6f0ddef2d670f7beb845a4114
14,241
import mdtraj import tempfile def _create_trajectory(molecule): """Create an `mdtraj` topology from a molecule object. Parameters ---------- molecule: openff.toolkit.topology.Molecule The SMILES pattern. Returns ------- mdtraj.Trajectory The created trajectory. """ # Check whether the molecule has a configuration defined, and if not, # define one. if molecule.n_conformers <= 0: molecule.generate_conformers(n_conformers=1) # We need to save out the molecule and then reload it as the toolkit # will not always save the atoms in the same order that they are # present in the molecule object. with tempfile.NamedTemporaryFile(suffix=".pdb") as file: molecule.to_file(file.name, "PDB") # Load the pdb into an mdtraj object. mdtraj_trajectory = mdtraj.load_pdb(file.name) # Change the assigned residue name (sometimes molecules are assigned # an amino acid residue name even if that molecule is not an amino acid, # e.g. C(CO)N is not Gly) and save the altered object as a pdb. for residue in mdtraj_trajectory.topology.residues: _generate_residue_name(residue, molecule.to_smiles()) return mdtraj_trajectory
de9e2a94d266dbdc3201ff74cb2bd27e939850d1
14,242
def preprocess(image): """Load and preprocess image.""" # Create the array of the right shape to feed into the keras model data = [] size = (96, 96) image = ImageOps.fit(image, size, Image.ANTIALIAS) image = np.asarray(image) x = preprocess_input(image) data.append(x) data = np.array(data) return data
d59eb9e10f6d69e6a1cdcc0d25230f6bd35947d1
14,243
import torch def move_to(obj, device): """Credit: https://discuss.pytorch.org/t/pytorch-tensor-to-device-for-a-list-of-dict/66283 Arguments: obj {dict, list} -- Object to be moved to device device {torch.device} -- Device that object will be moved to Raises: TypeError: object is of type that is not implemented to process Returns: type(obj) -- same object but moved to specified device """ if torch.is_tensor(obj): return obj.to(device) elif isinstance(obj, dict): res = {k: move_to(v, device) for k, v in obj.items()} return res elif isinstance(obj, list): return [move_to(v, device) for v in obj] elif isinstance(obj, tuple): return tuple(move_to(list(obj), device)) else: raise TypeError("Invalid type for move_to")
97abd322f292fe605a06e8235ecb353ed9a01bf8
14,244
def split(C, dims, axis=1): """ Splits the columns or rows of C. Suppse C = [X_1, X_2, ..., X_B] is an (n x sum_b d_b) matrix. Returns a list of the constituent matrices as a list. Parameters ---------- C: array-like, shape (n, sum_b d_b) The concatonated block matrix. dims: list of ints The dimensions of each matrix i.e. [d_1, ..., d_B] axis: int [0, 1] Which axis to split (1 mean columns 0 means rows) Output ------ blocks: list of array-like [X_1, X_2, ..., X_B] """ idxs = np.append([0], np.cumsum(dims)) blocks = [] if axis == 1: assert idxs[-1] == C.shape[1] for b in range(len(dims)): blocks.append(C[:, idxs[b]:idxs[b + 1]]) elif axis == 0: for b in range(len(dims)): blocks.append(C[idxs[b]:idxs[b + 1], :]) else: raise ValueError('axis must be either 0 or 1') return blocks
2fd55cdde7bc5315f2a78236775c1f36aa8714fd
14,245
def build_binary_value(char_str, bits, alphabet) -> str: """ This method converts a string char_str into binary, using n bits per character and decoding from the supplied alphabet or from ASCII when bits=7 This is almost the inverse method to build_string in the decompress module. :param char_str: string. :param bits: number of bits per character. :param alphabet: Alphabet. :return: binary value. """ if bits == 7: indices = [ord(char_) for char_ in char_str] else: indices = [alphabet.index(char_) for char_ in char_str] binary_char_list = ["{0:b}".format(index).zfill(bits) for index in indices] return ''.join(binary_char_list)
50830dd5cfa3f5428b0946e7382220f9b5ff1915
14,246
def computeAnswer(inputData): """Compute the answer to the task, from the input data.""" # Do some calculations on the inputData answer = str(int(inputData) * 2) # EDIT ME (remove this line once done) return answer
3bf90dc1c05ca422ffda70d8a053eb76f6dcc66b
14,247
import re import itertools import collections def label_schema_matching( df, endpoint=DBpedia, uri_data_model=False, to_lowercase=True, remove_prefixes=True, remove_punctuation=True, prefix_threshold=1, progress=True, caching=True): """A schema matching method by checking for attribute -- rdfs:label between links. Args: df (pd.DataFrame): The dataframe where matching attributes are supposed to be found. endpoint (Endpoint, optional): SPARQL Endpoint to be queried. Defaults to DBpedia. uri_data_model (bool, optional): If enabled, the URI is directly queried instead of a SPARQL endpoint. Defaults to False. to_lowercase (bool, optional): Converts queried strings to lowercase. Defaults to True. remove_prefixes (bool, optional): Removes prefixes of queried strings. Defaults to True. remove_punctuation (bool, optional): Removes punctuation from queried strings. Defaults to True. prefix_threshold (int, optional): The number of occurrences after which a prefix is considered "common". Defaults to 1. progress (bool, optional): If True, progress bars will be shown to inform the user about the progress made by the process (if "uri_data_model" = True). Defaults to True. caching (bool, optional): Turn result-caching for queries issued during the execution on or off. Defaults to True. Returns: pd.DataFrame: Two columns with matching links and a third column with the overlapped label. """ matches = pd.DataFrame( columns=["uri_1", "uri_2", "same_label"]) # Get URIs from the column names cat_cols = [col for col in df.columns if re.findall("https*:", col)] cat_cols_stripped = [re.sub(r"^.*http://", "http://", col) for col in cat_cols] # transform attributes to sparql values list form values = "(<"+pd.Series(cat_cols_stripped).str.cat(sep=">) (<")+">) " if uri_data_model: # Query these URIs for the label query = "SELECT ?value ?o WHERE {VALUES (?value) {(<**URI**>)} ?value rdfs:label ?o. FILTER (lang(?o) = 'en') }" labels = uri_querier(pd.DataFrame(cat_cols_stripped), 0, query, progress = progress, caching=caching).drop_duplicates().set_index("value") else: query = "SELECT ?value ?o WHERE {VALUES (?value) {" + values + \ "} ?value rdfs:label ?o. FILTER (lang(?o) = 'en') }" # query the equivalent classes/properties labels = endpoint_wrapper(query, endpoint, caching=caching).reset_index(drop=True) if labels.empty: return matches # Get common prefixes common_prefixes = get_common_prefixes(labels, prefix_threshold) # Clean the results (i.e. the labels) labels["o"] = labels["o"].apply(lambda x: clean_string( x, common_prefixes, to_lowercase, remove_prefixes, remove_punctuation)) # Create a dictionary if labels.index.name == "value": labels.reset_index(inplace=True) labels_dict = labels.set_index("value").T.to_dict("list") #check if there are no matches tmp = set() for v in labels_dict.values(): tmp.update(v) if len(labels_dict) == len(tmp): combinations = list(itertools.combinations(cat_cols_stripped,2)) combinations_sorted = [sorted(x) for x in combinations] matches = pd.DataFrame(combinations_sorted, columns=["uri_1", "uri_2"]) matches["same_label"] = 0 return matches else: # Combine the uris that have the same labels into a DataFrame new_labels_dict = collections.defaultdict(list) for key, values in labels_dict.items(): for i in values: new_labels_dict[i].append(key) df_labels = pd.DataFrame( list(new_labels_dict.values()), columns=["uri_1", "uri_2"]) #df_labels["same_label"] = pd.DataFrame(list(new_labels_dict.keys())) df_labels.dropna(inplace=True) # restrict the order of uris in one row for _, row in df_labels.iterrows(): new_match = {"uri_1": min(row["uri_1"], row["uri_2"]), "uri_2": max(row["uri_1"], row["uri_2"]), "same_label": 1} matches = matches.append(new_match, ignore_index=True) # Get back the uris that are not queried by rdfs:label and turn df into dict no_label = pd.DataFrame({"value": [ x for x in cat_cols_stripped if x not in list(labels["value"])], "o": np.nan}) labels = labels.append(no_label, ignore_index=True) full_labels_dict = labels.set_index("value").T.to_dict("list") # Create all unique combinations from the URIs, order them alphabetically and turn them into a DataFrame combinations = list(itertools.combinations(full_labels_dict.keys(), 2)) combinations_sorted = [sorted(x) for x in combinations] result = pd.DataFrame(combinations_sorted, columns=["uri_1", "uri_2"]) # merged with the non_matched combinations and drop duplicates for _, row in result.iterrows(): new_match = {"uri_1": min(row["uri_1"], row["uri_2"]), "uri_2": max(row["uri_1"], row["uri_2"]), "same_label": 0} matches = matches.append(new_match, ignore_index=True) matches.drop_duplicates( subset=["uri_1", "uri_2"], inplace=True, ignore_index=True) return matches
0577c29206da3c6528b85a4868a6f4db12450122
14,248
from datetime import datetime def get_last_month_date_dmy() -> str: """Returns last month date (dd/mm/yyyy for calls report).""" return (datetime.now() - timedelta(30)).date().strftime("%d/%m/%Y")
b1dc2066c30797195a8e5e03b994d0374c0b5a2f
14,250
def irange(start, end): """Inclusive range from start to end (vs. Python insanity.) irange(1,5) -> 1, 2, 3, 4, 5""" return range( start, end + 1 )
91d4c270b1d9304b4ee82c0cb16aee5d518db3d5
14,251
def get_required_params(request, expected_params: list, type: str = 'POST') -> dict: """Gets the list of params from request, or returns None if ANY is missing. :param request: The Request :type request: flask.Request :param expected_params: The list of expected parameters :type expected_params: list :param type: The request type, defaults to POST, can be GET to get query params. :type type: str :return: Dictorinary with parameters as keys and values as values :rtype: dict """ res = {} for param in expected_params: if type == 'POST': val = request.form.get(param) elif type == 'GET': val = request.args.get(param) else: val = None if not val: return None res[param] = val return res
2d0b2970464877ed74ecf3bfe0d45325ce3fafe4
14,253
def bot_send(msg, bot_id, broadcast): """ Send a message to a telegram user or group specified on chat_id chat_id must be a number! bot_id == bot_username """ if broadcast == True: bot = telegram.Bot(token=config[bot_id]["bot_api_token"]) bot.sendMessage(chat_id=config[bot_id]["group_chat_id"], text=msg) else: print(msg) return None
f5647d489c6c4873a031a7a11f9112164881c2e7
14,255
import random def split_dataset(dataset, num_train=1200): """ Split the dataset into a training and test set. Args: dataset: an iterable of Characters. Returns: A tuple (train, test) of Character sequences. """ all_data = list(dataset) random.shuffle(all_data) return all_data[:num_train], all_data[num_train:]
140a9926ff5dc70e1a2b3ec9887111595c030355
14,257
def get_constant(): """ Keep learning rate constant """ def update(lr, epoch): return lr return update
1b68c67202c1c22c1aa6a6d532796e2bba0b42ee
14,258
def spatial_pack_nhwc(data, kernel, stride, padding, in_bits, weight_bits, pack_dtype, out_dtype, dorefa=False): """ Compute convolution with pack on spatial axes. """ assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1" data_q = bitpack(data, in_bits, pack_axis=3, bit_axis=4, pack_type=pack_dtype) kernel_q = bitpack(kernel, weight_bits, pack_axis=2, bit_axis=4, pack_type=pack_dtype) _, H, W, CI, IB = data_q.shape KH, KW, _, CO, KB = kernel_q.shape HPAD, WPAD, _, _ = get_pad_tuple(padding, kernel) if isinstance(stride, (tuple, list)): HSTR, WSTR = stride else: HSTR, WSTR = stride, stride HCAT, WCAT = KH-1, KW-1 wkl = _get_workload(data, kernel, stride, padding, out_dtype, "NHWC") sch = _get_schedule(wkl, "NHWC") VH = sch.vh VW = sch.vw VC = sch.vc PAD_H = H + 2*HPAD PAD_W = W + 2*WPAD OH = (H + 2*HPAD - KH) // HSTR + 1 OW = (W + 2*WPAD - KW) // WSTR + 1 dvshape = (1, PAD_H//(VH*HSTR), PAD_W//(VW*WSTR), VH*HSTR+HCAT, VW*WSTR+WCAT, CI, IB) kvshape = (CO, KH, KW, CI, VC, KB) ovshape = (1, OH, OW, CO, VH, VW, VC) oshape = (1, OH, OW, CO) if (HPAD != 0 and WPAD != 0): data_pad = pad(data_q, (0, HPAD, WPAD, 0, 0), name="data_pad") else: data_pad = data_q data_vec = tvm.compute(dvshape, lambda n, h, w, vh, vw, ci, b: \ data_pad[n][h*VH*HSTR+vh][w*VW*WSTR+vw][ci][b], name='data_vec') kernel_vec = tvm.compute(kvshape, lambda co, dh, dw, ci, vc, b: \ kernel_q[dh][dw][ci][co*VC+vc][b], name='kernel_vec') ci = tvm.reduce_axis((0, CI), name='ci') dh = tvm.reduce_axis((0, KH), name='dh') dw = tvm.reduce_axis((0, KW), name='dw') b1 = tvm.reduce_axis((0, IB), name='ib') b2 = tvm.reduce_axis((0, KB), name='kb') def _conv(n, h, w, co, vh, vw, vc): b1b2 = (b1+b2).astype(out_dtype) if dorefa: return tvm.sum( (tvm.popcount(data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ci, b1].astype(out_dtype) & kernel_vec[co, dh, dw, ci, vc, b2].astype(out_dtype)) - tvm.popcount(data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ci, b1].astype(out_dtype) & ~kernel_vec[co, dh, dw, ci, vc, b2]).astype(out_dtype)) << b1b2, axis=[dh, dw, ci, b1, b2]) return tvm.sum(tvm.popcount( data_vec[n, h, w, vh*HSTR+dh, vw*WSTR+dw, ci, b1] & kernel_vec[co, dh, dw, ci, vc, b2]).astype(out_dtype) << b1b2, axis=[dh, dw, ci, b1, b2]) conv = tvm.compute(ovshape, _conv, name='conv') return tvm.compute(oshape, lambda n, h, w, co: conv[n][h//VH][w//VW][co//VC][h%VH][w%VW][co%VC], name='output_unpack', tag='spatial_bitserial_conv_nhwc')
9d2527fb9878cc759e5cad0d1df4057cd852bc9f
14,259
import pickle def load_random_tt_distribution(numAgents, r, pu, samples): """ Load a file with a population of random turn-taking values, assuming that it exists Parameters: * numAgents -- the desired number of probabilistic agents to include * r -- the turn-taking resolution * pu -- the probability that a bit in each usage attempt sequence will be 1 * samples -- the number of random turn-taking values to generate See Section 3 of: Raffensperger, P. A., Webb, R. Y., Bones, P. J., and McInnes, A. I. (2012). A simple metric for turn-taking in emergent communication. Adaptive Behavior, 20(2):104-116. """ filename = get_tt_distribution_filename(numAgents, r, pu, samples) file = open(filename, 'r') return pickle.load(file)
948dfa02ff387fbb69902bf35b5cc428f054a6e7
14,260
from typing import ContextManager def fail_after(seconds: float) -> ContextManager[CancelScope]: """ Create a cancel scope with the given timeout, and raises an error if it is actually cancelled. This function and move_on_after() are similar in that both create a cancel scope with a given timeout, and if the timeout expires then both will cause CancelledError to be raised within the scope. The difference is that when the CancelledError exception reaches move_on_after(), it’s caught and discarded. When it reaches fail_after(), then it’s caught and TimeoutError is raised in its place. """ return fail_at(get_running_loop().time() + seconds)
917fe4d7d0a599caa855210bd86bb0b57263e71c
14,261
from typing import Union from typing import Set from typing import Dict import copy def _get_dataset_names_mapping( names: Union[str, Set[str], Dict[str, str]] = None ) -> Dict[str, str]: """Take a name or a collection of dataset names and turn it into a mapping from the old dataset names to the provided ones if necessary. Args: names: A dataset name or collection of dataset names. When str or Set[str] is provided, the listed names will stay the same as they are named in the provided pipeline. When Dict[str, str] is provided, current names will be mapped to new names in the resultant pipeline. Returns: A dictionary that maps the old dataset names to the provided ones. Examples: >>> _get_dataset_names_mapping("dataset_name") {"dataset_name": "dataset_name"} # a str name will stay the same >>> _get_dataset_names_mapping(set(["ds_1", "ds_2"])) {"ds_1": "ds_1", "ds_2": "ds_2"} # a Set[str] of names will stay the same >>> _get_dataset_names_mapping({"ds_1": "new_ds_1_name"}) {"ds_1": "new_ds_1_name"} # a Dict[str, str] of names will map key to value """ if names is None: return {} if isinstance(names, str): return {names: names} if isinstance(names, dict): return copy.deepcopy(names) return {item: item for item in names}
df271cb4cd102eb3731e12b8d92fd4cca8ef8145
14,262
import json def _json_keyify(args): """ converts arguments into a deterministic key used for memoizing """ args = tuple(sorted(args.items(), key=lambda e: e[0])) return json.dumps(args)
2800a9a0db0cf8d51efbcbeda2c023172f6662f5
14,263
def tgsegsm_vect(time_in, data_in): """ Transform data from GSE to GSM. Parameters ---------- time_in: list of float Time array. data_in: list of float xgse, ygse, zgse cartesian GSE coordinates. Returns ------- xgsm: list of float Cartesian GSM coordinates. ygsm: list of float Cartesian GSM coordinates. zgsm: list of float Cartesian GSM coordinates. """ xgsm, ygsm, zgsm = 0, 0, 0 d = np.array(data_in) xgse, ygse, zgse = d[:, 0], d[:, 1], d[:, 2] gd1, gd2, gd3 = cdipdir_vect(time_in) gst, slong, sra, sdec, obliq = csundir_vect(time_in) gs1 = np.cos(sra) * np.cos(sdec) gs2 = np.sin(sra) * np.cos(sdec) gs3 = np.sin(sdec) sgst = np.sin(gst) cgst = np.cos(gst) ge1 = 0.0 ge2 = -np.sin(obliq) ge3 = np.cos(obliq) gm1 = gd1 * cgst - gd2 * sgst gm2 = gd1 * sgst + gd2 * cgst gm3 = gd3 gmgs1 = gm2 * gs3 - gm3 * gs2 gmgs2 = gm3 * gs1 - gm1 * gs3 gmgs3 = gm1 * gs2 - gm2 * gs1 rgmgs = np.sqrt(gmgs1**2 + gmgs2**2 + gmgs3**2) cdze = (ge1 * gm1 + ge2 * gm2 + ge3 * gm3)/rgmgs sdze = (ge1 * gmgs1 + ge2 * gmgs2 + ge3 * gmgs3)/rgmgs xgsm = xgse ygsm = cdze * ygse + sdze * zgse zgsm = -sdze * ygse + cdze * zgse return xgsm, ygsm, zgsm
1c1809c722ae84e2d7bd467f78e9cefddb7cf884
14,264
def choose_a_pick_naive(numbers_left): """ Choose any larger number :param numbers_left: :return: """ if numbers_left[0] > numbers_left[-1]: return 0, numbers_left[0] elif numbers_left[-1] > numbers_left[0]: return -1, numbers_left[-1] else: return 0, numbers_left[0]
70405a4ad9d1ee1afbec93bea13d7eab3068b42e
14,265
def _Run(args, holder, target_https_proxy_arg, release_track): """Issues requests necessary to import target HTTPS proxies.""" client = holder.client resources = holder.resources target_https_proxy_ref = target_https_proxy_arg.ResolveAsResource( args, holder.resources, default_scope=compute_scope.ScopeEnum.GLOBAL, scope_lister=compute_flags.GetDefaultScopeLister(client)) data = console_io.ReadFromFileOrStdin(args.source or '-', binary=False) try: target_https_proxy = export_util.Import( message_type=client.messages.TargetHttpsProxy, stream=data, schema_path=_GetSchemaPath(release_track)) except yaml_validator.ValidationError as e: raise compute_exceptions.ValidationError(str(e)) # Get existing target HTTPS proxy. try: old_target_https_proxy = target_https_proxies_utils.SendGetRequest( client, target_https_proxy_ref) except apitools_exceptions.HttpError as error: if error.status_code != 404: raise error # Target HTTPS proxy does not exist, create a new one. return _SendInsertRequest(client, resources, target_https_proxy_ref, target_https_proxy) if old_target_https_proxy == target_https_proxy: return console_io.PromptContinue( message=('Target Https Proxy [{0}] will be overwritten.').format( target_https_proxy_ref.Name()), cancel_on_no=True) # Populate id and fingerprint fields. These two fields are manually # removed from the schema files. target_https_proxy.id = old_target_https_proxy.id if hasattr(old_target_https_proxy, 'fingerprint'): target_https_proxy.fingerprint = old_target_https_proxy.fingerprint # Unspecified fields are assumed to be cleared. cleared_fields = [] if target_https_proxy.description is None: cleared_fields.append('description') if target_https_proxy.serverTlsPolicy is None: cleared_fields.append('serverTlsPolicy') if target_https_proxy.authorizationPolicy is None: cleared_fields.append('authorizationPolicy') if hasattr(target_https_proxy, 'certificateMap') and target_https_proxy.certificateMap is None: cleared_fields.append('certificateMap') if hasattr(target_https_proxy, 'httpFilters') and not target_https_proxy.httpFilters: cleared_fields.append('httpFilters') if target_https_proxy.proxyBind is None: cleared_fields.append('proxyBind') if target_https_proxy.quicOverride is None: cleared_fields.append('quicOverride') if not target_https_proxy.sslCertificates: cleared_fields.append('sslCertificates') if target_https_proxy.sslPolicy is None: cleared_fields.append('sslPolicy') if target_https_proxy.urlMap is None: cleared_fields.append('urlMap') with client.apitools_client.IncludeFields(cleared_fields): return _SendPatchRequest(client, resources, target_https_proxy_ref, target_https_proxy)
26163d575701045e126ec52ae9adbb24fb98a54a
14,268
def get_mono_cell(locus_file, TotalSNPs, TotalBi_SNPs_used): """Determine value to add to [0,0] cell""" TotalBP, Loci_count = totalbp(locus_file) return int((TotalBi_SNPs_used * TotalBP) / TotalSNPs) - TotalBi_SNPs_used, \ TotalBP, Loci_count
b6890f4a5129eb0892c6af9f5385dba98612776f
14,269
def remove_bad_particles(st, min_rad='calc', max_rad='calc', min_edge_dist=2.0, check_rad_cutoff=[3.5, 15], check_outside_im=True, tries=50, im_change_frac=0.2, **kwargs): """ Removes improperly-featured particles from the state, based on a combination of particle size and the change in error on removal. Parameters ----------- st : :class:`peri.states.State` The state to remove bad particles from. min_rad : Float, optional All particles with radius below min_rad are automatically deleted. Set to 'calc' to make it the median rad - 25* radius std. Default is 'calc'. max_rad : Float, optional All particles with radius above max_rad are automatically deleted. Set to 'calc' to make it the median rad + 15* radius std. Default is 'calc'. min_edge_dist : Float, optional All particles within min_edge_dist of the (padded) image edges are automatically deleted. Default is 2.0 check_rad_cutoff : 2-element list of floats, optional Particles with radii < check_rad_cutoff[0] or > check_rad_cutoff[1] are checked if they should be deleted. Set to 'calc' to make it the median rad +- 3.5 * radius std. Default is [3.5, 15]. check_outside_im : Bool, optional If True, checks if particles located outside the unpadded image should be deleted. Default is True. tries : Int, optional The maximum number of particles with radii < check_rad_cutoff to try to remove. Checks in increasing order of radius size. Default is 50. im_change_frac : Float, optional Number between 0 and 1. If removing a particle decreases the error by less than im_change_frac*the change in the image, then the particle is deleted. Default is 0.2 Returns ------- removed: Int The cumulative number of particles removed. """ is_near_im_edge = lambda pos, pad: (((pos + st.pad) < pad) | (pos > np.array(st.ishape.shape) + st.pad - pad)).any(axis=1) # returns True if the position is within 'pad' of the _outer_ image edge removed = 0 attempts = 0 n_tot_part = st.obj_get_positions().shape[0] q10 = int(0.1 * n_tot_part) # 10% quartile r_sig = np.sort(st.obj_get_radii())[q10:-q10].std() r_med = np.median(st.obj_get_radii()) if max_rad == 'calc': max_rad = r_med + 15*r_sig if min_rad == 'calc': min_rad = r_med - 25*r_sig if check_rad_cutoff == 'calc': check_rad_cutoff = [r_med - 7.5*r_sig, r_med + 7.5*r_sig] # 1. Automatic deletion: rad_wrong_size = np.nonzero( (st.obj_get_radii() < min_rad) | (st.obj_get_radii() > max_rad))[0] near_im_edge = np.nonzero(is_near_im_edge(st.obj_get_positions(), min_edge_dist - st.pad))[0] delete_inds = np.unique(np.append(rad_wrong_size, near_im_edge)).tolist() delete_poses = st.obj_get_positions()[delete_inds].tolist() message = ('-'*27 + 'SUBTRACTING' + '-'*28 + '\n Z\t Y\t X\t R\t|\t ERR0\t\t ERR1') with log.noformat(): CLOG.info(message) for pos in delete_poses: ind = st.obj_closest_particle(pos) old_err = st.error p, r = st.obj_remove_particle(ind) p = p[0] r = r[0] part_msg = '%2.2f\t%3.2f\t%3.2f\t%3.2f\t|\t%4.3f \t%4.3f' % ( tuple(p) + (r,) + (old_err, st.error)) with log.noformat(): CLOG.info(part_msg) removed += 1 # 2. Conditional deletion: check_rad_inds = np.nonzero((st.obj_get_radii() < check_rad_cutoff[0]) | (st.obj_get_radii() > check_rad_cutoff[1]))[0] if check_outside_im: check_edge_inds = np.nonzero( is_near_im_edge(st.obj_get_positions(), st.pad))[0] check_inds = np.unique(np.append(check_rad_inds, check_edge_inds)) else: check_inds = check_rad_inds check_inds = check_inds[np.argsort(st.obj_get_radii()[check_inds])] tries = np.min([tries, check_inds.size]) check_poses = st.obj_get_positions()[check_inds[:tries]].copy() for pos in check_poses: old_err = st.error ind = st.obj_closest_particle(pos) killed, p, r = check_remove_particle( st, ind, im_change_frac=im_change_frac) if killed: removed += 1 check_inds[check_inds > ind] -= 1 # cleaning up indices.... delete_poses.append(pos) part_msg = '%2.2f\t%3.2f\t%3.2f\t%3.2f\t|\t%4.3f \t%4.3f' % ( p + r + (old_err, st.error)) with log.noformat(): CLOG.info(part_msg) return removed, delete_poses
09d767cc2513b542a99f8a846c866a1a8902ebf5
14,270
def _pyside_import_module(moduleName): """ The import for PySide """ pyside = __import__('PySide', globals(), locals(), [moduleName], -1) return getattr(pyside, moduleName)
7b3b18214d12322e230c78678f3ef4fdc1717f10
14,271
import unicodedata def sanitize_str(value: str) -> str: """Removes Unicode control (Cc) characters EXCEPT for tabs (\t), newlines (\n only), line separators (U+2028) and paragraph separators (U+2029).""" return "".join(ch for ch in value if unicodedata.category(ch) != 'Cc' and ch not in {'\t', '\n', '\u2028', '\u2029'})
5b5eae2b377a834e377a8bf7bcd7cefc2278c2f7
14,273
def get_tariff_estimated(reporter, partner='000', product='all', year=world_trade_data.defaults.DEFAULT_YEAR, name_or_id='name'): """Tariffs (estimated)""" return _get_data(reporter, partner, product, year, datatype='aveestimated', datasource='trn', name_or_id=name_or_id)
d4fd81e640d014bf52725a1274d0f3a2c0eebeba
14,274
def task_result_api_view(request, taskid): """ Get task `state` and `result` from API endpoint. Use case: you want to provide to some user with async feedback about about status of some task. Example: # urls.py urlpatterns = [ url(r'^api/task/result/(.+)/', task_result_api_view), ... ] # some_views.py context = {} # ... async_request = some_important_task.delay(...) # ... context['async_task_id'] = str(async_request.id) Now we can check the state and result form Front-end side. """ result = AsyncResult(taskid) response = {'task-id': taskid, 'state': result.state} response.update({'result': _safe_result(result.result)}) return JsonResponse(response)
94e46b3282a1f69e16a8979906b118d8684e1799
14,275
def get_horizon_coordinates(fp_pointings_spherical): """ It converts from spherical to Horizon coordinates, with the conventions: Altitute = np.pi / 2 - zenith angle (theta) Azimuth = 2 * np.pi - phi Parameters ---------- fp_pointings_spherical : numpy array of shape (..., 2), radians They are the spherical coordinates (theta, phi) that will be converted. Returns ------- out : numpy array of shape (..., ), numpy array of shape (..., ) """ Alt = np.pi/2 - fp_pointings_spherical[..., 0] #rad Az = 2 * np.pi - fp_pointings_spherical[..., 1] #rad return Alt, Az
7fbc11fe6195129d9c18c0161fe59fab6e31a29c
14,276
from typing import Any, Type


def patch_object_type() -> None:
    """
    Patches `graphene.ObjectType` to make it indexable at runtime.

    This is necessary for it to be generic at typechecking time.
    """
    # Lazily import graphene as it is actually an expensive thing to do and we don't want to slow
    # down things at type-checking time.
    from graphene import ObjectType  # pylint: disable=import-outside-toplevel

    ObjectTypeMetaclass = type(ObjectType)

    def __getitem__(cls: Type[Type[ObjectType]], _: Any) -> Type[Type[ObjectType]]:
        return cls

    ObjectTypeMetaclass.__getitem__ = __getitem__
4ed77870c9df03d072b55bc3a919c59d3e761f38
14,278
import time


def date_format(time_obj=time, fmt='%Y-%m-%d %H:%M:%S') -> str:
    """
    Convert a time to a formatted string.

    :param time_obj: an object exposing ``time()`` (defaults to the ``time``
        module, i.e. the current time is used)
    :param fmt: ``strftime`` format string
    :return: the formatted time string
    """
    _tm = time_obj.time()
    _t = time.localtime(_tm)
    return time.strftime(fmt, _t)
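# Hedged usage sketch (illustration only): with the defaults the current local
# time is formatted; a different fmt changes the layout.
print(date_format())                 # e.g. '2024-01-01 12:00:00'
print(date_format(fmt='%Y/%m/%d'))   # e.g. '2024/01/01'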
0a614763b040587b80743ffacfff6bbb0a6c7365
14,280
from typing import Optional def clean_pin_cite(pin_cite: Optional[str]) -> Optional[str]: """Strip spaces and commas from pin_cite, if it is not None.""" if pin_cite is None: return pin_cite return pin_cite.strip(", ")
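# Hedged usage sketch (illustration only): stray commas and surrounding spaces
# are stripped, and None passes through unchanged.
assert clean_pin_cite(" 123, ") == "123"
assert clean_pin_cite(None) is None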
9c495fcc4f1cf192c1358f50fef569c4d6b36290
14,281
def instrument_code_to_name(rwc_instrument_code): """Use the rwc_instrument_map.json to convert an rwc_instrument_code to its instrument name. Parameters ---------- rwc_instrument_code : str Two character instrument code Returns ------- instrument_name : str Full instrument name, if it exists, else None """ code = RWC_INSTRUMENT_MAP.get(rwc_instrument_code, None) return code if code else None
9059bb69b86e5c8e326b5c51a745e61c15c41389
14,282
def record_time(ad, fallback_to_launch=True): """ RecordTime falls back to launch time as last-resort and for jobs in the queue For Completed/Removed/Error jobs, try to update it: - to CompletionDate if present - else to EnteredCurrentStatus if present - else fall back to launch time """ if ad["JobStatus"] in [3, 4, 6]: if ad.get("CompletionDate", 0) > 0: return ad["CompletionDate"] elif ad.get("EnteredCurrentStatus", 0) > 0: return ad["EnteredCurrentStatus"] if fallback_to_launch: return _LAUNCH_TIME return 0
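# Hedged usage sketch (illustration only): a completed job (JobStatus 4) reports
# its CompletionDate, while a non-final job with the launch-time fallback
# disabled reports 0.
done = {"JobStatus": 4, "CompletionDate": 1700000000, "EnteredCurrentStatus": 1699990000}
running = {"JobStatus": 2}
print(record_time(done))                                # 1700000000
print(record_time(running, fallback_to_launch=False))   # 0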
517eb369f9d04048bce87b4301761f2b3b629303
14,283
def getTeamCompatibility(mentor, team): """ Gets a "compatibility score" between a mentor and a team (used as the weight in the later optimization problem) Uses the functions defined above to compute different aspects of the score """ score = 0 # find value from overlapping availabilities # value may differ depending on transportation type used, so try them all bestOverlap = -noOverlapCost # baseline to beat is no overlap at all for transitType in range(numTypesTransit): # check if this transit type is better than previous best and update if needed bestOverlap = max(bestOverlap, getSingleOverlapValue(mentor, team, transitType)) score += bestOverlap # find value from team type matches score += getTeamTypeValue(mentor, team) # find value from team requests / requirements score += getTeamRequestedValue(mentor, team) return score
a9cd0c65b4419051045706852c3d64baff787e4f
14,284
def mean_edges(graph, feat, weight=None): """Averages all the values of edge field :attr:`feat` in :attr:`graph`, optionally multiplies the field by a scalar edge field :attr:`weight`. Parameters ---------- graph : DGLGraph The graph. feat : str The feature field. weight : optional, str The weight field. If None, no weighting will be performed, otherwise, weight each edge feature with field :attr:`feat`. for calculating mean. The weight feature associated in the :attr:`graph` should be a tensor of shape ``[graph.number_of_edges(), 1]``. Returns ------- tensor The averaged tensor. Notes ----- Return a stacked tensor with an extra first dimension whose size equals batch size of the input graph. The i-th row of the stacked tensor contains the readout result of the i-th graph in the batched graph. If a graph has no edges, a zero tensor with the same shape is returned at the corresponding row. Examples -------- >>> import dgl >>> import torch as th Create two :class:`~dgl.DGLGraph` objects and initialize their edge features. >>> g1 = dgl.DGLGraph() # Graph 1 >>> g1.add_nodes(2) >>> g1.add_edges([0, 1], [1, 0]) >>> g1.edata['h'] = th.tensor([[1.], [2.]]) >>> g1.edata['w'] = th.tensor([[3.], [6.]]) >>> g2 = dgl.DGLGraph() # Graph 2 >>> g2.add_nodes(3) >>> g2.add_edges([0, 1, 2], [1, 2, 0]) >>> g2.edata['h'] = th.tensor([[1.], [2.], [3.]]) Average over edge attribute :attr:`h` without weighting for each graph in a batched graph. >>> bg = dgl.batch([g1, g2], edge_attrs='h') >>> dgl.mean_edges(bg, 'h') tensor([[1.5000], # (1 + 2) / 2 [2.0000]]) # (1 + 2 + 3) / 3 Sum edge attribute :attr:`h` with normalized weight from edge attribute :attr:`w` for a single graph. >>> dgl.mean_edges(g1, 'h', 'w') # h1 * (w1 / (w1 + w2)) + h2 * (w2 / (w1 + w2)) tensor([[1.6667]]) # 1 * (3 / (3 + 6)) + 2 * (6 / (3 + 6)) See Also -------- sum_nodes mean_nodes sum_edges """ return _mean_on(graph, 'edges', feat, weight)
8219a4f543fe0903e3a9b313fd3cc142435da788
14,285
from typing import Optional async def update_config_file(config: ConfigDTO, reboot_processor: Optional[bool] = True): """ Overwrites the configuration used by the processor. """ config_dict = map_to_file_format(config) success = update_config(config_dict, reboot_processor) if not success: return handle_response(config_dict, success) return map_config(extract_config(), "")
165c2f59056ce0d71b237897e7379f517b158dc5
14,286
import requests def integration_session(scope="session"): """ creates a Session object which will persist over the entire test run ("session"). http connections will be reused (higher performance, less resource usage) Returns a Session object """ s = requests.sessions.Session() s.headers.update(test_headers) return s
c002b6d7875be41355990efe0bb10712661f50fe
14,287
import json


def get_json_dump(json_object, indent=4, sort_keys=False):
    """ Shorthand to get a pretty-printed str from a JSON-serializable object. """
    return json.dumps(json_object, indent=indent, sort_keys=sort_keys)
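# Hedged usage sketch (illustration only): pretty-print a small dict with sorted keys.
print(get_json_dump({"b": 2, "a": 1}, indent=2, sort_keys=True))
# {
#   "a": 1,
#   "b": 2
# }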
505548cdf972ef891b7bcc3bcd7be3347769faec
14,288
from sympy import S, Poly, sympify


def number_of_real_roots(f, *gens, **args):
    """Returns the number of distinct real roots of `f` in `(inf, sup]`.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> from sympy.polys.polyroots import number_of_real_roots

    >>> f = Poly(x**2 - 1, x)

    Count real roots in the (-oo, oo) interval:

    >>> number_of_real_roots(f)
    2

    Count real roots in the (0, 2) interval:

    >>> number_of_real_roots(f, inf=0, sup=2)
    1

    Count real roots in the (2, oo) interval:

    >>> number_of_real_roots(f, inf=2)
    0

    References
    ==========

    .. [Davenport88] J.H. Davenport, Y. Siret, E. Tournier, Computer Algebra
        Systems and Algorithms for Algebraic Computation, Academic Press,
        London, 1988, pp. 124-128

    """
    def sign_changes(seq):
        count = 0

        for i in range(1, len(seq)):
            if (seq[i-1] < 0 and seq[i] >= 0) or \
               (seq[i-1] > 0 and seq[i] <= 0):
                count += 1

        return count

    F = Poly(f, *gens, **args)

    if not F.is_Poly:
        return 0

    if F.is_multivariate:
        raise ValueError('multivariate polynomials not supported')

    if F.degree() < 1:
        return 0

    inf = args.get('inf', None)

    if inf is not None:
        inf = sympify(inf)

        if not inf.is_number:
            raise ValueError("Not a number: %s" % inf)
        elif abs(inf) is S.Infinity:
            inf = None

    sup = args.get('sup', None)

    if sup is not None:
        sup = sympify(sup)

        if not sup.is_number:
            raise ValueError("Not a number: %s" % sup)
        elif abs(sup) is S.Infinity:
            sup = None

    sturm = F.sturm()

    if inf is None:
        signs_inf = sign_changes([ s.LC()*(-1)**s.degree() for s in sturm ])
    else:
        signs_inf = sign_changes([ s.eval(inf) for s in sturm ])

    if sup is None:
        signs_sup = sign_changes([ s.LC() for s in sturm ])
    else:
        signs_sup = sign_changes([ s.eval(sup) for s in sturm ])

    return abs(signs_inf - signs_sup)
d0ed0923aba4e5749a5b5baf267914ea29800c6f
14,289
def heap_sort(arr: list): """ Heap sorting a list. Big-O: O(n log n). @see https://www.geeksforgeeks.org/heap-sort/ """ def heapify(sub: list, rdx: int, siz: int): """ Heapifying range between rdx and size ([rdx:siz]). @param sub: a slice of list. @param rdx: root/parent index to start. @param siz: size of heap. """ largest = ndx = rdx # assuming the root is the largest while ndx < siz: l_index = 2 * ndx + 1 # child index at left = 2*i + 1 r_index = 2 * ndx + 2 # child index at right = 2*i + 2 # reset largest index if left child exists and is greater than root. if l_index < siz and sub[ndx] < sub[l_index]: largest = l_index # check if right child is greater than the value at the largest index. if r_index < siz and sub[largest] < sub[r_index]: largest = r_index # change root, if needed if largest != ndx: sub[ndx], sub[largest] = sub[largest], sub[ndx] # swap ndx = largest # heapify the root. continue return pass n = len(arr) # build a max heap. parent = n // 2 - 1 # the last parent (that can have children) for i in range(parent, -1, -1): heapify(arr, i, n) # extract elements one by one. for i in range(n-1, 0, -1): arr[i], arr[0] = arr[0], arr[i] # swap heapify(arr, 0, i) return arr
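# Hedged usage sketch (illustration only): the sort happens in place and the
# list is also returned, so both names below refer to the same sorted data.
data = [12, 11, 13, 5, 6, 7]
print(heap_sort(data))  # [5, 6, 7, 11, 12, 13]
print(data)             # [5, 6, 7, 11, 12, 13]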
9b53f3027804cab16c9850d4858377f49afe7bbf
14,290
def find_max_path(triangle): """ Find maximum-sum path from top of triangle to bottom """ # Start by copying the values sums = [[x for x in row] for row in triangle] # Efficient algorithm: start at the bottom and work our way up, computing max sums for reverse_index, row in enumerate(reversed(sums)): if reverse_index == 0: # Easy: max value for subpaths from last row is cell value itself continue # Now we need to take sum of each cell and max of two subpaths row_below = sums[-reverse_index] for col_index, col in enumerate(row): left = row_below[col_index] right = row_below[col_index + 1] row[col_index] = col + max(left, right) return sums[0][0]
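# Hedged usage sketch (illustration only): the classic small triangle whose best
# top-to-bottom path is 3 + 7 + 4 + 9 = 23.
triangle = [
    [3],
    [7, 4],
    [2, 4, 6],
    [8, 5, 9, 3],
]
print(find_max_path(triangle))  # 23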
1eb0afd076c455e67eacc867d04020ae82c68936
14,291
def plot_partregress(results, exog_idx=None, xnames=None, grid=None, fig=None): """Plot partial regression for a set of regressors. Parameters ---------- results : results instance A regression model results instance exog_idx : None or list of int (column) indices of the exog used in the plot, default is all. xnames : None or list of strings Names for the numbers given in exog_idx. Default is results.model.exog_names. grid : None or tuple of int (nrows, ncols) If grid is given, then it is used for the arrangement of the subplots. If grid is None, then ncol is one, if there are only 2 subplots, and the number of columns is two otherwise. fig : Matplotlib figure instance, optional If given, this figure is simply returned. Otherwise a new figure is created. Returns ------- fig : Matplotlib figure instance If `fig` is None, the created figure. Otherwise `fig` itself. Notes ----- A subplot is created for each explanatory variable given by exog_idx. The partial regression plot shows the relationship between the response and the given explanatory variable after removing the effect of all other explanatory variables in exog. See Also -------- plot_partregress_ax : Plot partial regression for a single regressor. plot_ccpr References ---------- See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/partregr.htm """ fig = utils.create_mpl_fig(fig) #maybe add option for using wendog, wexog instead y = results.model.endog exog = results.model.exog k_vars = exog.shape[1] #this function doesn't make sense if k_vars=1 if xnames is None: exog_idx = range(k_vars) xnames = results.model.exog_names else: exog_idx = [] for name in xnames: exog_idx.append(results.model.exog_names.index(name)) if not grid is None: nrows, ncols = grid else: if len(exog_idx) > 2: nrows = int(np.ceil(len(exog_idx)/2.)) ncols = 2 title_fontsize = 'small' else: nrows = len(exog_idx) ncols = 1 title_fontsize = None for i,idx in enumerate(exog_idx): others = range(k_vars) others.pop(idx) exog_others = exog[:, others] ax = fig.add_subplot(nrows, ncols, i+1) plot_partregress_ax(y, exog[:, idx], exog_others, ax=ax, varname=xnames[i]) return fig
c858b08b732bcd4b325c548ba59bed76316b5551
14,292
def ufloats_overlap_range(ufloats, vmin, vmax):
    """Return whether the +/- 1 sigma range of every value overlaps [vmin, vmax]."""
    vals = []
    sigmas = []
    for val in ufloats:
        if isinstance(val, float):
            vals.append(val)
            sigmas.append(0)
        else:
            vals.append(val.nominal_value)
            sigmas.append(val.std_dev)
    vals = np.array(vals)
    sigmas = np.array(sigmas)
    # An interval [val - sigma, val + sigma] overlaps [vmin, vmax] only if it
    # starts no later than vmax AND ends no earlier than vmin.
    return ((vals - sigmas <= vmax) & (vals + sigmas >= vmin)).all()
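# Hedged usage sketch (illustration only; assumes the ufloat objects come from
# the `uncertainties` package, which provides the nominal_value/std_dev
# attributes used above, and that numpy is available as np).
import numpy as np
from uncertainties import ufloat

values = [ufloat(5.0, 1.0), 7.0]
print(ufloats_overlap_range(values, 5.5, 10.0))   # True: both 1-sigma ranges touch [5.5, 10]
print(ufloats_overlap_range([20.0], 0.0, 10.0))   # False: 20 +/- 0 lies entirely outside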
1dee17437e1ba8904450895a748c9871a9964909
14,293
from typing import Tuple
import itertools
import multiprocessing


def exact_qaoa_values_on_grid(
        graph: nx.Graph,
        xlim: Tuple[float, float] = (0, np.pi / 2),
        ylim: Tuple[float, float] = (-np.pi / 4, np.pi / 4),
        x_grid_num: int = 20,
        y_grid_num: int = 20,
        num_processors: int = 1,
        dtype=np.complex128):
    """Compute exact p=1 QAOA values on a grid.

    Args:
        graph: The graph representing the Hamiltonian.
        xlim: The range of values for gamma.
        ylim: The range of values for beta.
        x_grid_num: The number of grid points along the gamma axis.
        y_grid_num: The number of grid points along the beta axis. The total
            number of points evaluated is x_grid_num * y_grid_num.
        num_processors: The number of worker processes used for the evaluation.
        dtype: The numpy dtype used to build the Hamiltonian.

    Returns:
        A 2-dimensional Numpy array containing the QAOA values. The rows index
        the betas and the columns index the gammas.
    """
    a, b = xlim
    c, d = ylim
    gammas = np.linspace(a, b, x_grid_num)
    betas = np.linspace(c, d, y_grid_num)

    HamC = create_ZZ_HamC(graph, dtype=dtype)
    N = graph.number_of_nodes()

    with multiprocessing.Pool(num_processors) as pool:
        vals = pool.starmap(_ising_qaoa_expectation,
                            [(N, HamC, x, True, dtype)
                             for x in itertools.product(gammas, betas)])

    return np.reshape(np.array(vals), (x_grid_num, y_grid_num)).T
1ac1f93a9716e687983c3c557f5ee19cea8afb2d
14,294
def typecheck_eq(expr, ctxt=[]): """(par (A) (= A A Bool :chainable)) (par (A) (distinct A A Bool :pairwise)) """ typ = typecheck_expr(expr.subterms[0], ctxt) for term in expr.subterms[1:]: t = typecheck_expr(term, ctxt) if t != typ: if not (is_subtype(t, typ) or is_subtype(typ, t)): raise TypeCheckError(expr, term, typ, t) return BOOLEAN_TYPE
78cbf7b3510b30adde74d03a9f0168fdbfbc6bab
14,295
def precision(x, for_sum=False):
    """
    This function returns the precision of a given datatype using a comparable
    numpy array.
    """
    if not for_sum:
        return np.finfo(x.dtype).eps
    else:
        return np.finfo(x.dtype).eps * x.size
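# Hedged usage sketch (illustration only): machine epsilon depends on the dtype,
# and the summation variant scales it by the number of elements.
import numpy as np

x32 = np.zeros(10, dtype=np.float32)
x64 = np.zeros(10, dtype=np.float64)
print(precision(x32))                # ~1.19e-07
print(precision(x64))                # ~2.22e-16
print(precision(x64, for_sum=True))  # ~2.22e-15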
c8d634638c0c8ce43c024d9c342e71adae6534bc
14,296
def parse_line(line, line_count, retries):
    """Coordinate retrieval of scientific name or taxonomy ID.

    Read line from input file, calling functions as appropriate to retrieve
    scientific name or taxonomy ID.

    :param line: str, line from input file
    :param line_count: number of line in input file - enables tracking if an error occurs
    :param retries: parser argument, maximum number of retries permitted if a
        network error is encountered

    Return list of genus, species and taxonomy ID
    """
    line_data = []

    # For taxonomy ID retrieve scientific name
    if line.startswith("NCBI:txid"):
        gs_name = get_genus_species_name(line[9:], line_count, retries)
        line_data = gs_name.split(" ", 1)
        line_data.append(line)
    # For scientific name retrieve taxonomy ID
    else:
        tax_id = get_tax_id(line, line_count, retries)
        line_data = line.split()
        line_data.append(tax_id)

    return line_data
895ae24672221fe78654f4c2796a419640c19d42
14,297
def prop_rotate(old_image, theta, **kwargs):
    """Rotate and shift an image via interpolation (bilinear by default)

    Parameters
    ----------
    old_image : numpy ndarray
        Image to be rotated

    theta : float
        Angle to rotate image in degrees counter-clockwise

    Returns
    -------
    new_image : numpy ndarray
        Returns rotated & shifted image with the same dimensions as the input image

    Other Parameters
    ----------------
    XC, YC : float
        Center of rotation in image pixels; (0,0) is at center of first pixel;
        if not specified, the center of the image is assumed to be the center
        of rotation

    XSHIFT, YSHIFT : float
        Amount to shift rotated image in pixels

    MISSING : float
        Value to set extrapolated pixels.
    """
    if old_image.dtype == np.dtype("complex128") or old_image.dtype == np.dtype("complex64"):
        is_complex = 1
    else:
        is_complex = 0

    new_image = np.copy(old_image)

    if proper.use_cubic_conv:
        n = old_image.shape[0]

        # Pull optional keywords out of kwargs, falling back to the defaults
        # described in the docstring when they are absent.
        XC = kwargs.get("XC", int(n / 2))
        YC = kwargs.get("YC", int(n / 2))
        xshift = kwargs.get("XSHIFT", 0.)
        yshift = kwargs.get("YSHIFT", 0.)
        missing = kwargs.get("MISSING", 0.)

        t = -theta * np.pi / 180.

        x0 = np.arange(n, dtype = np.float64) - XC - xshift

        for j in range(n):
            y0 = j - YC - yshift
            xi = x0 * np.cos(t) - y0 * np.sin(t) + YC
            yi = x0 * np.sin(t) + y0 * np.cos(t) + XC
            new_image[j,:] = proper.prop_cubic_conv(old_image, xi, yi, GRID = False)
    else:
        theta = -1. * theta
        if is_complex:
            new_image.real = rotate(old_image.real, theta, reshape = False, prefilter = True)
            new_image.imag = rotate(old_image.imag, theta, reshape = False, prefilter = True)
        else:
            new_image = rotate(old_image, theta, reshape = False, prefilter = False)

    return new_image
b7c94899aba6dc5507bba1f1231954740dfbae1e
14,298
from typing import Dict from typing import Any def append_tf_example(data: Dict[Text, Any], schema: Dict[Text, Any]) -> tf.train.Example: """Add tf example to row""" feature = {} for key, value in data.items(): data_type = schema[key] value = CONVERTER_MAPPING[data_type](value) if data_type == DataType.INT: feature[key] = tf.train.Feature( int64_list=tf.train.Int64List(value=value)) elif data_type == DataType.FLOAT: feature[key] = tf.train.Feature( float_list=tf.train.FloatList(value=value)) elif data_type == DataType.BYTES: feature[key] = tf.train.Feature( bytes_list=tf.train.BytesList(value=value)) else: feature[key] = tf.train.Feature( bytes_list=tf.train.BytesList(value=value)) tf_example = tf.train.Example(features=tf.train.Features(feature=feature)) return tf_example
15fb71794c4e87923197927d80597a8f0e960690
14,299
def can_login(email, password): """Validation login parameter(email, password) with rules. return validation result True/False. """ login_user = User.find_by_email(email) return login_user is not None and argon2.verify(password, login_user.password_hash)
41908f753efa1075d6583ee8a6159011bd8af661
14,300
def toPlanar(arr: np.ndarray, shape: tuple = None) -> np.ndarray: """ Converts interleaved frame into planar Args: arr (numpy.ndarray): Interleaved frame shape (tuple, optional): If provided, the interleaved frame will be scaled to specified shape before converting into planar Returns: numpy.ndarray: Planar frame """ if shape is None: return arr.transpose(2, 0, 1) return cv2.resize(arr, shape).transpose(2, 0, 1)
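# Hedged usage sketch (illustration only; assumes OpenCV (cv2) and numpy are
# importable, as the function itself does): an interleaved HWC frame becomes
# CHW, and an optional (width, height) tuple resizes before the transpose.
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)
print(toPlanar(frame).shape)              # (3, 480, 640)
print(toPlanar(frame, (300, 300)).shape)  # (3, 300, 300)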
0f54b14b72a05fe0b20bdfd14c31084aa9c917ca
14,301
def downsample_grid( xg: np.ndarray, yg: np.ndarray, distance: float, mask: np.ndarray = None ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ Downsample grid locations to approximate spacing provided by 'distance'. Notes ----- This implementation is more efficient than the 'downsample_xy' function for locations on a regular grid. :param xg: Meshgrid-like array of Easting coordinates. :param yg: Meshgrid-like array of Northing coordinates. :param distance: Desired coordinate spacing. :param mask: Optionally provide an existing mask and return the union of the two masks and it's effect on xg and yg. :return: mask: Boolean mask that was applied to xg, and yg. :return: xg[mask]: Masked input array xg. :return: yg[mask]: Masked input array yg. """ u_diff = lambda u: np.unique(np.diff(u, axis=1))[0] v_diff = lambda v: np.unique(np.diff(v, axis=0))[0] du = np.linalg.norm(np.c_[u_diff(xg), u_diff(yg)]) dv = np.linalg.norm(np.c_[v_diff(xg), v_diff(yg)]) u_ds = np.max([int(np.rint(distance / du)), 1]) v_ds = np.max([int(np.rint(distance / dv)), 1]) downsample_mask = np.zeros_like(xg, dtype=bool) downsample_mask[::v_ds, ::u_ds] = True if mask is not None: downsample_mask &= mask return downsample_mask, xg[downsample_mask], yg[downsample_mask]
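# Hedged usage sketch (illustration only): a 10 x 10 grid with unit spacing
# downsampled to roughly double the spacing keeps every other node in each
# direction.
import numpy as np

xg, yg = np.meshgrid(np.arange(10.0), np.arange(10.0))
mask, xd, yd = downsample_grid(xg, yg, distance=2.0)
print(mask.sum(), xd.size, yd.size)  # 25 25 25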
6ba36486bc081b85670918c63b8c1f183c284503
14,303
def convert_12bit_to_type(image, desired_type=np.uint8): """ Converts the 12-bit tiff from a 6X sensor to a numpy compatible form :param desired_type: The desired type :return: The converted image in numpy.array format """ image = image / MAX_VAL_12BIT # Scale to 0-1 image = np.iinfo(desired_type).max * image # Scale back to desired type return image.astype(desired_type)
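# Hedged usage sketch (illustration only; assumes the module-level MAX_VAL_12BIT
# constant equals 4095, the full 12-bit range implied by the docstring).
import numpy as np

raw = np.array([0, 2048, 4095], dtype=np.uint16)
print(convert_12bit_to_type(raw))             # [  0 127 255], dtype uint8
print(convert_12bit_to_type(raw, np.uint16))  # [    0 32775 65535], dtype uint16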
6a3287946d1f56f57c6a44fc3f797753ebcd251a
14,304
def dm_hdu(hdu): """ Compute DM HDU from the actual FITS file HDU.""" if lsst.afw.__version__.startswith('12.0'): return hdu + 1 return hdu
87d93549f3d45ae060ced0f103065b6221e343db
14,305