content: string (length 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
import base64


def int_to_base64(i: int) -> str:
    """Return a 12-character base64 representation of i."""
    # b64encode returns bytes; decode so the annotated str is actually returned
    return base64.b64encode(i.to_bytes(8, 'big')).decode('ascii')
5bd7bb032926a8f429d766632c2ef2af9ee01edc
16,514
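A round-trip check for the encoder above; int_from_base64 is a hypothetical inverse written here for illustration, not part of the original snippet:

import base64

def int_from_base64(s: str) -> int:
    # hypothetical inverse: decode the 12 base64 chars back into 8 bytes
    return int.from_bytes(base64.b64decode(s), 'big')

assert int_from_base64(int_to_base64(123456789)) == 123456789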
def payment_provider(provider_base_config):
    """Fixture for cases where it doesn't matter whether the request is
    contained within the provider."""
    return TurkuPaymentProviderV3(config=provider_base_config)
d6439a5ef097350682a2e17ccc41aeba1310a78a
16,515
import re


def gradle_extract_data(build_gradle):
    """
    Extract the project name and dependencies from a build.gradle file.

    :param Path build_gradle: The path of the build.gradle file
    :rtype: dict
    """
    # Content for dependencies
    content_build_gradle = extract_content(build_gradle)
    match = re.search(r'apply plugin: ("|\')org.ros2.tools.gradle\1', content_build_gradle)
    if not match:
        raise RuntimeError(
            "Gradle plugin missing, please add the following to build.gradle: "
            "\"apply plugin: 'org.ros2.tools.gradle'\"")
    return extract_data(build_gradle)
dd802b8fedb682493a1978ae6cd60be9706580ff
16,516
from typing import Sequence

import torch
import torch.nn.functional as F  # needed for F.pad below


def stack_batch_img(
    img_tensors: Sequence[torch.Tensor], divisible: int = 0, pad_value: float = 0
) -> torch.Tensor:
    """
    Pad images to a common (optionally divisible) size and stack them into a batch.

    :param img_tensors (Sequence[torch.Tensor]): images to stack
    :param divisible (int): round the padded size up to a multiple of this value
    :param pad_value (float): value to pad
    :return: torch.Tensor
    """
    assert len(img_tensors) > 0
    assert isinstance(img_tensors, (tuple, list))
    assert divisible >= 0
    img_heights = []
    img_widths = []
    for img in img_tensors:
        assert img.shape[:-2] == img_tensors[0].shape[:-2]
        img_heights.append(img.shape[-2])
        img_widths.append(img.shape[-1])
    max_h, max_w = max(img_heights), max(img_widths)
    if divisible > 0:
        max_h = (max_h + divisible - 1) // divisible * divisible
        max_w = (max_w + divisible - 1) // divisible * divisible

    batch_imgs = []
    for img in img_tensors:
        padding_size = [0, max_w - img.shape[-1], 0, max_h - img.shape[-2]]
        batch_imgs.append(F.pad(img, padding_size, value=pad_value))
    return torch.stack(batch_imgs, dim=0).contiguous()
9952965a89688d742a3342804062cb8051f47f54
16,517
import numpy as np

from evo.core import lie_algebra as lie
from evo.core import trajectory  # needed for trajectory.PoseTrajectory3D below


def convert_rel_traj_to_abs_traj(traj):
    """
    Converts a relative-pose trajectory to an absolute-pose trajectory.
    The incoming trajectory is processed element-wise: the pose at each
    timestamp is appended to the absolute pose from the previous timestamp.

    Args:
        traj: A PoseTrajectory3D object with timestamps as indices containing,
            at a minimum, columns representing the xyz position and wxyz
            quaternion-rotation at each timestamp, corresponding to the pose
            between previous and current timestamps.

    Returns:
        A PoseTrajectory3D object with xyz position and wxyz quaternion fields
        for the absolute pose trajectory corresponding to the relative one
        given in `traj`.
    """
    new_poses = [lie.se3()]  # origin at identity
    for i in range(0, len(traj.timestamps)):
        abs_pose = np.dot(new_poses[-1], traj.poses_se3[i])
        new_poses.append(abs_pose)
    return trajectory.PoseTrajectory3D(timestamps=traj.timestamps[1:],
                                       poses_se3=new_poses)
57e4972f5bc4ea67bf62b88ea87fc5df8dda0d7c
16,518
from flask import abort, redirect, render_template, request, session, url_for

import model  # application model package assumed by the original snippet


def remove(handle):
    """The remove action allows users to remove a roommate."""
    user_id = session['user']
    roommate = model.roommate.get_roommate(user_id, handle)
    # Check if roommate exists
    if not roommate:
        return abort(404)
    if request.method == 'POST':
        model.roommate.delete_roommate(roommate.id)
        return redirect(url_for('roommate.overview'))
    return render_template('/roommate/remove.jinja', roommate=roommate)
b1a279989d3cb463d54c8559352f2ae67f198b40
16,519
def maxsubarray(list):
    """
    Find a maximum subarray following this idea:

    Knowing a maximum subarray of list[0..j], find a maximum subarray of
    list[0..j+1] which is either
      (I)  the maximum subarray of list[0..j], or
      (II) a maximum subarray list[i..j+1] for some 0 <= i <= j.

    We can determine (II) in constant time by keeping a max subarray ending at
    the current j. This is done in the first if of the loop, where the max
    subarray ending at j is max(previousSumUntilJ + array[j], array[j]).
    This works because if array[j] + sum-so-far is less than array[j], then
    the sum of the subarray so far is negative (and less than array[j] in
    case it is also negative), so it has a bad impact on the subarray-until-j
    sum and we can safely discard it and start anew from array[j].

    Complexity (n = length of list):
        Time complexity:  O(n)
        Space complexity: O(1)
    """
    if len(list) == 0:
        return (-1, -1, 0)
    # keep the max sum of subarray ending in position j
    maxSumJ = list[0]
    # keep the starting index of the maxSumJ
    maxSumJStart = 0
    # keep the sum of the maximum subarray found so far
    maxSum = list[0]
    # keep the starting index of the current max subarray found
    maxStart = 0
    # keep the ending index of the current max subarray found
    maxEnd = 0
    for j in range(1, len(list)):
        if maxSumJ + list[j] >= list[j]:
            maxSumJ = maxSumJ + list[j]
        else:
            maxSumJ = list[j]
            maxSumJStart = j
        if maxSum < maxSumJ:
            maxSum = maxSumJ
            maxStart = maxSumJStart
            maxEnd = j
    return (maxSum, maxStart, maxEnd)
a991ca09c0594b0d47eb4dd8be44d093d593cd36
16,520
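A quick sanity check of the Kadane-style scan above on the classic test vector; the maximum subarray [4, -1, 2, 1] spans indices 3..6 and sums to 6:

total, start, end = maxsubarray([-2, 1, -3, 4, -1, 2, 1, -5, 4])
assert (total, start, end) == (6, 3, 6)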
def get_merged_threadlocal(bound_logger: BindableLogger) -> Context:
    """
    Return a copy of the current thread-local context merged with the context
    from *bound_logger*.

    .. versionadded:: 21.2.0
    """
    ctx = _get_context().copy()
    ctx.update(structlog.get_context(bound_logger))
    return ctx
03c2689fd71542c7c007512fb4c2bf76a841a7bc
16,521
def sort_cipher_suites(cipher_suites, ordering):
    """Sorts the given queryset of CipherSuite instances in a specific order."""
    if ordering == 'asc':
        return cipher_suites.order_by('name')
    elif ordering == 'desc':
        return cipher_suites.order_by('-name')
    else:
        return cipher_suites
5a554ba1e2e4d82f53f29c5a1c2f4d311f538889
16,522
import numpy as np


def make_1D_distributions(lims, n_points, all_shifts, all_errs,
                          norm=None, max_shifts=None, seed=None):
    """
    Generate 1D distributions of chemical shifts from arrays of shifts and
    errors of each distribution.

    Inputs:
        - lims          Limits of the distributions
        - n_points      Number of points in the distributions
        - all_shifts    Array of shifts for each distribution
        - all_errs      Array of predicted error for each distribution
        - norm          Distribution normalization to apply
                          None:  no normalization
                          "max": top of each distribution set to 1
        - max_shifts    Maximum number of shifts to consider when
                        constructing the distribution
        - seed          Seed for the random selection of shifts

    Outputs:
        - x             Array of shielding values to plot the distributions against
        - ys            List of distributions
    """
    # Construct the array of shielding values
    x = np.linspace(lims[0], lims[1], n_points)
    # Generate the distributions
    ys = []
    for i, (sh, er) in enumerate(zip(all_shifts, all_errs)):
        print("  Constructing distribution {}/{}...".format(i + 1, len(all_shifts)))
        ys.append(make_1D_distribution(x, sh, er, norm=norm,
                                       max_shifts=max_shifts, seed=seed))
    print("  Distribution constructed!\n")
    return x, ys
87c48b80dc395b4423b88fcbb3307dd53655333e
16,523
import pandas as pd


def fill_column_values(df, icol=0):
    """
    Fills empty values in the targeted column with the value above it.

    Parameters
    ----------
    df: pandas.DataFrame
    icol: int

    Returns
    -------
    pandas.DataFrame
    """
    v = df.iloc[:, icol].fillna('').values.tolist()
    vnew = fill_gaps(v)
    dfnew = df.copy()  # type: pd.DataFrame
    dfnew.iloc[:, icol] = vnew
    return dfnew
158939f6436a4c9b5a13a18567ee6061e71df51c
16,524
import torch


def reward(static, tour_indices):
    """
    Euclidean distance between all cities / nodes given by tour_indices
    """
    # Convert the indices back into a tour
    idx = tour_indices.unsqueeze(1).expand(-1, static.size(1), -1)
    tour = torch.gather(static.data, 2, idx).permute(0, 2, 1)

    # Ensure we're always returning to the depot - note the extra concat
    # won't add any extra loss, as the euclidean distance between consecutive
    # points is 0
    start = static.data[:, :, 0].unsqueeze(1)
    y = torch.cat((start, tour, start), dim=1)

    # Euclidean distance between each consecutive point
    tour_len = torch.sqrt(torch.sum(torch.pow(y[:, :-1] - y[:, 1:], 2), dim=2))
    return tour_len.sum(1)
f7197bcfb3699cafa4df3c1430b4f9ee1bf53242
16,525
def valid_review_queue_name(request):
    """
    Given a name for a queue, validates its correctness for our review system.

    :param request:
    :return:
    """
    queue = request.matchdict.get('queue')
    if queue in all_queues:
        request.validated['queue'] = queue
        return True
    else:
        _tn = Translator(get_language_from_cookie(request))
        add_error(request, 'Invalid queue', _tn.get(_.internalError))
        return False
fc6ef2fb728b18ce84669736f0e4ec1f020ea2bf
16,526
def get_best_straight(possible_straights, hand):
    """
    Get the value of the strongest straight the hand makes;
    if it makes no straight, return 0.

    :param possible_straights: ({tuple(str): int}) map tuple of connecting
        cards --> best straight value they make
    :param hand: (set(str)) set of strings
    :return: (int) top value in the straight, or 0 if no straight
    """
    highest_straight_value = 0  # e.g. 14 for broadway, 5 for the wheel
    hand_values = set(
        ranks_to_sorted_values(
            ranks=[r for r, _ in hand],
            aces_high=True,
            aces_low=True
        )
    )
    for connecting_values, max_value in possible_straights.items():
        connecting_cards = set(connecting_values) & hand_values
        if len(connecting_cards) == 2:
            # we've made a straight!
            if max_value > highest_straight_value:
                highest_straight_value = max_value
    return highest_straight_value
f2a470ef3033cac27cb406702daead42d59683aa
16,528
from django.shortcuts import render_to_response
from django.template import RequestContext  # RequestContext lives in django.template


def stats(request):
    """
    Display statistics for the web site
    """
    views = list(View.objects.all().only('internal_url', 'browser'))
    urls = {}
    mob_vs_desk = {'desktop': 0, 'mobile': 0}
    for view in views:
        if is_mobile(view.browser):
            mob_vs_desk['mobile'] += 1
        else:
            mob_vs_desk['desktop'] += 1
        if view.internal_url not in urls:  # dict.has_key() was removed in Python 3
            urls[view.internal_url] = 0
        urls[view.internal_url] += 1

    stats = []
    count = 0
    for url in urls:
        stats.append({'url': url, 'count': urls[url]})
        count += urls[url]
    stats = sorted(stats, key=lambda k: k['count'], reverse=True)

    return render_to_response('admin/appview/view/display_stats.html',
                              RequestContext(request, {
                                  'stats': stats,
                                  'total': count,
                                  'views': mob_vs_desk
                              }))
3b63250e6ce3c9ddd09ec8d19c9961b22bfab62a
16,529
def build_argparser():
    """
    Builds argument parser.

    :return argparse.ArgumentParser
    """
    banner = "%(prog)s - generate a static file representation of a PEP data repository."
    additional_description = "\n..."
    parser = _VersionInHelpParser(
        description=banner,
        epilog=additional_description)

    parser.add_argument(
        "-V", "--version",
        action="version",
        version="%(prog)s {v}".format(v=__version__))

    parser.add_argument(
        "-d", "--data",
        required=False,
        default=PEPHUB_URL,
        help="URL/Path to PEP storage tree.")

    parser.add_argument(
        "-o", "--out",
        required=False,
        default="./out",
        help="Outpath for generated PEP tree.")

    parser.add_argument(
        "-p", "--path",
        required=False,
        help="Path to serve the file server at.")

    # parser for serve command
    subparsers = parser.add_subparsers(help="Functions", dest="serve")
    serve_parser = subparsers.add_parser(
        "serve", help="Serve a directory using python's built-in http library")
    serve_parser.set_defaults(func=serve_directory)
    serve_parser.add_argument(
        "-f", "--files",
        required=False,
        help="Files to serve.",
        default="./out")

    return parser
f33679c82a1499db83caf3473b0e5403ebfa52fe
16,530
def abc19():
    """Solution to exercise C-1.19.

    Demonstrate how to use Python's list comprehension syntax to produce
    the list [a, b, c, ..., z], but without having to type all 26 such
    characters literally.
    """
    a_idx = 97  # ord('a')
    return [chr(a_idx + x) for x in range(26)]
c9bb948ad57ddbc138dfbc0c481fabb45de620ba
16,531
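A quick check of the exercise solution above against the standard-library constant it reproduces:

import string

assert abc19() == list(string.ascii_lowercase)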
def filter_words(data: TD_Data_Dictionary):
    """Remove all instances of Key.ctrl from the list of keys, and any
    repeats caused by Press and Release events."""
    # NOTE: We may just want to remove all instances of Key.ctrl from the
    # list and anything that follows that
    keys = data.get_letters()
    return keys
fb34e1758c83af0e30b5ae807a3f852ab7e3be29
16,533
from http.client import HTTPSConnection
from ssl import SSLContext
from typing import Dict


def check_url_secure(
    docker_ip: str,
    public_port: int,
    *,
    auth_header: Dict[str, str],
    ssl_context: SSLContext,
) -> bool:
    """
    Secure form of lovely/pytest/docker/compose.py::check_url() that checks
    when the secure docker registry service is operational.

    Args:
        docker_ip: IP address on which the service is exposed.
        public_port: Port on which the service is exposed.
        auth_header: HTTP basic authentication header to use when connecting
            to the service.
        ssl_context: SSL context referencing the trusted root CA certificate
            to use when negotiating the TLS connection.

    Returns:
        (bool) True when the service is operational, False otherwise.
    """
    try:
        https_connection = HTTPSConnection(
            context=ssl_context, host=docker_ip, port=public_port
        )
        https_connection.request("HEAD", "/v2/", headers=auth_header)
        return https_connection.getresponse().status < 500
    except Exception:  # pylint: disable=broad-except
        return False
ebdc8f4d175f3be70000022424382f71d9fd73b5
16,534
def ResNet101(pretrained=False, use_ssld=False, **kwargs):
    """
    ResNet101

    Args:
        pretrained: bool=False or str. If `True` load pretrained parameters,
            `False` otherwise. If str, means the path of the pretrained model.
        use_ssld: bool=False. Whether using distillation pretrained model when
            pretrained=True.

    Returns:
        model: nn.Layer. Specific `ResNet101` model depends on args.
    """
    model = ResNet(config=NET_CONFIG["101"], version="vb", **kwargs)
    _load_pretrained(pretrained, model, MODEL_URLS["ResNet101"], use_ssld)
    return model
0277c59f9b60d5c6127fb1021eb71b10691bd0f8
16,535
def LineTextInCurrentBuffer(line_number):
    """Returns the text on the 1-indexed line (NOT 0-indexed)."""
    return vim.current.buffer[line_number - 1]
8c3b51a48e25e8955a00d89619da9e191612861a
16,536
def imported_instrumentor(library):
    """
    Convert a library name to that of the correlated auto-instrumentor in the
    libraries package.
    """
    instrumentor_lib = "signalfx_tracing.libraries.{}_".format(library)
    return get_module(instrumentor_lib)
db26277b23f989d8d5323c7c6bde0905b1e2f5ef
16,537
import datetime  # `import datetime` so datetime.datetime(...) below resolves


def parse_runtime(log_file):
    """Parse the job run-time from a log file, using its first and last lines."""
    with open(log_file, 'r') as f:
        for line in f:
            l0 = line.rstrip("\n")
            break
    l1 = tail(log_file, 1)[0].rstrip("\n")
    l0 = l0.split()[:2]
    l1 = l1.split()[:2]
    try:
        y0, m0, d0 = list(map(int, l0[0].split('-')))
        h0, min0, s0 = list(map(float, l0[1][:-1].split(':')))
    except ValueError as e:
        print(log_file)
        print(l0)
        raise e
    try:
        y1, m1, d1 = list(map(int, l1[0].split('-')))
        h1, min1, s1 = list(map(float, l1[1][:-1].split(':')))
    except ValueError as e:
        print(log_file)
        print(l1)
        raise e
    date0 = datetime.datetime(y0, m0, d0, int(h0), int(min0), int(s0))
    date1 = datetime.datetime(y1, m1, d1, int(h1), int(min1), int(s1))
    diff = (date1 - date0).total_seconds()
    return diff
75a5a80409918779173eb1e80d6b3f95abf242cb
16,539
def calculateEMA(coin_pair, period, unit):
    """
    Returns the Exponential Moving Average for a coin pair
    """
    closing_prices = getClosingPrices(coin_pair, period, unit)
    previous_EMA = calculateSMA(coin_pair, period, unit)
    constant = 2 / (period + 1)  # smoothing constant; was computed but never used
    current_EMA = (closing_prices[-1] * constant) + (previous_EMA * (1 - constant))
    return current_EMA
ec884f89c2e8e64ada4384767251d6722c7b63c8
16,540
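A worked instance of the smoothing formula used above, EMA = c*price + (1 - c)*previous_EMA with c = 2/(period + 1); the numbers are illustrative only:

period = 9
c = 2 / (period + 1)        # 0.2
previous_EMA = 100.0        # illustrative prior EMA
last_close = 105.0          # illustrative latest closing price
current_EMA = last_close * c + previous_EMA * (1 - c)
assert abs(current_EMA - 101.0) < 1e-9  # 0.2*105 + 0.8*100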
import numpy as np


def euler_method(r0, N):
    """
    Compute the vectors r(t) using Euler's method.

    Args:
        r0 - the initial r-value
        N  - the number of steps in each period
    """
    delta_t = (2 * np.pi) / N   # delta t
    r = np.zeros((5 * N, 2))    # 5Nx2 array
    r[0] = r0                   # initial r-value
    # antisymmetric matrix (meaning its transpose equals its negative)
    J = np.array(([0, 1], [-1, 0]))
    for i in range(1, 5 * N):
        r[i] = r[i - 1] + delta_t * (J @ r[i - 1])  # Euler's method
    return r
6ac3deae5cdb5ce84fa19433b55de80bf04ddf47
16,541
def main_menu(found_exists):
    """Prints the main menu, asks for user input, and
    returns the task chosen by that input."""
    show_main_menu(found_exists)
    inp = input(">> ")
    if inp == "1":
        return "update"
    elif inp == "2":
        return "show_all"
    elif inp == "3":
        return "show_waypoints"
    elif inp == "4":
        return "map-menu"
    elif inp == "5":
        return "show_one"
    elif inp == "6":
        return "search"
    elif inp == "7" and found_exists:
        return "show_founds"
    elif inp == "8" and found_exists:
        return "exit"
    elif inp == "7" and not found_exists:
        return "exit"
    else:
        print("Invalid input!")
61d0bda6a1ddf8bf70a79ff6e7488601d781c5fc
16,542
def fromPsl(psl, qCdsRange=None, inclUnaln=False, projectCds=False, contained=False):
    """Generate a PairAlign from a PSL. cdsRange is None or a tuple. If
    inclUnaln is True, then include Block objects for unaligned regions."""
    qCds = _getCds(qCdsRange, psl.qStrand, psl.qSize)
    qSeq = _mkPslSeq(psl.qName, psl.qStart, psl.qEnd, psl.qSize, psl.qStrand, qCds)
    tSeq = _mkPslSeq(psl.tName, psl.tStart, psl.tEnd, psl.tSize, psl.tStrand)
    aln = PairAlign(qSeq, tSeq)
    prevBlk = None
    for i in range(psl.blockCount):
        prevBlk = _addPslBlk(psl.blocks[i], aln, prevBlk, inclUnaln)
    if projectCds and (aln.qSeq.cds is not None):
        aln.projectCdsToTarget(contained)
    return aln
f1da225d53f36abf5d10589077de934f13c1ca2a
16,543
from typing import Optional

import pybel  # needed for pybel.BELGraph below


def get_graph(identifier: str, *, rows: Optional[int] = None) -> pybel.BELGraph:
    """Get the graph surrounding a given GO term and its descendants."""
    graph = pybel.BELGraph()
    enrich_graph(graph, identifier, rows=rows)
    return graph
fc004ebd3cdfa70edd01b611987dfd48306ceb80
16,545
import numpy as np


def root_histogram_shape(root_hist, use_matrix_indexing=True):
    """
    Return a tuple corresponding to the shape of the histogram.

    If use_matrix_indexing is true, the tuple is in 'reversed' zyx order
    (matrix order is the layout used in the internal buffer of the root
    histogram - keep True if reshaping the array).
    """
    dim = root_hist.GetDimension()
    shape = np.array([root_hist.GetNbinsZ(),
                      root_hist.GetNbinsY(),
                      root_hist.GetNbinsX()][3 - dim:]) + 2
    if not use_matrix_indexing:
        shape = reversed(shape)
    return tuple(shape)
8df83a84f0a3b12bab248949042cd2df5df6f53e
16,548
from typing import Union


def get_weather_sensor_by(
    weather_sensor_type_name: str, latitude: float = 0, longitude: float = 0
) -> Union[WeatherSensor, ResponseTuple]:
    """
    Search a weather sensor by type and location.
    Can create a weather sensor if needed (depends on API mode)
    and then inform the requesting user which one to use.
    """
    # Look for the WeatherSensor object
    weather_sensor = (
        WeatherSensor.query.filter(
            WeatherSensor.weather_sensor_type_name == weather_sensor_type_name
        )
        .filter(WeatherSensor.latitude == latitude)
        .filter(WeatherSensor.longitude == longitude)
        .one_or_none()
    )
    if weather_sensor is None:
        create_sensor_if_unknown = False
        if current_app.config.get("FLEXMEASURES_MODE", "") == "play":
            create_sensor_if_unknown = True

        # either create a new weather sensor and post to that
        if create_sensor_if_unknown:
            current_app.logger.info("CREATING NEW WEATHER SENSOR...")
            weather_sensor = WeatherSensor(
                name="Weather sensor for %s at latitude %s and longitude %s"
                % (weather_sensor_type_name, latitude, longitude),
                weather_sensor_type_name=weather_sensor_type_name,
                latitude=latitude,
                longitude=longitude,
            )
            db.session.add(weather_sensor)
            # flush so that we can reference the new object in the current db session
            db.session.flush()

        # or query and return the nearest sensor and let the requesting user
        # post to that one
        else:
            nearest_weather_sensor = WeatherSensor.query.order_by(
                WeatherSensor.great_circle_distance(
                    latitude=latitude, longitude=longitude
                ).asc()
            ).first()
            if nearest_weather_sensor is not None:
                return unrecognized_sensor(
                    nearest_weather_sensor.latitude,
                    nearest_weather_sensor.longitude,
                )
            else:
                return unrecognized_sensor()
    return weather_sensor
b4feb0a75709d1bf27378df6d90420c74e36646c
16,550
import logging

import numpy as np
import six

logger = logging.getLogger(__name__)  # module-level logger assumed by the snippet


def _npy_loads(data):
    """
    Deserializes npy-formatted bytes into a numpy array
    """
    logger.info("Inside _npy_loads fn")
    stream = six.BytesIO(data)
    return np.load(stream, allow_pickle=True)
5e9ee0a0d41403af0a8e1ed41f6d15a677d82c44
16,551
import dateutil.parser  # `import dateutil` alone does not expose .parser


def parse_string(string):
    """Parse the string to a datetime object.

    :param str string: The string to parse
    :rtype: `datetime.datetime`
    :raises: :exc:`InvalidDateFormat` when date format is invalid
    """
    try:
        # Try to parse string as a date
        value = dateutil.parser.parse(string)
    except (OverflowError, TypeError, ValueError):
        raise InvalidDateFormat("Invalid date format %r" % (string, ))
    return value
6db2edad31f1febced496c92bfb2d7d76761850a
16,552
def get_elfs_oriented(atoms, density, basis, mode, view=serial_view()):
    """
    Outdated, use get_elfs() with "mode='elf'/'nn'" instead.

    Like get_elfs, but returns real, oriented elfs.
    mode = {'elf': Use the ElF algorithm to orient fingerprint,
            'nn': Use nearest neighbor algorithm}
    """
    return get_elfs(atoms, density, basis, view, orient_mode=mode)
36b5abe66e9054ab49a25eca753d4a61148a1b1c
16,553
def error_logger(param=None):
    """
    Function to get an error logger, an object of the Logger class.

    @param param: Custom parameter that can be passed to the logger.
    @return: custom logger
    """
    logger = Logger('ERROR_LOGGER', param)
    return logger.get_logger()
ca6449c2e63ebdccbd7bd3993dc1d11375e66e29
16,555
import numpy as np


def get_iou(mask, label):
    """
    :param mask: predicted mask with 0 for background and 1 for object
    :param label: label
    :return: iou
    """
    # mask = mask.numpy()
    # label = labels.numpy()
    size = mask.shape
    mask = mask.flatten()
    label = label.flatten()
    m = mask + label
    i = len(np.argwhere(m == 2))   # intersection: pixels where both are 1
    u = len(np.argwhere(m != 0))   # union: pixels where either is 1
    # Both empty: define IoU as 1. This check must run before u is replaced
    # below, otherwise it can never fire.
    if i == 0 and u == 0:
        return 1.0
    if u == 0:
        u = size[0] * size[1]
    iou = float(i) / u
    return iou
9322d0184a3e28bdd1d5bf3214b7fbe8936d6a21
16,557
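A tiny sanity check for the IoU helper above on 2x2 binary arrays (intersection 1 pixel, union 2 pixels):

import numpy as np

pred = np.array([[1, 1], [0, 0]])
truth = np.array([[1, 0], [0, 0]])
assert get_iou(pred, truth) == 0.5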
from typing import Any, List, Set


def mean_jaccard_distance(sets: List[Set[Any]]) -> float:
    r"""
    Compute the mean Jaccard distance for sets A_1, \dots, A_n:

        d = \frac{1}{\binom{n}{2}} \sum_{i=1}^{n-1} \sum_{j=i+1}^{n} (1 - J(A_i, A_j))

    where J(A, B) is the Jaccard index between sets A and B and 1 - J(A, B)
    is the Jaccard distance.
    """
    n = len(sets)
    assert n > 0
    if n == 1:
        return 0
    else:
        d = 0.0
        for i in range(n - 1):
            for j in range(i + 1, n):
                d += 1 - jaccard_index(sets[i], sets[j])
        d /= n * (n - 1) / 2
        return d
efbfce8092e2e3a9b5b076c46a636dfa17e2d266
16,558
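The jaccard_index helper is not part of the snippet above; a minimal sketch consistent with its usage, plus a worked check (pairwise distances 2/3, 0, 2/3, so the mean is 4/9):

def jaccard_index(a: set, b: set) -> float:
    # |A intersect B| / |A union B|; define J = 1 for two empty sets
    union = a | b
    return len(a & b) / len(union) if union else 1.0

sets = [{1, 2}, {2, 3}, {1, 2}]
assert abs(mean_jaccard_distance(sets) - 4 / 9) < 1e-12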
import numpy as np


def nx_find_connected(graph, start_set, end_set, cutoff=np.inf):
    """Return the nodes in end_set connected to start_set."""
    reachable = []
    for end in end_set:
        if nx_is_reachable(graph, end, start_set):
            reachable.append(end)
        if len(reachable) >= cutoff:
            break
    return reachable
a3feb8a172bb610fa4416c6f4a4c0558540d2190
16,559
def svn_client_proplist(*args):
    """
    svn_client_proplist(char target, svn_opt_revision_t revision,
        svn_boolean_t recurse, svn_client_ctx_t ctx,
        apr_pool_t pool) -> svn_error_t
    """
    return _client.svn_client_proplist(*args)
1cc82161292df7b9ba284397a0dcd55da9d0d7c1
16,560
import numpy as np
import pandas as pd


def dev_transform(signal, input_path='../data/', is_denoised=True):
    """
    Normalization function that transforms each feature based on the scaling
    of the training set. This transformation should be done on the test set
    (developmental set), or on any new input for a trained neural network.
    Due to the denoising step in the normal_seq function, this transformation
    cannot reproduce the exact initial sequences; instead it transforms to
    the scale of the denoised version of the training set.

    Parameters
    ----------
    signal : numpy array or pandas dataframe in the shape of (n_samples, n_features)
    input_path : str, default='../data/'
    is_denoised : boolean
        Specifies whether the original sequence was denoised by a threshold;
        set it to False if q=None was used in the normal_seq function.

    Returns
    -------
    transformed : numpy array
        a normalised sequence or features
    """
    transformed = []
    if isinstance(signal, pd.DataFrame):
        signal = signal.to_numpy(copy=True)
    elif isinstance(signal, list):
        signal = np.array(signal)
    scales = pd.read_csv(input_path + 'min_max_inputs.csv')
    max_element = scales.to_numpy(copy=True)[1, 1:]
    min_element = scales.to_numpy(copy=True)[0, 1:]
    if signal.ndim == 1:
        if is_denoised is True:
            signal[signal > max_element] = max_element
        transformed.append((signal - min_element) /
                           (max_element - min_element))
    else:
        for i in range(signal.shape[1]):
            if is_denoised is True:
                signal[signal[:, i] > max_element[i]] = max_element[i]
            transformed.append((signal[:, i] - min_element[i]) /
                               (max_element[i] - min_element[i]))
    transformed = np.array(transformed).T  # transpose for correspondence
    return transformed
ce6dfe780bb724ae8036502d2b1d1828fce675dc
16,561
def moveTo(self, parent):
    """Move this element to new parent, as last child"""
    self.getParent().removeChild(self)
    parent.addChild(self)
    return self
40caa9681346db9a6cfb5c95fdb761a9f98e607a
16,562
from datetime import date, datetime  # `date` is needed for the isinstance check


def coerce_to_end_of_day_datetime(value):
    """
    Gets the end-of-day datetime equivalent of the given date object.
    If the value is not a date, it returns the same input.

    :param date value: value to be coerced.

    :rtype: datetime | object
    """
    if not isinstance(value, datetime) and isinstance(value, date):
        return end_of_day(value)
    return value
374e7decf543e5fb40fb7714d4472cf4cfa48cb1
16,563
from numpy import exp  # `exp` is used below but was never imported


def greybody(nu, temperature, beta, A=1.0, logscale=0.0, units='cgs',
             frequency_units='Hz', kappa0=4.0, nu0=3000e9, normalize=max):
    """
    Same as modified blackbody... not sure why I have it at all, though the
    normalization constants are different.
    """
    h, k, c = unitdict[units]['h'], unitdict[units]['k'], unitdict[units]['c']
    modification = (1. - exp(-(nu / nu0)**beta))
    I = blackbody(nu, temperature, units=units, frequency_units=frequency_units,
                  normalize=normalize) * modification
    if normalize and hasattr(I, '__len__'):
        if len(I) > 1:
            return I / normalize(I) * 10.**logscale
        else:
            return I * 10.**logscale
    else:
        return I * 10.**logscale
89cca39acf5659e8ab7b403c5747b19c119d0e51
16,564
from copy import copy  # `import copy` alone would make the copy(WS) call fail

import numpy as np


def GCLarsen_v0(WF, WS, WD, TI,
                pars=[0.435449861, 0.797853685, -0.124807893, 0.136821858, 15.6298, 1.0]):
    """Computes the WindFarm flow and Power using GCLarsen
    [Larsen, 2009, A simple Stationary...]

    Inputs
    ----------
    WF: WindFarm
        Windfarm instance
    WS: list
        Rotor averaged Undisturbed wind speed [m/s] for each WT
    WD: float
        Rotor averaged Undisturbed wind direction [deg] for each WT.
        Meteorological axis. North = 0 [deg], clockwise.
    TI: float
        Rotor averaged turbulence intensity [-] for each WT

    Returns
    -------
    P_WT: ndarray
        Power production of the wind turbines (nWT,1) [W]
    U_WT: ndarray
        Wind speed at hub height (nWT,1) [m/s]
    Ct: ndarray
        Thrust coefficients for each wind turbine (nWT,1) [-]
    """
    Dist, nDownstream, id0 = WF.turbineDistance(np.mean(WD))
    zg = WF.vectWTtoWT[2, :, :]

    # Initialize arrays to NaN
    Ct = np.nan * np.ones([WF.nWT])
    U_WT = copy(WS)
    P_WT = np.nan * np.ones([WF.nWT])

    # Initialize first upstream turbine
    Ct[id0[0]] = WF.WT[id0[0]].get_CT(WS[id0[0]])
    P_WT[id0[0]] = WF.WT[id0[0]].get_P(WS[id0[0]])
    U_WT[id0[0]] = WS[id0[0]]

    for i in range(1, WF.nWT):
        cWT = id0[i]  # Current wind turbine (wake operating)
        cR = WF.WT[cWT].R
        LocalDU = np.zeros([WF.nWT, 1])
        for j in range(i - 1, -1, -1):
            # Loop on the upstream turbines (uWT) of the cWT
            uWT = id0[j]
            uWS = U_WT[uWT]  # Wind speed at wind turbine uWT
            uR = WF.WT[uWT].R
            uCT = Ct[uWT]
            if np.isnan(uCT):
                uCT = WF.WT[uWT].get_CT(uWS)
            # WT2WT vector in wake coordinates
            Dist, _, _ = WF.turbineDistance(WD[uWT])
            x = Dist[0, uWT, cWT]
            y = Dist[1, uWT, cWT]
            z = zg[uWT, cWT]
            r = np.sqrt(y**2. + z**2.)
            # Calculate the wake width of uWT at the position of cWT
            Rw = get_Rw(x, uR, TI[uWT], uCT, pars)[0]
            if (r <= Rw + cR or uWS > 0):
                LocalDU[uWT] = uWS * get_dUeq(x, y, z, cR, uR, uCT, TI[uWT], pars)

        # Wake superposition
        DU = LocalDU.sum()
        U_WT[cWT] = U_WT[cWT] + DU
        if U_WT[cWT] > WF.WT[cWT].u_cutin:
            Ct[cWT] = WF.WT[cWT].get_CT(U_WT[cWT])
            P_WT[cWT] = WF.WT[cWT].get_P(U_WT[cWT])
        else:
            Ct[cWT] = WF.WT[cWT].CT_idle
            P_WT[cWT] = 0.0

    return (P_WT, U_WT, Ct)
a075074b0cee9b36fdf3411804ff4eff2f5fe63b
16,565
def guess_table_address(*args):
    """
    guess_table_address(insn) -> ea_t

    Guess the jump table address (ibm pc specific).

    @param insn (C++: const insn_t &)
    """
    return _ida_ua.guess_table_address(*args)
073773e33b5cf4c59f3a3c892d5a53320c2c1f4b
16,566
import boto3


def get_elbs(account, region):
    """Get elastic load balancers."""
    elb_data = []
    aws_accounts = AwsAccounts()
    if not account:
        session = boto3.session.Session(region_name=region)
        for account_rec in aws_accounts.all():
            elb_data.extend(
                query_elbs_for_account(account_rec, region, session))
    elif account.isdigit() and len(account) == 12:
        session = boto3.session.Session()
        aws_account = aws_accounts.with_number(account)
        if aws_account:
            elb_data.append(
                query_elbs_for_account(aws_account, region, session))
        else:
            return dict(Message="Account not found"), 404
    # print(elb_data)
    return dict(LoadBalancers=elb_data), 200
32b059c7929b0adae3df7b8393fd062f5a281cc3
16,568
import numpy as np


def likelihood_params(ll_mode, mode, behav_tuple, num_induc, inner_dims, inv_link,
                      tbin, jitter, J, cutoff, neurons, mapping_net, C):
    """
    Create the likelihood object.
    """
    if mode is not None:
        kernel_tuples_, ind_list = kernel_used(mode, behav_tuple, num_induc, inner_dims)

    if ll_mode == 'hZIP':
        inv_link_hetero = 'sigmoid'
    elif ll_mode == 'hCMP':
        inv_link_hetero = 'identity'
    elif ll_mode == 'hNB':
        inv_link_hetero = 'softplus'
    else:
        inv_link_hetero = None

    if inv_link_hetero is not None:
        mean_func = np.zeros((inner_dims))
        kt, ind_list = kernel_used(mode, behav_tuple, num_induc, inner_dims)
        gp_lvms = GP_params(ind_list, kt, num_induc, neurons, inv_link, jitter,
                            mean_func, None, learn_mean=True)
    else:
        gp_lvms = None
        inv_link_hetero = None

    if ll_mode == 'IBP':
        likelihood = mdl.likelihoods.Bernoulli(tbin, inner_dims, inv_link)
    elif ll_mode == 'IP':
        likelihood = mdl.likelihoods.Poisson(tbin, inner_dims, inv_link)
    elif ll_mode == 'ZIP' or ll_mode == 'hZIP':
        alpha = .1 * np.ones(inner_dims)
        likelihood = mdl.likelihoods.ZI_Poisson(tbin, inner_dims, inv_link, alpha,
                                                dispersion_mapping=gp_lvms)
        # inv_link_hetero = lambda x: torch.sigmoid(x)/tbin
    elif ll_mode == 'NB' or ll_mode == 'hNB':
        r_inv = 10. * np.ones(inner_dims)
        likelihood = mdl.likelihoods.Negative_binomial(tbin, inner_dims, inv_link,
                                                       r_inv, dispersion_mapping=gp_lvms)
    elif ll_mode == 'CMP' or ll_mode == 'hCMP':
        log_nu = np.zeros(inner_dims)
        likelihood = mdl.likelihoods.COM_Poisson(tbin, inner_dims, inv_link, log_nu,
                                                 J=J, dispersion_mapping=gp_lvms)
    elif ll_mode == 'IG':  # renewal process
        shape = np.ones(inner_dims)
        likelihood = mdl.likelihoods.Gamma(tbin, inner_dims, inv_link, shape,
                                           allow_duplicate=False)
    elif ll_mode == 'IIG':  # renewal process
        mu_t = np.ones(inner_dims)
        likelihood = mdl.likelihoods.invGaussian(tbin, inner_dims, inv_link, mu_t,
                                                 allow_duplicate=False)
    elif ll_mode == 'LN':  # renewal process
        sigma_t = np.ones(inner_dims)
        likelihood = mdl.likelihoods.logNormal(tbin, inner_dims, inv_link, sigma_t,
                                               allow_duplicate=False)
    elif ll_mode == 'U':
        likelihood = mdl.likelihoods.Universal(inner_dims // C, C, inv_link, cutoff,
                                               mapping_net)
    else:
        raise NotImplementedError

    return likelihood
2e817c4fdfdd9a65d138f61166ef8fbb3154460b
16,570
def is_num_idx(k):
    """This key corresponds to """
    return k.endswith("_x") and (k.startswith("tap_x") or k.startswith("sig"))
bd4ed2c9c4a24ae423ec6c738d99b31ace6ec267
16,571
import numpy as np


def convert_to_boolarr(int_arr, cluster_id):
    """
    :param int_arr: array of integers which relate to no, one or multiple clusters
    :param cluster_id: 0=Pleiades, 1=Meingast 1, 2=Hyades, 3=Alpha Per, 4=Coma Ber
    """
    return np.array((np.floor(int_arr / 2**cluster_id) % 2), dtype=bool)
c769ca07ea32a9e0ab0d230cd3574e5b71434de4
16,572
def serialize(root):
    """Serialization is the process of converting a data structure or object
    into a sequence of bits so that it can be stored in a file or memory
    buffer, or transmitted across a network connection link to be
    reconstructed later in the same or another computer environment.

    Design an algorithm to serialize and deserialize a binary tree. There is
    no restriction on how your serialization/deserialization algorithm should
    work. You just need to ensure that a binary tree can be serialized to a
    string and this string can be deserialized to the original tree structure.

    Input:
            1
           / \
          2   3
             / \
            4   5
    Output: [1,2,3,null,null,4,5]

    Each node is labelled with its heap index written in binary: the root is
    "1", a left child appends "0", a right child appends "1".
    """
    queue = [(root, "1")]
    indices = {}
    max_location = 0
    while queue:
        node, location = queue.pop(0)
        current_location = int(location, 2)
        max_location = max(max_location, current_location)
        indices[int(location, 2)] = node.val
        if node.left:
            queue.append((node.left, location + "0"))
        if node.right:
            queue.append((node.right, location + "1"))
    result = [None] * (max_location + 1)
    for k, v in indices.items():
        result[k] = v
    return result[1:]
a2bec43b384302d5218e8c62c83bc069be3bcbd3
16,573
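A minimal run of the serializer above; the Node class is a hypothetical stand-in for whatever tree type the original code uses:

class Node:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

tree = Node(1, Node(2), Node(3, Node(4), Node(5)))
# heap locations: 1 -> "1", 2 -> "10", 3 -> "11", 4 -> "110", 5 -> "111"
assert serialize(tree) == [1, 2, 3, None, None, 4, 5]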
def ensure_daemon(f):
    """A decorator for running an integration test with and without the daemon enabled."""
    def wrapper(self, *args, **kwargs):
        for enable_daemon in [False, True]:
            enable_daemon_str = str(enable_daemon)
            env = {
                "HERMETIC_ENV": "PANTS_PANTSD,PANTS_SUBPROCESSDIR",
                "PANTS_PANTSD": enable_daemon_str,
            }
            with environment_as(**env):
                try:
                    f(self, *args, **kwargs)
                except Exception:
                    print(f"Test failed with enable-pantsd={enable_daemon}:")
                    if not enable_daemon:
                        print(
                            "Skipping run with pantsd=true because it already "
                            "failed with pantsd=false."
                        )
                    raise
                finally:
                    kill_daemon()
    return wrapper
d9005c48d489b8b5da1f9687b78d1f455aaf3d62
16,574
import json

from adaptivefiltering.pdal import PDALInMemoryDataSet, execute_pdal_pipeline


def reproject_dataset(dataset, out_srs, in_srs=None):
    """Standalone function to reproject a given dataset with the option
    of forcing an input reference system.

    :param out_srs: The desired output format in WKT.
    :type out_srs: str
    :param in_srs: The input format in WKT from which to convert.
        The default is the dataset's current reference system.
    :type in_srs: str
    :return: A reprojected dataset
    :rtype: adaptivefiltering.DataSet
    """
    dataset = PDALInMemoryDataSet.convert(dataset)
    if in_srs is None:
        in_srs = dataset.spatial_reference

    config = {
        "type": "filters.reprojection",
        "in_srs": in_srs,
        "out_srs": out_srs,
    }
    pipeline = execute_pdal_pipeline(dataset=dataset, config=config)
    spatial_reference = json.loads(pipeline.metadata)["metadata"][
        "filters.reprojection"
    ]["comp_spatialreference"]

    return PDALInMemoryDataSet(
        pipeline=pipeline,
        spatial_reference=spatial_reference,
    )
0380442a837f89bbf06d0d1b5e9917e7309876ad
16,575
def conditional(condition, decorator):
    """
    Decorator for a conditionally applied decorator.

    Example:
        @conditional(get_config('use_cache'), ormcache)
        def fn():
            pass
    """
    if condition:
        return decorator
    else:
        return lambda fn: fn
7c17ad3aaacffd0008ec1cf66871ea6755f7869a
16,576
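A self-contained illustration of the conditional-decorator helper above; trace is a toy decorator invented for the demo:

def trace(fn):
    def wrapped(*args, **kwargs):
        print(f"calling {fn.__name__}")
        return fn(*args, **kwargs)
    return wrapped

DEBUG = False

@conditional(DEBUG, trace)
def add(a, b):
    return a + b

add(1, 2)  # prints nothing: with DEBUG=False the identity decorator is applied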
import statistics


def variance(data, mu=None):
    """Compute the (population) variance over a list."""
    if mu is None:
        mu = statistics.mean(data)
    return sum([(x - mu) ** 2 for x in data]) / len(data)
92f89d35c2ae5abf742b10ba838a381d6f74e92c
16,577
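Because the helper above divides by len(data), it computes the population variance; it agrees with the stdlib's pvariance rather than its sample variance:

import statistics

data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
assert variance(data) == statistics.pvariance(data)  # 4.0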
def make_note(outfile, headers, paragraphs, **kw):
    """Builds a pdf file named outfile based on headers and paragraphs,
    formatted according to parameters in kw.

    :param outfile: outfile name
    :param headers: <OrderedDict> of headers
    :param paragraphs: <OrderedDict> of paragraphs
    :param kw: keyword arguments for formatting
    """
    story = [Paragraph(x, headers[x]) for x in headers.keys()]
    for headline, paragraph in paragraphs.items():
        story.append(Paragraph(headline, paragraph.get("style", h3)))
        if "tpl" not in paragraph:  # dict.has_key() was removed in Python 3
            for sub_headline, sub_paragraph in paragraph.items():
                story.append(Paragraph(sub_headline, paragraph.get("style", h4)))
                story.append(Paragraph(sub_paragraph.get("tpl").render(**kw), p))
        else:
            if isinstance(paragraph.get("tpl"), Template):
                story.append(Paragraph(paragraph.get("tpl").render(**kw), p))
            elif isinstance(paragraph.get("tpl"), Table):
                story.append(Spacer(1, 0.2 * inch))
                story.append(paragraph.get("tpl"))
                story.append(Spacer(1, 0.2 * inch))
            else:
                pass
    doc = SimpleDocTemplate(outfile)
    doc.build(story, onFirstPage=formatted_page, onLaterPages=formatted_page)
    return doc
d9bc331167649210cf18e76bcff4099817c28458
16,578
import os
import stat


def output_file_exists(filename):
    """Check if a file exists and its size is > 0."""
    if not file_exists(filename):
        return False
    st = os.stat(filename)       # os.stat, not the stat module itself
    if st[stat.ST_SIZE] == 0:    # index with the stat module's constant
        return False
    return True
ad2f3a7451feefd32fe98da7fc3bfca9852b080c
16,579
def IMF_N(m, a=.241367, b=.241367, c=.497056):
    """
    Returns the number of stars with mass m.
    """
    # a,b,c = (.241367,.241367,.497056)
    # a=b=c=1/3.6631098624
    if .1 <= m <= .3:
        res = c * (m**(-1.2))
    elif .3 < m <= 1.:
        res = b * (m**(-1.8))
    elif 1. < m <= 100.:
        # res = a*( m**(-1.3)-100**(-1.3) )/1.3
        res = a * (m**(-2.3))
    else:
        res = 0
    return res
4d120af2840a793468335cddd867f6d29940d415
16,580
def features_disable(partial_name, partial_name_field, force, **kwargs):
    """Disable a feature."""
    mode = "disable"
    params = {"mode": "force"} if force else None
    feature = _okta_get("features", partial_name,
                        selector=_selector_field_find(partial_name_field, partial_name))
    feature_id = feature["id"]
    rv = okta_manager.call_okta(f"/features/{feature_id}/{mode}", REST.post,
                                params=params)
    return rv
5477a43ad2f849669a6a209abfc835f0f4ee453a
16,581
import boto3


def _get_images():
    """Get the official AWS public AMIs created by Flambe that have tag
    'Creator: [email protected]'.

    ATTENTION: why not just search the tags? We need to make sure the AMIs we
    pick were created by the Flambe team. Because of tags values not being
    unique, anyone can create a public AMI with 'Creator: [email protected]'
    as a tag. If we pick that AMI, then we could potentially be creating
    instances with unknown AMIs, causing potential security issues. By
    filtering by our account id (which can be public), we can make sure that
    all AMIs that are being scanned were created by the Flambe team.
    """
    client = boto3.client('ec2')
    return client.describe_images(
        Owners=[const.AWS_FLAMBE_ACCOUNT],
        Filters=[{'Name': 'tag:Creator', 'Values': ['[email protected]']}])
975596ff9eb1c9c0864cadb41edc2b1a4d009790
16,582
import numpy as np
from statsmodels.tsa.ar_model import AR  # assumed source of the AR model used below


def ar_coefficient(x, param):
    """
    This feature calculator fits the unconditional maximum likelihood of an
    autoregressive AR(k) process. The k parameter is the maximum lag of the
    process.

    .. math::

        X_{t} = \\varphi_0 + \\sum_{i=1}^{k} \\varphi_{i} X_{t-i} + \\varepsilon_{t}

    For the configuration from param, which should contain the maxlag "k",
    such an AR process is calculated. Then the coefficient
    :math:`\\varphi_{i}` whose index :math:`i` is given by "coeff" is
    returned.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: a dictionary {"coeff": x, "k": y} with x,y int
    :type param: dict
    :return: the feature value
    :return type: float
    """
    calculated_ar_params = {}
    x_as_list = list(x)
    calculated_AR = AR(x_as_list)
    res = {}
    k = param["k"]
    p = param["coeff"]
    column_name = "k_{}__coeff_{}".format(k, p)
    if k not in calculated_ar_params:
        try:
            calculated_ar_params[k] = calculated_AR.fit(maxlag=k, solver="mle").params
        except (np.linalg.LinAlgError, ValueError):
            calculated_ar_params[k] = [np.NaN] * k
    mod = calculated_ar_params[k]
    if p <= k:
        try:
            res[column_name] = mod[p]
        except IndexError:
            res[column_name] = 0
    else:
        res[column_name] = np.NaN
    return [value for key, value in res.items()][0]
a7a7171a44055d23457fd622d7e893f839f17bcf
16,583
import random

from faker import Faker


def address_factory(sqla):
    """Create a fake address."""
    fake = Faker()  # Use a generic one; others may not have all methods.
    addresslines = fake.address().splitlines()
    areas = sqla.query(Area).all()
    if not areas:
        create_multiple_areas(sqla, random.randint(3, 6))
        areas = sqla.query(Area).all()
    current_area = random.choice(areas)
    address = {
        'name': fake.name(),
        'address': addresslines[0],
        'city': addresslines[1].split(",")[0],
        'area_id': current_area.id,
        'country_code': current_area.country_code,
        'latitude': random.random() * 0.064116 + -2.933783,
        'longitude': random.random() * 0.09952 + -79.055411
    }
    return address
91f4558887025841d99ab6e65795111bbc804238
16,585
from typing import List, Tuple, Union

import pandas as pd

from pm4py.objects.log.obj import EventLog  # import path assumed for the annotation
from pm4py.util import constants, xes_constants
from pm4py.algo.discovery.dfg.adapters.pandas.df_statistics import get_dfg_graph
from pm4py.algo.discovery.dfg.variants import performance as dfg_discovery


def discover_performance_dfg(log: Union[EventLog, pd.DataFrame],
                             business_hours: bool = False,
                             worktiming: List[int] = [7, 17],
                             weekends: List[int] = [6, 7]) -> Tuple[dict, dict, dict]:
    """
    Discovers a performance directly-follows graph from an event log.

    Parameters
    ---------------
    log
        Event log
    business_hours
        Enables/disables the computation based on the business hours (default: False)
    worktiming
        (If the business hours are enabled) The hour range in which the
        resources of the log are working (default: 7 to 17)
    weekends
        (If the business hours are enabled) The weekend days
        (default: Saturday (6), Sunday (7))

    Returns
    ---------------
    performance_dfg
        Performance DFG
    start_activities
        Start activities
    end_activities
        End activities
    """
    general_checks_classical_event_log(log)
    if check_is_pandas_dataframe(log):
        # import the pandas variants locally so they do not shadow the log
        # variants used in the other branch
        from pm4py.statistics.start_activities.pandas import get as start_activities_module
        from pm4py.statistics.end_activities.pandas import get as end_activities_module
        check_pandas_dataframe_columns(log)
        properties = get_properties(log)
        activity_key = properties[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] \
            if constants.PARAMETER_CONSTANT_ACTIVITY_KEY in properties \
            else xes_constants.DEFAULT_NAME_KEY
        timestamp_key = properties[constants.PARAMETER_CONSTANT_TIMESTAMP_KEY] \
            if constants.PARAMETER_CONSTANT_TIMESTAMP_KEY in properties \
            else xes_constants.DEFAULT_TIMESTAMP_KEY
        case_id_key = properties[constants.PARAMETER_CONSTANT_CASEID_KEY] \
            if constants.PARAMETER_CONSTANT_CASEID_KEY in properties \
            else constants.CASE_CONCEPT_NAME
        dfg = get_dfg_graph(log, activity_key=activity_key, timestamp_key=timestamp_key,
                            case_id_glue=case_id_key, measure="performance",
                            perf_aggregation_key="all", business_hours=business_hours,
                            worktiming=worktiming, weekends=weekends)
        start_activities = start_activities_module.get_start_activities(log, parameters=properties)
        end_activities = end_activities_module.get_end_activities(log, parameters=properties)
    else:
        from pm4py.statistics.start_activities.log import get as start_activities_module
        from pm4py.statistics.end_activities.log import get as end_activities_module
        properties = get_properties(log)
        properties[dfg_discovery.Parameters.AGGREGATION_MEASURE] = "all"
        properties[dfg_discovery.Parameters.BUSINESS_HOURS] = business_hours
        properties[dfg_discovery.Parameters.WORKTIMING] = worktiming
        properties[dfg_discovery.Parameters.WEEKENDS] = weekends
        dfg = dfg_discovery.apply(log, parameters=properties)
        start_activities = start_activities_module.get_start_activities(log, parameters=properties)
        end_activities = end_activities_module.get_end_activities(log, parameters=properties)
    return dfg, start_activities, end_activities
df8d9669c7e2a4cd3170cb1c5a1ecc7e7811649e
16,586
import warnings

import numpy as np


def mifs(data, target_variable, prev_variables_index, candidate_variable_index, **kwargs):
    """
    This estimator computes the Mutual Information Feature Selection criterion.

    Parameters
    ----------
    data : np.array matrix
        Matrix of data set. Columns are variables, rows are observations.
    target_variable : int or float
        Target variable. Can not be in data!
    prev_variables_index : list of ints, set of ints
        Indexes of previously selected variables.
    candidate_variable_index : int
        Index of candidate variable in data matrix.
    beta : float
        Impact of the redundancy segment in the MIFS approximation.
        The higher the beta, the higher the impact.

    Returns
    -------
    j_criterion_value : float
        J_criterion approximated by the Mutual Information Feature Selection.
    """
    assert isinstance(data, np.ndarray), "Argument 'data' must be a numpy matrix"
    assert isinstance(target_variable, np.ndarray), "Argument 'target_variable' must be a numpy matrix"
    assert isinstance(candidate_variable_index, int), "Argument 'candidate_variable_index' must be an integer"
    assert len(data.shape) == 2, "For 'data' argument use numpy array of shape (n,p)"
    assert data.shape[0] == len(target_variable), "Number of rows in 'data' must equal target_variable length"
    assert candidate_variable_index < data.shape[1], "Index 'candidate_variable_index' out of range in 'data'"
    for i in prev_variables_index:
        assert isinstance(i, int), "All previous variable indexes must be int."

    if kwargs.get('beta') is None:
        beta = 1
        warnings.warn("Parameter `beta` not provided, default value of 1 is selected.", Warning)
    else:
        beta = kwargs.pop('beta')
    assert isinstance(beta, int) or isinstance(beta, float), "Argument 'beta' must be int or float"

    candidate_variable = data[:, candidate_variable_index]

    if len(prev_variables_index) == 0:
        redundancy_sum = 0
    else:
        redundancy_sum = np.apply_along_axis(mutual_information, axis=0,
                                             arr=data[:, prev_variables_index],
                                             vector_2=candidate_variable).sum()

    return mutual_information(candidate_variable, target_variable) - beta * redundancy_sum
058ebdbb831d7fb52c4b5f053ba7bb8a1ce7f144
16,587
def input_thing():
    """Read an item's information from user input."""
    name_str, price_str, weight_str = input(
        'Enter item information (name price weight): ').split()
    return name_str, int(price_str), int(weight_str)
2a986e9479e8e4262cfab89f258af3536c5fefe3
16,588
import numpy as np


def extract_features_mask(img, mask):
    """Computes Laws texture features for the masked area of the image."""
    preprocessed_img = laws_texture.preprocess_image(img, size=15)
    law_images = laws_texture.filter_image(preprocessed_img, LAW_MASKS)
    law_energy = laws_texture.compute_energy(law_images, 10)

    energy_features_list = []
    for type, energy in law_energy.items():
        # extract features for mask
        energy_masked = energy[np.where(mask != 0)]
        energy_feature = np.mean(energy_masked, dtype=np.float32)
        energy_features_list.append(energy_feature)
    return energy_features_list
e184695fb2879cf9fd418e7110498717585b4878
16,589
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from scipy.spatial.distance import pdist, squareform


def construct_grid_with_k_connectivity(n1, n2, k, figu=False):
    """Constructs a directed grid graph with side lengths n1 and n2 and
    neighborhood connectivity k. For plotting the adjacency matrix give
    figu=True."""

    # Periodic (torus) wrap-around helpers, Euclidean metric
    def feuclidhorz(u, v):
        return np.sqrt((u[0] - (v[0] - n2))**2 + (u[1] - v[1])**2)

    def feuclidvert(u, v):
        return np.sqrt((u[0] - v[0])**2 + (u[1] - (v[1] - n1))**2)

    def fperiodeuc(u, v):
        return np.sqrt((u[0] - (v[0] - n2))**2 + (u[1] - (v[1] - n1))**2)

    def finvperiodic(u, v):
        return fperiodeuc(v, u)

    def finvvert(u, v):
        return feuclidvert(v, u)

    def finvhorz(u, v):
        return feuclidhorz(v, u)

    def fperiodeucb(u, v):
        return np.sqrt((u[0] - n2 - v[0])**2 + (u[1] - (v[1] - n1))**2)

    def fperiodeucc(v, u):
        return np.sqrt((u[0] - n2 - v[0])**2 + (u[1] - (v[1] - n1))**2)

    # Periodic wrap-around helpers, Chebyshev metric
    def fchhorz(u, v):
        return max(abs(u[0] - (v[0] - n2)), abs(u[1] - v[1]))

    def fchvert(u, v):
        return max(abs(u[0] - v[0]), abs(u[1] - (v[1] - n1)))

    def fperiodch(u, v):
        return max(abs(u[0] - (v[0] - n2)), abs(u[1] - (v[1] - n1)))

    def finvperiodicch(u, v):
        return fperiodch(v, u)

    def finvvertch(u, v):
        return fchvert(v, u)

    def finvhorzch(u, v):
        return fchhorz(v, u)

    def fperiodchb(u, v):
        return max(abs(u[0] - n2 - v[0]), abs(u[1] - (v[1] - n1)))

    def fperiodchc(v, u):
        return max(abs(u[0] - n2 - v[0]), abs(u[1] - (v[1] - n1)))

    def fperiodchd(u, v):
        return max(abs(n2 - u[0] - v[0]), abs(u[1] - (n1 - v[1])))

    def fperiodche(v, u):
        return max(abs(n2 - u[0] - v[0]), abs(u[1] - (n1 - v[1])))

    # distF = distance function, distM = distance meter (threshold)
    if k == 4:
        distF, distM = 'euclidean', 1  # .41
    elif k == 8:
        distF, distM = 'euclidean', 1.5
    elif k == 12:
        distF, distM = 'euclidean', 2
    elif k == 20:
        distF, distM = 'euclidean', 2.3  # 2.5
    elif k == 24:
        distF, distM = 'chebyshev', 2  # or euclidean 2.9; check this again
    elif k == 28:
        distF, distM = 'euclidean', 3
    elif k == 36:
        distF, distM = 'euclidean', 3.5
    elif k == 44:
        distF, distM = 'euclidean', 3.8
    elif k == 48:
        distF, distM = 'euclidean', 4

    x = np.linspace(1, n1, n1)
    y = np.linspace(1, n2, n2)
    X, Y = np.meshgrid(x, y)
    XY = np.vstack((Y.flatten(), X.flatten()))

    adj = squareform(pdist(XY.T, metric=distF) <= distM)
    if k != 24:
        adjb = squareform(pdist(XY.T, metric=feuclidhorz) <= distM)
        adjc = squareform(pdist(XY.T, metric=feuclidvert) <= distM)
        adjd = squareform(pdist(XY.T, metric=fperiodeuc) <= distM)
        adje = squareform(pdist(XY.T, metric=finvperiodic) <= distM)
        adjf = squareform(pdist(XY.T, metric=finvvert) <= distM)
        adjg = squareform(pdist(XY.T, metric=finvhorz) <= distM)
        adjx = squareform(pdist(XY.T, metric=fperiodeucc) <= distM)
        adjy = squareform(pdist(XY.T, metric=fperiodeucb) <= distM)
        Adj = (adj + adjb + adjc + adjd + adje + adjf + adjg + adjx + adjy >= 1)
    else:
        adjb = squareform(pdist(XY.T, metric=fchhorz) <= distM)
        adjc = squareform(pdist(XY.T, metric=fchvert) <= distM)
        adjd = squareform(pdist(XY.T, metric=fperiodch) <= distM)
        adje = squareform(pdist(XY.T, metric=finvperiodicch) <= distM)
        adjf = squareform(pdist(XY.T, metric=finvvertch) <= distM)
        adjg = squareform(pdist(XY.T, metric=finvhorzch) <= distM)
        adjx = squareform(pdist(XY.T, metric=fperiodchb) <= distM)
        adjy = squareform(pdist(XY.T, metric=fperiodchc) <= distM)
        Adj = (adj + adjb + adjc + adjd + adje + adjf + adjg + adjx + adjy >= 1)
    # Adj = ( adj+adjb >=1 )
    # print(adj)
    # plt.plot(sum(Adj))

    if figu:
        plt.figure(figsize=(1000, 1000))
        plt.imshow(Adj, interpolation='none', extent=[0, n1 * n2, n1 * n2, 0])
        plt.xticks(np.arange(n1 * n2))
        plt.yticks(np.arange(n1 * n2))
        plt.grid(ls='solid')
        # plt.colorbar()
        """
        # text portion
        min_val = 0
        max_val = n1*n2
        diff = 1
        ind_array = np.arange(min_val, max_val, diff)
        x, y = np.meshgrid(ind_array, ind_array)
        for x_val, y_val in zip(x.flatten(), y.flatten()):
            c = adj[x_val, y_val]
            plt.text(x_val+0.5, y_val+0.5, '%.2f' % c, fontsize=8,
                     va='center', ha='center')
        """

    G = nx.from_numpy_matrix(Adj)
    return (G, Adj)
46b690f02c4f025719424582acecff43580543da
16,590
import numpy as np
from numpy import argmin, argsort, array, concatenate, cumsum, empty, floor


def _optimal_shift(pos, r_pad, log):
    """
    Find the shift for the periodic unit cube that would minimise the padding.
    """
    npts, ndim = pos.shape
    # +1 whenever a region starts, -1 when it finishes
    start_end = empty(npts * 2, dtype=np.int32)
    start_end[:npts] = 1
    start_end[npts:] = -1

    pad_min = []
    # Go along each axis, find the point that would require least padding
    for ax in range(ndim):
        start_reg = pos[:, ax] - r_pad
        end_reg = pos[:, ax] + r_pad
        # make periodic
        start_reg -= floor(start_reg)
        end_reg -= floor(end_reg)

        # Order from 0-1, add 1 whenever we come into range of a new point,
        # -1 whenever we leave
        idx_sort = argsort(concatenate([start_reg, end_reg]))
        region_change = cumsum(start_end[idx_sort])
        # Find the minimum
        min_chg = argmin(region_change)
        # Note since this is the minimum trough:
        #   start_end[idx_sort[min_chg]] == -1 (a trough)
        #   start_end[idx_sort[min_chg+1]] == +1 (otherwise it wasn't the minimum)
        trough0 = end_reg[idx_sort[min_chg] - npts]  # has to be a -1 (i.e. region end)
        if min_chg + 1 == 2 * npts:
            trough1 = start_reg[idx_sort[0]] + 1
            mid_trough = 0.5 * (trough0 + trough1)
            mid_trough -= floor(mid_trough)
        else:
            trough1 = start_reg[idx_sort[min_chg + 1]]
            mid_trough = 0.5 * (trough0 + trough1)
        pad_min.append(mid_trough)

    shift = array([1.0 - x for x in pad_min], dtype=pos.dtype)
    print("Best shift", ', '.join('%.3f' % x for x in shift), file=log)
    return shift
cac3c56307ea3d240ebe838ea4d26bb38c62dc3c
16,592
def ShowActStack(cmd_args=None):
    """ Routine to print out the stack of a specific thread.
        usage: showactstack <activation>
    """
    if cmd_args is None or len(cmd_args) < 1:
        print("No arguments passed")
        print(ShowAct.__doc__.strip())
        return False
    threadval = kern.GetValueFromAddress(cmd_args[0], 'thread *')
    print(GetThreadSummary.header)
    print(GetThreadSummary(threadval))
    print(GetThreadBackTrace(threadval, prefix="\t"))
    return
43b0eca326465fe9dc7b0207ba448d75da7e9889
16,593
import json


def load_request(possible_keys):
    """Given a list of possible keys, return any matching post data."""
    pdata = request.json
    if pdata is None:
        pdata = json.loads(request.body.getvalue().decode('utf-8'))
    for k in possible_keys:
        if k not in pdata:
            pdata[k] = None
    # print('pkeys: %s pdata: %s' % (possible_keys, pdata))
    return pdata
b21c503fac56398be6745a10fb95889128c6e2b2
16,595
import random


def get_random_tcp_start_pos():
    """
    Reachability area:
        x = [-0.2, 0.4]
        y = [-0.28, -0.1]
    """
    z_up = 0.6
    tcp_x = round(random.uniform(-0.2, 0.4), 4)
    tcp_y = round(random.uniform(-0.28, -0.1), 4)
    start_tcp_pos = (tcp_x, tcp_y, z_up)
    # start_tcp_pos = (-0.2, -0.28, z_up)
    return start_tcp_pos
adf87dec45bf5a81c321f94c93d45a67f0aeff0d
16,596
def CalculateChiv3p(mol):
    """
    #################################################################
    Calculation of valence molecular connectivity chi index for
    path order 3

    ---->Chiv3

    Usage:
        result = CalculateChiv3p(mol)
        Input: mol is a molecule object.
        Output: result is a numeric value
    #################################################################
    """
    return _CalculateChivnp(mol, NumPath=3)
27405fce52540a0de9c4c1c2d5a35454681554fa
16,597
from typing import Optional, Tuple


def coerce(version: str) -> Tuple[Optional[Version], str]:
    """
    Convert an incomplete version string into a semver-compatible Version object

    * Tries to detect a "basic" version string (``major.minor.patch``).
    * If not enough components can be found, missing components are
      set to zero to obtain a valid semver version.

    :param str version: the version string to convert
    :return: a tuple with a :class:`Version` instance (or ``None``
        if it's not a version) and the rest of the string which
        doesn't belong to a basic version.
    :rtype: tuple(:class:`Version` | None, str)
    """
    match = BASEVERSION.search(version)
    if not match:
        return (None, version)

    ver = {
        key: 0 if value is None else value
        for key, value in match.groupdict().items()
    }
    ver = Version(**ver)
    rest = match.string[match.end():]  # noqa:E203
    return ver, rest
e712533aa05444ad47403fc10e7f2ec29b8132ec
16,598
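A quick illustration of the coercion helper above, assuming BASEVERSION matches a major[.minor[.patch]] prefix as the docstring describes:

ver, rest = coerce("3.4-alpha")
# missing components are zero-filled: "3.4" -> 3.4.0; "-alpha" is left over
assert str(ver) == "3.4.0" and rest == "-alpha"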
def choose_wyckoff(wyckoffs, number):
    """
    Choose the Wyckoff sites based on the current number of atoms.

    Rules:
        1. The newly added sites are equal to or fewer than the required number.
        2. Prefer the sites with large multiplicity.
    """
    for wyckoff in wyckoffs:
        if len(wyckoff[0]) <= number:
            return choose(wyckoff)
    return False
14b276d8aa50e84f47d77f6796e193cc96ddd0a9
16,599
def _to_system(abbreviation):
    """Converts an abbreviation to a system identifier.

    Args:
        abbreviation: a `pronto.Term.id`

    Returns:
        a system identifier
    """
    try:
        return {
            'HP': 'http://www.human-phenotype-ontology.org/'
        }[abbreviation]
    except KeyError:
        raise RuntimeError(
            'system abbreviation \'%s\' is not supported' % abbreviation)
f43942b242e67866028a385e6614133dc25b31b0
16,600
from typing import Union

from qiskit import QuantumCircuit, QuantumRegister
from qiskit.circuit import Parameter


def apply_gate(circ: QuantumCircuit, qreg: QuantumRegister, gate: GateObj,
               parameterise: bool = False, param: Union[Parameter, tuple] = None):
    """Applies a gate to a quantum circuit.

    More complicated gates such as RXX gates should be decomposed into single
    qubit gates and CNOTs prior to calling this function. If parameterise is
    True, then qiskit's placeholder parameter theta will be used in place of
    any explicit parameters.
    """
    if not isinstance(gate.qubits, list):
        q = gate.qubits
        params = gate.params
        if gate.name == 'I':
            pass
        elif gate.name == 'H':
            circ.h(qreg[q])
        elif gate.name == 'HSdag':
            circ.h(qreg[q])
            circ.s(qreg[q])
            circ.h(qreg[q])
        elif gate.name == 'X':
            circ.x(qreg[q])
        elif gate.name == 'Y':
            circ.y(qreg[q])
        elif gate.name == 'Z':
            circ.z(qreg[q])
        elif gate.name == 'RX':
            if parameterise:
                circ.rx(param, qreg[q])
            else:
                circ.rx(params, qreg[q])
        elif gate.name == 'RY':
            if parameterise:
                circ.ry(param, qreg[q])
            else:
                circ.ry(params, qreg[q])
        elif gate.name == 'RZ':
            if parameterise:
                circ.rz(param, qreg[q])
            else:
                circ.rz(params, qreg[q])
        elif gate.name == 'U3':
            if parameterise:
                _params = [i for i in param]
                circ.u3(_params[0], _params[1], _params[2], qreg[q])
            else:
                circ.u3(params[0], params[1], params[2], qreg[q])
    else:
        cntrl = gate.qubits[0]
        trgt = gate.qubits[1]
        circ.cx(qreg[cntrl], qreg[trgt])
    return circ
0babd68efb8bae67c5f610bcca3eb9f3b67630ad
16,601
import codecs
from typing import Tuple

import pandas as pd
from sklearn.model_selection import train_test_split  # assumed source of the splitter


def preprocess_datasets(data: str, seed: int = 0) -> Tuple:
    """Load and preprocess raw datasets (Yahoo! R3 or Coat)."""
    if data == 'yahoo':
        with codecs.open(f'../data/{data}/train.txt', 'r', 'utf-8', errors='ignore') as f:
            data_train = pd.read_csv(f, delimiter='\t', header=None)
            data_train.rename(columns={0: 'user', 1: 'item', 2: 'rate'}, inplace=True)
        with codecs.open(f'../data/{data}/test.txt', 'r', 'utf-8', errors='ignore') as f:
            data_test = pd.read_csv(f, delimiter='\t', header=None)
            data_test.rename(columns={0: 'user', 1: 'item', 2: 'rate'}, inplace=True)
        for _data in [data_train, data_test]:
            _data.user, _data.item = _data.user - 1, _data.item - 1
    elif data == 'coat':
        col = {'level_0': 'user', 'level_1': 'item', 2: 'rate', 0: 'rate'}
        with codecs.open(f'../data/{data}/train.ascii', 'r', 'utf-8', errors='ignore') as f:
            data_train = pd.read_csv(f, delimiter=' ', header=None)
            data_train = data_train.stack().reset_index().rename(columns=col)
            data_train = data_train[data_train.rate.values != 0].reset_index(drop=True)
        with codecs.open(f'../data/{data}/test.ascii', 'r', 'utf-8', errors='ignore') as f:
            data_test = pd.read_csv(f, delimiter=' ', header=None)
            data_test = data_test.stack().reset_index().rename(columns=col)
            data_test = data_test[data_test.rate.values != 0].reset_index(drop=True)
    test = data_test.values
    train, val = train_test_split(data_train.values, test_size=0.1, random_state=seed)
    num_users, num_items = train[:, 0].max() + 1, train[:, 1].max() + 1
    return train, val, test, num_users, num_items
78a7bfe7968ad47f797728ffb43c804ab8af6298
16,602
def loadSentimentVector(file_name):
    """
    Load sentiment vector
    [Surprise, Sorrow, Love, Joy, Hate, Expect, Anxiety, Anger]
    """
    with open(file_name, 'r', encoding='utf-8') as f:
        contents = [line.strip('\n').split() for line in f.readlines()]
    sentiment_dict = {
        # Keys are already str under Python 3, so no .decode('utf-8') is needed.
        line[0]: [float(w) for w in line[1:]]
        for line in contents
    }
    return sentiment_dict
5d0d1f4598eeed455d080236720adcae357b6485
16,603
import numpy as np


def unique_boxes(boxes, scale=1.0):
    """Return indices of unique boxes."""
    v = np.array([1, 1e3, 1e6, 1e9])
    hashes = np.round(boxes * scale).dot(v)
    _, index = np.unique(hashes, return_index=True)
    return np.sort(index)
fc9ab64356192828659f025af6aa112205fc838c
16,604
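A small, self-contained check of `unique_boxes` above; the box coordinates are made up for illustration:

import numpy as np

# Two identical boxes and one distinct box (x1, y1, x2, y2):
boxes = np.array([[10, 20, 50, 60],
                  [10, 20, 50, 60],
                  [15, 25, 55, 65]], dtype=float)
keep = unique_boxes(boxes)
print(keep)         # [0 2] - the duplicate row at index 1 is dropped
print(boxes[keep])  # the deduplicated boxes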
def HEX2DEC(*args) -> Function:
    """
    Converts a signed hexadecimal number to decimal format.

    Learn more: https://support.google.com/docs/answer/3093192
    """
    return Function("HEX2DEC", args)
b4741d02acae7169854d1193ae5b43f6736257dc
16,606
def find_edges(mesh, key):
    """
    Temp replacement for mesh.findEdges(). This is painfully slow.
    """
    for edge in mesh.edges:
        v = edge.vertices
        if key[0] == v[0] and key[1] == v[1]:
            return edge.index
98247b64a0e5671a7dbbf314f314cef2c5c8aae3
16,607
def thumbnail(link):
    """
    Returns the URL to a thumbnail for a given identifier.
    """
    targetid, service = _targetid(link), _service(link)
    if targetid:
        if service in _OEMBED_MAP:
            try:
                return _embed_json(service, targetid)["thumbnail_url"]
            except (ValueError, KeyError):
                return None
        elif service == "bandcamp":
            # Sometime in the future, parse the HTML for the image_src meta tag
            return None
    return None
9ca78af2a65a41a70fef73c35383ae9214fb2d96
16,608
def valve_gas_cv(m_dot, p_1, p_2, m_molar, T):
    """Find the required valve Cv for a given mass flow and pressure drop.

    Assumes that a compressible gas is flowing through the valve.

    Arguments:
        m_dot (scalar): Mass flow rate [units: kilogram second**-1].
        p_1 (scalar): Inlet pressure [units: pascal].
        p_2 (scalar): Outlet pressure [units: pascal].
        m_molar (scalar): Gas molar mass [units: kilogram mole**-1].
        T (scalar): Gas temperature [units: kelvin].

    Returns:
        scalar: Valve flow coefficient Cv [units: gallon minute**-1 psi**-1].
    """
    # Specific gravity of the gas [units: dimensionless]:
    spec_grav = m_molar / proptools.constants.m_molar_air

    # Convert gas flow to standard cubic feet per hour.
    flow_scfh = m_dot_to_scfh(m_dot, m_molar)

    # Determine if the flow is choked.
    # Checking if `p_1 >= 2 * p_2` is suggested by [1].
    # There is a more accurate choked flow criterion which depends
    # on the ratio of specific heats.
    choked = p_1 >= 2 * p_2

    if choked:
        cv = flow_scfh / 0.08821 * (spec_grav * T)**0.5 / p_1
    else:
        cv = flow_scfh / 0.1040 * (spec_grav * T / (p_1**2 - p_2**2))**0.5
    return cv
07bd3f45392e03eb6744b98a3fde022aa517c4fc
16,609
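An illustrative call to `valve_gas_cv` above, assuming `proptools` and the `m_dot_to_scfh` helper referenced inside the function are importable; the numbers are made up and not a real design case:

cv = valve_gas_cv(
    m_dot=0.05,       # kg/s of nitrogen
    p_1=2.0e6,        # 2 MPa inlet
    p_2=0.5e6,        # 0.5 MPa outlet (choked, since p_1 >= 2 * p_2)
    m_molar=28.0e-3,  # kg/mol for N2
    T=300.0,          # K
)
print('Required Cv: %.2f' % cv)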
def frequency_based_dissim(record, modes):
    """
    Frequency-based dissimilarity function inspired by "Improving K-Modes
    Algorithm Considering Frequencies of Attribute Values in Mode" by He et al.
    """
    list_dissim = []
    for cluster_mode in modes:
        sum_dissim = 0
        for i in range(len(record)):  # zip(record, cluster_mode.mode):
            # if (elem1 != elem2):
            if record[i] != cluster_mode.attrs[i]:
                sum_dissim += 1
            else:
                sum_dissim += 1 - cluster_mode.attr_frequencies[i]
        list_dissim.append(sum_dissim)
    return list_dissim
80e21763d6f90ddc5a448f46247fd12253de5dbb
16,610
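A toy example for `frequency_based_dissim` above; the `Mode` container below is a stand-in invented here for illustration, since the real cluster-mode class is not part of the snippet:

from collections import namedtuple

# Stand-in for the cluster-mode objects the function expects:
Mode = namedtuple('Mode', ['attrs', 'attr_frequencies'])

modes = [
    Mode(attrs=['red', 'small'], attr_frequencies=[0.8, 0.6]),
    Mode(attrs=['blue', 'large'], attr_frequencies=[0.5, 0.9]),
]
record = ['red', 'large']

# Matching attributes contribute (1 - frequency), mismatches contribute 1:
print(frequency_based_dissim(record, modes))  # approximately [1.2, 1.1]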
def _process_create_group(event: dict) -> list:
    """
    Process CreateGroup event. This function doesn't set tags.
    """
    return [event['responseElements']['group']['groupName']]
978b3ffc3c4aa72165914b79dc06cb7691c5c5a5
16,611
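A quick illustration of `_process_create_group` above using a minimal, hand-written CloudTrail-style event (only the fields the function actually touches are included):

event = {
    'responseElements': {
        'group': {'groupName': 'developers'}
    }
}
print(_process_create_group(event))  # ['developers']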
from typing import Any
from typing import List


def tree_labels(t: Node):
    """Collect all labels of a tree into a list."""
    def f(label: Any, folded_subtrees: List) -> List:
        return [label] + folded_subtrees

    def g(folded_first: List, folded_rest: List) -> List:
        return folded_first + folded_rest

    return foldtree(f, g, [], t)
7ad1703a090cd761a99cd5323c9258e8d2d551b8
16,612
def find_best_split(rows):
    """Find the best question to ask by iterating over every feature / value
    and calculating the information gain."""
    best_gain = 0  # keep track of the best information gain
    best_question = None  # keep track of the feature / value that produced it
    current_uncertainty = gini(rows)
    n_features = len(rows[0]) - 1  # number of columns

    for col in range(n_features):  # for each feature
        values = set([row[col] for row in rows])  # unique values in the column
        for val in values:  # for each value
            question = Question(col, val)

            # try splitting the dataset
            true_rows, false_rows = partition(rows, question)

            # Skip this split if it doesn't divide the dataset.
            if len(true_rows) == 0 or len(false_rows) == 0:
                continue

            # Calculate the information gain from this split
            gain = info_gain(true_rows, false_rows, current_uncertainty)

            # You actually can use '>' instead of '>=' here
            # but I wanted the tree to look a certain way for our
            # toy dataset.
            if gain >= best_gain:
                best_gain, best_question = gain, question

    return best_gain, best_question
9b197c99b41e64e37b499b5d4b3c7758cda3b56e
16,613
import numpy as np


def pad_data(data, context_size, target_size, pad_at_begin=False):
    """
    Performs data padding for both target and aggregate consumption.

    :param data: The aggregate power
    :type data: np.array
    :param context_size: The input sequence length
    :type context_size: int
    :param target_size: The target sequence length
    :type target_size: int
    :param pad_at_begin: Specifies how the padded values are inserted, defaults to False
    :type pad_at_begin: bool, optional
    :return: The padded aggregate power.
    :rtype: np.array
    """
    sequence_length = context_size + target_size
    units_to_pad = sequence_length // 2
    padding = (context_size, target_size) if pad_at_begin else (units_to_pad, units_to_pad + 1)
    if data.ndim == 1:
        new_mains = np.pad(data, padding, 'constant', constant_values=(0, 0))
        return new_mains
    else:
        new_mains = []
        for i in range(data.shape[-1]):
            new_mains.append(np.pad(data[:, i], padding, 'constant', constant_values=(0, 0)))
        return np.stack(new_mains).T
1b698a849a4ca82d87ce6c5711220b61cd21252b
16,614
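A short shape check for `pad_data` above; the input array is arbitrary:

import numpy as np

x = np.arange(10, dtype=float)  # 1-D aggregate signal
padded = pad_data(x, context_size=4, target_size=2)
# units_to_pad = (4 + 2) // 2 = 3, so 3 zeros in front and 4 behind:
print(padded.shape)  # (17,)

padded_begin = pad_data(x, context_size=4, target_size=2, pad_at_begin=True)
# padding = (context_size, target_size) = (4, 2):
print(padded_begin.shape)  # (16,)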
def egg_translator(cell):
    """If the cell has the DNA for harboring its offspring inside it, granting
    it additional food and protection at the risk of the parent cell, it is an
    egg.

    Active DNA: x,A,(C/D),x,x,x
    """
    dna = cell.dna.split(',')
    if dna[1] == 'A' and dna[2] == 'C':
        return True
    elif dna[1] == 'A' and dna[2] == 'D':
        return True
    else:
        return False
af0d9097c8a0b5002722c79d6ec8262a66cc375d
16,617
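A tiny illustration of `egg_translator` above; `SimpleNamespace` stands in here for whatever cell class the original project uses:

from types import SimpleNamespace

egg_cell = SimpleNamespace(dna='x,A,C,x,x,x')
plain_cell = SimpleNamespace(dna='x,B,C,x,x,x')

print(egg_translator(egg_cell))    # True  (second gene 'A', third gene 'C')
print(egg_translator(plain_cell))  # False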
def all_different_cst(xs, cst):
    """
    all_different_cst(xs, cst)

    Ensure that all elements in xs + cst are distinct
    """
    return [AllDifferent([(x + c) for (x, c) in zip(xs, cst)])]
dfc75a54a92a4c8c2ef76af74250b9125c9bb647
16,618
def processing(task, region: dict, raster: str, parameters: dict):
    """
    Cuts the raster according to the given region and applies filters in order
    to find the district heating potentials and related indicators.

    Inputs :
        * region : selected zone where the district heating potential is studied.
        * raster : raster of the heat demand.
        * parameters : the pixel and area thresholds.

    Output :
        * Indicators :
        * Graphics : Potential of areas that pass the filters.
        * Layer : Areas that pass the filters.
    """
    with TemporaryDirectory(dir=settings.TESTDATA_DIR) as temp_dir:
        clipped_raster = join(temp_dir, "raster_tmp.tif")
        clip_raster(src=raster, shapes=region, dst=clipped_raster)

        (
            geo_transform,
            total_heat_demand,
            areas,
            filtered_map,
            total_potential,
            areas_potential,
        ) = get_areas(
            heat_density_map=clipped_raster,
            pixel_threshold=parameters["Heat demand in hectare (MWh/ha)"],
            district_heating_zone_threshold=parameters[
                "Heat demand in a DH zone (GWh/year)"
            ],
        )

        dst_raster = join(temp_dir, "dst.tif")
        write_raster(
            map_array=filtered_map,
            projection=get_projection(geofile=clipped_raster),
            geotransform=geo_transform,
            dst=dst_raster,
        )

        raster_name = "areas.tif"
        with open(dst_raster, mode="rb") as raster_fd:
            task.post_raster(raster_name=raster_name, raster_fd=raster_fd)

    response = get_response(
        total_potential=total_potential,
        total_heat_demand=total_heat_demand,
        areas_potential=areas_potential,
        raster_name=raster_name,
    )
    validate(response)
    return response
63a5548e886b575011e716e05a589715f027c316
16,619
import random


def randbit():
    """Returns a random bit."""
    return random.randrange(2)
4b47101df7368b7cb423920e6a5338b76ab4ecaa
16,620
def calc_points(goals, assists):
    """
    Calculate the total traditional and weighted points for all players,
    grouped by player id.

    Author: Rasmus Säfvenberg

    Parameters
    ----------
    goals : pandas.DataFrame
        A data frame with total goals and weighted goals per player.
    assists : pandas.DataFrame
        A data frame with total assists and weighted assists per player.

    Returns
    -------
    points : pandas.DataFrame
        A data frame with total points and weighted points per player.
    """
    # Specify columns to keep for merging
    goals = goals[["PlayerId", "PlayerName", "Position", "Goals", "WeightedGoals"]]
    assists = assists[["PlayerId", "PlayerName", "Position", "Assists", "WeightedAssists"]]

    # Combine goals and assists
    points = goals.merge(assists, on=["PlayerId", "PlayerName", "Position"], how="outer")

    # Fill missing values with 0 (some players only score goals etc.)
    points.fillna(0, inplace=True)

    # Calculate points = goals + assists
    points["Points"] = points["Goals"] + points["Assists"]

    # Calculate weighted points = weighted goals + weighted assists
    points["WeightedPoints"] = points["WeightedGoals"] + points["WeightedAssists"]

    # Sort by weighted points
    points.sort_values("WeightedPoints", ascending=False, inplace=True)

    return points
1801cf2602a473bdf532e1c0ee58b883dc3e79d1
16,621
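A small worked example for `calc_points` above with two hand-made players; the weighting numbers are arbitrary:

import pandas as pd

goals = pd.DataFrame({
    'PlayerId': [1, 2], 'PlayerName': ['A', 'B'], 'Position': ['C', 'D'],
    'Goals': [10, 5], 'WeightedGoals': [8.5, 4.0],
})
assists = pd.DataFrame({
    'PlayerId': [1, 2], 'PlayerName': ['A', 'B'], 'Position': ['C', 'D'],
    'Assists': [7, 12], 'WeightedAssists': [6.0, 9.5],
})

points = calc_points(goals, assists)
print(points[['PlayerName', 'Points', 'WeightedPoints']])
# Roughly:
#   PlayerName  Points  WeightedPoints
#   A           17      14.5
#   B           17      13.5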
import io
import base64


def file_to_base64(path):
    """
    Convert specified file to base64 string

    Args:
        path (string): path to file

    Return:
        string: base64 encoded file content
    """
    with io.open(path, 'rb') as file_to_convert:
        return base64.b64encode(file_to_convert.read())
0c942f8f4d29943c5a3aac6c954d9e2b1b2898a3
16,623
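A short usage sketch for `file_to_base64` above; note that `base64.b64encode` returns bytes under Python 3, so a `.decode()` is added here to get a text result (the file path is illustrative):

with open('/tmp/hello.txt', 'wb') as fh:
    fh.write(b'hello world')

encoded = file_to_base64('/tmp/hello.txt')
print(encoded.decode('ascii'))  # aGVsbG8gd29ybGQ=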
import numpy as np


def get_simverb(subset=None):
    """
    Get SimVerb-3500 data

    :return: (pairs, scores)
    """
    simverb = []
    if subset == 'dev':
        name = '500-dev'
    elif subset == 'test':
        name = '3000-test'
    else:
        name = '3500'
    with open('../data/SimVerb-3500/SimVerb-{}.txt'.format(name)) as f:
        f.readline()  # first line is headings
        for line in f:
            simverb.append(line.strip().split('\t'))
    all_pairs = [(x[0], x[1]) for x in simverb]
    all_scores = np.array([float(x[3]) for x in simverb])
    return (all_pairs, all_scores)
5cec49bd232a883836029b8b011f09f360176910
16,624
import numpy as np
# The drawing helpers are assumed to come from scikit-image
# (in recent versions `circle` has been renamed to `disk`):
from skimage.draw import circle, polygon


def sample_image(size, min_r, max_r, circles, squares, pixel_value):
    """Generate image with geometrical shapes (circles and squares)."""
    img = np.zeros((size, size, 2))
    loc = []
    if pixel_value is None:
        vals = np.random.randint(0, 256, circles + squares)
    else:
        vals = [pixel_value] * (circles + squares)
    for f, v in zip(["c"] * circles + ["s"] * squares, vals):
        r = np.random.randint(min_r, max_r + 1)
        xc, yc = np.random.randint(r, size - r + 1, 2)
        if f == "c":
            mask = circle(xc, yc, r, (size, size))
        if f == "s":
            mask = polygon((xc - r, xc + r, xc + r, xc - r),
                           (yc - r, yc - r, yc + r, yc + r),
                           (size, size))
        img[:, :, ["c", "s"].index(f)][mask] = v
        loc.append([xc, yc, r])
    return img, np.array(loc)
25ab1afcd7256bc07ee55ac2e12cf9d834cb798c
16,625
def host_allocations(auth):
    """Retrieve host allocations"""
    response = API.get(auth, '/os-hosts/allocations')
    return response.json()['allocations']
505eeb0502f6480445ec5dff1cd3203eda96d475
16,626
def rosenbrock_grad(x, y):
    """Gradient of Rosenbrock function."""
    return (-400 * x * (-(x ** 2) + y) + 2 * x - 2,
            -200 * x ** 2 + 200 * y)
c7acf0bbe11a6d1cbb38b6853eb1b508e3846657
16,627
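A quick sanity check for `rosenbrock_grad` above: the coefficients correspond to the usual Rosenbrock function (1 - x)^2 + 100(y - x^2)^2, whose global minimum is at (1, 1), where the gradient vanishes:

print(rosenbrock_grad(1.0, 1.0))  # (0.0, 0.0) - gradient is zero at the minimum
print(rosenbrock_grad(0.0, 0.0))  # (-2.0, 0.0)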
def extractYoujinsite(item):
    """
    Parser for 'Youjinsite' release titles.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if '[God & Devil World]' in item['tags'] and (chp or vol):
        return buildReleaseMessageWithType(item, 'Shenmo Xitong', vol, chp, frag=frag, postfix=postfix)
    if '[LBD&A]' in item['tags'] and (chp or vol):
        return buildReleaseMessageWithType(item, 'Line between Devil and Angel', vol, chp, frag=frag, postfix=postfix)
    if '[VW: Conquer the World]' in item['tags'] and (chp or vol):
        return buildReleaseMessageWithType(item, 'VW: Conquering the World', vol, chp, frag=frag, postfix=postfix)
    return False
11463288cdcc7268b0b4657934dd8872a7d36580
16,629
from logging import Logger, getLogger

# LOGGER_NAME is assumed to be a constant defined elsewhere in the module.


def get_logger() -> Logger:
    """Return the logger for this project."""
    return getLogger(LOGGER_NAME)
33e11a06c357552c35f9ef089fd303ad15db0884
16,632
import json


def write_guess_json(guesser, filename, fold, run_length=200,
                     censor_features=["id", "label"], num_guesses=5):
    """
    Returns the vocab, which is a list of all features.
    """
    vocab = [kBIAS]

    print("Writing guesses to %s" % filename)
    num = 0
    with open(filename, 'w') as outfile:
        total = len(fold)
        for qq in fold:
            num += 1
            # max(1, ...) avoids a modulo-by-zero when the fold has fewer than 80 questions.
            if num % max(1, total // 80) == 0:
                print('.', end='', flush=True)

            runs = qq.runs(run_length)
            # Note: this result is recomputed per run inside the loop below.
            guesses = guesser.guess(runs[0], max_n_guesses=5)

            for rr in runs[0]:
                guesses = guesser.guess([rr], max_n_guesses=num_guesses)
                for raw_guess in guesses[0]:
                    gg, ss = raw_guess
                    guess = {"id": qq.qanta_id,
                             "guess:%s" % gg: 1,
                             "run_length": len(rr) / 1000,
                             "score": ss,
                             "label": qq.page == gg,
                             "category:%s" % qq.category: 1,
                             "year:%s" % qq.year: 1}
                    for ii in guess:
                        # Don't let it use features that would allow cheating
                        if ii not in censor_features and ii not in vocab:
                            vocab.append(ii)
                    outfile.write(json.dumps(guess, sort_keys=True))
                    outfile.write("\n")
    print("")
    return vocab
9f0055289ff462b0b3c067ea1e0a68c66a74136c
16,633
def upgrade_to_4g(region, strategy, costs, global_parameters,
                  core_lut, country_parameters):
    """
    Reflects the baseline scenario of needing to build a single dedicated
    network.
    """
    backhaul = '{}_backhaul'.format(strategy.split('_')[2])
    sharing = strategy.split('_')[3]
    geotype = region['geotype'].split(' ')[0]

    # generation_core_backhaul_sharing_networks_spectrum_tax
    network_strategy = strategy.split('_')[4]
    networks = country_parameters['networks']['baseline' + '_' + geotype]

    if network_strategy == 'srn' and geotype == 'rural':
        sharing = 'cns'

    shared_assets = INFRA_SHARING_ASSETS[sharing]

    assets = {
        'single_sector_antenna': costs['single_sector_antenna'],
        'single_remote_radio_unit': costs['single_remote_radio_unit'],
        'io_fronthaul': costs['io_fronthaul'],
        'processing': costs['processing'],
        'io_s1_x2': costs['io_s1_x2'],
        'control_unit': costs['control_unit'],
        'cooling_fans': costs['cooling_fans'],
        'distributed_power_supply_converter': costs['distributed_power_supply_converter'],
        'bbu_cabinet': costs['bbu_cabinet'],
        'installation': costs['installation'],
        'site_rental': costs['site_rental_{}'.format(geotype)],
        'router': costs['router'],
        'backhaul': get_backhaul_costs(region, backhaul, costs, core_lut),
        'core_edge': core_costs(region, 'core_edge', costs, core_lut, strategy, country_parameters),
        'core_node': core_costs(region, 'core_node', costs, core_lut, strategy, country_parameters),
        'regional_edge': regional_net_costs(region, 'regional_edge', costs, core_lut, strategy, country_parameters),
        'regional_node': regional_net_costs(region, 'regional_node', costs, core_lut, strategy, country_parameters),
        'per_site_spectrum_acquisition_cost': costs['per_site_spectrum_acquisition_cost'],
        'per_site_administration_cost': costs['per_site_administration_cost'],
    }

    cost_structure = {}

    for key, value in assets.items():
        if key not in shared_assets:
            cost_structure[key] = value
        else:
            if network_strategy == 'srn' and geotype == 'rural':
                value = value * (1 / networks)
                cost_structure[key] = value
            else:
                value = value / networks
                cost_structure[key] = value

    return cost_structure
947afef6d550b9022109c665fc311511f428e9f8
16,634