content: string, lengths 35 to 762k
sha1: string, length 40
id: int64, values 0 to 3.66M
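Each record below is one row of this table: a content cell holding a Python snippet, followed by its sha1 checksum and integer id. A minimal sketch of how such a dump could be consumed, assuming it has been exported as a JSON Lines file whose keys match the column names above (the file name is hypothetical):

import hashlib
import json

# Iterate a hypothetical JSON Lines export of this table and verify that each
# row's sha1 column is the SHA-1 digest of its content column.
with open('code_snippets.jsonl', 'r', encoding='utf-8') as handle:  # hypothetical file name
    for line in handle:
        row = json.loads(line)  # expected keys: 'content', 'sha1', 'id'
        digest = hashlib.sha1(row['content'].encode('utf-8')).hexdigest()
        if digest != row['sha1']:
            print(f"checksum mismatch for id {row['id']}")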
from datetime import datetime

import pandas as pd


def build_data_table(row, fields_to_try):
    """
    Create HTML table for one row of data

    If no fields are valid, returns empty string
    """
    th_class = 'attribute_heading'
    td_class = 'attribute_value'

    field_names = pd.read_csv('data/field_names.csv')

    output_table = """
    <table>
    <tbody>
    """
    fields_written = 0

    for field_name in fields_to_try:
        if field_name in row:
            field_value = row[field_name]
            # Convert timestamp to human-readable string
            if isinstance(field_value, datetime):
                field_value = field_value.strftime('%B %-d, %Y')
            if pd.notna(field_value) and len(field_value) > 0:
                # If no display_name has been defined for the field_name, use the field_name as the display_name
                if sum(field_names['field_name'] == field_name) == 0:
                    display_name = field_name
                else:
                    display_name = field_names.loc[
                        field_names['field_name'] == field_name, 'display_name'].values[0]

                output_table += f"""
                <tr>
                <th class="{th_class}">{display_name}</th>
                """
                if '_link' in field_name:
                    output_table += f'<td class="{td_class}"><a href="{field_value}">{field_value}</a></td>'
                elif '_email' in field_name:
                    output_table += f'<td class="{td_class}"><a href="mailto:{field_value}">{field_value}</a></td>'
                else:
                    output_table += f'<td class="{td_class}">{field_value}</td>'
                output_table += '</tr>'
                fields_written += 1

    output_table += """
    </tbody>
    </table>
    """

    # or could use: if any([(f in row.index) for f in fields_to_try]):
    if fields_written == 0:
        output_table = ''

    return output_table
812a7acbe33296fc30aef4b27e427c63c6fc63bb
11,800
import logging

from ruamel.yaml import YAML

LOGGER = logging.getLogger(__name__)


def time_entry_reader(date, configuration):
    """Read the entries and return a list of entries that are a part of the date provided."""
    parser = YAML(typ='rt')
    date = date.date()
    try:
        with open(configuration['filename'], 'r') as data_file:
            time_entries = parser.load(data_file).get('records', [])
    except FileNotFoundError:
        LOGGER.error('Cannot read file %s', configuration['filename'])
        raise RuntimeError(f'Cannot read file {configuration["filename"]}')
    return [te for te in time_entries if te['date'] == date]
5e01246d3fae1d8eaf53cbf1dec40f488ddfd0d4
11,801
import asyncio

import pytest


async def test_send_write(event_loop):
    """Check feed-receive scenarios used in the library."""
    STREAM_ID = 'whatever'
    DATA = b'data'

    def make_writer():
        queue = asyncio.Queue()

        async def writer(id, data):
            assert id == STREAM_ID
            await queue.put(data)

        return writer, queue

    for stream_mode in [StreamMode.WRITE, StreamMode.READ | StreamMode.WRITE]:
        stream = Stream(STREAM_ID, event_loop)
        writer, queue = make_writer()
        stream.on_write.append(writer)

        with pytest.raises(exceptions.InvalidStreamMode):
            await stream.send(None)

        stream.open(stream_mode)
        assert stream.is_writable

        await stream.send(DATA)
        assert await queue.get() == DATA

        with pytest.raises(TypeError):
            await stream.send(None)

        stream.close_sync()

        with pytest.raises(exceptions.InvalidStreamState):
            await stream.send(None)
669d25646aecd891547c3cbc40f7215e1c32c08b
11,802
import numpy as np


def width_series(value_series, outer_average_width=5, max_value=None, method='linear'):
    """
    :param value_series: the pd.Series that contains the values
    :param outer_average_width: the average width of the width series to return
    :param max_value: value to use as the maximum when normalizing the series (to focus low values)
    :param method: linear or surface
    :return: width_series: pd.Series that contains the widths corresponding to the values
    :rtype: pd.Series
    """
    max_value = max_value if max_value else np.max(list(value_series.values))
    if method == 'linear':
        serie = value_series.apply(lambda x: x / max_value * outer_average_width)
    elif method == 'surface':
        serie = value_series.apply(lambda x: np.sqrt(x / max_value) * outer_average_width)
    return serie
0efefbe0d1e7024293e0f6a8a39b7fca2f5cf41b
11,803
import numpy as np


def unroll_upper_triangular(matrix):
    """Converts square matrix to vector by unrolling upper triangle."""
    rows, cols = matrix.shape
    assert rows == cols, "Not a square matrix."

    row_idx, col_idx = np.triu_indices(rows, 1)
    unrolled = []
    for i, j in zip(row_idx, col_idx):
        unrolled.append(matrix[i][j])

    assert len(unrolled) == rows * (rows - 1) // 2
    return unrolled
b62725a178d569e2812ad48c826b8a7a864c04b6
11,804
from typing import Any
from typing import Optional
from typing import Sequence
from typing import Tuple


def fill_tuples(
    tuples: Sequence[Any],
    length: Optional[int] = None,
    repeat: bool = False,
    fill_method: str = 'bfill',
) -> Sequence[Tuple]:
    """Fill tuples so they are all the same length.

    Parameters
    ----------
    length : int, optional
        Fill tuples to a fixed length. If None, fills to max length of the
        non-string sequence objects given by tuples.
    repeat : bool, default False
        If True, fills missing tuple values by repeating the first value
        ('bfill') or the last value ('ffill') of each tuple. If False, fills
        with None.
    fill_method : {'bfill', 'ffill'}, str
        Whether to backfill or forward fill the tuple values.
    """
    if not length:
        if not any(is_non_string_sequence(t) for t in tuples):
            return tuples
        length = max(len(t) for t in tuples if is_non_string_sequence(t))

    new_tups = []
    for tup in tuples:
        tup = tuple_convert(tup)
        while len(tup) < length:
            if fill_method == 'bfill':
                tup = (tup[0] if repeat else None,) + tup
            else:  # 'ffill'
                tup += (tup[-1] if repeat else None,)
        new_tups.append(tup)
    return new_tups
80766f17b78a3fba0dc49ef95131564ce7b1e563
11,805
def intersect(connection, items, ttl=30, execute=True):
    """Intersection calculation."""
    return _set_common(connection, 'sinterstore', items, ttl, execute)
154480043f2b7634913839ea6ed1425ecc8cc312
11,806
def batch_decode(raw_logits, use_random, decode_times):
    """Split the logits into chunks along axis 1, decode each chunk, and
    return the flattened list of decoded results."""
    size = (raw_logits.shape[1] + 7) // 8
    logit_lists = []
    for i in range(0, raw_logits.shape[1], size):
        if i + size < raw_logits.shape[1]:
            logit_lists.append(raw_logits[:, i: i + size, :])
        else:
            logit_lists.append(raw_logits[:, i:, :])
    result_list = [decode_chunk(logit_lists[i], use_random, decode_times)
                   for i in range(len(logit_lists))]
    return_list = []
    for _0 in result_list:
        for _1 in _0:
            return_list.append(_1)
    return return_list
fcde630681d4455e717b7b3b19b098b72fb8a64c
11,807
import pathlib

import pkg_resources
import yaml


def _load_schemata(obj_type: str) -> dict:
    """Load the schemata from the package, returning merged results of other
    schema files if referenced in the file loaded.

    :raises: FileNotFoundError
    """
    schema_path = pathlib.Path(pkg_resources.resource_filename(
        'pglifecycle',
        'schemata/{}.yml'.format(obj_type).replace(' ', '_')))
    if not schema_path.exists():
        raise FileNotFoundError(
            'Schema file not found for object type {!r}'.format(obj_type))
    # yaml.load expects a stream or string rather than a Path, so read the
    # file and parse it with safe_load.
    return _preprocess(yaml.safe_load(schema_path.read_text()))
a737420b85bd78cf2210c8d12794eaaa4eb4ee90
11,808
def waitfor(msg, status='', spinner=None, log_level=log_levels.INFO):
    """waitfor(msg, status = '', spinner = None) -> waiter

    Starts a new progress indicator which includes a spinner if
    :data:`pwnlib.term.term_mode` is enabled. By default it outputs to
    loglevel :data:`pwnlib.log_levels.INFO`.

    Args:
      msg (str): The message of the spinner.
      status (str): The initial status of the spinner.
      spinner (list): This should either be a list of strings or None.
        If a list is supplied, then each element of the list is shown in
        order, with an update occurring every 0.1 second. Otherwise a
        random spinner is chosen.
      log_level(int): The log level to output the text to.

    Returns:
      A waiter-object that can be updated using :func:`status`,
      :func:`done_success` or :func:`done_failure`.
    """
    if context.log_level > log_level:
        h = _DummyWaiter()
    elif term.term_mode:
        h = _TermWaiter(msg, spinner, log_level)
    else:
        h = _SimpleWaiter(msg, spinner, log_level)

    if status:
        h.status(status)

    _waiter_stack.append(h)
    return h
6dc229cff86ecdbdccbda71239eafdc878c4520e
11,809
def f(i):
    """Add 2 to a value

    Args:
        i ([int]): integer value

    Returns:
        [int]: integer value
    """
    return i + 2
72b5d99f3b2132054805ab56872cf2199b425b20
11,810
def show_forecast(cmp_df, num_predictions, num_values, title): """Visualize the forecast.""" def create_go(name, column, num, **kwargs): points = cmp_df.tail(num) args = dict(name=name, x=points.index, y=points[column], mode='lines') args.update(kwargs) return go.Scatter(**args) lower_bound = create_go('Lower Bound', 'yhat_lower', num_predictions, line=dict(width=0), marker=dict(color="red")) upper_bound = create_go('Upper Bound', 'yhat_upper', num_predictions, line=dict(width=0), marker=dict(color="red"), fillcolor='rgba(68, 68, 68, 0.3)', fill='tonexty') forecast = create_go('Forecast', 'yhat', num_predictions, line=dict(color='rgb(31, 119, 180)')) actual = create_go('Actual', 'y', num_values, marker=dict(color="red")) # In this case the order of the series is important because of the filling data = [lower_bound, upper_bound, forecast, actual] layout = go.Layout(yaxis=dict(title='Posts'), title=title, showlegend = False) fig = go.Figure(data=data, layout=layout) plot(fig, show_link=False)
669300c4c57890d76153fe1a419037eada2fcbe6
11,811
import os
import warnings


def build_jobs_dict(path):
    """Build a dictionary of "job_name" : [recipe list] from a directory full
    of job files."""
    jobfiles = nested_glob(path, '.ajp')
    jobs = {}
    for jobfile in jobfiles:
        # str.strip('.ajp') would remove those characters from both ends of
        # the name, so split off the extension instead.
        job_name = os.path.splitext(os.path.basename(jobfile))[0]
        try:
            recipe = parse_jobfile(jobfile)
            jobs[job_name] = recipe
        except Exception:
            warnings.warn("Unable to parse " + jobfile)
    return jobs
174d677b60a9daee5c4a351dde6e8129738f60b4
11,812
import pandas as pd


def get_concepts_from_kmeans(tfidf, kmeans):
    """Get kmeans cluster centers in term space.

    Parameters
    ----------
    tfidf : TfidfVectorizer
        Fitted vectorizer with learned term vocabulary.
    kmeans : KMeans
        KMeans fitted to document-term matrix returned by tfidf.

    Returns
    -------
    pandas.DataFrame
        Columns are terms, rows are "concepts" sorted by cluster size.
    """
    df = pd.DataFrame(kmeans.cluster_centers_, columns=tfidf.get_feature_names())
    return df.reindex(pd.Series(kmeans.labels_).value_counts().index)
69734b194c5d71c8e93347845f83264c832820d6
11,813
def streams_to_dataframe(streams, imcs=None, imts=None, event=None): """Extract peak ground motions from list of processed StationStream objects. Note: The PGM columns underneath each channel will be variable depending on the units of the Stream being passed in (velocity sensors can only generate PGV) and on the imtlist passed in by user. Spectral acceleration columns will be formatted as SA(0.3) for 0.3 second spectral acceleration, for example. Args: directory (str): Directory of ground motion files (streams). imcs (list): Strings designating desired components to create in table. imts (list): Strings designating desired PGMs to create in table. event (ScalarEvent): Defines the focal time, geographic location, and magnitude of an earthquake hypocenter. Default is None. Returns: DataFrame: Pandas dataframe containing columns: - STATION Station code. - NAME Text description of station. - LOCATION Two character location code. - SOURCE Long form string containing source network. - NETWORK Short network code. - LAT Station latitude - LON Station longitude - DISTANCE Epicentral distance (km) (if epicentral lat/lon provided) - HN1 East-west channel (or H1) (multi-index with pgm columns): - PGA Peak ground acceleration (%g). - PGV Peak ground velocity (cm/s). - SA(0.3) Pseudo-spectral acceleration at 0.3 seconds (%g). - SA(1.0) Pseudo-spectral acceleration at 1.0 seconds (%g). - SA(3.0) Pseudo-spectral acceleration at 3.0 seconds (%g). - HN2 North-south channel (or H2) (multi-index with pgm columns): - PGA Peak ground acceleration (%g). - PGV Peak ground velocity (cm/s). - SA(0.3) Pseudo-spectral acceleration at 0.3 seconds (%g). - SA(1.0) Pseudo-spectral acceleration at 1.0 seconds (%g). - SA(3.0) Pseudo-spectral acceleration at 3.0 seconds (%g). - HNZ Vertical channel (or HZ) (multi-index with pgm columns): - PGA Peak ground acceleration (%g). - PGV Peak ground velocity (cm/s). - SA(0.3) Pseudo-spectral acceleration at 0.3 seconds (%g). - SA(1.0) Pseudo-spectral acceleration at 1.0 seconds (%g). - SA(3.0) Pseudo-spectral acceleration at 3.0 seconds (%g). - GREATER_OF_TWO_HORIZONTALS (multi-index with pgm columns): - PGA Peak ground acceleration (%g). - PGV Peak ground velocity (cm/s). - SA(0.3) Pseudo-spectral acceleration at 0.3 seconds (%g). - SA(1.0) Pseudo-spectral acceleration at 1.0 seconds (%g). - SA(3.0) Pseudo-spectral acceleration at 3.0 seconds (%g). """ if imcs is None: station_summary_imcs = DEFAULT_IMCS else: station_summary_imcs = imcs if imts is None: station_summary_imts = DEFAULT_IMTS else: station_summary_imts = imts subdfs = [] for stream in streams: if not stream.passed: continue if len(stream) < 3: continue stream_summary = StationSummary.from_stream( stream, station_summary_imcs, station_summary_imts, event) summary = stream_summary.summary subdfs += [summary] dataframe = pd.concat(subdfs, axis=0).reset_index(drop=True) return dataframe
8be5968ee513da80910df227156e2ceb02624941
11,814
def timestamp_diff(time_point_unit: TimePointUnit, time_point1, time_point2) -> Expression:
    """
    Returns the (signed) number of :class:`~pyflink.table.expression.TimePointUnit`
    between time_point1 and time_point2.

    For example,
    `timestamp_diff(TimePointUnit.DAY, lit("2016-06-15").to_date, lit("2016-06-18").to_date)`
    leads to 3.

    :param time_point_unit: The unit to compute diff.
    :param time_point1: The first point in time.
    :param time_point2: The second point in time.
    :return: The number of intervals as integer value.
    """
    return _ternary_op("timestampDiff", time_point_unit._to_j_time_point_unit(),
                       time_point1, time_point2)
711c41adf3472b2dd0ada51160aefca432ed2bc6
11,815
import tqdm def plot_solar_twins_results(star_postfix=''): """Plot results for 17 pairs with q-coefficients for solar twins""" def format_pair_label(pair_label): """Format a pair label for printing with MNRAS ion format. Parameters ---------- pair_label : str A pair label of the form "4492.660Fe2_4503.480Mn1_25" Returns ------- dict A dictionary containing LaTeX-formatted representations of the two transitions in the pair label. """ t1, t2, order_num = pair_label.split('_') # This mimics the look of ion labels in MNRAS. new_label1 = f"{t1[8:-1]}" + r"\," + r"\textsc{\lowercase{" +\ f"{roman_numerals[t1[-1]]}" + r"}}" + r"\ " + f"{t1[:8]}" new_label2 = f"{t2[8:-1]}" + r"\," + r"\textsc{\lowercase{" +\ f"{roman_numerals[t2[-1]]}" + r"}}" + r"\ " + f"{t2[:8]}" return {'ion1': new_label1, 'ion2': new_label2} roman_numerals = {'1': 'I', '2': 'II'} # Get labels of the 17 pairs on the shortlist. pairs_file = vcl.data_dir / '17_pairs.txt' pair_labels = np.loadtxt(pairs_file, dtype=str) # Get the 18 solar twins. stars = {star_name: Star(star_name + star_postfix, vcl.output_dir / star_name) for star_name in sp1_stars} # Set out lists of star for the top and bottom panels. block1_stars = ('Vesta', 'HD76151', 'HD78429', 'HD140538', 'HD146233', 'HD157347') block2_stars = ('HD20782', 'HD19467', 'HD45184', 'HD45289', 'HD171665',) block3_stars = ('HD138573', 'HD183658', 'HD220507', 'HD222582') block4_stars = ('HD1835', 'HD30495', 'HD78660', ) block1_width = 25 block1_ticks = 15 block2_width = 45 block2_ticks = 30 block3_width = 75 block3_ticks = 50 block4_width = 125 block4_ticks = 75 fig = plt.figure(figsize=(18, 10.5), tight_layout=True) gs = GridSpec(ncols=20, nrows=4, figure=fig, wspace=0, height_ratios=(len(block1_stars), len(block2_stars), len(block3_stars), len(block4_stars))) # Set the "velocity" title to be below the figure. fig.supxlabel('Diffrence between pair velocity separation and model (m/s)', fontsize=18) # Create a dict to hold all the axes. axes = {} # Create top panel (with pair labels) # Create tick locations to put the grid at. y_grid_locations = [y+0.5 for y in range(len(block1_stars))] for i, label in (enumerate(pair_labels)): ax = fig.add_subplot(gs[0, i]) ax.axvline(x=0, color='Black', linestyle='--', linewidth=1.7, zorder=1) # Set the limits of each axis. ax.set_ylim(top=-0.5, bottom=len(block1_stars)-0.5) ax.set_xlim(left=-block1_width, right=block1_width) # Add the grid. ax.yaxis.set_minor_locator(ticker.FixedLocator(y_grid_locations)) ax.yaxis.grid(which='minor', color='LightGray', linewidth=1.8, linestyle=':', zorder=0) # Remove all the ticks and labels on the y-axes (left-most will have # them specially added back in). ax.tick_params(axis='y', which='both', left=False, right=False, labelleft=False, labelright=False) ax.tick_params(axis='x', which='both', top=False, bottom=True, labeltop=False, labelbottom=True, labelsize=12) ax.xaxis.set_major_locator(ticker.FixedLocator( (-block1_ticks, 0, block1_ticks))) ax.xaxis.set_minor_locator(ticker.AutoMinorLocator()) # This sets the width of the outside edges of the subaxes. for axis in ['top', 'right', 'bottom', 'left']: ax.spines[axis].set_linewidth(2.1) ax.spines[axis].set_zorder(20) # Add the tick labels for each pair at the top of the plot. 
ax_twin = ax.twiny() ax_twin.set_xlim(ax.get_xlim()) ax_twin.tick_params(top=False, labelsize=16) t1, t2, order_num = label.split('_') if i > 5: ax_twin.xaxis.set_major_locator(ticker.FixedLocator((-12,))) ax_twin.set_xticklabels(('{ion1}\n{ion2}'.format( **format_pair_label(label)),), fontdict={'rotation': 90, 'horizontalalignment': 'left', 'verticalalignment': 'bottom'}) elif i in (0, 2, 4): ax_twin.xaxis.set_major_locator(ticker.FixedLocator((-11, 12))) ax_twin.set_xticklabels((f'Order: {str(order_num)}', '{ion1}\n{ion2}'.format( **format_pair_label(label)),), fontdict={'rotation': 90, 'horizontalalignment': 'left', 'verticalalignment': 'bottom'}) elif i in (1, 3, 5): ax_twin.xaxis.set_major_locator(ticker.FixedLocator((2,))) ax_twin.set_xticklabels((f'Order: {str(order_num)}',), fontdict={'rotation': 90, 'horizontalalignment': 'left', 'verticalalignment': 'bottom'}) # Add axis to axes dictionary. axes[(0, i)] = ax # Create second panel y_grid_locations = [y+0.5 for y in range(len(block2_stars))] for i, label in (enumerate(pair_labels)): ax = fig.add_subplot(gs[1, i]) ax.axvline(x=0, color='Black', linestyle='--', linewidth=1.7) ax.set_ylim(top=-0.5, bottom=len(block2_stars)-0.5) ax.set_xlim(left=-block2_width, right=block2_width) ax.yaxis.set_minor_locator(ticker.FixedLocator(y_grid_locations)) ax.yaxis.grid(which='minor', color='LightGray', linewidth=1.8, linestyle=':') ax.xaxis.set_minor_locator(ticker.AutoMinorLocator()) ax.xaxis.set_major_locator(ticker.FixedLocator( (-block2_ticks, 0, block2_ticks))) ax.tick_params(which='both', labelleft=False, labelbottom=True, left=False, right=False, top=False, bottom=True, labelsize=12) for axis in ['top', 'right', 'bottom', 'left']: ax.spines[axis].set_linewidth(2.1) ax.spines[axis].set_zorder(20) axes[(1, i)] = ax # Create third panel y_grid_locations = [y+0.5 for y in range(len(block3_stars))] for i, label in (enumerate(pair_labels)): ax = fig.add_subplot(gs[2, i]) ax.axvline(x=0, color='Black', linestyle='--', linewidth=1.7) ax.set_ylim(top=-0.5, bottom=len(block3_stars)-0.5) ax.set_xlim(left=-block3_width, right=block3_width) ax.yaxis.set_minor_locator(ticker.FixedLocator(y_grid_locations)) ax.yaxis.grid(which='minor', color='LightGray', linewidth=1.8, linestyle=':') ax.xaxis.set_minor_locator(ticker.AutoMinorLocator()) ax.xaxis.set_major_locator(ticker.FixedLocator( (-block3_ticks, 0, block3_ticks))) ax.tick_params(which='both', labelleft=False, labelbottom=True, left=False, right=False, top=False, bottom=True, labelsize=12) for axis in ['top', 'right', 'bottom', 'left']: ax.spines[axis].set_linewidth(2.1) ax.spines[axis].set_zorder(20) axes[(2, i)] = ax # Create fourth panel y_grid_locations = [y+0.5 for y in range(len(block4_stars))] for i, label in (enumerate(pair_labels)): ax = fig.add_subplot(gs[3, i]) ax.axvline(x=0, color='Black', linestyle='--', linewidth=1.7) ax.set_ylim(top=-0.5, bottom=len(block4_stars)-0.5) ax.set_xlim(left=-block4_width, right=block4_width) ax.yaxis.set_minor_locator(ticker.FixedLocator(y_grid_locations)) ax.yaxis.grid(which='minor', color='LightGray', linewidth=1.8, linestyle=':') ax.xaxis.set_minor_locator(ticker.AutoMinorLocator()) ax.xaxis.set_major_locator(ticker.FixedLocator( (-block4_ticks, 0, block4_ticks))) ax.tick_params(which='both', labelleft=False, labelbottom=True, left=False, right=False, top=False, bottom=True, labelsize=12) for axis in ['top', 'right', 'bottom', 'left']: ax.spines[axis].set_linewidth(2.1) ax.spines[axis].set_zorder(20) axes[(3, i)] = ax # Set the left-most axes to have 
y-labels for star names. for i in range(4): axes[(i, 0)].tick_params(labelleft=True) # Create the locations for minor ticks to put the star name labels at. for i, block in enumerate((block1_stars, block2_stars, block3_stars, block4_stars)): y_ticks = [y for y in range(len(block))] axes[(i, 0)].yaxis.set_major_locator(ticker.FixedLocator(y_ticks)) # Create the list of top stars...have to handle Vesta specially. top_labels = ['Sun'] top_labels.extend([' '.join((x[:2], x[2:])) for x in block1_stars[1:]]) axes[(0, 0)].set_yticklabels(top_labels, fontdict={'horizontalalignment': 'right', 'fontsize': 15}) for i, star_names in enumerate((block2_stars, block3_stars, block4_stars)): axes[(i+1, 0)].set_yticklabels([' '.join((x[:2], x[2:])) for x in star_names], fontdict={ 'horizontalalignment': 'right', 'fontsize': 15}) # Define colors for pre- and post- eras. pre_color = cmr.ember(0.7) post_color = cmr.cosmic(0.55) # How significant to report outliers. sigma_significance = 3 vprint(f'Looking for outliers beyond {sigma_significance} sigma') # Create lists to hold the significance values: pre_stat, pre_sys = [], [] post_stat, post_sys = [], [] for i, pair_label in enumerate(pair_labels): # Create lists to hold the values and errors: pre_values, post_values = [], [] pre_err_stat, post_err_stat = [], [] pre_err_sys, post_err_sys = [], [] # Figure out some numbers for locating things from star name. for star_name in sp1_stars: if star_name in block1_stars: row = 0 j = block1_stars.index(star_name) elif star_name in block2_stars: row = 1 j = block2_stars.index(star_name) elif star_name in block3_stars: row = 2 j = block3_stars.index(star_name) elif star_name in block4_stars: row = 3 j = block4_stars.index(star_name) else: raise RuntimeError(f"{star_name} not in any list!") star = stars[star_name] pair_index = star.p_index(pair_label) fiber_split_index = star.fiberSplitIndex # Get the pre-change values. if star.hasObsPre: values, mask = remove_nans(star.pairModelOffsetsArray[ :fiber_split_index, pair_index], return_mask=True) errors = star.pairModelErrorsArray[:fiber_split_index, pair_index][mask] plot = True try: value, error = weighted_mean_and_error(values, errors) except ZeroDivisionError: # This indicates no value for a particular 'cell', so just # plot something there to indicate that. axes[(row, i)].plot(0, j-0.15, color='Black', marker='x', markersize=7, zorder=10) plot = False if plot: # Compute error with sigma_** included. sigma_s2s = star.pairSysErrorsArray[0, pair_index] full_error = np.sqrt(error**2 + sigma_s2s**2) sig_stat = float((value / error).value) sig_sys = float((value / full_error).value) pre_stat.append(sig_stat) pre_sys.append(sig_sys) if abs(sig_sys) > sigma_significance: vprint(f'{star.name}: {pair_label}:' f' (Pre) {sig_sys:.2f}') pre_values.append(value) pre_err_stat.append(error) pre_err_sys.append(full_error) if (star.name == 'HD1835') and\ (pair_label == '4759.449Ti1_4760.600Ti1_32'): vprint('For HD 1835, 4759.449Ti1_4760.600Ti1_32:') vprint(f'Value: {value:.3f}, error: {full_error:.3f}') # First plot an errorbar with sigma_** included. axes[(row, i)].errorbar(value, j-0.15, xerr=full_error, ecolor=pre_color, marker='', capsize=3, capthick=1.5, elinewidth=1.4, zorder=11) # Then plot just the star's statistical error. axes[(row, i)].errorbar(value, j-0.15, xerr=error, markerfacecolor=pre_color, markeredgecolor='Black', ecolor=pre_color, markeredgewidth=2, marker='o', markersize=9, capsize=5, elinewidth=4, zorder=12) # Get the post-change values. 
if star.hasObsPost: values, mask = remove_nans(star.pairModelOffsetsArray[ fiber_split_index:, pair_index], return_mask=True) errors = star.pairModelErrorsArray[fiber_split_index:, pair_index][mask] plot = True try: value, error = weighted_mean_and_error(values, errors) except ZeroDivisionError: axes[(row, i)].plot(0, j+0.15, color='Black', marker='x', markersize=7) plot = False if plot: sigma_s2s = star.pairSysErrorsArray[1, pair_index] full_error = np.sqrt(error**2 + sigma_s2s**2) sig_stat = float((value / error).value) sig_sys = float((value / full_error).value) post_stat.append(sig_stat) post_sys.append(sig_sys) if abs(sig_sys) > sigma_significance: vprint(f'{star.name}: {pair_label}:' f' (Post) {sig_sys:.2f}') post_values.append(value) post_err_stat.append(error) post_err_sys.append(full_error) axes[(row, i)].errorbar(value, j+0.15, xerr=full_error, ecolor=post_color, marker='', capsize=4, capthick=1.5, elinewidth=1.5, zorder=13) axes[(row, i)].errorbar(value, j+0.15, xerr=error, markerfacecolor=post_color, markeredgecolor='Black', ecolor=post_color, markeredgewidth=2, marker='D', markersize=8.5, capsize=5, elinewidth=4, zorder=14) # Print some metrics for the pair. pre_val_arr = np.array(pre_values) pre_err_arr_stat = np.array(pre_err_stat) pre_err_arr_sys = np.array(pre_err_sys) post_val_arr = np.array(post_values) post_err_arr_stat = np.array(post_err_stat) post_err_arr_sys = np.array(post_err_sys) wm_value_pre, error_pre = weighted_mean_and_error( pre_val_arr, pre_err_arr_sys) wm_value_post, error_post = weighted_mean_and_error( post_val_arr, post_err_arr_sys) chi_2_pre_stat = fit.calc_chi_squared_nu( pre_val_arr, pre_err_arr_stat, 1) chi_2_pre_sys = fit.calc_chi_squared_nu( pre_val_arr, pre_err_arr_sys, 1) chi_2_post_stat = fit.calc_chi_squared_nu( post_val_arr, post_err_arr_stat, 1) chi_2_post_sys = fit.calc_chi_squared_nu( post_val_arr, post_err_arr_sys, 1) vprint(f'For {pair_label}:') vprint(' Pre : Weighted mean:' f' {wm_value_pre:.2f} ± {error_pre:.2f} m/s') vprint(f' Pre : chi^2: {chi_2_pre_stat:.2f}, {chi_2_pre_sys:.2f}') vprint(f' Pre : mean error: {np.mean(pre_err_arr_sys):.2f} m/s') vprint(' Post: Weighted mean:' f' {wm_value_post:.2f} ± {error_post:.2f} m/s') vprint(f' Post: chi^2: {chi_2_post_stat:.2f}, {chi_2_post_sys:.2f}') vprint(f' Post: mean error: {np.mean(post_err_arr_sys):.2f} m/s') # Create the histogram plots for the pair. fig_hist = plt.figure(figsize=(5.5, 5.5), tight_layout=True) bins = np.linspace(-3, 3, num=25) ax_hist = fig_hist.add_subplot(1, 1, 1) ax_hist.set_xlabel(r'Significance ($\sigma$)') ax_hist.set_ylabel('N') ax_hist.xaxis.set_major_locator(ticker.FixedLocator((-3, -2, -1, 0, 1, 2, 3))) ax_hist.xaxis.set_minor_locator(ticker.FixedLocator(bins)) ax_hist.yaxis.set_minor_locator(ticker.AutoMinorLocator()) # Add the pre and post distributions together here. pre_stat.extend(post_stat) pre_sys.extend(post_sys) one_sigma, two_sigma = 0, 0 for x in pre_sys: y = abs(x) if y < 1: one_sigma += 1 two_sigma += 1 elif y < 2: two_sigma += 1 vprint(f'{one_sigma/len(pre_sys):.1%} of values within 1 sigma.') vprint(f'{two_sigma/len(pre_sys):.1%} of values within 2 sigma.') ax_hist.hist(pre_stat, color='Gray', histtype='step', bins=bins, linewidth=1.8, label='Stat. only') ax_hist.hist(pre_sys, color='Black', histtype='step', bins=bins, linewidth=2.6, label='Stat. 
+ Sys.') ax_hist.legend(loc='upper right', fontsize=16, shadow=True) outfile = plots_dir / f'Pair_offsets_17_pairs{star_postfix}.pdf' fig.savefig(str(outfile), bbox_inches='tight', pad_inches=0.01) histfile = plots_dir / f'Pair_offsets_histograms{star_postfix}.pdf' fig_hist.savefig(str(histfile), bbox_inches='tight', pad_inches=0.01) # Create an excerpt of a single column. fig_ex = plt.figure(figsize=(5, 6), tight_layout=True) ax_ex = fig_ex.add_subplot(1, 1, 1) y_grid_locations = [y+0.5 for y in range(len(sp1_stars))] ax_ex.axvline(x=0, color='Black', linestyle='--', linewidth=1.7) ax_ex.set_ylim(top=-0.5, bottom=len(sp1_stars)-0.5) ax_ex.set_xlim(left=-40, right=40) ax_ex.yaxis.set_minor_locator(ticker.FixedLocator(y_grid_locations)) ax_ex.yaxis.grid(which='minor', color='LightGray', linewidth=1.8, linestyle=':') ax_ex.xaxis.set_minor_locator(ticker.AutoMinorLocator()) # ax_ex.xaxis.set_major_locator(ticker.FixedLocator( # [-50, -25, 0, 25, 50])) ax_ex.tick_params(which='both', labelleft=True, labelbottom=True, left=False, right=False, top=False, bottom=True, labelsize=12) for axis in ['top', 'right', 'bottom', 'left']: ax_ex.spines[axis].set_linewidth(2.1) ax_ex.spines[axis].set_zorder(20) ax_ex.set_xlabel('Pair model offset (m/s)', size=15) # Add labels to axis. # Create the locations for major ticks to put the star name labels at. y_ticks = [y for y in range(len(sp1_stars))] ax_ex.yaxis.set_major_locator(ticker.FixedLocator(y_ticks)) # Create the list of top stars...have to handle Vesta specially. ex_labels = ['Sun'] ex_labels.extend([' '.join((x[:2], x[2:])) for x in sp1_stars[1:]]) ax_ex.set_yticklabels(ex_labels, fontdict={'horizontalalignment': 'right', 'fontsize': 15}) # Set the pair label to use. pair_label = pair_labels[10] # 6138--6139 pair_label = pair_labels[16] tqdm.write(f'Using pair {pair_label} for excerpt') for j, star_name in enumerate(sp1_stars): star = stars[star_name] pair_index = star.p_index(pair_label) fiber_split_index = star.fiberSplitIndex # Get the pre-change values. if star.hasObsPre: values, mask = remove_nans(star.pairModelOffsetsArray[ :fiber_split_index, pair_index], return_mask=True) errors = star.pairModelErrorsArray[:fiber_split_index, pair_index][mask] try: value, error = weighted_mean_and_error(values, errors) except ZeroDivisionError: # This indicates no value for a particular 'cell', so just # plot something there to indicate that. ax_ex.plot(0, j, color='Black', marker='x', markersize=7, zorder=10) continue # Compute error with sigma_** included. sigma_s2s = star.pairSysErrorsArray[0, pair_index] full_error = np.sqrt(error**2 + sigma_s2s**2) significance = abs(value / full_error).value if significance > sigma_significance: vprint(f'{star.name}: {pair_label}:' f' (Pre) {significance:.2f}') # First plot an errorbar with sigma_** included. ax_ex.errorbar(value, j-0.15, xerr=full_error, ecolor=pre_color, marker='', capsize=3, capthick=1.5, elinewidth=1.4, zorder=11) # Then plot just the star's statistical error. ax_ex.errorbar(value, j-0.15, xerr=error, markerfacecolor=pre_color, markeredgecolor='Black', ecolor=pre_color, markeredgewidth=2, # controls capthick marker='o', markersize=9, capsize=5, elinewidth=4, zorder=12) # Get the post-change values. 
if star.hasObsPost: values, mask = remove_nans(star.pairModelOffsetsArray[ fiber_split_index:, pair_index], return_mask=True) errors = star.pairModelErrorsArray[fiber_split_index:, pair_index][mask] try: value, error = weighted_mean_and_error(values, errors) except ZeroDivisionError: ax_ex.plot(0, j, color='Black', marker='x', markersize=7) continue sigma_s2s = star.pairSysErrorsArray[1, pair_index] full_error = np.sqrt(error**2 + sigma_s2s**2) significance = abs(value / full_error).value if significance > sigma_significance: vprint(f'{star.name}: {pair_label}:' f' (Post) {significance:.2f}') ax_ex.errorbar(value, j+0.15, xerr=full_error, ecolor=post_color, marker='', capsize=4, capthick=1.5, elinewidth=1.5, zorder=13) ax_ex.errorbar(value, j+0.15, xerr=error, markerfacecolor=post_color, markeredgecolor='Black', ecolor=post_color, markeredgewidth=2, marker='D', markersize=8.5, capsize=5, elinewidth=4, zorder=14) outfile = plots_dir /\ f'Pair_offsets_17_pairs_excerpt_{pair_label.replace(".", "_")}.pdf' fig_ex.savefig(str(outfile), bbox_inches='tight', pad_inches=0.01)
c4bd9120891d435dd631394b7b67255bd75fc8d2
11,816
from PIL import Image
from PIL import ImageOps


def equalize(pil_img: Image.Image, level: float):
    """Equalize an image.

    .. seealso:: :func:`PIL.ImageOps.equalize`.

    Args:
        pil_img (Image.Image): The image.
        level (float): The intensity.
    """
    del level  # unused
    return ImageOps.equalize(pil_img)
f0771453063b803824571056924397e1f7bb77a3
11,817
import numpy as np
import tensorflow as tf
from lightfm import LightFM
from lightfm.data import Dataset


def lightfm_trainer(
    train: np.ndarray, loss: str, n_components: int, lam: float
) -> LightFM:
    """Train lightfm models."""
    # detect and init the TPU
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
    # instantiate a distribution strategy
    tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)

    # instantiating the model in the strategy scope creates the model on the TPU
    with tpu_strategy.scope():
        # train model normally
        model = LightFM(
            loss=loss,
            user_alpha=lam,
            item_alpha=lam,
            no_components=n_components,
            learning_rate=0.001,
            random_state=12345,
        )
        dataset = Dataset()
        dataset.fit(train[:, 0], train[:, 1])
        (interactions, weights) = dataset.build_interactions(
            ((x[0], x[1], 1) for x in train[train[:, 2] == 1])
        )
        model.fit(interactions, epochs=100)

    return model
6776b2fbcb3039cc1efb95cec0b7562b959df0fd
11,818
def get_wspd_ts(path, storm, res, shpmask): """ Extracts the U and V component and returns the wind speed timeseries of storm_dict Arguments: path (str): Path containing data to load storm (str): Name of storm res (str): Resolution of data Returns: Pandas dataframe with time index """ ufile = f'{path}/ua.T1Hpoint.UMRA2T.*.{storm}.{res}km.nc' vfile = f'{path}/va.T1Hpoint.UMRA2T.*.{storm}.{res}km.nc' ucube = iris.load_cube(ufile, 'x_wind') vcube = iris.load_cube(vfile, 'y_wind') ucube = ucube.intersection(longitude=(75, 100), latitude=(10, 25)) vcube = vcube.intersection(longitude=(75, 100), latitude=(10, 25)) ws_ifunc = iris.analysis.maths.IFunc(calc_wspd, ws_units_func) ws_cube = ws_ifunc(ucube, vcube, new_name='wind speed') try: mwspd = shpmask.mask_cube(ws_cube) except: print("Can't mask with shape! Masked over lon-lat box instead...") mwspd = ws_cube cubedata = [] timedata = [] for subcube in mwspd.slices_over('forecast_reference_time'): # extracting the time tcoord = subcube.coord('time') units = tcoord.units tdata = [units.num2date(point) for point in tcoord.points] cube = subcube.collapsed(['latitude', 'longitude'], iris.analysis.MAX) cubedata.append(cube.data.filled()) timedata.append(tdata) # Convert to Pandas Dataframe with unified time index s = list() [s.append(pd.Series(data=cubedata[i], index=timedata[i])) for i in range(np.shape(timedata)[0])] return pd.DataFrame(s).T
414d39105a14a5ac2d335a3146a4e6d462f9760a
11,819
def _read_output_file(path): """Read Stan csv file to ndarray.""" comments = [] data = [] columns = None with open(path, "rb") as f_obj: # read header for line in f_obj: if line.startswith(b"#"): comments.append(line.strip().decode("utf-8")) continue columns = {key: idx for idx, key in enumerate(line.strip().decode("utf-8").split(","))} break # read data for line in f_obj: line = line.strip() if line.startswith(b"#"): comments.append(line.decode("utf-8")) continue if line: data.append(np.array(line.split(b","), dtype=np.float64)) return columns, np.array(data, dtype=np.float64), comments
62b312db851386900cae1643d3eb75896f45cde1
11,820
def _transform_data(raw_df, cols_config):
    """
    Applies required transformations to the raw dataframe

    :returns : Transformed dataframe ready to be exported/loaded
    """
    # Perform column and dtype checks
    if check_columns(raw_df, cols_config):
        df = raw_df
    else:
        # df would otherwise be unbound below, so fail explicitly here
        logger.warning("Inconsistencies found during column check")
        raise ValueError("Inconsistencies found during column check")

    # Apply transformations
    df = convert_dates(df)
    df = get_duration(df)
    df = remove_negatives(df)
    df = drop_columns(df)

    return df
3874ce8bc38d0b75f037538919b1c649d8a6b8b9
11,821
import os

import dicom_numpy
import pydicom
import SimpleITK as sitk


def dcmToSimpleITK(dcmDirectory):
    """Return a simple ITK image from a pile of dcm files.

    The returned sITK image has been rescaled based on the value of the
    rescale slope on the dicom tag. Array-like data of the 3D image can be
    obtained with the GetArrayFromImage() method"""
    list_dcmFiles = []
    for directory, subDirectory, list_dcmFileNames in os.walk(dcmDirectory):
        for dcmFile in list_dcmFileNames:
            if '.dcm' in dcmFile.lower():
                list_dcmFiles.append(os.path.join(directory, dcmFile))
    dcmImage = [pydicom.dcmread(dcmSliceFile) for dcmSliceFile in list_dcmFiles]
    voxel_ndarray, ijk_to_xyz = dicom_numpy.combine_slices(dcmImage)
    sITK_image = sitk.GetImageFromArray(voxel_ndarray)
    return sITK_image
14351d07d3b4be8adcf16e51fbefc4ae736848c8
11,822
import math


def mean_and_std(values):
    """Compute mean and standard deviation"""
    size = len(values)
    mean = sum(values) / size
    s = 0.0
    for v in values:
        s += (v - mean)**2
    std = math.sqrt((1.0 / (size - 1)) * s)
    return mean, std
15b11e89317cc86b68262fa959b9c65a2f87bdcc
11,823
from typing import List
from typing import Union


def blacken(
        color: Color,
        amount: FloatOrFloatIterable
) -> Union[Color, List[Color]]:
    """
    Return a color or colors amount fraction or fractions of the way from
    `color` to `black`.

    :param color: The existing color.
    :param amount: The proportion to blacken by.
    """
    return cross_fade(from_color=color, to_color='black', amount=amount)
26c74556b8d73692ec4afbb763221c508c6a941b
11,824
from sympy import derive_by_array
from sympy import symbols


def llf_gradient_sigma_neq_gamma(history, sum_less_equal=True):
    """
    Calculate the gradient of the log-likelihood function symbolically.

    Parameters
    ----------
    sum_less_equal : bool, default: True
        This arg is passed to :func:`llf_sigma_neq_gamma`.

    Returns
    -------
    gradient : sympy.Array
        An array containing four entries. The first (second) [third] {fourth}
        entry is the derivative of the log-likelihood function w.r.t. beta
        (sigma) [gamma] {N} parameter.
    """
    beta, sigma, gamma, n = symbols("beta sigma gamma n")
    return derive_by_array(
        llf_sigma_neq_gamma(history, sum_less_equal), [beta, sigma, gamma, n]
    )
b3efce2413b5f88e4c7b76117f4f668a5f386b30
11,825
def selection_support_df(df, combinations, min_support): """ selection combinations with support Parameters ---------- df : pandas.DataFrame data to be selected. for example : = | banana | mango | apple | | 1 | 1 | 1 | | 1 | 0 | 0 | | 1 | 1 | 0 | combinations : list combinations of df columns. for example : = [("apple", "apple"), ("banana", "apple"), ("mango", "apple") ("apple", "banana", "apple"), ("apple", "mango", "apple"), ("banana", "mango", "apple"), ("apple",), ...] min_support : float minimal support to be select combinations for example : = 0.5 Returns ------- combinations and supports. for example : = [("banana", "mango", "apple"), ...] = [0.1, ...] """ selected_supports = [] selected_combinations = [] columns = df.columns n_rows = df.shape[0] for combination in combinations: position = position_itemset(combination, columns) position_columns = np.array(columns[position]) length_combination = len(combination) combination_array = np.array(df.loc[:, position_columns]) check_array = np.where(length_combination == combination_array.sum(axis=1))[0] length_check_array = len(check_array) support = cal_support(length_check_array, n_rows) if support >= min_support: selected_combinations.append(combination) selected_supports.append(support) return selected_combinations, selected_supports
4decb66dfe913a62e0b3b67d9a61a6941ec6ff76
11,826
from trie import TrieTree


def empty_trie_tree():
    """Empty trie tree fixture."""
    return TrieTree()
d68ae38a810e02015b3967eb44bb3dda8445afd7
11,827
def bind_context_to_node(context, node):
    """Give a context a boundnode to retrieve the correct function name or
    attribute value with from further inference.

    Do not use an existing context since the boundnode could then be
    incorrectly propagated higher up in the call stack.

    :param context: Context to use
    :type context: Optional(context)

    :param node: Node to do name lookups from
    :type node: NodeNG

    :returns: A new context
    :rtype: InferenceContext
    """
    context = copy_context(context)
    context.boundnode = node
    return context
92ce7a9d155e621e54ad90f5aefb49bda4ea60df
11,828
def get_weight_matrix(file_handle): """ Read each line in file_handle and return the weight matrix as a dict, in which each key is the original node name, and each value is a nested dict, whose keys are gene systematic names, and values are weights. """ weight_matrix = dict() for line_num, line in enumerate(file_handle, start=1): tokens = line.strip().split('\t') # The first line includes node names only if line_num == 1: num_columns = len(tokens) nodes = tokens[1:] for node_name in nodes: weight_matrix[node_name] = dict() else: # read data lines # Validate the number of columns in each line if num_columns != len(tokens): raise Exception(f"Incorrect number of columns on line {line_num}") gene_name = tokens[0] weights = [float(x) for x in tokens[1:]] for idx, w in enumerate(weights): node_name = nodes[idx] weight_matrix[node_name][gene_name] = w return weight_matrix
08773c5ff852814855e4a042bb79acc82d09b067
11,829
from datetime import timedelta


def get_uptime():
    """ Get uptime """
    try:
        with open('/proc/uptime', 'r') as f:
            uptime_seconds = float(f.readline().split()[0])
            uptime_time = str(timedelta(seconds=uptime_seconds))
            data = uptime_time.split('.', 1)[0]
    except Exception as err:
        data = str(err)

    return data
fc783a24b7239c43b69c44ea30b62465a775761d
11,830
import json def measure_list_for_upcoming_elections_retrieve_api_view(request): # measureListForUpcomingElectionsRetrieve """ Ask for all measures for the elections in google_civic_election_id_list :param request: :return: """ status = "" google_civic_election_id_list = request.GET.getlist('google_civic_election_id_list[]') state_code = request.GET.get('state_code', '') # We will need all candidates for all upcoming elections so we can search the HTML of # the possible voter guide for these names measure_list_light = [] results = retrieve_measure_list_for_all_upcoming_elections(google_civic_election_id_list, limit_to_this_state_code=state_code) if results['measure_list_found']: measure_list_light = results['measure_list_light'] expand_results = add_measure_name_alternatives_to_measure_list_light(measure_list_light) if expand_results['success']: measure_list_light = expand_results['measure_list_light'] google_civic_election_id_list = results['google_civic_election_id_list'] status += results['status'] success = results['success'] json_data = { 'status': status, 'success': success, 'google_civic_election_id_list': google_civic_election_id_list, 'measure_list': measure_list_light, } return HttpResponse(json.dumps(json_data), content_type='application/json')
449bcde309c6c224521fcf5f0acc6de427d30f55
11,831
import argparse


def check_template_path(path):
    """
    Argument checker, check if template exists and get the content
    """
    try:
        with open(path) as template:
            tmp = template.read()
            return tmp
    except Exception:
        raise argparse.ArgumentTypeError("Invalid template path!")
5af832dd38490a79c6fd014f0db2b839d866e838
11,832
from fedelemflowlist.globals import flow_list_fields


def get_required_flowlist_fields():
    """
    Gets required field names for Flow List.

    :return: list of required fields
    """
    required_fields = []
    for k, v in flow_list_fields.items():
        if v[1]['required']:
            required_fields.append(k)
    return required_fields
c2581cde45e9aad0c09620f98557a777a5d89bdb
11,833
def sparse_table_function(*, index, data) -> callable: """ The very simplest Python-ish "sparse matrix", and plenty fast on modern hardware, for the size of tables this module will probably ever see, is an ordinary Python dictionary from <row,column> tuples to significant table entries. There are better ways if you program closer to the bare metal, but this serves the purpose. This routine unpacks "compressed-sparse-row"-style data into an equivalent Python dictionary, then returns a means to query said dictionary according to the expected 2-dimensional interface. """ hashmap = {} for row_id, (Cs, Ds) in enumerate(zip(index, data)): if isinstance(Ds, int): # All non-blank cells this row have the same value: for column_id in Cs: hashmap[row_id, column_id] = Ds else: for column_id, d in zip(Cs, Ds) if Cs else enumerate(Ds): hashmap[row_id, column_id] = d return lambda R, C: hashmap.get((R, C))
a1c3f11f5fd9c2ba4d048a69271db48bc61b26df
11,834
import asyncio

import asyncpg


async def _get_db_connection() -> asyncpg.Connection:
    """
    Initialise database connection.

    On failure, retry multiple times. When the DB starts in parallel with the
    app (with Compose), it may not yet be ready to take connections.
    """
    log.info("Creating DB connection")
    n_attempts = 3
    for attempt in range(1, n_attempts + 1):
        try:
            return await asyncpg.connect(connection_string)
        except ConnectionError:
            log.info(f"Failed to connect to DB (attempt: {attempt}/{n_attempts})")
            if attempt >= n_attempts:
                raise
            await asyncio.sleep(5)
9f2e83b4b98f0d292b352682bf380ff4921e5fba
11,835
def get_view_content(view):
    """ Returns view content as string. """
    return utils.execute_in_sublime_main_thread(
        lambda: view.substr(sublime.Region(0, view.size())))
4dd8d4c9dfa891b31251f32ad6813549c0c453b0
11,836
def signed_byte8(x: IntVar) -> Int8:
    """Implementation for `SBYTE8`."""
    return signed_byte_n(x, 8)
d4e16c80336a0259b2acb4faf1ff329d90aa21b2
11,837
import numpy import itertools def join(zma1, zma2, join_key_mat, join_name_mat, join_val_dct): """ join two z-matrices together """ syms1 = symbols(zma1) syms2 = symbols(zma2) natms1 = count(zma1) natms2 = count(zma2) key_mat1 = numpy.array(key_matrix(zma1)) key_mat2 = numpy.array(key_matrix(zma2, shift=natms1)) # note the shift name_mat1 = numpy.array(name_matrix(zma1)) name_mat2 = numpy.array(name_matrix(zma2)) val_dct1 = values(zma1) val_dct2 = values(zma2) join_natms = min(natms2, 3) assert len(join_key_mat) == len(join_name_mat) == join_natms join_key_mat = numpy.array(join_key_mat, dtype=numpy.object_) join_name_mat = numpy.array(join_name_mat, dtype=numpy.object_) # make sure we aren't overwriting values -- the constructor should take # care of the rest of the necessary validation assert numpy.all(numpy.equal(join_key_mat, None) == numpy.equal(join_key_mat, None)) join_idxs = numpy.not_equal(join_key_mat, None) assert numpy.all(numpy.equal(key_mat2[:3][join_idxs], None)) assert numpy.all(numpy.equal(name_mat2[:3][join_idxs], None)) key_mat2[:3][join_idxs] = join_key_mat[join_idxs] name_mat2[:3][join_idxs] = join_name_mat[join_idxs] syms = tuple(itertools.chain(syms1, syms2)) key_mat = tuple(itertools.chain(key_mat1, key_mat2)) name_mat = tuple(itertools.chain(name_mat1, name_mat2)) # Could be made to allow for joins with common zma1 and zma2 names (for # symmetry constraints). Not sure if we really want that. val_dct = val_dct1.copy() assert not set(val_dct.keys()) & set(val_dct2.keys()) assert not set(val_dct.keys()) & set(join_val_dct.keys()) val_dct.update(val_dct2) val_dct.update(join_val_dct) return automol.create.zmatrix.from_data(syms, key_mat, name_mat, val_dct)
de55377d436ce50d8c60c97992e940e53a7c9ecc
11,838
import networkx as nx


def multi_to_weighted(G: nx.MultiDiGraph):
    """
    Converts a multidigraph into a weighted digraph.
    """
    nG = nx.DiGraph(G)
    # nG.add_nodes_from(G.nodes)
    nG.name = G.name + "_weighted_nomulti"
    edge_weights = {(u, v): 0 for u, v, k in G.edges}
    for u, v, key in G.edges:
        edge_weights[(u, v)] += 1
    # nG.add_edges_from(edge_weights.keys())
    nx.set_edge_attributes(nG, edge_weights, "weight")
    return nG
0dd14a02c923c8c238c82399f51701639dc82756
11,839
def RetentionInDaysMatch(days):
    """Test whether the string matches retention in days pattern.

    Args:
        days: string to match for retention specified in days format.

    Returns:
        Returns a match object if the string matches the retention in days
        pattern. The match object will contain a 'number' group for the
        duration in number of days. Otherwise, None is returned.
    """
    return _RETENTION_IN_DAYS().match(days)
0b2cded5d01bcb294df1fab956dbe54c9c5e03ae
11,840
import re
from typing import Tuple


def _extract_result_details(pipx_output: str) -> Tuple[str, str, str]:
    """ Extracts name and version from pipx's stdout """
    match = re.search(r'installed package(.*),(.*)\n.*\n.*?-(.*)', pipx_output)
    if match:
        package, python_version, plugin_name = map(str.strip, match.groups())
        return plugin_name.replace('.exe', ''), package, python_version
    raise PluginManagementFatalException('Failed to find package information in install log!')
ae7a588bbb60b47aa889a4dcb7421a55b55b8e2f
11,841
def get_pwr_SXT(sxt_los, plasma, emiss, num_pts=100, labels=labels_full): """ """ pwr_int = {} for ll in labels: # Get the appropriate database label filt = ll.split()[1] pix_los = sxt_los[ll] # Get the spatial points along the line of sight num_pixels = len(pix_los) ell_pts = np.linspace(-0.5, 0.5, num=num_pts) xs = np.zeros([num_pixels, num_pts]) ys = np.zeros([num_pixels, num_pts]) for index,los in enumerate(pix_los): #xs[index,:], ys[index,:] = list(zip(*[los.get_xy(ell) for ell in ell_pts])) xs[index,:], ys[index,:] = los.get_xy(ell_pts) # Evaluate the profiles Te_xs = np.maximum(plasma.Te(xs, ys), 10.0) ne_xs = np.maximum(plasma.ne(xs, ys), 1e15) n0_xs = plasma.n0(xs, ys) pts = list( zip( Te_xs.ravel(), ne_xs.ravel()/1e19, n0_xs.ravel()/1e14 ) ) # Evaluate deuterium using quasi-netrality nZ_xs = {ion:plasma.nZ[ion](xs,ys) for ion in plasma.impurities} nZ_xs['D'] = plasma.nD(xs, ys) # Calculate the emission array emiss_xs = np.zeros(xs.shape) emiss_xs = ne_xs*nZ_xs['D']*np.reshape(emiss['D'][filt](pts), xs.shape) for ion in plasma.impurities: emiss_xs += ne_xs*nZ_xs[ion]*np.reshape(emiss[ion][filt](pts), xs.shape) # Integrate with the trapezoidal rule dl = np.ones([num_pts,1])*(ell_pts[1] - ell_pts[0]) dl[0] *= 0.5 dl[-1] *= 0.5 pwr_int[ll] = np.squeeze(np.dot(emiss_xs, dl)) return pwr_int
ba1a1831f5fd2ee18ce95214b696d37c2e33b456
11,842
import requests import json def get_mstp_port(auth): """ Function to get list of mstp port status :param auth: AOSSAuth class object returned by pyarubaoss.auth :return list of mstp port status :rtype dict """ url_mstp_port = "http://" + auth.ipaddr + "/rest/"+auth.version+"/mstp/port" try: r = requests.get(url_mstp_port, headers=auth.cookie) mstp_port = json.loads(r.text)['mstp_port_element'] return mstp_port except requests.exceptions.RequestException as error: return "Error:\n" + str(error) + " get_mstp_port: An Error has occured"
aa74f2d9c5b04f7744009c2862b0f1bcff57a6dc
11,843
from typing import OrderedDict def _process_tree(tree, nstag): """Process XML tree for a record and return a dictionary for our standard """ rec = OrderedDict() for key, tag_, getall, trans1_, transall_ in [ ('author', 'creatorName', True, None, None), ('name', "title[@titleType='AlternativeTitle']", False, None, None), ('title', "title", False, _unwrap, None), # actually it seems we have no title but "ShortDescription"!!! TODO ('doap:shortdesc', "title", False, _unwrap, None), # duplicate for now ('description', 'description', True, _unwrap, _merge), ('doap:Version', 'version', False, None, None), ('sameAs', "identifier[@identifierType='DOI']", False, None, None), # conflicts with our notion for having a "type" to be internal and to demarkate a Dataset # here might include the field e.g. Dataset/Neurophysiology, so skipping for now # ('type', "resourceType[@resourceTypeGeneral='Dataset']", False, None, None), ('citation', "relatedIdentifier", True, None, None), ('keywords', "subject", True, None, None), ('formats', "format", True, None, None), ]: trans1 = trans1_ or (lambda x: x) text = lambda x: trans1(x.text.strip()) tag = nstag(tag_) try: if getall: value = list(map(text, tree.findall(tag))) else: value = text(tree.find(tag)) except AttributeError: continue if not value or value == ['']: continue if transall_: value = transall_(value) rec[key] = value return rec
26b2ddaffa5184b273d679a4b2d65e22f678894f
11,844
def prepare_all_predictions( data, uid_map, iid_map, interactions, model, num_threads, user_features=None, item_features=None, ): """Function to prepare all predictions for evaluation. Args: data (pandas df): dataframe of all users, items and ratings as loaded uid_map (dict): Keys to map internal user indices to external ids. iid_map (dict): Keys to map internal item indices to external ids. interactions (np.float32 coo_matrix): user-item interaction model (LightFM instance): fitted LightFM model num_threads (int): number of parallel computation threads user_features (np.float32 csr_matrix): User weights over features item_features (np.float32 csr_matrix): Item weights over features Returns: pandas.DataFrame: all predictions """ users, items, preds = [], [], [] # noqa: F841 item = list(data.itemID.unique()) for user in data.userID.unique(): user = [user] * len(item) users.extend(user) items.extend(item) all_predictions = pd.DataFrame(data={"userID": users, "itemID": items}) all_predictions["uid"] = all_predictions.userID.map(uid_map) all_predictions["iid"] = all_predictions.itemID.map(iid_map) dok_weights = interactions.todok() all_predictions["rating"] = all_predictions.apply( lambda x: dok_weights[x.uid, x.iid], axis=1 ) all_predictions = all_predictions[all_predictions.rating < 1].reset_index(drop=True) all_predictions = all_predictions.drop("rating", axis=1) all_predictions["prediction"] = all_predictions.apply( lambda x: model.predict( user_ids=x["uid"], item_ids=[x["iid"]], user_features=user_features, item_features=item_features, num_threads=num_threads, )[0], axis=1, ) return all_predictions[["userID", "itemID", "prediction"]]
ab72cadcae96e430d1c1162008458f420d74c6d1
11,845
from typing import Union from typing import Callable def touch(v: Union[Callable, str], default=None): """ Touch a function or an expression `v`, see if it causes exception. If not, output the result, otherwise, output `default`. Note: Use `default = pycamia.functions.identity_function` (or write one yourself) to return the exception object. Example: ---------- >>> a = 0 >>> touch(lambda: 1/a, default = 'fail') fail """ if not callable(default): default = const_function(default) if isinstance(v, str): local_vars = get_environ_locals() local_vars.update(locals()) locals().update(local_vars) try: return eval(v) except Exception as e: return default(e) else: try: return v() except Exception as e: return default(e)
90b5395eb68daadb06b1bb29a52a2ca11f34353d
11,846
def q_mult(q1, q2):
    """Quaternion multiplication"""
    w1, x1, y1, z1 = q1
    w2, x2, y2, z2 = q2
    w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2
    x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2
    y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2
    z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2
    return w, x, y, z
f2623836744b9143c5eeafe1b0d71e3cfdb5d8d4
11,847
import math


def round_repeats(repeats, global_params):
    """Calculate module's repeat number of a block based on depth multiplier.
    Use depth_coefficient of global_params.

    Args:
        repeats (int): num_repeat to be calculated.
        global_params (namedtuple): Global params of the model.

    Returns:
        new repeat: New repeat number after calculating.
    """
    multiplier = global_params.depth_coefficient
    if not multiplier:
        return repeats
    # follow the formula transferred from official TensorFlow implementation
    return int(math.ceil(multiplier * repeats))
9a26e19663c7ecf4b6f746b1900a9afe46311770
11,848
def estimate_label_width(labels):
    """
    Given a list of labels, estimate the width in pixels and return in a
    format accepted by CSS. Necessarily an approximation, since the font is
    unknown and is usually proportionally spaced.
    """
    max_length = max([len(l) for l in labels])
    return "{0}px".format(max(60, int(max_length * 7.5)))
1e22ad939973373a669841dd5cc318d6927249ca
11,849
from datetime import datetime def update_user(user_id): """ :Route: PUT /<user_id>?active=false&admin=true&password=str&first_name=Katrina&last_name=Wijaya&[email protected] :Description: Updates user with id `user_id`. Updates any optional fields that are set as query parameters. :param user_id: The int ID of a specific user :type user_id: int :param active: An optional query component/parameter to update whether or not a user is active. If true, user has an activated account that they can log in to, otherwise account will be rejected/suspended from use :type active: boolean or None :param admin: An optional query component/parameter to update whether or not a user has admin permissions. All admins have same permissions so maybe should create a super admin. :type admin: boolean or None :param password: An optional query component/parameter to update the password for a user. TODO: actually supporting passwords/salting/hashing. :type password: str or None :param first_name: An optional query component/parameter to update the user's first name. Does not modify full name stored in database. :type first_name: str or None :param last_name: An optional query component/parameter to update the user's last name. Does not modify full name stored in database. :type last_name: str or None :param email: An optional query component/parameter to update the user's email. TODO: email verification. :type email: str or None :return: JSON of updated user or an error message :Requires: Admin permissions """ active = request.args.get('active') admin = request.args.get('admin') password = request.args.get('password') first_name = request.args.get('first_name') last_name = request.args.get('last_name') email = request.args.get('email') # Check if user already exists in collection user = user_utils.get_user(user_id) if user: # Update access/update/login time (in UTC I think) user['account']['time_updated'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S') # Update all fields as passed in via optional parameters if active and active.lower() == "true": user['account']['is_active'] = True if active and active.lower() == "false": user['account']['is_active'] = False if admin and admin.lower() == "true": user['account']['is_admin'] = True if admin and admin.lower() == "false": user['account']['is_admin'] = False if password: user['account']['password_hash'] = password # TODO: implement hashing/salting/do this better if first_name: user['personal_info']['first_name'] = first_name if last_name: user['personal_info']['last_name'] = last_name if email: user['personal_info']['email'] = email # Update database entry users_collection.replace_one({ "account.id": str(user_id) }, user.copy()) return jsonify(user_utils.get_user(user_id)) return "No such user with id " + str(user_id) + " found!"
95648e47e68476487615bad5b460177103360ab3
11,850
import glob
import os

def install_package(pkg, directory, python_version, pip_args):
    """Downloads wheel for a package. Assumes the running Python has pip and the
    wheel package installed.

    :param pkg: package name
    :param directory: destination directory to download the wheel file in
    :param python_version: python version passed to pip via --python-version
    :param pip_args: extra pip args sent to pip
    :returns: path to the wheel file
    :rtype: str
    """
    pip_args = [
        "--isolated",
        "--disable-pip-version-check",
        "--target",
        directory,
        "--no-deps",
        "--ignore-requires-python",
        "--python-version",
        python_version,
        pkg,
    ] + pip_args
    cmd = InstallCommand()
    cmd.run(*cmd.parse_args(pip_args))
    # need dist-info directory for pkg_resources to be able to find the packages
    dist_info = glob.glob(os.path.join(directory, "*.dist-info"))[0]
    # fix namespace packages by adding proper __init__.py files
    namespace_packages = os.path.join(dist_info, "namespace_packages.txt")
    if os.path.exists(namespace_packages):
        with open(namespace_packages) as nspkg:
            for line in nspkg.readlines():
                namespace = line.strip().replace(".", os.sep)
                if namespace:
                    nspkg_init = os.path.join(directory, namespace, "__init__.py")
                    with open(nspkg_init, "w") as nspkg:
                        nspkg.write(
                            "__path__ = __import__('pkgutil').extend_path(__path__, __name__)"
                        )
    return pkginfo.Wheel(dist_info)
3919cb6a5998b5aff1b90a41802cc289d4076d7b
11,851
import six def ssh_encrypt_text(ssh_public_key, text): """Encrypt text with an ssh public key. If text is a Unicode string, encode it to UTF-8. """ if isinstance(text, six.text_type): text = text.encode('utf-8') try: pub_bytes = ssh_public_key.encode('utf-8') pub_key = serialization.load_ssh_public_key( pub_bytes, backends.default_backend()) return pub_key.encrypt(text, padding.PKCS1v15()) except Exception as exc: raise exception.EncryptionFailure(reason=six.text_type(exc))
2a5bfc62e08475dcd7f33ba25cf3fa76c43988a2
11,852
from typing import List import os def add_missing_init_files_from_path( paths: List[str], folders_to_ignore: List[str], source_extensions: List[str], folder_trees_to_ignore: List[str], recursive: bool, ) -> bool: """ Add missing __init__.py files to the specified root directories and subdirectories that contain at least one python module (the python module does not have to be in the directory directly, it can be in a subdirectory. Parameters ---------- paths List of root path containing the python code. folders_to_ignore List of folders paths that will be excluded. The folder path should be relative to the source_tree. Their subdirectories will NOT be excluded. source_extensions Files with these extensions will be considered python source code. folder_trees_to_ignore List of folders names that will be excluded. Their subdirectories will ALSO be excluded. recursive Recursively add missing __init__.py to the subfolders as well. Returns ------- bool True if some init files were added, false otherwise. """ init_file_added = False for path in paths: if not os.path.exists(path): exit(f"Cannot find path {path}") root_directory = path if os.path.isdir(path) else os.path.dirname(path) if root_directory == "": root_directory = "." if add_missing_init_files( root_directory, folders_to_ignore, source_extensions, folder_trees_to_ignore, recursive, ): init_file_added = True return init_file_added
4bdf4e1877edf4071785aa05235bee246cb4e080
11,853
def naildown_entity(entity_class, entity_dict, entity, state, module, check_missing=None): """ Ensure that a given entity has a certain state """ changed, changed_entity = False, entity if state == 'present_with_defaults': if entity is None: changed, changed_entity = create_entity(entity_class, entity_dict, module) elif state == 'present': if entity is None: changed, changed_entity = create_entity(entity_class, entity_dict, module) else: changed, changed_entity = update_entity(entity, entity_dict, module, check_missing) elif state == 'copied': new_entity = entity_class(name=entity_dict['new_name'], organization=entity_dict['organization']).search() if entity is not None and len(new_entity) == 0: changed, changed_entity = copy_entity(entity, entity_dict, module) elif len(new_entity) == 1: changed_entity = new_entity[0] elif state == 'absent': if entity is not None: changed, changed_entity = delete_entity(entity, module) else: module.fail_json(msg='Not a valid state: {}'.format(state)) return changed, changed_entity
3c5b7e8b026d4ea8444625fa7a01b43567973138
11,854
def get_all_admins(): """ Returns a queryset of all active admin users. """ current_admins = User.objects.filter(is_admin=True, is_active=True) return current_admins
befba9efb62d7b1a46c0019776d1327251e9cf9d
11,855
def htx_numpy(h, x):
    """ Convolution of reversed h with each row of x. Numpy implementation.

    Parameters
    ----------
    h : array, shape (n_time_hrf), HRF
    x : array, shape (n_samples, n_time), neural activity signals

    Return
    ------
    h_conv_x : array, shape (n_samples, n_time_valid), convolved signals
    """
    n_samples, _ = x.shape
    return np.r_[[np.convolve(h[::-1], x[i], mode='valid')
                  for i in range(n_samples)]]
306608179eb52f4f70e0f03da75283404201a044
11,856
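A small shape check for htx_numpy, assuming numpy is imported as np as the snippet expects; with mode='valid' the output length is n_time - n_time_hrf + 1 (the arrays below are arbitrary test data):

    import numpy as np

    h = np.ones(4)                # toy HRF, n_time_hrf = 4
    x = np.random.randn(3, 10)    # 3 signals of length 10
    print(htx_numpy(h, x).shape)  # (3, 7) since 10 - 4 + 1 = 7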
import torch

def get_mask_results(probs, boxes, im_w, im_h, pixil_score_th=0.25):
    """
    Args:
        probs (Tensor)
        boxes (ImageContainer)

    Returns:
        rles (list[string])
        mask_pixel_scores (Tensor)
    """
    device = probs.device
    N, _, H, W = probs.shape
    num_chunks = N if device.type == "cpu" else int(np.ceil(N * int(im_h * im_w) * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
    assert num_chunks <= N, "Default GPU_MEM_LIMIT is too small; try increasing it"
    chunks = torch.chunk(torch.arange(N, device=device), num_chunks)

    im_masks = torch.zeros(N, im_h, im_w, device=device, dtype=torch.bool)
    im_masks_tl = torch.zeros(N, im_h, im_w, device=device, dtype=torch.bool)
    im_masks_th = torch.zeros(N, im_h, im_w, device=device, dtype=torch.bool)
    for i in chunks:
        masks_chunk, spatial_inds = _do_paste_mask(probs[i], boxes[i], im_h, im_w, skip_empty=device.type == "cpu")
        im_masks[(i,) + spatial_inds] = (masks_chunk >= 0.5).to(dtype=torch.bool)
        im_masks_tl[(i,) + spatial_inds] = (masks_chunk >= pixil_score_th).to(dtype=torch.bool)
        im_masks_th[(i,) + spatial_inds] = (masks_chunk >= (1 - pixil_score_th)).to(dtype=torch.bool)

    mask_pixel_scores = (torch.sum(im_masks_th, dim=(1, 2)).to(dtype=torch.float32)
                         / torch.sum(im_masks_tl, dim=(1, 2)).to(dtype=torch.float32).clamp(min=1e-6))

    rles = []
    for i in range(N):
        # Too slow.
        # Get RLE encoding used by the COCO evaluation API
        rle = mask_util.encode(np.array(im_masks[i].unsqueeze(2).cpu(), dtype=np.uint8, order='F'))[0]
        # For dumping to json, need to decode the byte string.
        # https://github.com/cocodataset/cocoapi/issues/70
        rle['counts'] = rle['counts'].decode('ascii')
        rles.append(rle)

    return rles, mask_pixel_scores
a3c9823f8578e63f7a39fe25791c1b0369640f26
11,857
from typing import Dict from typing import Any def openapi() -> Dict[str, Any]: """ >>> client = app.test_client() >>> response = client.get("/openapi.json") >>> response.get_json()['openapi'] '3.0.0' >>> response.get_json()['info']['title'] 'Chapter 13. Example 2' """ # See dominoes_openapi.json for full specification return jsonify(OPENAPI_SPEC)
1671022e42c6bd8cc75aa66c3259f5094fb05696
11,858
def koliko_izdelkov_v_skladiscu():
    """
    Returns the number of distinct products in the warehouse.

    >>> koliko_izdelkov_v_skladiscu()
    18
    """
    poizvedba = """
        SELECT COUNT(*)
        FROM izdelki
        WHERE kolicina IS NOT null
    """
    st, = conn.execute(poizvedba).fetchone()
    return st
bb0143d8a7e4f404c88866331cfbdfd9c89d07f1
11,859
import math import random def create_spline(curve_data, s_type='NURBS', len_nodes=100, spline_id=0, splines_count=1, bud_position=None): """ Create a spline of given type with n nodes to form a path made of sin and cos """ spline = curve_data.splines.new(type=s_type) # Regular spline points need xyz + weight got_points = 1 co_dimension = 4 pts = spline.points if s_type == 'BEZIER': got_points = 2 # Bezier control points accept only xyz co_dimension = 3 # Left and right handles are not handled here pts = spline.bezier_points # This is the len for numpy arrays len_nodes = len_nodes - got_points + 1 # Every spline already has got point(s) when created # This was compensated with got_points pts.add(len_nodes - 1) if bud_position is None: bud_position = np.random.rand(co_dimension) * 1000 # Below is a play with random, sin and cos just for demo. # Replace with your own data and if you have none, it's pretty easy # to generate a bunch of points in space with Sverchok or Animation Nodes radii = np.random.rand(len_nodes) + 1 radii *= radii**4 / 10 dir_walk = np.arange(len_nodes) / 10 + np.random.rand(len_nodes) pi_walk = (np.arange(len_nodes)+1) * int(math.pi / len_nodes * 100)/(100+len_nodes) pi_walk += random.random()*math.pi nodes = np.random.rand(len_nodes, co_dimension) nodes[:, 0] += bud_position[0] nodes[:, 1] += bud_position[1] nodes[:, 2] += bud_position[2] rf1 = int(random.random()*3 + 1) rf2 = int(random.random()*3 + 1) nodes[:, 0] += np.sin(np.cos(pi_walk)) * random.random()*300+200 nodes[:, 1] += (np.cos(np.sin(pi_walk)**rf1) + np.sin(pi_walk*rf2)) * random.random()*300+200 nodes[:, 2] += np.sin(pi_walk*rf2) * np.cos(pi_walk*rf1) * random.random()*300+200 nodes [:, 0] += np.random.rand(len_nodes) * (random.random()*20+20) nodes [:, 1] += np.random.rand(len_nodes) * (random.random()*20+20) nodes [:, 2] += np.random.rand(len_nodes) * (random.random()*20+20) #nodes[:, 0] += np.sin(pi_walk*random.random())*(random.random()*10+10)**2 #nodes[:, 1] += np.sin(pi_walk*random.random())*(random.random()*100+100) #nodes[:, 2] += np.cos(pi_walk*random.random())*(random.random()*100+100) nodes [:, :] *= (random.random()*2+0.5) # Dummy data for key and value properties, play with HairInfo.Key and HairInfo.Value in your shader! keys = np.arange(len_nodes) + np.random.rand(len_nodes) values = np.random.rand(len_nodes) pts.foreach_set('co', nodes.ravel()) pts.foreach_set('radius', radii.ravel()) pts.foreach_set('key', keys.ravel()) pts.foreach_set('value', values.ravel()) if s_type == 'BEZIER': handle_fac = 100 lefts = nodes.copy() lefts[:, 0] += np.random.rand(len_nodes)* handle_fac - handle_fac/2 lefts[:, 1] += np.random.rand(len_nodes)* handle_fac - handle_fac/2 lefts[:, 2] += np.random.rand(len_nodes)* handle_fac - handle_fac/2 rights = nodes.copy() rights[:, 0] += np.random.rand(len_nodes)* handle_fac - handle_fac/2 rights[:, 1] += np.random.rand(len_nodes)* handle_fac - handle_fac/2 rights[:, 2] += np.random.rand(len_nodes)* handle_fac - handle_fac/2 pts.foreach_set('handle_left', lefts.ravel()) pts.foreach_set('handle_right', rights.ravel()) spline.use_endpoint_u = True # Spline resolution defaults to 12 but is too much for this use-case spline.resolution_u = 3 return spline
cce26da44f9da60638b3e46b89cf87c49ad5c3d6
11,860
from os.path import join as opj
import os

def getImgN(path):
    """
    Read all of the images in the given folder.
    [in]  path: folder to read images from
    [out] list of the loaded images
    """
    if not os.path.isdir(path):
        print('path not found:', path)
        exit(1)

    return np.array([cv2.imread(opj(path, f), IMG.getCh(0))
                     for f in os.listdir(path) if IMG.isImgPath(opj(path, f))])
468e396242008166333505949d78e13dbbcd146a
11,861
import sqlite3
import time

def get_composed_jumps(jumps, levels, win, verbose=0):
    """
    Take the output of get_jumps (from landmarks)
    Compose the jumps, return them as an array of array.
    If intermediate=True, we return the jumps for intermediary
    levels, not just the requested one.
    We use a temporary sqlite3 connection to work.
    """
    assert len(levels) > 0
    maxlevel = max(levels)
    assert maxlevel >= 1, 'level 1 min, it means jumps between two landmarks'
    # verbose
    if verbose > 0:
        t1 = time.time()
    # open temporary connection
    # IT IS FAST!
    # timeit.Timer("import sqlite3; conn = sqlite3.connect(':memory:'); conn.close()").timeit(10000)
    # Out[35]: 0.49553799629211426
    conn = sqlite3.connect(':memory:')
    # special case: level = 1
    if maxlevel == 1:
        add_nlmk2_jumps_to_db(conn, jumps, nocopy=True)
        q = "SELECT * FROM jumps_level1"
        res = conn.execute(q)
        composed_jumps = res.fetchall()
        conn.close()
        if verbose > 0:
            print('Composed jumps (max lvl = %d) obtained in %f seconds.' % (maxlevel, time.time() - t1))
        return composed_jumps
    # enters level1 jumps
    add_nlmk2_jumps_to_db(conn, jumps)
    # do upper levels
    for lvl in range(2, maxlevel + 1):
        compose_jumps(conn, win, level=lvl)
    # what do we return?
    composed_jumps = []
    for lvl in levels:
        q = "SELECT * FROM jumps_level" + str(lvl)
        res = conn.execute(q)
        composed_jumps.extend(res.fetchall())
    # done
    conn.close()
    # verbose
    if verbose > 0:
        print('Composed jumps (max lvl = %d) obtained in %f seconds.' % (maxlevel, time.time() - t1))
    return composed_jumps
3305d2efed23eed269b3483a9619e50ad39826de
11,862
import itertools def calculate_agreement_stv(agreement_dictionary, turker_accuracies): """ Inter agreement with most accurate chair vote Args: agreement_dictionary: holding sentence annotation records - 9 from non-experts and 1 expert sentence -> list of annotations (size settings.RESPONSE_COUNT + 1) turker_accuracies: accuracy for each turker used for the chair vote Returns: The accuracies from combined agreement from one to nine non-experts with the expert """ sequence = list(range(settings.RESPONSE_COUNT)) combinations = [] for i in range(settings.RESPONSE_COUNT + 1): combinations.append(list(itertools.combinations(sequence, i))) print(combinations) accuracies = [0] standard_deviations = [0] for i in range(1, settings.RESPONSE_COUNT + 1): current_combinations = combinations[i] combination_accuracies = [] for combination in current_combinations: correct = 0 incorrect = 0 for sentence in agreement_dictionary.keys(): expert_annotations = agreement_dictionary[sentence][-1][1] chosen_annotations = [agreement_dictionary[sentence][x][1] for x in combination] votes = np.sum(chosen_annotations, axis=0) chair = 0 if len(combination) > 0 and len(combination) % 2 == 0: max_accuracy = 0 for judgement_index in combination: turker = agreement_dictionary[sentence][judgement_index][0] turker_accuracy = turker_accuracies[turker][0][1] if turker_accuracy > max_accuracy: max_accuracy = turker_accuracy chair = judgement_index result_votes = [0] * len(votes) for j in range(len(votes)): if votes[j] < len(chosen_annotations) / 2: result_votes[j] = 0 elif votes[j] > len(chosen_annotations) / 2: result_votes[j] = 1 else: result_votes[j] = agreement_dictionary[sentence][chair][1][j] for j in range(len(votes)): if expert_annotations[j] == result_votes[j]: correct += 1 else: incorrect += 1 combination_accuracy = correct / (correct + incorrect) combination_accuracies.append(combination_accuracy) standard_deviation = np.std(combination_accuracies) standard_deviations.append(standard_deviation) accuracy = sum(combination_accuracies) / len(combination_accuracies) accuracies.append(accuracy) return accuracies, standard_deviations
3253505366edffea1cc7c1302b082dbd85668ad2
11,863
def count_num_peps(filename):
    """ Count the number of peptide sequences in FASTA file. """
    with open(filename) as f:
        counter = 0
        for line in f:
            if line.startswith(">"):
                counter += 1
    return counter
c062a22cd925f29d8793ab364a74cf05cbae2a66
11,864
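A quick way to exercise count_num_peps with a throwaway FASTA file; the file name and sequences are made up for the example:

    with open("toy.fasta", "w") as f:
        f.write(">pep1\nMKV\n>pep2\nGGA\n")

    print(count_num_peps("toy.fasta"))  # 2 header lines -> 2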
import re

def get_variables(examples):
    """Convert a code string to a list of variables.

    We assume a variable is a 'word' with only alphanumeric characters in it."""
    variables = [" ".join(re.split(r"\W+", text)) for text in examples["text"]]
    return {"variables": variables}
385a4fb3a73a432e6afa9aa69330f950246f48d0
11,865
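get_variables expects a batch dict with a 'text' column (presumably for use with a dataset map call); a standalone call might look like this, with a made-up code string:

    batch = {"text": ["total = price * qty + tax"]}
    print(get_variables(batch))
    # {'variables': ['total price qty tax']}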
def _stored_data_paths(wf, name, serializer): """Return list of paths created when storing data""" metadata = wf.datafile(".{}.alfred-workflow".format(name)) datapath = wf.datafile(name + "." + serializer) return [metadata, datapath]
5f01d804db9f1848cc13e701a56e51c06dccdb31
11,866
def ascii_to_walls(char_matrix):
    """
    A parser to build a gridworld from a text file.
    Walls are marked with '#' and empty cells with ' '.
    The grid must be square: every row must have the same length as the
    first row, and there must be as many rows as columns.

    :param char_matrix: Matrix of characters.
    :return: (wall_locs, empty), the lists of (row, col) locations of wall
        cells and of empty cells.
    """
    grid_size = len(char_matrix[0])
    assert(len(char_matrix) == grid_size), 'Mismatch in the columns.'
    for row in char_matrix:
        assert(len(row) == grid_size), 'Mismatch in the rows.'

    wall_locs = []
    empty = []
    for r in range(grid_size):
        for c in range(grid_size):
            char = char_matrix[r][c]
            if char == '#':
                wall_locs.append((r, c))
            elif char == ' ':
                empty.append((r, c))
            else:
                raise ValueError('Unknown character {} in grid.'.format(char))

    # Attempt to make the desired gridworld.
    return wall_locs, empty
9f6520625623bd446923e374a1a5a557038dfd48
11,867
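A tiny grid to exercise ascii_to_walls; the 3x3 layout below is invented for the example:

    grid = ['###',
            '# #',
            '###']
    walls, empty = ascii_to_walls(grid)
    print(empty)       # [(1, 1)]
    print(len(walls))  # 8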
def mock_sd(nresp=1): """Fake Stackdriver Monitoring API response for the ListTimeSeries endpoint. Args: nresp (int): Number of responses to add to response. Returns: ChannelStub: Mocked gRPC channel stub. """ timeserie = load_fixture('time_series_proto.json') response = {'next_page_token': '', 'time_series': [timeserie]} return mock_grpc_stub( response=response, proto_method=metric_service_pb2.ListTimeSeriesResponse, nresp=nresp)
cbc5659c02a73048f0263803562a130ac475bcb2
11,868
def cohesion_separation(chroms, doc): """Measure balancing both cohesion and separation of clusters.""" coh = cohesion(chroms, doc) sep = separation(chroms, doc) return (1 + sigmoid(coh)) ** sep
c883ee67e978e51b56f4be84e7e0731368eeb5f1
11,869
import locale import shlex import sys import subprocess def shellCall(shellCmd, stdin='', stderr=False, env=None, encoding=None): """Call a single system command with arguments, return its stdout. Returns stdout, stderr if stderr is True. Handles simple pipes, passing stdin to shellCmd (pipes are untested on windows) can accept string or list as the first argument Parameters ---------- shellCmd : str, or iterable The command to execute, and its respective arguments. stdin : str, or None Input to pass to the command. stderr : bool Whether to return the standard error output once execution is finished. env : dict The environment variables to set during execution. encoding : str The encoding to use for communication with the executed command. This argument will be ignored on Python 2.7. Notes ----- We use ``subprocess.Popen`` to execute the command and establish `stdin` and `stdout` pipes. Python 2.7 always opens the pipes in text mode; however, Python 3 defaults to binary mode, unless an encoding is specified. To unify pipe communication across Python 2 and 3, we now provide an `encoding` parameter, enforcing `utf-8` text mode by default. This parameter is present from Python 3.6 onwards; using an older Python 3 version will raise an exception. The parameter will be ignored when running Python 2.7. """ if encoding is None: encoding = locale.getpreferredencoding() if type(shellCmd) == str: # safely split into cmd+list-of-args, no pipes here shellCmdList = shlex.split(shellCmd) elif type(shellCmd) == bytes: # safely split into cmd+list-of-args, no pipes here shellCmdList = shlex.split(shellCmd.decode('utf-8')) elif type(shellCmd) in (list, tuple): # handles whitespace in filenames shellCmdList = shellCmd else: msg = 'shellCmd requires a string or iterable.' raise TypeError(msg) cmdObjects = [] for obj in shellCmdList: if type(obj) != bytes: cmdObjects.append(obj) else: cmdObjects.append(obj.decode('utf-8')) # Since Python 3.6, we can use the `encoding` parameter. if PY3: if sys.version_info.minor >= 6: proc = subprocess.Popen(cmdObjects, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding=encoding, env=env) else: msg = 'shellCall() requires Python 2.7, or 3.6 and newer.' raise RuntimeError(msg) else: proc = subprocess.Popen(cmdObjects, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) stdoutData, stderrData = proc.communicate(stdin) del proc if stderr: return stdoutData.strip(), stderrData.strip() else: return stdoutData.strip()
8d9e5ace3740a79ba42301e90e6de72db1713d54
11,870
import re

def get_number_location(input: str):
    """
    Get the string indices of all numbers that occur in the string.

    Format example: [ ( 0, 1 ), ( 4, 6 ), ( 9, 9 ) ]
    Both begin and end are inclusive, in contrast with the way the std_lib
    does it, which is begin (inclusive), end (exclusive).
    """
    locations = []
    for match in re.finditer(r"\d+", input):
        # match start is inclusive
        position_start = match.start()
        # match end is exclusive
        position_end = match.end() - 1
        locations.append((position_start, position_end))
    return locations
de035f640dd33dc96b4072bdc925efc649285121
11,871
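Example call for get_number_location; the returned indices are inclusive on both ends:

    print(get_number_location("ab12cd345"))  # [(2, 3), (6, 8)]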
def update_object(obj, new_values):
    """update an object attributes from a supplied dictionary"""
    # avoiding obj.__dict__.update(new_values) as it will set a new attribute if it doesn't exist
    for k, v in new_values.items():
        if hasattr(obj, k):
            try:
                setattr(obj, k, v)
            except AttributeError:  # in case of read only attribute
                log(f"update_object(): can't update property: {k}, with value: {v}")
            except Exception as e:
                log(f'update_object(): error, {e}, property: {k}, value: {v}')
    return obj
5e916b16301c6e733b2d98b32c175bb202529503
11,872
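A small sketch of update_object in use; the error paths assume a module-level log() helper exists, so the class and values here are illustrative only:

    class Settings:
        def __init__(self):
            self.host = 'localhost'
            self.port = 80

    s = update_object(Settings(), {'port': 8080, 'missing': 'ignored'})
    print(s.host, s.port)  # localhost 8080 ; unknown keys are skipped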
def subpathNeedsRefresh(modTimes, ufoPath, *subPath): """ Determine if a file needs to be refreshed. Returns True if the file's latest modification time is different from its previous modification time. """ previous = modTimes.get(subPath[-1]) if previous is None: return True latest = subpathGetModTime(ufoPath, *subPath) return latest != previous
046c37ca801a74bb83bb45c1b1d0510e15cba6c4
11,873
def resxy_(x: float, y: float, /) -> Resolution: """Construct resolution from X,Y order.""" return Resolution(x=x, y=y)
1cd2995142981de0932b8bc9452df71b18a46d8b
11,874
def group_toggle_modules(request, group):
    """Enable or disable modules.
    """
    if request.method != 'POST':
        raise Http404

    referer = request.META.get('HTTP_REFERER', None)
    next = SITE_ROOT if referer is None else referer

    username = request.user.username
    group_wiki = request.POST.get('group_wiki', 'off')
    if group_wiki == 'on':
        enable_mod_for_group(group.id, MOD_GROUP_WIKI)
        messages.success(request, _('Successfully enabled "Wiki".'))
    else:
        disable_mod_for_group(group.id, MOD_GROUP_WIKI)
        if referer is not None and referer.find('wiki') > 0:
            next = reverse('group_info', args=[group.id])
        messages.success(request, _('Successfully disabled "Wiki".'))

    return HttpResponseRedirect(next)
4844d8203bd757802e38bff6ac20f45ade07f21d
11,875
def bilinear_sampler(imgs, coords):
    """
    Construct a new image by bilinear sampling from the input image.

    Args:
        imgs: source image to be sampled from [batch, height_s, width_s, channels]
        coords: coordinates of source pixels to sample from [batch, height_t,
            width_t, 2]
    Returns:
        A new sampled image [batch, height_t, width_t, channels]
    """
    def _repeat(x, n_repeats):
        rep = tf.transpose(tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0])
        rep = tf.cast(rep, 'float32')
        x = tf.matmul(tf.reshape(x, (-1, 1)), rep)
        return tf.reshape(x, [-1])

    coords_x, coords_y = tf.split(coords, [1, 1], axis=3)
    inp_size = imgs.get_shape()
    coord_size = coords.get_shape()
    out_size = coords.get_shape().as_list()
    out_size[3] = imgs.get_shape().as_list()[3]

    coords_x = tf.cast(coords_x, 'float32')
    coords_y = tf.cast(coords_y, 'float32')

    y_max = tf.cast(tf.shape(imgs)[1] - 1, 'float32')
    x_max = tf.cast(tf.shape(imgs)[2] - 1, 'float32')
    zero = tf.zeros([1], dtype='float32')
    eps = tf.constant([0.5], tf.float32)

    coords_x = tf.clip_by_value(coords_x, eps, x_max - eps)
    coords_y = tf.clip_by_value(coords_y, eps, y_max - eps)

    x0 = tf.floor(coords_x)
    x1 = x0 + 1
    y0 = tf.floor(coords_y)
    y1 = y0 + 1

    x0_safe = tf.clip_by_value(x0, zero, x_max)
    y0_safe = tf.clip_by_value(y0, zero, y_max)
    x1_safe = tf.clip_by_value(x1, zero, x_max)
    y1_safe = tf.clip_by_value(y1, zero, y_max)

    wt_x0 = x1_safe - coords_x
    wt_x1 = coords_x - x0_safe
    wt_y0 = y1_safe - coords_y
    wt_y1 = coords_y - y0_safe

    # indices in the flat image to sample from
    dim2 = tf.cast(inp_size[2], 'float32')
    dim1 = tf.cast(inp_size[2] * inp_size[1], 'float32')
    base = tf.reshape(_repeat(tf.cast(tf.range(coord_size[0]), 'float32') * dim1,
                              coord_size[1] * coord_size[2]),
                      [out_size[0], out_size[1], out_size[2], 1])
    base_y0 = base + y0_safe * dim2
    base_y1 = base + y1_safe * dim2
    idx00 = tf.reshape(x0_safe + base_y0, [-1])
    idx01 = x0_safe + base_y1
    idx10 = x1_safe + base_y0
    idx11 = x1_safe + base_y1

    ## sample from imgs
    imgs_flat = tf.reshape(imgs, tf.stack([-1, inp_size[3]]))
    imgs_flat = tf.cast(imgs_flat, 'float32')
    im00 = tf.reshape(tf.gather(imgs_flat, tf.cast(idx00, 'int32')), out_size)
    im01 = tf.reshape(tf.gather(imgs_flat, tf.cast(idx01, 'int32')), out_size)
    im10 = tf.reshape(tf.gather(imgs_flat, tf.cast(idx10, 'int32')), out_size)
    im11 = tf.reshape(tf.gather(imgs_flat, tf.cast(idx11, 'int32')), out_size)

    w00 = wt_x0 * wt_y0
    w01 = wt_x0 * wt_y1
    w10 = wt_x1 * wt_y0
    w11 = wt_x1 * wt_y1

    output = tf.add_n([
        w00 * im00, w01 * im01,
        w10 * im10, w11 * im11
    ])
    return output
4138a515f0f4f25b569aae1c28d18de897c63a24
11,876
import types def wrap_array(typingctx, data_ptr, shape_tup): """create an array from data_ptr with shape_tup as shape """ assert isinstance(data_ptr, types.CPointer), "invalid data pointer" assert (isinstance(shape_tup, types.UniTuple) and shape_tup.dtype == np.intp), "invalid shape tuple" dtype = data_ptr.dtype arr_typ = types.Array(dtype, shape_tup.count, 'C') def codegen(context, builder, sig, args): assert(len(args) == 2) data = args[0] shape = args[1] # XXX: unnecessary allocation and copy, reuse data pointer shape_list = cgutils.unpack_tuple(builder, shape, shape.type.count) ary = _empty_nd_impl(context, builder, arr_typ, shape_list) cgutils.raw_memcpy(builder, ary.data, data, ary.nitems, ary.itemsize, align=1) # clean up image buffer fnty = lir.FunctionType(lir.VoidType(), [lir.IntType(8).as_pointer()]) fn_release = builder.module.get_or_insert_function(fnty, name="cv_delete_buf") builder.call(fn_release, [data]) return impl_ret_new_ref(context, builder, sig.return_type, ary._getvalue()) # # cgutils.printf(builder, "%d", shape) # retary = context.make_array(arr_typ)(context, builder) # itemsize = context.get_abi_sizeof(context.get_data_type(dtype)) # shape_list = cgutils.unpack_tuple(builder, shape, shape.type.count) # strides = [context.get_constant(types.intp, itemsize)] # for dimension_size in reversed(shape_list[1:]): # strides.append(builder.mul(strides[-1], dimension_size)) # strides = tuple(reversed(strides)) # #import pdb; pdb.set_trace() # context.populate_array(retary, # data=data, # shape=shape, # strides=strides, # itemsize=itemsize, # meminfo=None) # return retary._getvalue() return signature(arr_typ, data_ptr, shape_tup), codegen
03fc3c995ae459e644d88baab2ca766ff528ba8d
11,877
import subprocess

def change(port, password, limit):
    """Modify a user."""
    port, password, limit = str(port), str(password), str(limit)
    ret = subprocess.check_output([SS_ADMIN, 'change', port, password, limit],
                                  stderr=subprocess.STDOUT)
    return ret
9dc7e173e76da48e45b4b73a3584e327a0eb9d25
11,878
from typing import Union from typing import Optional from typing import Dict from typing import Any import tempfile import os def reshape_resting_ecg_to_tidy( sample_id: Union[int, str], folder: Optional[str] = None, tmap: TensorMap = DEFAULT_RESTING_ECG_SIGNAL_TMAP, ) -> pd.DataFrame: """Wrangle resting ECG data to tidy. Args: sample_id: The id of the ECG sample to retrieve. folder: The local or Cloud Storage folder under which the files reside. tmap: The TensorMap to use for ECG input. Returns: A pandas dataframe in tidy format or print a notebook-friendly error and return an empty dataframe. """ if folder is None: folder = get_resting_ecg_hd5_folder(sample_id) data: Dict[str, Any] = {'lead': [], 'raw': [], 'ts_reference': [], 'filtered': [], 'filtered_1': [], 'filtered_2': []} with tempfile.TemporaryDirectory() as tmpdirname: sample_hd5 = str(sample_id) + '.hd5' local_path = os.path.join(tmpdirname, sample_hd5) try: tf.io.gfile.copy(src=os.path.join(folder, sample_hd5), dst=local_path) except (tf.errors.NotFoundError, tf.errors.PermissionDeniedError) as e: print(f'''Warning: Resting ECG not available for sample {sample_id} in folder {folder}. Use the folder parameter to read HD5s from a different directory or bucket.\n\n{e.message}''') return pd.DataFrame(data) with h5py.File(local_path, mode='r') as hd5: try: signals = tmap.tensor_from_file(tmap, hd5) except (KeyError, ValueError) as e: print(f'''Warning: Resting ECG TMAP {tmap.name} not available for sample {sample_id}. Use the tmap parameter to choose a different TMAP.\n\n{e}''') _examine_available_keys(hd5) return pd.DataFrame(data) for (lead, channel) in ECG_REST_LEADS.items(): signal = signals[:, channel] signal_length = len(signal) data['raw'].extend(signal) data['lead'].extend([lead] * signal_length) data['ts_reference'].extend(np.array([i*1./(SAMPLING_RATE+1.) for i in range(0, signal_length)])) filtered, _, _ = filter_signal( signal=signal, ftype='FIR', band='bandpass', order=int(0.3 * SAMPLING_RATE), frequency=[.9, 50], sampling_rate=SAMPLING_RATE, ) data['filtered'].extend(filtered) filtered_1, _, _ = filter_signal( signal=signal, ftype='FIR', band='bandpass', order=int(0.3 * SAMPLING_RATE), frequency=[.9, 20], sampling_rate=SAMPLING_RATE, ) data['filtered_1'].extend(filtered_1) filtered_2, _, _ = filter_signal( signal=signal, ftype='FIR', band='bandpass', order=int(0.3 * SAMPLING_RATE), frequency=[.9, 30], sampling_rate=SAMPLING_RATE, ) data['filtered_2'].extend(filtered_2) signal_df = pd.DataFrame(data) # Convert the raw signal to mV. signal_df['raw_mV'] = signal_df['raw'] * RAW_SCALE signal_df['filtered_mV'] = signal_df['filtered'] * RAW_SCALE signal_df['filtered_1_mV'] = signal_df['filtered_1'] * RAW_SCALE signal_df['filtered_2_mV'] = signal_df['filtered_2'] * RAW_SCALE # Reshape to tidy (long format). tidy_signal_df = signal_df.melt( id_vars=['lead', 'ts_reference'], value_vars=['raw_mV', 'filtered_mV', 'filtered_1_mV', 'filtered_2_mV'], var_name='filtering', value_name='signal_mV', ) # The leads have a meaningful order, apply the order to this column. lead_factor_type = pd.api.types.CategoricalDtype( categories=[ 'strip_I', 'strip_aVR', 'strip_V1', 'strip_V4', 'strip_II', 'strip_aVL', 'strip_V2', 'strip_V5', 'strip_III', 'strip_aVF', 'strip_V3', 'strip_V6', ], ordered=True, ) tidy_signal_df['lead'] = tidy_signal_df.lead.astype(lead_factor_type) return tidy_signal_df
e3f6e2c76be191adbdc4c9ff31d62dda5d15894a
11,879
from geometric.internal import Angle, Dihedral, Distance, OutOfPlane from geometric.internal import PrimitiveInternalCoordinates as GeometricPRIC from geometric.internal import ( RotationA, RotationB, RotationC, TranslationX, TranslationY, TranslationZ, ) from geometric.molecule import Molecule as GeometricMolecule def geometric_project_derivatives( molecule: Molecule, conformer: torch.Tensor, internal_coordinates_indices: Dict[str, torch.Tensor], reference_gradients: torch.Tensor, reference_hessians: torch.Tensor, ) -> Tuple[torch.Tensor, torch.Tensor]: """A helper method to project a set of gradients and hessians into internal coordinates using ``geomTRIC``. Args: molecule: The molecule of interest conformer: The conformer of the molecule with units of [A] and shape=(n_atoms, 3) internal_coordinates_indices: The indices of the atoms involved in each type of internal coordinate. reference_gradients: The gradients to project. reference_hessians: The hessians to project. Returns: The projected gradients and hessians. """ geometric_molecule = GeometricMolecule() geometric_molecule.Data = { "resname": ["UNK"] * molecule.n_atoms, "resid": [0] * molecule.n_atoms, "elem": [atom.element.symbol for atom in molecule.atoms], "bonds": [(bond.atom1_index, bond.atom2_index) for bond in molecule.bonds], "name": molecule.name, "xyzs": [conformer.detach().numpy()], } geometric_coordinates = GeometricPRIC(geometric_molecule) geometric_coordinates.Internals = [ internal for internal in geometric_coordinates.Internals if not isinstance( internal, (TranslationX, TranslationY, TranslationZ, RotationA, RotationB, RotationC), ) ] # We need to re-order the internal coordinates to generate those produced by # smirnoffee. ic_by_type = defaultdict(list) ic_type_to_name = { Distance: "distances", Angle: "angles", Dihedral: "dihedrals", OutOfPlane: "out-of-plane-angles", } for internal_coordinate in geometric_coordinates.Internals: ic_by_type[ic_type_to_name[internal_coordinate.__class__]].append( internal_coordinate ) ordered_internals = [] for ic_type in internal_coordinates_indices: ic_by_index = { _geometric_internal_coordinate_to_indices(ic): ic for ic in ic_by_type[ic_type] } for ic_indices in internal_coordinates_indices[ic_type]: ic_indices = tuple(int(i) for i in ic_indices) if ic_indices[-1] > ic_indices[0]: ic_indices = tuple(reversed(ic_indices)) ordered_internals.append(ic_by_index[ic_indices]) geometric_coordinates.Internals = ordered_internals reference_gradients = reference_gradients.numpy().flatten() reference_hessians = reference_hessians.numpy().reshape(molecule.n_atoms * 3, -1) xyz = conformer.detach().numpy() return ( geometric_coordinates.calcGrad(xyz, reference_gradients), geometric_coordinates.calcHess(xyz, reference_gradients, reference_hessians), )
f04988255698e43e0febebbf0fa6b4d67625f86f
11,880
def api_get_script(request): """POST - Frida Get Script.""" if not request.POST.getlist('scripts[]'): return make_api_response( {'error': 'Missing Parameters'}, 422) resp = tests_frida.get_script(request, True) if resp['status'] == 'ok': return make_api_response(resp, 200) return make_api_response(resp, 500)
f221543d648901c38620bd84d8c6d55a3c8545e0
11,881
import re

def is_valid_slug(slug):
    """Returns true iff slug is valid."""
    VALID_SLUG_RE = re.compile(r"^[a-z0-9\-]+$")
    return VALID_SLUG_RE.match(slug)
439349f0689cd53fb2f7e89b2b48b90aa79dae80
11,882
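is_valid_slug returns a match object rather than a strict bool, so it is typically used in a truthiness test:

    print(bool(is_valid_slug("my-page-42")))  # True
    print(bool(is_valid_slug("My Page!")))    # False (uppercase/space/punctuation)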
import time import torch def KMeans_GPU(x, K=10, Niter=10, verbose=True): """Implements Lloyd's algorithm for the Euclidean metric.""" start = time.time() N, D = x.shape # Number of samples, dimension of the ambient space c = x[:K, :].clone() # Simplistic initialization for the centroids x_i = LazyTensor(x.view(N, 1, D)) # (N, 1, D) samples c_j = LazyTensor(c.view(1, K, D)) # (1, K, D) centroids # K-means loop: # - x is the (N, D) point cloud, # - cl is the (N,) vector of class labels # - c is the (K, D) cloud of cluster centroids for i in range(Niter): # E step: assign points to the closest cluster ------------------------- D_ij = ((x_i - c_j) ** 2).sum(-1) # (N, K) symbolic squared distances cl = D_ij.argmin(dim=1).long().view(-1) # Points -> Nearest cluster # M step: update the centroids to the normalized cluster average: ------ # Compute the sum of points per cluster: c.zero_() c.scatter_add_(0, cl[:, None].repeat(1, D), x) # Divide by the number of points per cluster: Ncl = torch.bincount(cl, minlength=K).type_as(c).view(K, 1) c /= Ncl # in-place division to compute the average if verbose: # Fancy display ----------------------------------------------- if torch.cuda.is_available(): torch.cuda.synchronize() end = time.time() print( f"K-means for the Euclidean metric with {N:,} points in dimension {D:,}, K = {K:,}:" ) print( "Timing for {} iterations: {:.5f}s = {} x {:.5f}s\n".format( Niter, end - start, Niter, (end - start) / Niter ) ) return cl, c
675632335520477cdcd283b5e12b46912c26b323
11,883
def make_flat_roof(bm, faces, thick, outset, **kwargs):
    """Create a basic flat roof

    Args:
        bm (bmesh.types.BMesh): bmesh from current edit mesh
        faces (bmesh.types.BMFace): list of user selected faces
        thick (float): Thickness of the roof
        outset (float): How much the roof overhangs
        **kwargs: Extra kwargs from RoofProperty

    Returns:
        list(bmesh.types.BMFace): Resulting top face
    """
    ret = bmesh.ops.extrude_face_region(bm, geom=faces)
    bmesh.ops.translate(bm, vec=(0, 0, thick), verts=filter_geom(ret["geom"], BMVert))

    top_face = filter_geom(ret["geom"], BMFace)[-1]
    link_faces = [f for e in top_face.edges for f in e.link_faces if f is not top_face]
    bmesh.ops.inset_region(bm, faces=link_faces, depth=outset, use_even_offset=True)
    bmesh.ops.recalc_face_normals(bm, faces=bm.faces)

    bmesh.ops.delete(bm, geom=faces, context="FACES")
    new_faces = list({f for e in top_face.edges for f in e.link_faces})
    return bmesh.ops.dissolve_faces(bm, faces=new_faces).get("region")
bfb218cfac8ff8c2e2c4bf428aba50a85062dd6e
11,884
def make_comma_separated_list_fiter(filter_name, field_expression): """ Create a filter which uses a comma-separated list of values to filter the queryset. :param str filter_name: the name of the query param to fetch values :param str field_expression: the field expression to filter the queryset, like `categories__in` """ def filter_queryset(instance, request, queryset, view): values = request.query_params.get(filter_name) if not values: return queryset values = [v.strip() for v in values.split(",")] return queryset.filter(**{field_expression: values}) attrs = {} attrs.setdefault("filter_queryset", filter_queryset) return type(str("CommaSeparatedIDListFilter"), (filters.BaseFilterBackend,), attrs)
7f6088f14195a93dca6cc68b0d2c6d4840cc159c
11,885
def get_dashboard(id_, token_info=None, user=None): """Get a single dashboard by ID :param id: ID of test dashboard :type id: str :rtype: Dashboard """ dashboard = Dashboard.query.get(id_) if not dashboard: return "Dashboard not found", 404 if dashboard and dashboard.project and not project_has_user(dashboard.project, user): return "Forbidden", 403 return dashboard.to_dict()
8110177aac6457771881cc78a45528acd70b2ab3
11,886
from pathlib import Path def get_recorder(execution_cmd, ml_names): """ The helper function for generating a recorder object """ if not execution_cmd.record_progress: return DummyRecorder() root_dir_path = Path(__file__).parent.parent log_dir_path = root_dir_path.joinpath( "games", execution_cmd.game_name, "log") game_params_str = [str(p) for p in execution_cmd.game_params] filename_prefix = ( "manual" if execution_cmd.game_mode == GameMode.MANUAL else "ml") if game_params_str: filename_prefix += "_" + "_".join(game_params_str) return Recorder(ml_names, log_dir_path, filename_prefix)
b21e9fb9de5fc1e5852647196a52d5df14255d32
11,887
import torch import tqdm from typing import Counter import json import os def eval_epoch(data_iterator, model, optimizer, args, update=False, log_split='', split_name='', n_iter=0, epoch=0, writer=None, sample_path='', debug=0, verbose=False, obj_classes=[], weights=[], clip=1.0, most_freq=0, log_path=''): """Pass one epoch over the data split. """ if update: model.train() else: model.eval() task = args.task verbose = args.verbose num_classes = model.n_actions if model.n_actions > 1 else 2 confusion = torch.zeros(num_classes, num_classes).float() if task == 'task1': matrix_labels = [] for dir in DIRECTIONS[args.direction]: for obj in obj_classes: matrix_labels += [dir + '_' + obj] elif task == 'task2': matrix_labels = DIRECTIONS[args.direction] elif task in set(['task3', 'task4']): matrix_labels = ['not present', 'present'] else: raise NotImplementedError() n_updates = len(data_iterator) if not debug else debug if verbose: pbar = tqdm(data_iterator) else: pbar = data_iterator total_loss = [] total_f1 = [] total_acc = [] zeros_acc = [] ones_acc = [] random_acc = [] mostfreq_acc = [] f1_binary_loss = F1_Binary_Loss().cuda() cce_loss = nn.CrossEntropyLoss(weight=torch.tensor(weights)).cuda() for bid, (batch_lat, batch_lng, batch_images, batch_boxes, batch_feats, batch_queries, batch_query_strings, batch_targets, batch_most_freq) in enumerate(pbar): if debug and bid == n_updates: break out = model({ 'latitude': batch_lat, 'longitude': batch_lng, 'im_batch': batch_images, 'obj_boxes': batch_boxes, 'obj_feats': batch_feats, 'queries': batch_queries, 'query_strings': batch_query_strings}) preds = out['action_logits'] if task in set(['task1', 'task3', 'task4']): binary_preds = (preds > 0.5).float() binary_preds.requires_grad = True if task == 'task1': w = 10000.0 weight_rebal = torch.ones_like( batch_targets) / w + (1.0 - 1.0 / w) * batch_targets loss_fn = nn.BCELoss(weight=weight_rebal) else: loss_fn = torch.nn.functional.binary_cross_entropy_with_logits loss = loss_fn(preds, batch_targets) f1_score = f1_binary_loss(binary_preds, batch_targets) acc = ((preds > 0.5).int() == batch_targets).float().mean() zero_acc = (torch.zeros_like(batch_targets) == batch_targets).float().mean() one_acc = (torch.ones_like(batch_targets) == batch_targets).float().mean() r_acc = (torch.empty(batch_targets.size()).random_( 2).cuda() == batch_targets).float().mean() total_f1.append(f1_score.item()) total_acc.append(acc.item()) zeros_acc.append(zero_acc.item()) ones_acc.append(one_acc.item()) random_acc.append(r_acc.item()) binary_preds = (preds > 0.5).long() binary_preds = Counter(['{}'.format(bp.item()) for bp in binary_preds]) binary_preds = json.dumps(binary_preds) log_string = 'f1: {:3.3f} mean-acc: {:3.3f} 0-acc: {:3.3f} 1-acc: {:3.3f} r-acc: {:3.3f} preds : {:10s}'.format( np.mean(total_f1), np.mean(total_acc), np.mean(zeros_acc), np.mean(ones_acc), np.mean(random_acc), binary_preds) if task == 'task1': pred_indices = [[] for bb in range(preds.size(0))] for pair in (preds > 0.5).nonzero(as_tuple=False): pred_indices[pair[0]].append(pair[1].item()) elif task in set(['task3', 'task4']): pred_indices = (preds > 0.5).long() elif task == 'task2': loss = cce_loss(preds, batch_targets.squeeze(1).long()) acc = compute_precision_with_logits( preds, batch_targets.long()) total_acc.append(acc.item()) r_acc = (torch.empty(batch_targets.size()).random_( num_classes).cuda() == batch_targets).float().mean() random_acc.append(r_acc.item()) mfreq_acc = (batch_most_freq == batch_targets).float().mean() 
mostfreq_acc.append(mfreq_acc.item()) log_string = 'acc: {:3.3f} mfreq-acc: {:3.3f} r-acc: {:3.3f}'.format( np.mean(total_acc), np.mean(mostfreq_acc), np.mean(random_acc)) _, pred_indices = torch.max(preds, 1) else: raise NotImplementedError() total_loss.append(loss.item()) if update: optimizer.zero_grad() loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), clip) optimizer.step() log_string = 'mean-loss: {:3.3f} '.format(np.mean(total_loss)) + log_string log_split_batch = log_split + ' B{:4d}|{:4d}'.format(bid+1, n_updates) log_final = ' '.join([log_split_batch, log_string]) if verbose: pbar.set_description(log_final) else: print(log_final) if batch_targets.size(1) == 1: targets = [[batch_targets[bb].long().item()] for bb in range(preds.size(0))] else: targets = [[] for bb in range(preds.size(0))] for pair in batch_targets.nonzero(as_tuple=False): targets[pair[0]].append(pair[1].item()) for bb in range(preds.size(0)): for t in targets[bb]: confusion[t, pred_indices[bb]] += 1 total_score = total_f1 if task in set(['task1,task3,task4']) else total_acc writer.add_scalar('{}_{}'.format( split_name, 'batch_score'), total_score[-1], n_iter) writer.add_scalar('{}_{}'.format( split_name, 'batch_loss'), total_loss[-1], n_iter) n_iter += 1 if (n_iter+1) % 100 == 0 and update: model_name = os.path.join( args.exp_dir, args.prefix + '/model.{}_{}.pt'.format(epoch, n_iter)) print('\n saving model', model_name) torch.save(model, model_name) if verbose: pbar.close() writer.add_scalar('{}_{}'.format( split_name, 'epoch_score'), np.mean(total_score), epoch) writer.add_scalar('{}_{}'.format( split_name, 'epoch_loss'), np.mean(total_loss), epoch) img_conf = get_confusion_matrix_image( matrix_labels, confusion / confusion.sum(), '') writer.add_image('Confusion Matrix', img_conf, epoch) with open(log_path, "a") as log_file: log_file.write(log_final+'\n') return {'loss': np.mean(total_loss), 'accuracy': np.mean(total_score)}, n_iter
60290172d75c4abd9a0753c8bf19d5ed96c22900
11,888
def get_customer_key(): """ Reutrn the key of the sample customer from file """ customer_file = open("sample_customer", "r") customer_key = customer_file.readline().rstrip("\n") customer_file.close() return customer_key
2b63c671aa6f8dd5fe6fbd9d58394e8c178901f5
11,889
def tau_tex(tex, tau0_): """ Eq. (15) Goldsmith et al. (2012) """ g = gu/gl return tau0_*(1. - np.exp(-tstar/tex))/(1. + g*np.exp(-tstar/tex))
1dc5b0254c2b4cb9bf443651d2ffe4685543b67d
11,890
def cached(f):
    """Decorator to cache result of property."""
    @wraps(f)
    def inner(self):
        name = '_{}'.format(f.__name__)
        if getattr(self, name, None) is None:
            setattr(self, name, f(self))
        return getattr(self, name)
    return inner
9c9e14f358337efe7a4a5cffe9b3a46b1065951c
11,891
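cached stores the result under a leading-underscore attribute on first access; a typical pairing is with @property. A sketch, assuming the wraps used by the decorator comes from functools:

    from functools import wraps

    class Report:
        @property
        @cached
        def total(self):
            print('computing...')  # runs only on the first access
            return 42

    r = Report()
    print(r.total)  # computing... then 42
    print(r.total)  # 42, served from r._total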
def thesaurus(*args, sort=False) -> dict:
    """Builds a dictionary whose keys are the first letters of the words and whose
    values are lists of the words that start with the corresponding letter.

    :param *args: the words to group
    :param sort: whether the dictionary should be sorted alphabetically
        (True - sort, False - do not sort)
    :return: dictionary of words keyed by first letter"""
    if sort:
        args = sorted(list(args))
        # Changed in version 3.7: Dictionary order is guaranteed to be insertion order
    dict_out = {}
    for word in args:
        dict_value = dict_out.setdefault(word[0], list())
        if word not in dict_value:
            dict_value.append(word)
        dict_out[word[0]] = dict_value
    return dict_out
2e02e4f98a85eaa19a9374d5dfba82dd855b9636
11,892
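Example call for the thesaurus helper above; duplicates are dropped and sort=True orders both keys and values:

    print(thesaurus('boa', 'ant', 'bee', 'ant', sort=True))
    # {'a': ['ant'], 'b': ['bee', 'boa']}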
def calculate_ranking(imbalanced_results): """Calculate the ranking of oversamplers for any combination of datasets, classifiers and metrics.""" wide_optimal = calculate_wide_optimal(imbalanced_results) ranking_results = wide_optimal.apply( lambda row: _return_row_ranking( row[3:], SCORERS[row[2].replace(' ', '_').lower()]._sign ), axis=1, ) ranking = pd.concat([wide_optimal.iloc[:, :3], ranking_results], axis=1) return ranking
81c41848f618245661338c52f73bb80ab865b7df
11,893
def extract鏡像翻訳(item): """ Parser for '鏡像翻訳' """ if 'anime' in str(item['tags']).lower(): return None vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or 'preview' in item['title'].lower(): return None tagmap = [ ('sodachi fiasco', 'Orokamonogatari - Sodachi Fiasco', 'translated'), ('karen ogre', 'Wazamonogatari - Karen Ogre', 'translated'), ('shinobu mustard', 'Shinobumonogatari - Shinobu Mustard', 'translated'), ('tsubasa sleeping', 'Wazamonogatari - Tsubasa Sleeping', 'translated'), ('acerola bon appetit', 'Wazamonogatari - Acerola Bon Appetit', 'translated'), ('tsudzura human', 'Musubimonogatari - Tsudzura Human', 'translated'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) titlemap = [ ('jinrui saikyou no netsuai', 'Jinrui Saikyou no Netsuai', 'translated'), ] for titlecomponent, name, tl_type in titlemap: if titlecomponent.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
2e561a9d6b66a5b243996a6a8e530a8f46476292
11,894
def config(live_server, django_user_model): """Create a user and return an auth_token config matching that user.""" user = django_user_model.objects.create( email='jathan@localhost', is_superuser=True, is_staff=True ) data = { 'email': user.email, 'secret_key': user.secret_key, 'auth_method': 'auth_token', 'url': live_server.url + '/api', # 'api_version': API_VERSION, 'api_version': '1.0', # Hard-coded. } return data
031648b92a8347f8cc5e14213eda85c9ed73d3ee
11,895
from typing import Dict from typing import Union from typing import List from typing import Any from typing import OrderedDict def combine_data_by_key( combined_outputs: Dict[str, Union[List[Any], Any]], output: Dict[str, Union[List[Any], Any]], ) -> Dict[str, Union[List[Any], Any]]: """ Combine lists in two multimaps Args: combined_outputs: Initial multimap to combine, presumably already combined output: New multimap to add to initial multimap Returns: Combined multimaps (does not modify initial or new data) """ combined_keys = combine_keys(combined_outputs, output) return OrderedDict( (key, combine_datas(combined_outputs.get(key, []), output.get(key, []))) for key in combined_keys )
3311c204cd3bce79a9613bb212978e8178a4b05f
11,896
import os def arango_connection() -> ArangoClient: """Connecting to arango.""" host = os.getenv("ARANGO_HOST") port = os.getenv("ARANGO_PORT") arango_client = ArangoClient(hosts=f"http://{host}:{port}") return arango_client
e1f2d7da8391fefd017b7ce8b41274410600221e
11,897
import os def in_bazel() -> bool: """Return whether running under bazel.""" return os.environ.get("TEST_WORKSPACE", "") != ""
f0f697d894ed0e8bf7309591a6775632b76c2ec8
11,898
def acceptCode(request): """Redeems a code to accept invitation cash""" params = request.get_params(schemas.AcceptCodeSchema()) device = get_device(request) customer = device.customer access_token = get_wc_token(request, customer) postParams = { 'code': params['code'] } response = wc_contact( request, 'POST', 'wallet/accept-code', params=postParams, access_token=access_token).json() if response.get('error'): return { 'error': response.get('error')} elif response.get('invalid'): return { 'invalid': response.get('invalid')} else: return response
8a0c1201eb1135789e42bee01d3f0ab8480963b6
11,899