content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (range 0 to 3.66M)
def luminance(qcolor):
    """ Gives the pseudo-equivalent greyscale value of this color """
    r, g, b = qcolor.red(), qcolor.green(), qcolor.blue()
    return int(0.2*r + 0.6*g + 0.2*b)
9e1821da2c0c6e8d76aefe56d6ed659a728737bb
7,213
def read_info(path, layer=None, encoding=None):
    """Read information about an OGR data source.

    `crs` and `geometry` will be `None` and `features` will be 0 for a
    nonspatial layer.

    Parameters
    ----------
    path : str or pathlib.Path
    layer : str or int, optional
        Name or index of layer in data source.  Reads the first layer by default.
    encoding : str, optional (default: None)
        If present, will be used as the encoding for reading string values from
        the data source, unless encoding can be inferred directly from the data
        source.

    Returns
    -------
    dict
        {
            "crs": "<crs>",
            "fields": <ndarray of field names>,
            "encoding": "<encoding>",
            "geometry": "<geometry type>",
            "features": <feature count>
        }
    """
    return ogr_read_info(str(path), layer=layer, encoding=encoding)
7479c63223288c94ed4756350756473866d7b2b3
7,214
import time import requests import json def _macro_cons_opec_month(): """ 欧佩克报告-月度, 数据区间从 20170118-至今 这里返回的具体索引日期的数据为上一个月的数据, 由于某些国家的数据有缺失, 只选择有数据的国家返回 :return: pandas.Series 阿尔及利亚 安哥拉 厄瓜多尔 加蓬 伊朗 伊拉克 科威特 利比亚 尼日利亚 \ 2017-01-18 108.0 172.4 54.5 21.3 372.0 463.2 281.2 60.8 154.2 2017-02-13 104.5 165.1 52.7 19.9 377.5 447.6 271.8 67.5 157.6 2017-03-14 105.3 164.1 52.6 19.4 381.4 441.4 270.9 66.9 160.8 2017-04-12 105.6 161.4 52.6 19.8 379.0 440.2 270.2 62.2 154.5 2017-05-11 104.7 169.2 52.4 20.6 375.9 437.3 270.2 55.0 150.8 2017-06-13 105.9 161.3 52.8 20.4 379.5 442.4 270.5 73.0 168.0 2017-07-12 106.0 166.8 52.7 19.7 379.0 450.2 270.9 85.2 173.3 2017-08-10 105.9 164.6 53.6 20.5 382.4 446.8 270.3 100.1 174.8 2017-09-12 106.5 164.6 53.7 17.3 382.8 444.8 270.2 89.0 186.1 2017-10-11 104.6 164.1 53.6 20.1 382.7 449.4 270.0 92.3 185.5 2017-11-13 101.2 171.1 54.1 20.3 382.3 438.3 270.8 96.2 173.8 2017-12-13 101.3 158.1 53.3 19.7 381.8 439.6 270.3 97.3 179.0 2018-01-18 103.7 163.3 52.6 19.7 382.9 440.5 270.0 96.2 186.1 2018-04-12 98.4 152.4 51.8 18.3 381.4 442.6 270.4 96.8 181.0 2018-05-14 99.7 151.5 52.0 18.3 382.3 442.9 270.5 98.2 179.1 2018-06-12 103.1 152.5 51.9 18.9 382.9 445.5 270.1 95.5 171.1 2018-07-11 103.9 143.1 51.9 19.0 379.9 453.3 273.1 70.8 166.0 2018-08-13 106.2 145.6 52.5 18.8 373.7 455.6 279.1 66.4 166.7 2018-09-12 104.5 144.8 52.9 18.7 358.4 464.9 280.2 92.6 172.5 2018-10-11 104.9 151.9 53.1 18.7 344.7 465.0 281.2 105.3 174.8 2018-11-13 105.4 153.3 52.5 18.6 329.6 465.4 276.4 111.4 175.1 2018-12-12 105.2 152.1 52.5 17.6 295.4 463.1 280.9 110.4 173.6 2019-03-14 102.6 145.7 52.2 20.3 274.3 463.3 270.9 90.6 174.1 2019-04-10 101.8 145.4 52.4 21.4 269.8 452.2 270.9 109.8 173.3 2019-06-13 102.9 147.1 52.9 21.1 237.0 472.4 271.0 117.4 173.3 沙特 阿联酋 委内瑞拉 欧佩克产量 2017-01-18 1047.4 307.1 202.1 3308.5 2017-02-13 994.6 293.1 200.4 3213.9 2017-03-14 979.7 292.5 198.7 3195.8 2017-04-12 999.4 289.5 197.2 3192.8 2017-05-11 995.4 284.2 195.6 3173.2 2017-06-13 994.0 288.5 196.3 3213.9 2017-07-12 995.0 289.8 193.8 3261.1 2017-08-10 1006.7 290.5 193.2 3286.9 2017-09-12 1002.2 290.1 191.8 3275.5 2017-10-11 997.5 290.5 189.0 3274.8 2017-11-13 1000.0 291.1 186.3 3258.9 2017-12-13 999.6 288.3 183.4 3244.8 2018-01-18 991.8 287.8 174.5 3241.6 2018-04-12 993.4 286.4 148.8 3195.8 2018-05-14 995.9 287.2 143.6 3193.0 2018-06-12 998.7 286.5 139.2 3186.9 2018-07-11 1042.0 289.7 134.0 3232.7 2018-08-13 1038.7 295.9 127.8 3232.3 2018-09-12 1040.1 297.2 123.5 3256.5 2018-10-11 1051.2 300.4 119.7 3276.1 2018-11-13 1063.0 316.0 117.1 3290.0 2018-12-12 1101.6 324.6 113.7 3296.5 2019-03-14 1008.7 307.2 100.8 3054.9 2019-04-10 979.4 305.9 73.2 3002.2 2019-06-13 969.0 306.1 74.1 2987.6 """ t = time.time() res = requests.get( JS_CONS_OPEC_URL.format( str(int(round(t * 1000))), str(int(round(t * 1000)) + 90) ) ) json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1]) date_list = [item["date"] for item in json_data["list"]] big_df = pd.DataFrame() for country in [item["datas"] for item in json_data["list"]][0].keys(): try: value_list = [item["datas"][country] for item in json_data["list"]] value_df = pd.DataFrame(value_list) value_df.columns = json_data["kinds"] value_df.index = pd.to_datetime(date_list) temp_df = value_df["上个月"] temp_df.name = country big_df = big_df.append(temp_df) except: continue headers = { "accept": "*/*", "accept-encoding": "gzip, deflate, br", "accept-language": "zh-CN,zh;q=0.9,en;q=0.8", "cache-control": "no-cache", "origin": "https://datacenter.jin10.com", "pragma": "no-cache", 
"referer": "https://datacenter.jin10.com/reportType/dc_opec_report", "sec-fetch-mode": "cors", "sec-fetch-site": "same-site", "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36", "x-app-id": "rU6QIu7JHe2gOUeR", "x-csrf-token": "", "x-version": "1.0.0", } res = requests.get(f"https://datacenter-api.jin10.com/reports/dates?category=opec&_={str(int(round(t * 1000)))}", headers=headers) # 日期序列 all_date_list = res.json()["data"] need_date_list = [item for item in all_date_list if item.split("-")[0] + item.split("-")[1] + item.split("-")[2] not in date_list] for item in reversed(need_date_list): res = requests.get( f"https://datacenter-api.jin10.com/reports/list?category=opec&date={item}&_={str(int(round(t * 1000)))}", headers=headers) temp_df = pd.DataFrame(res.json()["data"]["values"], columns=pd.DataFrame(res.json()["data"]["keys"])["name"].tolist()).T temp_df.columns = temp_df.iloc[0, :] temp_df = temp_df[['阿尔及利亚', '安哥拉', '厄瓜多尔', '加蓬', '伊朗', '伊拉克', '科威特', '利比亚', '尼日利亚', '沙特', '阿联酋', '委内瑞拉', '欧佩克产量']].iloc[-2, :] big_df[item] = temp_df return big_df.T
5ae77b64b5d66e14027d757ea840385d0fc96033
7,215
def get_activation(preact_dict, param_name, hook_type):
    """
    Hooks used in sensitivity schedulers (LOBSTER, Neuron-LOBSTER, SERENE).

    :param preact_dict: Dictionary in which to save the parameter information.
    :param param_name: Name of the layer, used as a dictionary key.
    :param hook_type: Hook type.
    :return: Returns a forward_hook if $hook_type$ is forward, else a backward_hook.
    """
    def forward_hook(model, inp, output):
        preact_dict[param_name] = output

    def backward_hook(module, grad_input, grad_output):
        preact_dict[param_name] = None
        preact_dict[param_name] = grad_output[0].detach().cpu()

    return forward_hook if hook_type == "forward" else backward_hook
8d5766178ef972e010b5be3a3826774f051dd3bd
7,217
def createAbsorption(cfgstr):
    """Construct Absorption object based on provided configuration (using available factories)"""
    return Absorption(cfgstr)
587b0e12f845171ffd61d5d04c37b4ff98865216
7,218
def get_optimizer_config():
    """Gets configuration for optimizer."""
    optimizer_config = configdict.ConfigDict()
    # Learning rate scheduling. One of: ["fixed", "exponential_decay"]
    optimizer_config.learning_rate_scheduling = "exponential_decay"
    # Optimization algorithm. One of: ["SGD", "Adam", "RMSprop"].
    optimizer_config.optim_type = "Adam"
    # Adam beta1.
    optimizer_config.beta1 = 0.9
    # Adam beta2.
    optimizer_config.beta2 = 0.999
    # Norm clipping threshold applied for rnn cells (no clip if 0).
    optimizer_config.norm_clip = 0.0
    # Learning rate.
    optimizer_config.initial_learning_rate = 0.001
    # The learning rate decay 'epoch' length.
    optimizer_config.lr_decay_steps = 12000
    # The learning rate decay base, applied per epoch.
    optimizer_config.lr_decay_base = 0.85
    # RMSprop decay.
    optimizer_config.decay = 0.9
    # RMSprop moment.
    optimizer_config.mom = 0.0
    return optimizer_config
1918cd8aa9ff8446dec8cb90ff529de97f05d5aa
7,219
def flat2seq(x: Tensor, num_features: int) -> Tensor:
    """Reshapes tensor from flat format to sequence format.

    Flat format: (batch, sequence x features)
    Sequence format: (batch, sequence, features)

    Args:
        x (Tensor): a tensor in the flat format (batch, sequence x features).
        num_features (int): number of features (last dimension) of the output tensor.

    Returns:
        Tensor: the transformed tensor in sequence format (batch, seq, features).
    """
    if not is_flat(x):
        raise ValueError(
            'attempt to reshape tensor from flat format to sequence format failed. '
            f'Expected input tensor with 2 dimensions, got {x.ndim}.'
        )
    return x.view(x.shape[0], -1, num_features)
d8bace4548d82352ebae28dc9be665b862b744d0
7,220
def run_results(results_data, time_column, pathway_column, table_letters, letters,
                dataframe_T1, dataframe_T2, dataframe_T3, dataframe_T4,
                original_transitions, simulation_transitions, intervention_codes,
                target, individuals, save_location, simulation_name, listed_times,
                last_arrival, period):
    """Fill the four results tables."""
    Table1_results = T1_results(results_data, time_column, pathway_column,
                                dataframe_T1, original_transitions,
                                simulation_transitions, intervention_codes,
                                target, individuals, save_location,
                                simulation_name, last_arrival, period)
    Table2_results = T2_results(results_data, pathway_column, letters,
                                dataframe_T2, simulation_name)
    Table3_results = T3_results(results_data, pathway_column, dataframe_T3,
                                save_location, simulation_name)
    Table4_results = T4_results(results_data, table_letters, dataframe_T4,
                                listed_times, simulation_name)
    return (Table1_results, Table2_results, Table3_results, Table4_results)
5292328a1f74d2ecb89daae465bace1a95eff538
7,221
from typing import Optional def get_spatial_anchors_account(name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSpatialAnchorsAccountResult: """ Get information about an Azure Spatial Anchors Account. ## Example Usage ```python import pulumi import pulumi_azure as azure example = azure.mixedreality.get_spatial_anchors_account(name="example", resource_group_name=azurerm_resource_group["example"]["name"]) pulumi.export("accountDomain", data["azurerm_spatial_anchors_account"]["account_domain"]) ``` :param str name: Specifies the name of the Spatial Anchors Account. Changing this forces a new resource to be created. Must be globally unique. :param str resource_group_name: The name of the resource group in which to create the Spatial Anchors Account. """ __args__ = dict() __args__['name'] = name __args__['resourceGroupName'] = resource_group_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure:mixedreality/getSpatialAnchorsAccount:getSpatialAnchorsAccount', __args__, opts=opts, typ=GetSpatialAnchorsAccountResult).value return AwaitableGetSpatialAnchorsAccountResult( account_domain=__ret__.account_domain, account_id=__ret__.account_id, id=__ret__.id, location=__ret__.location, name=__ret__.name, resource_group_name=__ret__.resource_group_name)
db94c210af0c46cea2e3a95573f339ec65f7e7fe
7,222
import re


def format_query(str_sql):
    """Strips all newlines, excess whitespace, and spaces around commas"""
    stage1 = str_sql.replace("\n", " ")
    stage2 = re.sub(r"\s+", " ", stage1).strip()
    stage3 = re.sub(r"(\s*,\s*)", ",", stage2)
    return stage3
5adb0f9c3314ba04bbf92c88e3ef17802b2afeb0
7,223
def make_ytick_labels(current_ticks, n, numstring=""):
    """Format tick values as strings, dropping the decimal part when the value is a whole number."""
    new_ticks = []
    for item in current_ticks:
        if int(item) == item:
            new_ticks.append(f"{int(item)}{numstring}")
        else:
            new_ticks.append(f"{item:.1f}{numstring}")
    return new_ticks
2685126dc72305ccb7b4bf652fe645e9a39affd3
7,224
import re


def check_token(token):
    """
    Returns `True` if *token* is a valid XML token, as defined by XML
    Schema Part 2.
    """
    return (token == '' or
            re.match("[^\r\n\t ]?([^\r\n\t ]| [^\r\n\t ])*[^\r\n\t ]?$", token)
            is not None)
b4e1d313fb64aad4c1c244cb18d3629e13b1c3af
7,225
from numpy.random import rand  # assumed source of `rand`; the original snippet omitted the import


def generate_random_data(n=10):
    """Generate random data."""
    return rand(n)  # use the requested size instead of the hard-coded 10
872591efc14d28282b24138f80e19c92487bde6d
7,226
def get_phoible_feature_list(var_to_index):
    """
    Function that takes a var_to_index object and returns a list of Phoible segment features

    :param var_to_index: a dictionary mapping variable name to index(column) number in Phoible data
    :return: a list of Phoible segment feature names (all columns after the first 11)
    """
    return list(var_to_index.keys())[11:]
a53995cd927d1cdc66fadb2a8e6af3f5e2effff0
7,228
from sklearn.model_selection import train_test_split  # assumed source of train_test_split; missing from the original snippet


def split_data(dataset):
    """Split pandas dataframe to data and labels."""
    data_predictors = [
        "Steps_taken",
        "Minutes_sitting",
        "Minutes_physical_activity",
        "HR",
        "BP",
    ]
    X = dataset[data_predictors]
    y = dataset.Health
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    return X, y, x_train, x_test, y_train, y_test
b15db522ff45dee825d64d7daf6604fb400bc677
7,229
def add_header(response):
    """
    Add headers to both force latest IE rendering engine or Chrome Frame.
    """
    response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
    return response
6e755f47bd12095d80a941338c451e677f80bcc6
7,230
def draw_lane_on_unwarped_frame(frame, left_line, right_line, trsf_mtx_inv): """ Drawing of the unwarped lane lines and lane area to the current frame. Args: left_line: left Line instance right_line: right Line instance trsf_mtx_inv: inverse of the perspective transformation matrix """ # Frame dimensions height, width = frame.shape[0:2] # Generate x and y values for plotting y = np.linspace(0, frame.shape[0] - 1, frame.shape[0]) left_x = left_line.evaluate_average_polynomial(y) right_x = right_line.evaluate_average_polynomial(y) # Create a green lane area between the left and right lane lines warped_lane_area = np.zeros_like(frame) # Warped at first left_points = np.column_stack((left_x, y)).reshape((1, -1, 2)).astype(int) right_points = np.flipud( np.column_stack((right_x, y))).reshape((1, -1, 2)).astype(int) vertices = np.hstack((left_points, right_points)) cv2.fillPoly(warped_lane_area, [vertices], (0, 255, 0)) # Unwarp the lane area unwarped_lane = cv2.warpPerspective( warped_lane_area, trsf_mtx_inv, (width, height)) # Overlay the unwarped lane area onto the frame green_lane_on_frame = cv2.addWeighted(frame, 1., unwarped_lane, 0.3, 0) # Draw the left and right lane polynomials into an empty and warped image warped_lanes = np.zeros_like(frame) left_points = np.column_stack((left_x, y)).reshape(-1, 1, 2) right_points = np.column_stack((right_x, y)).reshape(-1, 1, 2) warped_lanes = cv2.polylines(warped_lanes, [left_points.astype(np.int32)], isClosed=False, color=(255, 0, 0), thickness=30) warped_lanes = cv2.polylines(warped_lanes, [right_points.astype(np.int32)], isClosed=False, color=(0, 0, 255), thickness=30) # Unwarp the lane lines plot lane_lines = cv2.warpPerspective( warped_lanes, trsf_mtx_inv, (width, height)) # Create a mask of the unwarped lane lines to shadow the frame background # a bit gray = cv2.cvtColor(lane_lines, cv2.COLOR_BGR2GRAY) _, mask = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY_INV) # Black-out the area of the lane lines in the frame frame_bg = cv2.bitwise_and( green_lane_on_frame, green_lane_on_frame, mask=mask) # Combine with complete frame to shadow the area of the lane lines a bit shadowed_frame = cv2.addWeighted(frame_bg, 0.6, green_lane_on_frame, 0.4, 0) return cv2.addWeighted(shadowed_frame, 1.0, lane_lines, 1.0, 0)
b59b3d99a17ba241aff0a9ac7e6d33497f1db803
7,232
def n_states_of_vec(l, nval):
    """
    Returns the amount of different states a vector of length 'l' can be in,
    given that each index can be in 'nval' different configurations.
    """
    if type(l) != int or type(nval) != int or l < 1 or nval < 1:
        raise ValueError("Both arguments must be positive integers.")
    return nval ** l
98770fa5a5e62501bf365a4a5a40a932b2ba2450
7,234
def remove_items_from_dict(a_dict, bad_keys):
    """
    Remove every item from a_dict whose key is in bad_keys.

    :param a_dict: The dict to have keys removed from.
    :param bad_keys: The keys to remove from a_dict.
    :return: A copy of a_dict with the bad_keys items removed.
    """
    new_dict = {}
    for k in a_dict.keys():
        if k not in bad_keys:
            new_dict[k] = a_dict[k]
    return new_dict
7c665e372c2099441f8a661f1194a76a21edf01c
7,235
def writeObject(img_array, obj_array, bbox):
    """Writes depression objects to the original image.

    Args:
        img_array (np.array): The output image array.
        obj_array (np.array): The numpy array containing depression objects.
        bbox (list): The bounding box of the depression object.

    Returns:
        np.array: The numpy array containing the depression objects.
    """
    min_row, min_col, max_row, max_col = bbox
    roi = img_array[min_row:max_row, min_col:max_col]
    roi[obj_array > 0] = obj_array[obj_array > 0]
    return img_array
141cf9c3f47766a4020d737e743215db04761f54
7,236
def process_model(current_val):
    """
    :param current_val: model generated by sat solver, atom is satisfied if in model.
    :return: tuple of sets comprising true and false atoms.
    """
    true_atoms, false_atoms = set(), set()
    for atom in current_val:
        if current_val[atom]:
            true_atoms.add(str(atom))
        else:
            false_atoms.add(str(atom))
    return true_atoms, false_atoms
9cf90aec097091841c0f0ac820317f373a92e4c1
7,237
import re def filter_strace_output(lines): """ a function to filter QEMU logs returning only the strace entries Parameters ---------- lines : list a list of strings representing the lines from a QEMU log/trace. Returns ------- list a list of strings representing only the strace log entries the entries will also be cleaned up if a page dump occurs in the middle of them """ #we only want the strace lines, so remove/ignore lines that start with the following: line_starts= ['^[\d,a-f]{16}-', # pylint: disable=anomalous-backslash-in-string '^page', '^start', '^host', '^Locating', '^guest_base', '^end_', '^brk', '^entry', '^argv_', '^env_', '^auxv_', '^Trace', '^--- SIGSEGV', '^qemu' ] filter_string = '|'.join(line_starts) filtered = [] prev_line = "" for line in lines: if re.match(filter_string,line): continue # workaround for https://gitlab.com/qemu-project/qemu/-/issues/654 if re.search("page layout changed following target_mmap",line): prev_line = line.replace("page layout changed following target_mmap","") continue if re.match('^ = |^= ', line): line = prev_line+line filtered.append(line) return filtered
01b6c048ebdf890e9124c387fc744e56cc6b7f4d
7,238
def export_gmf_xml(key, dest, sitecol, imts, ruptures, rlz, investigation_time): """ :param key: output_type and export_type :param dest: name of the exported file :param sitecol: the full site collection :param imts: the list of intensity measure types :param ruptures: an ordered list of ruptures :param rlz: a realization object :param investigation_time: investigation time (None for scenario) """ if hasattr(rlz, 'gsim_rlz'): # event based smltpath = '_'.join(rlz.sm_lt_path) gsimpath = rlz.gsim_rlz.uid else: # scenario smltpath = '' gsimpath = rlz.uid writer = hazard_writers.EventBasedGMFXMLWriter( dest, sm_lt_path=smltpath, gsim_lt_path=gsimpath) writer.serialize( GmfCollection(sitecol, imts, ruptures, investigation_time)) return {key: [dest]}
139e24feb476ab10c0f1192fd47f80b7bbe29ccb
7,239
import functools


def track_state_change(entity_ids, from_state=None, to_state=None):
    """Decorator factory to track state changes for entity id."""
    def track_state_change_decorator(action):
        """Decorator to track state changes."""
        event.track_state_change(HASS, entity_ids,
                                 functools.partial(action, HASS),
                                 from_state, to_state)
        return action

    return track_state_change_decorator
08f6e7f8354f51dfa54d233156585c84d9b811b3
7,240
def phrase():
    """Generate and return random phrase."""
    return models.PhraseDescription(text=random_phrase.make_random_text())
458529df5d6dbd92b7a6545d92b836763a8411a6
7,241
def classify_tweets(text): """ classify tweets for tweets about car accidents and others :param text: tweet text :return: boolean, true if tweet is about car accident, false for others """ return text.startswith(u'בשעה') and ( (u'הולך רגל' in text or u'הולכת רגל' in text or u'נהג' in text or u'אדם' in text) and (u'רכב' in text or u'מכונית' in text or u'אופנוע' in text or u"ג'יפ" in text or u'טרקטור' in text or u'משאית' in text or u'אופניים' in text or u'קורקינט' in text))
b34991a36febf2648cd83f2782ba4a8631e65a1a
7,242
def _build_results(drift_type, raw_metrics):
    """Generate all results for the queried time window or run id of a datadriftdetector.

    :param raw_metrics: origin data diff calculation results.
    :return: a list of result dicts.
    """
    results = []
    for metric in raw_metrics:
        ep = _properties(metric.get_extended_properties())
        if metric.name == OUTPUT_METRIC_DRIFT_COEFFICIENT:
            # Overall drift coefficient; add to results return object
            create_new_component = True
            if create_new_component:
                res = {KEY_NAME_Drift_TYPE: drift_type}
                # attach result content
                result_list = []
                result_list.append(
                    _build_single_result_content(drift_type, metric.value, ep)
                )
                res["result"] = result_list
                results.append(res)
    return results
2938257d54d0be0ac012c4ddc39952c6767b8a38
7,243
def no_test_server_credentials():
    """
    Helper function that returns true when TEST_INTEGRATION_* credentials are
    undefined or empty.
    """
    client_id = getattr(settings, 'TEST_INTEGRATION_CLIENT_ID', None)
    username = getattr(settings, 'TEST_INTEGRATION_USERNAME', None)
    password = getattr(settings, 'TEST_INTEGRATION_PASSWORD', None)
    app_read = getattr(settings, 'TEST_INTEGRATION_READ_CLIENT_ID', None)
    app_write = getattr(settings, 'TEST_INTEGRATION_WRITE_CLIENT_ID', None)
    return not (client_id and username and password and app_read and app_write)
a98f249e15f9d1c42aacd62a22024af577332275
7,244
from typing import Tuple
from typing import Any


def skip_spaces(st: ST) -> Tuple[ST, Any]:
    """
    Skip whitespace.
    """
    pos, src = st
    while pos < len(src) and src[pos].isspace():
        pos += 1
    return (pos, src), None
df0c549c8af18a66a6e2d1704991592516e62ffb
7,245
def mixed_phone_list():
    """Return mixed phone number list."""
    return _MIXED_PHONE_LIST_
e607eb5778d8f4999fcbeb85ea5a3bb0ca04ee40
7,246
def get_report_summary(report):
    """
    Retrieve the docstring summary content for the given report module.

    :param report: The report module object
    :returns: the first line of the docstring for the given report module
    """
    summary = None
    details = get_report_details(report)
    if not details:
        return
    details = details.split('\n')
    while details and not summary:
        summary = details.pop(0)
    return summary
ea350c527cfab62496110ae08eedd9841db10492
7,248
def load_dataset(dataset_identifier, train_portion='75%', test_portion='25%', partial=None): """ :param dataset_identifier: :param train_portion: :return: dataset with (image, label) """ # splits are not always supported # split = ['train[:{0}]'.format(train_portion), 'test[{0}:]'.format(test_portion)] ds = tfds.load(dataset_identifier, split='train', shuffle_files=True) if partial is not None: ds = ds.take(partial) return ds
f836236f56ba8359194c21c20ea6767d296a0ee4
7,249
from typing import List
import functools


def stop(ids: List[str]):
    """Stop one or more instances"""
    return functools.partial(ec2.stop_instances, InstanceIds=ids)
fdf6db088323e5874c01662cf931aa85143ac2aa
7,250
def simple_parse(config_file): """ Do simple parsing and home-brewed type interference. """ config = ConfigObj(config_file, raise_errors=True) config.walk(string_to_python_type) # Now, parse input and output in the Step definition by hand. _step_io_fix(config) return(config)
85a406125a644fb75a5d8986778de6ea9b8af52a
7,252
import pickle def deserialize_columns(headers, frames): """ Construct a list of Columns from a list of headers and frames. """ columns = [] for meta in headers: col_frame_count = meta["frame_count"] col_typ = pickle.loads(meta["type-serialized"]) colobj = col_typ.deserialize(meta, frames[:col_frame_count]) columns.append(colobj) # Advance frames frames = frames[col_frame_count:] return columns
176d936a6019669f15049f11df00e14ad62238d7
7,253
def words_with_joiner(joiner):
    """Pass through words unchanged, but add a separator between them."""
    def formatter_function(i, word, _):
        return word if i == 0 else joiner + word
    return (NOSEP, formatter_function)
9f24b2e7d202663902da0bfccd8e9b96faebc152
7,255
def magerr2Ivar(flux, magErr):
    """
    Estimate the inverse variance given flux and magnitude error.

    The reason for this is that we need to correct the magnitude or flux
    for Galactic extinction.

    Parameters
    ----------
    flux : scalar or array of float
        Flux of the object.
    magErr : scalar or array of float
        Error of magnitude measurements.
    """
    fluxErr = flux * ((10.0 ** (magErr/2.5)) - 1.0)
    return 1.0 / (fluxErr ** 2.0)
37c48c26f1b876ca4d77dc141b1728daaea24944
7,256
def create_policy_work_item_linking(repository_id, branch, blocking, enabled, branch_match_type='exact', organization=None, project=None, detect=None): """Create work item linking policy. """ organization, project = resolve_instance_and_project( detect=detect, organization=organization, project=project) policy_client = get_policy_client(organization) configuration = create_configuration_object(repository_id, branch, blocking, enabled, '40e92b44-2fe1-4dd6-b3d8-74a9c21d0c6e', [], [], branch_match_type) return policy_client.create_policy_configuration(configuration=configuration, project=project)
230604606ba47c29386027503f45d30577cb5edf
7,257
import numbers def center_data(X, y, fit_intercept, normalize=False, copy=True, sample_weight=None): """ Centers data to have mean zero along axis 0. This is here because nearly all linear models will want their data to be centered. If sample_weight is not None, then the weighted mean of X and y is zero, and not the mean itself """ X = as_float_array(X, copy) if fit_intercept: if isinstance(sample_weight, numbers.Number): sample_weight = None if sp.issparse(X): X_offset = np.zeros(X.shape[1]) X_std = np.ones(X.shape[1]) else: X_offset = np.average(X, axis=0, weights=sample_weight) X -= X_offset # XXX: currently scaled to variance=n_samples if normalize: X_std = np.sqrt(np.sum(X ** 2, axis=0)) X_std[X_std == 0] = 1 X /= X_std else: X_std = np.ones(X.shape[1]) y_offset = np.average(y, axis=0, weights=sample_weight) y = y - y_offset else: X_offset = np.zeros(X.shape[1]) X_std = np.ones(X.shape[1]) y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype) return X, y, X_offset, y_offset, X_std
d31b27868a6f1ee21ef4019df9954fc1136d73eb
7,258
import socket


async def get_ipv4_internet_reachability(host, port, timeout):
    """
    Host: 8.8.8.8 (google-public-dns-a.google.com)
    OpenPort: 53/tcp
    Service: domain (DNS/TCP)
    """
    # Keep a reference to the socket so it can actually be closed;
    # the original snippet called close() on the socket module itself.
    socket.setdefaulttimeout(timeout)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((host, port))
        return True
    except socket.error:
        return False
    finally:
        sock.close()
ff1d335dec568810431c58b0bc1a72eb10e65372
7,259
def cov_pen(h_j, h_no_j): """ Personal implementation of covariance matrix with penalization. :param h_j: :param h_no_j: :return: """ final_dim = h_j.shape[1] cov_matrix = np.empty((final_dim, final_dim)) for row in range(final_dim): for column in range(final_dim): h_d = h_j[:, row] h_d_no_j = h_no_j[:, row] a = h_d - np.mean(h_d) if row == column: # Diag value = np.dot(a.T, a) + np.dot(h_d_no_j.T, h_d_no_j) else: h_i = h_j[:, column] h_i_no_j = h_no_j[:, column] b = h_i - np.mean(h_i) value = np.dot(a.T, b) + np.dot(h_d_no_j.T, h_i_no_j) cov_matrix[row, column] = value return cov_matrix
4a3ef366a072fd84d198597213ea544d88032ac5
7,260
def get_last_timestamp():
    """
    Get the timestamp for 23:59:59 of the current day
    :return:
    """
    # Get the timestamp for tomorrow at 00:00
    future_timestamp = get_timestamp(-1)
    # Timestamp for tomorrow at 00:00, minus 1
    last_timestamp = future_timestamp - 1
    return last_timestamp
7f4c07309f9be1437c1743f691402bae58a7ec34
7,261
def _get_all_subclasses(typ, # type: Type[T] recursive=True, # type: bool _memo=None # type: Set[Type[Any]] ): # type: (...) -> Iterable[Type[T]] """ Returns all subclasses of `typ` Warning this does not support generic types. See parsyfiles.get_all_subclasses() if one day generic types are needed (commented lines below) :param typ: :param recursive: a boolean indicating whether recursion is needed :param _memo: internal variable used in recursion to avoid exploring subclasses that were already explored :return: """ _memo = _memo or set() # if we have collected the subclasses for this already, return if typ in _memo: return [] # else remember that we have collected them, and collect them _memo.add(typ) # if is_generic_type(typ): # # We now use get_origin() to also find all the concrete subclasses in case the desired type is a generic # sub_list = get_origin(typ).__subclasses__() # else: sub_list = typ.__subclasses__() # recurse result = [] # type: List[Type[T]] for t in sub_list: # only keep the origins in the list # to = get_origin(t) or t to = t # noinspection PyBroadException try: if to is not typ and to not in result and issubclass(to, typ): # is_subtype(to, typ, bound_typevars={}): result.append(to) except Exception: # catching an error with is_subtype(Dict, Dict[str, int], bound_typevars={}) pass # recurse if recursive: for typpp in sub_list: for t in _get_all_subclasses(typpp, recursive=True, _memo=_memo): # unfortunately we have to check 't not in sub_list' because with generics strange things happen # also is_subtype returns false when the parent is a generic if t not in sub_list and issubclass(t, typ): # is_subtype(t, typ, bound_typevars={}): result.append(t) return result
a9a9c1186e195347f937961b928159c605b64ffe
7,263
import logging


def conditions_summary(conditions):
    """
    Return a dict of consumer-level observations, say, for display on a
    smart mirror or tablet.
    """
    keys = ['timestamp', 'dewpoint', 'barometricPressure', 'windDirection',
            'windSpeed', 'windGust', 'precipitationLastHour', 'temperature',
            'relativeHumidity', 'heatIndex']
    summary = dict()
    for key in keys:
        try:
            summary[key] = conditions['properties'][key]
        except Exception as exc:
            summary[key] = 'none'
            # logging uses %-style lazy formatting, not str.format placeholders
            logging.error('Error trying to read summary for key %s: %s', key, exc)
    return summary
aa4c95fd892c63bd05abd24188b8931375973bc0
7,264
def InsertOrganisation(cur, con, entity_name: str = "Organisation") -> int: """ Inserts a new Organisation into the database """ # Get information about the video game print(f"Enter new {entity_name}'s details:") row = {} row["Name"] = input(f"Enter the name of the {entity_name}: ") or None row["Headquarters"] = input( f"Enter the headquarters of {entity_name} (Optional): ") or None row["Founded"] = input( f"Enter the date when the {entity_name} was founded in YYYY-MM-DD format: ") or None row["Earnings"] = input( f"Enter earnings of {entity_name} in USD (Optional): ") or 0 # Query to be executed query = """INSERT INTO Organisations (Name, Headquarters, Founded, Earnings) VALUES (%(Name)s, %(Headquarters)s, %(Founded)s, %(Earnings)s) """ print("\nExecuting") print(query) # Execute query cur.execute(query, row) # Get ID of last inserted organisation cur.execute("SELECT LAST_INSERT_ID() AS OrganisationID") return cur.fetchone()["OrganisationID"]
de22b6eeb446efab58a2124f1b26da1e9edb12ed
7,265
def _rgb_to_hsv(rgbs): """Convert Nx3 or Nx4 rgb to hsv""" rgbs, n_dim = _check_color_dim(rgbs) hsvs = list() for rgb in rgbs: rgb = rgb[:3] # don't use alpha here idx = np.argmax(rgb) val = rgb[idx] c = val - np.min(rgb) if c == 0: hue = 0 sat = 0 else: if idx == 0: # R == max hue = ((rgb[1] - rgb[2]) / c) % 6 elif idx == 1: # G == max hue = (rgb[2] - rgb[0]) / c + 2 else: # B == max hue = (rgb[0] - rgb[1]) / c + 4 hue *= 60 sat = c / val hsv = [hue, sat, val] hsvs.append(hsv) hsvs = np.array(hsvs, dtype=np.float32) if n_dim == 4: hsvs = np.concatenate((hsvs, rgbs[:, 3]), axis=1) return hsvs
ee4a2d9867351e61bf9b14de4ef2d05425285879
7,266
def find_correlation(convergence_data, lens_data, plot_correlation=False, plot_radii=False, impact=False, key=None): """Finds the value of the slope for plotting residuals against convergence. Magnitude of slope and error quantify correlation between the two. Inputs: conv -- convergence. mu_diff -- residuals. """ correlations = [] correlation_errs = [] for cone_radius in RADII[29::2]: SNe_data = find_mu_diff(lens_data, cone_radius=cone_radius, impact=impact, key=key) redshift_cut = np.logical_or(SNe_data['z'] > 0.2, SNe_data['z'] > 0.4) mu_diff = SNe_data["mu_diff"][redshift_cut] if impact: if key is None: conv = np.array(convergence_data[f"Radius{str(cone_radius)}"]["SNkappa"])[redshift_cut] else: conv = np.array(convergence_data[key][f"Radius{str(cone_radius)}"]["SNkappa"])[redshift_cut] else: conv = np.array(convergence_data[f"Radius{str(cone_radius)}"]["SNkappa"])[redshift_cut] conv_rank = rankdata(conv) mu_rank = rankdata(mu_diff) diff = np.abs(conv_rank - mu_rank) rho = 1 - 6 / (len(conv) * (len(conv) ** 2 - 1)) * np.sum(diff ** 2) rho_err = np.sqrt((1 - rho ** 2) / (len(conv) - 1)) correlations.append(rho) correlation_errs.append(rho_err) if plot_correlation: edges = np.linspace(-0.0065, 0.011, 6) bins = (edges[1:] + edges[:-1]) / 2 mean_dmu = [] standard_error = [] for bin in bins: dmus = [] for kappa, dmu in zip(conv, mu_diff): if bin - 0.007 / 4 < kappa <= bin + 0.0007 / 4: dmus.append(dmu) mean_dmu.append(np.mean(dmus)) standard_error.append(np.std(dmus) / np.sqrt(len(dmus))) plt.plot([min(conv), max(conv)], [0, 0], color=grey, linestyle='--') plt.plot(conv, mu_diff, linestyle='', marker='o', markersize=2, color=colours[0]) plt.errorbar(bins, mean_dmu, standard_error, marker='s', color='r', markersize=3, capsize=3, linestyle='') plt.xlabel('$\kappa$') plt.ylabel('$\Delta\mu$') # plt.xlim([-0.008, 0.011]) # plt.legend(frameon=0, loc='lower right') # plt.ylim([-0.3, 0.3]) plt.text(0.0038, -0.19, f'$\\rho$ = {round(rho, 3)} $\pm$ {round(rho_err, 3)}', fontsize=16) # print([convergence_cut[cuts2][i] for i in range(len(convergence_cut[cuts2]))]) # print([mu_diff_cut[cuts2][i] for i in range(len(convergence_cut[cuts2]))]) # print([SNmu_err_cut[cuts2][i] for i in range(len(convergence_cut[cuts2]))]) plt.show() u_err = [correlations[i] + correlation_errs[i] for i in range(len(correlations))] d_err = [correlations[i] - correlation_errs[i] for i in range(len(correlations))] smooth_corr = savgol_filter([correlations[i] for i in range(len(correlations))], 11, 4) smooth_u_err = savgol_filter(u_err, 11, 4) smooth_d_err = savgol_filter(d_err, 11, 4) if plot_radii: plt.plot([6, 30], [0, 0], color=grey, linestyle='--') plt.plot(RADII[29::2], smooth_corr, color=colours[0]) plt.plot(RADII[29::2], [correlations[i] for i in range(len(correlations))], marker='x', color=colours[1], linestyle='') plt.fill_between(RADII[29::2], smooth_u_err, smooth_d_err, color=colours[0], alpha=0.4) plt.xlabel('Cone Radius (arcmin)') plt.ylabel("Spearman's Rank Coefficient") # plt.xlim([5, 30.1]) # plt.ylim([-0.18, 0.02]) plt.gca().invert_yaxis() plt.show() return [correlations, smooth_corr, smooth_u_err, smooth_d_err, np.array(u_err) - np.array(correlations)]
d507cd256a555b442fff1cd2a00862a5afdf0661
7,267
def ELCE2_null_estimator(p_err, K, rng): """ Compute the ELCE^2_u for one bootstrap realization. Parameters ---------- p_err: numpy-array one-dimensional probability error vector. K: numpy-array evaluated kernel function. rng: type(np.random.RandomState()) a numpy random function return ------ float: an unbiased estimate of ELCE^2_u """ idx = rng.permutation(len(p_err)) return ELCE2_estimator(K, p_err[idx])
5b42e36ade4aba416bb8cbf6790d22fd8e4913b1
7,268
import curses def select_from(stdscr, x, y, value, slist, redraw): """ Allows user to select from a list of valid options :param stdscr: The current screen :param x: The start x position to begin printing :param y: The start y position to begin pritning :param value: The current value chosen :param slist: A list of values to choose from :return: A value within :param list """ k = 0 padwidth = 100 pad = curses.newpad(1, padwidth) height, width = stdscr.getmaxyx() try: idx = slist.index(value) except ValueError: stdscr.clear() stdscr.refresh() curses_safe_addstr(stdscr, 0, 0, str(value)) curses_safe_addstr(stdscr, 1, 0, str(type(value))) curses_safe_addstr(stdscr, 2, 0, ','.join(map(str, slist))) curses_safe_addstr(stdscr, 3, 0, ','.join( list(map(str, map(type, slist))))) stdscr.getch() stdscr.clear() stdscr.refresh() draw_status_bar(stdscr, "Press 'q' to exit and 'UP' or 'DOWN' to select a value") while k != KEY_ENTER and k != ord('q'): pad.clear() value = str(slist[idx]) if len(value) + x >= width: value = value[:width - x - 1] if len(value) > padwidth: padwidth = len(value) * 2 pad = curses.newpad(1, padwidth) pad.addstr(0, 0, str(value)) stdscr.move(y, x + len(str(value))) pad.refresh(0, 0, y, x, y, width - x) k = stdscr.getch() if k == curses.KEY_UP and idx > 0: idx -= 1 elif k == curses.KEY_DOWN and idx < len(slist) - 1: idx += 1 elif k == curses.KEY_RESIZE: stdscr.erase() height, width = stdscr.getmaxyx() redraw(stdscr) draw_status_bar( stdscr, "Press 'q' to exit and 'UP' or 'DOWN' to select a value") return slist[idx]
208283731317418bbe5ae1d16386584eaebcd626
7,269
def describe(r): """Return a dictionary with various statistics computed on r: mean, variance, skew, kurtosis, entropy, median. """ stats = {} stats['mean'] = r.mean() stats['variance'] = r.var() stats['skew'] = skew(r) stats['kurtosis'] = kurtosis(r) stats['median'] = np.median(r) stats['entropy'] = entropy(r) stats['mode'] = mode(r) return stats
7ed070110ac327ef69cf1c05eefdda16c21f7f0d
7,270
def value_iteration(env,maxiter): """ Just like policy_iteration, this employs a similar approach. Steps (to iterate over): 1) Find your optimum state_value_function, V(s). 2) Keep iterating until convergence 3) Calculate your optimized policy Outputs: - Your final state_value_function, V(s) - Optimal policy 'pi' - Average reward vector (see note below) - List of all value functions for all iterations NOTE: In order to produce the graph showing average reward over each iteration, the policy was calculated at each iteration. This is not normally done for Value Iteration. This will slow down the computation time for Value iteration. To return to traditional value iteration, comment out the respective lines and remove the appropriate output """ # intialize the state-Value function V = np.zeros(nS) V_hm = np.copy(V) V_hm.resize((1,V_hm.size)) V_hm = V_hm.tolist() # intialize a random policy. Comment out for traditional Value_Iteration policy = np.random.randint(0, 4, nS) avg_r_VI_mat = [] n_episode = 100 # Iterate over your optimized function, breaking if not changing or difference < tolerance. for i in range(maxiter): prev_V = np.copy(V) # evaluate given policy difference, V = Optimum_V(env, prev_V, maxiter, gamma) # improve policy. Comment out to return to traditional Value Iteration policy = policy_update(env, policy, V, gamma) #Play episodes based on the current policy. Comment out to return to traditional Value Iteration wins_VI, total_reward_VI, avg_reward_VI = play_episodes(env, n_episode, policy, random = False) avg_r_VI_mat.append(avg_reward_VI) # save value function to list for animation V_tmp = np.copy(V) V_tmp = V_tmp.tolist() V_hm.append(V_tmp) # if State Value function has not changed over 10 iterations, it has converged. if i % 10 == 0: # if values of 'V' not changing after one iteration if (np.all(np.isclose(V, prev_V))): print("") print('No Changes for 10 iterations. Value converged at iteration %d' %(i+1)) break elif difference < tol: print('Tolerance reached. Value converged at iteration %d' %(i+1)) break # Initialize Optimal Policy optimal_policy = np.zeros(nS, dtype = 'int8') # Update your optimal policy based on optimal value function 'V' optimal_policy = policy_update(env, optimal_policy, V, gamma) return V, optimal_policy, avg_r_VI_mat, V_hm
c116d0408d6edfa82763febb030633b815d69812
7,271
def is_excluded(branch_name):
    """
    We may want to explicitly exclude some BRANCHES from the list of BRANCHES to be merged;
    check if the branch name supplied is excluded, and if yes then do not perform merging into it.

    Args:
        branch_name: The branch to check if to be incorporated in branching or not.

    Returns:
        True if branch should be excluded, in this case no merges will be performed
        into this branch, otherwise False.
    """
    return branch_name in BRANCHES_TO_EXCLUDE
d38923a84e7a3f9a40ebd101de5c542156fff7aa
7,272
def schedule_contrib_conv2d_winograd_without_weight_transform(attrs, outs, target): """Schedule definition of conv2d_winograd_without_weight_transform""" with target: return topi.generic.schedule_conv2d_winograd_without_weight_transform(outs)
e186a727ccf69c3292d8807abd003485308754db
7,273
def _phi(r: FloatTensorLike, order: int) -> FloatTensorLike:
    """Coordinate-wise nonlinearity used to define the order of the interpolation.

    See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.

    Args:
        r: input op.
        order: interpolation order.

    Returns:
        `phi_k` evaluated coordinate-wise on `r`, for `k = r`.
    """
    # using EPSILON prevents log(0), sqrt(0), etc.
    # sqrt(0) is well-defined, but its gradient is not
    with tf.name_scope("phi"):
        if order == 1:
            r = tf.maximum(r, EPSILON)
            r = tf.sqrt(r)
            return r
        elif order == 2:
            return 0.5 * r * tf.math.log(tf.maximum(r, EPSILON))
        elif order == 4:
            return 0.5 * tf.square(r) * tf.math.log(tf.maximum(r, EPSILON))
        elif order % 2 == 0:
            r = tf.maximum(r, EPSILON)
            return 0.5 * tf.pow(r, 0.5 * order) * tf.math.log(r)
        else:
            r = tf.maximum(r, EPSILON)
            return tf.pow(r, 0.5 * order)
80a41c99a4ef8b396d16b02a6217eaa9191105f6
7,275
def linbin(n, nbin=None, nmin=None): """Given a number of points to bin and the number of approximately equal-sized bins to generate, returns [nbin_out,{from,to}]. nbin_out may be smaller than nbin. The nmin argument specifies the minimum number of points per bin, but it is not implemented yet. nbin defaults to the square root of n if not specified.""" if not nbin: nbin = int(np.round(n**0.5)) tmp = np.arange(nbin+1)*n//nbin return np.vstack((tmp[:-1],tmp[1:])).T
2d537131ad8d13e32375b74b9fa3e77088d046dd
7,276
from bs4 import BeautifulSoup
import urllib.request as urllib_req  # assumed alias; the original snippet used urllib_req without importing it


def get_soup(url):
    """Gets the soup of the given URL.

    :param url: (str) URL to get the soup from.
    :return: Soup of given URL.
    """
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36'}
    return BeautifulSoup(urllib_req.urlopen(urllib_req.Request(url, headers=header)), 'html.parser')
69982a75a5c329b0d9e0ca7638c0ffffdc3ac21e
7,277
def msd_id_to_dirs(msd_id):
    """Given an MSD ID, generate the path prefix.
    e.g. TRABCD12345678 -> A/B/C/TRABCD12345678"""
    return op.join(msd_id[2], msd_id[3], msd_id[4], msd_id)
e210ae919de4fc8b037a7e8d7aabb6858b6e07f9
7,278
def get_data_path(sub_path):
    """Returns path to file in data folder."""
    return join(_data_folder_path, sub_path)
847b59cfea7f4d42b65f166230c032e71bb92ecd
7,279
import io def read_avro_bytes(URL, open_with, start_byte, length, header, nrows=None): """Pass a specific file/bytechunk and convert to dataframe with cyavro Both a python dict version of the header, and the original bytes that define it, are required. The bytes are prepended to the data, so that the C avro reader can interpret them. """ with open_with(URL, 'rb') as f: f.seek(start_byte) if start_byte == 0: header = read_header(f) f.seek(header['header_size']) data = header['head_bytes'] + f.read(length) if nrows is None: b = io.BytesIO(data) header['blocks'] = [] scan_blocks(b, header, len(data)) nrows = sum(b['nrows'] for b in header['blocks']) f = cyavro.AvroReader() f.init_bytes(data) df, arrs = empty(header['dtypes'].values(), nrows, cols=header['dtypes']) f.init_reader() f.init_buffers(10000) for i in range(0, nrows, 10000): d = f.read_chunk() for c in d: s = [f for f in header['schema']['fields'] if f['name'] == c][0] if 'logicalType' in s: df[c].values[i:i + 10000] = time_convert(d[c], s) else: df[c].values[i:i + 10000] = d[c] return df
9542eb13c1247de35f00a1fa370aba721f8657cd
7,280
def get_launches(method="", **query): """Gets launches based on query strings Gets launches based on query strings from the API Parameters ---------- method : str (optional) the method used for the request query : keyword args keyword args based on the API query strings Returns ------- list a list of the launches """ return _get("launches", method, query)
ca162affdd7aef187985e0d2c75e153ad75db162
7,281
def state(obj):
    """Gets the UnitOfWork state of a mapped object"""
    return obj.__ming__.state
1072265fe175ffcd581d14af5d4ee85f2941a5e4
7,282
def save_file_in_path(file_path, content):
    """Write the content in a file """
    try:
        with open(file_path, 'w', encoding="utf-8") as f:
            f.write(content)
    except Exception as err:
        print(err)
        return None
    return file_path
7b1e453a9b2a8c1211e111a6e8db432811d84a7a
7,283
import json from datetime import datetime def export_entity_for_model_and_options(request): """ Export entity list in a list of 'format' type. @note EntityModelClass.export_list() must return a list of results. User of the request is used to check for permissions. """ limit = int_arg(request.GET.get('limit', 100000)) app_label = request.GET['app_label'] validictory.validate(app_label, Entity.NAME_VALIDATOR) model = request.GET['model'] validictory.validate(model, Entity.NAME_VALIDATOR) columns = request.GET.getlist('columns[]', ['id']) validictory.validate(model, COLUMNS_VALIDATOR) file_format = request.GET['format'] validictory.validate(model, {"type": "string"}) content_type = ContentType.objects.get_by_natural_key(app_label, model) entity_model = content_type.model_class() sort_by = json.loads(request.GET.get('sort_by', '[]')) if not len(sort_by) or sort_by[-1] not in ('id', '+id', '-id'): order_by = sort_by + ['id'] else: order_by = sort_by if request.GET.get('search'): search = json.loads(request.GET['search']) else: search = None if request.GET.get('filters'): filters = json.loads(request.GET['filters']) else: filters = None export_list = getattr(entity_model, 'export_list') if export_list and callable(export_list): cursor = None columns, items = export_list(columns, cursor, search, filters, order_by, limit, request.user) else: # nothing to export columns, items = [], [] exporter = DataExporter(columns, items) if file_format == 'csv': data = exporter.export_data_as_csv() elif file_format == 'xlsx': data = exporter.export_data_as_xslx() else: raise SuspiciousOperation("Invalid format") timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S") file_name = "%s-%s-%s" % (app_label, model, timestamp) + exporter.file_ext response = StreamingHttpResponse(data, content_type=exporter.mime_type) response['Content-Disposition'] = 'attachment; filename="' + file_name + '"' response['Content-Length'] = exporter.size return response
5539d3fe66dd3163044acf7073e40e55cc1c3b5c
7,284
import torch
import torch.nn.functional as F  # needed for F.softmax; missing from the original snippet


def jaccard_loss(true, logits, eps=1e-7):
    """Computes the Jaccard loss, a.k.a the IoU loss.

    Note that PyTorch optimizers minimize a loss. In this case, we would like to
    maximize the Jaccard index, so we return one minus the Jaccard index.

    Args:
        true: a tensor of shape [B, H, W] or [B, 1, H, W].
        logits: a tensor of shape [B, C, H, W]. Corresponds to the raw output or
            logits of the model.
        eps: added to the denominator for numerical stability.

    Returns:
        jacc_loss: the Jaccard loss.
    """
    num_classes = logits.shape[1]
    if num_classes == 1:
        true_1_hot = torch.eye(num_classes + 1)[true.squeeze(1)]
        true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
        true_1_hot_f = true_1_hot[:, 0:1, :, :]
        true_1_hot_s = true_1_hot[:, 1:2, :, :]
        true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)
        pos_prob = torch.sigmoid(logits)
        neg_prob = 1 - pos_prob
        probas = torch.cat([pos_prob, neg_prob], dim=1)
    else:
        true_1_hot = torch.eye(num_classes)[true.squeeze(1)]
        true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
        probas = F.softmax(logits, dim=1)
    true_1_hot = true_1_hot.type(logits.type())
    dims = (0,) + tuple(range(2, true.ndimension()))
    intersection = torch.sum(probas * true_1_hot, dims)
    cardinality = torch.sum(probas + true_1_hot, dims)
    union = cardinality - intersection
    jacc_loss = (intersection / (union + eps)).mean()
    return (1 - jacc_loss)
ae6c8f94662f48be81abf60c8a8fcd88f7ff7d81
7,286
def _get_distribution_schema():
    """ get the schema for distribution type """
    return schemas.load(_DISTRIBUTION_KEY)
32af1d9547d978a8a57a799ba74723f21e05c756
7,287
def compute_transforms(rmf_coordinates, mir_coordinates, node=None):
    """Get transforms between RMF and MIR coordinates."""
    transforms = {
        'rmf_to_mir': nudged.estimate(rmf_coordinates, mir_coordinates),
        'mir_to_rmf': nudged.estimate(mir_coordinates, rmf_coordinates)
    }
    if node:
        mse = nudged.estimate_error(transforms['rmf_to_mir'],
                                    rmf_coordinates, mir_coordinates)
        node.get_logger().info(f"Transformation estimate error: {mse}")
    return transforms
3190a94cc406bb1199df3480202df4a3258912f9
7,288
def merge_dicts(*list_of_dicts):
    """Merge a list of dictionaries and combine common keys into a list of values.

    args:
        list_of_dicts: a list of dictionaries. values within the dicts must be lists
            dict = {key: [values]}
    """
    output = {}
    for dikt in list_of_dicts:
        for k, v in dikt.items():
            if not output.get(k):
                output[k] = v
            else:
                output[k].extend(v)
                output[k] = list(set(output[k]))
    return output
3d629bb9bc6af2a637a622fea158447b24c00bd0
7,289
def highpass_filter(src, size):
    """
    highpass_filter(src, size)
    High-pass filter

    Arguments
    ----------
    src : image in AfmImg format
    size : integer
        size of the filter

    Returns
    -------
    dst : image in AfmImg format
        the filtered image
    """
    def highpass(dft_img_src, *args):
        dft_img = dft_img_src.copy()
        # create the mask
        mask = __make_filter(dft_img.shape, args[0], True)
        # apply the mask
        dft_img = dft_img.real*mask + dft_img.imag*mask * 1j
        return dft_img

    dst = __dft_filter(src, highpass, size)
    return dst
7945e3556cdd2eb4fd1e2303cbba00052a4a5900
7,290
import re
import socket


def parse_target(target):
    """
    Parse the target into IP format
    :param str target: the target to be parsed
    :return tuple scan_ip: the parsed IP and domain
    """
    scan_ip = ''
    domain_result = ''
    main_domain = ''
    try:
        url_result = re.findall(r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', target)
        if url_result == []:
            ip_result = re.findall(r"\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b", target)
            if ip_result == []:
                result = tldextract.extract(target)
                main_domain = result.domain + '.' + result.suffix
                domain_regex = re.compile(r'(?:[A-Z0-9_](?:[A-Z0-9-_]{0,247}[A-Z0-9])?\.)+(?:[A-Z]{2,6}|[A-Z0-9-]{2,}(?<!-))\Z', re.IGNORECASE)
                domain_result = domain_regex.findall(target)
                if domain_result:
                    scan_ip = socket.gethostbyname(domain_result[0])
                else:
                    net = IP(target)
                    # print(net.len())
                    scan_ip = net
            else:
                scan_ip = ip_result[0]
        else:
            url_parse = urlparse(target)
            result = tldextract.extract(target)
            main_domain = result.domain + '.' + result.suffix
            domain_regex = re.compile(r'(?:[A-Z0-9_](?:[A-Z0-9-_]{0,247}[A-Z0-9])?\.)+(?:[A-Z]{2,6}|[A-Z0-9-]{2,}(?<!-))\Z', re.IGNORECASE)
            domain_result = domain_regex.findall(url_parse.netloc)
            scan_ip = socket.gethostbyname(url_parse.hostname)
    except Exception as e:
        print(e)
    finally:
        pass
    if domain_result:
        domain_result = domain_result[0]
    return scan_ip, main_domain, domain_result
292d90eebefb8da5289b20914dfbcd9c294ee5b7
7,291
from typing import Type


def isWrappedScalarType(typ: Type) -> bool:
    """
    Given a type, determine if it is a c10::scalar which we will wrap in a lazy Value.
    Since we literally change the type from scalarT to valueT, information is lost.
    This function helps build a list of wrapped scalars to save that information
    """
    if isinstance(typ, BaseType):
        # I am regretting my naming conventions, but now we are wrapping at::scalar in
        # lazy value, while preserving other 'scalar' types as scalars in the IR
        return typ.name == BaseTy.Scalar
    elif isinstance(typ, (OptionalType, ListType)):
        return isWrappedScalarType(typ.elem)
    return False
0d854c734ddf3441dd2524c56d1d84d85fc7ac22
7,292
def assign_topic(data, doc_topic_distr):
    """
    Assigns dominant topic to documents of corpus.

    :param data: DF of preprocessed and filtered text data
    :type data: pd.DataFrame
    :param doc_topic_distr: Array of topic distribution per doc of corpus
    :type doc_topic_distr: np.array
    :return: DF incl assigned topics
    :rtype: pd.DataFrame
    """
    data["topic_distribution"] = doc_topic_distr.tolist()
    data["topic"] = np.argmax(doc_topic_distr, axis=1) + 1
    return data
d53661831d9ee1431f989b290396be3ebde0582a
7,293
def _enable_scan_single_bytecode(code, name): """ Part of the ``_enable_scan`` that applies the scan behavior on a single given list/set comprehension or generator expression code. """ bc = bytecode.Bytecode.from_code(code) Instr = bytecode.Instr # Updates LOAD_GLOBAL to LOAD_FAST when arg is name for instr in bc: if isinstance(instr, Instr) \ and instr.name == "LOAD_GLOBAL" and instr.arg == name: instr.set("LOAD_FAST", name) # Some needed information from the first/main FOR_ITER and the heading # "filter" part of the generator expression or list/set comprehension for_idx = next(idx for idx, instr in enumerate(bc) if getattr(instr, "name", None) == "FOR_ITER") for_instr = bc[for_idx] begin_label_idx = for_idx - 1 try: filter_last_idx = last(idx for idx, instr in enumerate(bc) if isinstance(instr, Instr) and instr.is_cond_jump() and instr.arg == begin_label_idx) except StopIteration: filter_last_idx = for_idx # Adds the block before the loop (i.e., first label) to append/add/yield # the first input directly from FOR_ITER and save the first "prev" # accordingly heading_instructions = [("DUP_TOP",), ("STORE_FAST", name)] + { "<listcomp>": [("LIST_APPEND", 2)], "<setcomp>": [("SET_ADD", 2)], "<genexpr>": [("YIELD_VALUE",), ("POP_TOP",)] }[bc.name] bc[begin_label_idx:begin_label_idx] = ( [instr.copy() for instr in bc[for_idx:filter_last_idx + 1]] + [Instr(*args) for args in heading_instructions] ) # Adds ending block that stores the result to prev before a new iteration loop_instructions = ["SET_ADD", "LIST_APPEND", "YIELD_VALUE"] ending_idx = next(-idx for idx, instr in enumerate(reversed(bc), 1) if isinstance(instr, Instr) and instr.name in loop_instructions) ending_instructions = [("DUP_TOP",), ("STORE_FAST", name)] bc[ending_idx:ending_idx] = \ [Instr(*args) for args in ending_instructions] return bc.to_code()
d1bd12f50961869e09d4cbdc121e01abbe34232a
7,294
def composite_rotation(r, p1=qt.QH([1, 0, 0, 0]), p2=qt.QH([1, 0, 0, 0])):
    """A composite function of next_rotation."""
    return next_rotation(next_rotation(r, p1), p2)
335363a18efb36d28c87b0266bd4dcdd27b6b85a
7,295
def extract_vectors_ped_feature(residues, conformations, key=None, peds=None, features=None, indexes=False, index_slices=False): """ This function allows you to extract information of the ped features from the data structure. In particular allows: - all rows or a specific subset of them, containing a certain feature (i.e., RD, EN, MED_ASA, etc ...) - the interval extremes for a certain features (i.e., RD, EN, MED_ASA, etc ...) - all the feature intervals as slices :param residues: number of residues in the model :param conformations: maximum number of conformations available :param key: the key of the feature or None if considering all of them, default: False :param peds: the ped id or None if considering all of them, default: False :param features: matrix of features or None if extracting only the indexes, default: False :param indexes: return (begin, end) indexes of a feature if it's True, default: False :param index_slices: return all the intervals of the features if it's True, default: False :return: begin/end, slices or features """ begin = end = -1 residues = int(residues) conformations = int(conformations) slices = [] if key == 'PED_ID' or index_slices: begin = 0 end = 1 slices.append(slice(begin, end)) if key == 'RD' or index_slices: begin = 1 end = conformations + 1 slices.append(slice(begin, end)) if key == 'EN' or index_slices: begin = conformations + 1 end = conformations + residues + 1 slices.append(slice(begin, end)) if key == 'MED_ASA' or index_slices: begin = conformations + residues + 1 end = conformations + 2 * residues + 1 slices.append(slice(begin, end)) if key == 'MED_RMSD' or index_slices: begin = conformations + 2 * residues + 1 end = conformations + 3 * residues + 1 slices.append(slice(begin, end)) if key == 'MED_DIST' or index_slices: begin = conformations + 3 * residues + 1 end = int(conformations + 3 * residues + 1 + residues * (residues - 1) / 2) slices.append(slice(begin, end)) if key == 'STD_DIST' or index_slices: begin = int(conformations + 3 * residues + 1 + residues * (residues - 1) / 2) end = None slices.append(slice(begin, end)) begin = int(begin) if end is not None: end = int(end) if begin == -1: return None if index_slices: return slices if indexes is True or features is None: return begin, end if peds is None: return features[:, begin:end] else: if isinstance(peds, int): return np.array(features[peds][begin:end]) else: return features[peds, begin:end]
3d0efb833ffd80303e2494d017c12e1d06d10bcc
7,296
def Load_File(filename):
    """
    Loads a data file
    """
    with open(filename) as file:
        data = file.readlines()
    return data
f49aa4474d9af0b8a778b9575e282eb579c103ab
7,297
from scipy.ndimage import morphology import numpy def massage_isig_and_dim(isig, im, flag, band, nm, nu, fac=None): """Construct a WISE inverse sigma image and add saturation to flag. unWISE provides nice inverse variance maps. These however have no contribution from Poisson noise from sources, and so underestimate the uncertainties dramatically in bright regions. This can pull the whole fit awry in bright areas, since the sky model means that every pixel feels every other pixel. It's not clear what the best solution is. We make a goofy inverse sigma image from the original image and the inverse variance image. It is intended to be sqrt(ivar) for the low count regime and grow like sqrt(1/im) for the high count regime. The constant of proportionality should in principle be worked out; here I set it to 0.15, which worked once, and it doesn't seem like this should depend much on which WISE exposure the image came from? It's ultimately something like the gain or zero point... """ if fac is None: bandfacs = {1: 0.15, 2: 0.3} bandfloors = {1: 0.5, 2: 2} fac = bandfacs[band] floor = bandfloors[band] satbit = 16 if band == 1 else 32 satlimit = 85000 # if band == 1 else 130000 msat = ((flag & satbit) != 0) | (im > satlimit) | ((nm == 0) & (nu > 1)) # dilate = morphology.iterate_structure( # morphology.generate_binary_structure(2, 1), 3) xx, yy = numpy.mgrid[-3:3+1, -3:3+1] dilate = xx**2+yy**2 <= 3**2 msat = morphology.binary_dilation(msat, dilate) isig[msat] = 0 flag = flag.astype('i8') # zero out these bits; we claim them for our own purposes. massagebits = (extrabits['crowdsat'] | crowdsource.nodeblend_maskbit | crowdsource.sharp_maskbit | extrabits['nebulosity']) flag &= ~massagebits flag[msat] |= extrabits['crowdsat'] flag[(flag & nodeblend_bits) != 0] |= crowdsource.nodeblend_maskbit flag[(flag & sharp_bits) != 0] |= crowdsource.sharp_maskbit sigma = numpy.sqrt(1./(isig + (isig == 0))**2 + floor**2 + fac**2*numpy.clip(im, 0, numpy.inf)) sigma[msat] = numpy.inf sigma[isig == 0] = numpy.inf return (1./sigma).astype('f4'), flag
b0bf70ddfff3a6b0a48005b9e1069c5c5f670dac
7,298
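A small numerical sketch of the noise model combined above, using the W1 constants (fac = 0.15, floor = 0.5) and made-up pixel values; it shows the reported inverse sigma staying close to sqrt(ivar) for faint pixels and shrinking roughly like 1 / (fac * sqrt(im)) for bright ones.

import numpy as np

im = np.array([10., 1e3, 1e5])      # toy pixel counts
isig = np.full(3, 1.0)              # toy unWISE inverse sigma (sigma = 1 everywhere)
fac, floor = 0.15, 0.5              # W1 constants from massage_isig_and_dim

sigma = np.sqrt(1. / isig**2 + floor**2 + fac**2 * np.clip(im, 0, np.inf))
print(1. / sigma)                   # roughly [0.82, 0.21, 0.021]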
def uncapitalize(string: str):
    """De-capitalize first character of string

    E.g. 'How is Michael doing?' -> 'how is Michael doing?'
    """
    if len(string):
        return string[0].lower() + string[1:]
    return ""
1a294f171d16d7a4c41fb0546feca3c03b7ae37a
7,300
import numpy as np


def _sc_weights_trad(M, M_c, V, N, N0, custom_donor_pool, best_w_pen, verbose=0):
    """ Traditional matrix solving. Requires making N x N0 matrices. """
    # Could potentially be decomposed so the full N x N0 matrix is never built,
    # but the ridge solution works fine for that.
    # _weights, print_progress and print_memory_snapshot are helpers from the
    # enclosing module.
    sc_weights = np.full((N, N0), 0.)
    weight_log_inc = max(int(N / 100), 1)
    for i in range(N):
        if (i % weight_log_inc) == 0 and verbose > 0:
            print_progress(i + 1, N)
        if verbose > 1:
            print_memory_snapshot(extra_str="Loop " + str(i))
        allowed = custom_donor_pool[i, :]
        sc_weights[i, allowed] = _weights(V, M[i, :], M_c[allowed, :], best_w_pen)
    if ((N - 1) % weight_log_inc) != 0 and verbose > 0:
        print_progress(N, N)
    return sc_weights
00b9ae93b52453281693660fa6de685cd784127e
7,302
from typing import Awaitable, Callable

import discord


def generate_on_message(
    test_client: "DiscordTestClient", broker_id: int
) -> Callable[[discord.Message], Awaitable[None]]:
    """
    Whenever a message comes in, we want our test client to:

    1. Filter the message so we are only getting the ones we want.
    2. Store received messages so we can inspect them during tests.
    3. Wait to receive a certain number of messages, then set an event communicating
       that the expected number of messages has been received and we can continue.
    """

    async def on_message(message: discord.Message) -> None:
        # Toss out any messages not on our expected channels, otherwise we may receive
        # messages from other devs running tests concurrently
        if message.channel.id not in test_client.channel_id_whitelist:
            return

        # Print the message for our test logs. We're only going to use the primary
        # client to print so we don't double-print each message.
        if test_client.is_primary:
            print(
                f"message received"
                f"\nfrom: {test_client.user.display_name}"
                f"\nby: {message.author.display_name}"
                f"\nchannel: {message.channel.name}"
                f"\n{message.content}\n\n"
            )

        if message.author.id != broker_id:
            return

        test_client.messages_received.append(message)

        if test_client.test_expected_count_received == 0:
            raise IOError("Received an unexpected message")

        if (
            len(test_client.messages_received)
            >= test_client.test_expected_count_received
            and not test_client.event_messages_received.is_set()
        ):
            test_client.event_messages_received.set()

    return on_message
2de510a3195f60056ad13c8fb1b9c71fe480b5ab
7,304
import unittest


def test_suite():
    """ Construct a TestSuite instance for all test cases. """
    suite = unittest.TestSuite()
    for dt, format, expectation in TEST_CASES:
        suite.addTest(create_testcase(dt, format, expectation))
    return suite
791e6942d213c53a44f433495e13a76abfc1f936
7,305
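TEST_CASES and create_testcase are module-level names assumed by the entry above; with them in place, running the suite directly looks like:

import unittest

unittest.TextTestRunner(verbosity=2).run(test_suite())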
import numpy as np


def calcScipionScore(modes):
    """Calculate the score from hybrid electron microscopy normal mode analysis
    (HEMNMA) [CS14]_ as implemented in the Scipion continuousflex plugin [MH20]_.
    This score prioritises modes as a function of mode number and collectivity order.

    .. [CS14] Sorzano COS, de la Rosa-Trevín JM, Tama F, Jonić S.
       Hybrid Electron Microscopy Normal Mode Analysis graphical interface and protocol.
       *J Struct Biol* **2014** 188:134-41.

    .. [MH20] Harastani M, Sorzano COS, Jonić S.
       Hybrid Electron Microscopy Normal Mode Analysis with Scipion.
       *Protein Sci* **2020** 29:223-236.

    :arg modes: mode(s) or vector(s)
    :type modes: :class:`.Mode`, :class:`.Vector`, :class:`.ModeSet`, :class:`.NMA`
    """
    # calcCollectivity is expected to be available from the enclosing module.
    n_modes = modes.numModes()

    if n_modes > 1:
        collectivityList = list(calcCollectivity(modes))
    else:
        collectivityList = [calcCollectivity(modes)]

    idxSorted = [i[0] for i in sorted(enumerate(collectivityList),
                                      key=lambda x: x[1], reverse=True)]

    score = np.zeros(n_modes)
    modeNum = list(range(n_modes))

    for i in range(n_modes):
        score[idxSorted[i]] = idxSorted[i] + modeNum[i] + 2

    score = score / (2.0 * n_modes)
    return score
e1c786bb90d6a7bc0367338f5e5bd6d10ec35366
7,306
def google_base(request):
    """ view for Google Base Product feed template; returns XML response """
    products = Product.active.all()
    template = get_template("marketing/google_base.xml")
    xml = template.render(Context(locals()))
    return HttpResponse(xml, mimetype="text/xml")
a850e4c16f55486c872d0d581a25802d7de3c56e
7,307
def get_agivenn_df(run_list, run_list_sep, **kwargs):
    """DataFrame of mean amplitudes conditioned on different n values."""
    n_simulate = kwargs.pop('n_simulate')
    adfam_t = kwargs.pop('adfam_t', None)
    adaptive = kwargs.pop('adaptive')
    n_list = kwargs.pop('n_list', [1, 2, 3])
    comb_vals, comb_val_resamps, sep_vals, sep_val_resamps = (
        comb_sep_eval_resamp(
            run_list, run_list_sep, get_a_n_mean_given_n, n_simulate,
            adaptive=adaptive, n_list=n_list, adfam_t=adfam_t))
    col_names = [r'$\mathrm{{E}}[a_{}|N={}]$'.format(n, n) for n in n_list]
    return get_sep_comb_df(
        comb_vals, comb_val_resamps, sep_vals, sep_val_resamps, col_names)
fe8fa42a3bc2e78ec1d5c5a7d47151d56789f5a5
7,308
def friendship_request_list_rejected(request, template_name='friendship/friend/requests_list.html'):
    """ View rejected friendship requests """
    # friendship_requests = Friend.objects.rejected_requests(request.user)
    # A request is rejected once its `rejected` timestamp has been set, so filter
    # on rejected__isnull=False to list only the rejected ones.
    friendship_requests = FriendshipRequest.objects.filter(rejected__isnull=False)
    return render(request, template_name, {'requests': friendship_requests})
2457de6e01bd4fee96d499a481a3c5a2cd0d1782
7,309
def cycle_ctgo(object_type, related_type, related_ids):
    """ indirect relationships between Cycles and Objects mapped to CycleTask """
    if object_type == "Cycle":
        join_by_source_id = db.session.query(CycleTask.cycle_id) \
            .join(Relationship, CycleTask.id == Relationship.source_id) \
            .filter(
                Relationship.source_type == "CycleTaskGroupObjectTask",
                Relationship.destination_type == related_type,
                Relationship.destination_id.in_(related_ids))

        join_by_destination_id = db.session.query(CycleTask.cycle_id) \
            .join(Relationship, CycleTask.id == Relationship.destination_id) \
            .filter(
                Relationship.destination_type == "CycleTaskGroupObjectTask",
                Relationship.source_type == related_type,
                Relationship.source_id.in_(related_ids))

        return join_by_source_id.union(join_by_destination_id)
    else:
        join_by_source_id = db.session.query(Relationship.destination_id) \
            .join(CycleTask, CycleTask.id == Relationship.source_id) \
            .filter(
                CycleTask.cycle_id.in_(related_ids),
                Relationship.source_type == "CycleTaskGroupObjectTask",
                Relationship.destination_type == object_type)

        join_by_destination_id = db.session.query(Relationship.source_id) \
            .join(CycleTask, CycleTask.id == Relationship.destination_id) \
            .filter(
                CycleTask.cycle_id.in_(related_ids),
                Relationship.destination_type == "CycleTaskGroupObjectTask",
                Relationship.source_type == object_type)

        return join_by_source_id.union(join_by_destination_id)
25de449672ef9ced358a53762156f3cbeaabd432
7,310
def Min(axis=-1, keepdims=False):
  """Returns a layer that applies min along one tensor axis.

  Args:
    axis: Axis along which values are grouped for computing minimum.
    keepdims: If `True`, keep the resulting size 1 axis as a separate tensor
      axis; else, remove that axis.
  """
  return Fn('Min', lambda x: jnp.min(x, axis, keepdims=keepdims))
09c83217b48f16782530c1954f3e4f0127c06e69
7,311
from keras import backend as K  # or: from tensorflow.keras import backend as K


def sampling(args):
    """Reparameterization trick by sampling from an isotropic unit Gaussian.

    # Arguments
        args (tensor): mean and log of variance of Q(z|X)

    # Returns
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
06e8ca16f1139e12242ee043aa680d5ce7c43c10
7,312
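The same reparameterization trick written out in NumPy, to make the sampling step explicit outside of any Keras graph (shapes and values are toy choices):

import numpy as np

batch, dim = 4, 2
z_mean = np.zeros((batch, dim))
z_log_var = np.full((batch, dim), np.log(0.25))   # i.e. sigma = 0.5

epsilon = np.random.normal(size=(batch, dim))     # epsilon ~ N(0, I)
z = z_mean + np.exp(0.5 * z_log_var) * epsilon    # z ~ N(z_mean, sigma^2), differentiable in z_mean and z_log_var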
def get_expression_arg_names(expression, strip_dots=True):
    """
    Parse expression and return set of all argument names. For arguments
    with attribute-like syntax (e.g. materials), if `strip_dots` is True,
    only base argument names are returned.
    """
    args = ','.join(aux.args for aux in parse_definition(expression))
    args = [arg.strip() for arg in args.split(',')]

    if strip_dots:
        for ii, arg in enumerate(args[:]):
            aux = arg.split('.')
            if len(aux) == 2:
                args[ii] = aux[0]

    return set(args)
6f96395af45b008e5e8b0336c320813a760add49
7,313
from pathlib import Path


def ORDER_CTIME(path: Path) -> int:
    """Sort key function for paths: orders them by creation time."""
    return path.stat().st_ctime_ns
435571222b26e0c83904305784d6c8868b5bf497
7,314
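A short usage sketch, ordering the entries of a (hypothetical) directory by creation time:

from pathlib import Path

paths = sorted(Path("some_dir").iterdir(), key=ORDER_CTIME)   # oldest first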
def format_location(data, year):
    """ Format any spatial data.

    Currently the only spatial processing is mapping the `region` column
    through `region_dict`; MSOA/location columns are not added yet.

    Parameters
    ----------
    data : pd.DataFrame
        Data before location formatting.
    year : int
        Currently unused.

    Returns
    -------
    data : pd.DataFrame
        Data with location formatting.
    """
    # No finer-grained spatial data yet.
    # data["MSOA"] = "no_location"
    # data["location"] = "no_location"
    data["region"] = data["region"].astype(str).map(region_dict)
    return data
11d5d7b88f3143b38dc57248937b5e19e22e44c8
7,315
def create_app(service: Service):
    """Start a small webserver with the Service."""
    app = FastAPI()

    @app.post("/query")
    def query(params: Params):
        """The main query endpoint."""
        return service.query(**params.query, n_neighbors=params.n_neighbors)

    return app
3d2f01960d3def11f45bbbbf653511d6f5362881
7,317
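A minimal wiring sketch; my_service stands in for an already-constructed Service instance (hypothetical name), and uvicorn is one possible ASGI server for the returned app:

import uvicorn

app = create_app(my_service)
uvicorn.run(app, host="127.0.0.1", port=8000)
# The service is then queried via POST /query with a JSON body matching the Params model.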
from sympy import Eq, EmptySet, Expr, Integers, Rel, Symbol, solveset


def establecer_dominio(func_dist: Expr) -> dict:
    """Determine the domain of each variable from a probability distribution function.

    Parameters
    ----------
    func_dist
        Probability distribution expression.

    Returns
    -------
    dict
        Domain of each symbol.
    """
    equations = func_dist.atoms(Eq)
    orders = func_dist.atoms(Rel) - equations
    dom = {var: EmptySet for var in func_dist.atoms(Symbol)}

    for order in orders:
        if len(order.atoms(Symbol)) > 1:
            continue
        var, = order.atoms(Symbol)
        val = solveset(order, var, Integers)
        dom[var] = dom[var] & val if dom[var] else val

    for equation in equations:
        var, = equation.atoms(Symbol)
        val = solveset(equation, var)
        dom[var] = dom[var] | val

    return dom
8ab1b4bc6518cb8baa300bd9f1d38ffff3dfbcf7
7,318
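A small SymPy example of the kind of input the function expects, a Piecewise PMF whose conditions are equalities (the distribution itself is illustrative):

from sympy import Eq, Piecewise, Rational, Symbol

x = Symbol('x')
pmf = Piecewise((Rational(1, 2), Eq(x, 0)), (Rational(1, 2), Eq(x, 1)), (0, True))

establecer_dominio(pmf)   # -> {x: {0, 1}}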
import numpy as np


def random_init(n, max_norm):
    """Computes a random initial configuration of n 2D-vectors such that they
    all lie inside a circle of radius max_norm.

    Parameters
    ----------
    n : int
        Number of vectors
    max_norm : float or int
        Radius of the circle, i.e. the maximum possible distance from the
        origin of coordinates that the vectors can have.

    Returns
    -------
    numpy.ndarray
        (n, 2) matrix of vectors
    """
    X = np.zeros((n, 2))
    angles = np.random.rand(n) * 2 * np.pi
    norms = np.random.rand(n) * max_norm
    for i, angle, norm in zip(range(n), angles, norms):
        X[i] = np.array([np.cos(angle), np.sin(angle)]) * norm
    return X
5533e43572c47d8c8cd2d6765bb383382987015b
7,319
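A quick sanity check of the invariant stated in the docstring, with arbitrary sizes:

import numpy as np

X = random_init(1000, max_norm=5.0)
assert X.shape == (1000, 2)
assert np.all(np.linalg.norm(X, axis=1) <= 5.0)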
from typing import Dict, Tuple


def calc_cells(serial: int) -> Dict[Tuple[int, int], int]:
    """Calculate the power for all cells and store them in a dict to
    retrieve them faster later.
    """
    r = {}
    for i in range(300):
        for j in range(300):
            r.update({(i, j): calc_power((i, j), serial)})
    return r
70684827b5c3ef1ec31d419f3012356a9bde1e6c
7,320
import glob
import pathlib
from pathlib import Path
from typing import List, Union


def child_files_recursive(root: Union[str, pathlib.Path], ext: str) -> List[str]:
    """
    Get all files with a specific extension nested under a root directory.

    Parameters
    ----------
    root : pathlib.Path or str
        root directory
    ext : str
        file extension

    Returns
    -------
    List[str]
    """
    if not is_string_like(root) and not isinstance(root, pathlib.Path):
        raise TypeError(f'root is not string-like or a Path: {type(root)}')

    return list(glob.iglob(str(Path(root).joinpath('**/*' + ext)), recursive=True))
c16288b417d36d6d414c799c78fd59df976ca400
7,322
def ensure_dict(value):
    """Convert None to empty dict."""
    if value is None:
        return {}
    return value
191b1a469e66750171648e715501690b2814b8b2
7,323
import random


def mutSet(individual):
    """Mutation that pops or adds an element."""
    if random.random() < 0.5:
        if len(individual) > 0:  # We cannot pop from an empty set
            individual.remove(random.choice(sorted(tuple(individual))))
    else:
        individual.add(random.randrange(param.NBR_ITEMS))
    return individual,
f9919da7f6612e3f317dbe854eda05a71d106632
7,324
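A usage sketch in the DEAP style this operator comes from; the toolbox wiring and the module-level param.NBR_ITEMS are assumptions, not part of the entry above:

from deap import base

toolbox = base.Toolbox()
toolbox.register("mutate", mutSet)

individual = {0, 3, 7}
mutant, = toolbox.mutate(individual)   # trailing comma unpacks the 1-tuple DEAP operators return
# The add branch requires param.NBR_ITEMS to be defined in the same module as mutSet.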
def validate_tweet(tweet: str) -> bool:
    """Validates a tweet.

    Args:
        tweet (str): The text to tweet.

    Raises:
        ValueError: Raised if the tweet is longer than 280 unicode characters.

    Returns:
        bool: True if validation holds.
    """
    # len() counts unicode code points, which is what the 280-character limit refers to.
    str_len = len(tweet)
    if str_len > 280:
        raise ValueError(f"tweet is more than 280 unicode characters\n {tweet}")
    return True
41c7ef1967cba5bb75ea8bce7ffa9b7d636ef80e
7,325
import tensorflow as tf


def train_and_eval(model, model_dir, train_input_fn, eval_input_fn, steps_per_epoch,
                   epochs, eval_steps):
    """Train and evaluate."""
    train_dataset = train_input_fn()
    eval_dataset = eval_input_fn()

    # get_callbacks is a helper from the enclosing module.
    callbacks = get_callbacks(model, model_dir)
    history = model.fit(
        x=train_dataset,
        validation_data=eval_dataset,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        validation_steps=eval_steps,
        callbacks=callbacks)

    tf.get_logger().info(history)
    return model
54a30f82ab3da4b60534a2775c2217de057ba93c
7,326
import requests
from lxml import html  # html.fromstring comes from lxml, not the stdlib html module


def open_website(url):
    """ Open a website and return an lxml document element ready to work on """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36'
                      ' (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    }
    page = requests.get(url, headers=headers)
    source = html.fromstring(page.content)
    return source
081ed99692ff9763cb19208fdc7f3e7e08e03e8d
7,327
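A usage sketch for the returned lxml element; the URL is a placeholder:

source = open_website("https://example.com")
title = source.xpath("//title/text()")                 # lxml elements support XPath queries
links = [a.get("href") for a in source.xpath("//a")]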