text (string, lengths 15 – 7.82k)
ids (sequence, lengths 1 – 7)
def METHOD_NAME(): pass
[ 615 ]
def METHOD_NAME(self) -> str:
    """
    Unique id for identifying a data set resource
    """
    return pulumi.get(self, "data_set_id")
[ 365, 0, 147 ]
def METHOD_NAME(verbose=True, *args, **kwargs):
    # test_herzberg_coefficients_CO2_json(verbose=verbose, *args, **kwargs)
    test_getMolecule(verbose=verbose, *args, **kwargs)
    test_ZPE(verbose=verbose, *args, **kwargs)
    # test_CO_energies_Herzberg_vs_Dunham(verbose=verbose, *args, **kwargs)
    return True
[ 22, 11946 ]
def METHOD_NAME(self):
    """
    Test that the endpoint returns 200 when retrieving organisation campaigns
    """
    response = self.client.get(self.endpoint_url)
    response_body = response.get_json()
    self.assertEqual(response.status_code, 200)
    self.assertEqual(
        response_body, {"campaigns": [{"id": 1, "name": "Test Campaign"}]}
    )
[ 9, 19, 4074, 13215, 3570 ]
def METHOD_NAME() -> None: get_tab_manager().add_filetab_callback(on_new_filetab)
[ 102 ]
def METHOD_NAME():
    """Can a QUALITY flag that is an astropy quantity object be parsed correctly?

    This is a regression test for https://github.com/lightkurve/lightkurve/issues/804
    """
    from astropy.units.quantity import Quantity

    flags = list(TessQualityFlags.STRINGS.items())
    for key, value in flags:
        assert TessQualityFlags.decode(Quantity(key, dtype="int32"))[0] == value

    # Can we recover combinations of flags?
    assert TessQualityFlags.decode(
        Quantity(flags[5][0], dtype="int32") + Quantity(flags[7][0], dtype="int32")
    ) == [flags[5][1], flags[7][1]]
    assert TessQualityFlags.decode(
        Quantity(flags[3][0], dtype="int32")
        + Quantity(flags[4][0], dtype="int32")
        + Quantity(flags[5][0], dtype="int32")
    ) == [flags[3][1], flags[4][1], flags[5][1]]
[ 9, 4391, 584, 5365, 5864, 279 ]
def METHOD_NAME(properties):
    if not properties:
        return None
    for key, value in properties:
        if key == "spark.executorEnv.PAASTA_SERVICE":
            service = value
            break
    else:
        return None
    if service.startswith(JUPYTER_PREFIX):
        return service[len(JUPYTER_PREFIX):]
    else:
        return service
[ 1363, 549 ]
def METHOD_NAME(self, cached_ds):
    assert not cached_ds.exists()
    cached_ds.save(42)
    assert cached_ds.exists()
[ 9, 954 ]
def METHOD_NAME(monkeypatch, expect_report, installed_rpms_msg, current_kernel_pkg_index):
    uname_r = ''  # Kernel release is not used to determine the kernel novelty
    kernel_info = KernelInfo(pkg=installed_rpms_msg.items[current_kernel_pkg_index], uname_r=uname_r)
    msgs = [installed_rpms_msg, kernel_info]

    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=msgs))
    monkeypatch.setattr(reporting, 'create_report', create_report_mocked())

    checkinstalledkernels.process()

    if expect_report:
        assert reporting.create_report.called
        assert reporting.create_report.report_fields['title'] == 'Newest installed kernel not in use'
    else:
        assert not reporting.create_report.called
[ 9, 4017, 1885 ]
def METHOD_NAME(self, a):
    """
    sqrt(a) found by the Tonelli–Shanks algorithm

    :param a: IntegersModuloPrimeElement
    :return: Output -1 if a is not a quadratic residue,
        otherwise the correct square roots (root, -root).
        Note root < self.mod / 2
    """
    if not isinstance(a, IntegersModuloPrimeElement):
        raise TypeError("Square root only supports an object")
    if self.is_a_quadratic_residue(a):
        root_raw = tonelli(a.val, self.mod)
        root_raw_other = self.mod - root_raw
        if root_raw < root_raw_other:
            return IntegersModuloPrimeElement(root_raw), IntegersModuloPrimeElement(root_raw_other)
        else:
            return IntegersModuloPrimeElement(root_raw_other), IntegersModuloPrimeElement(root_raw)
    else:
        return -1, -1
[ 1118 ]
def METHOD_NAME(cls, info):
    if info.getter.doc:
        for remark in info.getter.doc.remarks:
            if len(remark.runs) == 1 and remark.runs[0].kind == 'text':
                match = cls._default_re.match(remark.runs[0].content)
                if match:
                    return match.group(1)
[ 416, 235 ]
def METHOD_NAME(reverse: bool = False) -> Molecule: return VirtualSiteMocking.molecule_from_smiles( "[Cl:1][C:2]([H:3])([H:4])[H:5]", reverse )
[ -1 ]
def METHOD_NAME(panel):
    blockSensors = []
    for block in blocks.getNamedBeanSet():
        sensor = block.getSensor()
        if sensor is not None:
            blockSensors.append(sensor)
    deleteList = []  # Prevent concurrent modification
    icons = panel.getSensorList()
    for icon in icons:
        sensor = icon.getSensor()
        if (sensor) and (sensor in blockSensors):
            deleteList.append(icon)
    for item in deleteList:
        panel.removeFromContents(item)
[ 188, 6234, 2063 ]
def METHOD_NAME():
    with temporary_config("""\
[global]
unicodeinkey\xf8 = hi
unicodeinvalue = \xf8
[unicodeinsection\xf8]
key = bye
"""):
        assert get_config("global", "unicodeinkey\xf8") == "hi"
        assert get_config("global", "unicodeinvalue") == "\xf8"
        assert get_config("unicodeinsection\xf8", "key") == "bye"
        assert isinstance(get_config("global", "unicodeinvalue"), str)
[ 9, 774, 623, 200, 171, 192 ]
def METHOD_NAME(self):
    """Test GOA GAF file iterator."""
    # Test GAF 2.0
    recs = []
    with open("UniProt/goa_yeast.gaf") as handle:
        for rec in GOA.gafiterator(handle):
            recs.append(rec)
    # Check number of records
    self.assertEqual(len(recs), 587)
    # Check keys are same as predefined fields
    self.assertEqual(sorted(recs[0].keys()), sorted(GOA.GAF20FIELDS))
    # Check values of first record
    self.assertEqual(recs[0]["DB"], "UniProtKB")
    self.assertEqual(recs[0]["DB_Object_ID"], "A0A023PXA5")
    self.assertEqual(recs[0]["DB_Object_Symbol"], "YAL019W-A")
    self.assertEqual(recs[0]["Qualifier"], [""])
    self.assertEqual(recs[0]["GO_ID"], "GO:0003674")
    self.assertEqual(recs[0]["DB:Reference"], ["GO_REF:0000015"])
    self.assertEqual(recs[0]["Evidence"], "ND")
    self.assertEqual(recs[0]["With"], [""])
    # Test GAF 2.1, it has the same fields as GAF 2.0
    recs = []
    with open("UniProt/gene_association.goa_yeast.1.gaf") as handle:
        for rec in GOA.gafiterator(handle):
            recs.append(rec)
    # Check number of records
    self.assertEqual(len(recs), 300)
    # Check keys are same as predefined fields
    self.assertEqual(sorted(recs[0].keys()), sorted(GOA.GAF20FIELDS))
    # Check values of first record
    self.assertEqual(recs[0]["DB"], "UniProtKB")
    self.assertEqual(recs[0]["DB_Object_ID"], "P17536")
    self.assertEqual(recs[0]["DB_Object_Symbol"], "TPM1")
    self.assertEqual(recs[0]["Qualifier"], [""])
    self.assertEqual(recs[0]["GO_ID"], "GO:0000001")
    self.assertEqual(recs[0]["DB:Reference"], ["PMID:10652251"])
    self.assertEqual(recs[0]["Evidence"], "TAS")
    self.assertEqual(recs[0]["With"], [""])
[ 9, 15877, 640 ]
def METHOD_NAME(cls) -> common_tests.CommonTestDriver: return common_tests.CommonTestDriver()
[ 19, 9, 1988 ]
def METHOD_NAME(self, tensor):
    size = tensor.size()
    if len(size) >= 2:
        batch_size = size[0] * size[1]
        expanded_size = (
            (batch_size,) + size[2:] if len(size) > 2 else (batch_size,)
        )
        tensor = tensor.view(expanded_size)
    return tensor
[ 2301, 11414 ]
def METHOD_NAME(self, a: Union[bitarray, int], start: int = ..., stop: int = ...) -> int: ...
[ 724 ]
def METHOD_NAME(self, operation_name, kwarg):
    if operation_name == "DescribeDBEngineVersions":
        return {
            "DBEngineVersions": [
                {
                    "Engine": "mysql",
                    "EngineVersion": "8.0.32",
                    "DBEngineDescription": "description",
                    "DBEngineVersionDescription": "description",
                },
            ]
        }
    return make_api_call(self, operation_name, kwarg)
[ 248, 93, 58, 128 ]
def METHOD_NAME(seed=0):
    np.random.seed(seed)
    cols = list("AB")
    df = pd.DataFrame(np.random.randint(0, 2, size=(1000, 2)), columns=cols)
    df["label"] = [0, 1] * int(len(df) / 2)
    return df, cols
[ 567, 126 ]
def METHOD_NAME() -> Optional[redislib.Redis]:
    if not configuration.redis_enabled.value:
        return None
    instance = redislib.Redis(
        host=configuration.get_str('redis_host'),
        port=configuration.get_int('redis_port'),
        db=configuration.get_int('redis_db'),
    )
    try:
        instance.ping()
    except redislib.exceptions.ConnectionError:
        return None
    return instance
[ 176 ]
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
    """Runs the network request through the client's chained policies.

    >>> from azure.core.rest import HttpRequest
    >>> request = HttpRequest("GET", "https://www.example.org/")
    <HttpRequest [GET], url: 'https://www.example.org/'>
    >>> response = client._send_request(request)
    <HttpResponse: 200 OK>

    For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

    :param request: The network request you want to make. Required.
    :type request: ~azure.core.rest.HttpRequest
    :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
    :return: The response of your network call. Does not do error handling on your response.
    :rtype: ~azure.core.rest.HttpResponse
    """
    request_copy = deepcopy(request)
    request_copy.url = self._client.format_url(request_copy.url)
    return self._client.send_request(request_copy, **kwargs)
[ 353, 377 ]
def METHOD_NAME(self):
    """test inplace changes of control points"""
    dim = 3
    res = [3] * dim
    box_data = c.nd_box(dim)

    n = c.splinepy.NURBS(**box_data)
    weights = box_data.pop("weights")
    b = c.splinepy.BSpline(**box_data)
    box_data.pop("knot_vectors")
    z = c.splinepy.Bezier(**box_data)
    r = c.splinepy.RationalBezier(**box_data, weights=weights)

    for s in (z, r, b, n):
        # init
        orig = s.copy()

        # check if we are good to start
        assert c.np.allclose(orig.sample(res), s.sample(res))

        # modify cps
        s.control_points /= 2
        assert c.np.allclose(orig.sample(res) / 2, s.sample(res))
[ 9, 5920, 194, 401, 182 ]
def METHOD_NAME(self):
    field = HoneypotField(initial=None)
    output = field.clean("")
    self.assertEqual(output, "")
[ 9, 2471, 61, 99, 623, -1, 320 ]
def METHOD_NAME(msg, video_id):
    """Send email notification when video encoding failed."""
    send_email_item(msg, "Video", video_id)
[ 353, 487 ]
def METHOD_NAME(u: bytes) -> int:
    """Convert from ISO 9660 7.3.3 format to uint32_t.

    Return the little-endian part always, to handle non-specs-compliant images.
    """
    return u[0] | (u[1] << 8) | (u[2] << 16) | (u[3] << 24)
[ 280, -1 ]
def METHOD_NAME(args):
    """
    List resource contacts for OSG.
    """
    if args.fqdn:
        results = topology_utils.get_resource_contacts_by_fqdn(args)
    else:
        results = topology_utils.get_resource_contacts(args)
    if args.fqdn:
        print_contacts(args, 'FQDN', results)
    else:
        print_contacts(args, 'Resource', results)
[ 245, 191, 824 ]
def METHOD_NAME(self, album):
    """Generate the context dict for the given path."""
    from . import __url__ as sigal_link

    self.logger.info("Output album : %r", album)
    ctx = {
        "album": album,
        "index_title": self.index_title,
        "settings": self.settings,
        "sigal_link": sigal_link,
        "theme": {
            "name": os.path.basename(self.theme),
            "url": url_from_path(os.path.relpath(self.theme_path, album.dst_path)),
        },
    }
    if self.settings["user_css"]:
        ctx["user_css"] = os.path.basename(self.settings["user_css"])
    return ctx
[ 567, 198 ]
def METHOD_NAME(self, a):
    conList = []
    for pair1id in range(len(a.map)):
        conOfPair1 = []
        pair1 = self.A.map[pair1id]
        for pair2id in range(len(a.map)):
            if pair1id == pair2id:
                continue
            pair2 = self.A.map[pair2id]
            pair1S, pair1T, p = pair1
            pair2S, pair2T, p = pair2
            if pair1S == pair2S or pair1T == pair2T:  # has conflict!
                conOfPair1.append(pair2id)
        conList.append(conOfPair1)
    return conList
[ 19, -1 ]
def METHOD_NAME(self, panel_params: panel_view) -> panel_ranges: return panel_ranges( x=self.trans_x.inverse(panel_params.x.range), y=self.trans_y.inverse(panel_params.y.range), )
[ -1, 661 ]
def METHOD_NAME(self, statistics_type, test_params, dataset_samples): pass
[ 9, 1101, 8604, 955, 1685, 3898, 411 ]
def METHOD_NAME(result):
    """
    When constructing the dtype, take extra care to set values to np.nan / -1
    (for ints) as 0 might have a meaning
    """
    for field in result.dtype.names:
        if np.issubdtype(result.dtype[field], np.integer):
            result[field][:] = -1
        else:
            result[field][:] = np.nan
[ 0, 4082, 1618 ]
def METHOD_NAME(debug):
    try:
        import markdown
        new_enough = markdown.__version_info__ >= MINIMUM_MARKDOWN
    except ModuleNotFoundError:
        new_enough = False
    if not new_enough:
        print("python3-markdown must be installed/upgraded")
        pip_install_package(debug, "markdown")
[ 9, 108 ]
def METHOD_NAME(self):
    connected, connection_error, pid = self.is_server_alive()

    if not connected:
        logger.error("The client was unable to establish a connection "
                     "with the Language Server. The error was: "
                     "{}".format(connection_error))
        raise Exception("An error occurred while trying to create a "
                        "client to connect to the Language Server! The "
                        "error was\n\n{}".format(connection_error))

    logger.info('Starting ZMQ connection...')
    self.context = zmq.Context()
    self.zmq_in_socket = self.context.socket(zmq.PAIR)
    self.zmq_in_socket.connect("tcp://{0}:{1}".format(
        LOCALHOST, self.zmq_in_port))
    self.zmq_out_socket = self.context.socket(zmq.PAIR)
    self.zmq_out_socket.connect("tcp://{0}:{1}".format(
        LOCALHOST, self.zmq_out_port))
    logger.info('Sending server_ready...')
    self.zmq_out_socket.send_pyobj({'id': 0, 'method': 'server_ready',
                                    'params': {'pid': pid}})
[ 977, 7319 ]
def METHOD_NAME(self) -> str:
    """
    Resource location
    """
    return pulumi.get(self, "location")
[ 708 ]
def METHOD_NAME(inst, log, args):
    if not inst.dbverify(bename=args.backend):
        log.fatal("dbverify failed")
        return False
    else:
        log.info("dbverify successful")
[ 15194, 1162 ]
async def METHOD_NAME(client, header_parameter): await client.parameter_grouping.post_shared_parameter_group_object(header_one=header_parameter)
[ 9, 72, 1644, 511, 846, 279 ]
def METHOD_NAME(self):
    # closing doc
    FreeCAD.closeDocument("PartDesignTestLinearPattern")
    # print ("omit closing document for debugging")
[ 531, 481 ]
def METHOD_NAME(msg):
    print(msg)
    sys.exit(0)
[ 169, 538 ]
def METHOD_NAME():
    translator = Translator()
    translated = translator.translate(text=inputText.get(1.0, END),
                                      src=srcLang.get().capitalize(),
                                      dest=destLang.get().capitalize())
    outputText.delete(1.0, END)
    outputText.insert(END, translated.text)
[ 4197, 711 ]
def METHOD_NAME(request):
    user = get_user(request)
    page = max(int(request.GET.get('page', 0)), 0)
    usersub = None
    refresh = request.GET.get('refresh')
    now = datetime.datetime.now()
    unmoderated = request.GET.get('unmoderated', False) == 'true'

    if unmoderated:
        recommended_feeds = RecommendedFeed.objects.filter(is_public=False, declined_date__isnull=True)[page:page+2]
    else:
        recommended_feeds = RecommendedFeed.objects.filter(is_public=True, approved_date__lte=now)[page:page+2]

    if recommended_feeds and request.user.is_authenticated:
        usersub = UserSubscription.objects.filter(user=user, feed=recommended_feeds[0].feed)
    if refresh != 'true' and page > 0:
        logging.user(request, "~FBBrowse recommended feed: ~SBPage #%s" % (page+1))

    recommended_feed = recommended_feeds and recommended_feeds[0]
    if not recommended_feeds:
        return HttpResponse("")

    feed_icon = MFeedIcon.objects(feed_id=recommended_feed.feed_id)

    if recommended_feed:
        return render(request, 'recommendations/render_recommended_feed.xhtml', {
            'recommended_feed': recommended_feed,
            'description': recommended_feed.description or recommended_feed.feed.data.feed_tagline,
            'usersub': usersub,
            'feed_icon': feed_icon and feed_icon[0],
            'has_next_page': len(recommended_feeds) > 1,
            'has_previous_page': page != 0,
            'unmoderated': unmoderated,
            'today': datetime.datetime.now(),
            'page': page,
        })
    else:
        return HttpResponse("")
[ 557, 4324, 561 ]
async def METHOD_NAME(module_type, module_info):
    module = import_module(module_info['module'])
    return getattr(module, module_type)(module_info)
[ 557, 298 ]
def METHOD_NAME(self) -> "ExternalResourceUniqueKey": return ExternalResourceUniqueKey.from_spec(self)
[ 147, 279 ]
def METHOD_NAME(channame, cldrdate, meetname): return statfile(channame, cldrdate, meetname, typecont="Raw")
[ -1 ]
def METHOD_NAME(monkeypatch: MonkeyPatch, tmp_path: Path) -> Path:
    path = tmp_path / "project_dir"
    path.mkdir(exist_ok=True)
    monkeypatch.chdir(path)
    return path
[ 4136, 155, 1190 ]
def METHOD_NAME():
    with patch("salt.utils.napalm.is_proxy", MagicMock(return_value=True)):
        dict_ = {"foo": {"bar": {"baz": True}}}
        assert dict_ == napalm_formula.setval("foo:bar:baz", True)
[ 9, 17948 ]
def METHOD_NAME(self): return AudioExporter( self.export_dir, self.test_dataset.context.sample_rate, )
[ 557, 734, 6168 ]
def METHOD_NAME():
    test = PingAddress(str(uuid.uuid4()))
    with pytest.raises(CheckProcessException):
        test.commit()
[ 9, 1677, 46 ]
def METHOD_NAME():
    def layout_fn(node):
        if node.props.get('lca_node_name'):
            lca = node.props.get('lca_node_name')
            color = node.props.get('Lca_color')
            level = get_level(node)
            lca_face = RectFace(15, float('inf'),
                                color=color, text=lca,
                                fgcolor="white", padding_x=1, padding_y=1)
            lca_face.rotate_text = True
            node.add_face(lca_face, position='aligned', column=level)
            node.add_face(lca_face, position='aligned', column=level,
                          collapsed_only=True)
    layout_fn.__name__ = 'Last common ancestor'
    layout_fn.contains_aligned_face = True
    return layout_fn
[ 19, 571, 9570, -1 ]
def METHOD_NAME(file_name):
    try:
        with open(file_name, 'r', encoding='utf8') as f:
            file_content = f.read()
        # pattern to match the text between <head> and </head>
        pattern = re.compile(r'<head>.*?</head>', flags=re.DOTALL)
        file_content = re.sub(pattern, '', file_content)
        html, history = file_content.split('<hr color="blue"> \n\n raw chat context:\n')
        history = history.strip('<code>')
        history = history.strip('</code>')
        history = history.split("\n>>>")
        return list(filter(lambda x: x != "", history))[0][:100]
    except:
        return ""
[ 370, 171, 3182 ]
def METHOD_NAME(self, *args, **kwargs):
    """
    :returns: estimate of the loss
    :rtype: float

    Take a gradient step on the loss function.
    Arguments are passed to the model and guide.
    """
    with poutine.trace(param_only=True) as param_capture:
        loss = self.loss_and_grads(True, None, *args, **kwargs)

    params = set(
        site["value"].unconstrained()
        for site in param_capture.trace.nodes.values()
        if site["value"].grad is not None
    )

    self.optim(params)
    pyro.infer.util.zero_grads(params)
    return torch_item(loss)
[ 367 ]
def METHOD_NAME():
    fraud_cost = FraudCost(amount_col="value")
    y_predicted = np.array([0, 1, 2])
    y_true = np.array([1, 0, 1])
    with pytest.raises(
        ValueError,
        match="y_predicted contains more than two unique values",
    ):
        fraud_cost.score(y_true, y_predicted)

    y_true = np.array([0, 1, 2])
    y_predicted = np.array([1, 0, 1])
    with pytest.raises(ValueError, match="y_true contains more than two unique values"):
        fraud_cost.score(y_true, y_predicted)
[ 9, 808, 4138, 489, 1603, 2768, 199 ]
def METHOD_NAME(self):
    info = super(RSVisualization, self).METHOD_NAME()
    info["Upper Threshold"] = self.maxVal
    info["Lower Threshold"] = self.minVal
    return info
[ 19, 2900, 100 ]
def METHOD_NAME(namespace):
    if namespace.system_assigned is None and namespace.user_assigned is None:
        logger.warning(OBSOLETE_APP_IDENTITY_ASSIGN)
[ 883, 217, 654, 2989, 44, 434 ]
def METHOD_NAME(self):
[ -1 ]
def METHOD_NAME():
    """C13 AtomicRepresentation."""
    return _AtomicRepresentation(z=6, a=13)
[ -1 ]
def METHOD_NAME(folder: str, granularity: str, commit_sha_lists: Optional[List[str]] = None):
    dataframe = None
    for (dirpath, _, filenames) in os.walk(folder):
        for filename in tqdm(filenames):
            splits = dirpath.split("/")
            job_name = splits[-1]
            sha = splits[-2]
            if not commit_sha_lists or sha in commit_sha_lists:
                with bz2.open(os.path.join(dirpath, filename), 'r') as zf:
                    string = zf.read().decode("utf-8")
                    data = json.loads(string)
                    # create a deep json with sha and job info
                    data['sha'] = sha
                    data['job'] = job_name
                    df = _json_to_df(data, granularity)
                    dataframe = df if dataframe is None else dataframe.append(df)
    return dataframe
[ 214, 61, 294, 577 ]
def METHOD_NAME(qtbot, parent_applet): return prepare_widget(qtbot, BatchProcessingGui(parent_applet))
[ 2277, 3613, 2139 ]
def METHOD_NAME(self):
    """
    Test changing bottom color.
    """
    self.createWidget()
    self._widget.BackgroundPlugin._bottom = QtGui.QColor(0, 0, 255)
    self._widget.BackgroundPlugin.updateOptions()
    self._widget.BackgroundPlugin.windowRequiresUpdate.emit()
    self.assertImage('testBottomColor.png')
[ 9, 194, 7887 ]
def METHOD_NAME(self):
    self._cloud_manager.clean_up()
    rpc.reset_gateway_mconfigs()
[ 531, 481 ]
def METHOD_NAME(self):
    """Test QNode in autograd interface."""
    def qfunc(amplitude):
        qml.AmplitudeEmbedding(amplitude, wires=0)
        qml.AmplitudeEmbedding(amplitude, wires=1)
        return qml.state()

    dev = qml.device("default.qubit", wires=2)
    optimized_qfunc = qml.transforms.merge_amplitude_embedding(qfunc)
    optimized_qnode = qml.QNode(optimized_qfunc, dev)
    amplitude = np.array([0.0, 1.0], requires_grad=True)
    # Check the state |11> is being generated.
    assert optimized_qnode(amplitude)[-1] == 1
[ 9, 411, 4746, 1632, 898 ]
def METHOD_NAME(self) -> str:
    """
    The name of the resource
    """
    return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME(self):
    uk = build('pass_and_fail.robot').resource.keywords[0]
    assert_equal(uk.name, 'My Keyword')
    assert_equal(uk.args, ('${who}',))
[ 9, 21, 2537 ]
def METHOD_NAME():
    try:
        return THREAD_LOCAL.request_context
    except Exception:
        return None
[ 19, 127, 377, 43, 600 ]
def METHOD_NAME(self):
    o = objective()
    self.assertEqual(o.active, True)
    o.deactivate()
    self.assertEqual(o.active, False)
    o.activate()
    self.assertEqual(o.active, True)
    b = block()
    self.assertEqual(b.active, True)
    b.deactivate()
    self.assertEqual(b.active, False)
    b.o = o
    self.assertEqual(o.active, True)
    self.assertEqual(b.active, False)
    o.deactivate()
    self.assertEqual(o.active, False)
    self.assertEqual(b.active, False)
    b.activate()
    self.assertEqual(o.active, False)
    self.assertEqual(b.active, True)
    b.activate(shallow=False)
    self.assertEqual(o.active, True)
    self.assertEqual(b.active, True)
    b.deactivate(shallow=False)
    self.assertEqual(o.active, False)
    self.assertEqual(b.active, False)
[ 9, 923 ]
def METHOD_NAME(self):
[ 9, 2497, 41, 7717, 1056, 277, 2 ]
def METHOD_NAME(vm):
[ 123, 1382 ]
def METHOD_NAME(show_plots, useRefAttitude):
    """This function is called by the py.test environment."""
    # each test method requires a single assert method to be called
    testFailCount = 0  # zero unit test result counter
    testMessages = []  # create empty array to store test log messages

    dataPos, dataVel, dataPos2, dataVel2, dataAttErr, numDataPoints, figureList = \
        scenarioFormationReconfig.run(show_plots, useRefAttitude)

    numTruthPoints = 5
    skipValue = int(numDataPoints / numTruthPoints)
    dataPos = dataPos[::skipValue]
    dataVel = dataVel[::skipValue]
    dataPos2 = dataPos2[::skipValue]
    dataVel2 = dataVel2[::skipValue]
    dataAttErr = dataAttErr[::skipValue]

    # setup truth data for unit test
    truePos = [
        [-2668550.43581724, -6980898.10827464, 4622064.93739554],
        [3239719.11900864, -12361691.80735476, -5611358.11637523],
        [6956886.21097725, -6512028.87881183, -12049680.37988781],
        [6203357.9456249, 3386315.3204033, -10744531.13935835],
        [-904077.16857348, 6986198.37448257, 1565907.58993232],
        [-1497223.74594733, -9688096.70685282, 2593267.5982793],
    ]
    trueVel = [
        [1952.09236395, -6264.36921517, -3381.12315544],
        [2251.85678459, 773.95089899, -3900.33036227],
        [735.27848237, 3673.48086541, -1273.53968917],
        [-1521.51022886, 4060.99888361, 2635.33302062],
        [-3836.04015487, -3920.91042533, 6644.21644811],
        [2480.74592749, -4214.7585866, -4296.77798709],
    ]
    truePos2 = trueVel2 = []
    if useRefAttitude:
        truePos2 = [
            [-2667507.97243685, -6983454.82971912, 4626165.55303272],
            [3241108.94387093, -12365029.53882338, -5606492.86033854],
            [6957594.00740821, -6517924.48451533, -12049169.67128781],
            [6203604.90565029, 3379408.49215067, -10749322.78076205],
            [-899274.80454994, 6988633.22340264, 1559582.60590812],
            [-1499288.94036976, -9685672.95831528, 2596285.54475707],
        ]
        trueVel2 = [
            [1953.40294294, -6262.33384039, -3379.83469644],
            [2251.63426593, 772.82266533, -3901.38099724],
            [734.53359595, 3672.29757633, -1275.08087935],
            [-1519.3712049, 4061.85725527, 2633.58633737],
            [-3836.64595382, -3914.06736883, 6647.63278995],
            [2478.79370247, -4217.73758483, -4296.15323074],
        ]
    else:
        truePos2 = [
            [-2667507.97243685, -6983454.82971912, 4626165.55303272],
            [3241108.94387093, -12365029.53882338, -5606492.86033854],
            [6957593.3006838, -6517926.49084233, -12049170.68096033],
            [6203601.49094786, 3379393.02430127, -10749332.43260227],
            [-899283.12085206, 6988580.86462193, 1559588.28436277],
            [-1499187.38519734, -9685744.8647062, 2596126.83315495],
        ]
        trueVel2 = [
            [1953.40294294, -6262.33384039, -3379.83469644],
            [2251.63426593, 772.82266533, -3901.38099724],
            [734.53150821, 3672.2915335, -1275.08394125],
            [-1519.37151914, 4061.84582195, 2633.58351375],
            [-3836.64758968, -3914.12259276, 6647.64791968],
            [2478.82196216, -4217.65022651, -4296.20016979],
        ]
    trueAttErr = []
    if useRefAttitude:
        trueAttErr = [
            [0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
            [5.21804822e-15, 0.00000000e+00, 0.00000000e+00],
            [2.73188701e-03, 2.42888055e-03, -1.85264053e-03],
            [1.50892109e-01, 1.31129690e-02, 1.99974845e-02],
            [1.66533454e-15, 2.49085251e-16, 1.43583255e-16],
            [2.01960670e-12, 4.98741819e-12, 7.61317659e-12],
        ]
    else:
        trueAttErr = [
            [0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
            [1.01291777e-15, -2.81104312e-15, -2.10435630e-15],
            [-1.09830506e-03, 2.86489461e-03, 9.96740835e-04],
            [9.08383514e-02, 6.71564082e-02, -3.21870531e-02],
            [1.35545560e-15, 1.02827666e-15, -6.35661937e-16],
            [-5.65881607e-05, 3.19108458e-04, -1.34177223e-04],
        ]

    # compare the results to the truth values
    accuracy = 1e-6
    testFailCount, testMessages = unitTestSupport.compareArrayRelative(
        truePos, dataPos, accuracy, "chief r_BN_N Vector", testFailCount, testMessages)
    testFailCount, testMessages = unitTestSupport.compareArrayRelative(
        trueVel, dataVel, accuracy, "chief v_BN_N Vector", testFailCount, testMessages)
    testFailCount, testMessages = unitTestSupport.compareArrayRelative(
        truePos2, dataPos2, accuracy, "deputy r2_BN_N Vector", testFailCount, testMessages)
    testFailCount, testMessages = unitTestSupport.compareArrayRelative(
        trueVel2, dataVel2, accuracy, "deputy v2_BN_N Vector", testFailCount, testMessages)
    testFailCount, testMessages = unitTestSupport.compareArray(
        trueAttErr, dataAttErr, accuracy, "deputy attitude Error", testFailCount, testMessages)

    # save the figures to the Doxygen scenario images folder
    for pltName, plt in list(figureList.items()):
        unitTestSupport.saveScenarioFigure(pltName, plt, path)

    # print out success message if no error were found
    if testFailCount == 0:
        print("PASSED ")
    else:
        print("# Errors:", testFailCount)
        print(testMessages)

    # each test method requires a single assert method to be called
    # this check below just makes sure no sub-test failures were found
    assert testFailCount < 1, testMessages
[ 9, 7061, 12715, 10159 ]
def METHOD_NAME(self, result_item: int | _ResultItem) -> None: ...
[ 356, 1571, 1024 ]
def METHOD_NAME(ckan_config): assert ckan_config[u"ckan.site_url"] == u"https://example.org"
[ 9, 1153, 12634, 200, 1743, 41, 9 ]
def METHOD_NAME(
    input: Union[str, os.PathLike, Mapping[str, Any], List[Any], None],
    base: int = 1,
) -> Iterator[Optional[str]]:
    if isinstance(input, list):
        # most complicated case: list of inits
        # for multiple chains, we need to create multiple files
        # which look like somename_{i}.json and then pass somename.json
        # to CmdStan
        mother_file = create_named_text_file(
            dir=_TMPDIR, prefix='', suffix='.json', name_only=True
        )
        new_files = [
            os.path.splitext(mother_file)[0] + f'_{i+base}.json'
            for i in range(len(input))
        ]
        for init, file in zip(input, new_files):
            if isinstance(init, dict):
                write_stan_json(file, init)
            elif isinstance(init, str):
                shutil.copy(init, file)
            else:
                raise ValueError(
                    'A list of inits must contain dicts or strings, not'
                    + str(type(init))
                )
        try:
            yield mother_file
        finally:
            for file in new_files:
                with contextlib.suppress(PermissionError):
                    os.remove(file)
    else:
        yield from _temp_single_json(input)
[ 963, 12343 ]
def METHOD_NAME(
    organization: Organization, language_code: str
) -> str:
    if (
        organization.language
        and language_code != organization.language.language_code
        and organization.translation_org.filter(
            language__language_code=language_code
        ).exists()
    ):
        return (
            organization.translation_org.filter(language__language_code=language_code)
            .first()
            .short_description_translation
        )
    return organization.short_description
[ 19, 1044, 1707, 1067 ]
def METHOD_NAME(self): self._check_plt_entries(os.path.join("x86_64", "libc.so.6"))
[ 9, 13144, 1036, 7614 ]
def METHOD_NAME(self, response):
    title_str = " ".join(response.css("#mn-pagetitle *::text").extract())
    if "Emergency" in title_str:
        return "Emergency Meeting"
    if "special" in title_str.lower():
        return "Special Meeting"
    return "Commission"
[ 214, 2893 ]
def METHOD_NAME(data_class):
    """Tests storing and retrieving a simple object."""
    with tempfile.TemporaryDirectory() as temporary_directory:
        local_storage = LocalFileStorage(temporary_directory)
        storage_object = data_class()

        # Make sure the validation fails
        with pytest.raises(ValueError):
            local_storage.store_object(storage_object)

        # This should now pass.
        storage_object.some_attribute = 10
        storage_key = local_storage.store_object(storage_object)
        assert local_storage.has_object(storage_object)

        retrieved_object, _ = local_storage.retrieve_object(storage_key)
        assert retrieved_object is not None
        assert storage_object.json() == retrieved_object.json()

        # Ensure that the same key is returned when storing duplicate data
        new_storage_key = local_storage.store_object(storage_object)
        assert storage_key == new_storage_key
[ 9, 53, 1308, 61, 404 ]
def METHOD_NAME(self):
    """
    Test for getting class for ID token admin.
    """
    id_token_admin_class = get_id_token_admin_class()
    default_id_token_admin_class = oauth2_settings.ID_TOKEN_ADMIN_CLASS
    assert id_token_admin_class == default_id_token_admin_class
[ 9, 19, 147, 466, 2870, 2 ]
def METHOD_NAME(self):
    """Test permissions for signing authority confirmations"""
    initiator = \
        self.users[self.user_map[TestSigningAuthorityConfirmations.UserRelationship.INITIATOR]]
    respondent = \
        self.users[self.user_map[TestSigningAuthorityConfirmations.UserRelationship.RESPONDENT]]

    trades = DataCreationUtilities.create_possible_credit_trades(
        initiator.organization,
        respondent.organization
    )

    expected_results = defaultdict(lambda: False)

    # key (relationship, status, rescinded?)
    expected_results[(
        TestSigningAuthorityConfirmations.UserRelationship.INITIATOR,
        self.statuses['draft'].status,
        False
    )] = True
    expected_results[(
        TestSigningAuthorityConfirmations.UserRelationship.INITIATOR,
        self.statuses['submitted'].status,
        False
    )] = True
    expected_results[(
        TestSigningAuthorityConfirmations.UserRelationship.RESPONDENT,
        self.statuses['accepted'].status,
        False
    )] = True
    expected_results[(
        TestSigningAuthorityConfirmations.UserRelationship.RESPONDENT,
        self.statuses['submitted'].status,
        False
    )] = True

    for (trade, relationship) in \
            product(trades, TestSigningAuthorityConfirmations.UserRelationship):

        # Sign an array of assertions, like the frontend does
        with self.subTest(
                "Testing signing confirmation permissions as array",
                relationship=relationship,
                status=trade['status'],
                rescinded=trade['rescinded']
        ):
            payload = list(map(lambda assertion, trade_id=trade['id']: {
                'hasAccepted': True,
                'signingAuthorityAssertion': assertion.id,
                'creditTrade': trade_id
            }, SigningAuthorityAssertion.objects.all()))

            response = self.clients[self.user_map[relationship]].post(
                '/api/signing_authority_confirmations',
                content_type='application/json',
                data=json.dumps(payload)
            )

            valid = status.is_success(response.status_code)
            self.assertEqual(
                valid,
                expected_results[(relationship, trade['status'], trade['rescinded'])]
            )

        # also try one at a time (not a JSON array)
        with self.subTest(
                "Testing signing confirmation permissions one at a time",
                relationship=relationship,
                status=trade['status'],
                rescinded=trade['rescinded']
        ):
            assertion_id = SigningAuthorityAssertion.objects.first().id
            payload = {
                'hasAccepted': True,
                'signingAuthorityAssertion': assertion_id,
                'creditTrade': trade['id']
            }

            response = self.clients[self.user_map[relationship]].post(
                '/api/signing_authority_confirmations',
                content_type='application/json',
                data=json.dumps(payload)
            )

            valid = status.is_success(response.status_code)
            self.assertEqual(
                valid,
                expected_results[(relationship, trade['status'], trade['rescinded'])]
            )
[ 9, 4943, 840, 13182 ]
def METHOD_NAME(self) -> None:
    node_source = {'marker': ['square', 'circle', 'x']}
    kw = bpg.get_graph_kwargs(node_source, {}, node_marker='marker')
    node_glyph = kw['node_renderer'].glyph
    assert isinstance(node_glyph, Scatter)
    assert node_glyph.marker == field('marker')
[ 9, 276, 1716, 1464, -1, 3705 ]
def METHOD_NAME() -> None:
    config = {
        "version": "v1",
        "kind": "readable_storage",
        "name": "",
        "storage": {"key": 1, "set_key": "x"},
        "readiness_state": "limited",
        "schema": {"columns": []},
        "query_processors": [],
    }
    with pytest.raises(JsonSchemaValueException) as e:
        STORAGE_VALIDATORS["readable_storage"](config)
    assert e.value.message == "data.storage.key must be string"
[ 9, 532, 948 ]
def METHOD_NAME(self) -> str: return "SET @@session.time_zone='+00:00'"
[ 0, 2723, 24, 1166 ]
def METHOD_NAME(self) -> int: return 4
[ 984, 938 ]
def METHOD_NAME(self, state: int): ...
[ 238, 551 ]
def METHOD_NAME(self, redis_url=None, **kwargs):
    try:
        if redis_url:
            with from_url(redis_url, **kwargs) as conn:
                conn.ping()  # exceptions may be raised upon ping
        else:
            with Redis(**kwargs) as conn:
                conn.ping()  # exceptions may be raised upon ping
    except (ConnectionRefusedError, exceptions.ConnectionError, exceptions.TimeoutError) as e:
        self.add_error(ServiceUnavailable(f"Unable to connect to Redis: {type(e).__name__}"), e)
    except Exception as e:
        self.add_error(ServiceUnavailable("Unknown error"), e)
[ 250, 2485 ]
def METHOD_NAME():
    """When it has no token cached, a credential should request one every time get_token is called"""
    credential = MockCredential()
    token = credential.get_token(SCOPE)

    credential.acquire_token_silently.assert_called_once_with(SCOPE, claims=None, tenant_id=None)
    credential.request_token.assert_called_once_with(SCOPE, claims=None, tenant_id=None)
    assert token.token == MockCredential.NEW_TOKEN.token
[ 9, 654, 175, 466 ]
def METHOD_NAME(get_graph):
    g = get_graph
    base_three = Namespace("http://three.org/")
    ds1 = Dataset()
    ds1.bind("dct", DCTERMS)
    ds1.bind("skos", SKOS)
    g8 = ds1.graph(URIRef("http://g8.com/"), base=base_one)
    g9 = ds1.graph(URIRef("http://g9.com/"))
    g8 += g
    g9 += g
    g9.base = base_two
    ds1.base = base_three

    trix = ds1.serialize(format="trix", base=Namespace("http://two.org/"))
    assert '<graph xml:base="http://one.org/">' in trix
    assert '<graph xml:base="http://two.org/">' in trix
    assert '<TriX xml:base="http://two.org/"' in trix
[ 9, 10748, 1629 ]
def METHOD_NAME(self, name, params): return self.framework_integration_cls.METHOD_NAME(self, name, params)
[ 557, 200 ]
def METHOD_NAME(library_interpreter, code, ignore_warnings, is_error_handling):
    '''handle_error

    Helper function for handling errors returned by nidmm.Library.
    It calls back into the LibraryInterpreter to get the corresponding error
    description and raises if necessary.
    '''
    if _is_success(code) or (_is_warning(code) and ignore_warnings):
        return
    if is_error_handling:
        # The caller is in the midst of error handling and an error occurred.
        # Don't try to get the description or we'll start recursing until the stack overflows.
        description = ''
    else:
        description = library_interpreter.get_error_description(code)
    if _is_error(code):
        raise DriverError(code, description)
    assert _is_warning(code)
    warnings.warn(DriverWarning(code, description))
[ 276, 168 ]
def METHOD_NAME(self, instance): return instance.METHOD_NAME
[ 137, 5694 ]
def METHOD_NAME():
    """Test that ``offline`` is added for backwards-compatibility."""
    config_file = Path(esmvalcore.__file__).parent / 'config-user.yml'
    cfg = Config(CFG.copy())
    cfg.load_from_file(config_file)
    session = cfg.start_session('my_session')
    assert session['search_esgf'] == 'never'
    assert session['offline'] is True
[ 9, 8024, 21, 240 ]
def METHOD_NAME(self):
    # If nothing is configured, fall back to the configuration in the template
    if self.notify_type == "[]":
        template_cls = (
            apps.get_model("tasktmpl3", "TaskTemplate")
            if self.template_source == PROJECT
            else apps.get_model("template", "CommonTemplate")
        )
        template = template_cls.objects.filter(id=self.template_id).only("notify_type").first()
        notify_type = json.loads(template.notify_type)
    else:
        notify_type = json.loads(self.notify_type)
    logger.info(f"[clocked_task get_notify_type] success: {notify_type}")
    return notify_type if isinstance(notify_type, dict) else {"success": notify_type, "fail": notify_type}
[ 19, 959, 44 ]
def METHOD_NAME(self) -> str:
    """
    The provisioning state of the resource.
    """
    return pulumi.get(self, "provisioning_state")
[ 1994, 551 ]
def METHOD_NAME(self, object_id: str, meta: _CommonMeta):
    if isinstance(meta, _ChunkMeta):
        for band in meta.bands:
            self._band_chunks[band].add(object_id)
    prev_meta = self._store.get(object_id)
    if prev_meta:
        meta = meta.merge_from(prev_meta)
    self._store[object_id] = meta
[ 0, 1094 ]
def METHOD_NAME(self, path):
    palette_name = self.options.palette
    palette = ThreadCatalog().get_palette_by_name(palette_name)
    colors = []
    palette_numbers = []
    palette_colors = []
    for color in palette:
        palette_numbers.append(color.number)
        palette_colors.append('#%s' % color.hex_digits.lower())
    with open(path) as threadlist:
        for line in threadlist:
            if line[0].isdigit():
                # some threadlists may add a # in front of the catalog number
                # let's remove it from the entire string before splitting it up
                thread = line.replace('#', '').split()
                catalog_number = set(thread[1:]).intersection(palette_numbers)
                if catalog_number:
                    color_index = palette_numbers.index(next(iter(catalog_number)))
                    colors.append([palette_colors[color_index], None])
                else:
                    # No color found
                    colors.append([None, None])
    return colors
[ 214, 17120, 604, 2824, 106 ]
def METHOD_NAME(log_data):
    # Sample data contains entire game API request/response, but log clients
    # will trim data down to the keys specified in `accepted_api_params`
    command = log_data['data']['request']['command']
    requested_data = accepted_api_params[command]

    trimmed_data = {
        'data': {
            'request': {},
            'response': {},
            '__version': accepted_api_params['__version'],
        }
    }
    for key in requested_data['request']:
        trimmed_data['data']['request'][key] = log_data['data']['request'].get(key)
    for key in requested_data['response']:
        trimmed_data['data']['response'][key] = log_data['data']['response'].get(key)

    return trimmed_data
[ 19, 2782, 219 ]
def METHOD_NAME(self):
    set_monitor_status(self.monitor_status_file, "Started")
    self.status.set_active()
    self.status.set_field("entry", 0)
    self.status.inc_value("entry", 2)
    self.assertTrue(self.status.get_status()["entry"], 2)
    self.status.set_field("data", 0)
    self.status.inc_value("data", 2)
    self.assertTrue(self.status.get_status()["data"], 2)
    self.status.set_field("meta", 0)
    self.status.inc_value("meta", 2)
    self.assertTrue(self.status.get_status()["meta"], 2)
    self.status.set_field("failures", 0)
    self.status.inc_value("failures", 2)
    self.assertTrue(self.status.get_status()["failures"], 2)
[ 9, 2405, 99 ]
def METHOD_NAME(self, other):
    """Return a new set which is the union of ``self`` and ``other``.

    Returns the same Set type as this set.
    """
    obj = self._clone()
    obj.union_update(other)
    return obj
[ 3006 ]
def METHOD_NAME():
    """
    Return the total time spent on the process.

    This is the sum of user and system time as returned by systimes().
    """
    user, system = systimes()
    return user + system
[ -1 ]
def METHOD_NAME(adult_split_dataset_and_model):
    # Arrange
    _, val, model = adult_split_dataset_and_model
    val = val.sample()
    val.data['native-country'].iloc[0] = np.nan
    val.data['native-country'] = pd.Categorical(val.data['native-country'])
    val.data['income'] = pd.Categorical(val.data['income'])
    check = WeakSegmentsPerformance(n_top_features=5)

    # Act
    result = check.run(val, model)
    segments = result.value['weak_segments_list']

    # Assert
    assert_that(segments, has_length(7))
[ 9, 543, 4465, 1030 ]
def METHOD_NAME(result: Dict[str, Any], remote: str):
    from dvc.utils import humanize

    def join_exps(exps):
        return humanize.join([f"[bold]{e}[/]" for e in exps])

    if diverged_exps := result.get("diverged"):
        exps = join_exps(diverged_exps)
        ui.error_write(
            f"[yellow]Local experiment {exps} has diverged "
            "from remote experiment with the same name.\n"
            "To override the remote experiment re-run with '--force'.",
            styled=True,
        )
    if uptodate_exps := result.get("up_to_date"):
        exps = join_exps(uptodate_exps)
        verb = "are" if len(uptodate_exps) > 1 else "is"
        ui.write(
            f"Experiment {exps} {verb} up to date on Git remote {remote!r}.",
            styled=True,
        )
    if pushed_exps := result.get("success"):
        exps = join_exps(pushed_exps)
        ui.write(f"Pushed experiment {exps} to Git remote {remote!r}.", styled=True)
    if not uptodate_exps and not pushed_exps:
        ui.write("No experiments to push.")
    if uploaded := result.get("uploaded"):
        stats = {"uploaded": uploaded}
        ui.write(humanize.get_summary(stats.items()))
    if project_url := result.get("url"):
        ui.rich_print(
            "View your experiments at", project_url, style="yellow", soft_wrap=True
        )
[ 390, 1571 ]
def METHOD_NAME(self):
    print("Mining blocks...")
    self.nodes[0].generate(4)
    self.sync_all()

    walletinfo = self.nodes[0].getwalletinfo()
    assert_equal(walletinfo['immature_balance'], 40)
    assert_equal(walletinfo['balance'], 0)

    self.sync_all()
    self.nodes[1].generate(102)
    self.sync_all()

    assert_equal(self.nodes[0].getbalance(), 40)
    assert_equal(self.nodes[1].getbalance(), 20)
    assert_equal(self.nodes[2].getbalance(), 0)

    # At this point in time, commitment tree is the empty root

    # Node 0 creates a joinsplit transaction
    mytaddr0 = get_coinbase_address(self.nodes[0])
    myzaddr0 = self.nodes[0].z_getnewaddress()
    recipients = []
    recipients.append({"address": myzaddr0, "amount": Decimal('10.0') - LEGACY_DEFAULT_FEE})
    myopid = self.nodes[0].z_sendmany(mytaddr0, recipients, 1, LEGACY_DEFAULT_FEE, 'AllowRevealedSenders')
    wait_and_assert_operationid_status(self.nodes[0], myopid)

    # Sync up mempools and mine the transaction.  All nodes have the same anchor.
    self.sync_all()
    self.nodes[0].generate(1)
    self.sync_all()

    # Stop nodes.
    stop_nodes(self.nodes)
    wait_bitcoinds()

    # Relaunch nodes and partition network into two:
    # A: node 0
    # B: node 1, 2
    self.nodes = start_nodes(3, self.options.tmpdir,
                             extra_args=[['-regtestshieldcoinbase', '-debug=zrpc']] * 3)
    connect_nodes_bi(self.nodes, 1, 2)

    # Partition B, node 1 mines an empty block
    self.nodes[1].generate(1)

    # Partition A, node 0 creates a joinsplit transaction
    recipients = []
    recipients.append({"address": myzaddr0, "amount": Decimal('10.0') - LEGACY_DEFAULT_FEE})
    myopid = self.nodes[0].z_sendmany(mytaddr0, recipients, 1, LEGACY_DEFAULT_FEE, 'AllowRevealedSenders')
    txid = wait_and_assert_operationid_status(self.nodes[0], myopid)
    rawhex = self.nodes[0].getrawtransaction(txid)

    # Partition A, node 0 mines a block with the transaction
    self.nodes[0].generate(1)
    # Same as self.sync_all() but only for node 0
    sync_blocks(self.nodes[:1])
    sync_mempools(self.nodes[:1])

    # Partition B, node 1 mines the same joinsplit transaction
    txid2 = self.nodes[1].sendrawtransaction(rawhex)
    assert_equal(txid, txid2)
    self.nodes[1].generate(1)
    # Same as self.sync_all() but only for nodes 1 and 2
    sync_blocks(self.nodes[1:])
    sync_mempools(self.nodes[1:])

    # Check that Partition B is one block ahead and that they have different tips
    assert_equal(self.nodes[0].getblockcount() + 1, self.nodes[1].getblockcount())
    assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash())

    # Shut down all nodes so any in-memory state is saved to disk
    stop_nodes(self.nodes)
    wait_bitcoinds()

    # Relaunch nodes and reconnect the entire network
    self.nodes = start_nodes(3, self.options.tmpdir,
                             extra_args=[['-regtestshieldcoinbase', '-debug=zrpc']] * 3)
    connect_nodes_bi(self.nodes, 0, 1)
    connect_nodes_bi(self.nodes, 1, 2)
    connect_nodes_bi(self.nodes, 0, 2)

    # Mine a new block and let it propagate
    self.nodes[1].generate(1)
    # Due to a bug in v1.0.0-1.0.3, node 0 will die with a tree root assertion,
    # so sync_all() will throw an exception.
    self.sync_all()

    # v1.0.4 will reach here safely
    assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
    assert_equal(self.nodes[1].getbestblockhash(), self.nodes[2].getbestblockhash())
[ 22, 9 ]