Column schema:
  query            string   (lengths 9 to 9.05k)
  document         string   (lengths 10 to 222k)
  metadata         dict
  negatives        list     (length 30)
  negative_scores  list     (length 30)
  document_score   string   (lengths 4 to 10)
  document_rank    string   (2 classes)
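Each row pairs a natural-language query with a positive code document and 30 mined negatives, and the per-row metadata marks the (query, document, negatives) columns as a triplet objective. Below is a minimal sketch of turning one split into training triplets with the Hugging Face `datasets` library; the dataset path is a placeholder, not the real identifier, and the top-k hard-negative selection is an illustrative choice.

```python
from datasets import load_dataset

# Placeholder path -- substitute the actual dataset identifier.
ds = load_dataset("org/code-search-triplets", split="train")

def to_triplets(row, k=5):
    """Yield (query, positive, negative) triples using the k highest-scoring negatives."""
    # negative_scores are stored as strings in this dump, so cast to float before sorting.
    ranked = sorted(
        zip(row["negatives"], row["negative_scores"]),
        key=lambda pair: float(pair[1]),
        reverse=True,
    )
    for negative, _score in ranked[:k]:
        yield row["query"], row["document"], negative

triplets = [t for row in ds for t in to_triplets(row)]
```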
Set the P_source for the single source constraint
def _set_p_source(self, p_source, p_rec_div=1):
    self.p_source = p_source
    self.p_S_fin = (1-p_source)*self.p_sus
    self.p_rec_div = p_rec_div
    self.p_infect_fin = (1-p_source)* (1-self.p_sus)
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_source(self, source):\n self.data['source'] = source", "def set_flow_source(self, source):\n self._source = source", "def _set_source(self, source):\n if source != self._source:\n self._source = source\n self._channel = \"\"\n self._channel_name = \"\"\n self._is_forced_val = True\n self._forced_count = 0", "def set_source(self, source_name):\n self.source = source_name", "def _set_source(source, context):\n if isinstance(source, (str, list, dict, Dataset)):\n return Source(source, context)\n elif isinstance(source, Source):\n return source\n else:\n raise ValueError('Wrong source')", "def source(self, source: Source):\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def setSource(self, *args):\n return _libsbml.ExternalModelDefinition_setSource(self, *args)", "def source_of_published(self, source_of_published):\n\n self._source_of_published = source_of_published", "def setAddressSource(self, address_source):\n # type: (str)->None\n\n self._validator.validate_one(\n 'source', VALID_OPTS['source'], address_source)\n self._ifAttributes['source'] = address_source", "def source_id(self, source_id):\n\n self._source_id = source_id", "def source_id(self, source_id):\n\n self._source_id = source_id", "def source_domain(self, source_domain):\n\n self._source_domain = source_domain", "def set_source(self, source):\n Analyzer.set_source(self, source)\n\n # Phy-layer logs\n if self.verfiy:\n source.enable_log(\"LTE_PDCP_DL_Cipher_Data_PDU\")\n else:\n source.enable_log(\"LTE_RLC_DL_AM_All_PDU\")\n source.enable_log(\"LTE_PHY_PDSCH_Stat_Indication\")", "def set_source(self, source):\n self.qbpm = self.sources[source]\n self.title = self.qbpm.address\n self.setWindowTitle(self.title)", "def keep_potential_source(self):\n self.source = self.potential_source", "def setSourcePath(self, sourcePath):\n self.__sourcePath = sourcePath", "def price_source(self, price_source):\n\n self._price_source = price_source", "def source_instance(self, source_instance):\n self._source_instance = source_instance", "async def async_set_source(self, source):\n self._source = source\n #self.async_schedule_update_ha_state(True)", "def primary_lead_source(self, primary_lead_source):\n\n self._primary_lead_source = primary_lead_source", "def this_source(self, source):\n for module in self.modules():\n if isinstance(module, DomainBatchNorm2d):\n module.this_source = source\n self._this_source = source", "def source(self, source: str):\n if source is None:\n raise ValueError(\"Invalid value for `source`, must not be `None`\") # noqa: E501\n\n self._source = source", "def source_id(self, source_id: str):\n\n self._source_id = source_id" ]
[ "0.7000559", "0.6853645", "0.684918", "0.67619085", "0.6555328", "0.6456665", "0.64430463", "0.64430463", "0.64430463", "0.64430463", "0.64430463", "0.64430463", "0.64430463", "0.6427387", "0.6293089", "0.62723905", "0.6253527", "0.6253527", "0.61970097", "0.6196628", "0.61923915", "0.61800075", "0.6139728", "0.61187136", "0.60687006", "0.6068649", "0.60657674", "0.60566866", "0.60306376", "0.6023725" ]
0.71566385
0
Set the logp values
def _update_logprobs(self):
    #self._logp_src = self._log_lim(self.p_source)
    self._logp_I0 = self._log_lim(self.p_source)
    self._logp_R0 = self._log_lim(self.p_source/(self.p_rec_div))
    self._logp_S_fin = self._log_lim(self.p_S_fin)
    self._logp_inf_fin = self._log_lim(self.p_infect_fin)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_log_from_main(self, *args):\n if self.logarithmic:\n self.scale.props.adjustment.props.value = \\\n self.smart_log(self.adjustment.props.value)", "def logp(self, x):\n pass", "def logP(self):\n raise NotImplementedError", "def __convert_to_log(self):\n for i in range(self.nStates):\n if self.pi[i]>0:\n self.pi[i]=log(self.pi[i])\n else:\n self.pi[i]=float('-inf')\n for j in range(self.nStates):\n if self.t[i][j]>0:\n self.t[i][j]=log(self.t[i][j])\n else:\n self.t[i][j]=float('-inf')\n for j in range(self.nObs):\n if self.e[i][j]>0:\n self.e[i][j]=log(self.e[i][j])\n else:\n self.e[i][j]=float('-inf')\n self.logdomain=True", "def set_main_from_log(self, *args):\n if self.logarithmic:\n self.adjustment.props.value = \\\n self.smart_unlog(self.scale.props.adjustment.props.value)", "def setLogLevel(self,value):\n self.PDFreactorConfiguration.in1[\"logLevel\"] = value", "def logp(self, F, Y):\n raise NotImplementedError(\"implement the logp function\\\n for this likelihood\")", "def logp(self, value: TensorType, **kwargs) -> TensorType:", "def logp(self, value: TensorType, **kwargs) -> TensorType:", "def setLogprobs(self, value):\n return self._set(logprobs=value)", "def log_params(params):\n mlflow.log_params(params)", "def logp(rv, value):\n\n value = at.as_tensor_variable(value, dtype=rv.dtype)\n return logp_aeppl(rv, value)", "def log_scale(self, value: float):\n assert value > 1\n self.__log_scale = value\n self.logarithmic = self.logarithmic", "def log_prob(self):", "def log(self): # just use base?\n return Factor().__build( VarSet(self.v) , np.log(self.t) )", "def update_log(self, plog, clog):\n if plog:\n self.converter.update_log(plog)\n if clog:\n self.converter.update_log(clog)", "def __init__(self, logFP):\n self.logFP = logFP", "def setAppendLog(self,value):\n self.PDFreactorConfiguration.in1[\"appendLog\"] = value", "def plogi_settings(self, plogi_settings):\n\n self._plogi_settings = plogi_settings", "def __init__(self, log_scaling_factors, **kwargs):\n self.log_scaling_factors = log_scaling_factors\n super().__init__(**kwargs)", "def SetLogging(self, logging):\n return _hypre.HypreFGMRES_SetLogging(self, logging)", "def prSet(lMa, logPath=None):\n global lPr, lMaPr, nmPrs, ticPrs, ticPr0s, nRepPrs, scaPrs, logFile\n\n # level\n lPr = 0\n lMaPr = lMa\n\n # list\n nMa = 10\n nmPrs = range(nMa)\n ticPrs = range(nMa)\n ticPr0s = range(nMa)\n nRepPrs = range(nMa)\n scaPrs = range(nMa)\n\n # log\n if logPath is not None:\n logFile = logPath\n logSet(logPath, haNew=True)", "def log(self, m=None):\n if self.is_log:\n raise ValueError(\"Pmf/Hist already under a log transform\")\n self.is_log = True\n\n if m is None:\n m = self.maxLike()\n\n for x, p in self.d.items():\n if p:\n self.set(x, math.log(p / m))\n else:\n self.remove(x)", "def loglog(self, **kwargs):\n return self.plot(plot=pylab.loglog, **kwargs)", "def logStep(self):\n n = self.mirror.cv['dp']\n self.r_Vm[n] = self.cv['Vm']\n self.r_Va[n] = self.cv['Va']", "def assign_log(self, value):\n if not self._log:\n raise StructureError(\"Trying to assign log values to non-log weights.\")\n\n value = tf.where(tf.is_nan(value), tf.log(tf.ones_like(value) * 0.01), value)\n if self._mask and not all(self._mask):\n # Only perform masking if mask is given and mask contains any 'False'\n value += tf.log(tf.cast(tf.reshape(self._mask, value.shape), dtype=conf.dtype))\n normalized_value = value - tf.reduce_logsumexp(value, axis=-1, keepdims=True)\n return tf.assign(self._variable, normalized_value)", "def set_log_scale(self, 
log_scale = True):\n self._plot.SetLogy(int(log_scale))\n self._logy = True", "def reset_hll(self):\n self.hll = HyperLogLog(250)\n self.hll64 = HyperLogLog64(2**17)", "def SetLogging(self, logging):\n return _hypre.HyprePCG_SetLogging(self, logging)", "def __init__(self, log_scaling_factors, **kwargs):\n super().__init__(log_scaling_factors, **kwargs)" ]
[ "0.70663077", "0.69361013", "0.6926119", "0.6848954", "0.6629823", "0.66100806", "0.6530385", "0.64838487", "0.64838487", "0.64012814", "0.6313983", "0.6302697", "0.629172", "0.62717354", "0.62027687", "0.61778677", "0.6150077", "0.6138653", "0.6104388", "0.6076358", "0.6030716", "0.60227036", "0.60184115", "0.6017513", "0.59879357", "0.59853166", "0.5972644", "0.5947835", "0.593881", "0.5908499" ]
0.7145263
0
Attempt to submit a bad file
def testBadFileSubmit(self, mock_open):
    mock_open.side_effect = IOError
    self.assertRaises(
        auacm.exceptions.InvalidSubmission,
        auacm.submit.submit,
        ['problem 1', 'notafile.cpp'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upload_bad_file(self):\n url = image_upload_url(self.reteta.id)\n res = self.client.post(url, {'image': 'notimage'}, format='multipart')\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_file_upload_fail(self):\r\n module = self.get_module_from_location(self.problem_location)\r\n\r\n # Simulate a student saving an answer\r\n response = module.handle_ajax(\"save_answer\", {\"student_answer\": self.answer_text})\r\n response = json.loads(response)\r\n self.assertFalse(response['success'])\r\n self.assertIn('error', response)", "def test_bad_file_name(self):\n\n url = '/%s/jobs/%i/input_files/?file_name=%s' % (self.api, self.job.id, 'not_a.file')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n result = results['results']\n self.assertEqual(len(result), 0)", "def insert_bad_data():\n get_file_reply(files[2][0], files[2][1])", "def test_upload_wrong_file_type(self):\n file = dict(\n file=(BytesIO(b'my file contents'), \"foto.doc\"),\n )\n response = self.client.post('/upload',\n content_type='multipart/form-data',\n data=file)\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)\n if os.path.exists(PHOTOS_SAVE_PATH):\n self.assertNotIn('foto.jpg', os.listdir(PHOTOS_SAVE_PATH))", "def test_missing_extension(client: FlaskClient):\n file = get_example_file(ExampleFileType.Txt)\n file.filename = \"test\"\n response = util.upload_file(client, DEFAULT_USER, file)\n assert response.status == \"400 BAD REQUEST\"", "def handle_bad_file(file: File, config_path: Path):\n msg = '<red><b>ERROR!</b></red> Invalid file type/content.'\n echo(msg, format=True)\n\n # Save file\n fname, ext = os.path.splitext(os.path.basename(file.name))\n md5_hash = md5(file.data).hexdigest()\n path = config_path / 'error' / f'{fname.lower()}-{md5_hash}{ext}'\n ezio.fs.write(path, file.data, '-p')\n echo(f'File saved under {path}')", "def test_upload_step__invalid_file(self):\n # Set Up\n self.go_to_step(FeedUpdateWizard.UPLOAD_STEP)\n\n # Test\n with open(f\"{ETL_TEST_DATA_DIR}invalid_extension.txt\", \"r\") as fp:\n response = self.client.post(\n self.WIZARD_URL,\n {\n self.WIZARD_CURRENT_STEP: FeedUpdateWizard.UPLOAD_STEP,\n self.SELECTED_ITEM: self.ITEM_UPLOAD_FILE,\n \"upload_file\": fp,\n \"submit\": \"submit\",\n },\n )\n\n # Assert\n self.assertEqual(response.status_code, 200)\n error_data = response.context_data[\"wizard\"][\"form\"].errors.get_json_data()\n self.assertEqual(\n response.context[\"wizard\"][\"steps\"].current, FeedUpdateWizard.UPLOAD_STEP\n )\n self.assertEqual(\n error_data,\n {\n \"upload_file\": [\n {\n \"code\": \"invalid\",\n \"message\": \"The file is not in a correct format\",\n }\n ]\n },\n )", "def test_send_corrupted_file(app: Flask) -> None:\n client = app.test_client()\n current_directory = Path.cwd()\n with open(current_directory / 'app' / 'tests' / 'files' / 'corrupted.zip', 'rb') as file:\n data = {'file': (io.BytesIO(file.read()), 'corrupted.zip')}\n res = client.post('/', content_type='multipart/form-data', data=data)\n assert res.status == '200 OK'\n assert {\"description\": \"Corrupted zip file\",\n \"status\": \"ERROR\"} == json.loads(res.data.decode())", "def test_upload_image_bad_request(self):\n url = image_upload_url(self.recipe.id)\n res = self.client.post(url, {'image': 'notimage'}, format = 'multipart')\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_upload_image_bad_request(self):\n 
url = image_upload_url(self.recipe.id)\n res = self.client.post(url, {'image':'notimage'}, format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_invalid_filetype(self):\n rv = self.post('/queue/',\n content={'image': (StringIO('This is not an image'),\n 'text.txt')},\n token=self.user_token)\n self.assertJSONError(rv, 'TagalleryInvalidFileExtension')\n return", "def test_upload_image_bad(self):\n url = image_upload_url(self.recipe.id)\n res = self.client.post(url, {'image': 'notimage', format: 'multipart'})\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def fail(self, msg=\"Malformed backup file\"):\n\t\traise ValidationError(msg)", "def test_is_not_google_file(self):\r\n bad_file = StringIO.StringIO()\r\n bad_file.write('failing tests please')", "def test_upload_image_bad_request(self):\n url = image_upload_url(self.recipe.id)\n res = self.client.post(url,{'image':'notimage'},format='multipart')\n self.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)", "def test_invalid_image_upload(self):\n res = self.client.post(\n reverse('articles:add-image',\n kwargs={\n \"slug\": Article.objects.get().slug\n }),\n data={\n \"file\": self.temporary_unsupported_image\n },\n format='multipart'\n )\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertRaises(Exception)", "def fail_unsigned(cls, upload, location=None):\n path = \"uploader/fail_unsigned/%s\" % upload[\"ulid\"]\n headers = Backend.headers()\n payload = {\"upload_file\": json.dumps(upload), \"location\": location}\n try:\n return Backend.put(path, payload, headers=headers)\n except requests.HTTPError as err:\n if err.response.status_code == 410:\n LOGGER.warning(\"Cannot fail file %s. File not active (410)\",\n upload[\"id\"])\n raise err\n except:\n raise", "def test_upload_image_bad_request(self):\n url = image_upload_url(self.movie.id)\n res = self.client.post(url, {'image': 'notimage'}, format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def fileUpload(fieldName):\n## we don't deal with OS specific \"\\n\"\n## because R does not have a problem (at least with Windows files)\n## no problem in R either with empty carriage returns at end of file\n \n if fs.has_key(fieldName):\n fileClient = fs[fieldName].file\n if not fileClient:\n shutil.rmtree(tmpDir)\n commonOutput()\n print \"<h1> ADaCGH ERROR </h1>\" \n print \"<p> The \", fieldName, \"file you entered is not a file </p>\"\n print \"<p> Please fill up the required fields and try again</p>\"\n print \"</body></html>\"\n sys.exit()\n else:\n shutil.rmtree(tmpDir)\n commonOutput()\n print \"<h1> ADaCGH ERROR </h1>\" \n print \"<p> \", fieldName, \"file required </p>\"\n print \"<p> Please fill up the required fields and try again</p>\"\n print \"</body></html>\"\n sys.exit()\n \n # transferring files to final destination;\n\n fileInServer = tmpDir + \"/\" + fieldName\n srvfile = open(fileInServer, mode = 'w')\n fileString = fs[fieldName].value\n srvfile.write(fileString)\n srvfile.close()\n\n os.chmod(fileInServer, 0666)\n \n if os.path.getsize(fileInServer) == 0:\n shutil.rmtree(tmpDir)\n commonOutput()\n print \"<h1> ADaCGH ERROR </h1>\"\n print \"<p>\", fieldName, \" file has size 0 </p>\"\n print \"<p> Please enter a file with something in it.</p>\"\n print \"<p> (Did you enter only a single file, but did not check 'One file'?\\\n If you are using only one file, the 'Two files' button should not be checked.)</p>\"\n print \"</body></html>\"\n 
sys.exit()", "def test_upload_invalid_image_field(self):\n url = create_upload_image_url(self.recipe.id)\n\n res = self.client.post(url, {\"image\": \"not_image\"}, format=\"multipart\")\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_is_not_google_file(self):\r\n bad_file = StringIO.StringIO()\r\n bad_file.write('failing tests please')\r\n bad_file.seek(0)\r\n\r\n self.assertTrue(\r\n not GBookmarkImporter.can_handle(bad_file),\r\n \"GBookmarkImporter cannot handle this file\")\r\n\r\n bad_file.close()", "def test_invalid_request(self):\n print(\"Testing invalid request...\")\n invalid_filename = os.path.join( \\\n os.path.dirname(self.client_path), \"7xEvjAeobu\")\n os.chdir(os.path.dirname(self.client_path))\n subprocess.call([self.client_path, \\\n \"{}:{}\".format(self.args.ip, self.args.port), \\\n invalid_filename])\n self.assertFalse(os.path.isfile(invalid_filename))", "def upload_validated(request):\n if 'file' not in request.files:\n flash('No file part')\n return False \n if not request.form.get('username', None):\n flash('No username part')\n return False \n torrent_file = request.files['file']\n if torrent_file.filename == '':\n flash('No selected file')\n return False \n if torrent_file and check_allowed_extension(torrent_file.filename):\n return True", "def test_local_uploader_upload_wrong_file(self, mock):\r\n mock.save.return_value = None\r\n u = LocalUploader()\r\n file = FileStorage(filename='test.txt')\r\n res = u.upload_file(file, container='user_3')\r\n err_msg = (\"Upload file should return False, \\\r\n as this extension is not allowed\")\r\n assert res is False, err_msg", "def test_unsupported_extension(client: FlaskClient):\n file = get_example_file(ExampleFileType.Txt)\n file.filename = \"test.py\"\n response = util.upload_file(client, DEFAULT_USER, file)\n assert response.status == \"400 BAD REQUEST\"", "def form_invalid(self, form):\n messages.add_message(self.request, messages.ERROR, form.errors['file'])\n return HttpResponseRedirect(reverse('esfviewer:upload'))", "def reject_factory(total_length, content_type, filename, file_length):\n raise status.RequestEntityTooLarge('not accepting posted files')", "def test_upload_image_bad_request(self):\n url = image_upload_url(self.service_id)\n res = self.client.post(url, {'image': 'no_image'}, format='multipart')\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_malformed(self):\n fdesc, fname = tempfile.mkstemp()\n tfile = os.fdopen(fdesc, 'w')\n tfile.write(self.file_str2)\n tfile.close()\n assert_raises(Exception, grades.writers.GradesFile, fname)\n os.unlink(fname)" ]
[ "0.71990144", "0.7005815", "0.6821353", "0.6793216", "0.6602382", "0.6537717", "0.6516978", "0.6459598", "0.640576", "0.6296545", "0.6278691", "0.6249999", "0.62274677", "0.6221819", "0.6215732", "0.62119937", "0.620819", "0.62067413", "0.6171933", "0.6168765", "0.61468196", "0.61423963", "0.6134086", "0.61296076", "0.61263114", "0.6123711", "0.6119501", "0.61172485", "0.6093442", "0.60679173" ]
0.7296077
0
Return a mapping of bike station ids to the predicted number of bikes needed, e.g. >>> weather_data = {
def predict_bike_demand(weather_data):
    # TODO: connect to the real deal!
    return {s["extra"]["uid"]: random.randint(0, 11) for s in MOCK_STATION_STATS}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bikes_prediction(station_id,time_hour):\r\n\r\n # get the data through openWeather api\r\n r = requests.get('http://api.openweathermap.org/data/2.5/forecast?appid=9511c6f09bf671d3bd65bf650197234f&q=Dublin')\r\n weathers = r.json()\r\n\r\n weather_detalis = weathers[\"list\"]\r\n temp = weather_detalis[0]['main']['temp']\r\n wind = weather_detalis[0]['wind']['speed']\r\n main = weather_detalis[0]['weather'][0]['main']\r\n weather_Drizzle = 0\r\n weather_Rain = 0\r\n if main == 'Drizzle':\r\n weather_Drizzle = 1\r\n elif main == 'Rain':\r\n weather_Rain = 1\r\n f2 = pd.DataFrame(np.array([station_id, time_hour, temp, wind, weather_Drizzle, weather_Rain])).T\r\n models = {}\r\n # open the folder of model\r\n with open('static/pickle/'+str(station_id) + \".pkl\", \"rb\") as handle:\r\n models[station_id] = pickle.load(handle)\r\n available_bikes_prediction = models[station_id].predict(f2).round()[0]\r\n return jsonify(bp=available_bikes_prediction)", "def get_bikes_for_week(cls, dbsession, station_id):\n station = [(\"Day\", \"Available Bikes\")]\n station_data = dbsession.query(func.weekday(cls.last_update),\n func.avg(cls.available_bikes)) \\\n .filter(cls.station_id == station_id) \\\n .group_by(func.weekday(cls.last_update)) \\\n .all()\n\n # this section parses the query return into a readable list.\n # from docs:extend() appends the contents of seq to list.\n if station_data:\n station.extend([(days[a], float(b)) for a, b in station_data])\n else:\n station.extend([(0,0)])\n\n return station", "def get_bikes_for_weekday(cls, dbsession, weekday, station_id):\n station = [(\"Time\", \"Available Bikes\", \"Available Stands\")]\n\n station_data = dbsession.query(func.hour(cls.last_update),\n func.avg(cls.available_bikes),\n func.avg(cls.available_bike_stands)) \\\n .filter(cls.station_id == station_id,\n func.weekday(cls.last_update) == weekday) \\\n .group_by(func.hour(cls.last_update)) \\\n .all()\n\n # this section parses the query return into a readable list.\n # from docs:extend() appends the contents of seq to list.\n if station_data:\n station.extend([(a, float(b), float(c)) for a, b, c in station_data])\n else:\n station.extend([(0,0,0)])\n return station", "def get_prediction(station_id):\n try:\n # load prediction model\n model = None\n\n with open(os.path.join(app.root_path, \"bike_prediction_model.pickle\"), \"rb\") as f:\n model = pickle.load(f)\n\n app.logger.debug(\"pickle path:\" + os.path.join(app.root_path, \"bike_prediction_model.pickle\"))\n\n latitude, longitude = helper.get_station_coordinate(db, station_id)\n if latitude and longitude and model:\n # prepare input data\n weather_data = helper.get_weather_forecast(app)\n input_x, slot_timestamps = helper.create_prediction_input(weather_data, latitude, longitude)\n slot_datetimes = [datetime.datetime.fromtimestamp(i) for i in slot_timestamps]\n\n # predict\n prediction_y = model.predict(input_x)\n\n # prepare for response object\n res_list = []\n day_list = []\n prev = slot_datetimes[0].day\n day_list.append({\n 'date': slot_datetimes[0].weekday(),\n 'hour': slot_datetimes[0].hour,\n 'available_bike': prediction_y[0]\n })\n for i in range(1, len(slot_datetimes)):\n if prev != slot_datetimes[i].day or i == len(slot_datetimes) - 1:\n prev = slot_datetimes[i].day\n res_list.append(day_list)\n day_list = []\n day_list.append({\n 'date': slot_datetimes[i].weekday(),\n 'hour': slot_datetimes[i].hour,\n 'available_bike': prediction_y[i]\n })\n return jsonify(res_list)\n else:\n return jsonify({})\n except Exception as 
e:\n app.logger.error(e, exc_info=True)\n return jsonify({})", "def make_station_dict(self):\n self.station_dict = {}\n\n # interates over stations and puts the amount of connections in the dict\n for station in self.stations:\n length = len(self.stations[station].connections)\n self.station_dict[station] = length\n \n return self.station_dict", "def station_analysis(data):\n unique_stations = list(set(data['start_station_name'].tolist() + data['end_station_name'].tolist()))\n\n station_counter = {station : 0 for station in unique_stations}\n for index, row in data.iterrows():\n station_counter[row['start_station_name']] += 1\n\n print('List of all stations:')\n print(unique_stations)\n\n keys = list(station_counter.keys())\n vals = list(station_counter.values())\n indexArr = np.argsort(list(station_counter.values()))\n popularStations = []\n for i in reversed(indexArr):\n popularStations.append((keys[i], vals[i]))\n\n stations1, journeys = zip(*popularStations[0:10])\n plt.bar(stations1, journeys, 0.1)\n\n plt.xticks(stations1, rotation='vertical')\n plt.title('Popular stations')\n plt.xlabel('Station names')\n plt.ylabel('Journeys')\n\n plt.show()\n return station_counter", "def bikes(self):\n bikes = [1 if dock.bike else 0 for dock in self.docks]\n return sum(bikes)", "def num_stations(self) -> int:\n return self._num_stations", "def get_num_stations(add):\r\n name=get_zipcode_names(add)\r\n engine = get_sql_engine()\r\n station_stats = text(\r\n \"\"\"\r\n SELECT\r\n count(v.*) as num_stations\r\n FROM indego_rt1130 as v\r\n JOIN philly_zipcode as n\r\n ON ST_Intersects(v.geom, n.geom)\r\n WHERE n.code = :name\r\n \"\"\"\r\n )\r\n resp = engine.execute(station_stats, name=name).fetchone()\r\n return resp[\"num_stations\"]", "def create_station_mapping(station_data):\n station_map = {}\n for data_file in station_data:\n with open(data_file, 'r') as f_in:\n # set up csv reader object - note that we are using DictReader, which\n # takes the first row of the file as a header row for each row's\n # dictionary keys\n weather_reader = csv.DictReader(f_in)\n\n for row in weather_reader:\n station_map[row['station_id']] = row['landmark']\n return station_map", "def station_id_lookup(df):\n station_dict = defaultdict()\n values = df.values\n for row in values:\n stationid = row[0]\n data = row[1:]\n station_dict[stationid] = data\n return station_dict", "def get_daily(station_id):\n dailydata = db.session.query(func.avg(DublinBike.available_bike)) \\\n .filter(DublinBike.number == station_id) \\\n .group_by(func.dayofweek(DublinBike.localtime)) \\\n .order_by(func.dayofweek(DublinBike.localtime)) \\\n .all()\n return jsonify([\n {'day': i,\n 'available_bike': float(dailydata[i][0])\n } for i in range(7)\n ])", "def _compute_observations(self):\n observations = {}\n for ts in self.ts_ids:\n if self.traffic_signals[ts].time_to_act() or self.traffic_signals[ts].regular_obs() :\n observations[ts] = self.traffic_signals[ts]._compute_observation()\n return observations", "def create_count_map(self) -> Dict[int, int]:\n res: Dict[int, int] = {}\n for sequence_data in self.model.values():\n sequence_data: NGramsSequence = cast(NGramsSequence, sequence_data)\n for count in sequence_data.next_count.values():\n count: int = cast(int, count)\n if count not in res:\n res[count] = 0\n res[count] += 1\n self.count_map = res\n logger.success('created count map')\n return res", "def _get_bikes_available(sta):\n # 'num_ebikes_available\" is not part of the GBFS spec, but it appears\n # in the Divvy API response\n 
return sta['num_bikes_available'] + sta.get('num_ebikes_available', 0)", "def info_about_petrol_kinds(petrol_stations):\n info_about_petrol_kinds = {}\n info_about_petrol_kinds['total amount of petrol'] = 0\n\n for number_of_petrol in petrol_stations:\n for petrol_name in petrol_stations[number_of_petrol]['kinds']:\n if petrol_name not in info_about_petrol_kinds:\n info = {}\n if petrol_name == 'АИ-80':\n price = 38.95\n elif petrol_name == 'АИ-92':\n price = 43.01\n elif petrol_name == 'АИ-95':\n price = 45.69\n elif petrol_name == 'АИ-98':\n price = 49.2\n info['price'] = price\n info['stations'] = [number_of_petrol]\n info['amount of petrol'] = 0\n info_about_petrol_kinds[petrol_name] = info\n else:\n info = info_about_petrol_kinds[petrol_name]\n info['stations'] = info['stations'] + [number_of_petrol]\n return info_about_petrol_kinds", "def getNrStations(self):\n return len(self.stationData)", "def count_correct_tags(self):\n correct_dict = {}\n for gold_tag, predict_tag in zip(self.golden_tags, self.predict_tags):\n if gold_tag == predict_tag:\n if gold_tag not in correct_dict:\n correct_dict[gold_tag] = 1\n else:\n correct_dict[gold_tag] += 1\n\n return correct_dict", "def getNbStations(self) :\n return len(self._stations)", "def test_seed_station_information(self):\n\t\tget_info.seed_station_information()\n\n\t\tMacDougal_Prince = db.session.query(Station).filter(Station.id == 128).one()\n\t\tself.assertTrue(MacDougal_Prince, 'Station at MacDogual/Pride did not get sucessfully added.')\n\n\t\tself.assertEqual(MacDougal_Prince.num_bikes_available, 0, 'Bike counts were not initialized properly')\n\t\tself.assertEqual(MacDougal_Prince.num_docks_available, 0, 'Dock counts were not initialized properly')", "def get_hourly(station_id):\n hourdata = db.session.query(func.avg(DublinBike.available_bike)) \\\n .filter(DublinBike.number == station_id) \\\n .group_by(extract('hour', DublinBike.localtime)) \\\n .order_by(extract('hour', DublinBike.localtime)) \\\n .all()\n return jsonify([\n {'hour': i,\n 'available_bike': float(hourdata[i][0])\n } for i in range(24)\n ])", "def week_chart(station_id):\r\n\r\n engine = get_db()\r\n # According to the parameter:station_id\r\n # select the occupancy of the corresponding station from the database.\r\n sql = \"SELECT available_bikes, available_bike_stands, last_update FROM STATION where number={};\".format(station_id)\r\n rows = engine.execute(sql).fetchall()\r\n\r\n week_average_bikes = []\r\n week_average_stands = []\r\n\r\n # The values 0 - 6 in the list day represent the days from Sunday to Saturday\r\n days = [0, 1, 2, 3, 4, 5, 6]\r\n for day in days:\r\n # Invoking the function:day_avg, calculate the average occupancy on a single day, and then add it to the list\r\n week_average_bikes.append(day_avg(rows, day)[0])\r\n week_average_stands.append(day_avg(rows, day)[1])\r\n daily = jsonify(week_average_bikes=week_average_bikes, week_average_stands=week_average_stands)\r\n return daily", "def get_station_boroughs(self):\\", "def _get_ogd_stations():\n return {r[\"Station\"] for r in ZamgData.current_observations()}", "def predict():\n ### YOUR CODE GOES HERE\n\n predictions = list(np.random.randint(low=0, high=1, size=2073))\n ### YOUR CODE ENDS HERE\n print(predictions) # Should be a dictionary of forecasts\n # i.e. 
{\"id1\" : forecast, \"id2\": forecast, ...}\n return predictions", "def stations_dict(self):\n return self.__stations_dict", "def get_mike_stations(pool):\n\n mike_stations = {}\n\n connection = pool.connection()\n try:\n with connection.cursor() as cursor:\n sql_statement = \"SELECT * FROM `station` WHERE `id` like %s\"\n row_count = cursor.execute(sql_statement, \"18_____\")\n if row_count > 0:\n results = cursor.fetchall()\n for dict in results:\n mike_stations[dict.get(\"name\")] = [dict.get(\"id\"), dict.get(\"latitude\"), dict.get(\"longitude\")]\n return mike_stations\n else:\n return None\n except Exception as exception:\n error_message = \"Retrieving mike stations failed\"\n logger.error(error_message)\n traceback.print_exc()\n raise exception\n finally:\n if connection is not None:\n connection.close()", "def tobs():\n\n # Open sessions\n session = Session(bind=engine)\n\n # Query DB for StationID, Station Name of the most active station (i.e. the station with more tempt observations) \n data=session.query(Measurement.station,Station.name,func.count(Measurement.tobs)).\\\n filter(Measurement.station==Station.station).\\\n group_by(Measurement.station,Station.station).\\\n order_by(func.count(Measurement.tobs).desc()).\\\n first()\n \n # Unpaking the results\n (maxStationID,maxStationName,temp) = data\n\n # Now that we have the most active station, we need to figure out what is the last observation date.\n lastDate=session.query(Measurement.date).filter(Measurement.station == maxStationID).order_by(Measurement.date.desc()).first()\n for date in lastDate:\n dataArray = date.split(\"-\")\n (year,month,day) = dataArray\n\n # And calculate what is 1 year before that to have the start and end date for the data\n year_agoStation = dt.date(int(year),int(month),int(day)) - dt.timedelta(days=365)\n \n # Store as variables\n latestTobsDate=f'{year}-{month}-{day}'\n oldestTobsDate=year_agoStation.isoformat()\n\n # Initiating an empty dictionary\n tobs={}\n\n # Query the DB once again to get the date and the respective obs value\n results=session.query(Measurement).filter(Measurement.date >= year_agoStation).filter(Measurement.station == maxStationID).all()\n for row in results:\n temp={row.date:row.tobs} #temporary dictionary with the date as key and the obs as the value\n tobs.update(temp) #append to the tobs dictionary\n \n # Also creating a dictionary to provide the user with information of the most active station\n maxStation={'id':maxStationID,\n 'name':maxStationName}\n \n # Main API dict that holds an info key, the most active station, the date interval and the results/obs per day\n temperaturesAPI={'info':'Last 12 months of temperature observation in Fahrenheit for the Station with more observations',\n 'most active station':maxStation,\n 'date interval':{'from':oldestTobsDate,'to':latestTobsDate},\n 'results':tobs\n }\n \n # Returing the main dictionary in a JSON format API response \n return(jsonify(temperaturesAPI))", "def predict(self, unknown):\n for title in unknown:\n for ind in range(len((unknown[list(unknown.keys())[0]]))):\n unknown[title][ind] = (unknown[title][ind] - self.normalization_n[ind]) / (self.normalization_d[ind])\n print(unknown)\n unknown_labels = {}\n for title in unknown:\n neighbors = self.k_neighbors(unknown[title], self.dataset, self.k)\n unknown_labels[title] = self.rate(neighbors, self.labels)\n return unknown_labels", "def count_kmers_observed(read, k):\n counts = {}\n num_kmers = len(read) - k + 1\n for i in range (num_kmers):\n kmer= read[i:i+k]\n if 
kmer not in counts:\n counts[kmer] = 0\n counts[kmer] +=1\n return len(counts)" ]
[ "0.6407551", "0.6013032", "0.5959645", "0.5732601", "0.5715449", "0.5615073", "0.5461735", "0.5427727", "0.5422435", "0.5406403", "0.53591686", "0.5297303", "0.5228484", "0.52237123", "0.52150697", "0.5209488", "0.519387", "0.5190188", "0.5189188", "0.51510733", "0.51246756", "0.5122551", "0.51216453", "0.5115779", "0.51045674", "0.50829315", "0.5082295", "0.50763685", "0.50714034", "0.5069577" ]
0.7164526
0
Takes a list and puts turn every element in list to integer and returns a tuple of those integers. Prints error message and returns empty tuple if there is an element that i not list
def list_to_tuple(list):
    templist = []
    for i in list:
        try:
            templist.append(int(i))
        except ValueError:
            print('Error. Please enter only integers.')
            return ()
    return tuple(templist)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processed(N:int)->tuple:\n l1= str(N)\n a,b = '',''\n for i in range(len(l1)):\n if l1[i] == '4':\n a+='2'\n b+='2'\n else:\n a+=str(l1[i])\n b+='0'\n return int(a), int(b)", "def _to_tuple(values: Union[int, Iterable[int]]) -> Tuple[int, ...]:\n try:\n return tuple(values)\n except TypeError:\n return (values,)", "def element_to_tuple(list_of_elements):\n return list(map(lambda x: tuple(x), list_of_elements))", "def easy_unpack_my(elements):\n try:\n res = tuple(elements[i] for i in [0, 2, -2])\n except IndexError:\n res = 0\n return res", "def _to_int(maybe_iter):\n if not isinstance(maybe_iter, str) and isinstance(maybe_iter, abc.Iterable):\n return tuple([_to_int(a) for a in maybe_iter])\n try:\n return int(maybe_iter)\n except ValueError:\n return maybe_iter", "def _tuple_from_one_or_two_ints(self, v):\n try:\n a, b = [int(x) for x in v]\n except TypeError:\n a, b = int(v), int(v)\n return (a,b)", "def main(data: List[str]) -> Tuple[int, int]:\n data = [int(number) for number in data[0].split(\",\")]\n\n answer_one = part_one(data)\n answer_two = part_two(data)\n return answer_one, answer_two", "def index(l_: List[int], i: Tuple[int, ...]) -> Tuple[int, ...]:\n return tuple([l_[x] for x in i])", "def test_mix_tuple_issue_387(self):\n assert_type_and_value(\n tuple,\n (42, 'Test'),\n self.env(\n 'MIX_TUPLE',\n default=(0, ''),\n cast=lambda t: tuple(\n map(\n lambda v: int(v) if v.isdigit() else v.strip(),\n [c for c in t.strip('()').split(',')]\n )\n ),\n )\n )", "def _py3_safe(parsed_list):\n if len(parsed_list) < 2:\n return parsed_list\n else:\n new_list = [parsed_list[0]]\n nl_append = new_list.append\n for before, after in py23_zip(islice(parsed_list, 0, len(parsed_list)-1),\n islice(parsed_list, 1, None)):\n if isinstance(before, Number) and isinstance(after, Number):\n nl_append(\"\")\n nl_append(after)\n return tuple(new_list)", "def element_length(lst: Iterable[Sequence]) -> List[Tuple[Sequence, int]]:\n return [(i, len(i)) for i in lst]", "def element_length(lst: Iterable[Sequence]) -> List[Tuple[Sequence, int]]:\n return [(i, len(i)) for i in lst]", "def element_length(lst: Iterable[Sequence]) -> List[Tuple[Sequence, int]]:\n return [(i, len(i)) for i in lst]", "def comma_list(s):\n\n return tuple(int(v) for v in s.split(\",\"))", "def test_get_triangle_tuple_all_int(self):\n triangle = (3, 2, 1)\n result = get_triangle_type(triangle)\n self.assertEqual(result, 'scalene')", "def _list_to_tuple(v):\n if isinstance(v, list):\n return tuple(v)\n return v", "def test_list_int2(self):\n inp = [(0, 0), (10, 1), (1, 2)]\n expected = 19\n actual = get_num_steps(inp)\n self.assertEqual(expected, actual)", "def str2int_tuple(integers: str) -> Optional[Tuple[int, ...]]:\n assert check_argument_types()\n if integers.strip() in (\"none\", \"None\", \"NONE\", \"null\", \"Null\", \"NULL\"):\n return None\n return tuple(map(int, integers.strip().split(\",\")))", "def test_list_int3(self):\n inp = [(0, 0), (10, 5), (-1, 0)]\n expected = 21\n actual = get_num_steps(inp)\n self.assertEqual(expected, actual)", "def _is_positive_int_tuple(item):\n if not isinstance(item, tuple):\n return False\n for i in item:\n if not _is_positive_int(i):\n return False\n return True", "def ex_list(data):\n return tuple(data)", "def validateListOfInts(asValues, iMin = 0, iMax = 0x7ffffffe, aoNilValues = tuple([[], None]), fAllowNull = True):\n (asValues, sError) = ModelDataBase.validateListOfSomething(asValues, aoNilValues, fAllowNull);\n\n if sError is None and asValues not in aoNilValues and 
asValues:\n for i, _ in enumerate(asValues):\n sValue = asValues[i];\n\n sThisErr = '';\n try:\n iValue = int(sValue);\n except:\n sThisErr = 'Invalid integer value \"%s\".' % (sValue,);\n else:\n asValues[i] = iValue;\n if iValue < iMin:\n sThisErr = 'Value %d is too small (min %d)' % (iValue, iMin,);\n elif iValue > iMax:\n sThisErr = 'Value %d is too high (max %d)' % (iValue, iMax,);\n else:\n continue;\n\n if sError is None:\n sError = sThisErr;\n else:\n sError += ' ' + sThisErr;\n\n return (asValues, sError);", "def enumerate(x) -> List[Tuple[int, any]]:\n pass", "def parseTupleList(self,string):\r\n string = string.replace(\"[\",\"\")\r\n string = string.replace(\"),\",\"*\")\r\n string = string.replace(\"(\", \"\")\r\n string = string.replace(\")\", \"\")\r\n string = string.replace(\"]\", \"\")\r\n string = string.split(\"*\")\r\n for i in xrange(len(string)):\r\n string[i] = string[i].split(\",\")\r\n for i in xrange(len(string)):\r\n for j in xrange(len(string[i])):\r\n string[i][j] = int(string[i][j])\r\n string[i] = tuple(string[i])\r\n return string", "def to_tuple(v: Union[int, Tuple[int, int]]) ->Tuple[int, int]:\n if torch.jit.isinstance(v, int):\n return v, v\n else:\n return v", "def _unpack_tuple(x):\n if len(x) == 1:\n return x[0]\n else:\n return x", "def _unpack_tuple(x):\n if len(x) == 1:\n return x[0]\n else:\n return x", "def single_element_tuple():\n single = (1,)\n print(type(single)) # <type 'tuple'>", "def test_int_tuple_validation(value_idx_0: Any, value_idx_1: Any, value_idx_2: Any) -> None:\n m = ParamClass()\n val = (value_idx_0, value_idx_1, value_idx_2)\n if not all([isinstance(x, int) for x in val]):\n with pytest.raises(ValueError):\n m.int_tuple = (value_idx_0, value_idx_1, value_idx_2)\n else:\n m.int_tuple = (value_idx_0, value_idx_1, value_idx_2)", "def list_to_float(l: list) -> [list, int]:\n\n # Initialisations\n l_o = []\n l_f = 0\n\n # Loop through the list\n for i in l:\n\n # Add the current list item as a float\n try:\n l_o.append(float(i))\n\n # Increment the failure counter\n except:\n l_o.append(0)\n l_f += 1\n\n return l_o, l_f" ]
[ "0.6849902", "0.65656465", "0.6465324", "0.63646877", "0.617637", "0.61712325", "0.61625373", "0.61295885", "0.61062", "0.60706764", "0.6066202", "0.6066202", "0.6066202", "0.6015309", "0.5969996", "0.59488297", "0.5945779", "0.59452987", "0.5942725", "0.59121597", "0.59073335", "0.58949935", "0.58738965", "0.58279705", "0.5776373", "0.5774315", "0.5774315", "0.5771879", "0.57593966", "0.5756675" ]
0.84013814
0
Calculate the loss. You can calculate the loss using mse or mae.
def compute_loss(y, tx, w):
    # ***************************************************
    # INSERT YOUR CODE HERE
    # TODO: compute loss by MSE / MAE
    # ***************************************************
    # vector e
    e = compute_e(y, tx, w)
    N = compute_N(e)
    L_MSE = np.dot(np.matrix.transpose(e), e)
    L_MSE = L_MSE / (2 * N)
    return L_MSE
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_loss(self):", "def calc_loss(self, x: np.ndarray, y: np.ndarray) -> float:\n return self.descent.calc_loss(x, y)", "def calculate_loss(self, X, y):\n probs = self.predict(X)\n\n num_examples = X.shape[0]\n\n sub = np.subtract(probs, y)\n abs_sum = np.abs(sub)\n sm = np.sum(abs_sum)\n loss = 1 - sm / num_examples\n print(\"Current loss: [ \" + str(\"{:6.5f}\").format(loss) + \" ]\")\n return loss", "def calculate_loss(self, a, label):\n if self.loss == 'mse':\n diff = a - label\n err = np.square(diff).mean(axis=0).mean()\n elif self.loss == 'ce':\n return sum(-np.log2(a[label > 0]))\n else:\n raise ValueError('loss function not implemented')\n return err", "def compute_loss(self, **kwargs):\n raise NotImplementedError", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def compute_loss(self, obs, returns):", "def loss(self):\n if not self.run:\n self._run()\n return self.model_loss", "def _compute_loss(self):\n state, action, reward, next_state, done = self.replay_buffer.sample(self.batch_size)\n\n state = torch.FloatTensor(state)\n next_state = torch.FloatTensor(next_state)\n action = torch.LongTensor(action)\n reward = torch.FloatTensor(reward)\n done = torch.FloatTensor(done)\n\n q_values = self.dqn(state)\n q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)\n\n next_q_values = self.target_dqn(next_state)\n next_q_value = next_q_values.max(1)[0]\n target = reward + self.discount_factor * next_q_value * (1 - done)\n\n # loss = F.smooth_l1_loss(q_value, target.detach())\n loss = F.mse_loss(q_value, target.detach())\n\n return loss", "def compute_loss(self, *args, **kwargs):\n raise NotImplementedError", "def compute_loss(self):\n def calc_loss(inputs, outputs):\n reconstruction_loss = tf.metrics.binary_crossentropy(\n tf_flat(inputs), tf_flat(outputs))\n reconstruction_loss *= OUT_SIZE * OUT_SIZE\n kl_loss = -0.5 * tf.reduce_sum(1.0 + self.log_sigma - tf.square(\n self.mu) - tf.exp(self.log_sigma), 1)\n return tf.reduce_mean(reconstruction_loss + kl_loss)\n return calc_loss", "def loss(self):\n return 'mse'", "def compute_loss(y, tx, w):\n e = y - tx.dot(w)\n return calculate_mse(e)", "def loss_op(self):\n return self.loss", "def calculate_loss(self, interaction):\n\n if self.restore_user_e is not None or self.restore_entity_e is not None:\n self.restore_user_e, self.restore_entity_e = None, None\n\n user = interaction[self.USER_ID]\n pos_item = interaction[self.ITEM_ID]\n neg_item = interaction[self.NEG_ITEM_ID]\n\n user_all_embeddings, entity_all_embeddings, cor_loss = self.forward()\n u_embeddings = user_all_embeddings[user]\n pos_embeddings = entity_all_embeddings[pos_item]\n neg_embeddings = entity_all_embeddings[neg_item]\n\n pos_scores = torch.mul(u_embeddings, pos_embeddings).sum(dim=1)\n neg_scores = torch.mul(u_embeddings, neg_embeddings).sum(dim=1)\n mf_loss = self.mf_loss(pos_scores, neg_scores)\n reg_loss = self.reg_loss(u_embeddings, pos_embeddings, neg_embeddings)\n cor_loss = self.sim_decay * cor_loss\n loss = mf_loss + self.reg_weight * reg_loss + cor_loss\n return loss", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = 
tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def compute_loss(self, x, gt):\n loss = sum([torch.mean((out - gt)**2) for out in self.forward(x)])\n return loss", "def calculate_training_loss(self):\n self.network.train()\n self.training_average_loss = self.calculate_average_loss(self.training_dataloader)", "def mae_loss(model: tf.keras.Model,\n model_input: tf.Tensor,\n model_target: tf.Tensor\n ):\n _y = model(model_input)\n _reduction_string = \"weighted_sum_over_batch_size\"\n return tf.losses.absolute_difference(labels=model_target,\n predictions=_y,\n reduction=_reduction_string\n )", "def compute_loss(self, features, mode, params, precomputed):\n raise NotImplementedError(\"Model does not implement loss.\")", "def loss(self):\n return self._loss", "def get_loss(self):\n return self.loss / self.cnt", "def compute_loss(self, X, y):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n\r\n # Computing the loss using the below formula\r\n # Loss = -(1/m)*sum( (y_i)*log(σ(wTx_i)) + (1-y_i)*log(1 - σ(wTx_i)))\r\n # m = number of examples and i for ith example\r\n\r\n loss = 0\r\n X = np.append(X, np.array([[1]]*X.shape[0]), axis=1)\r\n # for idx,example in enumerate(X):\r\n # loss = loss + y[idx] * np.log(self.sigmoid(np.dot(example, self.w))) + (1 - y[idx]) * np.log(1 - self.sigmoid(np.dot(example, self.w)))\r\n # loss = -loss/ X.shape[0]\r\n\r\n loss = -np.mean(y * np.log(self.sigmoid(np.dot(X, self.w))) + (1 - y) * np.log(1 - self.sigmoid(np.dot(X, self.w))))\r\n return loss", "def calculate_loss(self, pred, gold, smoothing=False):\n gold = gold.contiguous().view(-1)\n if smoothing:\n epsilon = 0.1\n n_class = pred.size(1)\n one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)\n one_hot = one_hot * (1 - epsilon) + \\\n (1 - one_hot) * epsilon / (n_class - 1)\n\n log_prb = F.log_softmax(pred, dim=1)\n # create non-padding mask with torch.ne()\n non_pad_mask = gold.ne(self.constants.PAD)\n loss = -(one_hot * log_prb).sum(dim=1)\n # losses are averaged later\n loss = loss.masked_select(non_pad_mask).sum()\n else:\n loss = F.cross_entropy(\n pred, gold, ignore_index=self.constants.PAD, reduction='sum')\n return loss", "def calculate_loss(self, output, target, **kwargs):\n ##dont do aggregation\n raise NotImplementedError", "def calculate_loss(self, output, target, **kwargs):\n ##dont do aggregation\n raise NotImplementedError", "def loss_total(self):\r\n def loss(y_true, y_pred):\r\n l2 = 1/2*K.sum(K.square(y_true-y_pred))\r\n\r\n return l2\r\n return loss", "def _compute_loss(self, predictions, targets, **params):\n pass", "def calc_loss(X, Y, model):\n Z = predict(X, model)\n return -(Y * np.log(Z)).sum() / len(Y)" ]
[ "0.80474067", "0.77250457", "0.77212405", "0.7686314", "0.76515734", "0.7590657", "0.7590657", "0.7521977", "0.75205433", "0.7510856", "0.7470165", "0.7456652", "0.7438454", "0.7398427", "0.73179173", "0.73172104", "0.7310165", "0.7301446", "0.72921497", "0.72774357", "0.72511524", "0.7249871", "0.7239", "0.7238317", "0.72301155", "0.7211939", "0.7211939", "0.720738", "0.7176155", "0.71756506" ]
0.7866642
1
Creates a MainWindow using 75% of the available screen resolution.
def create_main_window():
    main_win = MainWindow()
    main_windows.append(main_win)
    available_geometry = app.desktop().availableGeometry(main_win)
    main_win.resize(available_geometry.width() * 2 / 3,
                    available_geometry.height() * 2 / 3)
    main_win.show()
    return main_win
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.app = qt.QApplication(sys.argv)\n self.window = qt.QMainWindow()\n self.screenSize = qt.QDesktopWidget().screenGeometry(-1)\n self.window.setGeometry(self.getDims()[1]/4, self.getDims()[0]/4, self.getDims()[1]/2, self.getDims()[0]/2)", "def placeWindow(self):\r\n\t\t# window size\r\n\t\tw = 600\r\n\t\th = 300\r\n\t\t# find the screen size\r\n\t\tsw = self.parent.winfo_screenwidth()\r\n\t\tsh = self.parent.winfo_screenheight()\r\n\t\t# now define the location on the current screen\r\n\t\tx = (sw/2-0.5*w)\r\n\t\ty = (sh/2-0.5*h)\r\n\t\tself.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def window(main):\r\n main.title(\"BinCryptor 1.0\")\r\n main.update_idletasks()\r\n width = main.winfo_width() #Width of the current screen\r\n height = main.winfo_height() #Height of the current screen\r\n x = (main.winfo_screenwidth() // 2) - (width // 2)\r\n y = (main.winfo_screenheight() // 2) - (height // 2)\r\n main.geometry(f'{width}x{height}+{x}+{y}') #Adjusts the height and width\r", "def main():\n root = Tk()\n if high_dpi:\n root.call('tk', 'scaling', 4)\n if fullscreen:\n root.attributes('-fullscreen', True)\n root.configure(bg=yellow)\n root.grid_columnconfigure(2, weight=1)\n root.title('NS Fietsenstalling')\n\n MainScreen(root)\n root.mainloop()", "def create_main_enviroment(self):\n # self.layout=QGridLayout()\n self.resize(900, 900)\n self.centralWidget = CentralWidget(self) # CentralWidget(self)\n self.setCentralWidget(self.centralWidget)\n\n # self.toolbar = QToolBar(self)\n # self.addToolBar(self.toolbar)\n\n # self.setLayout(self.layout)\n self.setWindowTitle(\"Fitting elastic constants\")", "def initialise_window(self):\n self.imageLabel.setBackgroundRole(QtGui.QPalette.Base)\n self.imageLabel.setScaledContents(True)\n self.scrollArea.setWidget(self.imageLabel)\n self.setCentralWidget(self.scrollArea)\n self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) # Disable horizontal scrollbar.\n self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) # Disable vertical scrollbar.\n self.setWindowTitle(\"Robot Map\") # Set title.\n self.showFullScreen() # Make fullscreen.", "def setup_window(self, fullscreen, dual):\n cv2.startWindowThread()\n if fullscreen:\n cv2.namedWindow(self.wname, cv2.WINDOW_NORMAL)\n else:\n cv2.namedWindow(self.wname)\n cv2.namedWindow(self.wname)\n cv2.setWindowProperty(self.wname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n if dual:\n # Move is to make sure it's on the right monitor\n cv2.moveWindow(self.wname, 1920, 0)\n cv2.namedWindow(self.wname + ' Small View')\n cv2.resizeWindow(self.wname + ' Small View', 960, 540)", "def create_main_screen(self):\n\t\tself.blank_image = np.full((1280,1920, 3), 255, np.uint8)\n\t\tcv2.namedWindow(\"Background\", cv2.WND_PROP_FULLSCREEN)\n\t\tcv2.setWindowProperty(\"Background\", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\t\tcv2.imshow(\"Background\", self.blank_image)", "def _positionWindow(self):\n\t\tscreen = QtGui.QDesktopWidget().screenGeometry()\n\t\tself.setGeometry(1050, 275, 375, 350)\n\t\t# self.move( (-screen.width()/2)+200, -screen.height()/2 )", "def _prep_window(self, parent=None):\n self.toolkit.app.initialize()\n if not self.initialized:\n self.setup(parent)\n self.resize_to_initial()\n self.update_minimum_size()\n self.update_maximum_size()", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n if sys.version_info.major >= 3:\n return wrapInstance(int(main_window_ptr), QtWidgets.QWidget)\n else:\n return 
wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def main():\r\n root = tk.Tk()\r\n app = Home(root)\r\n root.geometry(app.resize())\r\n root.configure(background = jt.color_background)\r\n root.mainloop()", "def create_screen(self, width, height):", "def maya_main_window():\n main_window = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window), QtWidgets.QWidget)", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def _configureWindow(self):\n if self._win_type == WindowType.IMMERSIVE:\n pg.setConfigOptions(\n foreground='d',\n background=(_DARK_COLOUR if self._dark else _LIGHT_COLOUR))\n self._win = pg.plot(title=\"Abstact Map Visualisation\")\n self._plt = self._win.plotItem\n self._plt.setAspectLocked(True, 1)\n self._plt.hideAxis('left')\n self._plt.hideAxis('bottom')\n else: # DEFAULT\n pg.setConfigOptions(foreground='k', background='w')\n self._win = pg.plot(title=\"Abstact Map Visualisation\")\n self._plt = self._win.plotItem\n\n # Set up the overlay objects as they are static\n self._overlay_items = [\n QtWidgets.QGraphicsRectItem(-_OVERLAY_WIDTH / 2,\n -_OVERLAY_HEIGHT / 2, _OVERLAY_WIDTH,\n _OVERLAY_HEIGHT)\n ]\n self._overlay_items[0].setBrush(pg.mkBrush(_OVERLAY_COLOUR))\n self._overlay_items[0].setZValue(1000)\n self._win.addItem(self._overlay_items[0])\n self.toggleOverlay(enable=False)\n\n # Do any last settings in the window\n # self._win.parentWidget().showMaximized()\n limit = 30\n self._win.setRange(xRange=[-limit, limit], yRange=[-limit, limit])", "def _positionWindow(self):\n\t\tif sys.platform=='win32':\n\t\t\tself.setGeometry(1050, 30, 375, 220)\n\t\telse:\n\t\t\tself.setGeometry(1050, 0, 375, 220)\n\t\t# self.move( (-screen.width()/2)+200, -screen.height()/2 )", "def default_window():\n X = [0, .125, 1.4375, 1.5625, 2.9375, 3.0625, 4.4375, 4.5625, 5.875, 6.0]\n Y = [0, .125, 2.875, 3.0]\n Z = [0, .125]\n V, F = True, False\n occupancy = [\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]]\n ]\n return w7.window(X, Y, Z, occupancy)", "def __init_window(self) -> pygame.Surface:\n pygame.display.set_caption(CAPTION)\n win = pygame.display.set_mode((WIDTH, HEIGHT))\n \n return win", "def create_window(self, xoff: int, yoff: int, width: int, height: int, name: str = None):\n return Window(xoff, yoff, width, height, name)", "def _on_start(self):\n desktop = QtGui.QApplication.instance().desktop()\n available_geometry = desktop.screenGeometry(QtGui.QCursor().pos())\n self.setGeometry(available_geometry.x(), 0, 100, 100)", "def configure_window(self, width, height):\n self.configure_surface(width, height)", "def setup(self, width=_CFG[\"width\"], height=_CFG[\"height\"],\n startx=_CFG[\"leftright\"], starty=_CFG[\"topbottom\"]):\n if not hasattr(self._root, \"set_geometry\"):\n return\n sw = self._root.win_width()\n sh = self._root.win_height()\n if isinstance(width, float) and 0 <= width <= 1:\n width = sw*width\n if startx is None:\n startx = (sw - width) / 2\n if isinstance(height, float) and 0 <= height <= 1:\n height = sh*height\n if starty is None:\n starty = (sh - height) / 2\n 
self._root.set_geometry(width, height, startx, starty)\n self.update()", "def setupWindow(self):\n\n\t\tself.main_menu_window = MenuFrame.MainMenuFrame(self.uiCoordinator)\n\t\tself.menu_window = self.main_menu_window._mf\n\t\tself.score_window = self.main_menu_window._hf\n\t\tself.instructions_window = self.main_menu_window._if\n\t\tself.menu_window.playButton.focus_set()", "def init_window(self, size, screen=None):\n # enforce minimum size\n (mw, mh), (w, h) = config.minsize, size\n if w < mw or h < mh:\n size = mw, mh\n\n # init view surface and pass it to screen\n self.view = pygame.display.set_mode(size, pygame.RESIZABLE)\n self.view.fill((0, 0, 0))\n if screen is not None:\n screen.resize_view()", "def create(self, parent):\n self.widget = _QMainWindow(parent)", "def setUp(self):\r\n self.caption = \"mirra extending classes\" # window name\r\n self.size = 640, 480 #window size\r\n self.pos = 100,100 # window top left location\r\n self.fullScreen = 0 # if fullScreen is on it will overwrite your pos and size to match the display's resolution\r\n self.frameRate = 15 # set refresh framerate\r", "def main():\n app = QtWidgets.QApplication(sys.argv)\n main = MainWindow()\n main.show()\n sys.exit(app.exec_())" ]
[ "0.71360654", "0.66262525", "0.66070336", "0.65157807", "0.65085334", "0.65072685", "0.64285207", "0.64025426", "0.63241714", "0.627627", "0.610974", "0.6101933", "0.6099385", "0.60701615", "0.60571706", "0.60571706", "0.60571706", "0.6039301", "0.60290587", "0.5989896", "0.59836096", "0.5982593", "0.59800214", "0.59736586", "0.5951508", "0.5931379", "0.5928133", "0.5915008", "0.59116197", "0.58930546" ]
0.77146345
0
Creates a MainWindow with a BrowserTabWidget.
def create_main_window_with_browser(): main_win = create_main_window() return main_win.add_browser_tab()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(self, parent):\n self.widget = _QMainWindow(parent)", "def createWindow(self, type):\n # this = Browser(self.url())\n # this.show()\n\n self.popup = SequanixQWebView(**self.kwargs)\n self.popup.setObjectName(\"web_content\")\n self.popup.setWindowTitle(\"Sequana browser\")\n self.popup.page().windowCloseRequested.connect(self.popup.close)\n self.popup.show()\n return self.popup", "def create_main_window():\n main_win = MainWindow()\n main_windows.append(main_win)\n available_geometry = app.desktop().availableGeometry(main_win)\n main_win.resize(available_geometry.width() * 2 / 3,\n available_geometry.height() * 2 / 3)\n main_win.show()\n return main_win", "def new_tab(self, widget):\n print('new tab added')\n name = Gtk.Buildable.get_name(widget)\n if name == 'new':\n param = 'create'\n elif name == 'button_open':\n param = 'open'\n else:\n param = 'create'\n self.notebook.append_page(*self.create_tab(param))\n self.notebook.show_all()", "def new_tab_with_webview (self, webview):\n self.tabs._construct_tab_view(webview)", "def load_browser():\n QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)\n if hasattr(QtWidgets.QStyleFactory, \"AA_UseHighDpiPixmaps\"):\n QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)\n app = QtWidgets.QApplication(sys.argv)\n ui = load_ui()\n ui.show()\n app.exec_()", "def createTabs(self):\r\n self.tab1 = QWidget()\r\n self.tab2 = QWidget()\r\n self.tab3 = QWidget()\r\n self.tab4 = QWidget()\r\n self.tab5 = QWidget()\r\n self.tab6 = QWidget()\r\n self.tab7 = QWidget()\r\n self.tab8 = QWidget()\r\n self.addTab(self.tab1, \"Registro\")\r\n self.addTab(self.tab2, \"Base de Datos\")\r\n self.addTab(self.tab3, \"Ingresos\")\r\n self.addTab(self.tab4, \"Compras\")\r\n self.addTab(self.tab5, \"Gastos\")\r\n self.addTab(self.tab6, \"Res. Diarios\")\r\n self.addTab(self.tab7, \"Res. Mensuales\")\r\n self.addTab(self.tab8, \"Res. 
Anuales\")", "def new_tab (self, url = None, key = None):\n # create the tab content\n wv = WV(key)\n #if url: wv.open(url)\n self._construct_tab_view(wv, url)\n return wv", "def create(self, parent):\n self.widget = QFrame(parent)", "def createWidgets(self):\n self.tab = WorkspaceTab( self )\n self.tab.setMinimumWidth(500)\n self.tab.setDocumentMode( False )\n self.tab.setMovable( True )\n\n # self.dockToolbar = QToolBar(self)\n # self.dockToolbar.setOrientation(Qt.Vertical)\n\n self.findWidget = FindReplace(self)\n self.findWidget.setDisabled(True)\n self.findWidget.hide()\n\n layout = QVBoxLayout()\n layout.addWidget( self.tab )\n layout.setContentsMargins(0,0,0,0)\n\n \n self.setLayout(layout)", "def main():\n app = QtWidgets.QApplication(sys.argv)\n main = MainWindow()\n main.show()\n sys.exit(app.exec_())", "def __init__ (self,\n win, \n default_url = None, \n default_title = None,\n hp = Gtk.PolicyType.NEVER,\n vp = Gtk.PolicyType.AUTOMATIC):\n Gtk.HPaned.__init__(self)\n self.win = win\n self.tabs = Gtk.Notebook()\n self.sidepane = Gtk.Notebook()\n self.add1(self.sidepane)\n self.add2(self.tabs)\n self.sidepane.set_show_tabs(False)\n self.tabs.set_scrollable(True)\n self.default_url = default_url\n self.default_title = default_title\n self.hp = hp\n self.vp = vp\n self.tabs.props.scrollable = True\n #self.tabs.props.homogeneous = True\n self.tabs.connect(\"switch-page\", self._switch_page)\n\n self.show_all()\n self._hovered_uri = None", "def create_main(self):\n self.frame = wxMediatorMainFrame(self)", "def handle_new_window(event):\n url = event.GetURL()\n webbrowser.open(url)", "def make_tab(self, root):\n self.frame = Frame(root)\n self.make_entry(self.frame)\n self.make_display(self.frame)\n return self.frame", "def createDefaultTab(self):\n self.welcomePage = WelcomePage(self)\n tabId = self.tab.addTab(self.welcomePage, \"\" )\n self.tab.setTabIcon(tabId, QIcon(\":/main.png\") )\n\n self.welcomePage.LinkConnect.connect(self.onConnectLinkClicked)\n self.welcomePage.LinkDisconnect.connect(self.onDisconnectLinkClicked)\n self.welcomePage.LinkTux.connect(self.newTestUnit)\n self.welcomePage.LinkTsx.connect(self.newTestSuite)\n self.welcomePage.LinkTpx.connect(self.newTestPlan)\n self.welcomePage.LinkTgx.connect(self.newTestGlobal)\n self.welcomePage.LinkMacro.connect(self.onMacroLinkClicked)\n self.welcomePage.LinkBasicMacro.connect(self.onBasicMacroLinkClicked)\n self.welcomePage.LinkWebMacro.connect(self.onWebMacroLinkClicked)\n self.welcomePage.LinkMobileMacro.connect(self.onMobileMacroLinkClicked)\n self.welcomePage.OpenWeb.connect(self.onOpenWebsite)\n self.welcomePage.OpenProductWeb.connect(self.onOpenProductWebsite)\n self.welcomePage.LinkSysMacro.connect(self.onSysMacroLinkClicked)\n self.welcomePage.LinkPlugin.connect(self.onPluginLinkClicked)", "def show_browser(self) -> None:\n\n # set delete = False to avoid early delete when user open multiple plots.\n with NamedTemporaryFile(suffix=\".html\", delete=False) as tmpf:\n pass\n with open(tmpf.name, \"w\") as file:\n file.write(self.template_base.render(context=self.context))\n webbrowser.open_new_tab(f\"file://{tmpf.name}\")", "def mayaMainWindow():\n OpenMayaUI.MQtUtil.mainWindow()\n ptr = OpenMayaUI.MQtUtil.mainWindow()\n\n return wrapInstance(long(ptr), QtWidgets.QWidget)", "def mayaMainWindow():\n OpenMayaUI.MQtUtil.mainWindow()\n ptr = OpenMayaUI.MQtUtil.mainWindow()\n\n return wrapInstance(long(ptr), QtWidgets.QWidget)", "def __create_ui(self):\n vbox = gtk.VBox()\n\n # Create the viewable area of the file 
browser\n self.__view_port = gtk.ScrolledWindow()\n self.__view_port.set_policy(gtk.POLICY_AUTOMATIC,\n gtk.POLICY_AUTOMATIC)\n # Create the tree view and add it to the viewable area\n self.__tree_view = ProjectTreeView()\n self.__project_explorer = ProjectExplorer(self.window, self.__tree_view)\n self.__tree_view.connect('button_press_event',\n self.__on_treeview_button_press_event)\n self.__project_explorer.set_repository()\n self.__view_port.add(self.__tree_view)\n # Create the toolbar\n hbox = gtk.HBox()\n toolbar = gtk.Toolbar()\n toolbar.set_style(gtk.TOOLBAR_ICONS)\n toolbar.set_icon_size(gtk.ICON_SIZE_MENU)\n back = gtk.ToolButton(gtk.STOCK_GO_UP)\n back.connect('clicked', self.__on_back_clicked)\n toolbar.insert(back, 0)\n toolbar.insert(gtk.SeparatorToolItem(), 1)\n refresh = gtk.ToolButton(gtk.STOCK_REFRESH)\n refresh.connect('clicked', self.__on_refresh_clicked)\n toolbar.insert(refresh, 2)\n hbox.pack_start(toolbar, True, True, 0)\n vbox.pack_start(hbox, False, False, 0)\n vbox.pack_start(self.__view_port, True, True, 0)\n\n # Setup the create the buttons for:\n # New File, New Folder\n # ----------------------------------------------------------------------\n hbox1 = gtk.VBox()\n toolbar_actions = gtk.Toolbar()\n toolbar_actions.set_style(gtk.TOOLBAR_ICONS)\n toolbar_actions.set_icon_size(gtk.ICON_SIZE_MENU)\n new_file = gtk.ToolButton(gtk.STOCK_NEW)\n new_file.connect('clicked', self.__on_new_file_clicked_cb)\n toolbar_actions.insert(new_file, 0)\n new_dir = gtk.ToolButton(gtk.STOCK_OPEN) # TODO: use a custom icon\n new_dir.connect('clicked', self.__on_new_dir_clicked_cb)\n toolbar_actions.insert(new_dir, 1)\n hbox1.pack_start(gtk.HSeparator(), True, True, 0)\n hbox1.pack_start(toolbar_actions, True, True, 0)\n vbox.pack_end(hbox1, False, False, 0)\n # ----------------------------------------------------------------------\n vbox.show_all()\n # Attach the project explorer to GMate's side panel\n self.__side_panel = self.window.get_side_panel()\n self.__side_panel.add_tab(vbox, msg0005, gtk.STOCK_HARDDISK)", "def __init__(self, main_win, parent=None):\n super(Tabs, self).__init__(parent)\n self.main_win = main_win\n\n if self.main_win.beamline is not None:\n try:\n beam = importlib.import_module('beamlines.' + self.main_win.beamline + '.beam_tabs')\n except Exception as e:\n print(e)\n msg_window('cannot import beamlines.' + self.main_win.beamline + ' module')\n raise\n self.prep_tab = beam.PrepTab()\n self.format_tab = DataTab()\n self.rec_tab = RecTab()\n self.display_tab = beam.DispTab()\n self.tabs = [self.prep_tab, self.format_tab, self.rec_tab, self.display_tab]\n else:\n self.format_tab = DataTab()\n self.rec_tab = RecTab()\n self.tabs = [self.format_tab, self.rec_tab]\n\n for tab in self.tabs:\n self.addTab(tab, tab.name)\n tab.init(self, main_win)", "def _create_window(self):\n self.window = Gtk.Window()\n self.window.set_title(\"Yapsy Example\")\n self.window.set_default_size(400, 400)\n self.window.connect(\"destroy\", lambda w: Gtk.main_quit())\n # PluginList() is a composite widget that shows all installed plugins\n # in a Gtk.TreeView. 
See widgets.py\n self._plugin_list = PluginList(self.window)\n box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n box.pack_start(self._plugin_list, True, True, 0)\n box.show_all()\n self.window.add(box)", "def create_browser():\n browser = selenium.webdriver.Chrome()\n return browser", "def __init__(self, inWindowTitleStr):\n super(MainWindow, self).__init__()\n self._mainWorkspace = None\n\n self.setWindowTitle(inWindowTitleStr)\n self.setGeometry(500, 100, 700, 900)\n\n self.mainWorkspace = workspace.WorkSpace(parent=self)", "def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n \n self.setWindowTitle(\"RPI HMI - pH Debug\") # Title creation", "def __init__(self, parent, html_file, js_server_call_fn):\n super(BrowserWidget, self).__init__()\n\n self.parent = parent\n self.view = WebViewEx(self)\n self.view.setPage(WebPage()) #ensure we can see javascript errros\n self.connection = ServerConnection(js_server_call_fn)\n self.setMaximumHeight(100000)\n \n #seems we need absolute paths in the html file for QtWebView to work !?\n self.view.setUrl(QtCore.QUrl.fromLocalFile(html_file))\n \n #make the connection back to the server...\n self.frame = self.view.page().mainFrame()\n self.frame.addToJavaScriptWindowObject('server_connection', self.connection)\n \n #self._sizeHint = QtCore.QSize(600,800)\n \n #adjust the size policy\n self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)", "def __init__(self, parent):\r\n\r\n pre = wx.PrePyWindow()\r\n \r\n self._tabs = None\r\n self._rect = wx.Rect(0, 0, 200, 200)\r\n self._tab_ctrl_height = 20\r\n self._tab_rect = wx.Rect() \r\n self._parent = parent\r\n \r\n self.PostCreate(pre)", "def run(cls):\n app = mayaMainWindow()\n \n widget = cls(parent=app)\n\n widget.setWindowFlags(widget.windowFlags() | QtCore.Qt.Window)\n\n if platform.system() == 'Darwin':\n # MacOS is special, and the QtCore.Qt.Window flag does not sort the windows properly,\n # so QtCore.Qt.Tool is added.\n widget.setWindowFlags(widget.windowFlags() | QtCore.Qt.Dialog)\n \n # Center the widget with Maya's main window.\n widget.move(app.frameGeometry().center() - QtCore.QRect(QtCore.QPoint(), widget.sizeHint()).center())\n \n widget.show()\n\n return widget", "def newwindow(url):\n\n # Open the URL\n webbrowser.open_new(url)", "def maya_main_window():\n main_window = omui.MQtUtil.mainWindow()\n return shiboken2.wrapInstance(long(main_window), PySide2.QtWidgets.QWidget)" ]
[ "0.68949765", "0.6478098", "0.6435696", "0.6413921", "0.62903494", "0.62232417", "0.6180552", "0.6119633", "0.61178416", "0.6103711", "0.60432786", "0.60425925", "0.59410733", "0.59396756", "0.5931825", "0.59298074", "0.58862966", "0.5838804", "0.5838804", "0.58258635", "0.581818", "0.5815363", "0.5807326", "0.5777566", "0.575804", "0.575041", "0.5747316", "0.57337755", "0.57215", "0.5713269" ]
0.7908725
0
Parse notification from page
def _parse_notification(self, node): if not node: return [] messages = [] notify_node = node.find('div', class_='notify') if notify_node: for p in notify_node.select('p.notification'): messages.append(p.get_text()) return messages
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getInfo(notification):", "def parse_atwho(my_text):\n notification_list = []\n \n #markdown_link_regex = re.compile(\"\\[.*?\\] \\((.*?) \\\".*?\\\"\\)\", re.IGNORECASE) # need to test this.\n markdown_link_regex = re.compile(\"\\[([^\\]]+)\\]\\(([^)\\\"]+)(?: \\\\\\\"([^\\\\\\\"]+)\\\\\\\")?\\)\", re.IGNORECASE)\n direct_link_regex = re.compile(\"data-notify=\\\\\\\"([^\\\\\\\"]+)\\\\\\\"\", re.IGNORECASE)\n link_list = [i[2] for i in markdown_link_regex.findall(my_text)]\n link_list += [i for i in direct_link_regex.findall(my_text)]\n\n for i in link_list:\n _type, _id = i.split(\"#\", 1)\n if _type == \"user\":\n notification_list.append(User.objects.get(id=_id))\n elif _type == \"subdept\":\n notification_list.append(Subdept.objects.get(id=_id))\n elif _type == \"dept\":\n notification_list.append(Dept.objects.get(id=_id))\n elif _type == \"page\":\n notification_list.append(Page.objects.get(id=_id))\n\n return my_text, notification_list", "def _parse_message(self, soup):\n kind, = soup.attrs[u'class']\n title = soup.findChild().text\n body = ''.join(t.text for t in soup.findChildren()[1:])\n message = dict(kind=kind, title=title, body=body)\n for val in message.values():\n assert type(val) == str\n return message", "def _parse_notice_documents(self, response):\n notice_documents = []\n for doc in response.css('article.full a[href]'):\n doc_text = doc.css('*::text').extract_first()\n if 'mailto' in doc.attrib['href'] or 'flyer' in doc_text.lower():\n continue\n notice_documents.append({\n 'url': 'http://{}{}'.format(self.allowed_domains[0], doc.attrib['href']),\n 'note': doc_text,\n })\n return notice_documents", "def _parse_user_messages_page(self, html):\n\n if not html:\n return None\n\n dom = BeautifulSoup(html, 'html.parser')\n\n data = self._parse_logged_in_user(dom)\n\n return data", "def _parse_notice(self, response):\n notice_documents = self._parse_notice_documents(response)\n meetings_list = []\n for meeting in response.meta.get('upcoming', []):\n # Check if the meeting date is in any document title, if so, assign docs to that meeting\n meeting_date_str = '{dt:%B} {dt.day}'.format(dt=meeting['start']['date'])\n if any(meeting_date_str in doc['note'] for doc in notice_documents):\n meetings_list.append({\n **meeting, 'documents': notice_documents,\n 'sources': [{\n 'url': response.url,\n 'note': ''\n }]\n })\n else:\n meetings_list.append({**meeting, 'documents': []})\n return meetings_list", "def parse_notifications(notifications):\n\n result = {}\n started_at = 0\n finished_at = 0\n\n for n in notifications:\n traits = n[\"traits\"]\n\n def find_field(f_name):\n return [t[\"value\"] for t in traits if t[\"name\"] == f_name][0]\n\n trace_id = find_field(\"trace_id\")\n parent_id = find_field(\"parent_id\")\n name = find_field(\"name\")\n project = find_field(\"project\")\n service = find_field(\"service\")\n host = find_field(\"host\")\n timestamp = find_field(\"timestamp\")\n\n timestamp = datetime.datetime.strptime(timestamp,\n \"%Y-%m-%dT%H:%M:%S.%f\")\n\n if trace_id not in result:\n result[trace_id] = {\n \"info\": {\n \"name\": name.split(\"-\")[0],\n \"project\": project,\n \"service\": service,\n \"host\": host,\n },\n \"trace_id\": trace_id,\n \"parent_id\": parent_id,\n }\n\n result[trace_id][\"info\"][\"meta.raw_payload.%s\" % name] = n.get(\n \"raw\", {}).get(\"payload\", {})\n\n if name.endswith(\"stop\"):\n result[trace_id][\"info\"][\"finished\"] = timestamp\n else:\n result[trace_id][\"info\"][\"started\"] = timestamp\n\n if not started_at or 
started_at > timestamp:\n started_at = timestamp\n\n if not finished_at or finished_at < timestamp:\n finished_at = timestamp\n\n def msec(dt):\n # NOTE(boris-42): Unfortunately this is the simplest way that works in\n # py26 and py27\n microsec = (dt.microseconds + (dt.seconds + dt.days * 24 * 3600) * 1e6)\n return int(microsec / 1000.0)\n\n for r in result.values():\n # NOTE(boris-42): We are not able to guarantee that ceilometer consumed\n # all messages => so we should at make duration 0ms.\n if \"started\" not in r[\"info\"]:\n r[\"info\"][\"started\"] = r[\"info\"][\"finished\"]\n if \"finished\" not in r[\"info\"]:\n r[\"info\"][\"finished\"] = r[\"info\"][\"started\"]\n\n r[\"info\"][\"started\"] = msec(r[\"info\"][\"started\"] - started_at)\n r[\"info\"][\"finished\"] = msec(r[\"info\"][\"finished\"] - started_at)\n\n return {\n \"info\": {\n \"name\": \"total\",\n \"started\": 0,\n \"finished\": msec(finished_at - started_at) if started_at else 0\n },\n \"children\": _build_tree(result)\n }", "def parse(self, response):", "def parse_and_alert(self):\n self.parse_feed()\n self.alert_new_posts()", "def parse(self):\n\t\tfor part in self.mail.walk():\n\t\t\tself.process_part(part)", "def _parse_events(self, html):\n print \"Parse events\"\n data = []\n soup = BeautifulSoup(html, \"html.parser\")\n events = soup.find_all(\"div\", {\"class\": \"program clearfix\"})\n \"\"\" Site's html is broken. We have to handle descriptions \n with a hack.\n \"\"\"\n descriptions = soup.find_all(\"div\", {\"class\": \"programpostingress\"})\n for index, event in enumerate(events):\n link_tag = event.find(\"a\")\n if link_tag:\n link = link_tag[\"href\"]\n else:\n link = None\n dates = self._parse_date(self._parse_text(event.find(\"span\", {\"class\": \"programpostdato\"})))\n row = {\n \"title\": self._parse_text(event.find(\"span\", {\"class\": \"programposttittel\"})),\n \"date_start\": dates[\"start\"],\n \"date_end\": dates[\"end\"],\n \"description\": self._parse_text(descriptions[index]),\n \"link\": link,\n \"country\": \"Norge\"\n }\n data.append(row)\n print \"Found %s events\" % len(data)\n return data", "def parse_gretchens_notes(email_payload: str\n ) -> Tuple[List[Activity], List[Nap]]:\n print('start parsing email')\n # TODO: Find a clever way to detect time zone\n time_zone = pytz.timezone('US/Eastern')\n\n payload = _remove_line_breaks(email_payload)\n\n # get document attributes\n child_name_re = re.search(\"class=\\\"heading-name\\\">(.*)'s Daily Note<\",\n payload)\n if child_name_re:\n child_name = child_name_re.group(1)\n else:\n raise ValueError(\"Could not find child's name\")\n\n date_re = re.search(\"class=\\\"heading-date\\\">(.*?)<\",\n payload)\n if date_re:\n date_str = re.sub('(rd|st|th|nd)', '', date_re.group(1))\n date_py = datetime.strptime(date_str, '%B %d, %Y')\n date = date_py.strftime('%Y-%m-%d')\n else:\n raise ValueError(\"Could not find date\")\n\n # get activities and notes\n act_split = payload.split('class=\"activity-middle activity-name\">')\n re_begin = re.compile(\"^(.*?)</td>\")\n re_result = re.compile(\"class=\\\"activity-middle activity-result\\\">\"\n \"(.*?)</td>\")\n re_note = re.compile(\"class=\\\"activity-middle activity-notes\\\">\"\n \"(.*?)</td>\")\n # (lower case) headings without time\n non_time_headings = ['note', 'supplies']\n activities = []\n for act_str in act_split[1:]:\n activity_name = re_begin.search(act_str).group(1)\n print('ACTIVITY:', activity_name)\n\n # remove out unwanted stuff\n act_str = re_begin.sub('', act_str)\n 
act_str = act_str.replace('</body></html>', '')\n\n if activity_name.lower() not in non_time_headings:\n\n act_sub_split = act_str.split(\n \"class=\\\"activity-left activity-time\\\">\"\n )\n for act_sub in act_sub_split[1:]:\n time_str = re_begin.search(act_sub).group(1)\n # parse this time\n py_time = datetime.strptime(time_str, '%I:%M%p')\n if date_py is None:\n e_str = 'Activity time found before date?'\n raise ValueError(e_str)\n activity_time = _make_iso_time(py_time,\n date_py,\n time_zone)\n print('Activity time:', activity_time)\n # result\n activity_result = re_result.search(act_sub).group(1)\n print('Activity result:', activity_result)\n activity_note_re = re_note.search(act_sub)\n if activity_note_re:\n activity_note = activity_note_re.group(1)\n print('Activity note:', activity_note)\n else:\n activity_note = None\n\n activities.append(Activity(first_name=child_name,\n date=date,\n activity=activity_name,\n datetime=activity_time,\n result=activity_result,\n notes=activity_note))\n\n else:\n # notes are split by result, not time\n act_sub_split = act_str.split(\n \"class=\\\"activity-middle activity-result\\\">\"\n )\n activity_time = None\n for act_sub in act_sub_split[1:]:\n activity_result = re_begin.search(act_sub).group(1)\n print('Note result:', activity_result)\n activity_note_re = re_note.search(act_sub)\n if activity_note_re:\n activity_note = activity_note_re.group(1)\n print('Note note:', activity_note)\n else:\n activity_note = None\n activities.append(Activity(first_name=child_name,\n date=date,\n activity=activity_name,\n datetime=activity_time,\n result=activity_result,\n notes=activity_note))\n\n print('---')\n\n # parse naps\n re_nap = re.compile('([0-9]+:[0-9]+ (AM|PM)) - ([0-9]+:[0-9]+ (AM|PM))')\n naps = []\n for act in activities:\n if act.activity.upper() == 'NAP':\n re_nap_search = re_nap.search(act.result)\n if re_nap_search is None:\n e_str = 'No nap time found in string: {}'.format(act.result)\n raise ValueError(e_str)\n nap_start, nap_end = re_nap_search.group(1), re_nap_search.group(3)\n nap_start_time = _make_iso_time(\n datetime.strptime(nap_start, '%I:%M %p'),\n date_py,\n time_zone\n )\n nap_end_time = _make_iso_time(\n datetime.strptime(nap_end, '%I:%M %p'),\n date_py,\n time_zone\n )\n naps.append(Nap(child_name,\n nap_start_time,\n nap_end_time))\n return activities, naps", "def parse(self, response: BeautifulSoup):\n raise NotImplementedError", "def parse(message):\n html = render(message['text'])\n\n return html", "def pull(self):\n \n data = self.s.recv(1024)\n if data:\n info = ET.fromstring(data)\n info = { info[0].tag : info[0].text, info[1].tag : info[1].text}\n #print(info)\n \n return info.get(\"topic\"), info.get(\"value\")\n pass", "def _parse(self):\n soup = BS(self._current_html, 'lxml')\n for item in soup.select('div.c'):\n temp = {}\n # main content\n ctt = item.select('span.ctt')\n if not ctt:\n continue\n weibo_body = item.select('div')\n if len(weibo_body) > 1:\n temp['content'] = weibo_body[0].text\n btn_group = weibo_body[1].text\n else:\n temp['content'] = weibo_body[0].select('span.ctt')[0].text\n btn_group = weibo_body[0].text\n temp['is_repost'] = True if REPO_TEST_PATTERN.match(\n temp['content']) else False\n try:\n temp['like_num'] = LIKE_NUM_PATTERN.findall(btn_group)[0]\n temp['cmt_num'] = COMMENT_NUM_PATTERN.findall(btn_group)[0]\n temp['repo_num'] = REPO_NUM_PATTERN.findall(btn_group)[0]\n except Exception:\n pass\n cmt = item.select('.cmt')\n # visibility\n if cmt:\n try:\n temp['visibility'] = 
VISIBILITY_PATTERN.findall(\n cmt[0].text)[0]\n except Exception:\n pass\n\n # img in main content\n img = item.select('div a img')\n img_src = img[0].attrs['src'] if img else None\n temp['img_src'] = img_src\n LOGGER.debug('img_src: {}'.format(img_src))\n # time & source device\n ct = item.select('span.ct')\n if ct:\n ct = ct[0]\n text = ct.text\n reg_result = TIME_PATTERN.findall(text)[0]\n\n temp['time'] = ar(\n '{}年{}'.format(self._current_year, reg_result[0]),\n DATE_FMTS[0]\n ).naive if reg_result[0] else ar(\n reg_result[1], DATE_FMTS[1]\n ).naive\n temp['source'] = SOURCE_DEVICE_PATTERN.findall(text)[0]\n self._post_item = Post(**temp)\n self._attachment_item = Attachment(\n uri=img_src, post=self._post_item)\n self._store()", "def parse(self, response):\n events = response.css(\n \"div.column.scroll-item.is-one-third-tablet.is-full-mobile\"\n )\n for event in events:\n if \"Friday Nights\" in event.css(\"span.card-title.h4 span::text\").get():\n datetimes = event.css(\"div.card-text.card-subhead span::text\").get()\n start_datetime, end_datetime = self.format_datetimes(datetimes)\n if start_datetime >= datetime.now() and start_datetime < datetime.now() + timedelta(\n weeks=4\n ):\n # the link is relative\n event_link = event.css(\"div.card-image a\").attrib[\"href\"]\n full_url = self.domain + event_link\n yield SplashRequest(\n url=full_url,\n callback=self.parse_event,\n method=\"GET\",\n endpoint=\"execute\",\n args={\"wait\": 15.0, \"lua_source\": self.lua_script},\n cb_kwargs={\n \"start_datetime\": start_datetime,\n \"end_datetime\": end_datetime,\n },\n )", "def parse_article_html(page_resp):\n article_url = page_resp.url\n \n article_page_soup = bs4.BeautifulSoup(page_resp.text, \"lxml\")\n \n title_html = article_page_soup.find_all(\"h1\")[0]\n title_text = title_html.contents[0]\n \n date = article_page_soup.find_all(\"small\", {'class': 'gray'})[0]\n date_text = date.contents[4].replace(\" \", \"\").split(\"\\n\")[3][:10]\n \n article_content = article_page_soup.find_all(\"div\", {'class': 'rich_media_content'})[0]\n article_text = article_content.get_text('\\n')\n is_original = check_if_original(article_content) or '[原创]' in title_text\n \n return {\n 'title': title_text,\n 'date': date_text,\n 'url': article_url,\n 'is_original': is_original,\n 'text': article_text\n \n}", "def parse(content):\n soup = BeautifulSoup(content, 'html.parser')\n if soup.article is None:\n return None\n period = parse_event_period(soup)\n reason = identify_reason(soup)\n if reason is False:\n return None\n return (period, reason)", "def parse(self, message: Message):\n\t\tpass", "def __process_notification_events(self, notification_event):\n if notification_event is not None:\n if isinstance(notification_event, NotificationEvent):\n print(notification_event.message)", "def parse(self, response):\n if \"Calendar-and-Events\" in response.url:\n return self.parse_event_list(response)\n elif \"/events/\" in response.url:\n return self.parse_event_page(response)\n else:\n return self.parse_documents_page(response)", "def get_user_notifications(self, login):", "def handleNotification(self, notification):\n pass", "def _generate_notification_response(notification, next_page):\n response = {\n 'user_name': notification.sender.get_full_name(),\n 'user_url': reverse('user-profile',\n args=[notification.sender_id]),\n 'user_avatar': notification.sender.userprofile.thumbnail.url,\n 'message': MESSAGES[notification.notification_type],\n 'time_since': timesince(notification.created_at),\n 'next': 
next_page \n }\n if (notification.notification_type == \"thank\" or \n notification.notification_type == \"agree\"):\n item_type = notification.item.item_type\n response['item_name'] = getattr(notification.item, item_type).__str__()\n response['item_url'] = reverse(item_type+'-profile',\n args=[slugify(response['item_name'])])\n if notification.notification_type == \"thank\":\n response['thank_you'] = escape(notification.note)\n \n return response", "def parse_entry(msg):\n values = msg.split(';')\n return {\n 'dt': datetime.strptime(\n values[0], '%Y-%m-%d %H:%M:%S.%f'),\n 'event': values[1]\n }", "def parse_page(self, page):\n if self.domain == extract_domain(page[\"url\"]) and page[\"valid_content_type\"]:\n parent = page[\"url\"]\n parser = Parser(self.config)\n links = parser.feed_me(page[\"data\"])\n new_links = [x for x in links if x not in self.visited]\n full_links = [parse.urljoin(parent, l) for l in new_links]\n for l in full_links:\n if l not in self.visited:\n li = {\"parent\": parent, \"url\": l}\n self.TO_PROCESS.put(li)", "def parse_url(url):\n results = NotifyBase.parse_url(url, verify_host=False)\n if not results:\n # We're done early as we couldn't load the results\n return results\n\n # Store our access code\n access_token = NotifyStreamlabs.unquote(results['host'])\n results['access_token'] = access_token\n\n # call\n if 'call' in results['qsd'] and results['qsd']['call']:\n results['call'] = NotifyStreamlabs.unquote(\n results['qsd']['call'].strip().upper())\n # donation - amount\n if 'amount' in results['qsd'] and results['qsd']['amount']:\n results['amount'] = NotifyStreamlabs.unquote(\n results['qsd']['amount'])\n # donation - currency\n if 'currency' in results['qsd'] and results['qsd']['currency']:\n results['currency'] = NotifyStreamlabs.unquote(\n results['qsd']['currency'].strip().upper())\n # donation - name\n if 'name' in results['qsd'] and results['qsd']['name']:\n results['name'] = NotifyStreamlabs.unquote(\n results['qsd']['name'].strip().upper())\n # donation - identifier\n if 'identifier' in results['qsd'] and results['qsd']['identifier']:\n results['identifier'] = NotifyStreamlabs.unquote(\n results['qsd']['identifier'].strip().upper())\n # alert - alert_type\n if 'alert_type' in results['qsd'] and results['qsd']['alert_type']:\n results['alert_type'] = NotifyStreamlabs.unquote(\n results['qsd']['alert_type'])\n # alert - image_href\n if 'image_href' in results['qsd'] and results['qsd']['image_href']:\n results['image_href'] = NotifyStreamlabs.unquote(\n results['qsd']['image_href'])\n # alert - sound_href\n if 'sound_href' in results['qsd'] and results['qsd']['sound_href']:\n results['sound_href'] = NotifyStreamlabs.unquote(\n results['qsd']['sound_href'].strip().upper())\n # alert - duration\n if 'duration' in results['qsd'] and results['qsd']['duration']:\n results['duration'] = NotifyStreamlabs.unquote(\n results['qsd']['duration'].strip().upper())\n # alert - special_text_color\n if 'special_text_color' in results['qsd'] \\\n and results['qsd']['special_text_color']:\n results['special_text_color'] = NotifyStreamlabs.unquote(\n results['qsd']['special_text_color'].strip().upper())\n\n return results", "def parse(self, content):\n pass", "def parse_message(self, message):\n pass" ]
[ "0.64302725", "0.64142305", "0.6221922", "0.6151411", "0.5999759", "0.58872384", "0.5816123", "0.57522553", "0.5707328", "0.5624357", "0.56066173", "0.55695975", "0.5541741", "0.55331326", "0.55007076", "0.54943013", "0.5464355", "0.5452439", "0.5447044", "0.5440226", "0.5401231", "0.5390505", "0.536032", "0.5350128", "0.5312367", "0.52961135", "0.5290236", "0.5277617", "0.5277609", "0.52112204" ]
0.72214854
0
Convert comment data JSON for mobile display
def _convert_comment(self, data): output = {} output['id'] = int(data['id']) output['author'] = data['user_display_name'] output['profile_url'] = data['user_url'] output['date'] = data['comment_added_at'] output['date_ago'] = timeago.format(self._parse_datetime(data['comment_added_at']), datetime.now(TIMEZONE)) output['content'] = self.convert_content(data['html'].replace('\n', '')) output['is_deletable'] = data['is_deletable'] output['is_editable'] = data['is_editable'] return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def comment_in_json(self):\n\t\tpass", "def comments(self):\n comments_url = self.data['comments_url']\n return json.load(urllib2.urlopen(comments_url))", "def comment_to_object(self, comment, post_author_id=None):\n # the message_tags field is different in comment vs post. in post, it's a\n # dict of lists, in comment it's just a list. so, convert it to post style\n # here before running post_to_object().\n comment = dict(comment)\n comment['message_tags'] = {'1': comment.get('message_tags', [])}\n\n obj = self.post_to_object(comment)\n if not obj:\n return obj\n\n obj['objectType'] = 'comment'\n\n match = self.COMMENT_ID_RE.match(comment.get('id', ''))\n if match:\n post_author, post_id, comment_id = match.groups()\n obj['url'] = self.comment_url(post_id, comment_id,\n post_author_id=post_author_id)\n obj['inReplyTo'] = [{'id': self.tag_uri(post_id)}]\n\n return self.postprocess_object(obj)", "def render(self, data, media_type=None, render_context=None):\n if type(data) != ReturnList:\n errors = data.get(\"error\", None)\n if errors != None:\n return super(CommentsRenderer, self).render(data)\n else:\n return json.dumps({\"comment\": data})\n else:\n return json.dumps({\"comments\": data, \"commentsCount\": len(data)})", "def make_comment_data(self, comment_id, parent_id=None, children=[]): # pylint: disable=W0102\n return make_minimal_cs_comment({\n \"id\": comment_id,\n \"parent_id\": parent_id,\n \"course_id\": str(self.course.id),\n \"thread_id\": self.thread_id,\n \"thread_type\": \"discussion\",\n \"username\": self.user.username,\n \"user_id\": str(self.user.id),\n \"created_at\": \"2015-06-03T00:00:00Z\",\n \"updated_at\": \"2015-06-03T00:00:00Z\",\n \"body\": \"Original body\",\n \"children\": children,\n })", "def get_comments(self,comments):\n all_comments = []\n for comment in comments:\n try :\n all_comments.append({\n 'comment':comment['data']['body'],\n 'score':comment['data']['score']\n })\n except: pass\n return all_comments", "def parse_comment(self, node):\n\n data = []\n\n if node is not None:\n comment_id_pattern = re.compile('comment-(\\d+)')\n for comment_node in node.find_all('div', class_='comment'):\n item = {}\n item['is_deletable'] = False\n item['is_editable'] = False\n \n comment_id_result = comment_id_pattern.search(comment_node.get('id'))\n if comment_id_result:\n item['id'] = int(comment_id_result.group(1))\n \n comment_body_node = comment_node.find('div', class_='comment-body')\n if comment_body_node is not None:\n item['content'] = ''\n for p in comment_body_node.find_all(recursive=False):\n if 'class' in p.attrs and 'author' in p['class']:\n item['author'] = p.get_text()\n item['profile_url'] = self.get_link(p.get('href'))\n author_id = self._parse_user_id_from_url(item['profile_url'])\n if self.userId == author_id:\n item['is_deletable'] = True\n item['is_editable'] = True\n elif 'class' in p.attrs and 'age' in p['class']:\n item['date'] = p.abbr['title']\n item['date_ago'] = timeago.format(self._parse_datetime(item['date']), datetime.now(TIMEZONE))\n elif 'class' in p.attrs and 'edit' in p['class']:\n continue\n elif p.name == 'form':\n continue\n else:\n item['content'] += str(p)\n\n data.append(item)\n\n return data", "def test_comments_structure(self):\n for comment in self.resp_json:\n assert type(comment) == dict", "def parse_data_details(comments, reason, asset, detailId):\n\n return {\n \"comments\": \" \".join(comments),\n \"reason\": \" \".join(reason),\n \"code\": asset[0],\n \"type\": asset[1],\n \"name\": asset[2],\n \"location\": 
asset[3],\n \"detailId\": detailId,\n }", "def get_comment_data(self, comment):\n # remove double spaces but not triple ones; we use triple spaces to split commenter and parent_commenter\n pattern = '(?<! ) {2}(?! )'\n comment = re.sub(pattern, ' ', comment).strip() # also strip leading and trailing spaces\n\n # get names\n ix = re.search('•', comment).span()[-1]\n names = [x.strip() for x in (comment[:ix]).strip().strip('•').split(' ')]\n try:\n commenter, parent_commenter = names\n except:\n commenter, parent_commenter = names[0], ''\n\n # handle deleted comments\n pattern = 'This comment was deleted.−+−+'\n commenter = commenter.replace(pattern, '').strip()\n \n # get post and upvotes\n comment_upvotes = comment[ix:].split('ago')[-1].strip(' ')\n ix = re.search('(see more)\\w+', comment_upvotes) # redefine ix as index that separates post message from post upvotes\n clean_comment, upvotes = comment_upvotes[:ix.span()[0]], comment_upvotes[ix.span()[0]:].replace('see more', '')\n\n # build dictionary\n d = dict(zip( ['commenter', 'parent_commenter', 'comment', 'upvotes']\n , [commenter, parent_commenter.strip(), clean_comment.strip(), upvotes.strip()]))\n\n return d", "def do_comment(self, data={}):\n\n try:\n comment = data['comment'] if 'comment' in data else ''\n post_type = data['post_type'] if 'post_type' in data else ''\n post_id = int(data['post_id']) if 'post_id' in data else ''\n\n if not comment or not post_type or not post_id:\n raise Exception('Invalid parameter')\n\n submit_comment_url = BASE_URL + 'post_comments/'\n response = self.request('POST', submit_comment_url, params={\n 'comment': comment, 'post_type': post_type, 'post_id': post_id\n })\n response = response.json()\n output = []\n for item in response:\n output.append(self._convert_comment(item))\n return output\n except Exception as e:\n Utils.log(traceback.format_exc())\n Utils.error(e.args[0])", "def to_api_object(self):\n visible_to = self.visibleTo.all()\n visible_to_list = []\n for visible in visible_to:\n visible_to_list.append(\"http://\"+visible.author_uid)\n\n # We only get the first 5 comments\n # Get the comments, be aware that comments might not be returned if the foreign author of the comment is unavailable\n comments_list = [comment.to_api_object() for comment in self.comment_set.all().order_by(\"-published\")[:5]]\n filtered_comments_list = [comment for comment in comments_list if 'error' not in comment['author']]\n\n\n\n return {\n \"title\": self.title,\n \"source\": self.source,\n \"origin\": self.origin,\n \"description\": self.description,\n \"contentType\": self.contentType,\n \"content\": self.content,\n \"author\": self.author.to_api_object(),\n \"categories\": [category.name for category in self.categories.all()],\n \"count\": len(filtered_comments_list),\n \"size\": self.size,\n \"next\": settings.HOST_URI + \"/posts/\" + str(self.id.hex) + \"/comments\",\n \"comments\": filtered_comments_list,\n \"published\": self.published,\n \"id\": str(self.id.hex),\n \"visibility\": self.visibility,\n \"visibleTo\": visible_to_list,\n \"unlisted\": self.unlisted\n }", "def get_formatted_comments(self, threshold=400):\n comments = IDossier(self).comments\n if comments:\n if threshold:\n comments = truncate_ellipsis(comments, threshold)\n return api.portal.get_tool(name='portal_transforms').convertTo(\n 'text/html', comments, mimetype='text/x-web-intelligent').getData()", "def make_parsed_comments(self):\n if not hasattr(self, 'separated_comments'):\n self.separated_comments = self.separate_comments()\n 
\n # build comments list of dictionaries, one dictionary for each article\n self.comments = []\n for self.separated_comment in self.separated_comments:\n try:\n comment_data = self.get_comment_data(self.separated_comment)\n self.comments.append(comment_data)\n except Exception as e:\n pass\n return self.comments", "def _comment():\r\n id = request.args.get('answer_id')\r\n per_page=current_app.config['FLASKY_ANSWERS_PER_PAGE']\r\n answer = Answer.query.get_or_404(id)\r\n page = request.args.get('page', type=int, default=1)\r\n comment =request.args.get('comment')\r\n if current_user.can(Permission.COMMENT) and comment is not None:\r\n comment = Comment(body=comment,\r\n author=current_user._get_current_object(),\r\n answer_id=id)\r\n db.session.add(comment)\r\n db.session.commit()\r\n page = -1\r\n if page == -1:\r\n page = answer.comments.count() / per_page\r\n pagination = Comment.query.order_by(Comment.timestamp).filter_by(answer_id=id).paginate(\r\n page,per_page=per_page,error_out=False\r\n )\r\n macro_comment = get_template_attribute(\"_comments.html\", \"render_comments\")\r\n macro_page = get_template_attribute(\"_page.html\", \"render_page\")\r\n comments = pagination.items\r\n return jsonify({'result': True,\r\n 'comment_html': macro_comment(comments),\r\n 'page_html':macro_page(pagination),\r\n 'comments_timestamp':[comment.timestamp for comment in comments],\r\n 'comments_id':[comment.id for comment in comments]\r\n })", "def convert_text_to_comment():\n\n\t#Get the text message sent by Twilio\n\tmessage = request.args.get('Body')\n\n\t#Set up the comment to send to box\n\tbox_comment = {'message' : message}\n\t\n\t#build the request to send to box\n\turl = \"https://api.box.com/2.0/files/%s/comments\" % os.environ['BOX_FILE']\n\theaders = {'Authorization' : 'BoxAuth api_key=%s&auth_token=%s' % (os.environ['BOX_API_KEY'], os.environ['BOX_AUTH_TOKEN'])}\n\n\t#send the request\n\tr = requests.post(url, data=json.dumps(box_comment), headers=headers)\n\treturn r.text", "def get_comment(self, comment_id, activity_id=None, activity_author_id=None):\n url = API_OBJECT_URL % comment_id\n return self.comment_to_object(json.loads(self.urlopen(url).read()),\n post_author_id=activity_author_id)", "def comments(self, media_id):\n\n url = \"https://api.instagram.com/v1/media/{0}/comments?access_token={1}\".format(media_id, self.access_token)\n request = requests.get(url)\n\n return request.json()", "def process_reddit_comment_file(f,\n output_folder):\n ## Output File\n if output_folder is not None:\n fname = os.path.basename(f).replace(\"comments.json\",\"processed.comments.json\")\n if not fname.endswith(\".gz\"):\n fname = fname + \".gz\"\n output_folder = output_folder.rstrip(\"/\")\n fname = f\"{output_folder}/{fname}\"\n if os.path.exists(fname):\n return fname\n ## Load Comment Data\n if f.endswith(\".gz\"):\n file_opener = gzip.open\n else:\n file_opener = open\n try:\n with file_opener(f, \"r\") as the_file:\n comment_data = json.load(the_file)\n except json.JSONDecodeError:\n with file_opener(f, \"r\") as the_file:\n comment_data = []\n for line in the_file:\n comment_data.append(json.loads(line))\n ## Check Data\n if len(comment_data) == 0:\n return None\n ## Transform into DataFrame\n comment_data = pd.DataFrame(comment_data).dropna(subset=[\"body\"])\n ## Tokenize Text\n comment_data[\"text_tokenized\"] = comment_data[\"body\"].map(tokenizer.tokenize)\n ## Add Meta\n comment_data[\"source\"] = f\n comment_data[\"entity_type\"] = \"comment\"\n comment_data[\"date_processed_utc\"] 
= int(datetime.utcnow().timestamp())\n ## Rename Columns and Subset\n comment_data.rename(columns = DB_SCHEMA[\"reddit\"][\"comment\"], inplace=True)\n comment_data = comment_data[list(DB_SCHEMA[\"reddit\"][\"comment\"].values())]\n ## Format Into JSON\n formatted_data = comment_data.apply(lambda row: row.to_json(), axis=1).tolist()\n formatted_data = list(map(lambda x: json.loads(x), formatted_data))\n ## Dump Processed Data (or return)\n if output_folder is None:\n return formatted_data\n else:\n with gzip.open(fname, \"wt\", encoding=\"utf-8\") as the_file:\n json.dump(formatted_data, the_file)\n return fname", "def default_comment_response_data(post, comment, user):\n # For some reason, the default values are different for staff and non-staff users\n if user.is_staff:\n user_dependent_defaults = {\"num_reports\": 0}\n else:\n user_dependent_defaults = {\"num_reports\": None}\n\n return {\n \"author_id\": user.username,\n \"author_name\": user.profile.name,\n \"author_headline\": user.profile.headline,\n \"comment_type\": \"comment\",\n \"created\": comment.created,\n \"deleted\": False,\n \"downvoted\": False,\n \"edited\": False,\n \"id\": comment.id,\n \"parent_id\": None,\n \"post_id\": post.id,\n \"profile_image\": image_uri(user.profile),\n \"removed\": False,\n \"score\": 1,\n \"subscribed\": False,\n \"upvoted\": False,\n \"text\": comment.text,\n **user_dependent_defaults,\n }", "def get_comment_jsons(article_id, cookie):\n url = \"https://seekingalpha.com/account/ajax_get_comments?id=%s&type=Article&commentType=\" % article_id\n r = safe_request(url, cookie)\n comments = []\n\n if r.status_code != 404:\n res = json.loads(r.text)\n for comment in res['comments'].values():\n c = Comment(article_id, comment)\n comments.append(c.json())\n comments.extend(map(lambda x: x.json(), c.get_children()))\n\n return comments", "def save_comment(data):\n data['comment_id'] = len(commentslist) + 1\n data['message'] =\"message\"\n data['author'] = \"author\"\n data['date_created'] = datetime.datetime.now()\n # save to list\n commentslist.append(data)", "def _generate_pr_comment_markdown(self, data):\n pass", "def json_format(data):\n return {\n 'Title': data[\"title\"],\n 'Publication date': data['pubDate'],\n 'News link': data['link'],\n 'Image link': data['media'],\n }", "def get_comments(self, resp):\n comments = CommentList()\n for value in resp['comments']:\n comment = Comment()\n comment.set_comment_id(value['comment_id'])\n comment.set_expense_id(value['expense_id'])\n comment.set_description(value['description'])\n comment.set_commented_by_id(value['commented_by_id'])\n comment.set_commented_by(value['commented_by'])\n comment.set_date(value['date'])\n comment.set_date_description(value['date_description'])\n comment.set_time(value['time'])\n comment.set_operation_type(value['operation_type'])\n comment.set_transaction_id(value['transaction_id'])\n comment.set_transaction_type(value['transaction_type'])\n comments.set_comments(comment)\n return comments", "def parse_comment(self, comment):\n created_utc = datetime.utcfromtimestamp(comment.created_utc).isoformat()\n\n if comment.author is not None:\n author = comment.author.name\n else:\n author = \"\"\n\n com_obj = CommentMessage(\n comment.id,\n comment.link_id,\n self.subreddit,\n author,\n created_utc,\n comment.body,\n comment.score,\n )\n\n return com_obj", "def save_comment(data):\n data['comment_id'] = len(commentslist) + 1\n data['date_created'] = datetime.datetime.now()\n # save to list\n commentslist.append(data)", "def 
to_internal_value(self, data):\n user_id = self.context['request'].user.id\n post_id = self.context['post_id']\n data.update({'user': user_id})\n data.update({'post': post_id})\n self.fields['user'] = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())\n self.fields['post'] = serializers.PrimaryKeyRelatedField(queryset=Post.objects.all())\n return super(CommentSerializer, self).to_internal_value(data)", "def __data_row_to_json(self, row):\n raw_data = {}\n raw_data[\"body\"] = row.body\n raw_data[\"score_hidden\"] = row.score_hidden\n raw_data[\"archived\"] = row.archived\n raw_data[\"name\"] = row.name\n raw_data[\"author\"] = row.author\n raw_data[\"author_flair_text\"] = row.author_flair_text\n raw_data[\"downs\"] = row.downs\n raw_data[\"created_utc\"] = row.created_utc\n raw_data[\"subreddit_id\"] = row.subreddit_id\n raw_data[\"link_id\"] = row.link_id\n raw_data[\"parent_id\"] = row.parent_id\n raw_data[\"score\"] = row.score\n raw_data[\"retrieved_on\"] = row.retrieved_on\n raw_data[\"controversiality\"] = row.controversiality\n raw_data[\"gilded\"] = row.gilded\n raw_data[\"id\"] = row.id\n raw_data[\"subreddit\"] = row.subreddit\n raw_data[\"ups\"] = row.ups\n raw_data[\"distinguished\"] = row.distinguished\n raw_data[\"author_flair_css_class\"] = row.author_flair_css_class\n\n return json.dumps(raw_data)", "def comment(postid):\n context = {}\n if \"username\" not in flask.session:\n raise InvalidUsage('Forbidden', status_code=403)\n\n connection = insta485.model.get_db()\n cursor = connection.execute(\n \"SELECT * FROM comments WHERE postid=:id\", {'id': postid})\n comments = cursor.fetchall()\n ''' \n if bool(comments) is False:\n raise InvalidUsage('Not Found', status_code=404)\n '''\n # User\n logname = flask.session[\"username\"]\n\n if flask.request.method == 'POST':\n data = flask.request.get_json(force=True)\n context['text'] = data['text']\n context['owner'] = logname\n context['owner_show_url'] = '/u/' + logname + '/'\n connection.execute('INSERT INTO comments (owner, postid, text) \\\n VALUES (?,?,?)', (logname, postid, data['text']))\n cursor = connection.execute('SELECT last_insert_rowid() AS id')\n commentid_dic = cursor.fetchone()\n context['commentid'] = commentid_dic['id']\n context['postid'] = postid\n return flask.jsonify(**context), 201\n\n # url\n context[\"url\"] = flask.request.path\n context['comments'] = []\n\n for i in comments:\n one_comment = {}\n one_comment['commentid'] = i['commentid']\n one_comment['owner'] = i['owner']\n one_comment['owner_show_url'] = '/u/' + i['owner'] + '/'\n one_comment['postid'] = postid\n one_comment['text'] = i['text']\n context['comments'].append(one_comment)\n\n return flask.jsonify(**context)" ]
[ "0.7134723", "0.64888424", "0.637814", "0.62791246", "0.61551374", "0.6044316", "0.5998198", "0.5990303", "0.59835607", "0.5981284", "0.59108824", "0.5874552", "0.58657545", "0.5792547", "0.5747527", "0.57035154", "0.56909966", "0.5671392", "0.5666398", "0.5646678", "0.56457067", "0.56383395", "0.56243753", "0.55930036", "0.5579181", "0.5543425", "0.5519832", "0.5515187", "0.5491182", "0.54646313" ]
0.79955125
0
Get logged in user from cache
def get_logged_in_user(self): if type(self.cache) is Cache: sessionId = self.cache.get('user.sessionId') userId = self.cache.get('user.id') if sessionId and userId: self.sessionId = sessionId self.userId = userId user = {} user['id'] = userId user['username'] = self.cache.get('user.username') user['profileUrl'] = self.cache.get('user.profileUrl') user['avatarUrl'] = self.cache.get('user.avatarUrl') user['reputation'] = self.cache.get('user.reputation') user['badge1'] = self.cache.get('user.badge1') user['badge2'] = self.cache.get('user.badge2') user['badge3'] = self.cache.get('user.badge3') return user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_logged_user():\n return get_from_session(KEY_USER)", "def get_cached_user(user_id):\n cache_key = app_settings.CACHED_USER_KEY_TEMPLATE.format(\n site_id=settings.SITE_ID, \n user_id=user_id\n )\n user = cache.get(cache_key, None)\n if user is None:\n try:\n user = User.objects.get(pk=user_id)\n cache.set(\n cache_key, \n user,\n app_settings.CACHED_USER_DEFAULT_CACHE_TIMEOUT\n )\n except User.DoesNotExist:\n user = None\n return user", "def retrieve(self, request, *args, **kwargs):\n username = kwargs.get(\"user\")\n cached_user = cache.get(f\"{USER_PROFILE_PREFIX}{username}\")\n if cached_user:\n return Response(cached_user)\n response = super().retrieve(request, *args, **kwargs)\n return response", "def get_user(self, username):\n return self._cache.get(username)", "def get(model_class, id):\n key = build_key(model_class, id)\n user = cache.get(key)\n if user is None: # Not in cache\n logger.info(\" CACHE MISS key=%s\", key)\n user = User.objects.filter(id=id).first()\n if user is not None: # Found in DB\n logger.info(\" CACHE POPULATE key=%s\", key)\n cache.set(key, user) # Add to cache\n else:\n logger.info(\" CACHE HIT key=%s\", key)\n return user", "def get(self):\r\n return get_user(request)", "def get_current_user(request): \n user_id = request.session.get('user_id', False)\n \n return Member.objects.get(id=user_id)", "def getUser(self, username):\r\n if (self._credCache is None or\r\n os.path.getmtime(self.filename) > self._cacheTimestamp):\r\n self._cacheTimestamp = os.path.getmtime(self.filename)\r\n self._credCache = dict(self._loadCredentials())\r\n return self._credCache[username]", "def get_user():\n try:\n userId = request.args.get('login_as')\n return users[int(userId)]\n except Exception:\n return None", "def user_info(self):\n return self.auth.get_user_by_session()", "def get_one_user():", "def get_user(request: Request):\n return request.user", "def getUser(request, returnAnonymous=False):\n if SESSION_KEY in request.session:\n user = ezidapp.models.getUserById(request.session[SESSION_KEY])\n if user != None and user.loginEnabled:\n return user\n else:\n return ezidapp.models.AnonymousUser if returnAnonymous else None\n else:\n return ezidapp.models.AnonymousUser if returnAnonymous else None", "def get_cached_account(username, registry):\n cache_key = get_account_cache_key(username, registry)\n cache = registry.cache\n cached_account = cache.get(cache_key)\n return cached_account", "def me_get(): # noqa: E501\n s = base.check_session()\n return _cleanuser(s['user'])", "def get_user(self):\n if \"user\" not in self._data:\n self._data[\"user\"] = User.objects.get(pk=self.kwargs[\"user_id\"])\n return self._data[\"user\"]", "def current_user(self):\n user_dict = self.auth.get_user_by_session()\n return self.auth.store.user_model.get_by_id(user_dict['user_id'])", "def get_cached_user_info(self, user_id: str) -> Optional[Dict]:\n return self.get_users().get(user_id)", "def user(self):\n user = None\n if not 'user' in self.session and os.environ['APPLICATION_ID'].startswith('dev'):\n if self.request.get('paToken'):\n user = Github({'paToken': self.request.get('paToken')}).user()\n if user:\n logging.info(\"Read user data %s\" % json.dumps(user))\n user['paToken'] = self.request.get('paToken')\n self.session['user'] = user\n return user\n # No user for now\n return None\n \n if 'user' in self.session: \n return self.session['user']\n \n logging.info('No user detected; redirecting to /login')\n self.redirect('/login?%s' % urllib.urlencode({'r': self.request.path}), 
abort=True)", "def get_user(self):\n\n user_session = self.get()\n if not user_session:\n return None\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n return us.single(user_session.login)", "def __get_user(self, login):\n\n user = {}\n\n if not login:\n return user\n\n user_raw = self.client.get_user(login)\n user = json.loads(user_raw)\n self._push_cache_queue(user_raw)\n user_orgs_raw = \\\n self.client.get_user_orgs(login)\n user['organizations'] = json.loads(user_orgs_raw)\n self._push_cache_queue(user_orgs_raw)\n self._flush_cache_queue()\n\n return user", "def before_request():\n if 'user_key' in session:\n user = cache.get(session['user_key'])\n\n if user is None:\n # if the user is not available in memcache we fetch\n # it from the datastore\n user = User.get_by_key_name(session['user_key'])\n\n if user:\n # add the user object to memcache so we\n # don't need to hit the datastore next time\n cache.set(session['user_key'], user)\n\n g.user = user\n else:\n g.user = None", "def me():\n return current_user.get()", "def get_user():\n global USER\n return USER", "def get_current_user():\n if 'current_user' in session:\n return User.query.filter_by(github_id=session['current_user']).first()", "def get_object(self):\n return User.objects.get(username=self.request.user.username)", "def get_current(self):\n auth_token = session.get(\"auth_token\")\n print(auth_token)\n if not auth_token:\n return None\n user = db.user.find_one({\"auth_token\":auth_token})\n\n return user", "def get_current_user(self):\n return self.get_secure_cookie(\"user\")", "def get_user(self):\n return self.get('users/self')", "def getUser(self):\n user = users.get_current_user()\n if not user:\n self.redirect(users.create_login_url(self.request.uri))\n else:\n return user" ]
[ "0.77119267", "0.7561403", "0.75211304", "0.7454216", "0.73541325", "0.73536277", "0.7330943", "0.72674334", "0.7243006", "0.7223945", "0.71723396", "0.7157568", "0.71509427", "0.7140306", "0.7090559", "0.7074809", "0.70509964", "0.7049927", "0.70444727", "0.7029923", "0.7027895", "0.70166385", "0.7009396", "0.699056", "0.69752395", "0.6950761", "0.6946966", "0.6945323", "0.69364387", "0.69251704" ]
0.815407
0
Parse user's messages from page
def _parse_user_messages_page(self, html): if not html: return None dom = BeautifulSoup(html, 'html.parser') data = self._parse_logged_in_user(dom) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_message(self, soup):\n kind, = soup.attrs[u'class']\n title = soup.findChild().text\n body = ''.join(t.text for t in soup.findChildren()[1:])\n message = dict(kind=kind, title=title, body=body)\n for val in message.values():\n assert type(val) == str\n return message", "def parse(message):\n html = render(message['text'])\n\n return html", "def parse_user_msg(xml):\n if not xml:\n return\n\n wechat_message = dict((child.tag, to_text(child.text))\n for child in ElementTree.fromstring(xml))\n wechat_message[\"raw\"] = xml\n wechat_message[\"type\"] = wechat_message.pop(\"MsgType\").lower()\n\n message_type = MESSAGE_TYPES.get(wechat_message[\"type\"], UnknownMessage)\n return message_type(wechat_message)", "def get_user_messages(user_id):\n pass \n # user_message_list = []\n\n # for message in sent messages:", "def parse_message(self, message):\n pass", "def get_messages(message_count):\r\n\r\n file = open('messages.htm', encoding='UTF-8')\r\n\r\n html = file.read().split('</p>')\r\n file.close()\r\n\r\n TOTAL[0] = len(html) - 1\r\n\r\n # Gets rid of formatting at the beginning\r\n start = html[0].find('<div class=\"message\">')\r\n while not html[0][start].isnumeric():\r\n start += 1\r\n html[0] = html[0][start:]\r\n\r\n html.pop()\r\n\r\n threads = []\r\n\r\n que = Queue(maxsize=50)\r\n for line in html:\r\n try:\r\n clean_line = BeautifulSoup(line, 'lxml').getText()\r\n except Exception:\r\n print('Install lxml')\r\n #print(line)\r\n if len(clean_line) != 0:\r\n t = threading.Thread(target=add_option,\r\n args=(message_count, que, threads))\r\n que.put(clean_line)\r\n\r\n t.daemon = True\r\n t.start()\r\n threads.append(t)\r\n\r\n que.join()", "def parse(msg):\n msg = msg.replace(\"\\r\", \"\").replace(\"\\n\", \"\").replace(\"\\b\", \"\")\n pseudo = user_account = ip = msg_type = content = target = \"\"\n msg_parsed = message_regex.search(msg)\n if msg_parsed:\n data = msg_parsed.groups()\n if len(data) >= 6:\n pseudo = data[0]\n user_account = data[1]\n ip = data[2]\n msg_type = data[3]\n target = data[4]\n content = data[5]\n if target.startswith(\"#\") and msg_type == \"PRIVMSG\":\n msg_type = \"PUBMSG\"\n return Message(pseudo, user_account, ip, msg_type, content, target)", "def parse_message(msg):\n # the message number, increments with each message\n msg_number = msg[0][0]\n # the message type\n msg_type = msg[0][1][0]\n return {\n 'noop': parse_noop_message,\n 'c': parse_content_message,\n }[msg_type](msg, msg_number)", "def gather(self, userId='me', query=''):\n\n to_process = {}\n message_count = 0\n\n request = self.connection.users().messages().list(userId=userId, q=query)\n response = request.execute()\n try:\n messages = response['messages']\n for message in messages:\n to_process[message_count] = message['id']\n page_count = 1\n print(\"[I] Gathering Pages\")\n while response.get('nextPageToken'):\n request = self.connection.users().messages().list_next(previous_request=request, previous_response=response)\n response = request.execute()\n page_count += 1\n messages += response['messages']\n print(f'[√] Total Page Count: {page_count}')\n for message in messages:\n message_count += 1\n to_process[message_count] = message['id']\n self.to_process = to_process\n print(f'[√] Total Message Count: {len(self.to_process)}')\n except KeyError as e:\n print(\"[X] No Matches Found, Please Check Your Query String\")\n sys.exit(1)", "def list_messages(request, page=1, talk_id=None, lib=None):\n if request.POST:\n form = ContactUserForm(data=request.POST)\n if 
form.is_valid():\n lib.add_message_to_talk(talk_id=talk_id, \n message=form.cleaned_data['message'])\n return redirect(\"talks:list_messages\", talk_id)\n else:\n form = ContactUserForm()\n \n talk = lib.get_talk(talk_id)\n messages = lib.list_talk_messages(talk_id)\n \n if request.bvuser.id == talk.from_user.id:\n to_user = talk.trip.user\n else:\n to_user = talk.from_user\n\n return render_to_response('talks/list_messages.html', {\n 'to_user' : to_user,\n 'talk' : talk,\n 'form' : form,\n 'messages': messages,\n 'is_talk': True,\n }, context_instance=RequestContext(request))", "def parse(self, message: Message):\n\t\tpass", "def message():\n if request.method == 'POST':\n db.log_msg(request.form['text'], request.cookies.get('username'))\n return db.get_all_messages()", "def get_message_user(self, name_user):\n messages = []\n\n with open(self.path_to_message_file, 'r') as file:\n for line in file:\n if line.split()[2] == name_user: # you must select 3 item, because string in format:\n messages.append(line) # \"date\" \"time\" \"user\" \"messages\"\n return messages", "def get_loaded_messages(self):\n self.chat.click()\n messages = []\n for message in self.chat.find_elements(By.XPATH, \"\"\"//*[@id=\"main\"]/div[3]/div/div/div[3]/*\"\"\"):\n messages.append(MessageElement(message))\n return messages", "def messages_page(user_id):\n\n default_msgs = Message.query.filter_by(created_by=1).all()\n user_msgs = Message.query.filter_by(created_by=user_id).all()\n\n messages = []\n for msg in default_msgs:\n messages.append( { 'msg_id': msg.msg_id,\n 'created_by': msg.created_by,\n 'msg_text': msg.msg_text } )\n\n if user_msgs:\n for msg in user_msgs:\n if msg.created_by != 1:\n messages.append( { 'msg_id': msg.msg_id,\n 'created_by': msg.created_by,\n 'msg_text': msg.msg_text } )\n\n return jsonify(messages)", "def __get_loaded_messages(self):\n messages = []\n for message in self.chat.find_elements(By.XPATH, \"\"\"//*[@id=\"main\"]/div[3]/div/div/div[3]/*\"\"\"):\n messages.append(MessageElement(message))\n return messages", "def list_messages(self):", "def parse_atwho(my_text):\n notification_list = []\n \n #markdown_link_regex = re.compile(\"\\[.*?\\] \\((.*?) 
\\\".*?\\\"\\)\", re.IGNORECASE) # need to test this.\n markdown_link_regex = re.compile(\"\\[([^\\]]+)\\]\\(([^)\\\"]+)(?: \\\\\\\"([^\\\\\\\"]+)\\\\\\\")?\\)\", re.IGNORECASE)\n direct_link_regex = re.compile(\"data-notify=\\\\\\\"([^\\\\\\\"]+)\\\\\\\"\", re.IGNORECASE)\n link_list = [i[2] for i in markdown_link_regex.findall(my_text)]\n link_list += [i for i in direct_link_regex.findall(my_text)]\n\n for i in link_list:\n _type, _id = i.split(\"#\", 1)\n if _type == \"user\":\n notification_list.append(User.objects.get(id=_id))\n elif _type == \"subdept\":\n notification_list.append(Subdept.objects.get(id=_id))\n elif _type == \"dept\":\n notification_list.append(Dept.objects.get(id=_id))\n elif _type == \"page\":\n notification_list.append(Page.objects.get(id=_id))\n\n return my_text, notification_list", "def process_messages(self):\n pass", "def _parse_user_page(self, html, user):\n\n dom = BeautifulSoup(html, 'html.parser')\n\n data = {}\n\n # Parse logged in user from page header\n data['current_user'] = self._parse_logged_in_user(dom)\n\n # Avatar\n avatar_node = dom.find('img', class_='gravatar')\n if avatar_node is not None:\n data['avatarUrl'] = self.get_link(avatar_node.get('src'))\n data['username'] = avatar_node.get('title')\n\n # Karma\n score_node = dom.find('div', class_='scoreNumber')\n if score_node is not None:\n data['reputation'] = score_node.get_text()\n\n user_details_table = dom.find('table', class_='user-details')\n if user_details_table is not None:\n for tr in user_details_table.find_all('tr'):\n raw_text = tr.get_text()\n if raw_text.find('member since') != -1:\n created_node = tr.find('abbr', class_='timeago')\n if created_node is not None:\n created_datetime = self._parse_datetime(created_node.get('title'))\n data['created'] = created_datetime.strftime('%Y-%m-%d')\n data['created_label'] = timeago.format(created_datetime, datetime.now(TIMEZONE))\n elif raw_text.find('last seen') != -1:\n last_seen_node = tr.find('abbr', class_='timeago')\n if last_seen_node is not None:\n last_seen_datetime = self._parse_datetime(last_seen_node.get('title'))\n data['last_seen'] = last_seen_datetime.strftime('%Y-%m-%d')\n data['last_seen_label'] = timeago.format(last_seen_datetime, datetime.now(TIMEZONE))\n\n # Questions count\n questions_a_node = dom.find('a', attrs={'name': 'questions'})\n if questions_a_node is not None:\n questions_h2_node = questions_a_node.find_next('h2')\n if questions_h2_node.name == 'h2':\n questions_count_node = questions_h2_node.find('span', class_='count')\n if questions_count_node is not None:\n data['questions_count'] = int(questions_count_node.get_text())\n\n # Questions list\n data['questions'] = []\n for question_node in dom.find_all('div', class_='short-summary'):\n question = self._parse_question_html(question_node)\n if question:\n data['questions'].append(question)\n\n return data", "def parse(self):\n\t\tfor part in self.mail.walk():\n\t\t\tself.process_part(part)", "def get_messages(self):\n other_user_email = request.args.get('other_user_email')\n page = request.args.get('page')\n per_page = request.args.get('per_page')\n if not other_user_email or not page or not per_page:\n self.logger.debug(messages.MISSING_FIELDS_ERROR % \"query params\")\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % \"query params\", 400\n email_token = auth.current_user()[0]\n page = int(page)\n per_page = int(per_page)\n # App sends starting with 1 but we start at 0\n page -= 1\n try:\n message_list, pages = 
self.friend_database.get_conversation(email_token, other_user_email, per_page, page)\n except NoMoreMessagesError:\n self.logger.debug(messages.NO_MORE_PAGES_ERROR)\n return messages.NO_MORE_PAGES_ERROR, 404\n message_list = [{k:v for k,v in m._asdict().items() if k != \"hidden_to\"} for m in message_list]\n for i in range(len(message_list)):\n message_list[i][\"timestamp\"] = message_list[i][\"timestamp\"].isoformat()\n return json.dumps({\"messages\": message_list, \"pages\": pages}), 200", "def parseMsg(self):\n # These 4 elements are always present\n # \"ToUserName\"\n # \"FromUserName\"\n # \"CreateTime\"\n # \"MsgType\"\n\n # Following elements depends on MsgType\n # \"MsgId\"\n # \"Content\"\n # \"MediaId\"\n # \"PicUrl\"\n # \"Format\"\n # \"ThumbMediaId\"\n # \"Location_X\"\n # \"Location_Y\"\n # \"Scale\"\n # \"Label\"\n # \"Title\"\n # \"Description\"\n # \"Url\"\n # \"Event\"\n # \"EventKey\"\n # \"Ticket\"\n # \"Latitude\"\n # \"Longitude\"\n # \"Precision\"\n # \"Recognition\"\n\n def getField(req, key):\n if req.find(key) != None:\n return req.find(key).text\n\n\n msg = {}\n req = et.fromstring(self.request.body.decode(\"utf-8\"))\n\n # These 4 elements are always present\n msg[\"ToUserName\"] = getField(req, \"ToUserName\")\n msg[\"FromUserName\"] = getField(req, \"FromUserName\")\n msg[\"CreateTime\"] = getField(req, \"CreateTime\")\n msg[\"MsgType\"] = getField(req, \"MsgType\")\n\n # Following elements depends on MsgType\n msg[\"MsgId\"] = getField(req, \"MsgId\")\n msg[\"Content\"] = getField(req, \"Content\")\n msg[\"MediaId\"] = getField(req, \"MediaId\")\n msg[\"PicUrl\"] = getField(req, \"PicUrl\")\n msg[\"Format\"] = getField(req, \"Format\")\n msg[\"ThumbMediaId\"] = getField(req, \"ThumbMediaId\")\n msg[\"Location_X\"] = getField(req, \"Location_X\")\n msg[\"Location_Y\"] = getField(req, \"Location_Y\")\n msg[\"Scale\"] = getField(req, \"Scale\")\n msg[\"Label\"] = getField(req, \"Label\")\n msg[\"Title\"] = getField(req, \"Title\")\n msg[\"Description\"] = getField(req, \"Description\")\n msg[\"Url\"] = getField(req, \"Url\")\n msg[\"Event\"] = getField(req, \"Event\")\n msg[\"EventKey\"] = getField(req, \"EventKey\")\n msg[\"Ticket\"] = getField(req, \"Ticket\")\n msg[\"Latitude\"] = getField(req, \"Latitude\")\n msg[\"Longitude\"] = getField(req, \"Longitude\")\n msg[\"Precision\"] = getField(req, \"Precision\")\n msg[\"Recognition\"] = getField(req, \"Recognition\")\n return msg", "def load_received_messages(username):\n return [m for m in load_all_messages() if m[\"to\"] == username]", "def _parse_message(self, data):\r\n if TwitchChatStream._check_has_ping(data):\r\n self._maybe_print('got ping')\r\n self._send_pong()\r\n\r\n channel_name_or_false = TwitchChatStream._check_has_channel(data)\r\n if channel_name_or_false:\r\n current_channel = channel_name_or_false[0]\r\n print('Connected to channel: ' + current_channel)\r\n\r\n if TwitchChatStream._check_has_message(data):\r\n msg = {\r\n 'channel': re.findall(r'^:.+![a-zA-Z0-9_]+'\r\n r'@[a-zA-Z0-9_]+'\r\n r'.+ '\r\n r'PRIVMSG (.*?) 
:',\r\n data)[0],\r\n 'username': re.findall(r'^:([a-zA-Z0-9_]+)!', data)[0],\r\n 'message': re.findall(r'PRIVMSG #[a-zA-Z0-9_]+ :(.+)',\r\n data)[0]\r\n }\r\n if msg['channel'].startswith('#'):\r\n msg['channel'] = msg['channel'][1:]\r\n self._maybe_print(\r\n 'got msg: #{} @{} -- {}'.format(msg['channel'], msg['username'], msg['message']))\r\n return msg\r\n elif len(data):\r\n self._maybe_print('other data: {}'.format(data))\r\n else:\r\n return None", "def _parse_message(msg):\n lines, body = _split_lines(msg)\n # The first line is the start line.\n start_line = lines[0]\n # Remaining lines are the header.\n header = _parse_header(lines[1 :])\n return start_line, header, body", "def list(request):\r\n usermessages = request.user.profile.recent_messages()\r\n d = {\r\n 'form': NewMessageForm(),\r\n 'usermessages': usermessages,\r\n 'title': 'Messages',\r\n }\r\n return render_to_response('usermessages/list.html', d, \r\n context_instance=RequestContext(request))", "def get_words(message):\n\n words = message.body.strip()\n\n # split every paragraph looking for our username and request in a single line.\n lines = words.split(\"\\n\")\n for line in lines:\n line_parts = line.split()\n\n # get only the line with the request.\n if len(line_parts) > 0 and 'u/nhl_stats' in line_parts[0].lower():\n words = line\n break\n\n # strip an ending period if one exists.\n if words[-1:] == \".\":\n words = words[:-1].strip()\n\n words = words.split()\n\n for i in range(len(words)):\n words[i] = words[i].lower()\n return words", "def get_messages():\n incoming = request.get_json()\n messages = Message.get_messages_from_room_id(incoming['room_id'])\n messages = [{'user_id': message.user_id, \n 'sendTime': message.sendTime, 'content': message.content} for message in messages]\n for message in messages:\n user = User.get_user_with_user_id(message['user_id'])\n message['username'] = str(user.username)\n return jsonify(results = messages)", "def load_sent_messages(username):\n return [m for m in load_all_messages() if m[\"from\"] == username]" ]
[ "0.6575689", "0.6570656", "0.63135284", "0.628515", "0.62355065", "0.6216852", "0.6142316", "0.607651", "0.60617214", "0.6023116", "0.6011333", "0.5981321", "0.59291005", "0.59062594", "0.58831334", "0.58828837", "0.5859643", "0.5853651", "0.5852005", "0.5831771", "0.58298653", "0.5805426", "0.5785552", "0.57516426", "0.5745167", "0.57416505", "0.5721391", "0.5712132", "0.57019466", "0.5686087" ]
0.82223177
0
Try to get the session ID from the WebKit cookies DB
def get_session_id_from_cookie(self): conn = sqlite3.connect(COOKIE_PATH) cursor = conn.cursor() params = ('together.jolla.comsessionid',) cursor.execute('SELECT * FROM cookies WHERE cookieId = ?', params) row = cursor.fetchone() if row is not None: cookie = SimpleCookie() cookie.load(row[1].decode('utf-8')) for cookie_name, morsel in cookie.items(): if cookie_name == 'sessionid': return morsel.value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def session_id(self):\n return self.browser.crawlera_session", "def get_session_id(self):\n return self.request_data['id']", "def _getswabid(environ):\n try:\n return environ['swab.id']\n except KeyError:\n pass\n cookie = Request(environ).cookies.get('swab')\n if cookie:\n swabid = cookie.value.encode('ascii')\n environ['swab.id'] = swabid\n return swabid\n return None", "def get_sessionid(self):\n if not self.__initialized or not self.__loggedin:\n raise NSNitroError(\"Not initialized or not logged in.\")\n\n return self.__sessionid", "def getSessionId(self) -> int:\n return self.cpp.getSessionId()", "def get_session_id(self):\n raise NotImplementedError()", "def getSessionId(self):\n return self.sessionid", "def get_session_id(context):\n skey = session_key('session_id')\n session_id = get_session(context, skey)\n\n if session_id is None:\n session_id = generate_session_id(context)\n set_session(context, skey, session_id)\n\n return session_id", "def _shib_get_token(self): # pragma: no cover\n\n shibCookie = None\n for cookie in self._session.cookies:\n if \"shibsession\" in cookie.name:\n shibCookie = cookie\n break\n\n if not shibCookie:\n warnings.warn(\"No session token found.\", AuthenticationWarning)\n\n return shibCookie", "def get_wharton_sessionid(public=False):\n sessionid = request.args.get('sessionid')\n cache_key = 'studyspaces:gsr:sessionid'\n\n if sessionid:\n return sessionid\n\n if public:\n if db.exists(cache_key):\n return db.get(cache_key).decode('utf8')\n\n return os.environ.get('GSR_SESSIONID')\n\n return None", "def get_uid_from_cookie(self):\n user_id = self.request.cookies.get('user_id')\n return user_id and utils.check_secure_cookie(user_id)", "def get_or_create_session_id(visit_id, sqlite_db):\n cursor = sqlite_db.cursor()\n query = \"SELECT session_id FROM session_ids WHERE visit_id='{p_id}'\".format(p_id=str(visit_id))\n cursor.execute(query)\n\n session_ids = cursor.fetchall()\n\n if len(session_ids) > 1:\n raise Exception(\"Multiple session ID\\'s found for single Piwik visit ID\")\n\n if session_ids:\n return str(session_ids[0][0])\n\n session_uuid = str(uuid.uuid4())\n query = \"INSERT INTO session_ids (visit_id, session_id) VALUES ('{visit}', '{session}');\".format(\n visit=str(visit_id), session=session_uuid\n )\n cursor.execute(query)\n sqlite_db.commit()\n return session_uuid", "def check_session(session_id):\n return session_cache.hget(session_id)", "def getCookie(key):", "def get_session(self, response, url=\"/\", agent=\"unknown browser\"):\n # post or get\n parts = \"\"\n if not response.content:\n print(\"RESPONSE FAILED: {}\".format(response.__class__))\n else:\n content = response.content.decode()\n if self.hidden in content:\n splitter = self.hidden\n else:\n splitter = \"%s=\" % self.skey\n parts = content.split(splitter, 1)\n session_key = \"\"\n if len(parts) > 1:\n parts = parts[1].split('\"', 1)\n session_id = parts[0]\n request = self.factory.get(\n url, REMOTE_ADDR=\"127.0.0.1\", HTTP_USER_AGENT=agent\n )\n try:\n session_key = self.crypt_sesh.decrypt(request, session_id)\n except:\n # Silently start new session in case fake session_id format causes error\n pass\n else:\n session_id = \"\"\n try:\n session = self.engine.SessionStore(session_key)\n except:\n session = self.engine.SessionStore()\n return session, session_id", "def get_session(_id):\n token = get_token()\n headers = {'Content-Type': 'application/json', 'Authorization': f'Bearer {token}'}\n endpoint = f\"https://api.signicat.io/identification/v2/sessions/{_id}\" \n \n 
response = requests.get(endpoint, headers=headers).json()\n identification = response['identity']\n print(identification)\n return response['identity']", "def session_id(self) -> str:\n return self._session_id", "def get_session_from_user(self, client_id):\n return self.connections[client_id][\"session_id\"]", "def request_sid_cookie(self, username, password):\n target_url = self._login_url + '?usr={0}&pwd={1}&persist=y' \\\n .format(username, password)\n cookie = urlopen(target_url).read()\n return cookie", "def get_session_id(session):\n return {'src_ip': session['src_ip'], 'src_port': session['src_port'], 'dest_ip': session['dest_ip'],\n 'dest_port': session['dest_port'], 'protocol': session['protocol'], 'start_time': session['start_time']}", "def get_cookie(self):\n\t\t# Handle cookies.\n\t\tcookie = cookies.SimpleCookie()\n\t\tcookie_string = self.headers.get('Cookie')\n\t\t# The first time the page is run there will be no cookies\n\t\tif not cookie_string:\n\t\t\tcookie['simpletracker-userid'] = uuid.uuid1()\n\t\t\tself.send_header('Set-Cookie:', '{}'.format(cookie.output()))\n\t\t\tprint('place_pixel_image(), create cookie= userid:{}'.format(cookie.output().split('=',1)[1]))\n\t\telse:\n\t\t\tcookie.load(cookie_string)\n\t\treturn cookie", "def get_browser_state_or_default(request):\n key = (request.session.session_key or\n settings.get('OIDC_UNAUTHENTICATED_SESSION_MANAGEMENT_KEY'))\n return sha224(key.encode('utf-8')).hexdigest()", "def get_id():\n token = get_token()\n # Endpoint url\n endpoint = \"https://api.idfy.io/identification/v2/sessions\"\n # Setting headers with the authorization bearer\n headers = {'Content-Type': 'application/json', 'Authorization': f'Bearer {token}'}\n data = {\n \"languages\": \"en\",\n \"flow\": \"redirect\",\n \"allowedProviders\": [\n \"no_bankid_netcentric\",\n \"no_bankid_mobile\"\n ],\n \"include\": [\n \"name\",\n \"date_of_birth\",\n \"phone_number\",\n \"nin\",\n \"email\"\n ],\n \"redirectSettings\": {\n \"successUrl\": \"https://example.com/success\",\n \"abortUrl\": \"https://example.com/abort\",\n \"errorUrl\": \"https://example.com/error\"\n }\n }\n # Converting the data into a json string and sending a post request \n response = requests.post(endpoint, data=json.dumps(data), headers=headers).json()\n # Opening the browser and to authenticate the user\n webbrowser.open(response['url'])\n # returning the session id\n return response['id']", "def get_session_id_for_unit(cache, uid):\n units = cache.get_units()\n return units[units.index == uid][\"ecephys_session_id\"].values[0]", "def get_sid(session, params):\n return session.get('http://www.flyniki.com/ru/booking/flight/vacancy.php',\n params=params).url", "def get_session_key(self, request):\r\n try:\r\n return request.session.session_key\r\n except AttributeError:\r\n return ''", "def user_from_cookie(db, environ):\n if 'HTTP_COOKIE' in environ:\n cookie = SimpleCookie(environ['HTTP_COOKIE'])\n if COOKIE_NAME in cookie:\n sessionkey = cookie[COOKIE_NAME].value\n cur = db.cursor()\n cur.execute('SELECT useremail FROM sessions WHERE sessionid IS ?', (sessionkey,))\n result = cur.fetchone()\n\n if result is not None:\n return result[0]\n return None", "def get_session_cookie(self):\n\n if self._login is not None and self._password is not None:\n session_key = self.encode_user(self._login, self._password)\n return {'sessionkey': session_key}\n else:\n return None", "def getId(self):\n return self.session.request('id/')", "def session(self):\n return self.session_store.get_session()" ]
[ "0.684743", "0.6749757", "0.67293745", "0.66384244", "0.6619955", "0.65192", "0.6503215", "0.63944334", "0.63649696", "0.6357397", "0.63556963", "0.6275198", "0.620214", "0.6145915", "0.6140672", "0.6135115", "0.61138916", "0.6101876", "0.60884416", "0.60752755", "0.60656494", "0.60380316", "0.6014806", "0.60146534", "0.59942216", "0.5924524", "0.5882449", "0.5857007", "0.58478403", "0.58229905" ]
0.7772008
0
Parse logged in user info from page header
def _parse_logged_in_user(self, node): user_node = node.select_one('div#userToolsNav') if not user_node: return None user_stats = user_node.select_one('span.user-info') if not user_stats: return None data = {} user_link = user_node.select_one('a') link_href = user_link.get('href') if user_link and link_href.find('/users/') != -1: data['profileUrl'] = self.get_link(link_href) data['username'] = user_link.get_text() user_id = self._parse_user_id_from_url(link_href) if user_id: data['id'] = user_id reputation_node = user_node.select_one('a.reputation') if reputation_node: data['reputation'] = self._to_int(reputation_node.get_text().strip().replace('karma: ', '')) for i in range(1, 4): badge = user_stats.select_one('span.badge' + str(i)) if badge: data['badge' + str(i)] = self._to_int(badge.find_next_sibling('span', class_='badgecount').get_text()) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_user_data (self, netflix_page_data):\n user_data = {};\n important_fields = [\n 'authURL',\n 'gpsModel',\n 'guid'\n ]\n\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n for important_field in important_fields:\n user_data.update({important_field: netflix_page_data.get(important_field, '')})\n return user_data\n\n # values are stored in lists (returned from JS parser)\n for item in netflix_page_data:\n if 'memberContext' in dict(item).keys():\n for important_field in important_fields:\n user_data.update({important_field: item['memberContext']['data']['userInfo'][important_field]})\n\n return user_data", "def _parse_user_page(self, html, user):\n\n dom = BeautifulSoup(html, 'html.parser')\n\n data = {}\n\n # Parse logged in user from page header\n data['current_user'] = self._parse_logged_in_user(dom)\n\n # Avatar\n avatar_node = dom.find('img', class_='gravatar')\n if avatar_node is not None:\n data['avatarUrl'] = self.get_link(avatar_node.get('src'))\n data['username'] = avatar_node.get('title')\n\n # Karma\n score_node = dom.find('div', class_='scoreNumber')\n if score_node is not None:\n data['reputation'] = score_node.get_text()\n\n user_details_table = dom.find('table', class_='user-details')\n if user_details_table is not None:\n for tr in user_details_table.find_all('tr'):\n raw_text = tr.get_text()\n if raw_text.find('member since') != -1:\n created_node = tr.find('abbr', class_='timeago')\n if created_node is not None:\n created_datetime = self._parse_datetime(created_node.get('title'))\n data['created'] = created_datetime.strftime('%Y-%m-%d')\n data['created_label'] = timeago.format(created_datetime, datetime.now(TIMEZONE))\n elif raw_text.find('last seen') != -1:\n last_seen_node = tr.find('abbr', class_='timeago')\n if last_seen_node is not None:\n last_seen_datetime = self._parse_datetime(last_seen_node.get('title'))\n data['last_seen'] = last_seen_datetime.strftime('%Y-%m-%d')\n data['last_seen_label'] = timeago.format(last_seen_datetime, datetime.now(TIMEZONE))\n\n # Questions count\n questions_a_node = dom.find('a', attrs={'name': 'questions'})\n if questions_a_node is not None:\n questions_h2_node = questions_a_node.find_next('h2')\n if questions_h2_node.name == 'h2':\n questions_count_node = questions_h2_node.find('span', class_='count')\n if questions_count_node is not None:\n data['questions_count'] = int(questions_count_node.get_text())\n\n # Questions list\n data['questions'] = []\n for question_node in dom.find_all('div', class_='short-summary'):\n question = self._parse_question_html(question_node)\n if question:\n data['questions'].append(question)\n\n return data", "def user_info(self):\n return self.auth.get_user_by_session()", "def extract_user_info(user):\r\n return {\r\n 'username': user.username,\r\n 'email': user.email,\r\n 'first_name': user.first_name,\r\n 'last_name': user.last_name,\r\n }", "def extract_user_info(user):\r\n return {\r\n 'username': user.username,\r\n 'email': user.email,\r\n 'first_name': user.first_name,\r\n 'last_name': user.last_name,\r\n }", "def getInfo(self):\n request = self._connection.get('bookmarklet')\n userdata = self._userinfo_regex.search(request.text)\n if userdata is None: userdata = self._userinfo_regex_2.search(request.text)\n if userdata is None: raise errors.DiaspyError('cannot find user data')\n userdata = userdata.group(1)\n return json.loads(userdata)", "def get_user_info(self) -> str:\n return self._searcher.get_user_info()", "def 
GetUserInfo(self):\n user = users.get_current_user()\n user_info = GetInfoForUser(user)\n if user:\n # Check to see if the user has auxiliary info for Swyzl, and if not\n # then create it.\n if not user_info:\n user_info = models.UserInfo()\n user_info.user = user\n user_info.put()\n\n url = users.create_logout_url(self.request.uri)\n url_link_text = 'Logout'\n else:\n url = users.create_login_url(self.request.uri)\n url_link_text = 'Login'\n return (user, url, url_link_text)", "def get_user_info_by_name(self, username: str) -> dict:", "def user_info(self):\n response = self.query('user_info')\n return response", "def getPublicUserInfo(self, username):\r\n pass", "def current_user_info():\n\n return current_user", "def userinfo(self):\n return self._userinfo", "def getUser(line):\n seperate = line.split(\":\", 2)\n user = seperate[1].split(\"!\",1)[0]\n return user", "def user():\n\treturn request.authorization.username if zk.get_http_login() else zk.get_username()", "def userinfo(self, access_token: str) -> dict[str, Any]:\n data: dict[str, Any] = self.client.get(\n url=f\"{self.protocol}://{self.domain}/userinfo\",\n headers={\"Authorization\": f\"Bearer {access_token}\"},\n )\n return data", "def get_logged_info():\n user = current_identity\n return make_response(dumps({\"status\": True, \"user\": user}), 200)", "def mod_header_user() -> Optional[User]:\n return None", "def _on_get_user_info(self, callback, session, user):\n logging.debug('user data from github ' + str(user))\n if user is None:\n callback(None)\n return\n callback({\n \"id\": user[\"id\"],\n \"login\": user[\"login\"],\n \"name\": user.get(\"name\"),\n \"email\": user.get(\"email\"),\n \"access_token\": session[\"access_token\"],\n })", "def _parse_user_messages_page(self, html):\n\n if not html:\n return None\n\n dom = BeautifulSoup(html, 'html.parser')\n\n data = self._parse_logged_in_user(dom)\n\n return data", "def _get_user_details():\n with open(USER_DETAILS_FILE) as f:\n fitbit_user = json.load(f)\n access_token = fitbit_user['access_token']\n refresh_token = fitbit_user['refresh_token']\n expires_at = fitbit_user['expires_at']\n\n return access_token, refresh_token, expires_at", "def get_user_details(self, response):\n\n log.info(str(response) + \"-\" * 80)\n log.info(str(dir(self)) + \"-\" * 80)\n\n return response", "def getUserInfo(self, user):\n return pwd.getpwnam(user)[2:4]", "def _get_user_info_cookie_data(request, user):\n\n # Set a cookie with user info. This can be used by external sites\n # to customize content based on user information. 
Currently,\n # we include information that's used to customize the \"account\"\n # links in the header of subdomain sites (such as the marketing site).\n header_urls = {'logout': reverse('logout')}\n\n # Unfortunately, this app is currently used by both the LMS and Studio login pages.\n # If we're in Studio, we won't be able to reverse the account/profile URLs.\n # To handle this, we don't add the URLs if we can't reverse them.\n # External sites will need to have fallback mechanisms to handle this case\n # (most likely just hiding the links).\n try:\n header_urls['account_settings'] = reverse('account_settings')\n header_urls['learner_profile'] = reverse('learner_profile', kwargs={'username': user.username})\n except NoReverseMatch:\n pass\n\n # Add 'resume course' last completed block\n try:\n header_urls['resume_block'] = retrieve_last_sitewide_block_completed(user)\n except User.DoesNotExist:\n pass\n\n header_urls = _convert_to_absolute_uris(request, header_urls)\n\n image_urls = {}\n try:\n image_urls = get_profile_image_urls_for_user(user)\n except UserProfile.DoesNotExist:\n pass\n\n image_urls = _convert_to_absolute_uris(request, image_urls)\n\n user_info = {\n 'version': settings.EDXMKTG_USER_INFO_COOKIE_VERSION,\n 'username': user.username,\n 'header_urls': header_urls,\n 'user_image_urls': image_urls,\n }\n\n return user_info", "def get_user():\n\treturn '1', 200", "def user(inp):\n user = inp.text.lower().replace(' ', '-')\n return 'http://www.wikidot.com/user:info/' + user", "def extract_user_info(client_config):\n # test if there isn't a system user or if there isn't a name for that\n # user, return None\n if ('system user' not in client_config or\n 'name' not in client_config['system user']):\n return None\n\n user_info = dict()\n user_info['system_key'] = dict(\n user=client_config['system user']['name'],\n access_key=client_config['system user']['access key'],\n secret_key=client_config['system user']['secret key'],\n )\n return user_info", "def _get_client_info():\n if hasattr(request.authorization, 'username'):\n auth_user = request.authorization.username\n else:\n auth_user = 'Unknown'\n info = request.headers\n origin_string = info.get(\"User-Agent\", \"\")\n origin_props = {}\n if origin_string:\n try:\n origin_props = dict(\n [_.split(\"/\", 1) for _ in origin_string.split()]\n )\n except ValueError:\n pass\n prog_name = origin_props.get(\"prog_name\", \"Unknown\")\n uuid = origin_props.get(\"uuid\", uuid4())\n host = info.get(\"Host\", \"Unknown\")\n if info.get(\"From\") and \"@\" in info[\"From\"]:\n user = info[\"From\"].split(\"@\")[0]\n else:\n user = (\"Unknown\")\n return auth_user, prog_name, user, host, uuid", "def _get_userinfo(self):\n if not hasattr(self, \"_userinfo\"):\n self._userinfo = {\n \"name\" : self.user_name,\n \"email\" : self.user_email\n }\n if self.user_id:\n u = self.user\n if u.email:\n self._userinfo[\"email\"] = u.email\n\n # If the user has a full name, use that for the user name.\n # However, a given user_name overrides the raw user.username,\n # so only use that if this review has no associated name.\n if u.get_full_name():\n self._userinfo[\"name\"] = self.user.get_full_name()\n elif not self.user_name:\n self._userinfo[\"name\"] = u.username\n return self._userinfo", "def userLoggedOn(self, session, params):\n\n user = session.get('user')\n\n #get the details of this user\n user_detail = WebUserDetail.objects.get(user_id=user.uid)\n\n data = {}\n data['full_name'] = user_detail.full_name\n\n return {'user': data}" ]
[ "0.6791457", "0.67451113", "0.6516258", "0.65075475", "0.65075475", "0.63439125", "0.63034827", "0.6252347", "0.6215166", "0.61992705", "0.6198551", "0.61583066", "0.61355966", "0.61322826", "0.61292905", "0.6124725", "0.6091143", "0.6075794", "0.6075346", "0.6038846", "0.60252494", "0.6016091", "0.60108685", "0.60100365", "0.599446", "0.5989941", "0.59885764", "0.5976296", "0.5968075", "0.5928847" ]
0.7291526
0
Parse question data from DOM node
def _parse_question_html(self, node): if node is None: return None data = {} data['has_more_comments'] = False data['has_more_answers'] = False data['id'] = int(node.get('id').replace('question-', '')) h2_node = node.find('h2') if h2_node is not None: a_node = h2_node.find('a') if a_node is not None: data['title'] = a_node.get_text() data['url'] = self.get_link(a_node.get('href')) view_count_node = node.find('div', class_='views') if view_count_node is not None: view_count_value = view_count_node.find('span', class_='item-count').get_text() data['view_count_label'] = '0' if view_count_value == 'no' else view_count_value score_count_node = node.find('div', class_='votes') if score_count_node is not None: score_count_value = score_count_node.find('span', class_='item-count').get_text() data['score_label'] = '0' if score_count_value == 'no' else score_count_value answer_count_node = node.find('div', class_='answers') if answer_count_node is not None: answer_count_value = answer_count_node.find('span', class_='item-count').get_text() data['answer_count_label'] = '0' if answer_count_value == 'no' else answer_count_value return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_question(self, text, params={}):\n\n dom = BeautifulSoup(text, 'html.parser')\n\n data = {}\n\n # If requested page is not first page, it mean we only need load more answers\n if params['page'] == 1:\n # Parse user info\n data['users'] = []\n post_node = dom.select_one('div.post.question')\n if post_node is not None:\n user_node = post_node.find('div', class_='post-update-info-container')\n if user_node is not None:\n data['users'] = self.parse_user(user_node)\n\n # Parse question's comments\n data['comments'] = []\n comments_node = dom.find(id='comments-for-question-' + str(int(params['id'])))\n if comments_node is not None:\n data['comments'] = self.parse_comment(comments_node)\n\n data['has_more_comments'] = False\n add_comment_node = dom.find(id='add-comment-to-post-' + str(int(params['id'])))\n if add_comment_node:\n add_comment_script = add_comment_node.find_next('script')\n if add_comment_script:\n more_comment_pattern = re.compile('\\[\\'comments-for-question-(\\d+)\\'\\][ =]+{[\\n ]*truncated[ :]+(true|false)')\n more_comment_result = more_comment_pattern.search(add_comment_script.get_text())\n if more_comment_result:\n data['has_more_comments'] = True if more_comment_result.group(2) == 'true' else False\n\n # Parse CSRF token\n csrf_node = dom.find('input', attrs={'name': 'csrfmiddlewaretoken'})\n if csrf_node:\n #Utils.log('CSRF: ' + csrf_node.get('value'))\n self.csrfToken = csrf_node.get('value')\n\n # Parse followers\n data['followers'] = 0\n favorite_node = dom.find('div', attrs={'id': 'favorite-number'})\n if favorite_node is not None:\n favorite_text = favorite_node.get_text().strip()\n favorite_pattern = re.compile('(\\d+) follower[s]*')\n favorite_result = favorite_pattern.search(favorite_text)\n if favorite_result:\n data['followers'] = int(favorite_result.group(1))\n\n # Parse following status\n data['following'] = False\n favorite_btn_node = dom.select_one('a.button.followed')\n if favorite_btn_node is not None and favorite_btn_node.get('alt') == 'click to unfollow this question':\n data['following'] = True\n\n # Parse related questions\n data['related'] = []\n related_nodes = dom.find('div', class_='questions-related')\n if related_nodes is not None:\n for related_node in related_nodes.select('p'):\n a_node = related_node.find('a')\n item = {}\n item['title'] = a_node.get_text()\n item['url'] = self.get_link(a_node.get('href'))\n data['related'].append(item)\n\n # Parse votes\n data['votes'] = {}\n for script in dom.select('script'):\n script_text = script.get_text()\n if not script_text:\n continue\n if script_text.find('var votes = {};') != -1:\n for vote in re.findall('votes\\[\\'(\\d+)\\'\\][ ]*=[ ]*([-1]+)', script_text):\n data['votes'][vote[0]] = int(vote[1])\n break\n\n # Parse question status\n status_node = dom.select_one('div.question-status')\n if status_node:\n data['status'] = {}\n status_reason_node = status_node.select_one('b')\n if status_reason_node:\n data['status']['reason'] = status_reason_node.get_text().strip('\"')\n status_author_node = status_node.select_one('a')\n if status_author_node:\n data['status']['author'] = status_author_node.get_text()\n data['status']['profile_url'] = self.get_link(status_author_node.get('href'))\n status_date_pattern = re.compile('close date (\\d+-\\d+-\\d+ \\d+:\\d+:\\d+)')\n status_date_result = status_date_pattern.search(status_node.get_text())\n if status_date_result:\n data['status']['date'] = status_date_result.group(1)\n status_date = datetime.strptime(data['status']['date'], '%Y-%m-%d 
%H:%M:%S')\n data['status']['date_ago'] = timeago.format(status_date, datetime.utcnow())\n\n # Parse question paging\n data['has_more_answers'] = False\n paging_node = dom.find('div', class_='paginator')\n if paging_node is not None:\n current_page_node = paging_node.find('span', class_='curr')\n if current_page_node is not None:\n data['page'] = int(current_page_node.get_text().strip())\n else:\n data['page'] = 1\n\n next_page_node = paging_node.find('span', class_='next')\n if next_page_node is not None:\n data['has_more_answers'] = True\n\n # Parse question's answers\n data['answers'] = self.parse_answer(dom)\n\n return data", "def scrape_question(page_text, ans):\n\n sq = BeautifulSoup(page_text, 'html.parser')\n question = Question(sq, ans)\n\n return question.__dict__", "def parse_question_data(self):\n section = ''\n subsection = ''\n quest = ''\n # The data falls into 4 cases\n # 1. Sections\n # 2. subsections\n # 3. questions\n # 4. answers.\n\n for line in self.question_data: \n\n if \":\" in line: # case #2\n subsection = line.split(\":\")[1] # split the line on the : into an array but only take the [1] element\n debug(\"Subsection: %s\" % subsection)\n \n elif \".\" in line: # this is either a question or an answer?\n \n if line.split(\".\")[0].isdigit(): # case #3 it's a question, split on . into an array and take the element to the left and ask if it's a digit.\n quest = line # Since we know it's something like \"3. Are you a warlock?\" we stick that in the quest varable.\n debug(\"Question: %s\" % quest)\n # Create a question object and stick it in the dictonary with the key being the question (since we know it'll be unique)\n self.questions[quest] = question(section, subsection, quest) # I know it's redundant to have the key and have a value.\n \n elif line.startswith(\".\"): # case #4 answer All the answers startswith \".\" \n debug(\"Answer: %s\" % line)\n # take the question and append it to the answers array in the question object.\n self.questions[quest].answers.append(line[2:]) # Trim the first two characters off the answer since it's \". 
the answer\"\n \n else: # case #1 # This is section like AMERICAN DEMOCRACY\n section = line # load the line from the file into the section variable\n debug(\"Section = %s\" % section)", "def parse_answer(self, node):\n\n data = []\n\n if node is not None:\n for answer_node in node.select('div.post.answer'):\n item = {}\n\n item['id'] = answer_node['data-post-id']\n\n # User info\n answer_user_node = answer_node.find('div', class_='post-update-info-container')\n if answer_user_node is not None:\n item['users'] = self.parse_user(answer_user_node)\n\n # Vote count\n item['vote_count'] = 0\n item['vote_count_label'] = 0\n answer_vote_node = answer_node.find('div', id='answer-vote-number-' + str(item['id']))\n if answer_vote_node is not None:\n item['vote_count'] = answer_vote_node.get_text()\n item['vote_count_label'] = self.convert_count(item['vote_count'])\n\n # Content\n answer_info_node = answer_node.find('div', class_='post-update-info-container')\n if answer_info_node is not None:\n item['content'] = ''\n for p in answer_info_node.find_next_siblings():\n style = ''\n if p.name == 'pre':\n style += 'white-space:normal;'\n p['style'] = style\n item['content'] += self.parse_content(p)\n\n # Parse answer's comments\n answer_comments_node = answer_node.find('div', class_='comments')\n if answer_comments_node is not None:\n item['comments'] = self.parse_comment(answer_comments_node)\n\n item['has_more_comments'] = False\n add_comment_node = answer_node.find(id='add-comment-to-post-' + item['id'])\n if add_comment_node:\n add_comment_script = add_comment_node.find_next('script')\n if add_comment_script:\n more_comment_pattern = re.compile('\\[\\'comments-for-answer-(\\d+)\\'\\][ =]+{[\\n ]*truncated[ :]+(true|false)')\n more_comment_result = more_comment_pattern.search(add_comment_script.get_text())\n if more_comment_result:\n item['has_more_comments'] = True if more_comment_result.group(2) == 'true' else False\n\n data.append(item)\n\n return data", "def fill_question(self, response, question_answer):\n question_answer['source_url'] = response.url\n\n question_answer['question_title'] = response.xpath('//*[@id=\"question-header\"]/h1/a/text()').extract_first()\n question_answer['question_body'] = BeautifulSoup(\n response.xpath(self.gt.css_to_xpath('.postcell .post-text')).extract_first()).text\n question_answer['question_tags'] = list(set(\n response.xpath('//*[contains(concat(\" \", normalize-space(@class), \" \"), \" post-tag \")]/text()').extract()))\n # would like to specify the hierarchy of the css tags\n question_answer['question_upvotes'] = int(response.xpath(\n '//*[contains(concat(\" \", normalize-space(@class), \" \"), \" vote-count-post \")]/text()').extract_first())\n question_answer['question_view_count'] = int(\n response.xpath(self.gt.css_to_xpath('#qinfo .label-key') + '/b/text()').extract()[1].split(' ')[0])\n\n author_name = response.xpath(\n self.gt.css_to_xpath('.owner .user-details') + '/a/text()').extract_first()\n question_answer['question_author'] = {'author_id': '{}_{}'.format(self.allowed_domains[0], author_name),\n 'author_name': author_name}\n\n se_date_format = '%b %d \\'%y at %H:%M' # if date not current year\n se_date_format_curr_year = '%b %d at %H:%M' # if date current year\n try:\n try:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format))\n except ValueError:\n question_answer['question_date'] = 
date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format_curr_year))\n except (ValueError, TypeError):\n pass\n # Look for duplicates\n duplicate_url = response.xpath(self.gt.css_to_xpath('.question-originals-of-duplicate')+'/ul/li/a/@href').extract_first()\n if duplicate_url:\n print('duplicate question')\n self.duplicate_count += 1\n print('duplicate question count: {}'.format(self.duplicate_count))\n duplicate_url = \"https://superuser.com\" + duplicate_url\n print(duplicate_url)\n self.logger.info('duplicate url: {}'.format(duplicate_url))\n question_answer['question_original_url'] = duplicate_url\n self.duplicate_url = duplicate_url\n\n return question_answer", "def parse_response(self, response):\n elm_tree = ET.fromstring(response)\n data = {}\n for child in elm_tree:\n\n temp_list = []\n # related faqs\n if child.tag == 'faqitems':\n # if there are any related faqs present store each faq as an object in temp list this store list in return value.\n related_list = child.find('suggestedfaqlist').find('semanticfaqs')\n if len(related_list):\n for faq in related_list:\n temp_dict = {}\n for el in faq:\n temp_dict[el.tag] = el.text\n temp_list.append(temp_dict)\n # temp_list.append({\n # 'answer_id': faq.find('AnswerId'),\n # 'recognition_id': faq.find('RecognitionId'),\n # 'question_text': faq.find('QuestionText')\n # })\n data['related_list'] = temp_list\n\n else:\n data['related_list'] = None\n\n elif child.tag.lower() == 'connectors':\n if len(child):\n for connector in child:\n temp_dict = {}\n for el in connector:\n temp_dict[el.tag] = el.text\n temp_list.append(temp_dict)\n data['connectors'] = temp_list\n\n else:\n data['connectors'] = None\n\n elif child.tag.lower() == 'disambiguationoptions':\n if len(child):\n for option in child:\n temp_dict = {}\n for el in option:\n temp_dict[el.tag] = el.text\n temp_list.append(temp_dict)\n data['disambiguationoptions'] = temp_list\n\n else:\n data['disambiguationoptions'] = None\n\n else:\n data[child.tag] = child.text\n\n return data", "def p_parse(toks):\n return p_question_group.parseString(toks[0])", "def test_data_parse_vanilla_xml(self):\n lines = [\n '<?xml version=\"1.0\" encoding=\"UTF-8\"?>',\n '<note type=\"hi\"><to>Tove</to><from>Jani</from></note>'\n ]\n dat, dat_type = parser._parse_data(lines)\n self.assertEqual(\"note\", dat.tag)\n self.assertEqual({\"type\": \"hi\"}, dat.attrib)\n self.assertEqual(\"to\", dat[0].tag)\n self.assertEqual(\"Tove\", dat[0].text)\n self.assertEqual({}, dat[0].attrib)\n self.assertEqual(\"from\", dat[1].tag)\n self.assertEqual(\"Jani\", dat[1].text)\n self.assertEqual({}, dat[1].attrib)", "def process_question(qu):\n\n ## global ranking\n rank_info = {}\n rank_info_k = [\"viewcount\",\"score\",\"favoritecount\"]\n for k in rank_info_k:\n rank_info[k] = int(qu[k])\n qu.pop(k,None)\n\n rank_info[\"creationdate\"] = qu[\"creationdate\"]\n\n if qu[\"acceptedanswer\"]:\n qu[\"acceptedanswer\"] = list(qu[\"acceptedanswer\"])\n else:\n qu[\"acceptedanswer\"] = []\n\n qu.pop('comments',None) # discard comments, maybe add back later\n qu[\"rank_info\"] = rank_info\n\n return qu", "def _get_questions_from_tag_assessment(self, event_data):\n unit_id, lesson_id = self._get_unit_and_lesson_id_from_url(\n event_data['location'])\n if unit_id is None or lesson_id is None:\n return []\n\n if event_data['type'] == self.QUESTION_GROUP:\n mc_indices = [\n i for i in 
xrange(len(event_data['containedTypes']))\n if event_data['containedTypes'][i] == self.MC_QUESTION]\n return [{\n 'id': 'u.%s.l.%s.c.%s.i.%s' % (\n unit_id, lesson_id, event_data['instanceid'], index),\n 'score': event_data['individualScores'][index],\n 'answers': event_data['answer'][index]\n } for index in mc_indices if event_data['answer'][index]]\n elif (event_data['type'] == self.MC_QUESTION and\n event_data['answer']):\n # This is a single multiple-choice question.\n return [{\n 'id': 'u.%s.l.%s.c.%s' % (\n unit_id, lesson_id, event_data['instanceid']),\n 'score': event_data['score'],\n 'answers': event_data['answer']\n }]\n else:\n return []", "def GetDataFromTag(dom, tag):\n tags = dom.getElementsByTagName(tag)\n if not tags:\n return None\n elif tags[0].hasChildNodes():\n return tags[0].firstChild.data\n else:\n return ''", "def __getData(self, post,is_question=False):\n page = {'entity':'question' if is_question else 'answer', 'uri':self.currenturi}\n css_class_name = 'span' if is_question else 'div'\n if post.find('div', text='Post temporarily unavailable'):\n log.info(self.log_msg('Message Temporarily not available in url %s'%self.currenturi))\n return False\n if post.find('form', id='frm_quick_reply_submit'):\n log.info(self.log_msg('It is not a post'))\n return False\n try:\n page['et_author_name'] = stripHtml(post.find(css_class_name, 'vt_asked_by_user').renderContents())\n except:\n log.info(self.log_msg('Author name not found in %s'% self.currenturi))\n try:\n date_str = stripHtml(post.find(css_class_name,attrs={'class':re.compile('vt_.+?_timestamp')}).renderContents()).replace('replied ','').strip()\n date_str = re.sub(\"(\\d+)(st|nd|rd|th)\",r\"\\1\", date_str)\n page['posted_date'] = datetime.strftime(datetime.strptime(date_str\n , 'on %B %d, %Y'),\"%Y-%m-%dT%H:%M:%SZ\")\n except:\n page['posted_date'] = datetime.strftime(datetime.utcnow(), \"%Y-%m-%dT%H:%M:%SZ\")\n log.info(self.log_msg('Date not be found in %s'% self.currenturi))\n try:\n page['et_author_category'] = stripHtml(post.find('span', 'vt_user_rank').renderContents())\n except:\n log.info(self.log_msg('Author name not found in %s'% self.currenturi))\n try:\n data_tag = post.find('div', 'vt_post_body')\n ads_tag = post.findAll('div',attrs={'class':re.compile('vt_post_body_ad_[l/r]')})\n [each.extract() for each in ads_tag]\n page['data'] = stripHtml(data_tag.renderContents())\n except:\n log.info(post)\n log.info(self.log_msg('Cannot find the Data for this Post %s'%self.currenturi))\n page['data'] = ''\n try:\n if is_question:\n page['title'] = stripHtml(str(post.find('div', 'vt_post_subject').span.next.next))\n else:\n page['title'] = 'Re: ' + self.__hierarchy[-1]\n except:\n log.info(self.log_msg('Cannot find the Data thread details for this Post %s'%self.currenturi))\n page['title'] = ''\n if not (page['data'] and page['title']):\n log.info(self.log_msg('No data found in url %s'%self.currenturi))\n return \n if len(self.__hierarchy) >= 3:\n page['et_thread_topic'] = self.__hierarchy[-1]\n page['et_thread_forum'] = self.__hierarchy[-3]\n page['et_thread_subforum'] = self.__hierarchy[-2]\n return page", "def question_medtagger(self):\n self.save_on_file(self.cleaned_questions_q_dot, 'questions.txt')\n medpos_tag_out = self.medpos_tag('questions.txt')\n parsed_questions = self.parse(medpos_tag_out)\n parsed_questions_type = []\n parsed_questions_out = []\n for i in range(0, len(self.data['questions'])):\n # [ type, [ (w1, tag_w1) , (w2, tag_w2) ... 
(wn, tagwn) ] ]\n parsed_questions_type += [[self.data['questions'][i]['type']] + [parsed_questions[i]]]\n parsed_questions_out += [parsed_questions[i]]\n return parsed_questions_out, parsed_questions_type", "def parse_data(node):\n if node['__typename'] == 'Repository':\n return parse_repo_data(node)\n if node['__typename'] == 'Issue' or node['__typename'] == 'PullRequest':\n return parse_issue_data(node)", "def get_related_questions(element):\n tag2path = {\n 'OrgQuestion': './Thread/RelQuestion',\n 'Thread': './RelQuestion',\n 'RelQuestion': '.',\n }\n if element.tag in tag2path:\n return element.findall(tag2path[element.tag])\n return element.findall('./OrgQuestion/Thread/RelQuestion')", "def get_question_data(data):\n\n def get_value(js):\n if js.get('value') is not None:\n return js['value']\n if js.get('children') is not None:\n return get_question_data(js['children'])\n\n ls = list(map(lambda x: {\n x['name']: get_value(x),\n }, data))\n return dict(ChainMap(*ls))", "def get_semeval_content(element):\n if element.tag == 'OrgQuestion':\n return get_orgquestion_content(element)\n\n if element.tag == 'RelQuestion':\n return get_relquestion_content(element)\n\n if element.tag == 'Thread':\n return get_relquestion_content(element.find('./RelQuestion'))\n\n if element.tag == 'RelComment':\n return get_relcomment_content(element)\n\n return None", "def parse2016(filename, qdict, cdict):\n \n tree = ET.parse(filename)\n root = tree.getroot()\n\n for child in root:\n # Each child represents a new (original question, related question) pair\n orgq_id = child.attrib[\"ORGQ_ID\"]\n relq_id = child[2].attrib[\"THREAD_SEQUENCE\"]\n orgq_comment = []\n relq_comment = []\n # get orgq_comment, relq_comment\n orgq_subject = child[0].text if child[0].text != None else \"\"\n orgq_body = child[1].text if child[1].text != None else \"\"\n DUPLICATE = True if \"SubtaskA_Skip_Because_Same_As_RelQuestion_ID\" in child[2].attrib else False \n for rel in child[2]:\n if rel.tag == \"RelQuestion\":\n relq_subject = rel[0].text if rel[0].text != None else \"\"\n relq_body = rel[1].text if rel[1].text != None else \"\"\n elif rel.tag == \"RelComment\":\n c_text = rel[0].text\n orgq_c_label = rel.attrib[\"RELC_RELEVANCE2ORGQ\"]\n orgq_comment.append((c_text, orgq_c_label))\n relq_c_label = rel.attrib[\"RELC_RELEVANCE2RELQ\"]\n relq_comment.append((c_text, relq_c_label))\n\n if DUPLICATE is False:\n qdict[relq_id] = (relq_subject, relq_body)\n cdict[relq_id] = relq_comment\n \n if (orgq_id in qdict) != (orgq_id in cdict):\n print(\"WARNING qdict inconsistent with cdict\")\n elif orgq_id not in qdict:\n qdict[orgq_id] = (orgq_subject, orgq_body)\n cdict[orgq_id] = relq_comment\n else:\n cdict[orgq_id] = cdict[orgq_id] + orgq_comment\n \n return qdict, cdict", "def _extract_html(self, problemtree): # private\r\n if not isinstance(problemtree.tag, basestring):\r\n # Comment and ProcessingInstruction nodes are not Elements,\r\n # and we're ok leaving those behind.\r\n # BTW: etree gives us no good way to distinguish these things\r\n # other than to examine .tag to see if it's a string. 
:(\r\n return\r\n\r\n if (problemtree.tag == 'script' and problemtree.get('type')\r\n and 'javascript' in problemtree.get('type')):\r\n # leave javascript intact.\r\n return deepcopy(problemtree)\r\n\r\n if problemtree.tag in html_problem_semantics:\r\n return\r\n\r\n problemid = problemtree.get('id') # my ID\r\n\r\n if problemtree.tag in inputtypes.registry.registered_tags():\r\n # If this is an inputtype subtree, let it render itself.\r\n status = \"unsubmitted\"\r\n msg = ''\r\n hint = ''\r\n hintmode = None\r\n input_id = problemtree.get('id')\r\n if problemid in self.correct_map:\r\n pid = input_id\r\n status = self.correct_map.get_correctness(pid)\r\n msg = self.correct_map.get_msg(pid)\r\n hint = self.correct_map.get_hint(pid)\r\n hintmode = self.correct_map.get_hintmode(pid)\r\n\r\n value = \"\"\r\n if self.student_answers and problemid in self.student_answers:\r\n value = self.student_answers[problemid]\r\n\r\n if input_id not in self.input_state:\r\n self.input_state[input_id] = {}\r\n\r\n # do the rendering\r\n state = {\r\n 'value': value,\r\n 'status': status,\r\n 'id': input_id,\r\n 'input_state': self.input_state[input_id],\r\n 'feedback': {\r\n 'message': msg,\r\n 'hint': hint,\r\n 'hintmode': hintmode,\r\n }\r\n }\r\n\r\n input_type_cls = inputtypes.registry.get_class_for_tag(problemtree.tag)\r\n # save the input type so that we can make ajax calls on it if we need to\r\n self.inputs[input_id] = input_type_cls(self.capa_system, problemtree, state)\r\n return self.inputs[input_id].get_html()\r\n\r\n # let each Response render itself\r\n if problemtree in self.responders:\r\n overall_msg = self.correct_map.get_overall_message()\r\n return self.responders[problemtree].render_html(\r\n self._extract_html, response_msg=overall_msg\r\n )\r\n\r\n # let each custom renderer render itself:\r\n if problemtree.tag in customrender.registry.registered_tags():\r\n renderer_class = customrender.registry.get_class_for_tag(problemtree.tag)\r\n renderer = renderer_class(self.capa_system, problemtree)\r\n return renderer.get_html()\r\n\r\n # otherwise, render children recursively, and copy over attributes\r\n tree = etree.Element(problemtree.tag)\r\n for item in problemtree:\r\n item_xhtml = self._extract_html(item)\r\n if item_xhtml is not None:\r\n tree.append(item_xhtml)\r\n\r\n if tree.tag in html_transforms:\r\n tree.tag = html_transforms[problemtree.tag]['tag']\r\n else:\r\n # copy attributes over if not innocufying\r\n for (key, value) in problemtree.items():\r\n tree.set(key, value)\r\n\r\n tree.text = problemtree.text\r\n tree.tail = problemtree.tail\r\n\r\n return tree", "def parse(content):\r\n soup = BeautifulSoup(content)\r\n submissions = soup.findAll('div')\r\n\r\n submission_data = []\r\n for s in submissions:\r\n t = s.getText()\r\n try:\r\n num, date, score = num_date_pattern.findall(t)[0]\r\n num = s.find('b').getText()\r\n data = json.loads(unescape(s.find('pre').getText()))\r\n d = {'submission_num': num,\r\n 'date': date,\r\n 'score': score,\r\n 'detail': data}\r\n submission_data.append(d)\r\n except Exception, e:\r\n logging.error(\"Error finding num_date_pattern: %s. 
Text:\\n%s\" % (e, t))\r\n return submission_data", "def extract_answer_from_html(self, html):\n if html.strip().startswith('<'):\n soup = bs4.BeautifulSoup(html, 'html.parser')\n\n for p in soup.find_all('p'):\n if self.REPLY_RE.match(p.text):\n for el in list(p.previous_elements):\n if isinstance(el, bs4.element.Tag):\n el.decompose()\n p.decompose()\n break\n\n return str(soup)\n else:\n # plain text\n match = self.REPLY_RE.search(html)\n if match:\n return html[match.end(0):]\n\n return html", "def get_relquestion_content(relquestion):\n return '. '.join(\n [relquestion.find(tag).text\n if relquestion.find(tag).text is not None else ''\n for tag in ['RelQSubject', 'RelQBody']\n ]\n )", "def process(data_item, article_id):\n questions = []\n answers = []\n paragraph = [article_id, data_item['context']]\n\n for item in data_item['qas']:\n question = [item[\"id\"], item[\"question\"], item['is_impossible']]\n questions.append(question)\n if item['is_impossible']:\n continue\n answer_options = item[\"answers\"]\n answer_set = set()\n for option in answer_options:\n answer_tuple = (option['text'], option['answer_start'])\n answer_set.add(answer_tuple)\n for index, answer_tuple in enumerate(answer_set):\n answer = [\"{}_{}\".format(item[\"id\"], index+1), item[\"id\"], answer_tuple[0], answer_tuple[1]]\n answers.append(answer)\n return paragraph, questions, answers", "def _parse_preset(self, xmldata):\r\n\r\n raise NotImplementedError", "def parse_comment(dom: str) -> Dict[str, str]:\n try:\n pstr = minidom.parseString(dom)\n # If it is not complete dom, mark it as \"partial\"\n # to concatinate to the next data.\n except xml.parsers.expat.ExpatError:\n resp = {\n \"tag\": \"partial\",\n \"data\": dom\n }\n return resp\n\n try:\n chat = pstr.getElementsByTagName(\"chat\")[0]\n except IndexError:\n try:\n _ = pstr.getElementsByTagName(\"thread\")[0]\n except IndexError:\n pass\n # Initial recieved data.\n else:\n tag = \"thread\"\n resp = {\n \"tag\": tag\n }\n else:\n tag = \"chat\"\n # KeyError occurs on official programs.\n try:\n commentno = chat.attributes[\"no\"].value\n except KeyError:\n commentno = \"-\"\n\n time = str(chat.attributes[\"date\"].value)\n userid = chat.attributes[\"user_id\"].value\n\n # Free members don't have premium key.\n try:\n premium = chat.attributes[\"premium\"].value\n except KeyError:\n premium = \"0\"\n\n # ID users don't have anonymity key.\n try:\n anonymity = chat.attributes[\"anonymity\"].value\n except KeyError:\n anonymity = \"0\"\n\n # Owner don't has locale key.\n try:\n locale = chat.attributes[\"locale\"].value\n except KeyError:\n locale = \"ja-jp\"\n\n # If score is 0, dont have score key.\n try:\n score = chat.attributes[\"score\"].value\n except KeyError:\n score = \"0\"\n\n # Comment content.\n content = chat.firstChild.data\n\n resp = {\n \"tag\": tag,\n \"no\": commentno,\n \"time\": time,\n \"id\": userid,\n \"premium\": premium,\n \"anonymity\": anonymity,\n \"locale\": locale,\n \"score\": score,\n \"content\": content\n }\n\n return resp", "def parse_question(question):\n\tcontext = question['support']\n\tanswer = question['correct_answer']\n\ttarget = question['question']\n\n\tcontext_words = context.split(\" \")[0: 510]\n\ttarget_words = target.split(\" \")\n\n\tpunc_filter = str.maketrans('', '', string.punctuation)\n\n\tcontext_words = [word.translate(punc_filter) for word in context_words]\n\ttarget_words = [word.translate(punc_filter) for word in target_words]\n\tanswer_words = [word.translate(punc_filter) for word in 
answer.split(\" \")]\n\n\tbio_embeddings = [EMBEDER['O']]\n\tinside_answer = False\n\tanswer_index = 0\n\tcan_be_inside_answer = True\n\n\t# The following loop and above code does:\n\t# -Find where the answer is and place a B tag\n\t# -While still in the answer (the answer is more than one word) put an I tag\n\t# -Outside of the answer place a O tag\n\t# -Start and end with an O tag for BERT's automatic\n\t# -start token and end token representing the start and end of a sentence.\n\tfor word in context_words:\n\t\tif word.lower() == answer_words[0].lower() and can_be_inside_answer:\n\t\t\tbio_embeddings.append(EMBEDER[\"B\"])\n\t\t\tanswer_index += 1\n\t\t\tinside_answer = True\n\t\t\tcan_be_inside_answer = False\n\t\telif inside_answer:\n\t\t\tif len(answer_words) > 1:\n\t\t\t\tif word.lower() != answer_words[answer_index]:\n\t\t\t\t\tinside_answer = False\n\t\t\t\t\tbio_embeddings.append(EMBEDER[\"O\"])\n\t\t\t\telse:\n\t\t\t\t\tbio_embeddings.append(EMBEDER[\"I\"])\n\t\t\telse:\n\t\t\t\tinside_answer = False\n\t\t\t\tbio_embeddings.append(EMBEDER[\"O\"])\n\t\telse:\n\t\t\tbio_embeddings.append(EMBEDER[\"O\"])\n\tbio_embeddings.append(EMBEDER[\"O\"])\n\n\tground_truth = torch.tensor([BERT_TOKENIZER.encode(target_words)])\n\tcontext_words = torch.tensor([BERT_TOKENIZER.encode(context_words)])\n\n\tassert len(bio_embeddings) == len(context_words[0]), f'The BIO tags are not equal in length to the embeddings! ' \\\n\t f'{None} & {len(bio_embeddings)} & {len(context_words[0])}'\n\treturn context_words, bio_embeddings, ground_truth", "def get_data(node):\n return node['data']", "def parse_question(\n question: Question\n ) -> tuple[str, list[list[InlineKeyboardButton]]]:\n statement = question.ask_question()\n options = question.get_options()\n keyboard = [[\n InlineKeyboardButton(str(i + 1), callback_data=f'option_{i}')\n for i in range(len(options))\n ]]\n\n return (statement, keyboard)", "def parse_question(save=False):\n\n text = read_screen(save)\n lines = text.splitlines()\n question = \"\"\n options = list()\n flag = False\n\n for line in lines:\n if not flag:\n question = question + \" \" + line\n if '?' in line:\n flag = True\n continue\n if flag:\n if line != '':\n options.append(line)\n\n return question, options", "def parse_fitting_information(node):\n\n fit_tag = node.getElementsByTagName('fit')[0]\n raw_fiting_strings = str(fit_tag.firstChild.data).split()\n\n if len(raw_fiting_strings) == 1 and re_none.match(raw_fiting_strings[0]):\n return None\n else:\n fitting_strings = []\n for i in raw_fiting_strings:\n # replace species with sample_points\n fitting_string = re.sub('species', 'sample_points', i)\n\n # find all instances of sample_points[ ] and extract the list of species numbers\n sp_strings = re.findall(\"sample_points([0-9]+)\", fitting_string)\n sp_nums = [int(j) for j in sp_strings]\n sp_nums.sort()\n sp_nums.reverse()\n\n # loop over the species numbers and replace\n for n in sp_nums:\n fitting_string = re.sub('ts' + str(n), 'ts[:,' + str(n - 1) + ']', fitting_string)\n\n fitting_strings.append(fitting_string)\n\n return fitting_strings" ]
[ "0.6670985", "0.5976315", "0.59380513", "0.58029866", "0.57788837", "0.5576191", "0.53909415", "0.5249243", "0.5216531", "0.5199933", "0.51827544", "0.51701975", "0.51307523", "0.5097334", "0.5091783", "0.5084911", "0.5055956", "0.5032215", "0.5027602", "0.5010149", "0.49967995", "0.49960023", "0.49916148", "0.49833038", "0.49655426", "0.49264416", "0.49237403", "0.49139693", "0.48887172", "0.48869145" ]
0.770682
0
Parse question details from HTML response
def _parse_question(self, text, params={}): dom = BeautifulSoup(text, 'html.parser') data = {} # If requested page is not first page, it mean we only need load more answers if params['page'] == 1: # Parse user info data['users'] = [] post_node = dom.select_one('div.post.question') if post_node is not None: user_node = post_node.find('div', class_='post-update-info-container') if user_node is not None: data['users'] = self.parse_user(user_node) # Parse question's comments data['comments'] = [] comments_node = dom.find(id='comments-for-question-' + str(int(params['id']))) if comments_node is not None: data['comments'] = self.parse_comment(comments_node) data['has_more_comments'] = False add_comment_node = dom.find(id='add-comment-to-post-' + str(int(params['id']))) if add_comment_node: add_comment_script = add_comment_node.find_next('script') if add_comment_script: more_comment_pattern = re.compile('\[\'comments-for-question-(\d+)\'\][ =]+{[\n ]*truncated[ :]+(true|false)') more_comment_result = more_comment_pattern.search(add_comment_script.get_text()) if more_comment_result: data['has_more_comments'] = True if more_comment_result.group(2) == 'true' else False # Parse CSRF token csrf_node = dom.find('input', attrs={'name': 'csrfmiddlewaretoken'}) if csrf_node: #Utils.log('CSRF: ' + csrf_node.get('value')) self.csrfToken = csrf_node.get('value') # Parse followers data['followers'] = 0 favorite_node = dom.find('div', attrs={'id': 'favorite-number'}) if favorite_node is not None: favorite_text = favorite_node.get_text().strip() favorite_pattern = re.compile('(\d+) follower[s]*') favorite_result = favorite_pattern.search(favorite_text) if favorite_result: data['followers'] = int(favorite_result.group(1)) # Parse following status data['following'] = False favorite_btn_node = dom.select_one('a.button.followed') if favorite_btn_node is not None and favorite_btn_node.get('alt') == 'click to unfollow this question': data['following'] = True # Parse related questions data['related'] = [] related_nodes = dom.find('div', class_='questions-related') if related_nodes is not None: for related_node in related_nodes.select('p'): a_node = related_node.find('a') item = {} item['title'] = a_node.get_text() item['url'] = self.get_link(a_node.get('href')) data['related'].append(item) # Parse votes data['votes'] = {} for script in dom.select('script'): script_text = script.get_text() if not script_text: continue if script_text.find('var votes = {};') != -1: for vote in re.findall('votes\[\'(\d+)\'\][ ]*=[ ]*([-1]+)', script_text): data['votes'][vote[0]] = int(vote[1]) break # Parse question status status_node = dom.select_one('div.question-status') if status_node: data['status'] = {} status_reason_node = status_node.select_one('b') if status_reason_node: data['status']['reason'] = status_reason_node.get_text().strip('"') status_author_node = status_node.select_one('a') if status_author_node: data['status']['author'] = status_author_node.get_text() data['status']['profile_url'] = self.get_link(status_author_node.get('href')) status_date_pattern = re.compile('close date (\d+-\d+-\d+ \d+:\d+:\d+)') status_date_result = status_date_pattern.search(status_node.get_text()) if status_date_result: data['status']['date'] = status_date_result.group(1) status_date = datetime.strptime(data['status']['date'], '%Y-%m-%d %H:%M:%S') data['status']['date_ago'] = timeago.format(status_date, datetime.utcnow()) # Parse question paging data['has_more_answers'] = False paging_node = dom.find('div', class_='paginator') if paging_node is 
not None: current_page_node = paging_node.find('span', class_='curr') if current_page_node is not None: data['page'] = int(current_page_node.get_text().strip()) else: data['page'] = 1 next_page_node = paging_node.find('span', class_='next') if next_page_node is not None: data['has_more_answers'] = True # Parse question's answers data['answers'] = self.parse_answer(dom) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_question_html(self, node):\n\n if node is None:\n return None\n\n data = {}\n\n data['has_more_comments'] = False\n data['has_more_answers'] = False\n data['id'] = int(node.get('id').replace('question-', ''))\n\n h2_node = node.find('h2')\n if h2_node is not None:\n a_node = h2_node.find('a')\n if a_node is not None:\n data['title'] = a_node.get_text()\n data['url'] = self.get_link(a_node.get('href'))\n\n view_count_node = node.find('div', class_='views')\n if view_count_node is not None:\n view_count_value = view_count_node.find('span', class_='item-count').get_text()\n data['view_count_label'] = '0' if view_count_value == 'no' else view_count_value\n\n score_count_node = node.find('div', class_='votes')\n if score_count_node is not None:\n score_count_value = score_count_node.find('span', class_='item-count').get_text()\n data['score_label'] = '0' if score_count_value == 'no' else score_count_value\n\n answer_count_node = node.find('div', class_='answers')\n if answer_count_node is not None:\n answer_count_value = answer_count_node.find('span', class_='item-count').get_text()\n data['answer_count_label'] = '0' if answer_count_value == 'no' else answer_count_value\n\n return data", "def parse(response):\n # print(response.text.encode('utf-8'))\n soup = BeautifulSoup(response.text, 'lxml')\n title = soup.find('title')\n answer = title.string\n return answer", "def scrape_question(page_text, ans):\n\n sq = BeautifulSoup(page_text, 'html.parser')\n question = Question(sq, ans)\n\n return question.__dict__", "def parse_response(self, response):\n elm_tree = ET.fromstring(response)\n data = {}\n for child in elm_tree:\n\n temp_list = []\n # related faqs\n if child.tag == 'faqitems':\n # if there are any related faqs present store each faq as an object in temp list this store list in return value.\n related_list = child.find('suggestedfaqlist').find('semanticfaqs')\n if len(related_list):\n for faq in related_list:\n temp_dict = {}\n for el in faq:\n temp_dict[el.tag] = el.text\n temp_list.append(temp_dict)\n # temp_list.append({\n # 'answer_id': faq.find('AnswerId'),\n # 'recognition_id': faq.find('RecognitionId'),\n # 'question_text': faq.find('QuestionText')\n # })\n data['related_list'] = temp_list\n\n else:\n data['related_list'] = None\n\n elif child.tag.lower() == 'connectors':\n if len(child):\n for connector in child:\n temp_dict = {}\n for el in connector:\n temp_dict[el.tag] = el.text\n temp_list.append(temp_dict)\n data['connectors'] = temp_list\n\n else:\n data['connectors'] = None\n\n elif child.tag.lower() == 'disambiguationoptions':\n if len(child):\n for option in child:\n temp_dict = {}\n for el in option:\n temp_dict[el.tag] = el.text\n temp_list.append(temp_dict)\n data['disambiguationoptions'] = temp_list\n\n else:\n data['disambiguationoptions'] = None\n\n else:\n data[child.tag] = child.text\n\n return data", "def parse_survey_details():\n json_data = open('/Users/williamliu/GitHub/surveys/get_survey_details.json')\n loaded_data = json.load(json_data)\n\n print loaded_data['data']['pages'][1]['questions'][1]['heading']\n # I am preoccupied with more than one person I help", "def extract_answer_from_html(self, html):\n if html.strip().startswith('<'):\n soup = bs4.BeautifulSoup(html, 'html.parser')\n\n for p in soup.find_all('p'):\n if self.REPLY_RE.match(p.text):\n for el in list(p.previous_elements):\n if isinstance(el, bs4.element.Tag):\n el.decompose()\n p.decompose()\n break\n\n return str(soup)\n else:\n # plain text\n match = 
self.REPLY_RE.search(html)\n if match:\n return html[match.end(0):]\n\n return html", "def parse(self, response):", "def parse(self, response: BeautifulSoup):\n raise NotImplementedError", "def _parse_questions(self, text, params):\n\n try:\n data = json.loads(text)\n except ValueError as e:\n Utils.log(traceback.format_exc())\n raise Exception('Could not get content')\n\n output = {}\n output['count'] = data['count']\n output['pages'] = data['pages']\n output['page'] = params['page'] if 'page' in params else 1\n output['questions'] = []\n for q in data['questions']:\n output['questions'].append(self.convert_question(q))\n\n return output", "def parse_question_data(self):\n section = ''\n subsection = ''\n quest = ''\n # The data falls into 4 cases\n # 1. Sections\n # 2. subsections\n # 3. questions\n # 4. answers.\n\n for line in self.question_data: \n\n if \":\" in line: # case #2\n subsection = line.split(\":\")[1] # split the line on the : into an array but only take the [1] element\n debug(\"Subsection: %s\" % subsection)\n \n elif \".\" in line: # this is either a question or an answer?\n \n if line.split(\".\")[0].isdigit(): # case #3 it's a question, split on . into an array and take the element to the left and ask if it's a digit.\n quest = line # Since we know it's something like \"3. Are you a warlock?\" we stick that in the quest varable.\n debug(\"Question: %s\" % quest)\n # Create a question object and stick it in the dictonary with the key being the question (since we know it'll be unique)\n self.questions[quest] = question(section, subsection, quest) # I know it's redundant to have the key and have a value.\n \n elif line.startswith(\".\"): # case #4 answer All the answers startswith \".\" \n debug(\"Answer: %s\" % line)\n # take the question and append it to the answers array in the question object.\n self.questions[quest].answers.append(line[2:]) # Trim the first two characters off the answer since it's \". 
the answer\"\n \n else: # case #1 # This is section like AMERICAN DEMOCRACY\n section = line # load the line from the file into the section variable\n debug(\"Section = %s\" % section)", "def fill_question(self, response, question_answer):\n question_answer['source_url'] = response.url\n\n question_answer['question_title'] = response.xpath('//*[@id=\"question-header\"]/h1/a/text()').extract_first()\n question_answer['question_body'] = BeautifulSoup(\n response.xpath(self.gt.css_to_xpath('.postcell .post-text')).extract_first()).text\n question_answer['question_tags'] = list(set(\n response.xpath('//*[contains(concat(\" \", normalize-space(@class), \" \"), \" post-tag \")]/text()').extract()))\n # would like to specify the hierarchy of the css tags\n question_answer['question_upvotes'] = int(response.xpath(\n '//*[contains(concat(\" \", normalize-space(@class), \" \"), \" vote-count-post \")]/text()').extract_first())\n question_answer['question_view_count'] = int(\n response.xpath(self.gt.css_to_xpath('#qinfo .label-key') + '/b/text()').extract()[1].split(' ')[0])\n\n author_name = response.xpath(\n self.gt.css_to_xpath('.owner .user-details') + '/a/text()').extract_first()\n question_answer['question_author'] = {'author_id': '{}_{}'.format(self.allowed_domains[0], author_name),\n 'author_name': author_name}\n\n se_date_format = '%b %d \\'%y at %H:%M' # if date not current year\n se_date_format_curr_year = '%b %d at %H:%M' # if date current year\n try:\n try:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format))\n except ValueError:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format_curr_year))\n except (ValueError, TypeError):\n pass\n # Look for duplicates\n duplicate_url = response.xpath(self.gt.css_to_xpath('.question-originals-of-duplicate')+'/ul/li/a/@href').extract_first()\n if duplicate_url:\n print('duplicate question')\n self.duplicate_count += 1\n print('duplicate question count: {}'.format(self.duplicate_count))\n duplicate_url = \"https://superuser.com\" + duplicate_url\n print(duplicate_url)\n self.logger.info('duplicate url: {}'.format(duplicate_url))\n question_answer['question_original_url'] = duplicate_url\n self.duplicate_url = duplicate_url\n\n return question_answer", "def parse_answers(dns_resp: str, session_cache):\n\n ID = dns_resp[:4]\n other_flags = dns_resp[4:8]\n questions_count = dns_resp[8:12]\n answers_count = dns_resp[12:16]\n auth_serv_info = dns_resp[16:20]\n additional_info = dns_resp[20:24]\n offset = 0\n ip = \"0.0.0.0\"\n\n # может придти несколько ответов, из каждого вычленим нужные записи\n for i in range(int(answers_count, 16)):\n try:\n ip, offset = DNSHandler.parse_answer(dns_resp, session_cache, offset=offset * i)\n except ValueError:\n print(\"url does not exist\")\n sys.exit(0)\n return ip", "def process_question_answer_page(self, response):\n self.results_page_count += 1\n self.classification_file.write(\"results, {}\\n\".format(response.url))\n logging.info('results: {}'.format(response.url))\n print(\"results: {}\".format(response.url))\n\n # Filters\n if not self.page_contains_answers(response):\n return []\n\n # Process posts\n question_answer_list = []\n question_answer = QuestionAnswer()\n question_answer = self.fill_question(response, 
question_answer)\n # cycle through answers and build Q/A pairs\n answers = response.xpath(self.gt.css_to_xpath('.answercell .post-text')).extract()\n for answer_number in range(len(answers)):\n question_answer_copy = question_answer.copy()\n question_answer_copy = self.fill_answer(response, question_answer_copy, answer_number)\n question_answer_list.append(question_answer_copy)\n return question_answer_list", "def load_questions_from_web():\n # sending http request\n r = requests.get(http_api_questions)\n \n # loading the content to JSON object\n json_content = json.loads(r.content)\n \n # deleting unnecessary value from the JSON\n del json_content['response_code']\n\n # creating the questions dict\n questions = {}\n\n # questions from the JSON (in list [])\n questions_from_json = json_content['results']\n\n # looping through the questions, convert it to usual formatted question\n for index_question, question in enumerate(questions_from_json):\n\n # correct-answer\n correct_answer = question[\"correct_answer\"]\n\n # list of all answers\n all_answers = [correct_answer] + question[\"incorrect_answers\"]\n\n # shuffling the list [otherwise, the answer will always be in the first place]\n random.shuffle(all_answers)\n\n # finding the new index of the correct-answer\n correct_answer_index = all_answers.index(correct_answer)\n\n # important! - converting from 'XML\\CSS\\HTML' format to 'utf-8'\n fixed_replaced_question = question[\"question\"].replace(\"&#039;\", \"'\").replace(\"&quot;\", \"'\")\n \n # discarding specific question if it still has unwelcomed characters\n if(fixed_replaced_question.find('#') != -1 or fixed_replaced_question.find('|') != -1):\n break\n \n # copying the question to the new dict\n questions[str(index_question)] = {\n \"question\" : fixed_replaced_question,\n \"answers\" : all_answers,\n \"correct\" : str(correct_answer_index + 1)\n }\n \n # returns the questions' dict\n return questions", "def extract_questions_from_text(self, text):\n questions = []\n\n for match in self.QUESTION_RE.finditer(text):\n match_dict = match.groupdict()\n\n answer_type = match_dict['answer_type']\n number1 = match_dict.pop('number1')\n\n if answer_type == 'O':\n if re.search('(?i)to ask the Deputy President', match_dict['intro']):\n match_dict['dp_number'] = number1\n elif re.search('(?i)to ask the President', match_dict['intro']):\n match_dict['president_number'] = number1\n else:\n match_dict['oral_number'] = number1\n elif answer_type == 'W':\n match_dict['written_number'] = number1\n\n match_dict['translated'] = bool(match_dict['translated'])\n match_dict['questionto'] = match_dict['questionto'].replace(':', '')\n match_dict['questionto'] = self.correct_minister_title(match_dict['questionto'])\n\n questions.append(match_dict)\n\n return questions", "def test_parse_hit_details(self):\n for query in self.result:\n first_hsp = self.result[query][0][0]\n self.assertEqual(first_hsp[\"SUBJECT_ID\"], \"gi|148670104|gb|EDL02051.1|\")\n self.assertEqual(\n first_hsp[\"HIT_DEF\"],\n \"insulin-like growth factor 2 receptor, isoform CRA_c [Mus musculus]\",\n )\n self.assertEqual(first_hsp[\"HIT_ACCESSION\"], \"2001\")\n self.assertEqual(first_hsp[\"HIT_LENGTH\"], 707)", "def process():\n question = request.form['question']\n parsed_question = parse(question)\n wiki_extract = get_wiki_extract(parsed_question)\n wiki_url = get_wiki_url(parsed_question)\n address = get_address(parsed_question)\n lat = get_latitude(parsed_question)\n lng = get_longitude(parsed_question)\n return jsonify({'data': 
[question, address, wiki_extract,\n wiki_url, lat, lng, parsed_question]})", "def _parse_question_json(self, text, params={}):\n\n try:\n data = json.loads(text)\n except ValueError as e:\n Utils.log(traceback.format_exc())\n raise Exception('Could not get content')\n\n output = self.convert_question(data)\n output['body'] = self.markdown(output['body'])\n\n return output", "def parse_response(self):\n pass", "def decode_html(html):\n soup = BeautifulSoup(html, 'xml')\n return {\n 'personality': ProfileApi.get_personality(soup),\n 'ida_years': ProfileApi.get_ida_years(soup),\n 'years': ProfileApi.get_years(soup),\n 'choices': ProfileApi.get_choices(soup)\n }", "def detail(self):\n url = '/question/%d' % self.id\n d = req.get(url)\n return parser.detail(d)", "def parse_article_html(page_resp):\n article_url = page_resp.url\n \n article_page_soup = bs4.BeautifulSoup(page_resp.text, \"lxml\")\n \n title_html = article_page_soup.find_all(\"h1\")[0]\n title_text = title_html.contents[0]\n \n date = article_page_soup.find_all(\"small\", {'class': 'gray'})[0]\n date_text = date.contents[4].replace(\" \", \"\").split(\"\\n\")[3][:10]\n \n article_content = article_page_soup.find_all(\"div\", {'class': 'rich_media_content'})[0]\n article_text = article_content.get_text('\\n')\n is_original = check_if_original(article_content) or '[原创]' in title_text\n \n return {\n 'title': title_text,\n 'date': date_text,\n 'url': article_url,\n 'is_original': is_original,\n 'text': article_text\n \n}", "def parseSearchHtml(self):\n pass", "def parseSearchHtml(self):\n pass", "def parse(content):\r\n soup = BeautifulSoup(content)\r\n submissions = soup.findAll('div')\r\n\r\n submission_data = []\r\n for s in submissions:\r\n t = s.getText()\r\n try:\r\n num, date, score = num_date_pattern.findall(t)[0]\r\n num = s.find('b').getText()\r\n data = json.loads(unescape(s.find('pre').getText()))\r\n d = {'submission_num': num,\r\n 'date': date,\r\n 'score': score,\r\n 'detail': data}\r\n submission_data.append(d)\r\n except Exception, e:\r\n logging.error(\"Error finding num_date_pattern: %s. 
Text:\\n%s\" % (e, t))\r\n return submission_data", "def parse_get_responses():\n json_data = open('/Users/williamliu/GitHub/surveys/get_responses.json')\n loaded_data = json.load(json_data)\n test = json_normalize(loaded_data['data'])\n\n print type(test)\n print test.head()\n\n # Get first respondent's questions and answers back\n #print loaded_data['data'][1]['questions'][1]['question_id'] # Get respondent's question_id\n #print loaded_data['data'][1]['questions'][1]['answers'] # Get respondent's question_id", "def get_questionnaire(self, url, survey_path):\n pass", "def parse_doctor_detail(self, response):\n\n hxs = HtmlXPathSelector(response)\n\n\n l = XPathItemLoader(CYDoctorItem(), hxs)\n\n\n l.add_xpath('_name', (\"//div[@class='bdHd']/h1/text()\"))\n\n shortdesc = hxs.select(\"//div[@id='mainColumn']//p[@class='bdFt']/text()\").extract()\n if len(shortdesc) == 1:\n shortdescStr = shortdesc[0].strip()\n words = shortdescStr.split()\n if len(words) == 3:\n l.add_value('title', words[0])\n l.add_value('hospital', words[1])\n l.add_value('specialty', words[2])\n else:\n print (\"title/hostpital/special error.\")\n\n\n\n l.add_xpath('specialtyDesc', \"//div[@id='docOtherInfo']/div[@class='infoCell'][1]//p[2]/text()\")\n l.add_xpath('personalInfo', \"//div[@id='docOtherInfo']/div[@class='infoCell'][2]//p[2]/text()\")\n l.add_xpath('stars', \"//p[@class='right starTxt']/text()\")\n\n answer = hxs.select(\"//div[@id='resolvedData']/p[1]/a/text()\").extract()\n if len(answer) == 1:\n answerStr = answer[0].strip().replace(u\"\\xa0\", \"\")\n m = re.match(u\"解答:(?P<answer_cnt>\\d+)\", answerStr)\n if m.groupdict()[\"answer_cnt\"]is not None:\n l.add_value('answers', m.groupdict()[\"answer_cnt\"])\n\n review = hxs.select(\"//div[@id='resolvedData']/p[2]/text()\").extract()\n if len(review) == 1:\n reviewStr = review[0].strip().replace(u\"\\xa0\", \"\")\n m = re.match(u\"评价:(?P<review_cnt>\\d+)\", reviewStr)\n if m.groupdict()[\"review_cnt\"]is not None:\n l.add_value('reviews', m.groupdict()[\"review_cnt\"])\n\n # l.add_xpath('answers', \"//div[@id='resolvedData']/p[1]/a/text()\")\n # l.add_xpath('reviews', \"//div[@id='resolvedData']/p[2]/text()\")\n\n ret = l.load_item()\n print ret\n\n yield ret", "def _preprocess_problem(self, tree): # private\r\n response_id = 1\r\n self.responders = {}\r\n for response in tree.xpath('//' + \"|//\".join(responsetypes.registry.registered_tags())):\r\n response_id_str = self.problem_id + \"_\" + str(response_id)\r\n # create and save ID for this response\r\n response.set('id', response_id_str)\r\n response_id += 1\r\n\r\n answer_id = 1\r\n input_tags = inputtypes.registry.registered_tags()\r\n inputfields = tree.xpath(\r\n \"|\".join(['//' + response.tag + '[@id=$id]//' + x for x in (input_tags + solution_tags)]),\r\n id=response_id_str\r\n )\r\n\r\n # assign one answer_id for each input type or solution type\r\n for entry in inputfields:\r\n entry.attrib['response_id'] = str(response_id)\r\n entry.attrib['answer_id'] = str(answer_id)\r\n entry.attrib['id'] = \"%s_%i_%i\" % (self.problem_id, response_id, answer_id)\r\n answer_id = answer_id + 1\r\n\r\n # instantiate capa Response\r\n responsetype_cls = responsetypes.registry.get_class_for_tag(response.tag)\r\n responder = responsetype_cls(response, inputfields, self.context, self.capa_system)\r\n # save in list in self\r\n self.responders[response] = responder\r\n\r\n # get responder answers (do this only once, since there may be a performance cost,\r\n # eg with externalresponse)\r\n 
self.responder_answers = {}\r\n for response in self.responders.keys():\r\n try:\r\n self.responder_answers[response] = self.responders[response].get_answers()\r\n except:\r\n log.debug('responder %s failed to properly return get_answers()',\r\n self.responders[response]) # FIXME\r\n raise\r\n\r\n # <solution>...</solution> may not be associated with any specific response; give\r\n # IDs for those separately\r\n # TODO: We should make the namespaces consistent and unique (e.g. %s_problem_%i).\r\n solution_id = 1\r\n for solution in tree.findall('.//solution'):\r\n solution.attrib['id'] = \"%s_solution_%i\" % (self.problem_id, solution_id)\r\n solution_id += 1", "def test_parse_hsp_details(self):\n for query in self.result:\n # should check integers in next version.\n first_hsp = self.result[query][0][0]\n self.assertEqual(first_hsp[\"QUERY ID\"], 1)\n self.assertEqual(first_hsp[\"BIT_SCORE\"], \"1023.46\")\n self.assertEqual(first_hsp[\"SCORE\"], \"2645\")\n self.assertEqual(first_hsp[\"E_VALUE\"], \"0.333\")\n self.assertEqual(first_hsp[\"QUERY_START\"], \"4\")\n self.assertEqual(first_hsp[\"QUERY_END\"], \"18\")\n self.assertEqual(first_hsp[\"QUERY_ALIGN\"], \"ELEPHANTTHISISAHITTIGER\")\n self.assertEqual(first_hsp[\"MIDLINE_ALIGN\"], \"ORCA-WHALE\")\n self.assertEqual(first_hsp[\"SUBJECT_ALIGN\"], \"SEALSTHIS---HIT--GER\")\n self.assertEqual(first_hsp[\"SUBJECT_START\"], \"5\")\n self.assertEqual(first_hsp[\"SUBJECT_END\"], \"19\")\n self.assertEqual(first_hsp[\"PERCENT_IDENTITY\"], \"55\")\n self.assertEqual(first_hsp[\"POSITIVE\"], \"555\")\n self.assertEqual(first_hsp[\"GAP_OPENINGS\"], 0)\n self.assertEqual(first_hsp[\"ALIGNMENT_LENGTH\"], \"14\")\n\n gap_hsp = self.result[query][0][1]\n self.assertEqual(gap_hsp[\"GAP_OPENINGS\"], \"33\")" ]
[ "0.7339163", "0.68039155", "0.66917884", "0.631492", "0.6244135", "0.61681545", "0.6128832", "0.6124948", "0.6037291", "0.59815145", "0.59344417", "0.5837285", "0.5769417", "0.575594", "0.57425815", "0.56890744", "0.56850564", "0.5662544", "0.56593573", "0.56189245", "0.56130636", "0.56031185", "0.5574243", "0.5574243", "0.5527093", "0.54993165", "0.54561144", "0.54460996", "0.54412293", "0.5425203" ]
0.6876753
1
Build URL for question list
def build_list_url(self, params={}):
    url = BASE_URL + 'api/v1/questions/'
    if params:
        url += '?' + urllib.parse.urlencode(self.clean_params(params))
    return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_post_list_url(self):\n return urljoin(self.BASE_URL, self.POST_LIST_URL)", "def url(self):\n return reverse('snippet-list')", "def construct_url(self):\n path = [self.path]\n path.extend([str(x) for x in self.params])\n\n url = self.client.base_url + '/'.join(x for x in path if x)\n query = self.kwargs.get('query')\n\n if query:\n # Dict -> List\n if type(query) is dict:\n query = query.items()\n\n # Remove items with `None` value\n query = [\n (k, v) for (k, v) in query\n if v is not None\n ]\n\n # Encode query, append to URL\n url += '?' + urlencode(query)\n\n return url", "def get_success_url(self):\n url = reverse(\n \"qa:question-create\"\n ) + \"?success=true\"\n return url", "def get_absolute_url(self):\n return reverse('questionSchema-detail', args=[str(self.questionId)])", "def _make_url(self):\n ...", "def _build_url(self, story):\n return u'/api/items/{}/schedule/'.format(story)", "def make_urls(row):\n mapping = {\n 'base': self.course.moodle.base_url,\n 'cmid': row['cmid'],\n 'subid': row['subid'],\n }\n url = self._submission_url.format(**mapping)\n return url", "def build_url(self):\n url = requests.utils.requote_uri(\n self.torrent_page + self.string_search)\n if self.page == '1337x':\n return(url + '/1/')\n elif self.page == 'limetorrents':\n return(url + '/')\n else:\n return(url)", "def _generate_url(self, **kwargs):\n path = self.url_path.format(**kwargs)\n return self.poolbot.generate_url(path)", "def create_url(artist, song, language):\n url = __BASE_URL__ + '/wiki/{artist}:{song}'.format(artist=urlize(artist), song=urlize(song))\n if language:\n url += '/{language}'.format(language=urlize(language).lower())\n return url", "def answer_url(self, obj):\n request = self.context.get(\"request\")\n return reverse(\"answer-detail\", args=[obj.id], request=request)", "def get_question(url):\n return EXTERNAL_URL_QUESTION.format(url)", "def url(self):\n return url_search_posts(self.parameters, url_domain=self.url_domain)", "def answerUrl(question, masteryCache):\r\n return \"/api/mastery/{0}/answer\".format(masteryCache[question.subject.foreign.id].id)", "def get_absolute_url(self):\n return ('listar_parroquia', [self.id, ])", "def exam_url(self, obj):\n request = self.context.get(\"request\")\n return reverse(\"exam-detail\", args=[obj.id], request=request)", "def BLS_QCEW_URL_helper(*, build_url, year, **_):\n urls = []\n\n url = build_url\n url = url.replace('__year__', str(year))\n urls.append(url)\n\n return urls", "def make_url(self, artist, song):\n url = \"http://www.azlyrics.com/lyrics/{}/{}.html\".format(artist, song)\n return url", "def build_details_url(self, params={}):\n\n if 'url' in params:\n url = params['url']\n url += '?page=' + str(int(params['page'])) + '&sort=' + str(params['sort'])\n return url", "def create_query_url(self):\n self.__log('Starting to create the query URL.')\n query_url = self.config['API_URI']\n for key, value in self.options.items():\n if value:\n if query_url == self.config['API_URI']:\n query_url = query_url + str(key) + \"=\" + str(value)\n else:\n query_url = query_url + \"&\" + str(key) + \"=\" + str(value)\n query_url = query_url.replace(' ', '%20')\n self.__log(f'Done creating query url. 
URL to query: \"{query_url}\"')\n return query_url", "def gen_url(section):\n urls = []\n urls.append('https://ia800500.us.archive.org/22/items/stackexchange/' + section + '.stackexchange.com.7z')\n urls.append('https://ia800500.us.archive.org/22/items/stackexchange/' + section + '.7z')\n return urls", "def Url(self) -> str:", "def urlGenerator(self):\n \n # VERMONT #\n baseurl = 'https://www.vermontjudiciary.org'\n path = '/opinions-decisions'\n # from date\n param1 = 'facet_from_date=01/01'\n # to date\n param2 = 'facet_to_date=01/01/'\n # division\n param3 = 'f%5B0%5D=court_division_opinions_library%3A'\n # search by text\n param4 = 'search_api_fulltext='\n # page\n param5 = 'page='\n # generate list of URL\n listURL = []\n \n # list of divisions\n vt_court_division = {\"civil\": \"1\", \"supreme court\": \"7\", \"environmental\": \"3\", \"family\": \"4\", \"criminal\": \"2\"}\n # inputs\n from_year = 2000\n to_year = 2017\n endPages = 75 #0-74\n startPages = 0\n # make change to pull data from different division by changing division name below to any of the division in vt_court_vivision dict\n division = vt_court_division[\"environmental\"]\n # url generating\n for i in range(startPages, endPages):\n build_url = baseurl + path + '?' + param1 + str(from_year) + \"&\" + param2 + str(to_year) + \"&\" + param3 + division + param4 + \"&\" + param5 + str(i) + \"\"\n # append url to listUrl\n listURL.append(build_url)\n i += 1\n \n # return full list of URLs\n return listURL", "def generate_problem_url(problem_url_parts, base_course_url):\r\n problem_url = base_course_url + \"/\"\r\n for i, part in enumerate(problem_url_parts):\r\n if part is not None:\r\n # This is the course_key. We need to turn it into its deprecated\r\n # form.\r\n if i == 0:\r\n part = part.to_deprecated_string()\r\n # This is placed between the course id and the rest of the url.\r\n if i == 1:\r\n problem_url += \"courseware/\"\r\n problem_url += part + \"/\"\r\n return problem_url", "def _get_url(self, category):\n query = []\n for key,value in self._params.iteritems():\n query.append(\"{key}={value}\".format(key=key,value=value))\n return \"{base}/{category}?{query}\".format(base = self._base_url, category = category, query = \"&\".join(query))", "def gen_url(self):\n self.url = \"https://www.cubedb.net/?rank=3&title={}&time={}&scramble=\".format(self.name_of_solve, self.time_solve)\n for move in self.scramble.split():\n if \"\\'\" in move:\n move.replace(\"\\'\", \"-\")\n self.url += \"{}_\".format(move)\n self.url += \"&alg=\"\n count = 0\n for move in self.solve_stats:\n if self.comms_unparsed_bool:\n if self.comms_unparsed_bool:\n if move[\"comment\"] != \"\":\n if \"mistake\" in move[\"comment\"]:\n move[\"comment\"] = \"{}%0A//{}\".format(move[\"comment\"].split(\"mistake\")[0],\n \"mistake from here\")\n if \"#\" in move[\"comment\"]:\n piece = move[\"comment\"].split(\"#\")[0]\n move[\"comment\"] = move[\"comment\"].split(\"#\")[1]\n if self.url.rfind(\"%0A\") != -1:\n alg = self.url[self.url.rfind(\"%0A\") + 3:]\n self.url = self.url[:self.url.rfind(\"%0A\") + 3] + \"%0A//{}%0A\".format(piece) + alg\n else:\n alg = self.url[self.url.rfind(\"=\") + 1:]\n self.url = self.url[:self.url.rfind(\"=\") + 1] + \"%0A//{}%0A\".format(piece) + alg\n self.url += self.comms_unparsed[count]\n count += 1\n self.url += \"// {} %0A\".format(move[\"comment\"])\n\n\n else:\n if \"move\" in move:\n if move[\"move\"] != \"\":\n if \"\\'\" in move[\"move\"]:\n move[\"move\"].replace(\"\\'\", \"-\")\n self.url += 
\"{}_\".format(move[\"move\"])\n if move[\"comment\"] != \"\":\n if \"mistake\" in move[\"comment\"]:\n move[\"comment\"] = \"{}%0A//{}\".format(move[\"comment\"].split(\"mistake\")[0], \"mistake from here\")\n if \"#\" in move[\"comment\"]:\n piece = move[\"comment\"].split(\"#\")[0]\n move[\"comment\"] = move[\"comment\"].split(\"#\")[1]\n if self.url.rfind(\"%0A\") != -1:\n alg = self.url[self.url.rfind(\"%0A\") + 3:]\n self.url = self.url[:self.url.rfind(\"%0A\") + 3] + \"//{}%0A\".format(piece) + alg\n else:\n alg = self.url[self.url.rfind(\"=\") + 1:]\n self.url = self.url[:self.url.rfind(\"=\") + 1] + \"//{}%0A\".format(piece) + alg\n\n self.url += \"// {} %0A\".format(move[\"comment\"])\n else:\n self.url += \"// {} %0A\".format(move[\"comment\"])", "def url(self):\r\n course_key = \"slashes:{course_org}+{course_num}+{course_run}\".format(**self.course_info)\r\n return \"/\".join([BASE_URL, self.url_path, course_key])", "def get_success_url(self):\n return reverse('post-detail',\n kwargs={'pk': self.get_object().question.pk,\n 'title': self.get_object().question.title})", "def _build_url(self):\n u = urlparse.urljoin(settings.SITE_URL, '/#/')\n\n m = self.object.__class__.__name__\n\n if m == 'Workspace':\n return urlparse.urljoin(\n u, 'workspaces/w/{}'.format(self.object.slug)\n )\n elif m == 'Vault':\n return urlparse.urljoin(\n u, 'workspaces/w/{}/vaults/v/{}'.format(\n self.object.workspace.slug, self.object.slug))\n elif m == 'Card':\n return urlparse.urljoin(\n u, '/workspaces/w/{}/vaults/v/{}/cards/c/{}'.format(\n self.object.vault.workspace.slug, self.object.vault.slug,\n self.object.slug))\n\n return None" ]
[ "0.6373155", "0.63654226", "0.6280937", "0.6246357", "0.6218233", "0.6205551", "0.6102574", "0.60705477", "0.60506797", "0.6043795", "0.60000736", "0.5996932", "0.59536606", "0.5917964", "0.5904006", "0.58995205", "0.5893151", "0.5887981", "0.58844286", "0.5881763", "0.58490604", "0.58230317", "0.5822308", "0.58116126", "0.58105993", "0.5800152", "0.5792437", "0.5773718", "0.5722669", "0.5711519" ]
0.8045995
0
Build URL for question page details, including paging and sorting
def build_details_url(self, params={}): if 'url' in params: url = params['url'] url += '?page=' + str(int(params['page'])) + '&sort=' + str(params['sort']) return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_url(self, page_number=1):\n return ''.join(\n ['http://a.qidian.com/', '?size=', str(self.size), '&sign=', str(self.sign), '&tag=', str(self.tag),\n '&chanId=', str(self.chanId), '&subCateId=', str(self.subCateId), '&orderId=', str(self.orderId),\n '&update=', str(self.update), '&page=', str(page_number), '&month=', str(self.month), '&style=',\n str(self.style), '&action=', str(self.action), '&vip=', str(self.vip)])", "def build_list_url(self, params={}):\n\n url = BASE_URL + 'api/v1/questions/'\n if params:\n url += '?' + urllib.parse.urlencode(self.clean_params(params))\n\n return url", "def page_url(self):\n url = '/plaque/%s' % self.key.urlsafe()\n return url", "def build_url(self):\n url = requests.utils.requote_uri(\n self.torrent_page + self.string_search)\n if self.page == '1337x':\n return(url + '/1/')\n elif self.page == 'limetorrents':\n return(url + '/')\n else:\n return(url)", "def get_absolute_url(self):\n return reverse('questionSchema-detail', args=[str(self.questionId)])", "def _prepare_url(self, paging=False):\n headers = {\"Content-Type\": \"application/json\"}\n fmt = \"%s&access_token=%s\" % (self.URL_FORMAT, self._access_token)\n if not paging:\n self.paging_url = None\n self.url = fmt.format(self.freshest - 2,\n self.current_query,\n self.limit())\n else:\n self.paging_url = \"%s&until=%d\" % (self.url, self.prev_stalest)\n\n return headers", "def _get_page_url(self, page_num):\n\n # for city comes with 2 words, replace the space with -\n # e.g. 'new york' -> 'new-york'\n city = self._city.lower().replace(' ', '-')\n state = self._state.lower().replace(' ', '-')\n page = f'{self._overhead}/{state}/{city}/apartments_condos_houses_townhouses?page={page_num}'\n return page", "def exam_url(self, obj):\n request = self.context.get(\"request\")\n return reverse(\"exam-detail\", args=[obj.id], request=request)", "def get_absolute_url(self):\n\n url = reverse('comicsite.views.page', args=[self.comicsite.short_name,self.title])\n return url", "def url_for_page(pager, page_num):\n filters = request.view_args.get('filters', '')\n filters = RequestListing.parse_filter(filters)\n filters['page'] = page_num\n return url_for(request.endpoint,\n filters=RequestListing.unparse_filter(filters))", "def build_review_url(self, cipherid, offset=0, limit=20, sort='helpful'):\n params = {\n 'offset': offset,\n 'limit': limit,\n 'sort': sort\n }\n query = urllib.urlencode(params)\n return 'https://www.beautylish.com/rest/reviews/p-{cipherid}?{query}'.format(cipherid=cipherid, query=query)", "def get_submission_URL(self):\n URL = reverse('comicsite.views.site', args=[self.short_name])\n if self.submission_page_name:\n if self.submission_page_name.startswith(\"http://\") or self.submission_page_name.startswith(\"https://\"):\n # the url in the submission page box is a full url\n return self.submission_page_name\n else:\n page = self.submission_page_name\n if not page.endswith(\"/\"):\n page += \"/\"\n URL += page\n return URL", "def get_question(url):\n return EXTERNAL_URL_QUESTION.format(url)", "def get_related_info_page_url(self, kind):\n if self.external_url:\n return \"\"\n elif self == default_entity:\n return \"/%s/\" % kind\n else:\n return \"/%s/%s/\" % (kind, self.slug)", "def create_query_url(self):\n self.__log('Starting to create the query URL.')\n query_url = self.config['API_URI']\n for key, value in self.options.items():\n if value:\n if query_url == self.config['API_URI']:\n query_url = query_url + str(key) + \"=\" + str(value)\n else:\n query_url = query_url + \"&\" 
+ str(key) + \"=\" + str(value)\n query_url = query_url.replace(' ', '%20')\n self.__log(f'Done creating query url. URL to query: \"{query_url}\"')\n return query_url", "def _get_url(self, category):\n query = []\n for key,value in self._params.iteritems():\n query.append(\"{key}={value}\".format(key=key,value=value))\n return \"{base}/{category}?{query}\".format(base = self._base_url, category = category, query = \"&\".join(query))", "def url(self):\n self._current_page += 1\n return URL_TPL.format(self._uid, self._current_page)", "def construct_url(self):\n path = [self.path]\n path.extend([str(x) for x in self.params])\n\n url = self.client.base_url + '/'.join(x for x in path if x)\n query = self.kwargs.get('query')\n\n if query:\n # Dict -> List\n if type(query) is dict:\n query = query.items()\n\n # Remove items with `None` value\n query = [\n (k, v) for (k, v) in query\n if v is not None\n ]\n\n # Encode query, append to URL\n url += '?' + urlencode(query)\n\n return url", "def _build_url_exact(self, q: str, **kwargs: Dict) -> str:\n url = f\"{self._URL}?where=\"\n if kwargs.get('doi'):\n input_doi = kwargs.get('doi')\n url += f'''{{\"doi\":\"{input_doi}\"}}'''\n return url", "def next_url(self):\n if self.has_next:\n kwargs = g.request_args.copy()\n kwargs.update(request.view_args.copy())\n kwargs['offset'] = self.offset + self.limit\n kwargs['limit'] = self.limit\n return url_for(request.endpoint, **kwargs)", "def query_url(text_query, page_num=1, output_format=None):\n\t\n\turl = app.config['HOST_NAME']+'/'\n\tif output_format is not None:\n\t\turl += output_format\n\tif text_query is not None:\n\t\turl += '?q=' + urllib.quote(text_query.encode('utf8'))\n\t\tif page_num != 1:\n\t\t\turl += '&page=%d' % page_num\n\treturn url", "def create_guardian_search_url(api_key, query, page, from_date, to_date):\n\n # format base url\n url = '%s?page-size=%s&show-fields=%s&q=%s&page=%s&api-key=%s' % (\n GUARDIAN_SEARCH_API, PAGE_SIZE, SHOW_FIELDS, query, page, api_key\n )\n\n # add from-date query, if exists\n if (from_date):\n url += '&from-date=%s' % (from_date)\n\n # add to-date query, if exists\n if (to_date):\n url += '&to-date=%s' % (to_date)\n\n return url", "def get_success_url(self):\n url = reverse(\n \"qa:question-create\"\n ) + \"?success=true\"\n return url", "def get_absolute_url(self):\r\n return \"{0}page1/\".format(self.get_short_url())", "def title_page_url(self):\n url = '/plaque/%s' % self.title_url\n return url", "def get_success_url(self):\n return reverse('post-detail',\n kwargs={'pk': self.get_object().question.pk,\n 'title': self.get_object().question.title})", "def get_absolute_url(self):\n return ('listar_parroquia', [self.id, ])", "def answer_url(self, obj):\n request = self.context.get(\"request\")\n return reverse(\"answer-detail\", args=[obj.id], request=request)", "def url(self):\n return url_search_posts(self.parameters, url_domain=self.url_domain)", "def _other_page_querystring(self, page_number):\n if self.paginator.request:\n self.base_queryset['page'] = page_number\n return self.base_queryset.urlencode()\n\n # raise Warning(\"You must supply Paginator() with the request object for a proper querystring.\")\n return 'page=%s' % page_number" ]
[ "0.6673839", "0.65708864", "0.6440531", "0.6379734", "0.62787867", "0.6092793", "0.6091528", "0.6076074", "0.60007733", "0.5961712", "0.59066933", "0.5900967", "0.58840996", "0.58442384", "0.58360463", "0.5823607", "0.5817216", "0.5812188", "0.58041906", "0.5783659", "0.57749057", "0.57633597", "0.5755184", "0.574328", "0.57229173", "0.57187814", "0.5718208", "0.57015747", "0.56962734", "0.568511" ]
0.7364159
0
Return Gravatar URL with maximum size
def get_gravatar(self, source, size=100):
    return re.sub('s=(\d+)', 's=' + str(size), self.get_link(source))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def avatar(self, size=200, d=\"robohash\"):\n\t\tdigest = md5(self.email.lower().encode('utf-8')).hexdigest()\n\t\tg = \"https://gravatar.com/avatar/{}?d={}&s={}\".format(digest, d, size)\n\t\treturn g", "def gravatar_url(email, size=80):\n return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \\\n (md5(email.strip().lower().encode('utf-8')).hexdigest(), size)", "def gravatar_url(email, size=80):\n return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \\\n (md5(email.strip().lower().encode('utf-8')).hexdigest(), size)", "def gravatar_url(email, size=80):\n return 'https://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \\\n (md5(email.strip().lower().encode('utf-8')).hexdigest(), size)", "def get_avatar(self, size):\n\n digest = md5(self.email.encode('utf-8')).hexdigest()\n return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(\n digest, size\n )", "def gravatar(email):\r\n hashed_email = md5.new(email).hexdigest()\r\n return mark_safe(GRAVATAR_URL % {\r\n 'hash': hashed_email,\r\n 'rating': GRAVATAR_MAX_RATING, \r\n 'size': GRAVATAR_SIZE,\r\n 'default': urllib.quote_plus(GRAVATAR_DEFAULT_IMG),\r\n })", "def avatar(self, size):\n digest = md5(str(self.email).encode('utf-8')).hexdigest()\n return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(\n digest, size)", "def gravatar(email, default='%simg/unknown.png' % (settings.MEDIA_URL),\n size=175, rating='pg'):\n\n return 'http://www.gravatar.com/avatar/%s?%s' % (\n hashlib.md5(email.lower()).hexdigest(),\n urllib.urlencode({'d': absolutify(default),\n 's': str(size),\n 'r': rating}))", "def gravatar_queryset(queryset):\n\tdefault = ''\n\tsize = 48\n\tfor loop_item in queryset:\n\t\turl = \"http://www.gravatar.com/avatar.php?%s\" % urllib.urlencode({\n\t\t\t'gravatar_id': hashlib.md5(loop_item.user.email).hexdigest(),\n\t\t\t'default': default,\n\t\t\t'size': str(size)\n\t\t})\n\t\tloop_item.user.gravatar = url", "def avatar_url(self):\n return gravatar_for_email(self.user.email, size=40)", "def gravatar(context, user, size=None):\n url = get_gravatar_url(context['request'], user, size)\n\n if url:\n return format_html(\n '<img src=\"{0}\" width=\"{1}\" height=\"{1}\" alt=\"{2}\" '\n 'class=\"gravatar\"/>',\n url, size, user.get_full_name() or user.username)\n else:\n return ''", "def player_avatar(player_obj):\n avatar = \"http://www.gravatar.com/avatar/%s.jpg?d=monsterid\" % md5(player_obj.user.email).hexdigest()\n\n return avatar", "def avatar(self) -> str:\n\t\tif self.hasAvatar:\n\t\t\treturn f'https://avatars.atelier801.com/{self.id % 10000}/{self.id}.jpg'\n\n\t\t# default avatar\n\t\treturn 'https://avatars.atelier801.com/0/0.jpg'", "def avatar(self):\n admin_user = User.objects.get(pk=1)\n email = self.original_author\n\n if self.author != admin_user:\n email = self.author.email\n import hashlib\n value = hashlib.md5(email)\n\n return 'http://www.gravatar.com/avatar/%s' % value.hexdigest() + '?s=200'", "def gravatar_url(context, email, size=None):\n return get_gravatar_url_for_email(context['request'], email, size)", "def _get_avatar_url(user: Member) -> str:\n # if user.avatar.startswith(\"a\"):\n # url = user.avatar_url_as(format=\"gif\")\n # else:\n url = user.avatar_url_as(format=\"png\")\n\n return url.split(\"?\")[0] # we really don't care about the size, chop it off", "def get_avatars():\n\n error_on_unauthorized()\n\n media = Avatar.query.order_by(Avatar.id)\n total_num = media.count()\n\n if total_num == 0:\n return jsonify(total=0, uploads=[])\n\n try:\n count = 
int(request.args.get('max', total_num))\n page = int(request.args.get('page', 1))\n\n if count <= 0 or page <= 0:\n raise APIError(422, \"Query parameters out of range\")\n\n begin = (page - 1) * count\n end = min(begin + count, total_num)\n\n return jsonify(total=total_num, uploads=[avatar_to_dict(a) for a in media.all()[begin:end]]), 200\n except ValueError:\n raise APIError(422, \"Invalid query parameter\")", "def _select_largest_photo(self, sizes):\n\n max_size = 0\n photo = ''\n for size in sizes:\n w = size['width']\n h = size['height']\n if w * h >= max_size:\n max_size = w * h\n photo = size['url']\n return photo", "def get_possible_thumbnail(self):\n meta = self.get_meta_data()\n print meta\n if \"og:image\" in meta:\n return meta[\"og:image\"]\n elif \"twitter:image:src\" in meta:\n return meta[\"twitter:image:src\"]\n else:\n images = self.get_image_data()\n temp_url = \"\"\n temp_width = 0\n for img in images:\n if img[\"image_width\"] > temp_width:\n temp_url = img[\"image_url\"]\n temp_width = img[\"image_width\"]\n\n return temp_url", "def generate_gravatar(username, size=128):\n hashed_text = md5(username.encode('utf-8')).hexdigest()\n return \"https://secure.gravatar.com/avatar/%s?s=%s&d=identicon&r=pg\" % (\n hashed_text, size\n )", "def large_image(self) -> Optional[str]:\n return pulumi.get(self, \"large_image\")", "def resize_profile_pic(sender, instance, **kwargs):\n profile_pic = instance.profile_picture\n if profile_pic.name != \"default.png\":\n img = Image.open(profile_pic.path)\n if img.height > 300 or img.width > 300:\n output_size = (300, 300)\n img.thumbnail(output_size)\n img.save(profile_pic.path)", "def get_thumbnail_size(self, thumbnail_name, forced=False):", "def get_large_from_thumbnail(url):\n a = annotate(url)\n if not a:\n return\n r = return_large(a)\n\n # If there are no large equivalents, return None\n if not r:\n return None\n return r", "def GetThumbnail(self, type, maxsize): # real signature unknown; restored from __doc__\n pass", "def max_scanned_images(self):\n return self._max_scanned_images", "def get_available_avatars(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/GetAvailableAvatars/\"))", "def get_media(self, max_id):\r\n url = 'https://instagram.com/' + self.username + '/media'\r\n\r\n if max_id is not None:\r\n url += '?&max_id=' + max_id\r\n resp = requests.get(url)\r\n\r\n if resp.status_code == 200:\r\n media = json.loads(resp.text)\r\n\r\n if not media['items']:\r\n raise ValueError('User %s is private' % self.username)\r\n\r\n return media\r\n else:\r\n raise ValueError('User %s does not exist' % self.username)", "def avatar(user):\n\n if user_service.user_has_avatar(user.id):\n return url_for('user.view_avatar', user_id=user.id)\n\n # Set default values gravatar\n default = 'identicon'\n size = 100\n email = user.email or ''\n\n # Construct the url\n gravatar_url = 'https://www.gravatar.com/avatar/' + \\\n hashlib.md5(\n email.lower().encode('utf-8')).hexdigest() + '?'\n gravatar_url += urllib.parse.urlencode({'d': default, 's': str(size)})\n return gravatar_url", "def avatar_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"avatar_url\")" ]
[ "0.6543706", "0.652166", "0.652166", "0.64982855", "0.6495083", "0.6452975", "0.6447085", "0.62740374", "0.622244", "0.6142005", "0.6137685", "0.59658086", "0.5932887", "0.59247464", "0.5919478", "0.5854818", "0.5753524", "0.5750543", "0.57245594", "0.5715687", "0.55389243", "0.5534462", "0.5533553", "0.54731596", "0.5418651", "0.5411816", "0.5398526", "0.5395296", "0.5369825", "0.53681475" ]
0.708055
0
Convert HTML to DOM for processing images, links... Also return the processed string
def convert_content(self, html):
    try:
        dom = BeautifulSoup(html, 'html.parser')
        return self.parse_content(dom)
    except:
        return html
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_html():\n return", "def promed_html_to_formatted_text(html):\n # This is to fix some cases in malformed html where <s aren't esacaped.\n # >s can be parsed without escaping.\n normed_html = html.\\\n replace(\"<<\", \"&lt;<\").\\\n replace(\"<http\", \"&lt;http\").\\\n replace(\"< \", \"&lt; \")\n return dom_tree_to_formatted_text(BeautifulSoup(normed_html))", "def scrubHTML( html ):\n parser = StrippingParser()\n parser.feed( html )\n parser.close()\n return parser.result", "def parse_html(html):\n parser = lxml.html.HTMLParser(encoding='utf8')\n return lxml.html.fromstring(html.encode('utf8'), parser=parser)", "def parsed_html():\n return utils.parse_html(\n \"\"\"\n <!doctype hmtl>\n <html>\n <head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width\">\n <title>Page title</title>\n <link rel=\"stylesheet\" href=\"/static/styles.css\" />\n </head>\n <body>\n <h1>Django Auto AMP</h1>\n <p>Generate automatic AMP from your Django templates</p>\n <img src=\"/static/img.jpg\" width=\"500\" height=\"300\" />\n <img src=\"/static/img.gif\" layout=\"nodisplay\" />\n <img src=\"/static/img.png\" />\n <script type=\"text/javascript\" src=\"/static/scripts.js\" />\n <script type=\"application/json\" src=\"/static/data.json\" />\n </body>\n </html>\n \"\"\"\n )", "def soupify(html):\n return BeautifulSoup(html, \"html.parser\")", "def unhtml(cls, text):\n parser = cls()\n parser.feed(text)\n return parser", "def remove_html( html):\n return html2txt(html)", "def cleaned_html(self):\n cleaner = Cleaner()\n cleaner.scripts = True\n cleaner.javascript = True\n cleaner.comments = True\n cleaner.style = True\n self.dom = cleaner.clean_html(self.dom)\n assert self.dom, 'The html needs to be parsed to get the cleaned html'\n return lxml.html.tostring(self.dom)", "def strip_html(inputString):\r\n return BeautifulSoup(inputString, \"html.parser\").text", "def clean_html(input):\n p = HTMLParser(tree=treebuilders.getTreeBuilder(\"dom\"))\n dom_tree = p.parseFragment(input)\n walker = treewalkers.getTreeWalker(\"dom\")\n stream = walker(dom_tree)\n\n s = HTMLSerializer(omit_optional_tags=False)\n return \"\".join(s.serialize(stream))", "def _html_text(self, html):\n ee = None\n try: return html.html_text()\n except Exception, e: ee = e; pass\n try: return html.xml_text()\n except Exception, e: print \"HtmlDocument/text\", ee, e; pass\n try: return str(html)\n except Exception, e: print \"HtmlDocument/text\", e; return \"&nbsp;\"", "def parse_source(html, encoding='utf-8'):\n return BeautifulSoup(html, from_encoding=encoding)", "def html_to_text(html, target_elements=None):\n soup = BeautifulSoup(html)\n\n for script in soup(\n [\"script\", \"style\"]\n ): # remove all javascript and stylesheet code\n script.extract()\n\n targets = []\n\n if target_elements:\n targets = soup.find_all(target_elements)\n\n if target_elements and len(targets) > 3:\n text = \" \".join([t.text for t in targets])\n else:\n text = soup.get_text()\n\n # break into lines and remove leading and trailing space on each\n lines = (line.strip() for line in text.splitlines())\n # break multi-headlines into a line each\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n # drop blank lines\n text = \"\\n\".join(chunk for chunk in chunks if chunk)\n return text", "def safeHTML(s):\n parser = StrippingParser()\n parser.feed(s)\n parser.close()\n parser.cleanup()\n return parser.result", "def html_to_text(html):\n html_parser = 'html5lib'\n soup = BeautifulSoup(html, 
html_parser)\n pretty_html = soup.prettify()\n pretty_soup = BeautifulSoup(pretty_html, html_parser)\n text = pretty_soup.get_text()\n lines = [s for s in text.splitlines() if not re.search(r'^\\s*$', s)]\n return os.linesep.join(lines)", "def html_to_text(html):\n s = TextExtractorHTMLParser()\n s.feed(html)\n return s.get_text()", "def convert_rhtml(html):\n try:\n return HTML(html=html)\n except Exception as e: # pragma: no cover\n raise SoupError(str(e))", "def convert_html_to_text(html_str: str, ignore_tags: None = None) -> str:\n if not html_str:\n return \"\"\n if html_parser is None:\n return strip_tags(html_str)\n\n parser = HTMLParser(encoding=\"utf-8\")\n root = html_parser.fromstring(html_str.encode(\"utf-8\"), parser=parser)\n try:\n body = root.xpath(\"./body\")[0]\n except IndexError:\n # No body element\n body = root\n\n for tag in HTML_GARBAGE:\n els = body.xpath(\".//\" + tag)\n for el in els:\n el.getparent().remove(el)\n\n convert_element(body, ignore_tags=ignore_tags)\n\n text = html_parser.tostring(\n body, pretty_print=True, method=\"text\", encoding=\"utf-8\"\n ).decode(\"utf-8\")\n\n return \"\\n\".join(x.strip() for x in text.splitlines()).strip()", "def process_doc_html(self, doc_in):\n self.feed(doc_in) #SGMLParser call\n self.close() #SGMLParser call\n self.hand_off_temp_pieces('to_doc_pieces')\n self.all_pieces = self.all_pieces[:-16] # drop </body></html>\n return self.all_pieces", "def sanitize_html(input):\n p = HTMLParser(tokenizer=HTMLSanitizer, tree=treebuilders.getTreeBuilder(\"dom\"))\n dom_tree = p.parseFragment(input)\n walker = treewalkers.getTreeWalker(\"dom\")\n stream = walker(dom_tree)\n\n s = HTMLSerializer(omit_optional_tags=False)\n return \"\".join(s.serialize(stream))", "def normalised_html(html):\n soup = bs4.BeautifulSoup(html)\n root = soup.find(attrs='refentry')\n\n # Format function signature\n synopsis = root.find(attrs='methodsynopsis')\n pre = soup.new_tag('pre')\n pre.append(re.sub('\\s+', ' ', synopsis.get_text().strip()))\n synopsis.replace_with(pre)\n\n # Remove unwanted information\n changelog = root.find(attrs='changelog')\n if changelog: changelog.decompose()\n\n # Remove misused/unnecessary <blockquote>s\n for tag in root.find_all('blockquote'):\n tag.unwrap()\n\n # Convert <h3> => <h2>\n for h3 in root.find_all('h3'):\n h2 = soup.new_tag('h2')\n h2.append(h3.get_text().strip())\n h3.replace_with(h2)\n\n # Unwrap decorated <code> elements. Markdown looks a bit noisy when\n # different formatting elements are combined (e.g. 
**`foo`**)\n for code in root.find_all('code'):\n if code.parent.name in ('em', 'strong'):\n code.parent.unwrap()\n\n # Convert block <code> => <pre>\n for code in [div.find('code') for div in root.find_all('div', 'phpcode')]:\n for br in code.find_all('br'):\n br.replace_with('\\n')\n pre = soup.new_tag('pre')\n pre.append(code.get_text().strip())\n code.replace_with(pre)\n\n return unicode(root)", "def unhtmlify(html):\n return unescape(re.sub(r'<.*?>', '', html))", "def html(input):\n output=atpic.cleaner_alex.clean(input)\n return output", "def parse(html, encoding='utf-8'):\n if isinstance(html, unicode):\n return bs4.BeautifulSoup(html, 'html.parser')\n\n return bs4.BeautifulSoup(html, 'html.parser', from_encoding=encoding)", "def normalize_html(html):\n # Replace many whitespace characters with a single space in some elements\n # kind of like a browser does.\n soup = BeautifulSoup(html, 'lxml')\n for e in soup.select(':not(script,pre,code,style)'):\n for part in e:\n if isinstance(part, NavigableString):\n crunched = NavigableString(re.sub(r'\\s+', ' ', part))\n if crunched != part:\n part.replace_with(crunched)\n # Asciidoctor adds a \"content\" wrapper. It doesn't really change the layout\n # so we're ok with it.\n for e in soup.select('#content'):\n e.unwrap()\n # Docbook adds <span class=\"emphasis\"> around <em> tags. We don't need them\n # and it isn't worth making Asciidoctor make them.\n for e in soup.select('.emphasis'):\n e.unwrap()\n # Asciidoctor adds a \"ulist\" class to all unordered lists which doesn't\n # hurt anything so we can ignore it.\n for e in soup.select('.itemizedlist.ulist'):\n e['class'].remove('ulist')\n # Docbook adds type=\"disc\" to ul which is the default and isn't needed.\n for e in soup.select('ul'):\n if 'type' in e.attrs and e['type'] == 'disc':\n del e['type']\n # Asciidoctor adds a \"olist\" class to all ordered lists which doesn't\n # hurt anything so we can ignore it.\n for e in soup.select('.orderedlist.olist'):\n e['class'].remove('olist')\n # Docbook adds type=\"1\" to ol which is the default and isn't needed.\n for e in soup.select('ol'):\n if 'type' in e.attrs and e['type'] == '1':\n del e['type']\n # Docbook emits images with the 'inlinemediaobject' class and Asciidoctor\n # has the 'image' class. We've updated our styles to make both work.\n for e in soup.select('.inlinemediaobject'):\n e['class'].remove('inlinemediaobject')\n e['class'].append('image')\n # Docbook links with `<a class=\"link\"` when linking from one page of a book\n # to another. Asciidoctor emits `<a class=\"link\"`. 
Both look fine.\n for e in soup.select('a.xref'):\n if '.html#' in e['href']:\n e['class'].remove('xref')\n e['class'].append('link')\n # Format the html with indentation so we can *see* things\n html = soup.prettify()\n # docbook spits out the long-form charset and asciidoctor spits out the\n # short form but they are equivalent\n html = html.replace(\n '<meta content=\"text/html; charset=utf-8\" http-equiv=\"Content-Type\"/>',\n '<meta charset=\"utf-8\"/>')\n return html", "def getHTML(url): \n return urlopen(url)", "def prettyformat(self):\n \n import re\n\n html = self.get_htmlsrc()\n if type(html) == type([]):\n html = html[0]\n if type(html) != type(\"\"):\n try:\n html = str(html)\n except:\n html = html.__str__()\n \n tmp = BeautifulSoup(html)\n base = self.target_baseurl()\n# aitems = tmp.findAll(\"a\",href=re.compile(\"^\\/\"))\n aitems = tmp.findAll(\"a\",href=re.compile(\"^[^hH]\"))\n for i in aitems:\n u = i['href']\n if u[0] != '/':\n i['href'] = base + '/' + u\n else: \n i['href'] = base + u\n# imgitems = tmp.findAll(\"img\",src=re.compile(\"^\\/\"))\n imgitems = tmp.findAll(\"img\",src=re.compile(\"^[^hH]\"))\n for j in imgitems:\n v = j['src']\n if v[0] != '/':\n j['src'] = base + '/' + v\n else: \n j['src'] = base + v\n return tmp", "def source_to_soup(page_source):\n\tpage_source = re.sub('<br>', '', page_source)\n\tpage_source = re.sub('<br/', '', page_source)\n\tpage_source = re.sub('<br />', '', page_source)\n\treturn BeautifulSoup(page_source, 'html.parser', parse_only=SoupStrainer('div'))", "def from_html(self, content):\r\n pass" ]
[ "0.7239839", "0.659467", "0.657226", "0.656696", "0.6490043", "0.6425265", "0.6414505", "0.64094734", "0.6365156", "0.63646567", "0.63585263", "0.63093334", "0.6302106", "0.623015", "0.62219685", "0.6208928", "0.6206838", "0.6191704", "0.6181132", "0.61616075", "0.61527205", "0.6127179", "0.61224735", "0.61208016", "0.61059564", "0.60995173", "0.6086776", "0.60594845", "0.6054409", "0.60337394" ]
0.6950768
1
Parse questions from API response
def _parse_questions(self, text, params): try: data = json.loads(text) except ValueError as e: Utils.log(traceback.format_exc()) raise Exception('Could not get content') output = {} output['count'] = data['count'] output['pages'] = data['pages'] output['page'] = params['page'] if 'page' in params else 1 output['questions'] = [] for q in data['questions']: output['questions'].append(self.convert_question(q)) return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_get_responses():\n json_data = open('/Users/williamliu/GitHub/surveys/get_responses.json')\n loaded_data = json.load(json_data)\n test = json_normalize(loaded_data['data'])\n\n print type(test)\n print test.head()\n\n # Get first respondent's questions and answers back\n #print loaded_data['data'][1]['questions'][1]['question_id'] # Get respondent's question_id\n #print loaded_data['data'][1]['questions'][1]['answers'] # Get respondent's question_id", "def normalize_data(typeform_response):\n survey = json.loads(typeform_response.survey)\n response = json.loads(typeform_response.response)\n answers = {}\n response_set = response.get('answers') or []\n for answer in response_set:\n field_id = answer.get('field').get('id')\n value_key = answer.get('type')\n value = json.dumps(answer.get(value_key))\n\n field = find_field(field_id, survey)\n field_title = field.get('title') if field else '??'\n\n answers[field_id] = {\n 'field_title': field_title,\n 'answer': value,\n }\n\n if typeform_response.study_group:\n answers['study_group_id'] = {\n 'field_title': 'Learning circle ID',\n 'answer': typeform_response.study_group.id,\n }\n answers['study_group_name'] = {\n 'field_title': 'Learning circle name',\n 'answer': typeform_response.study_group.name\n }\n answers['course'] = {\n 'field_title': 'Course',\n 'answer': typeform_response.study_group.course.title,\n }\n answers['facilitator'] = {\n 'field_title': 'Facilitator',\n 'answer': typeform_response.study_group.created_by.email,\n }\n if typeform_response.study_group.team:\n answers['team'] = {\n 'field_title': 'Team',\n 'answer': typeform_response.study_group.team.name\n }\n\n return answers", "def parse_question_data(self):\n section = ''\n subsection = ''\n quest = ''\n # The data falls into 4 cases\n # 1. Sections\n # 2. subsections\n # 3. questions\n # 4. answers.\n\n for line in self.question_data: \n\n if \":\" in line: # case #2\n subsection = line.split(\":\")[1] # split the line on the : into an array but only take the [1] element\n debug(\"Subsection: %s\" % subsection)\n \n elif \".\" in line: # this is either a question or an answer?\n \n if line.split(\".\")[0].isdigit(): # case #3 it's a question, split on . into an array and take the element to the left and ask if it's a digit.\n quest = line # Since we know it's something like \"3. Are you a warlock?\" we stick that in the quest varable.\n debug(\"Question: %s\" % quest)\n # Create a question object and stick it in the dictonary with the key being the question (since we know it'll be unique)\n self.questions[quest] = question(section, subsection, quest) # I know it's redundant to have the key and have a value.\n \n elif line.startswith(\".\"): # case #4 answer All the answers startswith \".\" \n debug(\"Answer: %s\" % line)\n # take the question and append it to the answers array in the question object.\n self.questions[quest].answers.append(line[2:]) # Trim the first two characters off the answer since it's \". 
the answer\"\n \n else: # case #1 # This is section like AMERICAN DEMOCRACY\n section = line # load the line from the file into the section variable\n debug(\"Section = %s\" % section)", "def _parse_question(self, text, params={}):\n\n dom = BeautifulSoup(text, 'html.parser')\n\n data = {}\n\n # If requested page is not first page, it mean we only need load more answers\n if params['page'] == 1:\n # Parse user info\n data['users'] = []\n post_node = dom.select_one('div.post.question')\n if post_node is not None:\n user_node = post_node.find('div', class_='post-update-info-container')\n if user_node is not None:\n data['users'] = self.parse_user(user_node)\n\n # Parse question's comments\n data['comments'] = []\n comments_node = dom.find(id='comments-for-question-' + str(int(params['id'])))\n if comments_node is not None:\n data['comments'] = self.parse_comment(comments_node)\n\n data['has_more_comments'] = False\n add_comment_node = dom.find(id='add-comment-to-post-' + str(int(params['id'])))\n if add_comment_node:\n add_comment_script = add_comment_node.find_next('script')\n if add_comment_script:\n more_comment_pattern = re.compile('\\[\\'comments-for-question-(\\d+)\\'\\][ =]+{[\\n ]*truncated[ :]+(true|false)')\n more_comment_result = more_comment_pattern.search(add_comment_script.get_text())\n if more_comment_result:\n data['has_more_comments'] = True if more_comment_result.group(2) == 'true' else False\n\n # Parse CSRF token\n csrf_node = dom.find('input', attrs={'name': 'csrfmiddlewaretoken'})\n if csrf_node:\n #Utils.log('CSRF: ' + csrf_node.get('value'))\n self.csrfToken = csrf_node.get('value')\n\n # Parse followers\n data['followers'] = 0\n favorite_node = dom.find('div', attrs={'id': 'favorite-number'})\n if favorite_node is not None:\n favorite_text = favorite_node.get_text().strip()\n favorite_pattern = re.compile('(\\d+) follower[s]*')\n favorite_result = favorite_pattern.search(favorite_text)\n if favorite_result:\n data['followers'] = int(favorite_result.group(1))\n\n # Parse following status\n data['following'] = False\n favorite_btn_node = dom.select_one('a.button.followed')\n if favorite_btn_node is not None and favorite_btn_node.get('alt') == 'click to unfollow this question':\n data['following'] = True\n\n # Parse related questions\n data['related'] = []\n related_nodes = dom.find('div', class_='questions-related')\n if related_nodes is not None:\n for related_node in related_nodes.select('p'):\n a_node = related_node.find('a')\n item = {}\n item['title'] = a_node.get_text()\n item['url'] = self.get_link(a_node.get('href'))\n data['related'].append(item)\n\n # Parse votes\n data['votes'] = {}\n for script in dom.select('script'):\n script_text = script.get_text()\n if not script_text:\n continue\n if script_text.find('var votes = {};') != -1:\n for vote in re.findall('votes\\[\\'(\\d+)\\'\\][ ]*=[ ]*([-1]+)', script_text):\n data['votes'][vote[0]] = int(vote[1])\n break\n\n # Parse question status\n status_node = dom.select_one('div.question-status')\n if status_node:\n data['status'] = {}\n status_reason_node = status_node.select_one('b')\n if status_reason_node:\n data['status']['reason'] = status_reason_node.get_text().strip('\"')\n status_author_node = status_node.select_one('a')\n if status_author_node:\n data['status']['author'] = status_author_node.get_text()\n data['status']['profile_url'] = self.get_link(status_author_node.get('href'))\n status_date_pattern = re.compile('close date (\\d+-\\d+-\\d+ \\d+:\\d+:\\d+)')\n status_date_result = 
status_date_pattern.search(status_node.get_text())\n if status_date_result:\n data['status']['date'] = status_date_result.group(1)\n status_date = datetime.strptime(data['status']['date'], '%Y-%m-%d %H:%M:%S')\n data['status']['date_ago'] = timeago.format(status_date, datetime.utcnow())\n\n # Parse question paging\n data['has_more_answers'] = False\n paging_node = dom.find('div', class_='paginator')\n if paging_node is not None:\n current_page_node = paging_node.find('span', class_='curr')\n if current_page_node is not None:\n data['page'] = int(current_page_node.get_text().strip())\n else:\n data['page'] = 1\n\n next_page_node = paging_node.find('span', class_='next')\n if next_page_node is not None:\n data['has_more_answers'] = True\n\n # Parse question's answers\n data['answers'] = self.parse_answer(dom)\n\n return data", "def __init__(self, question):\n self.question = question\n self.responses = []", "def _parse_question_json(self, text, params={}):\n\n try:\n data = json.loads(text)\n except ValueError as e:\n Utils.log(traceback.format_exc())\n raise Exception('Could not get content')\n\n output = self.convert_question(data)\n output['body'] = self.markdown(output['body'])\n\n return output", "def load_questions_from_web():\n # sending http request\n r = requests.get(http_api_questions)\n \n # loading the content to JSON object\n json_content = json.loads(r.content)\n \n # deleting unnecessary value from the JSON\n del json_content['response_code']\n\n # creating the questions dict\n questions = {}\n\n # questions from the JSON (in list [])\n questions_from_json = json_content['results']\n\n # looping through the questions, convert it to usual formatted question\n for index_question, question in enumerate(questions_from_json):\n\n # correct-answer\n correct_answer = question[\"correct_answer\"]\n\n # list of all answers\n all_answers = [correct_answer] + question[\"incorrect_answers\"]\n\n # shuffling the list [otherwise, the answer will always be in the first place]\n random.shuffle(all_answers)\n\n # finding the new index of the correct-answer\n correct_answer_index = all_answers.index(correct_answer)\n\n # important! 
- converting from 'XML\\CSS\\HTML' format to 'utf-8'\n fixed_replaced_question = question[\"question\"].replace(\"&#039;\", \"'\").replace(\"&quot;\", \"'\")\n \n # discarding specific question if it still has unwelcomed characters\n if(fixed_replaced_question.find('#') != -1 or fixed_replaced_question.find('|') != -1):\n break\n \n # copying the question to the new dict\n questions[str(index_question)] = {\n \"question\" : fixed_replaced_question,\n \"answers\" : all_answers,\n \"correct\" : str(correct_answer_index + 1)\n }\n \n # returns the questions' dict\n return questions", "def parse_response(self, response):\n elm_tree = ET.fromstring(response)\n data = {}\n for child in elm_tree:\n\n temp_list = []\n # related faqs\n if child.tag == 'faqitems':\n # if there are any related faqs present store each faq as an object in temp list this store list in return value.\n related_list = child.find('suggestedfaqlist').find('semanticfaqs')\n if len(related_list):\n for faq in related_list:\n temp_dict = {}\n for el in faq:\n temp_dict[el.tag] = el.text\n temp_list.append(temp_dict)\n # temp_list.append({\n # 'answer_id': faq.find('AnswerId'),\n # 'recognition_id': faq.find('RecognitionId'),\n # 'question_text': faq.find('QuestionText')\n # })\n data['related_list'] = temp_list\n\n else:\n data['related_list'] = None\n\n elif child.tag.lower() == 'connectors':\n if len(child):\n for connector in child:\n temp_dict = {}\n for el in connector:\n temp_dict[el.tag] = el.text\n temp_list.append(temp_dict)\n data['connectors'] = temp_list\n\n else:\n data['connectors'] = None\n\n elif child.tag.lower() == 'disambiguationoptions':\n if len(child):\n for option in child:\n temp_dict = {}\n for el in option:\n temp_dict[el.tag] = el.text\n temp_list.append(temp_dict)\n data['disambiguationoptions'] = temp_list\n\n else:\n data['disambiguationoptions'] = None\n\n else:\n data[child.tag] = child.text\n\n return data", "def parse_survey_details():\n json_data = open('/Users/williamliu/GitHub/surveys/get_survey_details.json')\n loaded_data = json.load(json_data)\n\n print loaded_data['data']['pages'][1]['questions'][1]['heading']\n # I am preoccupied with more than one person I help", "def _parse_question_html(self, node):\n\n if node is None:\n return None\n\n data = {}\n\n data['has_more_comments'] = False\n data['has_more_answers'] = False\n data['id'] = int(node.get('id').replace('question-', ''))\n\n h2_node = node.find('h2')\n if h2_node is not None:\n a_node = h2_node.find('a')\n if a_node is not None:\n data['title'] = a_node.get_text()\n data['url'] = self.get_link(a_node.get('href'))\n\n view_count_node = node.find('div', class_='views')\n if view_count_node is not None:\n view_count_value = view_count_node.find('span', class_='item-count').get_text()\n data['view_count_label'] = '0' if view_count_value == 'no' else view_count_value\n\n score_count_node = node.find('div', class_='votes')\n if score_count_node is not None:\n score_count_value = score_count_node.find('span', class_='item-count').get_text()\n data['score_label'] = '0' if score_count_value == 'no' else score_count_value\n\n answer_count_node = node.find('div', class_='answers')\n if answer_count_node is not None:\n answer_count_value = answer_count_node.find('span', class_='item-count').get_text()\n data['answer_count_label'] = '0' if answer_count_value == 'no' else answer_count_value\n\n return data", "def extract_questions_from_text(self, text):\n questions = []\n\n for match in self.QUESTION_RE.finditer(text):\n match_dict = 
match.groupdict()\n\n answer_type = match_dict['answer_type']\n number1 = match_dict.pop('number1')\n\n if answer_type == 'O':\n if re.search('(?i)to ask the Deputy President', match_dict['intro']):\n match_dict['dp_number'] = number1\n elif re.search('(?i)to ask the President', match_dict['intro']):\n match_dict['president_number'] = number1\n else:\n match_dict['oral_number'] = number1\n elif answer_type == 'W':\n match_dict['written_number'] = number1\n\n match_dict['translated'] = bool(match_dict['translated'])\n match_dict['questionto'] = match_dict['questionto'].replace(':', '')\n match_dict['questionto'] = self.correct_minister_title(match_dict['questionto'])\n\n questions.append(match_dict)\n\n return questions", "def fill_question(self, response, question_answer):\n question_answer['source_url'] = response.url\n\n question_answer['question_title'] = response.xpath('//*[@id=\"question-header\"]/h1/a/text()').extract_first()\n question_answer['question_body'] = BeautifulSoup(\n response.xpath(self.gt.css_to_xpath('.postcell .post-text')).extract_first()).text\n question_answer['question_tags'] = list(set(\n response.xpath('//*[contains(concat(\" \", normalize-space(@class), \" \"), \" post-tag \")]/text()').extract()))\n # would like to specify the hierarchy of the css tags\n question_answer['question_upvotes'] = int(response.xpath(\n '//*[contains(concat(\" \", normalize-space(@class), \" \"), \" vote-count-post \")]/text()').extract_first())\n question_answer['question_view_count'] = int(\n response.xpath(self.gt.css_to_xpath('#qinfo .label-key') + '/b/text()').extract()[1].split(' ')[0])\n\n author_name = response.xpath(\n self.gt.css_to_xpath('.owner .user-details') + '/a/text()').extract_first()\n question_answer['question_author'] = {'author_id': '{}_{}'.format(self.allowed_domains[0], author_name),\n 'author_name': author_name}\n\n se_date_format = '%b %d \\'%y at %H:%M' # if date not current year\n se_date_format_curr_year = '%b %d at %H:%M' # if date current year\n try:\n try:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format))\n except ValueError:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format_curr_year))\n except (ValueError, TypeError):\n pass\n # Look for duplicates\n duplicate_url = response.xpath(self.gt.css_to_xpath('.question-originals-of-duplicate')+'/ul/li/a/@href').extract_first()\n if duplicate_url:\n print('duplicate question')\n self.duplicate_count += 1\n print('duplicate question count: {}'.format(self.duplicate_count))\n duplicate_url = \"https://superuser.com\" + duplicate_url\n print(duplicate_url)\n self.logger.info('duplicate url: {}'.format(duplicate_url))\n question_answer['question_original_url'] = duplicate_url\n self.duplicate_url = duplicate_url\n\n return question_answer", "def get_questions(self):\n self.post_question()\n return self.client.get(\"api/v2/questions\", headers={\"Authorization\": \"{}\".format(self.token())}, data=json.dumps(self.question), content_type='application/json')", "def parse(string, max_questions):\r\n print(\"inside parse\")\r\n question = []\r\n answer = []\r\n selected_questions = []\r\n selected_answers = []\r\n try:\r\n txt = TextBlob(string)\r\n print(\"after text blob\")\r\n # Each sentence is taken from the 
string input and passed to genQuestion() to generate questions.\r\n for sentence in txt.sentences:\r\n single_question, single_answer = genQuestion(sentence)\r\n if single_question:\r\n question.append(single_question)\r\n answer.append(single_answer)\r\n \r\n if max_questions >= len(question):\r\n return question, answer\r\n \r\n while (max_questions > 0):\r\n i = random.randint(0, len(question)-1)\r\n if question:\r\n selected_questions.append(question[i])\r\n selected_answers.append(answer[i])\r\n del question[i]\r\n del answer[i]\r\n max_questions -= 1\r\n \r\n else:\r\n break\r\n print(\"before return\")\r\n return json.dumps(selected_questions), json.dumps(selected_answers)\r\n \r\n except Exception as e:\r\n raise e", "def convert_question(self, q):\n\n item = {}\n item['id'] = q['id']\n item['title'] = q['title']\n item['body'] = q['text']\n item['author_id'] = q['author']['id']\n item['author'] = q['author']['username']\n item['url'] = q['url']\n item['score'] = q['score']\n item['score_label'] = self.convert_count(q['score'])\n item['answer_count'] = q['answer_count']\n item['answer_count_label'] = self.convert_count(q['answer_count'])\n item['view_count'] = q['view_count']\n item['view_count_label'] = self.convert_count(q['view_count'])\n item['added_at'] = q['added_at']\n item['added_at_label'] = timeago.format(datetime.fromtimestamp(int(q['added_at']), TIMEZONE), datetime.now(TIMEZONE))\n item['last_activity'] = q['last_activity_at']\n item['last_activity_label'] = timeago.format(datetime.fromtimestamp(int(q['last_activity_at']), TIMEZONE), datetime.now(TIMEZONE))\n item['has_more_comments'] = False\n item['has_more_answers'] = False\n item['has_accepted_answer'] = q['has_accepted_answer']\n item['closed'] = q['closed']\n\n item['tags'] = []\n for tag in q['tags']:\n item['tags'].append({'name': tag})\n\n return item", "def process():\n question = request.form['question']\n parsed_question = parse(question)\n wiki_extract = get_wiki_extract(parsed_question)\n wiki_url = get_wiki_url(parsed_question)\n address = get_address(parsed_question)\n lat = get_latitude(parsed_question)\n lng = get_longitude(parsed_question)\n return jsonify({'data': [question, address, wiki_extract,\n wiki_url, lat, lng, parsed_question]})", "def process_question(qu):\n\n ## global ranking\n rank_info = {}\n rank_info_k = [\"viewcount\",\"score\",\"favoritecount\"]\n for k in rank_info_k:\n rank_info[k] = int(qu[k])\n qu.pop(k,None)\n\n rank_info[\"creationdate\"] = qu[\"creationdate\"]\n\n if qu[\"acceptedanswer\"]:\n qu[\"acceptedanswer\"] = list(qu[\"acceptedanswer\"])\n else:\n qu[\"acceptedanswer\"] = []\n\n qu.pop('comments',None) # discard comments, maybe add back later\n qu[\"rank_info\"] = rank_info\n\n return qu", "def process_question_answer_page(self, response):\n self.results_page_count += 1\n self.classification_file.write(\"results, {}\\n\".format(response.url))\n logging.info('results: {}'.format(response.url))\n print(\"results: {}\".format(response.url))\n\n # Filters\n if not self.page_contains_answers(response):\n return []\n\n # Process posts\n question_answer_list = []\n question_answer = QuestionAnswer()\n question_answer = self.fill_question(response, question_answer)\n # cycle through answers and build Q/A pairs\n answers = response.xpath(self.gt.css_to_xpath('.answercell .post-text')).extract()\n for answer_number in range(len(answers)):\n question_answer_copy = question_answer.copy()\n question_answer_copy = self.fill_answer(response, question_answer_copy, answer_number)\n 
question_answer_list.append(question_answer_copy)\n return question_answer_list", "def load_vqa_json(self, data_split):\n qdic, adic = {}, {}\n\n with open(config.DATA_PATHS[self.exp_type][data_split]['ques_file'], 'r') as f:\n qdata = json.load(f)['questions']\n for q in qdata:\n q_key = data_split + QID_KEY_SEPARATOR + str(q['question_id'])\n qdic[q_key] = {\n 'qstr': q['question'],\n 'iid': q['image_id']\n }\n if self.use_ocr:\n qdic[q_key]['ocr_tokens'] = q['ocr_tokens']\n if self.use_binary:\n qdic[q_key]['ocr_answer_flag'] = q['ocr_answer_flag']\n\n if 'test' not in data_split:\n with open(config.DATA_PATHS[self.exp_type][data_split]['ans_file'], 'r') as f:\n adata = json.load(f)['annotations']\n for a in adata:\n # TODO: we only use key 'answer' in this a['answers'] list\n adic[data_split + QID_KEY_SEPARATOR + str(a['question_id'])] = \\\n a['answers']\n\n self.logger.info('parsed {} questions for {}'.format(len(qdic), data_split))\n return qdic, adic", "def test_get_questions(self):\n res = self.client().get('/api/questions')\n res_body = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertTrue(res_body['success'])\n self.assertTrue(res_body['questions'])", "def parse_response_payload(payload):\n header = Header.fromData(payload)\n byte_ptr = len(header)\n config = OrderedDict(zip([\"question\" , \"answer\", \"authority\", \"additional\"], [\"_qdcount\", \"_ancount\", \"_nscount\", \"_arcount\"]))\n parsed = {\"header\" : header}\n for key, val in config.items():\n #the question section isn't parsed as a RR, needs special treatment\n if key is \"question\":\n #assumes only ever receive one question entry\n if getattr(header, val) > 1:\n raise Exception(\"Uh oh!\")\n question = QE.fromData(payload, byte_ptr)\n parsed[key] = [question,]\n byte_ptr += len(question)\n \n else:\n num_entries = getattr(header, val)\n rrs, byte_ptr = ([], byte_ptr) if num_entries is 0 else parse_rrs(payload,\n byte_ptr,\n num_entries) \n parsed[key] = rrs\n return parsed", "def _get_questions_from_tag_assessment(self, event_data):\n unit_id, lesson_id = self._get_unit_and_lesson_id_from_url(\n event_data['location'])\n if unit_id is None or lesson_id is None:\n return []\n\n if event_data['type'] == self.QUESTION_GROUP:\n mc_indices = [\n i for i in xrange(len(event_data['containedTypes']))\n if event_data['containedTypes'][i] == self.MC_QUESTION]\n return [{\n 'id': 'u.%s.l.%s.c.%s.i.%s' % (\n unit_id, lesson_id, event_data['instanceid'], index),\n 'score': event_data['individualScores'][index],\n 'answers': event_data['answer'][index]\n } for index in mc_indices if event_data['answer'][index]]\n elif (event_data['type'] == self.MC_QUESTION and\n event_data['answer']):\n # This is a single multiple-choice question.\n return [{\n 'id': 'u.%s.l.%s.c.%s' % (\n unit_id, lesson_id, event_data['instanceid']),\n 'score': event_data['score'],\n 'answers': event_data['answer']\n }]\n else:\n return []", "def parse(self, payload):\n payload = json.loads(payload)\n \n if payload['response'] in self.possible_responses:\n return self.possible_responses[payload['response']](payload)\n else:\n print 'Response not valid'", "def populate(self, response):\n answers = self.filter(response=response)\n if response.survey:\n questions = Question.objects.filter(survey=response.survey).exclude(\n pk__in=answers.values('question'))\n answers = list(answers)\n for question in questions:\n answers += [Answer(question=question)]\n return answers", "async def qa_infer(query: dict, response: Response) -> 
dict:\n logger.debug(\"QUESTION ANSWER - predicting query: \" + str(query[\"query\"]))\n results = {}\n\n try:\n query_text = query[\"query\"]\n query_context = query[\"search_context\"]\n start = time.perf_counter()\n answers = MODELS.qa_model.answer(query_text, query_context)\n end = time.perf_counter()\n logger.info(answers)\n logger.info(f\"time: {end - start:0.4f} seconds\")\n results[\"answers\"] = answers\n results[\"question\"] = query_text\n\n except Exception:\n logger.error(f\"Unable to get results from QA model for {query}\")\n response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR\n raise\n return results", "def parse_answers(dns_resp: str, session_cache):\n\n ID = dns_resp[:4]\n other_flags = dns_resp[4:8]\n questions_count = dns_resp[8:12]\n answers_count = dns_resp[12:16]\n auth_serv_info = dns_resp[16:20]\n additional_info = dns_resp[20:24]\n offset = 0\n ip = \"0.0.0.0\"\n\n # может придти несколько ответов, из каждого вычленим нужные записи\n for i in range(int(answers_count, 16)):\n try:\n ip, offset = DNSHandler.parse_answer(dns_resp, session_cache, offset=offset * i)\n except ValueError:\n print(\"url does not exist\")\n sys.exit(0)\n return ip", "def process(data_item, article_id):\n questions = []\n answers = []\n paragraph = [article_id, data_item['context']]\n\n for item in data_item['qas']:\n question = [item[\"id\"], item[\"question\"], item['is_impossible']]\n questions.append(question)\n if item['is_impossible']:\n continue\n answer_options = item[\"answers\"]\n answer_set = set()\n for option in answer_options:\n answer_tuple = (option['text'], option['answer_start'])\n answer_set.add(answer_tuple)\n for index, answer_tuple in enumerate(answer_set):\n answer = [\"{}_{}\".format(item[\"id\"], index+1), item[\"id\"], answer_tuple[0], answer_tuple[1]]\n answers.append(answer)\n return paragraph, questions, answers", "def test_search_questions(self):\n\n # send post request with search term\n response = self.client().post('/questions',\n json={'searchTerm': 'egyptians'})\n\n # load response data\n data = json.loads(response.data)\n\n # check response status code and message\n self.assertEqual(response.status_code, 200)\n self.assertEqual(data['success'], True)\n\n # check that number of results = 1\n self.assertEqual(len(data['questions']), 1)\n\n # check that id of question in response is correct\n self.assertEqual(data['questions'][0]['id'], 23)", "def validate_exercice(request, test_student, test_exercice):\n raw_answer = None\n is_correct = False\n if test_exercice.exercice is None:\n raw_answer = None\n is_correct = False\n\n else:\n \"\"\"\n raw_answer JSON format:\n correct : 1 if True, 0 if False, -1 if not corrected yet\n [\n {\n \"0\": {\n \"response\": [\n \"some_response\", \"some_other_response\"\n ],\n \"correct\": 1\n },\n \"1\": {\n \"response\": [\n \"some_response\"\n ],\n \"correct\": -1\n }\n }\n ]\n \"\"\"\n # Help function\n def get_occurence(s, delimiter, occurence):\n \"\"\"Returns the n'th occurrence of s splitted with the delimiter.\"\"\"\n return s.split(delimiter)[occurence]\n\n raw_answer = {}\n for number, question in enumerate(test_exercice.exercice.get_questions()):\n raw_answer[number] = {\"response\": [], \"correct\": -1}\n data = question.get_answer()\n if data[\"type\"] == \"checkbox\":\n raw_answer[number][\"response\"] = list(map(int, request.POST.getlist(str(number))))\n elif data[\"type\"] == \"radio\":\n if str(number) in request.POST:\n raw_answer[number][\"response\"] = [int(request.POST[str(number)])]\n else:\n # 
The Student did not select an answer\n raw_answer[number][\"response\"] = -1\n elif data[\"type\"] == \"text\":\n raw_answer[number][\"response\"] = [request.POST[str(number)]]\n elif data[\"type\"].startswith(\"math\"):\n raw_answer[number][\"response\"] = [request.POST[str(number)]]\n elif data[\"type\"] == \"graph\":\n graph_list = list()\n for key, value in request.POST.items():\n # key has the form \"graph-0-point-1-Y\" for type_question-id_question-type_answer-id_answer\n if key == \"csrfmiddlewaretoken\":\n continue\n # If the graph element is read for the first time (a graph element may contain several coordinates)\n pointNumber = get_occurence(key, \"-\", 2) + get_occurence(key, \"-\", 3)\n doesExist = False\n for elem in graph_list:\n if \"type\" in elem:\n if elem[\"type\"] == str(pointNumber):\n doesExist = True\n if (len(request.POST.items())-1)/2 > len(graph_list) and not doesExist:\n graph_list.append({\"type\": pointNumber})\n\n # Case 1 : A point\n if get_occurence(key, \"-\", 2) == \"point\":\n # Format: graph-inputNumber-point-coordinate\n # Neither X or Y have been read yet: initialization\n for elem in graph_list:\n if elem.get(\"type\") == pointNumber:\n nextElem = elem\n if not nextElem.get(\"coordinates\"):\n nextElem[\"coordinates\"] = {\"X\": 0, \"Y\": 0}\n # Get the coordinate (X or Y, order is not guaranteed)\n coordinate = get_occurence(key, \"-\", 4)\n # First case : the X coordinate\n if coordinate == \"X\":\n nextElem[\"coordinates\"][\"X\"] = value\n # Then the Y coordinate\n elif coordinate == \"Y\":\n nextElem[\"coordinates\"][\"Y\"] = value\n\n raw_answer[number][\"response\"] = graph_list\n\n elif data[\"type\"] == \"professor\":\n raw_answer[number][\"response\"] = [request.POST[str(number)]]\n else:\n raise Exception()\n\n # Perform the correction\n raw_answer[number][\"correct\"] = question.evaluate(raw_answer[number][\"response\"])\n\n # The Student answers are embedded in a list, in a way that they can be extended if needed\n raw_answer = [raw_answer]\n raw_answer = json.dumps(raw_answer, indent=4)\n\n with transaction.atomic():\n answer = Answer.objects.create(\n raw_answer=raw_answer,\n test_student=test_student,\n test_exercice=test_exercice,\n )\n\n # Evaluates the answer of the whole attached Context to assess the related Skill\n is_correct = answer.evaluate()\n\n student_skill = StudentSkill.objects.get(student=request.user.student, skill=test_exercice.skill)\n\n if is_correct == 1:\n student_skill.validate(\n who=request.user,\n reason=\"Réponse à une question.\",\n reason_object=test_exercice,\n )\n elif is_correct == 0:\n student_skill.unvalidate(\n who=request.user,\n reason=\"Réponse à une question.\",\n reason_object=test_exercice,\n )\n\n # update student skills, then redirect to self\n return HttpResponseRedirect(reverse(\"student_pass_test\", args=(test_student.id,)))", "def parseBQMC(s):\n\n answerlist = []\n try:\n RespDict = json.loads(s)\n for records in RespDict[\"Response\"]:\n for record in records[\"Response\"]:\n if record[\"Selected\"] is True:\n answerlist.append(record[\"val\"])\n except:\n return None\n return answerlist" ]
[ "0.6683125", "0.64587665", "0.6452531", "0.63551277", "0.6319017", "0.62600535", "0.62520546", "0.62217253", "0.6206413", "0.6111697", "0.6105986", "0.60504484", "0.6041145", "0.6023531", "0.60067135", "0.5974075", "0.59160846", "0.5890735", "0.58624613", "0.58607596", "0.58589894", "0.5845423", "0.58217245", "0.58069074", "0.58027", "0.5791384", "0.5775976", "0.57511914", "0.5727495", "0.5723536" ]
0.6853872
0
Convert raw question data
def convert_question(self, q): item = {} item['id'] = q['id'] item['title'] = q['title'] item['body'] = q['text'] item['author_id'] = q['author']['id'] item['author'] = q['author']['username'] item['url'] = q['url'] item['score'] = q['score'] item['score_label'] = self.convert_count(q['score']) item['answer_count'] = q['answer_count'] item['answer_count_label'] = self.convert_count(q['answer_count']) item['view_count'] = q['view_count'] item['view_count_label'] = self.convert_count(q['view_count']) item['added_at'] = q['added_at'] item['added_at_label'] = timeago.format(datetime.fromtimestamp(int(q['added_at']), TIMEZONE), datetime.now(TIMEZONE)) item['last_activity'] = q['last_activity_at'] item['last_activity_label'] = timeago.format(datetime.fromtimestamp(int(q['last_activity_at']), TIMEZONE), datetime.now(TIMEZONE)) item['has_more_comments'] = False item['has_more_answers'] = False item['has_accepted_answer'] = q['has_accepted_answer'] item['closed'] = q['closed'] item['tags'] = [] for tag in q['tags']: item['tags'].append({'name': tag}) return item
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convertData(data):\n\n return data", "def convert_txt_to_data():\n pass", "def _get_converted_data(self):\n pass", "def transform(self, data):", "def decode_input_data(self, rawdata):\n return self.get_content_type().loads(rawdata, self)", "def data_for_question(self, question_type):\n\t\treturn {}", "def process_question(qu):\n\n ## global ranking\n rank_info = {}\n rank_info_k = [\"viewcount\",\"score\",\"favoritecount\"]\n for k in rank_info_k:\n rank_info[k] = int(qu[k])\n qu.pop(k,None)\n\n rank_info[\"creationdate\"] = qu[\"creationdate\"]\n\n if qu[\"acceptedanswer\"]:\n qu[\"acceptedanswer\"] = list(qu[\"acceptedanswer\"])\n else:\n qu[\"acceptedanswer\"] = []\n\n qu.pop('comments',None) # discard comments, maybe add back later\n qu[\"rank_info\"] = rank_info\n\n return qu", "def decode(data): #@NoSelf", "def load_as_raw(self):\n\n # Q & A\n questions, answers = self.get_questions_answers()\n\n # Get vocabs\n\n # Step 4: cleaning the questions\n pprint('---- Step 4 cleaning questions ----')\n\n clean_questions = []\n for question in questions:\n clean_questions.append(clean_text(question))\n\n pprint(clean_questions, stream=Head(5))\n print('\\n\\n')\n \"\"\"\n Step 5: Clean the answers\n \"\"\"\n\n pprint('---- Step 5 cleaning answers ----')\n clean_answers = []\n for answer in answers:\n clean_answers.append(clean_text(answer))\n\n pprint(clean_answers, stream=Head(5))\n print('\\n\\n')\n \"\"\"\n Step 6: Creating a dictionary that maps each word to its number of occurences\n \"\"\"\n\n word2count = {}\n pprint('------ Step 6: counting words in questions ----')\n\n word2count = convert_word_to_count(word2count, clean_questions)\n\n pprint(word2count, stream=Head(5))\n print('\\n\\n')\n \"\"\"\n Step 7:\n For example, for a question: can we make this quick roxanne korrine and andrew barrett are having an incredibly horrendous public break up on the quad again\n It counts each word occurence such as \"can\" and accumulates the count into word2count dict\n \"\"\"\n pprint('------ Step 6: counting words in answers ----')\n\n word2count = convert_word_to_count(word2count, clean_answers)\n\n pprint(word2count, stream=Head(5))\n print('\\n\\n')\n\n keys = ['<unk>', '<s>', '</s>']\n\n \"\"\"\n Step 8: Creating word 2 int(count) by filtering words that are greater than the threshold\n \"\"\"\n\n pprint(\n '------ Step 8: questions_vocabs filtered by threshold (>) ----')\n threshold_questions = 20\n questions_vocabs = [] + keys\n for word, count in word2count.items():\n if count >= threshold_questions:\n if not word in questions_vocabs:\n questions_vocabs.append(word)\n\n pprint(questions_vocabs, stream=Head(5))\n print('\\n\\n')\n \"\"\"\n Step 9: Same as step 8 but for answers\n \"\"\"\n pprint(\n '------ Step 9: answers_vocabs filtered by threshold (>) ----')\n threshold_answers = 20\n answers_vocabs = [] + keys\n for word, count in word2count.items():\n if count >= threshold_answers:\n if not word in answers_vocabs:\n answers_vocabs.append(word)\n\n pprint(answers_vocabs, stream=Head(5))\n\n return questions, answers, questions_vocabs, answers_vocabs", "def _encode_question(self, question):\n \n inputs = self.tokenizer.encode_plus(\n question,\n None,\n add_special_tokens=True,\n max_length=self.max_len,\n )\n\n ids = inputs[\"input_ids\"]\n mask = inputs[\"attention_mask\"]\n token_type_ids = inputs[\"token_type_ids\"]\n\n padding_length = self.max_len - len(ids)\n ids += ([0]*padding_length)\n mask += ([0]*padding_length)\n token_type_ids += ([0]*padding_length)\n \n return 
{\n 'ids': torch.tensor(ids, dtype=torch.long),\n 'mask': torch.tensor(mask, dtype=torch.long),\n 'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long),\n \n }", "def preprocess_raw(self):\n pass", "def normalize_dataset(self):", "def convert_to_dialogData(\n premise_raw, hypo_raw, answer_raw, dialog_format=False, binary_classes=False\n):\n premise_raw = premise_raw.strip('\\n').strip('\\t')\n hypo_raw = hypo_raw.strip('\\n').strip('\\t')\n clas = MULTINLI_LABELS\n\n if binary_classes:\n answer_raw = BICLASS_DICT[answer_raw]\n clas = BICLASS_LABELS\n if not dialog_format:\n premise_raw = MULTINLI_PREMISE_PREFIX + premise_raw\n hypo_raw = MULTINLI_HYPO_PREFIX + hypo_raw\n\n question = premise_raw + '\\n' + hypo_raw\n answers = [answer_raw]\n\n return question, answers, clas", "def get_question(self):\n question = self.raw_question\n if question is not None:\n return {\n \"question\": self.raw_question\n }", "def _get_to_actual_data(raw):\n raise NotImplemented", "def buildQuestion():\n #example.com\n QNAME = b\"\\x07\\x65\\x78\\x61\\x6d\\x70\\x6c\\x65\\x03\\x63\\x6f\\x6d\\x00\"\n\n \"\"\"\n A two octet code which specifies the type of the query.\n The values for this field include all codes valid for a\n TYPE field, together with some more general codes which\n can match more than one type of RR.\n \"\"\" \n QTYPE = b\"\\x00\\x01\"\n\n \"\"\"\n A two octet code that specifies the class of the query.\n For example, the QCLASS field is IN for the Internet.\n \"\"\"\n QCLASS = b\"\\x00\\x01\"\n\n dnsBody = QNAME + QTYPE + QCLASS\n #print(dnsBody)\n return dnsBody", "def _convert_data(self, data):\n if isinstance(data, Tensor):\n data = data.asnumpy()\n elif isinstance(data, list):\n data = np.array(data)\n elif isinstance(data, np.ndarray):\n pass\n else:\n raise TypeError('Input data type must be tensor, list or numpy.ndarray')\n return data", "def decode(self, encoded):", "def example_to_data(self, example):\n raise NotImplementedError", "def normalizeData(self, data):\n return _normalizeData(data)", "def normalizeData(self, data):\n return _normalizeData(data)", "def parse_question_data(self):\n section = ''\n subsection = ''\n quest = ''\n # The data falls into 4 cases\n # 1. Sections\n # 2. subsections\n # 3. questions\n # 4. answers.\n\n for line in self.question_data: \n\n if \":\" in line: # case #2\n subsection = line.split(\":\")[1] # split the line on the : into an array but only take the [1] element\n debug(\"Subsection: %s\" % subsection)\n \n elif \".\" in line: # this is either a question or an answer?\n \n if line.split(\".\")[0].isdigit(): # case #3 it's a question, split on . into an array and take the element to the left and ask if it's a digit.\n quest = line # Since we know it's something like \"3. Are you a warlock?\" we stick that in the quest varable.\n debug(\"Question: %s\" % quest)\n # Create a question object and stick it in the dictonary with the key being the question (since we know it'll be unique)\n self.questions[quest] = question(section, subsection, quest) # I know it's redundant to have the key and have a value.\n \n elif line.startswith(\".\"): # case #4 answer All the answers startswith \".\" \n debug(\"Answer: %s\" % line)\n # take the question and append it to the answers array in the question object.\n self.questions[quest].answers.append(line[2:]) # Trim the first two characters off the answer since it's \". 
the answer\"\n \n else: # case #1 # This is section like AMERICAN DEMOCRACY\n section = line # load the line from the file into the section variable\n debug(\"Section = %s\" % section)", "def normalize_data(typeform_response):\n survey = json.loads(typeform_response.survey)\n response = json.loads(typeform_response.response)\n answers = {}\n response_set = response.get('answers') or []\n for answer in response_set:\n field_id = answer.get('field').get('id')\n value_key = answer.get('type')\n value = json.dumps(answer.get(value_key))\n\n field = find_field(field_id, survey)\n field_title = field.get('title') if field else '??'\n\n answers[field_id] = {\n 'field_title': field_title,\n 'answer': value,\n }\n\n if typeform_response.study_group:\n answers['study_group_id'] = {\n 'field_title': 'Learning circle ID',\n 'answer': typeform_response.study_group.id,\n }\n answers['study_group_name'] = {\n 'field_title': 'Learning circle name',\n 'answer': typeform_response.study_group.name\n }\n answers['course'] = {\n 'field_title': 'Course',\n 'answer': typeform_response.study_group.course.title,\n }\n answers['facilitator'] = {\n 'field_title': 'Facilitator',\n 'answer': typeform_response.study_group.created_by.email,\n }\n if typeform_response.study_group.team:\n answers['team'] = {\n 'field_title': 'Team',\n 'answer': typeform_response.study_group.team.name\n }\n\n return answers", "def deserialize(self, data):", "def _read(self, question_id):\n question_id = question_id.squeeze(-1)\n correlation_weight = self._compute_correlation_weight(question_id)\n read_content = torch.matmul(self._value_memory, correlation_weight.unsqueeze(-1)).squeeze(-1)\n return read_content.to(ARGS.device)", "def format_data(self, data):", "def question_new_translate():", "def preprocess(sent):\n return sent", "def process_question_for_prediction(question):\n question = __set_has_hexadecimal(question)\n question = __set_has_numeric(question)\n homework_list = constants.HOMEWORK_SYNONMS_LIST\n homework_list.sort(key=len, reverse=True)\n replacement_text = constants.QUESTION_HAS_HOMEWORK_KEY\n question = __set_has_homework_or_assignment(question, replacement_text, homework_list)\n question = stem_training_data(question)\n return question", "def format_raw_audio_cnn(self):\n result_x, doa_from_file = self.load_audio()\n x = np.array([result_x])\n x_data = cnn.reshape_x_for_cnn(cnn.normalize_x_data(cnn.flatten_stereo(x)))\n\n return x_data, doa_from_file" ]
[ "0.65550745", "0.6127209", "0.60165995", "0.5991417", "0.57663774", "0.5749049", "0.5713165", "0.567219", "0.5659159", "0.56423324", "0.56397426", "0.55999476", "0.5513851", "0.5502245", "0.5497998", "0.5479821", "0.5475443", "0.5465245", "0.545568", "0.54260504", "0.54260504", "0.54212004", "0.54139", "0.5385053", "0.53478056", "0.5334604", "0.5313802", "0.5312668", "0.5306674", "0.5302985" ]
0.6391561
1
Convert count number to label
def convert_count(self, count): count = int(count) if count >= 1000: return str(int(count / 1000)) + 'k' else: return str(count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def numToStrLabel(self, value):\n zero_count = 3 - len(str(value))\n return zero_count * \"0\" + str(value)", "def int_to_text(self, labels):\n string = []\n for i in labels:\n string.append(self.index_map[i])\n return ''.join(string).replace('', ' ')", "def create_label(image_name,number):\r\n\r\n target=[]\r\n for i in range(0,number):\r\n target.append(0)\r\n target[image_name]=1\r\n\r\n return target", "def getLabels(self):\n return self.numToLabel", "def getLabel(labels):\r\n elems = {}\r\n for l in labels:\r\n if l not in elems.keys():\r\n elems[l] = 1\r\n else:\r\n elems[l] += 1\r\n counts = sorted(elems.values(), reverse=True)\r\n if len(counts) > 1 and counts[0] == counts[1]:\r\n return choice(list(elems.keys()))\r\n return sorted(elems, key=elems.get, reverse=True)[0]", "def ordinal_label(n):\n n = int(n)\n return \"%d%s\" % (n,\"tsnrhtdd\"[(n/10%10!=1)*(n%10<4)*n%10::4])", "def label (x):\n\n if x == 0:\n return None\n if x == 1:\n return Fraction(0)\n b = bin(x)[2:]\n s = b[1]\n wf = re.compile(r'^.({}+)(.*)$'.format(s))\n w,f = wf.findall(b)[0]\n p = 1 if s == '1' else -1\n n = p * Fraction(len(w),1)\n scale = Fraction(1,1)\n while len(f):\n scale /= 2\n if f[0] == '1':\n n += scale\n else:\n n += -scale\n f = f[1:]\n return n", "def _label_rider_by_trip_frequency(self, rider):\n if rider['total_num_trips'] <= 5*self.duration:\n label = 0\n elif rider['total_num_trips'] <= 20*self.duration:\n label = 1\n elif rider['total_num_trips'] > 20*self.duration:\n label = 2\n else:\n label = -1\n return label", "def get_label(i, n, tweet):\n click.echo(f\"{i}/{n}: {tweet}\")\n while True:\n click.echo(\"Label (←(-1)/↓(0)/→(1)/↑(None)): \")\n c = click.getchar()\n if c == '\\x1b[D':\n click.echo('')\n return -1\n elif c == '\\x1b[C':\n click.echo('')\n return 1\n elif c == '\\x1b[B':\n click.echo('')\n return 0\n elif c == '\\x1b[A':\n click.echo('')\n return None\n click.echo(\"Invalid option\")", "def generate_label(self):\n\n last = self.label\n self.label += 1\n self.P.append(last)\n\n return last", "def add_incident_count_class_label(data, count_col=\"incidents\", num_classes=6, one_hot=True):\n def add_plus(x, value=num_classes - 1):\n if int(x) == value:\n return str(x) + \"+\"\n return x\n\n data = data.copy()\n data[\"class\"] = np.minimum(data[count_col].values, num_classes - 1)\n data[\"class\"] = data[\"class\"].astype(int).astype(str)\n data[\"class\"] = data[\"class\"].map(add_plus)\n\n # to onehot\n if one_hot:\n classes = np.sort(data[\"class\"].unique())\n data = pd.concat([data, data[\"class\"].str.get_dummies()], axis=1, ignore_index=False)\n class_labels = [\"class_{}\".format(x) for x in classes]\n data = data.rename(columns={x: \"class_{}\".format(x) for x in classes})\n \n return data, class_labels\n\n else:\n return data", "def convert_label_num2string(number, num_types):\n dictionary = empty_label_dictionary(num_types)\n all_labels = list(dictionary.keys())\n return all_labels[number]", "def numerify_iso_label(lab):\n from sage.databases.cremona import class_to_int\n if 'CM' in lab:\n return -1 - class_to_int(lab[2:])\n else:\n return class_to_int(lab.lower())", "def encode_label(label: str) -> int:\n\tif not label:\n\t\treturn 0\n\t# part after letter if it has a number, otherwise 1\n\tindex = int(label[1:]) if len(label) > 1 else 1\n\t# A = 1, B = 2, ... 
E = 5\n\toffset = ord(label[0]) - ord(\"A\") + 1\n\t# compute label number\n\treturn (index - 1) * 5 + offset", "def makeLabel(self):\n\n self.setIndexNames()\n\n if self.isInCore():\n self.getFirstChar()\n else:\n # stick with what we have. (default:ExCore)\n return\n self.label = self.firstChar + \"{0:03d}\".format(self.i2)\n if self.axial is not None:\n # add axial letter\n self.label = self.label + AXIAL_CHARS[self.axial]", "def to_label(self):\n return self.label", "def make_labels(self, ilines):\n\n llist = []\n for lind, lstr in enumerate(ilines):\n # get label and value list\n rv, label, vals = self.get_label_vals(lstr)\n if rv < 1: continue\n\n nvals = len(vals)\n\n # label = self.find_parent_label(label)\n\n if self.verb > 2: print('++ label: %s, %d val(s)' % (label, nvals))\n\n llist.append(label)\n self.maxcounts[label] = nvals\n self.subjcounts[label] = 0\n\n if not UTIL.vals_are_unique(llist):\n print('** warning: labels are not unique, will use only last values')\n llist = UTIL.get_unique_sublist(llist)\n\n return 0, llist", "def uniqueLabel(self):\n label = f\"Label-{self.next_label}\"\n self.next_label += 1\n return label", "def genLabel(self):\n self._nextlabelid += 1\n return CLABEL(self._nextlabelid)", "def _createLabel(element, a, state):\n # len(e.symbol) is 1 or 2 => a % (either 1000 or 100)\n # => gives exact a, or last two digits.\n # the division by 10 removes the last digit.\n firstTwoDigits = (a % (10 ** (4 - len(element.symbol)))) // 10\n # the last digit is either 0-9 if state=0, or A-J if state=1, or K-T if state=2, or U-d if state=3\n lastDigit = (\n \"0123456789\" \"ABCDEFGHIJ\" \"KLMNOPQRST\" \"UVWXYZabcd\"[(a % 10) + state * 10]\n )\n return \"{}{}{}\".format(element.symbol, firstTwoDigits, lastDigit)", "def encode_label(self, label: str) -> int:\n return self.class_map[label]", "def _build_label(self):\n counter = Counter()\n _, labels = self.read_json()\n counter.update(labels)\n dictionary = dict()\n for i, word in enumerate(counter.most_common()):\n dictionary[word[0]] = i\n return dictionary", "def __get_label_idx__(idx: int) -> int:\n\n label_idx = idx // 100\n label_idx = int(label_idx) if label_idx >= 0 else 0\n\n return label_idx", "def get_value_label(self, value):\n return self.label_config.get_index_label(value)", "def set_label_text(index):\n nonlocal count_label\n labeled = len(annotations.loc[annotations['changed']])\n str_output = f'{labeled} of {len(annotations)} Examples annotated, Current Position: {index + 1} '\n if id_column in annotations.columns and index >= 0 and index < len(annotations):\n ix = annotations.iloc[index].name\n str_output += f\"(id: {annotations.at[ix, id_column]}) \"\n count_label.value = str_output", "def rename_labels_by_count(labels):\n new_labels, label_counts = _count_labels(labels)\n\n return new_labels", "def _fact2label(self, ax_ndx, fact_ndx):\n if len(self._dims) > 1:\n key,value = self._factors[ax_ndx][fact_ndx]\n else:\n if fact_ndx == 1:\n return ''\n key,value = self._factors[ax_ndx][0]\n return '{} = {}'.format(key,value) if key != '' else ''", "def get_label(self, list_question_int):\n \"\"\" list_question_int is 0 based, the question ids and labels are 1-based \"\"\"\n\n quiz_question = self.get_quiz_question(list_question_int)\n form_question_int = list_question_int + 1\n label = str(form_question_int) + '. 
' + quiz_question['question_text']\n # print('Quiz.get_label - list_question_int:', list_question_int)\n # print('Quiz.get_label - label:', label)\n return label", "def add_num_label(self, name, value=0, limiter=None, formatter=None, label=None, location=(None,0)):\n widget=widget_label.LVNumLabel(self,value=value,num_limit=limiter,num_format=formatter)\n widget.setObjectName(_fromUtf8(self.name+\"_\"+name))\n return self.add_simple_widget(name,widget,label=label,add_indicator=False,location=location)", "def label_stats(label_mapping):\n\tlabels = list(label_mapping.values())\n\n\tfor count, elem in sorted(((labels.count(e), e) for e in set(labels)), reverse=True):\n\t\tprint('%s: \\t\\t %d' % (elem, count))" ]
[ "0.7585147", "0.6444092", "0.6398636", "0.63696074", "0.6362171", "0.62847257", "0.62568784", "0.62455547", "0.62153375", "0.62082916", "0.6184615", "0.61571884", "0.6150326", "0.614304", "0.61208063", "0.60952497", "0.6082725", "0.60614187", "0.60454804", "0.60439867", "0.5997629", "0.59949285", "0.5989927", "0.5981236", "0.5976755", "0.59498155", "0.594894", "0.5947276", "0.59425116", "0.59258765" ]
0.6841037
1
Initialise bishop, setting the color of the piece.
def __init__(self, color, *args, **kwargs): # Initialise bishop with correct symbol super().__init__( color, symbol = PieceRepresentation.KING, *args, **kwargs, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, color):\n self.id = Piece.PIECE_ID\n self.crowned = False\n self.color = color\n\n Piece.PIECE_ID += 1", "def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'Ho'", "def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'So'", "def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'Ch'", "def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'Gu'", "def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'Ca'", "def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'El'", "def __init__(self, colour):\n self.colour = colour\n self.board = Board()\n self.pieces = self.assign_pieces()\n self.strategy = Strategy()", "def __init__(self, color, location):\n\n self._color = color\n self._piece_type = None\n self._location = location", "def __init__(self, color, location):\n\n super().__init__(color, location)\n self._piece_type = 'Ge'\n self._in_check = False", "def __init__(self, colour):\n # TODO: Set up state representation.\n\n self.colour = colour\n # Initial layout\n self.layout = {\n \"whites\": [[1, 0, 1], [1, 1, 1], [1, 3, 1], [1, 4, 1], [1, 6, 1], [1, 7, 1],\n [1, 0, 0], [1, 1, 0], [1, 3, 0], [1, 4, 0], [1, 6, 0], [1, 7, 0]],\n \"blacks\": [[1, 0, 7], [1, 1, 7], [1, 3, 7], [1, 4, 7], [1, 6, 7], [1, 7, 7],\n [1, 0, 6], [1, 1, 6], [1, 3, 6], [1, 4, 6], [1, 6, 6], [1, 7, 6]]\n }", "def __init__(self, colour):\n self.colour = colour\n self.bags = []", "def __init__(self):\n self.red = 0\n self.black = 0", "def setUp(self):\r\n self.black = Color('black', (0, 0, 0))\r\n self.red = Color('red', (255, 0, 0))\r\n self.pink = Color('pink', (100, 0, 0))\r\n self.green = Color('green', (0, 255, 0))", "def __init__(self, field, color):\n Figure.__init__(self, field, color)\n self.pot_moves = {(i, -i) for i in range(-7, 0)} \\\n | {(i, -i) for i in range(1, 8)} \\\n | {(i, i) for i in range(-7, 0)} \\\n | {(i, i) for i in range(1, 8)}\n self.value = 3.5 * self.color_value\n self.short_name = self.color[0] + \"B\"", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def setup(self):\n piece_order = ['ROOK','KNIGHT','BISHOP','QUEEN','KING','BISHOP','KNIGHT','ROOK']\n for row,colour in zip([0,7],['BLACK','WHITE']):\n for col,piece in enumerate(piece_order):\n self.board[row][col] = colour + '_' + piece\n \n for row,colour in zip([1,6],['BLACK','WHITE']):\n for i in range(8):\n self.board[row][i] = colour + '_' + 'PAWN'\n \n self.toplay = 'WHITE'", "def __init__(self, colour: Colour) -> None:\n self.colour = colour", "def SetColor(self, rgbtuple):\n if not rgbtuple:\n rgbtuple = wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNFACE).Get()\n col = [c/255.0 for c in rgbtuple]\n self.figure.set_facecolor(col)\n self.figure.set_edgecolor(col)\n self.canvas.SetBackgroundColour(wx.Colour(*rgbtuple))", "def set_piece(self, piece: Piece) -> None:\r\n self.piece = piece \r\n piece.square = self", "def __init__(self, color):\n self._color = color # Color redefined when placed\n # Palace coords\n self._d = 
['d1','d2','d3','d8','d9','d10'] \n self._e = ['e1','e2','e3','e8','e9','e10']\n self._f = ['f1','f2','f3','f8','f9','f10']\n self._special = self._d + self._f + self._e\n self._corners = ['d1','f1','e2','d3','f3','d8','d10','f8','f10','e9']", "def setBrick(self,x,y,color):\n assert type(x)==int or type(x)==float\n assert type(y)==int or type(y)==float\n assert color in [colormodel.WHITE,colormodel.LIGHT_GRAY,colormodel.GRAY,\n colormodel.DARK_GRAY,colormodel.BLACK,colormodel.RED,\n colormodel.PINK,colormodel.YELLOW,colormodel.ORANGE,\n colormodel.GREEN,colormodel.BLUE,colormodel.MAGENTA,\n colormodel.CYAN]\n \n return Brick(x,y,color)", "def __init__(self, *args, **kwargs):\n _gdi_.Colour_swiginit(self,_gdi_.new_Colour(*args, **kwargs))", "def set_color(self, color):\n\t\tpass" ]
[ "0.6933153", "0.69328475", "0.6804505", "0.67297816", "0.6722225", "0.66612196", "0.66514975", "0.6557041", "0.6474227", "0.6467087", "0.6432223", "0.64256966", "0.62181336", "0.6193901", "0.6152506", "0.61518973", "0.61518973", "0.61518973", "0.61518973", "0.61518973", "0.61518973", "0.61518973", "0.6137134", "0.60972124", "0.609245", "0.60912526", "0.6088724", "0.6074685", "0.60693425", "0.60676533" ]
0.71482193
0
Take a JSON POST body, add an attribute to it "SeenByFakeDocker", then pass it back as a response.
def render_POST(self, request): jsonPayload = request.content.read() jsonParsed = json.loads(jsonPayload) if "SeenByFakeDocker" in jsonParsed: raise Exception("already seen by a fake docker?!") jsonParsed["SeenByFakeDocker"] = 42 if not self.rawStream: request.setHeader("Content-Type", "application/json") else: request.setHeader("Content-Type", "application/vnd.docker.raw-stream") if self.chunkedResponse: request.setHeader("Transfer-Encoding", "chunked") return json.dumps(jsonParsed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def json_post(method):\n def wrap(*args, **kwargs):\n # idx is the position of the data\n idx = 0\n if not isinstance(args[0], webob.Request):\n idx = 1\n\n json_data = json.loads(args[idx].body)\n kwargs['post_data'] = json_data\n\n #print \"JP:\", repr(args), repr(kwargs)\n\n return method(*args, **kwargs)\n \n return json_return(wrap)", "def grab_post_json(user):\n\n the_name = \"eapi.user.test.x.{}\".format(int(time.time()))\n the_image_url = \"https://i.imgur.com/uGOD7Wr.jpg\"\n team = Team().choose_random()\n role = Role().choose_random()\n\n return json.dumps(\n {\n \"external_id\": fake.grab_external_id(the_name),\n \"email\": fake.grab_email(the_name),\n \"new_email\": \"EDIT.{}\".format(fake.grab_email(the_name)),\n \"status\": fake.grab_bool(),\n \"type\": fake.grab_user_type(),\n \"role\": {\n \"role_name\": role[\"role_name\"]\n },\n \"info\": {\n \"address\": fake.grab_address_one(),\n \"address_2\": fake.grab_address_two(),\n \"city\": fake.grab_city(),\n \"state\": fake.grab_state_abbr(),\n \"zip_code\": fake.grab_zip(),\n \"first_name\": fake.grab_first_name(),\n \"last_name\": fake.grab_last_name(),\n \"phone_cell\": fake.grab_phone(),\n \"phone_office\": fake.grab_phone(),\n \"phone_fax\": fake.grab_phone(),\n \"job_title\": fake.grab_job(),\n \"cost_center\": fake.grab_building_number(),\n \"website\": fake.grab_url(),\n \"testimonial_url\": fake.grab_url(),\n \"timezone_name\": fake.grab_timezone(),\n \"location_id\": fake.grab_building_number(),\n \"company\": fake.grab_company()\n },\n \"settings_marketing\": {\n \"application_url\": fake.grab_url(),\n \"disclaimer\": fake.grab_phrase(),\n \"license_title\": fake.grab_job(),\n \"social_facebook\": fake.grab_url(),\n \"social_twitter\": fake.grab_url(),\n \"social_google\": fake.grab_url(),\n \"social_linkedin\": fake.grab_url(),\n \"social_youtube\": fake.grab_url(),\n \"weekly_spend_threshold\": fake.grab_percent(),\n \"daily_spend_threshold\": fake.grab_percent() - 1.00,\n \"agent_bio\": fake.grab_phrase(),\n \"short_name\": \"short.{}\".format(fake.grab_username()),\n \"post_close_survey_url\": fake.grab_url(),\n \"wistia_id\": fake.grab_large_int(),\n \"user_quote\": fake.grab_phrase(),\n \"profile_img_url\": the_image_url,\n \"logo_img_url\": the_image_url\n },\n \"expenditure_approver\": {\n \"id\": user[\"id\"]\n },\n \"licenses\": [\n {\n \"license_name\": fake.grab_state_abbr(),\n \"content\": \"license.{}\".format(fake.grab_large_int())\n }\n ],\n \"teams\": fake.grab_one_from_validated_array(team, \"id\")\n }\n )", "def post_response(self, body, **kwargs):\n data = json.loads(body)\n if \"errors\" in data:\n self.handle_error(data)", "def _mock_post(*args, **kwargs):\n # pylint: disable=unused-argument\n payload = kwargs[\"payload\"]\n res = []\n for name in payload[\"usernames\"]:\n res.append({'firstName': name,\n 'lastName': name,\n 'username': name})\n return {'persons': res}", "def grab_post_json():\n\n return json.dumps(\n {\n \"loan_purpose\": \"loanpurp.test.x.{}\".format(int(time.time()))\n }\n )", "def test_typical_post(self):\n self.seed_static_data()\n params = {\n 'event_id': 2,\n 'tag_type': 'RESPONSE',\n 'name': {\n 'en': 'English Tag 2 Event 2',\n 'fr': 'French Tag 2 Event 2',\n },\n 'description': {\n 'en': 'English Tag 2 Event 2 Description',\n 'fr': 'French Tag 2 Event 2 Description',\n }\n }\n response = self.app.post(\n '/api/v1/tag', \n headers=self.user2_headers, \n data=json.dumps(params),\n content_type='application/json')\n self.assertEqual(response.status_code, 201)\n data = 
json.loads(response.data)\n new_id = data['id']\n\n response = self.app.get('/api/v1/tag', headers=self.user2_headers, data={'id': new_id, 'event_id': 2})\n data = json.loads(response.data)\n\n self.assertEqual(data['id'], new_id)\n self.assertEqual(data['event_id'], 2)\n self.assertEqual(data['tag_type'], 'RESPONSE')\n self.assertDictEqual(data['name'], {\n 'en': 'English Tag 2 Event 2',\n 'fr': 'French Tag 2 Event 2'\n })\n self.assertDictEqual(data['description'], {\n 'en': 'English Tag 2 Event 2 Description',\n 'fr': 'French Tag 2 Event 2 Description'\n })", "def post_algorithm():\n try:\n request_json = request.get_json()\n result = json.dumps([])\n response = app.response_class(\n response=result,\n status=200,\n mimetype='application/json')\n except ValueError as e:\n response = app.response_class(\n status=400,\n response=str(e)\n )\n return response", "def test_mocked_post_json_format(self):\n c = Client()\n response = c.post(\"/apimock/mocked/mocked_post?format=json\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n '{\"value\": \"test_return_value_for_post\"}', response.content)", "def post(self):\n data = request.get_json()\n dbops.post_meta(data)\n return None, 204", "def post_data(logged_in_apiclient):\n _, user = logged_in_apiclient\n\n input_data = {\n \"owner\": user.id,\n \"title\": \"foo title\",\n \"view_lists\": [],\n \"admin_lists\": [],\n }\n return input_data", "def do_post_parse_json(self, *args, **kwargs): # real signature unknown\n pass", "def post_data():\n return json.loads('{\"success\":true, \"message\":\"Data created (but not really)\" }')", "def post(self):\n data = request.json\n return save_new_post(data=data)", "def test_user_can_send_json_body_data(self):\n data = {'user_name': 3435455}\n req = self.httpbin_4.test_requests_put_method(json=data, dry_run=True)\n self.assertEqual(bytes(json.dumps(data), encoding='utf8'), req.prepared_request.body)", "def render_POST(self, request):\n jsonPayload = request.content.read()\n jsonParsed = json.loads(jsonPayload)\n\n if self.explode:\n request.setResponseCode(500)\n return \"sadness for you, today.\"\n\n if jsonParsed[\"Type\"] == \"pre-hook\" and self.pre:\n return self._renderPreHook(request, jsonParsed)\n elif jsonParsed[\"Type\"] == \"post-hook\" and self.post:\n return self._renderPostHook(request, jsonParsed)", "def test_client_can_send_default_json_body_data(self):\n req = self.httpbin_4.test_requests_post_method(dry_run=True)\n req_data = self.httpbin_4.client['test_requests_post_method']['data']\n self.assertEqual(bytes(json.dumps(req_data), encoding='utf8'), req.prepared_request.body)", "def api_post(mocker):\n def _mock_post(event, registrations, **kwargs):\n return CERNAccessRequestState.active, {reg.id: {'$rc': 'test'} for reg in registrations}\n\n mock = mocker.patch('indico_cern_access.plugin.send_adams_post_request', side_effect=_mock_post, autospec=True)\n mocker.patch('indico_cern_access.util.send_adams_post_request', new=mock)\n return mock", "def api_post(mocker):\n def _mock_post(event, registrations, **kwargs):\n return CERNAccessRequestState.active, {reg.id: {'$rc': 'test'} for reg in registrations}\n\n mock = mocker.patch('indico_cern_access.plugin.send_adams_post_request', side_effect=_mock_post, autospec=True)\n mocker.patch('indico_cern_access.util.send_adams_post_request', new=mock)\n return mock", "def mocked_requests_post(*args, **kwargs):\n response = {'message': 'Success!',\n 'data': {}}\n return MockResponse(json.dumps(response), 200, HEADERS)", "def 
test_dict_for_request_in_method_post(self):\n self.request.POST = {\"foo\": \"bar\"}\n response = self.panel.process_request(self.request)\n self.panel.generate_stats(self.request, response)\n # ensure the panel POST request data is processed correctly.\n content = self.panel.content\n self.assertIn(\"foo\", content)\n self.assertIn(\"bar\", content)", "def test_post(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.POST, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.post(rest_url)", "def testPost(self):\n response = self.runPost(self.root, data=self.post_data)\n self.response_201(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertIn(\"sodar_uuid\", data)", "def test_pantry_post(client):\n nu = { \"add_food\": \"quinoa\" }\n new_pantry = copy.deepcopy(pantry)\n resp = post(pantry_post, nu)\n new_pantry['pantry'].append(nu[\"add_food\"])\n assert sorted(resp['pantry']) == sorted(new_pantry['pantry'])\n assert resp['status']['css'] == \"has-text-success\"\n\n bad = { \"add_food\": \"fake\" }\n bad_resp = post(pantry_post, bad)\n assert bad_resp['status']['css'] == \"has-text-danger\"\n\n with raises(IndexError):\n post(pantry_post, {})", "def program_post_response_fixture() -> dict[str, Any]:\n return cast(dict[str, Any], json.loads(load_fixture(\"program_post_response.json\")))", "def post(self):\n json_body = self.request.body\n if not json_body:\n # TODO(davidbyttow): Log error?\n return\n\n json_body = unicode(json_body, 'utf8')\n logging.info('Incoming: ' + json_body)\n\n context, events = robot_abstract.ParseJSONBody(json_body)\n for event in events:\n try:\n self._robot.HandleEvent(event, context)\n except:\n logging.error(traceback.format_exc())\n\n json_response = robot_abstract.SerializeContext(context,\n self._robot.version)\n logging.info('Outgoing: ' + json_response)\n\n # Build the response.\n self.response.headers['Content-Type'] = 'application/json; charset=utf-8'\n self.response.out.write(json_response.encode('utf-8'))", "def post(self, **kwargs):\n return self.client.post(\n self.url(), data=json.dumps(kwargs),\n content_type='application/json')", "def on_post(self, req, resp):\n self._set_default_response(resp)\n\n string = req.media.get('string')\n if string is None:\n resp.body = \"\"\"{ \"result\": \"error: request payload must contain a JSON object with a 'string' attribute\" }\"\"\"\n else:\n resp.status = falcon.HTTP_200\n result = \"pass\" if self._validate(string) else \"fail\"\n resp.body = json.dumps({'result': result})", "def update_animal():\n\n animal_uuid = request.args.get(\"uuid\", default=None, type=str)\n animal = json.loads(rd.get(animal_uuid))\n\n new_animal_body = request.args.get(\"body\", default=None, type=str)\n if new_animal_body is not None:\n animal[\"body\"] = new_animal_body\n\n new_animal_arms = request.args.get(\"arms\", default=None, type=int)\n if new_animal_body is not None:\n animal[\"arms\"] = new_animal_arms\n\n new_animal_legs = request.args.get(\"legs\", default=None, type=int)\n if new_animal_legs is not None:\n animal[\"legs\"] = new_animal_legs\n\n new_animal_tails = request.args.get(\"tails\", default=None, type=int)\n if new_animal_tails is not None:\n animal[\"tails\"] = new_animal_tails\n\n rd.set(animal_uuid, json.dumps(animal))\n return animal", "def post(self, **kwargs):\n data = request.json\n return save_new_writer(data=data)", "def 
api_post(self, *args, **kwargs):\n return self.api_post_with_response(*args, **kwargs)[0]" ]
[ "0.579965", "0.5641818", "0.5623903", "0.54964966", "0.5440844", "0.53810656", "0.5376849", "0.52939844", "0.52551764", "0.5233824", "0.52013814", "0.51728314", "0.517167", "0.5169562", "0.51675385", "0.51555437", "0.5133879", "0.5133879", "0.5101325", "0.50688505", "0.50259495", "0.5004965", "0.49997815", "0.49938968", "0.49897686", "0.49637058", "0.4951858", "0.49493968", "0.49383554", "0.49381655" ]
0.74861807
0
OK, we got to the meat of it. This render handles JSON POST requests. If pre is set, it succeeds on JSON which looks like.
def render_POST(self, request): jsonPayload = request.content.read() jsonParsed = json.loads(jsonPayload) if self.explode: request.setResponseCode(500) return "sadness for you, today." if jsonParsed["Type"] == "pre-hook" and self.pre: return self._renderPreHook(request, jsonParsed) elif jsonParsed["Type"] == "post-hook" and self.post: return self._renderPostHook(request, jsonParsed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_POST(self, request):\n jsonPayload = request.content.read()\n jsonParsed = json.loads(jsonPayload)\n if \"SeenByFakeDocker\" in jsonParsed:\n raise Exception(\"already seen by a fake docker?!\")\n jsonParsed[\"SeenByFakeDocker\"] = 42\n if not self.rawStream:\n request.setHeader(\"Content-Type\", \"application/json\")\n else:\n request.setHeader(\"Content-Type\", \"application/vnd.docker.raw-stream\")\n if self.chunkedResponse:\n request.setHeader(\"Transfer-Encoding\", \"chunked\")\n return json.dumps(jsonParsed)", "def process_request(self, request):\n\n # Does the request contain a JSON payload?\n content_type = request.META.get('CONTENT_TYPE', '')\n if content_type != '' and 'application/json' in content_type:\n\n # Ignore empty payloads (e.g. for deletes)\n content_length = 0\n if request.META.get('CONTENT_LENGTH', '') != '':\n content_length = int(request.META.get('CONTENT_LENGTH', 0))\n if content_length > 0:\n try:\n # Replace request.POST with flattened dictionary from JSON\n decoded_dict = simplejson.loads(request.raw_post_data)\n request.POST = request.POST.copy()\n request.POST = self._flatten_dict(decoded_dict)\n except:\n return HttpResponse('Invalid JSON', status=400)", "def json_post(method):\n def wrap(*args, **kwargs):\n # idx is the position of the data\n idx = 0\n if not isinstance(args[0], webob.Request):\n idx = 1\n\n json_data = json.loads(args[idx].body)\n kwargs['post_data'] = json_data\n\n #print \"JP:\", repr(args), repr(kwargs)\n\n return method(*args, **kwargs)\n \n return json_return(wrap)", "def render_POST(self, request):", "def manual_json_post():\n form = JSONForm()\n is_dict = None\n dict_error = None\n if request.method == 'POST':\n parsed_dict = json_post_to_dict(form)\n\n if parsed_dict is None:\n dict_error = \"JSON message was improperly formatted.\"\n is_dict = False\n current_app.logger.warning('JSON Form Message '\n 'Exception: %s', dict_error)\n return render_template('json_post.html',\n form=form,\n is_dict=is_dict,\n error=dict_error)\n\n json_post = Machine.flatten(parsed_dict)\n flattened_accepted_json = Machine.flatten(ACCEPTED_JSON)\n data = Machine.from_json(json_post)\n to_json_data = data.to_json()\n\n if not Machine.is_valid_datetime(json_post):\n dict_error = (\"Missing datetime or Datetime is not \"\n \"in the correct format.\")\n is_dict = False\n current_app.logger.warning('JSON Form Message '\n 'Exception: %s', dict_error)\n return render_template('json_post.html',\n form=form,\n is_dict=is_dict,\n error=dict_error)\n\n missing_data, invalid_sensors = Machine.invalid_data(\n json_post, flattened_accepted_json)\n\n if len(missing_data) > 0:\n dict_error = (\"Missing data from sensors. \"\n \"A sensor may have been removed from the network. \"\n \"Missing data: \" + str(missing_data))\n is_dict = False\n current_app.logger.warning('JSON Form Message '\n 'Exception: %s', dict_error)\n return render_template('json_post.html',\n form=form,\n is_dict=is_dict,\n error=dict_error)\n\n if len(invalid_sensors) > 0:\n dict_error = (\"Invalid or extra sensors. \"\n \"A sensor may have been added to the network. 
\"\n \"Invalid: \" + str(invalid_sensors))\n is_dict = False\n current_app.logger.warning('JSON Form Message '\n 'Exception: %s', dict_error)\n return render_template('json_post.html',\n form=form,\n is_dict=is_dict,\n error=dict_error)\n\n \"\"\"\n Set datetime key in cache if it doesn't already exist.\n Try to commit it to the database if it wasn't in cache already.\n \"\"\"\n try:\n if cache.get(json_post['datetime']) is None:\n cache.set(json_post['datetime'], to_json_data,\n timeout=current_app.config['REDIS_CACHE_TIMEOUT'])\n try:\n db.session.add(data)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError:\n is_dict = False\n dict_error = (\n \"There was a unique constraint error,\"\n \" this datetime already appears in the database.\")\n return render_template('json_post.html',\n form=form,\n is_dict=is_dict,\n error=dict_error)\n else:\n is_dict = False\n dict_error = (\"The datetime already appears in the cache.\"\n \" There was a unique constraint error.\")\n return render_template('json_post.html',\n form=form,\n is_dict=is_dict,\n error=dict_error)\n except RedisError as e:\n print(e)\n print('Redis port may be closed, the redis server does '\n 'not appear to be running.')\n is_dict = False\n dict_error = (\"Redis port may be closed.\")\n return render_template('json_post.html',\n form=form,\n is_dict=is_dict,\n error=dict_error)\n\n dict_error = None\n is_dict = True\n return render_template('json_post.html',\n form=form,\n is_dict=is_dict,\n error=dict_error)\n\n return render_template('json_post.html',\n form=form,\n is_dict=is_dict,\n error=dict_error)", "def view_post():\n\n return jsonify(\n get_dict(\"url\", \"args\", \"form\", \"data\", \"origin\", \"headers\", \"files\", \"json\")\n )", "def test_renderer_works_correctly_with_application_json(self):\n rendered = self.renderer.render(\n data=self.data,\n media_type=\"application/json\",\n renderer_context=None,\n )\n reloaded = orjson.loads(rendered)\n\n self.assertEqual(reloaded, self.data)", "def render_json(object):\r\n return HttpResponse(jsonify(object), content_type='application/json')", "def render(self, renderParam):\n if 'initial_data' not in renderParam:\n renderParam['initial_data'] = json.dumps({});\n return pystache.render(self.template, renderParam)", "def post_algorithm():\n try:\n request_json = request.get_json()\n result = json.dumps([])\n response = app.response_class(\n response=result,\n status=200,\n mimetype='application/json')\n except ValueError as e:\n response = app.response_class(\n status=400,\n response=str(e)\n )\n return response", "def prepare(self):\n if self.request.body:\n if self.request.headers[\"Content-Type\"] and self.request.headers[\"Content-Type\"].startswith(\"application/json\") and self.request.body:\n self.json_body = self.request.body\n else:\n self.json_body = None", "def do_post_parse_json(self, *args, **kwargs): # real signature unknown\n pass", "def preprocessRequest(self, route):\n request.jsonData = None\n\n if not request.headers.get(\"Content-Type\", \"\").startswith(\"application/json\"):\n # there is no JSON posted, so we can return\n self.logger.debug(\"No JSON to decode; finished\")\n return\n\n # JSON is expected, so ensure it is either already parsed by bottle, or parse it ourselves\n if hasattr(request, \"json\") and request.json is not None:\n # It is already parsed, so there's nothing to do\n self.logger.debug(\"JSON data already parsed by bottle\")\n request.jsonData = request.json\n return\n\n self.logger.debug(\"Attempting to parse JSON from 
request.body since request.json is missing/None\")\n # ensure some data was actually POSTed\n if hasattr(request, \"body\") and request.body:\n try:\n # TODO: set encoding based on request header\n request.jsonData = json.load(request.body)\n self.logger.debug(\"Decoded JSON successfully\")\n except Exception, e:\n self.logger.warn(\"Request header Content-Type indicates JSON, and we failed to parse request.body: %s\" % e)\n request.body.seek(0)\n self.logger.warn(\"Request body (first 32bytes)=%s\" % repr(request.body.read(32)))\n else:\n self.logger.warn(\"Request header Content-Type indicates JSON, but no data was POSTed?\")", "def json(data):\n if isinstance(data, dict):\n data = ujson.encode(data)\n uid = str(uuid.uuid4())\n display(HTML('<div id=\"{0}\" style=\"height: 600px; width:100%;\"></div>'.format(uid)))\n display(Javascript(\"\"\"\n require([\"https://rawgit.com/caldwell/renderjson/master/renderjson.js\"], function() {\n document.getElementById('%s').appendChild(renderjson(%s))\n });\n \"\"\" % (uid, data)))", "def render_to_response(self, context):\n if self.get_is_ajax():\n return self.get_json_response(self.get_json_content_prefix() + self.convert_context_to_json(context))\n return super(AjaxJsonResponseMixin, self).render_to_response(context)", "def render_POST(self, request):\n # Make sure simple_auth_key is a good key\n auth_key = request.args[\"simple_auth_key\"][0]\n if auth_key != \"abc\":\n defer.returnValue(str(webapi.ValueError(request, \n \"simple_auth_key\",\n \"Key isn't valid!\")))\n \n # Test latency to request example.com\n start_time = time.time()\n web_agent = Agent(reactor)\n resp = yield web_agent.request(\"GET\", \"http://example.com\")\n end_time = time.time()\n \n # new_client is an optional parameter,\n # so set a default value if it isn't present\n # in the JSON arguments\n new_client = False\n if request.jsonArgs.has_key(\"new_client\"):\n new_client = request.jsonArgs[\"new_client\"]\n \n # Return a JSON dictionary as the API call result.\n return_dict = {\"result\" : {\"latency\" : end_time-start_time,\n \"client_tz\" : request.jsonArgs[\"client_tz\"],\n \"client_id\" : request.jsonArgs[\"client_id\"],\n \"new_client\" : request.jsonArgs[\"new_client\"]}}\n \n defer.returnValue(json.dumps(return_dict))", "def render_json(template, data):\n result = render(template, data)\n result = _remove_ctl_chars(result)\n return _convert_to_json(result)", "def render_json(self, obj):\n self.response.content_type = \"application/json\"\n self.response.out.write(json.encode(obj))", "def patch_json(self, data):\n def hook(response):\n response.patch_json(data)\n return self.dynamic_hook(hook)", "def test_renderer_works_with_provided_default_is_none(self):\n\n data = {\"value\": \"test\"}\n rendered = self.renderer.render(\n data=data,\n media_type=\"application/json\",\n renderer_context={\"default_function\": None},\n )\n reloaded = orjson.loads(rendered)\n\n self.assertEqual(reloaded, dict(data))", "def prepare(self):\n if 'Content-Type' in self.request.headers:\n content_type = self.request.headers['Content-Type']\n if content_type == \"application/json\":\n print 'json data'\n data = self.request.body\n try:\n json_data = json_decode(data)\n except ValueError:\n raise tornado.httpserver._BadRequestException(\n \"Invalid JSON structure.\"\n )\n if type(json_data) != dict:\n raise tornado.httpserver._BadRequestException(\n \"We only accept key value objects!\"\n )\n for key, value in json_data.iteritems():\n self.request.arguments[key] = [unicode(value),]", 
"def _postproc(self, request):\n if request.status_code != 200: raise Exception('wrong error code: {0}'.format(request.status_code))\n data = request.json()\n self.data = self._finalize_data(data)", "def postprocessRequest(self, retval, route):\n JSONed = False\n GZIPPED = False\n\n if retval is None:\n self.logger.warn(\"retval is None!\")\n return retval\n\n # Is this request under the a path we're enforcing JSON output for?\n if (route is not None and hasattr(route, 'rule') and route.rule.startswith(self.baseRulePath)) or response.status_code >= 400:\n # It is. Try to serialize the returned data as JSON\n self.logger.debug(\"response should be JSON\")\n\n # First, is the data even something we can serialize as JSON?\n # if the retval is not a dict, we don't know what to do with it, so just be transparent\n if type(retval) not in (dict, list):\n self.logger.error(\"\\033[41;1m You are trying to send the client data that doesn't look like it should be JSON (%s). Fix this! \\033[0m\" % type(retval))\n # TODO: consider raising an exception so as to generate a server error (500), forcing the app developer\n # to confront why/how they are sending back something that doesn't make much sense serializing as JSON\n else:\n # Was the \"pretty\" query parameter set?\n if request.query.get(\"pretty\") == 'true':\n # It was. Indent & sort keys\n self.logger.debug(\"found pretty query param, value is true, prettying JSON\")\n retval = json.dumps(retval, indent=4, sort_keys=True)\n else:\n # It was not. By default, we'll use the most compact representation\n retval = json.dumps(retval, separators=(',', ':'))\n response.content_type = \"application/json\"\n self.logger.debug(\"%d bytes of JSON created\" % len(retval))\n JSONed = True\n else:\n self.logger.debug(\"response should NOT be JSON\")\n\n # Gzipping the response\n # Can the client even handle gzipped response bodies?\n httpRespObj = None\n if isinstance(retval, bottle.HTTPResponse):\n # we'll keep the HTTPResponse so we can update it after gzipping.\n self.logger.debug(\"Found HTTPResponse instance\")\n httpRespObj = retval\n if type(retval.body) in (str, unicode):\n retval = retval.body\n elif hasattr(retval.body, \"read\"):\n retval = retval.body.read()\n else:\n self.logger.error(\"HTTPResponse.body attr is not a str and does not have a read() method!\")\n raise ValueError(\"HTTPResponse.body is not sane: attr is not a str, and is not a file-like object\")\n\n elif isinstance(retval, bottle.HTTPError):\n self.logger.debug(\"Found HTTPError instance\")\n httpRespObj = retval\n if type(retval.body) in (str, unicode):\n retval = retval.body\n elif hasattr(retval.body, \"read\"):\n retval = retval.body.read()\n else:\n self.logger.error(\"HTTPError.body attr is not a str and does not have a read() method!\")\n raise ValueError(\"HTTPError.body is not sane: attr is not a str, and is not a file-like object\")\n\n if 'gzip' in request.headers.get(\"Accept-Encoding\", \"\") and len(retval) > 0:\n self.logger.debug(\"client accepts gzip, gzipping data\")\n # the client handle gzipped data, so lets gzip out data\n self.logger.debug(\"original response data was %d bytes\" % len(retval))\n sio = StringIO.StringIO()\n gzFile = gzip.GzipFile(fileobj=sio, mode='wb', compresslevel=6)\n gzFile.write(retval)\n gzFile.close()\n sio.seek(0)\n retval = sio.read()\n sio.close()\n self.logger.debug(\"new gzipped response data is %d bytes\" % len(retval))\n GZIPPED = True\n\n # Were we given an HTTPResponse isntance? 
If so, we need to update it a bit\n if httpRespObj:\n self.logger.debug(\"Updating HTTPResponse instance with gzipped content, headers\")\n httpRespObj.body = retval\n httpRespObj['Content-Length'] = str(len(retval))\n httpRespObj['Content-Encoding'] = 'gzip'\n else:\n # update the content-length (it is already set) and add the content-encoding header\n response.set_header('Content-Length', str(len(retval)))\n response.set_header('Content-Encoding', 'gzip')\n else:\n self.logger.debug(\"client either doesn't accept gzip or there's no data to return; len(retval)=%d\" % len(retval))\n\n self.logger.info(\"RESPONSE %s gzipped:%s json:%s size:%dB\" % (response.status_code, GZIPPED, JSONed, len(retval)))\n if httpRespObj:\n return httpRespObj\n return retval", "def render_to_response(self, context, **response_kwargs):\n if 'json' in self.request.GET.get('format', ''):\n s = serializers.serialize('json', context.get('photos'))\n return HttpResponse(s, content_type=\"application/json\")\n\n # Business as usual otherwise\n else:\n return super(PropertyPhotosView, self).render_to_response(context, **response_kwargs)", "def _create(self, request, *args, **kwargs):\n app = kwargs['app']\n\n data_form = PreviewJSONForm(request.data)\n if not data_form.is_valid():\n return Response(data_form.errors, status=HTTP_400_BAD_REQUEST)\n\n form = PreviewForm(data_form.cleaned_data)\n if not form.is_valid():\n return Response(data_form.errors, status=HTTP_400_BAD_REQUEST)\n\n form.save(app)\n log.info('Preview created: %s' % form.instance)\n serializer = self.get_serializer(form.instance)\n return Response(serializer.data, status=HTTP_201_CREATED)", "def render_to_json_response(self, data: Optional[Dict] = {}, meta: Optional[Dict] = {},\n error: Optional[str] = '', status=HTTPStatus.OK, **response_kwargs):\n response_data = {\"body\": data, \"meta\": meta, \"error\": error}\n return JsonResponse(response_data, status=status, **response_kwargs)", "def process_request(request):\n initial_data = request.POST[\"initial_data\"]\n if re.search(\"datetime.date\\\\((.*?)\\\\)\", initial_data):\n date_val = re.findall(\"datetime.date\\\\((.*?)\\\\)\", initial_data)\n for date in date_val:\n dates = list(map(int, date.split(\", \")))\n initial_data = re.sub(\"datetime.date\\\\((.*?)\\\\)\",\n \"'\" + datetime.date(dates[0], dates[1], dates[2]).strftime(\"%d %B, %Y\") + \"'\",\n initial_data, 1)\n initial_data = json.loads(initial_data.replace(\"'\", \"\\\"\"))\n\n old_data = json.loads(request.POST[\"prev_data\"].replace(\"'\", \"\\\"\")) if \"prev_data\" in request.POST else None\n data = []\n try:\n if old_data is not None:\n data += old_data\n\n data.append({\n \"short_description\": request.POST[\"short_description\"],\n \"particulars\": request.POST[\"particulars\"],\n \"quantity\": request.POST[\"quantity\"],\n \"unit\": request.POST[\"unit\"],\n \"unit_price\": request.POST[\"unit_price\"],\n \"total_cost\": str(float(request.POST[\"quantity\"]) * float(request.POST[\"unit_price\"]))\n })\n except MultiValueDictKeyError:\n data = old_data\n\n return initial_data, data", "def view_patch():\n\n return jsonify(\n get_dict(\"url\", \"args\", \"form\", \"data\", \"origin\", \"headers\", \"files\", \"json\")\n )", "def test_json_renderer(dummy_request: DummyRequest) -> None:\n tag = Tag(name=\"foö\")\n\n renderer = json_renderer()\n output = renderer(None)(tag, {})\n\n assert json.loads(output) == \"foö\"", "def postrender(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postrender\")" ]
[ "0.651392", "0.5857324", "0.585235", "0.58278984", "0.58079636", "0.57987946", "0.56987077", "0.5682887", "0.566622", "0.56309", "0.5614444", "0.55940974", "0.55289334", "0.5502075", "0.5488988", "0.54605556", "0.54228806", "0.5408678", "0.5402406", "0.539539", "0.53892547", "0.5333411", "0.53330505", "0.53101593", "0.5301314", "0.5294568", "0.52908283", "0.5288689", "0.5245661", "0.52372265" ]
0.6845635
0
Remove a guild's configuration from the database.
async def clear_config(self, ctx, guild_id: int): try: result = await db.Config.filter(guild_id=guild_id).delete() except Exception as e: result = f"{e.__class__.__name__}: {e}" db.config_cache.pop(guild_id) await ctx.send(f"```py\n{result}\n```")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def remove_guild(self, guild_id):\n\n await self.db[str(guild_id)].drop()", "async def on_guild_remove(self, guild):\n self.jfile.data.pop(str(guild.id), None)\n self.jfile.save\n log.guildinfo(\"Removed\", guild.id)", "async def on_guild_remove(self, guild):\n\t\tself.leaderboards.pop(str(guild.id))\n\t\tawait self.update_state()", "async def on_guild_remove(self, guild: discord.Guild):\n with open(\"./config/prefixes.json\", \"r\") as f:\n prefixes = json.load(f)\n\n prefixes.pop(str(guild.id))\n\n with open(\"./config/prefixes.json\", \"w\") as f:\n json.dump(prefixes, f, indent=4)", "def remove_config(name):\n db = dbm.open(config_file, 'c')\n del db[name]\n db.close()", "async def on_guild_remove(self, guild: discord.Guild) -> None:\n logger.info(f'Removed from guild: {guild.name} ({guild.id})')\n\n with self.get_session() as session:\n # Get the associated Guild and mark it as disabled.\n _guild: Guild = session.query(Guild).filter_by(active=True, id=guild.id).first()\n _guild.active = False\n\n # Shut down any current running Period objects if possible.\n period: Period = _guild.current_period\n if period is not None and period.active:\n period.deactivate()", "def remove():\n\n db_remove()", "async def remove_roles(guild):\r\n Rules = Query()\r\n db.remove(Rules.guild == guild.id)\r\n del RULES[guild.id]", "def remove_pgsql_conf(self):\n self.kv.unset(\"pgsql_host\")\n self.kv.unset(\"pgsql_port\")\n self.kv.unset(\"pgsql_db\")\n self.kv.unset(\"pgsql_user\")\n self.kv.unset(\"pgsql_pass\")\n self.kv.flush()", "async def on_guild_remove(guild):\r\n logging.info(\"Left guild %d\", guild.id)\r\n await remove_roles(guild)", "async def websocket_lovelace_delete_config(\n hass: HomeAssistant,\n connection: websocket_api.ActiveConnection,\n msg: dict[str, Any],\n config: LovelaceStorage,\n) -> None:\n await config.async_delete()", "def reset_server_configuration_fixture():\n config_instance = Configuration()\n Path(config_instance.server_config_dir, 'test-gigantum-com.json').unlink()\n Path(config_instance.server_config_dir, 'CURRENT').unlink()\n config_instance._get_redis_client().delete(config_instance.SERVER_CONFIG_CACHE_KEY,\n config_instance.AUTH_CONFIG_CACHE_KEY)\n Path(config_instance.server_data_dir, 'test-gigantum-com').rmdir()", "async def erase(self, guild: discord.Guild):\n role = await self.get_role(guild=guild)\n if role:\n await role.delete()", "def remove(self, db_name):\n path = self.get_path(db_name)\n os.remove(path)", "def clear(self):\r\n del self.__config\r\n self.__config = {}\r\n self.save()", "async def disable(self, ctx: Context, guild: int = None):\n\n if guild is None:\n guild = ctx.guild\n else:\n guild = self.bot.get_guild(guild)\n\n if not guild:\n return await ctx.message.add_reaction(\"⚠\")\n\n if guild.id not in self.active_guilds:\n return await ctx.message.add_reaction(\"⚠\")\n\n self._config_cache.pop(guild.id)\n self.config.delete(f\"guilds:{guild.id}\")\n\n await ctx.message.add_reaction(\"✅\")", "async def save_guild_config(bot, config: GuildConfig):\n bot.db_cache[config.guild_id] = config\n await engine.save(config)", "def db_remove():\n\n db.session.close()\n db.drop_all()\n\n path = current_app.config['SNER_VAR']\n for file_object in os.listdir(path):\n file_object_path = os.path.join(path, file_object)\n if os.path.isdir(file_object_path):\n shutil.rmtree(file_object_path)\n else:\n os.unlink(file_object_path)", "def delete_db(self):\n import os.path\n os.remove(self.filepath)", "def remove_stored_config(self):\n stored_config_filename 
= self.stored_config_filename\n if stored_config_filename.exists():\n stored_config_filename.remove()\n self._stored_cmake_generator = self._stored_config.cmake_generator", "def delete_mute_rule(mute_config_name: str) -> None:\n from google.cloud import securitycenter\n\n client = securitycenter.SecurityCenterClient()\n\n request = securitycenter.DeleteMuteConfigRequest()\n request.name = mute_config_name\n\n client.delete_mute_config(request)\n print(f\"Mute rule deleted successfully: {mute_config_name}\")", "def pytest_unconfigure(config):\n db = Graph(DEFAULT_DB)\n db.delete_all()", "def unconfigure_set(cluster, cursor, name, configuration):\n logger.info('Unconfiguring replication set on %s...', cursor.connection.dsn)\n\n # Drop the transaction queue if it exists.\n logger.info('Dropping transaction queue...')\n cursor.execute(\"SELECT pgq.drop_queue(%s)\", (cluster.get_queue_name(name),))\n\n for table in configuration.tables:\n drop_trigger(cluster, cursor, name, table.schema, table.name)", "def delete_db():\n db.drop_all()", "def delete(self):\r\n return self.connection.delete_launch_configuration(self.name)", "async def remove_bot_channels(self, guild_id):\n api_cog = self.bot.get_cog('RR_API')\n channelInfo = await api_cog.get_channel_info(guild_id)\n\n if not channelInfo:\n print(\"Server Name Not in DB, Can't delete channels. Server: \" + str(guild_id))\n return\n if channelInfo['futurechannelid']:\n await self.bot.get_channel(int(channelInfo['futurechannelid'])).delete()\n if channelInfo['pastchannelid']:\n await self.bot.get_channel(int(channelInfo['pastchannelid'])).delete()\n if channelInfo['lootchannelid']:\n await self.bot.get_channel(int(channelInfo['lootchannelid'])).delete()\n if channelInfo['commandschannelid']:\n await self.bot.get_channel(int(channelInfo['commandschannelid'])).delete()\n if channelInfo['categoryid']:\n await self.bot.get_channel(int(channelInfo['categoryid'])).delete()", "def del_conf(self, path):\n\t\tself.monitor.removePath(path)\n\t\tself.cache.pop(path, None)", "def drop_db(self):\n db_name = self.db.db_url.split('///')[1]\n if os.path.exists(db_name):\n os.remove(db_name)", "def delete():\n\n from slicr.extensions import db\n\n click.echo('deleting database...')\n\n db.drop_all()", "async def deregister_role(self, ctx, role_title: str, guild_id: int = None):\n\n if guild_id is None:\n guild = ctx.guild\n guild_id = guild.id\n else:\n guild = self.bot.get_guild(guild_id)\n if guild is None:\n await ctx.send(\"Couldn't find the guild provided.\")\n return\n\n role_key = \"guild:{}:roles:roles:{}\".format(guild_id, role_title.lower())\n if self.config.exists(role_key):\n self.config.remove(role_key)\n self.config.hdel(\"guild:{}:roles:all:names\".format(guild_id), role_title)\n await ctx.send(\"Role '{}' was deregistered.\".format(role_title))\n else:\n await ctx.send(\"The given role doesn't exist in the specified guild.\")" ]
[ "0.68177557", "0.6752706", "0.66133314", "0.6459288", "0.6397447", "0.63166744", "0.6134505", "0.59588444", "0.5935352", "0.5916336", "0.5829168", "0.5798249", "0.57827187", "0.57659113", "0.5725611", "0.5722255", "0.57106936", "0.5624269", "0.5620863", "0.5614595", "0.561379", "0.56124353", "0.55737627", "0.5550091", "0.553122", "0.55285424", "0.5515787", "0.5481422", "0.54646903", "0.5448416" ]
0.69693434
0
Completely remove a guild's modlog data from the database.
async def clear_modlog(self, ctx, guild_id: int): # remove infractions try: result_1 = await db.Infraction.filter(guild_id=guild_id).delete() except Exception as e: result_1 = f"{e.__class__.__name__}: {e}" # remove user history try: result_2 = await db.History.filter(guild_id=guild_id).delete() except Exception as e: result_2 = f"{e.__class__.__name__}: {e}" try: misc = await db.MiscData.get_or_none(guild_id=guild_id) if misc: if guild_id in db.last_case_id_cache: db.last_case_id_cache.pop(guild_id) misc.last_case_id = 0 await misc.save() result_3 = True else: result_3 = False except Exception as e: result_3 = f"{e.__class__.__name__}: {e}" await ctx.send( f"Infractions:```py\n{result_1}\n```\n" f"History:```py\n{result_2}\n```\n" f"Misc (last_case_id):```py\n{result_3}\n```\n" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def on_guild_remove(self, guild):\n self.jfile.data.pop(str(guild.id), None)\n self.jfile.save\n log.guildinfo(\"Removed\", guild.id)", "async def logremove(self, ctx):\n if await check_if_logged(channel_id=ctx.channel.id):\n c.execute(\"DELETE FROM logging.channels WHERE channelid = %s\", (ctx.channel.id,))\n DBconn.commit()\n await ctx.send(\"> **This channel is no longer being logged.**\")\n else:\n await ctx.send(f\"> **This channel is not being logged.**\")", "async def on_guild_remove(self, guild):\n\t\tself.leaderboards.pop(str(guild.id))\n\t\tawait self.update_state()", "async def remove_guild(self, guild_id):\n\n await self.db[str(guild_id)].drop()", "def remove():\n\n db_remove()", "async def _delete_log(self, ctx):\n try:\n config = self.bot.db['questions'][str(ctx.guild.id)][str(ctx.channel.id)]\n except KeyError:\n return\n\n log_channel = ctx.guild.get_channel(config['log_channel'])\n if not log_channel:\n await hf.safe_send(ctx, \"The original log channel was not found. Please run `;q setup`.\")\n return\n try:\n last_message = None\n async for msg in log_channel.history(limit=5).filter(lambda m: m.author == m.guild.me and m.embeds):\n last_message = msg\n break\n if last_message.embeds[0].title.startswith('⁣List⁣'):\n try:\n await last_message.delete() # replace the last message in the channel (it should be a log)\n except discord.NotFound:\n pass\n except (TypeError, AttributeError, discord.Forbidden):\n return", "async def on_guild_remove(guild):\r\n logging.info(\"Left guild %d\", guild.id)\r\n await remove_roles(guild)", "async def on_guild_remove(self, guild: discord.Guild) -> None:\n logger.info(f'Removed from guild: {guild.name} ({guild.id})')\n\n with self.get_session() as session:\n # Get the associated Guild and mark it as disabled.\n _guild: Guild = session.query(Guild).filter_by(active=True, id=guild.id).first()\n _guild.active = False\n\n # Shut down any current running Period objects if possible.\n period: Period = _guild.current_period\n if period is not None and period.active:\n period.deactivate()", "async def on_member_remove(self, member: discord.Member) -> None:\n\n # retrieve logging information\n if logging_info := (await typed_retrieve_query(\n self.bot.database,\n PartialLoggingAction,\n 'SELECT CHANNEL_ID, BITS FROM LOGGING WHERE GUILD_ID=?',\n (member.guild.id,))\n ):\n await log_to_channel(\n self.bot,\n LoggingActions.USER_LEFT,\n logging_info[0].bits,\n logging_info[0].channel_id,\n f'**{str(member)}** left the guild.'\n )", "async def remove_roles(guild):\r\n Rules = Query()\r\n db.remove(Rules.guild == guild.id)\r\n del RULES[guild.id]", "def test_log_remove_admin(self):\n existing_admin = UserFactory.create()\n admins_relation = AdminsFactory.create(\n project=self.project,\n user=existing_admin)\n log_count_init = LoggerHistory.objects.count()\n admins_relation.delete()\n\n log = LoggerHistory.objects.last()\n log_count = LoggerHistory.objects.count()\n\n self.assertNotEqual(log.user, {\n 'id': str(self.user.id),\n 'display_name': self.user.display_name})\n self.assertEqual(log.project, {\n 'id': str(self.project.id),\n 'name': self.project.name})\n self.assertEqual(log.usergroup, None)\n self.assertEqual(log.category, None)\n self.assertEqual(log.field, None)\n self.assertEqual(log.location, None)\n self.assertEqual(log.observation, None)\n self.assertEqual(log.comment, None)\n self.assertEqual(log.subset, None)\n self.assertEqual(log.action, {\n 'id': 'deleted',\n 'class': 'Admins',\n 'user_id': str(existing_admin.id),\n 
'user_display_name': existing_admin.display_name})\n self.assertEqual(log_count, log_count_init + 1)\n self.assertEqual(log.historical, None)", "async def remove_bot_channels(self, guild_id):\n api_cog = self.bot.get_cog('RR_API')\n channelInfo = await api_cog.get_channel_info(guild_id)\n\n if not channelInfo:\n print(\"Server Name Not in DB, Can't delete channels. Server: \" + str(guild_id))\n return\n if channelInfo['futurechannelid']:\n await self.bot.get_channel(int(channelInfo['futurechannelid'])).delete()\n if channelInfo['pastchannelid']:\n await self.bot.get_channel(int(channelInfo['pastchannelid'])).delete()\n if channelInfo['lootchannelid']:\n await self.bot.get_channel(int(channelInfo['lootchannelid'])).delete()\n if channelInfo['commandschannelid']:\n await self.bot.get_channel(int(channelInfo['commandschannelid'])).delete()\n if channelInfo['categoryid']:\n await self.bot.get_channel(int(channelInfo['categoryid'])).delete()", "def purgeLogs( self ):\n cmd = \"DELETE FROM `ProxyDB_Log` WHERE TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime ) > 15552000\"\n return self._update( cmd )", "def delete_exchange_log_in_db_log():\n log_message = (u\"Loeschen der Exchangelogs von vorgestern\")\n db.write_log_to_db(ac, log_message, \"e\")\n date_log_back = (datetime.datetime.now()\n + datetime.timedelta(days=- 2))\n c_date_log_back = date_log_back.strftime(\"%Y-%m-%d %H:%M\")\n\n sql_command = (\"DELETE FROM EXCHANGE_LOGS WHERE EX_LOG_TIME < '\"\n + c_date_log_back + \"'\")\n\n delete_ok = db.delete_logs_in_db_log(ac, sql_command, log_message)\n if delete_ok is None:\n db.write_log_to_db_a(ac, ac.app_errorslist[5],\n \"x\", \"write_also_to_console\")\n return", "async def on_guild_remove(self, guild: discord.Guild):\n with open(\"./config/prefixes.json\", \"r\") as f:\n prefixes = json.load(f)\n\n prefixes.pop(str(guild.id))\n\n with open(\"./config/prefixes.json\", \"w\") as f:\n json.dump(prefixes, f, indent=4)", "def remove_server(self, server: discord.Server):\n to_exec = \"DELETE FROM server WHERE server_id = %s\"\n self.__cursor.execute(to_exec, (str(server.id),))\n to_exec = \"DELETE FROM channel WHERE server_id = %s\"\n self.__cursor.execute(to_exec, (str(server.id),))\n self.__connection.commit()", "def remove_data(self):\n db.session.delete(self)\n db.session.commit( )", "def drop_log(self, name):\n pass", "def removeAlertFromDb(self):\n sql_query = \"DELETE FROM Alert WHERE symbol='\" + self.symbol + \"' AND cross='\" + self.cross + \"' AND level=\" + str(self.level)\n db.exec_query(sql_query)", "async def remove(self, context):\n try: \n if context.author.is_mod:\n name_to_remove = self.get_params_as_text(context)\n self.tournament.remove(name_to_remove)\n except Exception as error:\n print(error)", "async def admin_remove(self, ctx: MyContext, wormhole: str, user: discord.User):\n if not self.check_wh_exists(wormhole):\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.not-exists\", name=wormhole\n )\n )\n return\n if not self.check_is_admin(wormhole, ctx.author.id):\n await ctx.send(await self.bot._(ctx.guild.id, \"wormhole.error.not-admin\"))\n return\n query = \"SELECT 1 FROM wormhole_admin WHERE name = ? AND admin = ?\"\n isAlready = len(self.bot.db_query(query, (wormhole, user.id))) > 0\n if isAlready:\n query = \"DELETE FROM wormhole_admin WHERE admin = ? 
AND name = ?\"\n self.bot.db_query(query, (user.id, wormhole))\n await ctx.send(\n await self.bot._(ctx.guild.id, \"wormhole.success.admin-removed\")\n )\n else:\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.not-admin\", user=user.name\n )\n )", "def drop_log(self, log_name):\n if log_name in self.logs:\n log = self.get_log(log_name)\n col = \"{}({})\".format(log.descr.replace(' ', '_'), log.units)\n self.data_frame = self.data_frame.drop(col, 1)\n else:\n print(\"no log named {}\".format(log_name))", "async def on_guild_role_delete(self, role):\n channel = self.client.get_channel(serverlogs.getChannel(role.guild.id, \"roles\"))\n if channel is not None:\n await self.log_role(role=role, type='Delete', channel=channel, guild=role.guild)", "def handle_admindellogplug(bot, event):\n if not event.rest: event.missing(\"<plugname>\") ; return\n dellogplug(event.rest)\n event.done()", "def remove_level(teach_id,level):\n query = \"DELETE FROM teacher_levels WHERE teacher_account_id = %s and level = %s;\"\n args = (teach_id, level)\n database.connection.save_data(query, args)", "def remove(name, db):\n database = load(db)\n if name in database:\n del database[name]\n pickle.dump(database, open(db, 'wb'))\n print(\"%s removed from %r\" % (name, db))\n else:\n print(\"no such person %r in %r\" % (name, db))\n sys.exit(-1)", "async def send_modlog(self, guild: discord.Guild, *args, **kwargs):\n\n try:\n manual_id = int(await self.config_get(guild, 'modlog_channel_id'))\n manual_mod_log = discord.utils.get(\n guild.text_channels, id=manual_id)\n except:\n manual_mod_log = None\n\n mod_log = manual_mod_log or discord.utils.get(\n guild.text_channels, name='mod-log')\n\n # don't post to mod-log, couldn't find the channel\n if mod_log is None:\n return\n\n try:\n await self.redis.incr('stats:modlog:sends')\n return await mod_log.send(*args, **kwargs)\n except discord.Forbidden:\n # couldn't post to modlog\n logger.warning(\n 'Couldn\\'t post to modlog for guild %d. No permissions.',\n guild.id)\n pass", "def collector_remove(self, msg, args):\n client = self._connect()\n collector_name = args.pop(0)\n collector = sumologic.Collectors(client)\n collector.delete(collector_name)\n message = 'collector {0} deleted.'.format(collector_name)\n self.send(msg.frm,\n message,\n message_type=msg.type,\n in_reply_to=msg,\n groupchat_nick_reply=True)", "def deletePlayers():\n DB = dbc()\n DB.cursor().execute('DELETE FROM players')\n DB.commit()\n DB.close()", "def delete():\n\n from slicr.extensions import db\n\n click.echo('deleting database...')\n\n db.drop_all()" ]
[ "0.71187824", "0.65700024", "0.6538177", "0.64906514", "0.64783925", "0.6283352", "0.6217291", "0.61778146", "0.60201395", "0.58525705", "0.5699053", "0.56744224", "0.56560785", "0.5587909", "0.5570198", "0.55293846", "0.55292803", "0.5519154", "0.55070776", "0.54834366", "0.5441563", "0.54364175", "0.5428338", "0.53644836", "0.53623354", "0.53454334", "0.5344964", "0.53321767", "0.53168714", "0.5316044" ]
0.73581076
0
an im2col function, transferring an image to patches of size window (length 2 list). the step size is the stride of the sliding window.
def im2col(A, window, stepsize=1): return viewW(np.ascontiguousarray(A), (window[0], window[1])).reshape(-1, window[0] * window[1]).T[:, ::stepsize]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unfold(input, kernel_size, dilation=1, padding=0, stride=1):\n nd_util = utils._ntuple(input.ndimension() - 2)\n out = FunctionLib.apply(\n 'Im2Col',\n input.device,\n [input],\n kernel_shape=nd_util(kernel_size),\n strides=nd_util(stride),\n pads=nd_util(padding),\n dilations=nd_util(dilation))\n return out.flatten_(2)", "def maxpool_im2col(X,kernel_shape,stride):\n \n output_size1 = math.floor((X.shape[1] - kernel_shape[1])/(stride)) + 1\n output_size2 = math.floor((X.shape[2] - kernel_shape[2])/(stride)) + 1\n \n if len(kernel_shape) == 2:\n kernel_shape = torch.reshape(kernel_shape,(1,kernel_shape[0],kernel_shape[1]))\n \n im = {}\n \n for i in range(X.shape[0]):\n Xi = X[i,:,:]\n Xi = torch.reshape(Xi,(-1,Xi.shape[0],Xi.shape[1]))\n X_im2col,imi = im2col(Xi,kernel_shape,stride,im_needed = True,shape_specified = True)\n if i == 0:\n X_im2c = torch.zeros((X.shape[0],X_im2col.shape[0],X_im2col.shape[1]))\n output = torch.zeros((X.shape[0],X_im2col.shape[1]))\n X_im2c[i,:,:] = X_im2col\n # Equivalent indicator dictionary representation created for the purposes of backpropagation\n\n return X_im2c", "def im2col(X, W_shape, pad, stride, dilation=0):\n\tfr, fc, n_in, n_out = W_shape\n\ts, p, d = stride, pad, dilation\n\tn_ex, in_rows, in_cols, n_in = X.shape\n\n\t# zero-pad the input\n\tX_pad, p = pad2D(X, p, W_shape[:2], stride=s, dilation=d)\n\tpr1, pr2, pc1, pc2 = p\n\n\t# change to have channels as the first dim\n\tX_pad = X_pad.transpose(0, 3, 1, 2)\n\n\t# get the indices for im2col\n\tk, i, j = _im2col_indices((n_ex, n_in, in_rows, in_cols), fr, fc, p, s, d)\n\n\tX_col = X_pad[:, k, i, j]\n\tX_col = X_col.transpose(1, 2, 0).reshape(fr * fc * n_in, -1)\n\treturn X_col, p", "def sliding_window(image, patch_size: tuple, step: int, show_debug: bool = False) -> list:\n if isinstance(image, Image.Image):\n image = np.array(image)\n\n if step == 0:\n h, w = image.shape[0], image.shape[1] # 720, 1280\n w_iter, h_iter = w // patch_size[0], h // patch_size[1]\n crop_image_list = []\n for i in range(h_iter):\n for j in range(w_iter):\n bbox = (i*patch_size[0], j*patch_size[0],\n (i+1)*patch_size[0], (j+1)*patch_size[0])\n crop_image = image[bbox[0]:bbox[2], bbox[1]: bbox[3]]\n if show_debug:\n crop_image = Image.fromarray(crop_image)\n crop_image.save(f\"/data/jiangmingchao/patches/{i}.png\")\n cv2.rectangle(image,\n (i*patch_size[0], j*patch_size[0]),\n ((i+1)*patch_size[0], (j+1)*patch_size[0]),\n (255, 255, 0),\n 2,\n )\n\n crop_image_list.append(Image.fromarray(crop_image))\n\n if show_debug:\n cv2.imwrite(\"1.jpg\", image)\n\n else:\n h, w = image.shape[0], image.shape[1]\n step_w_iter, step_h_iter = (w - patch_size[0]) // step, (h - patch_size[0]) // step\n crop_image_list = []\n for i in range(step_h_iter):\n for j in range(step_w_iter):\n bbox = (i * step, j * step, patch_size[0] + i * step, patch_size[1] + j * step)\n crop_image = image[bbox[0]: bbox[2], bbox[1]: bbox[3]]\n print(crop_image.shape)\n crop_image_list.append(Image.fromarray(crop_image))\n\n return crop_image_list", "def imageRGB_as_strided(img, kernel_size=224, stride=32):\n for ch in range(3):\n channel = img[:,:,ch]\n new_channel, x0, y0 = image2d_as_strided(channel, kernel_size=kernel_size, stride=stride)\n if ch == 0:\n new_img = np.zeros(new_channel.shape + (3,), dtype=np.uint8)\n new_img[:, :, :, :, 0] = new_channel\n else:\n new_img[:, :, :, :, ch] = new_channel\n\n return new_img, x0, y0", "def img_to_patches(img, win, stride=1):\n k = 0\n endc = img.shape[0]\n endw = img.shape[1]\n endh = img.shape[2]\n if 
endw<win or endh<win:\n return np.zeros([endc,win,win,0])\n patch = img[:, 0:endw-win+0+1:stride, 0:endh-win+0+1:stride]\n total_pat_num = patch.shape[1] * patch.shape[2]\n res = np.zeros([endc, win*win, total_pat_num], np.float32)\n for i in range(win):\n for j in range(win):\n patch = img[:, i:endw-win+i+1:stride, j:endh-win+j+1:stride]\n res[:, k, :] = np.array(patch[:]).reshape(endc, total_pat_num)\n k = k + 1\n return res.reshape([endc, win, win, total_pat_num])", "def divide_image_to_patches(img, patch_size, stride=None):\n\n stride = stride or patch_size\n if not 0 < stride <= patch_size:\n raise ValueError(\n 'stride should be positive and smaller than or equal to patch_size')\n\n if len(img.shape) == 2: # this is a mask\n img = np.expand_dims(img, -1)\n\n height, width, n_channels = img.shape\n\n # Sometimes we need to extend the original image so that the sliding window\n # won't move out of the image\n ext_height, ext_width = _get_extended_image_size(\n height, width, patch_size, stride)\n ext_img = np.zeros((ext_height, ext_width, n_channels))\n ext_img[:height, :width] = img\n\n x = []\n\n for i in range(0, ext_height - patch_size + 1, stride):\n for j in range(0, ext_width - patch_size + 1, stride):\n x.append(ext_img[i:i + patch_size, j:j + patch_size, :])\n\n return np.array(x).astype('uint8')", "def cut_image_strided(image, new_size):\n bands = image.shape[0]\n new_size_y, new_size_x = new_size\n old_size_y = image.shape[1]\n old_size_x = image.shape[2]\n nr_images_x = old_size_x // new_size[1]\n nr_images_y = old_size_y // new_size[0]\n if old_size_x % new_size_x != 0 or old_size_y % new_size_y != 0:\n print(\"The patch size is not a full multiple of the complete patch size\")\n\n return as_strided(image, shape=(nr_images_y, nr_images_x, bands, new_size_y, new_size_x),\n strides=(image.strides[1] * new_size_y, image.strides[2] * new_size_x, image.strides[0],\n image.strides[1], image.strides[2]))", "def patches_to_img(patches, stride, img_shape):\r\n if len(img_shape) > 2:\r\n channels = [patches_to_img(patches[:, :, :, i], stride, img_shape[:2]) for i in range(3)]\r\n return np.concatenate(channels, axis=2)\r\n\r\n h, w = img_shape\r\n patch_size = patches.shape[1]\r\n n_stride = (h - patch_size) // stride + 1\r\n\r\n assert h == w, \"only squared image are accepted\"\r\n assert (h - patch_size) % stride == 0, \"The stride must be adapted on image and patch size\"\r\n assert len(patches) == n_stride ** 2, \"They must be the right number of patches per image\"\r\n\r\n pred_final = np.zeros(img_shape + (1,)) # Accumulator for the final prediction\r\n pred_normalizer = np.zeros(img_shape + (1,)) # Counter of the patch per prediction per pixel\r\n\r\n for i in range(n_stride):\r\n for j in range(n_stride):\r\n x_from, x_to = i * stride, i * stride + patch_size\r\n y_from, y_to = j * stride, j * stride + patch_size\r\n idx = i * n_stride + j\r\n pred_final[x_from: x_to, y_from: y_to] += patches[idx].reshape(patch_size, patch_size, 1)\r\n pred_normalizer[x_from: x_to, y_from: y_to] += 1\r\n return pred_final / pred_normalizer", "def col2im(X_col, X_shape, W_shape, pad, stride, dilation=0):\n\tif not(isinstance(pad, tuple) and len(pad) == 4):\n\t\traise TypeError(\"pad must be a 4-tuple, but got: {}\".format(pad))\n\n\ts, d = stride, dilation\n\tpr1, pr2, pc1, pc2 = pad\n\tfr, fc, n_in, n_out = W_shape\n\n\tX_pad = np.zeros((n_ex, n_in, in_rows+pr1+pr2, in_cols+pc1+pc2))\n\tk, i, j = _im2col_indices((n_ex, n_in, in_rows, in_cols), fr, fc, pad, s, d)\n\n\tX_col_reshaped = 
X_col.reshape(n_in * fr * fc, -1, n_ex)\n\tX_col_reshaped = X_col_reshaped.transpose(2, 0, 1)\n\n\tnp.add.at(X_pad, (slice(None), k, i, j), X_col_reshaped)\n\n\tpr2 = None if pr2 == 0 else -pr2\n\tpc2 = None if pc2 == 0 else -pc2\n\treturn X_pad[:, :, pr1:pr2, pc1:pc2]", "def conv_2D(img,kernel,stride=1):\n\n m,n = img.shape\n r,c = kernel.shape\n\n kernel = np.flip(kernel,axis=1)\n kernel = np.flip(kernel,axis=0)\n\n c_m, c_n = int(np.ceil((m-r+1)/stride)), int(np.ceil((n-c+1)/stride))\n img_conv = np.zeros((c_m,c_n),dtype=float)\n\n for i,j in it.product(range(c_m),range(c_n)):\n img_conv[i,j] = (img[i*stride:i*stride+r,j*stride:j*stride+c] * kernel).sum()\n\n return img_conv", "def _image_to_column(\n self,\n input_data: torch.Tensor,\n kernel_h: int,\n kernel_w: int,\n stride: int = 1,\n padding=0,\n ) -> torch.Tensor:\n batch, channel, height, width = input_data.size()\n\n output_h = (height + 2 * padding - kernel_h) // stride + 1\n output_w = (width + 2 * padding - kernel_w) // stride + 1\n\n output = torch.zeros(batch, channel, kernel_h, kernel_w, output_h, output_w)\n\n padded_input = nn.functional.pad(\n input_data, [padding, padding, padding, padding]\n )\n\n for y in range(kernel_h):\n for x in range(kernel_w):\n output[:, :, y, x, :, :] = padded_input[\n :,\n :,\n y : y + stride * output_h : stride,\n x : x + stride * output_w : stride,\n ]\n\n return (\n output.permute(0, 4, 5, 1, 2, 3).reshape(batch * output_h * output_w, -1),\n output_h,\n output_w,\n )", "def im2col_indices(x: ndarray, field_height: int, field_width: int, padding: int, stride: int) -> ndarray:\n # Zero-pad the input\n p = padding\n x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')\n\n k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding,\n stride)\n\n cols = x_padded[:, k, i, j]\n C = x.shape[1]\n cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)\n return cols", "def get_image_patches_by_sliding_window(img, stepSize, window_size, overlapping):\n # read the image and define the stepSize and window size\n # (width,height)\n if overlapping == 100:\n return None\n # generation step size for overlapping\n overlapping = 100 - overlapping\n stepSize = int(stepSize * (overlapping / 100))\n\n patches = []\n image = img # your image path\n tmp = image # for drawing a rectangle\n (w_width, w_height) = (window_size, window_size) # window size\n for x in range(0, image.shape[1] - w_width, stepSize):\n for y in range(0, image.shape[0] - w_height, stepSize):\n window = image[x:x + w_width, y:y + w_height, :]\n # add the window into your patches array.\n patches.append(window)\n\n return patches", "def patches_to_images(patches, stride, img_shape):\r\n h = img_shape[0]\r\n patch_size = patches.shape[1]\r\n n_stride = (h - patch_size) // stride + 1\r\n assert len(patches) % n_stride ** 2 == 0, \"They must be the right number of patches per image\"\r\n\r\n n_images = len(patches) // (n_stride ** 2)\r\n\r\n images = []\r\n for i in range(n_images):\r\n n_patches = n_stride ** 2\r\n img = patches_to_img(patches[i * n_patches:(i + 1) * n_patches], stride, img_shape)\r\n images.append(img)\r\n\r\n return np.array(images)", "def combine_patches_to_image(y_pred, img, stride):\n\n counter = 0\n height, width = img.shape[:2]\n output_size = y_pred.shape[1]\n\n # The last channel is the number of overlapping patches for a given pixel,\n # used for averaging predictions from multiple windows.\n combined = np.zeros((height, width, y_pred.shape[-1] + 1))\n\n for i in 
range(0, height - output_size + 1, stride):\n for j in range(0, width - output_size + 1, stride):\n patch = combined[i:i + output_size, j:j + output_size, :-1]\n overlaps = combined[i:i + output_size, j:j + output_size, -1:]\n patch = (patch * overlaps + y_pred[counter]) / (overlaps + 1)\n combined[i:i + output_size, j:j + output_size, :-1] = patch\n overlaps += 1.\n counter += 1\n\n return combined[:height, :width, :-1]", "def _convert_chunk_to_tiles(\n feature_data: np.array, loss_window_radius: int, window_radius: int\n) -> Tuple[np.array, np.array]:\n\n output_array = []\n col_index = []\n for _col in range(0, feature_data.shape[1], loss_window_radius * 2):\n col_index.append(min(_col, feature_data.shape[1] - window_radius * 2))\n output_array.append(feature_data[:, col_index[-1] : col_index[-1] + window_radius * 2, :])\n output_array = np.stack(output_array)\n output_array = np.reshape(\n output_array, (output_array.shape[0], output_array.shape[1], output_array.shape[2], feature_data.shape[-1])\n )\n\n col_index = np.array(col_index)\n\n return output_array, col_index", "def dense_patch_slices(image_size, patch_size, scan_interval):\n num_spatial_dims = len(image_size)\n if num_spatial_dims not in (2, 3):\n raise ValueError(\"image_size should has 2 or 3 elements\")\n patch_size = get_valid_patch_size(image_size, patch_size)\n scan_interval = ensure_tuple_size(scan_interval, num_spatial_dims)\n\n scan_num = [\n int(math.ceil(float(image_size[i]) / scan_interval[i])) if scan_interval[i] != 0 else 1\n for i in range(num_spatial_dims)\n ]\n slices = []\n if num_spatial_dims == 3:\n for i in range(scan_num[0]):\n start_i = i * scan_interval[0]\n start_i -= max(start_i + patch_size[0] - image_size[0], 0)\n slice_i = slice(start_i, start_i + patch_size[0])\n\n for j in range(scan_num[1]):\n start_j = j * scan_interval[1]\n start_j -= max(start_j + patch_size[1] - image_size[1], 0)\n slice_j = slice(start_j, start_j + patch_size[1])\n\n for k in range(0, scan_num[2]):\n start_k = k * scan_interval[2]\n start_k -= max(start_k + patch_size[2] - image_size[2], 0)\n slice_k = slice(start_k, start_k + patch_size[2])\n slices.append((slice_i, slice_j, slice_k))\n else:\n for i in range(scan_num[0]):\n start_i = i * scan_interval[0]\n start_i -= max(start_i + patch_size[0] - image_size[0], 0)\n slice_i = slice(start_i, start_i + patch_size[0])\n\n for j in range(scan_num[1]):\n start_j = j * scan_interval[1]\n start_j -= max(start_j + patch_size[1] - image_size[1], 0)\n slice_j = slice(start_j, start_j + patch_size[1])\n slices.append((slice_i, slice_j))\n return slices", "def im2col_indices(x, field_height, field_width, padding=1, stride=1):\n # Zero-pad the input\n p = padding\n x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')\n\n k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding,\n stride)\n\n cols = x_padded[:, k, i, j]\n C = x.shape[1]\n cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)\n return cols", "def img_to_patches(img, patch_size, stride, overlapping=True):\r\n h, w, _ = img.shape\r\n\r\n assert h == w, 'height should be equal to width ({} != {})'.format(h, w)\r\n assert overlapping or patch_size % stride == 0, 'cannot have non overlapping patches with {} % {} != 0' \\\r\n .format(patch_size, stride)\r\n assert (h - patch_size) % stride == 0, 'height - patch_size should be dividable by stride but {} % {} != 0' \\\r\n .format(h - patch_size, stride)\r\n\r\n n_stride = (h - patch_size) // stride + 1\r\n patches = 
[]\r\n for i in range(n_stride):\r\n if overlapping or i * stride % patch_size == 0:\r\n for j in range(n_stride):\r\n if overlapping or j * stride % patch_size == 0:\r\n patch = img[i * stride: i * stride + patch_size, j * stride: j * stride + patch_size]\r\n patches.append(patch)\r\n return np.array(patches)", "def _im2col_indices(X_shape, fr, fc, p, s, d=0):\n\tpr1, pr2, pc1, pc2 = p\n\tn_ex, n_in, in_rows, in_cols = X_shape\n\n\t# adjust effective filter size to account for dilation\n\t_fr, _fc = fr * (d+1) - d, fc * (d+1) - d\n\tout_rows = (in_rows + pr1 + pr2 - _fr) // s + 1\n\tout_cols = (in_cols + pc1 + pc2 - _fc) // s + 1\n\n\tif any([out_rows <=0, out_cols<=0]):\n\t\traise ValueError(\n\t\t\t'dimension mismatch during convolution: '\n\t\t\t'out_rows = {}, out_cols = {}'.format(out_rows, out_cols)\n\t\t\t)\n\n\t# i1/j1: row/col templates\n\t# i0/j0: n. copies (len) and offsets (values) for row/col templates\n\ti0 = np.repeat(np.arange(fr), fc)\n\ti0 = np.tile(i0, n_in) * (d + 1)\n\ti1 = s * np.repeat(np.arange(out_rows), out_cols)\n\tj0 = np.tile(np.arange(fc), fr*n_in) * (d + 1)\n\tj1 = s * np.tile(np.arange(out_cols), out_rows)\n\n\t# i.shape = (fr * fc * n_in, out_height * out_width)\n\t# j.shape = (fr * fc * n_in, out_height * out_width)\n\t# k.shape = (fr * fc * n_in, 1)\n\ti = i0.reshape(-1, 1) + i1.reshape(1, -1)\n\tj = j0.reshape(-1, 1) + j1.reshape(1, -1)\n\tk = np.repeat(np.arange(n_in), fr* fc).reshape(-1, 1)\n\treturn k, i, j", "def patches_sampling(self, image, patch_size, stride):\n h, w = image.shape[2:4]\n patches = []\n for i in range(0, h - patch_size + 1, stride):\n for j in range(0, w - patch_size + 1, stride):\n patches.append(image[:, :, i:i + patch_size, j:j + patch_size])\n patches = torch.cat(patches, dim=0).to(self.device)\n return patches", "def divide_image_to_patches(img, patch_size):\n\n assert len(img.shape) == 3 and img.shape[-1] == 3\n\n height, width, n_channels = img.shape\n coordinates = _get_top_left_coordinates(height, width, patch_size)\n\n patches = []\n\n for top, left in coordinates:\n patches.append(img[top:top + patch_size, left:left + patch_size])\n\n return np.array(patches).astype('uint8')", "def get_patches(image_mat, stride):\n window_shape = (128, 128, 3)\n windows = view_as_windows(image_mat, window_shape, step=stride)\n patches = []\n for m in range(windows.shape[0]):\n for n in range(windows.shape[1]):\n patches += [windows[m][n][0]]\n return patches", "def MyConvolve(img, ff):\n result = np.zeros(img.shape)\n x_len = img.shape[0]\n y_len = img.shape[1]\n\n ff = np.flipud(np.fliplr(ff)) # Flip filters\n\n # Apply filter to pixels\n for x in range(1, x_len - 1):\n for y in range(1, y_len - 1):\n # Left column\n top_left = img[x - 1, y - 1] * ff[0, 0]\n left = img[x, y - 1] * ff[1, 0]\n btm_left = img[x + 1, y - 1] * ff[2, 0]\n # Middle column\n top = img[x - 1, y] * ff[0, 1]\n middle = img[x, y] * ff[1, 1]\n btm = img[x + 1, y] * ff[2, 1]\n # Right column\n top_right = img[x - 1, y + 1] * ff[0, 2]\n right = img[x, y + 1] * ff[1, 2]\n btm_right = img[x + 1, y + 1] * ff[2, 2]\n\n result[x, y] = top_left + left + btm_left + top + middle + btm + top_right + right + btm_right\n\n return result", "def im2col_indices_(x, field_height, field_width, padding=1, stride=1):\n # Zero-pad the input\n p = padding\n x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')\n\n k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding, stride)\n\n cols = x_padded[:, k, i, j]\n C = x.shape[1]\n cols = cols.transpose(1, 
2, 0).reshape(field_height * field_width * C, -1)\n return cols", "def dense_patch_slices(\n image_size: Sequence[int], patch_size: Sequence[int], scan_interval: Sequence[int], return_slice: bool = True\n) -> list[tuple[slice, ...]]:\n num_spatial_dims = len(image_size)\n patch_size = get_valid_patch_size(image_size, patch_size)\n scan_interval = ensure_tuple_size(scan_interval, num_spatial_dims)\n\n scan_num = []\n for i in range(num_spatial_dims):\n if scan_interval[i] == 0:\n scan_num.append(1)\n else:\n num = int(math.ceil(float(image_size[i]) / scan_interval[i]))\n scan_dim = first(d for d in range(num) if d * scan_interval[i] + patch_size[i] >= image_size[i])\n scan_num.append(scan_dim + 1 if scan_dim is not None else 1)\n\n starts = []\n for dim in range(num_spatial_dims):\n dim_starts = []\n for idx in range(scan_num[dim]):\n start_idx = idx * scan_interval[dim]\n start_idx -= max(start_idx + patch_size[dim] - image_size[dim], 0)\n dim_starts.append(start_idx)\n starts.append(dim_starts)\n out = np.asarray([x.flatten() for x in np.meshgrid(*starts, indexing=\"ij\")]).T\n if return_slice:\n return [tuple(slice(s, s + patch_size[d]) for d, s in enumerate(x)) for x in out]\n return [tuple((s, s + patch_size[d]) for d, s in enumerate(x)) for x in out] # type: ignore", "def dense_conv_forward_2d(inp_image: np.ndarray, kernel: np.ndarray, stride, padding):\n assert len(inp_image.shape) == 3, 'single 2D images only. No batches.'\n assert len(kernel.shape) == 4\n\n height, width, colors = inp_image.shape\n kernel_height, kernel_width, colors_in, colors_out = kernel.shape\n kernel_stride_x, kernel_stride_y = stride\n kernel_padding_x, kernel_padding_y = padding\n i_f = int(np.floor(kernel_width / 2.0))\n j_f = int(np.floor(kernel_height / 2.0))\n\n out_pixels = np.zeros((height, width, colors_out))\n for y in range(kernel_padding_y, height - kernel_padding_y,\n kernel_stride_y): # todo: add kernel_padding_y and kernel_stride_y fix to glsl\n for x in range(kernel_padding_x, width - kernel_padding_x,\n kernel_stride_x): # todo: add kernel_padding_x and kernel_stride_x fix to glsl\n output_select = [y, x, 0]\n input_select = np.asarray(\n [y * kernel_stride_y, x * kernel_stride_x, 0]\n )\n for i in range(-np.int(np.floor(kernel_width / 2.0)), np.int(np.ceil(kernel_width / 2.0))):\n for j in range(-np.int(np.floor(kernel_height / 2.0)), np.int(np.ceil(kernel_height / 2.0))):\n in_pixel_select = np.copy(input_select)\n in_pixel_select += [j, i, 0]\n for co in range(colors_out):\n output_select[2] = co\n for ci in range(colors_in):\n in_pixel_select[2] = ci\n kernel_select = np.asarray([j_f + j, i_f + i, ci, co])\n\n out_pixels[tuple(output_select)] += kernel[tuple(kernel_select)] * inp_image[\n tuple(in_pixel_select)]\n return out_pixels", "def conv2d_output_shape(height, width, filter_height, filter_width, out_channels, stride):\n return (out_channels, ((height - filter_height) / stride + 1), ((width - filter_width) / stride + 1))", "def split_image(img):\n xs = [] # positions\n ys = [] # colors\n for row_i in range(img.shape[0]):\n for col_i in range(img.shape[1]):\n xs.append([row_i, col_i])\n ys.append(img[row_i, col_i])\n \n xs = np.array(xs)\n ys = np.array(ys)\n return xs, ys" ]
[ "0.6645364", "0.6491519", "0.62999904", "0.60910404", "0.604907", "0.58832616", "0.5740589", "0.5653002", "0.5623746", "0.5609291", "0.55689853", "0.55332327", "0.5455916", "0.5418624", "0.5417081", "0.5379944", "0.5376328", "0.53699076", "0.53666115", "0.5354386", "0.5336", "0.5329988", "0.531126", "0.5297998", "0.5291694", "0.52503264", "0.523353", "0.5231789", "0.5213743", "0.52058804" ]
0.71692234
0
sample N psize-sized patches from images after standardising them.
def sample_patches(images, psize=(8, 8), n=10000, remove_mean=True):
    d = psize[0] * psize[1]
    patches = np.zeros((d, n))
    standardized = grayscale_and_standardize(images, remove_mean)

    shapes = []
    for pic in standardized:
        shapes.append(pic.shape)

    rand_pic_num = np.random.randint(0, len(standardized), n)
    rand_x = np.random.rand(n)
    rand_y = np.random.rand(n)

    for i in range(n):
        pic_id = rand_pic_num[i]
        pic_shape = shapes[pic_id]
        x = int(np.ceil(rand_x[i] * (pic_shape[0] - psize[1])))
        y = int(np.ceil(rand_y[i] * (pic_shape[1] - psize[0])))

        patches[:, i] = np.reshape(np.ascontiguousarray(
            standardized[pic_id][x:x + psize[0], y:y + psize[1]]), d)

    return patches
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample_patches(images, npatches, patch_sz):\n\tnimages, nrows, ncols = images.shape\n\timg_index = np.random.randint(0, nimages, npatches)\n\trow_index = np.random.randint(0, nrows-patch_sz, npatches)\n\tcol_index = np.random.randint(0, ncols-patch_sz, npatches)\n\tpatches = np.empty((npatches, patch_sz, patch_sz))\n\tfor i, (img, row, col) in enumerate(zip(img_index, row_index, col_index)):\n\t\tpatches[i] = images[img, row:row+patch_sz, col:col+patch_sz]\n\treturn patches", "def _sample_patches(imgs, \n labelimgs, \n patch_size, \n patchgroup, \n padding_mode, \n padding_values, \n ignore_labels,\n startidx=0):\n samplelist = []\n \n # number of bands should be constant, therefore the dimensionality can be read from any \n # sub img\n bands = imgs[0].shape[-1]\n\n # calculate remapping for labels when removing `ignore_labels`\n # flatten labelimgs and convert to numpy array to use np.unique function on it\n flattened_labelimgs = np.concatenate([labelimg.reshape(-1) for labelimg in labelimgs])\n max_label = np.unique(flattened_labelimgs).max()\n remaining_labels = np.setdiff1d(np.arange(max_label+1), ignore_labels)\n label_remap = np.full((max_label+1), -1)\n for i, val in enumerate(remaining_labels):\n label_remap[val] = i\n\n valid_sample_count = 0\n for labelimg in labelimgs:\n valid_sample_count += np.invert(np.isin(labelimg, ignore_labels)).sum()\n print(f'Extracting {valid_sample_count} valid samples...')\n \n if ('data' in patchgroup) and ('labels' in patchgroup):\n # resize existing dataset to append patches from test set\n patchgroup['data'].resize((patchgroup['data'].shape[0] + valid_sample_count), axis=0)\n patchgroup['labels'].resize((patchgroup['labels'].shape[0] + valid_sample_count), axis=0)\n else:\n patchgroup.create_dataset('data', (valid_sample_count, patch_size, patch_size, bands)\n , chunks=(1, patch_size, patch_size, bands)\n , maxshape=(None, patch_size, patch_size, bands)\n , dtype=imgs[0].dtype) # datatype should be the same for all imgs\n patchgroup.create_dataset('labels', (valid_sample_count,1)\n , chunks=True, maxshape=(None, 1)\n , dtype=labelimgs[0].dtype) # datatype should be the same for all labelimgs\n \n idx = startidx\n with tqdm(total=valid_sample_count) as pbar:\n for img, labelimg in zip(imgs, labelimgs):\n\n # pad along spatial axes\n margin = int((patch_size - 1) / 2)\n X = np.pad(img, ((margin, margin), (margin, margin), (0,0)), \n mode=padding_mode, constant_values=padding_values) \n\n # split patches\n for r in range(margin, X.shape[0] - margin):\n for c in range(margin, X.shape[1] - margin):\n patchlabel = labelimg[r-margin, c-margin]\n\n # do not create a sample for 'ignore_labels'\n if patchlabel in ignore_labels:\n continue\n else :\n # correct label\n patchlabel = label_remap[patchlabel]\n\n patch = X[r - margin:r + margin + 1, c - margin:c + margin + 1]\n # store sample in hdf file\n patchgroup['data'][idx] = patch\n patchgroup['labels'][idx] = patchlabel\n\n # update\n idx += 1\n pbar.update(1)\n\n patchgroup.attrs['patch_size'] = patch_size\n patchgroup.attrs['padding_mode'] = padding_mode\n patchgroup.attrs['padding_values'] = padding_values\n patchgroup.attrs['ignore_labels'] = ignore_labels\n\n return valid_sample_count", "def extract_patch(n, patch_size, imgs):\n # Extract patches from input images\n img_patches = [img_crop(imgs[i], patch_size, patch_size) for i in range(n)]\n #gt_patches = [img_crop(gt_imgs[i], patch_size, patch_size) for i in range(n)]\n\n # Linearize list of patches\n img_patches = 
np.asarray([img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))])\n #gt_patches = np.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n \n return img_patches #,gt_patches", "def patches_sampling(self, image, patch_size, stride):\n h, w = image.shape[2:4]\n patches = []\n for i in range(0, h - patch_size + 1, stride):\n for j in range(0, w - patch_size + 1, stride):\n patches.append(image[:, :, i:i + patch_size, j:j + patch_size])\n patches = torch.cat(patches, dim=0).to(self.device)\n return patches", "def noise_patch(patch,prc=0.2): #X\n npatch = patch.copy().reshape(-1,3)\n height,width = patch.shape[:2]\n nb =int(prc*height*width)\n npatch[np.random.randint(0,height*width,nb),:]=DEAD\n return npatch.reshape(height,width,3)", "def get_random_patches(images, n_patches, patch_x, patch_y):\n n_images = images.shape[0]\n img_c = images.shape[1]\n img_r = images.shape[2]\n r_images = rng.randint(n_images, size = n_patches)\n r_x = rng.randint(img_c-patch_x+1, size = n_patches)\n r_y = rng.randint(img_r-patch_y+1, size = n_patches)\n patches_list = []\n for image_i, x_i, y_i in zip(r_images, r_x, r_y):\n patch_i = images[image_i, x_i:(x_i + patch_x), y_i:(y_i + patch_y)]\n patches_list.append(patch_i)\n \n patches_np = np.asarray(patches_list, dtype=images.dtype) \n return patches_np", "def load_pixel_sparse(n_imgs=5, n_patches=100000, patch_x=4, patch_y=4):\n #n = np.random.randn(n_patches, patch_x*patch_y)\n #patches_unnorm = n**3\n #patches = patches_unnorm / np.std(patches_unnorm)\n patches = np.random.laplace(size=(n_patches, patch_x*patch_y))\n #patches = np.random.standard_cauchy(size=(n_patches, patch_x*patch_y))\n W_X = np.eye(patch_x*patch_y)\n # DEBUG why is this different from what's expected of load_van_hateren\n #return patches, W_X\n return patches", "def get_patches(rimage, gimage, mimage, num_patches=48, patch_size=80, patch_stride=80):\n num_FSpatches = 16\n num_RApatches = 32\n rpatches = []\n gpatches = []\n mpatches = []\n #R_imgs = ((rimage+1)*127.5).astype(np.uint8)\n #scipy.misc.imsave('results'+'/' + 'rainy.jpg', R_imgs[0,:,:,:])\n for i in range(int(math.sqrt(num_FSpatches))):\n for j in range(int(math.sqrt(num_FSpatches))):\n point_x = patch_stride*i\n point_y = patch_stride*j\n rpatch = rimage[:,(point_x):(point_x+patch_size), (point_y):(point_y+patch_size),:]\n #print(point_x)\n #print(point_y)\n #print(point_y+patch_size)\n #P_imgs = ((rpatch+1)*127.5).astype(np.uint8)\n #scipy.misc.imsave('results'+'/' + 'patch_%d_%d.jpg'%(i,j), P_imgs[0,:,:,:])\n #print(rpatch.shape)\n rpatches.append(rpatch)\n #print(np.array(rpatches).shape)\n gpatch = gimage[:,(point_x):(point_x+patch_size), (point_y):(point_y+patch_size),:]\n gpatches.append(gpatch)\n mpatch = mimage[:,(point_x):(point_x+patch_size), (point_y):(point_y+patch_size),:]\n mpatches.append(mpatch)\n\n for k in range(num_RApatches):\n point1 = random.randint(0,240) # 116 comes from the image source size (320) - the patch dimension (80)\n point2 = random.randint(0,240)\n #rpatch = tf.image.crop_to_bounding_box(rimage, point1, point2, patch_size, patch_size)\n rpatch = rimage[:,(point1):(point1+patch_size), (point2):(point2+patch_size),:]\n #P_imgs = ((rpatch+1)*127.5).astype(np.uint8)\n #scipy.misc.imsave('results'+'/' + 'patch_%d.jpg'%i, P_imgs[0,:,:,:])\n #print(rpatch.shape)\n rpatches.append(rpatch)\n #print(np.array(rpatches).shape)\n gpatch = gimage[:,(point1):(point1+patch_size), (point2):(point2+patch_size),:]\n 
gpatches.append(gpatch)\n mpatch = mimage[:,(point1):(point1+patch_size), (point2):(point2+patch_size),:]\n mpatches.append(mpatch)\n\n rpatches = np.array(rpatches)\n rpatches = np.squeeze(rpatches)\n #print(rpatches.shape)\n gpatches = np.array(gpatches)\n gpatches = np.squeeze(gpatches)\n mpatches = np.array(mpatches)\n mpatches = np.squeeze(mpatches)\n #assert rpatches.get_shape().dims == [num_patches, patch_size, patch_size, 3]\n assert rpatches.shape == (num_patches, patch_size, patch_size, 3)\n return rpatches, gpatches, mpatches", "def patchGenerator(gen, patch_size=128, patch_batch_size=1):\n \n for imgs, masks in gen: # For each batch\n img_list = []\n mask_list = []\n for i in range(0, imgs.shape[0]): # For each image in a batch\n patch_x = patchify(imgs[i], (patch_size, patch_size, imgs[i].shape[-1]), step=patch_size) # split image into 4*4 small 128*128 patches.\n img_p = patch_x.reshape(-1, *patch_x.shape[-3:])\n img_list.append(img_p)\n\n mask_y = patchify(masks[i], (patch_size, patch_size, 1), step=patch_size) # split mask into 4*4 small 128*128 patches.\n mask_p = mask_y.reshape(-1, *mask_y.shape[-3:])\n mask_list.append(mask_p)\n \n if (patch_batch_size == 1):\n for j in range(0, img_p.shape[0]): # For each patch in a image\n yield img_p[j][np.newaxis, :], mask_p[j][np.newaxis, :]\n \n if (patch_batch_size > 1):\n image_patches = np.concatenate(img_list)\n mask_patches = np.concatenate(mask_list)\n patch_batch_counter = 0\n for idx in range(0, patch_batch_size):\n image_patch_batch = image_patches[patch_batch_counter:patch_batch_counter + patch_batch_size]\n mask_patch_batch = mask_patches[patch_batch_counter:patch_batch_counter + patch_batch_size]\n shuffled_images, shuffled_masks = randomize(image_patch_batch, mask_patch_batch)\n yield shuffled_images, shuffled_masks", "def get_identical_patches(imgs, patch_size):\n ih, iw = imgs[0].shape[:2]\n tp = patch_size\n ix = np.random.randint(0, iw - patch_size)\n iy = np.random.randint(0, ih - patch_size)\n imgs = []\n for i in range(len(imgs)):\n imgs.append(imgs[i][iy:iy + tp, ix:ix + tp, :])\n return imgs", "def reconstructions_sample(self, n=()):\n self.assert_sampled()\n return [[j.sample(sample_shape=n, seed=self.randint).numpy()\n for j in i]\n for i in self._reconstructions]", "def oversample_number_of_particles(\n self, mult_factor: int, noise_frac=0.0):\n mult_factor = int(mult_factor)\n if mult_factor <= 1:\n return\n npart = self.num_part\n self.de = _np.tile(self.de, mult_factor)\n self.ss = _np.tile(self.ss, mult_factor)\n if not _np.math.isclose(noise_frac, 0):\n de_noise = self.de[:, :npart].std(axis=1) * noise_frac\n ss_noise = self.ss[:, :npart].std(axis=1) * noise_frac\n self.de[:, npart:] += de_noise[:, None] * _np.random.randn(\n self.num_buns, npart*(mult_factor-1))\n self.ss[:, npart:] += ss_noise[:, None] * _np.random.randn(\n self.num_buns, npart*(mult_factor-1))", "def resample_photons(self, srcs, verbose=False):\n # first, clear out old sample images\n for src in srcs:\n src.clear_sample_images()\n\n # generate per-source sample image patch for each fits image in\n # this field. keep track of photons due to noise\n noise_sums = {}\n for band, img in self.img_dict.iteritems():\n if verbose:\n print \" ... 
resampling band %s \" % band\n samp_imgs, noise_sum = \\\n cel_mcmc.sample_source_photons_single_image_cython(\n img, [s.params for s in srcs]\n )\n\n # tell each source to keep track of it's source-specific sampled\n # images (and the image it was stripped out of)\n for src, samp_img in zip(srcs, samp_imgs):\n if samp_img is not None:\n\n # cache pixel grid for each sample image\n y_grid = np.arange(samp_img.y0, samp_img.y1, dtype=np.float)\n x_grid = np.arange(samp_img.x0, samp_img.x1, dtype=np.float)\n xx, yy = np.meshgrid(x_grid, y_grid, indexing='xy')\n pixel_grid = np.column_stack((xx.ravel(order='C'), yy.ravel(order='C')))\n src.sample_image_list.append((samp_img, img, pixel_grid))\n\n # keep track of noise sums\n noise_sums[band] = noise_sum\n\n # resample noise parameter in each fits image\n for band, img in self.img_dict.iteritems():\n a_n = self.a_0 + noise_sums[band]\n b_n = self.b_0 + img.nelec.size\n #eps_tmp = img.epsilon\n img.epsilon = np.random.gamma(a_n, 1./b_n)", "def generate_patches(scaled_imgs, constants, all_patches):\n patch_size = constants.PATCH_SIZE\n step = 1 if all_patches else 2\n patches = []\n for k, sc in enumerate(scaled_imgs):\n img_patches = []\n for i in range(0, sc.shape[0] - patch_size, step):\n for j in range(0, sc.shape[1] - patch_size, step):\n raw_patch = sc[i:i + patch_size, j:j + patch_size, :]\n patch = Patch(\n raw_patch=raw_patch,\n patch_size=patch_size,\n )\n patch.store(sc, [i, j])\n img_patches.append(patch)\n patches.append(img_patches)\n return patches", "def quantize(image_patch, gray_levels=12, n_stddev=2):\n # compute gray level gaussian stats\n mean = np.mean(image_patch)\n stddev = np.std(image_patch)\n # logger.debug('mean: {!s}\\nstd dev: {!s}'.format(mean, stddev))\n bin_width = 2*n_stddev*stddev / (gray_levels-2)\n # logger.debug('bin_width: {!s}'.format(bin_width))\n\n # rebin values into new quanization, first and last bins hold outliers\n quantized_image_patch = np.zeros_like(image_patch, dtype=np.int8)\n it = np.nditer(image_patch, op_flags=['readwrite'], flags=['multi_index'])\n while not it.finished:\n val = image_patch[it.multi_index]\n quantized_image_patch[it.multi_index] = min(gray_levels-1, max(0, math.floor(((val - mean + n_stddev*stddev)/(bin_width+1e-9))+1)))\n it.iternext()\n\n # import matplotlib.pyplot as plt\n # xy_shape = quantized_image_patch.shape[1:]\n # for z in range(quantized_image_patch.shape[0]):\n # fig = plt.figure()\n # ax = fig.add_subplot(1,2,1)\n # ax.imshow(image_patch[z,:,:].reshape(xy_shape), cmap='gray')\n # ax = fig.add_subplot(1,2,2)\n # ax.imshow(quantized_image_patch[z,:,:].reshape(xy_shape), cmap='gray', vmin=0, vmax=gray_levels-1)\n # plt.show()\n return quantized_image_patch", "def resampleParticles(self, gameState):\n self.particles = []\n for i in range(self.numParticles):\n self.particles.append(tuple(util.sample(self.uniformPrior) for _ in\n self.ghostIndices))", "def rescale_images(self, patches = [], n_pixel_elements = 42, flip = True, save = False):\t\t\t\n\t\t\n\t\trescaled_patches = []\n\t\tfor i in patches:\n\t\t\tpatch = plt.imread(os.path.join(self.project.base_dir,'patches','image'+str(i)+'.png'))\n\t\t\tif flip == True:\n\t\t\t\tpatch = np.flipud(patch)\n\t\t\tpatch = patch[:,patch.shape[1]/2 - patch.shape[0]/2:patch.shape[1]/2 + patch.shape[0]/2,0] # visual field 1080 by 1080\n\t\t\t\n\t\t\tscaled_patch = []\n\t\t\tscale = patch.shape[0]/n_pixel_elements\n\t\t\tfor x in range(n_pixel_elements):\n\t\t\t\tfor y in range (n_pixel_elements):\n\t\t\t\t\t# 
michelson_contrast\n\t\t\t\t\tscaled_patch.append(np.max(patch[scale*x:scale*x + scale,scale*y:scale*y + scale]) - np.min(patch[scale*x:scale*x + scale,scale*y:scale*y + scale]))\n\t\t\tscaled_patch = np.asarray(scaled_patch).reshape([n_pixel_elements,n_pixel_elements],order = 'C')\n\t\t\tif save == True:\n\t\t\t\timshow(scaled_patch)\n\t\t\t\tplt.savefig(os.path.join(self.stageFolder(stage = 'processed/mri/figs'), 'heatmap_patch' + str(i)))\t\n\t\t\trescaled_patches.append(scaled_patch)\n\n\t\treturn rescaled_patches", "def representations_sample(self, n=()):\n self.assert_sampled()\n return [\n z.sample(sample_shape=n, seed=self.randint).numpy()\n for z in self.representations\n ]", "def extract_patches(image_list, mask_src, image_src, mask_dst, image_dst, patch_size):\n class_counts = defaultdict(lambda: 0)\n skipped = 0\n total = 0\n for im in tqdm(image_list):\n img = cv2.imread(os.path.join(image_src, im))\n msk = cv2.imread(os.path.join(mask_src, im), 0)\n \n assert (img.shape[0] == msk.shape[0]) \\\n and (img.shape[1] == msk.shape[1]), \"Mismatch!\"\n\n img_patches = patchify(img, (patch_size, patch_size, 3), step=patch_size)\n msk_patches = patchify(msk, (patch_size, patch_size), step=patch_size)\n img_patches = img_patches.reshape((-1, patch_size, patch_size, 3))\n msk_patches = msk_patches.reshape((-1, patch_size, patch_size))\n # Step = 256 for patch size means no overlap\n for i in range(img_patches.shape[0]):\n # Replace class labels\n mask_patch = replace_classes(msk_patches[i])\n unique, counts = np.unique(mask_patch, return_counts=True)\n # If outside of RoI takes > 90% and there is only 1 class, ignore the patch.\n outside = np.mean(mask_patch == 0) > 0.9\n if outside and (len(unique) < 2):\n skipped += 1\n continue\n for x, y in enumerate(unique):\n class_counts[y] += counts[x].item()\n img_patch = img_patches[i]\n filename = im.split(\".png\")[0] + \"_\" + str(i) + \".png\"\n cv2.imwrite(os.path.join(image_dst, filename), img_patch)\n cv2.imwrite(os.path.join(mask_dst, filename), mask_patch)\n total += 1\n print('Skipped: {} / {}'.format(skipped, total))\n return class_counts", "def get_patches(image, label, coordmaps, sample, num_pos = 100, num_neg = 100, all_patches=False, patch_shape= (48,48,48), spacing=(24,24,24), start_idx = 0):\n image_shape = np.shape(image)\n cn_size = image_shape[0]\n sg_size = image_shape[1]\n cr_size = image_shape[2]\n ax_size = image_shape[3]\n\n if not all_patches:\n idx_pos = np.stack(np.where(label[0, ...] 
> 0))\n \n # Only include points not near boundary\n #sg_idx = np.where(((patch_shape[0]/2) < idx_pos[0]) & (idx_pos[0] < (sg_size - (patch_shape[0]/2))))\n #idx_pos = idx_pos[:,sg_idx[0]]\n #cr_idx = np.where(((patch_shape[1]/2) < idx_pos[1]) & (idx_pos[1] < (cr_size - (patch_shape[1]/2))))\n #idx_pos = idx_pos[:, cr_idx[0]]\n #ax_idx = np.where(((patch_shape[2]/2) < idx_pos[2]) & (idx_pos[2] < (ax_size - (patch_shape[2]/2))))\n #idx_pos = idx_pos[:, ax_idx[0]]\n \n idx_rand = np.random.choice(idx_pos[0].shape[0], num_pos, replace = False)\n cpts_pos_sampled = idx_pos[:, idx_rand] \n \n image_patch_list = []\n label_patch_list = []\n coordmaps_patch_list = []\n for i in range(num_pos):\n idx1_sg = cpts_pos_sampled[0][i] - int(patch_shape[0]/2)\n idx1_cr = cpts_pos_sampled[1][i] - int(patch_shape[1]/2)\n idx1_ax = cpts_pos_sampled[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n #Write patch/image and control points to csv and save image\n write_patch_to_file(image_patch, label_patch, coordmaps_patch, sample, cpts_pos_sampled[:,i], start_idx + i)\n \n # For negative points\n idx_neg = np.stack(np.where(label[0, ...]==0), axis = 0)\n \n # Only include points not near boundary\n sg_idx = np.where(((patch_shape[0]/2) < idx_pos[0]) & (idx_pos[0] < (sg_size - (patch_shape[0]/2))))\n idx_neg = idx_neg[:,sg_idx[0]]\n cr_idx = np.where(((patch_shape[1]/2) < idx_pos[1]) & (idx_pos[1] < (cr_size - (patch_shape[1]/2))))\n idx_neg = idx_neg[:, cr_idx[0]]\n ax_idx = np.where(((patch_shape[2]/2) < idx_pos[2]) & (idx_pos[2] < (ax_size - (patch_shape[2]/2))))\n idx_neg = idx_neg[:, ax_idx[0]]\n \n idx_rand = np.random.choice(idx_neg[0].shape[0], num_neg, replace = False)\n cpts_neg_sampled = idx_neg[:, idx_rand] \n \n for i in range(num_neg):\n idx1_sg = cpts_pos_sampled[0][i] - int(patch_shape[0]/2)\n idx1_cr = cpts_pos_sampled[1][i] - int(patch_shape[1]/2)\n idx1_ax = cpts_pos_sampled[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n #Write patch/image and control points to csv and save image\n write_patch_to_file(image_patch, label_patch, coordmaps_patch, sample, cpts_pos_sampled[:,i], start_idx + num_pos + i)\n \n cpts = np.concatenate((cpts_pos_sampled, cpts_neg_sampled), axis = 1)\n \n return image_patch_list, label_patch_list, coordmaps_patch_list, cpts, start_idx + num_pos + i\n\n else:\n \n idx = p.grid_center_points(image.shape[1:], spacing)\n \n # Only include points not near 
boundary\n sg_idx = np.where(((patch_shape[0]/2) < idx[0]) & (idx[0] < (sg_size - (patch_shape[0]/2))))\n idx = idx[:,sg_idx[0]]\n cr_idx = np.where(((patch_shape[1]/2) < idx[1]) & (idx[1] < (cr_size - (patch_shape[1]/2))))\n idx = idx[:, cr_idx[0]]\n ax_idx = np.where(((patch_shape[2]/2) < idx[2]) & (idx[2] < (ax_size - (patch_shape[2]/2))))\n idx = idx[:, ax_idx[0]]\n \n image_patch_list = []\n label_patch_list = []\n coordmaps_patch_list = []\n \n for i in range(idx.shape[1]):\n \n idx1_sg = idx[0][i] - int(patch_shape[0]/2)\n idx1_cr = idx[1][i] - int(patch_shape[1]/2)\n idx1_ax = idx[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n return image_patch_list, label_patch_list, coordmaps_patch_list, idx, len(image_patch_list)", "def apply_patch_on_the_image(img, patch, count=5, offset=150):\n mask = np.zeros(shape=img.shape)\n boxes = []\n prev = (0, 0)\n gen = gencoordinates(img.shape[0], img.shape[1])\n for i in range(count):\n rnd = random.choice([x for x in range(100)])\n x_offset = rnd + patch.shape[0]\n y_offset = rnd + patch.shape[1]\n x_offset += prev[0]\n y_offset += prev[1]\n if y_offset < patch.shape[1]:\n y_offset = patch.shape[1]\n if x_offset < patch.shape[0]:\n x_offset = patch.shape[0]\n img[y_offset:y_offset+patch.shape[0], x_offset:x_offset+patch.shape[1]] = patch\n mask[y_offset:y_offset+patch.shape[0], x_offset:x_offset+patch.shape[1]] = 1\n boxes.append((y_offset, patch.shape[0], x_offset, patch.shape[1]))\n prev = (x_offset, y_offset)\n return img, mask, boxes", "def subimage_generator(image, patch_block_size, numberxy, numberz):\n width = np.shape(image)[1]\n height = np.shape(image)[2]\n imagez = np.shape(image)[0]\n block_width = np.array(patch_block_size)[1]\n block_height = np.array(patch_block_size)[2]\n blockz = np.array(patch_block_size)[0]\n\n stridewidth = (width - block_width) // (numberxy - 1)\n strideheight = (height - block_height) // (numberxy - 1)\n stridez = (imagez - blockz) // numberz\n # step 1:if image size of z is smaller than blockz,return zeros samples\n if imagez < blockz:\n nb_sub_images = numberxy * numberxy * 1\n hr_samples = np.zeros(shape=(nb_sub_images, blockz, block_width, block_height), dtype=np.float32)\n return hr_samples\n # step 2:if stridez is bigger 1,return numberxy * numberxy * numberz samples\n if stridez >= 1:\n nb_sub_images = numberxy * numberxy * numberz\n hr_samples = np.empty(shape=(nb_sub_images, blockz, block_width, block_height), dtype=np.float32)\n indx = 0\n for z in range(0, numberz * stridez, stridez):\n for x in range(0, width - block_width + 1, stridewidth):\n for y in range(0, height - block_height + 1, strideheight):\n hr_samples[indx, :, :, :] = image[z:z + blockz, x:x + block_width, y:y + block_height]\n indx += 1\n\n if (indx != nb_sub_images):\n print(\"error sub number image\")\n return hr_samples\n\n # step3: if stridez==imagez,return numberxy * numberxy * 1 samples,one is [0:blockz,:,:]\n if imagez == blockz:\n nb_sub_images = numberxy * 
numberxy * 1\n hr_samples = np.empty(shape=(nb_sub_images, blockz, block_width, block_height), dtype=np.float32)\n indx = 0\n for x in range(0, width - block_width + 1, stridewidth):\n for y in range(0, height - block_height + 1, strideheight):\n hr_samples[indx, :, :, :] = image[:, x:x + block_width, y:y + block_height]\n indx += 1\n if (indx != nb_sub_images):\n print(\"error sub number image\")\n print(indx)\n print(nb_sub_images)\n return hr_samples\n # step4: if stridez==0,return numberxy * numberxy * 2 samples,one is [0:blockz,:,:],two is [-blockz-1:-1,:,:]\n if stridez == 0:\n nb_sub_images = numberxy * numberxy * 2\n hr_samples = np.empty(shape=(nb_sub_images, blockz, block_width, block_height), dtype=np.float32)\n indx = 0\n for x in range(0, width - block_width + 1, stridewidth):\n for y in range(0, height - block_height + 1, strideheight):\n hr_samples[indx, :, :, :] = image[0:blockz, x:x + block_width, y:y + block_height]\n indx += 1\n hr_samples[indx, :, :, :] = image[-blockz - 1:-1, x:x + block_width, y:y + block_height]\n indx += 1\n if (indx != nb_sub_images):\n print(\"error sub number image\")\n return hr_samples", "def get_downsampled_patch(points, h, w, patch=[.75, 1.0, .10, 0.9], ds=[10, 10]):\n t0 = time.time()\n pc_image = points.reshape((h, w, 3))\n ys = int(patch[0] * h)\n ye = int(patch[1] * h)\n xs = int(patch[2] * w)\n xe = int(patch[3] * w)\n patch = pc_image[ys:ye:ds[0], xs:xe:ds[1]]\n patch = patch.reshape(patch.size // 3, 3)\n patch = filter_zero(patch)\n\n # print(f\"Downampled Patch: {(time.time() - t0) * 1000:.1f} ms\")\n\n return patch", "def _sample_pairs_nbp(data, frac, size_cap=np.int(1e6)):\n sample_size = int(len(data) * (len(data) - 1) / 2 * frac)\n sample_size = min(sample_size, size_cap)\n pairs = np.empty((sample_size, 2))\n for i in numba.prange(sample_size):\n pair = np.random.choice(data, size=2, replace=False)\n pair.sort()\n pairs[i] = pair\n return pairs", "def smoothen(scaled_imgs, patches, constants):\n patch_size = constants.PATCH_SIZE\n\n for k in range(len(patches)):\n img = scaled_imgs[k]\n patch = patches[k]\n\n # We assume that alternate patches have been extracted in the initial step\n length_sd_array = int(round((img.shape[0] - patch_size) / 2))\n width_sd_array = int(round((img.shape[1] - patch_size) / 2))\n\n std_database = np.reshape(map(lambda x: x.std_dev, patch), [length_sd_array, width_sd_array])\n blur = np.reshape(cv2.GaussianBlur(std_database, (7, 7), sigmaX=6, sigmaY=6), [-1])\n map(lambda (i, x): setattr(x, 'std_dev', blur[i]), enumerate(patch))", "def _color_sample(img: np.ndarray, p: float = 0.05) -> np.ndarray:\n # combine the X and Y dimension into one, only keep the channels dimension\n ravelled = img.reshape(-1, 3)\n # for 5%, take every 20th value, for 10% every 10th, etc...\n every_nth = int(1 / p)\n return ravelled[::every_nth, :]", "def fast_sample(self, n, items_per=None):\n item_pool = np.arange(self.items.shape[0]) #self.items.copy()\n samples = []\n remaining = n\n samples_per_shuffle = int(item_pool.shape[0]/items_per)\n while remaining > 0:\n random.shuffle(item_pool)\n for i in range(0, min(samples_per_shuffle, remaining) * items_per, items_per):\n samples.append(item_pool[i:i+items_per])\n remaining -= 1\n return np.array(samples)", "def downsample_sam(self, factor):", "def sampling_algorithm(self, X, y):\r\n\r\n n_to_sample = self.det_n_to_sample(self.proportion)\r\n\r\n if n_to_sample == 0:\r\n return self.return_copies(X, y, \"Sampling is not needed.\")\r\n\r\n # standardization is needed to make 
the range of the propensity scores\r\n # similar to that of the features\r\n mms = MinMaxScaler()\r\n X_trans = mms.fit_transform(X) # pylint: disable=invalid-name\r\n\r\n X_min = X_trans[y == self.min_label]\r\n\r\n # adding propensity scores as a new feature\r\n X_new = np.column_stack([X_trans, self.propensity_scores(X_trans, y)])\r\n X_min_new = X_new[y == self.min_label] # pylint: disable=invalid-name\r\n\r\n # finding nearest neighbors of minority samples\r\n n_neighbors = min([len(X_new), self.n_neighbors+1])\r\n\r\n ind = self.neighborhood_structure(X_new, y, n_neighbors, X_min_new)\r\n\r\n # noise removal\r\n t_hat = np.sum(y[ind[:, 1:]] == self.min_label, axis=1)\r\n to_remove = np.where(t_hat < self.t * n_neighbors)[0]\r\n\r\n if len(to_remove) >= len(X_min) - 1:\r\n return self.return_copies(X, y,\r\n \"most minority samples indentified as noise\")\r\n\r\n n_to_sample = n_to_sample + to_remove.shape[0]\r\n\r\n samples = self.generate_samples(X_min=X_min,\r\n to_remove=to_remove,\r\n X_trans=X_trans,\r\n y=y,\r\n ind=ind,\r\n n_to_sample=n_to_sample)\r\n\r\n X_min = np.delete(X_min, to_remove, axis=0)\r\n\r\n # do the sampling\r\n #samples = []\r\n #while len(samples) < n_to_sample:\r\n # idx = self.random_state.randint(len(X_min))\r\n # # finding the number of minority neighbors\r\n # t_hat = np.sum(y[ind[idx][1:]] == self.min_label)\r\n # if t_hat < self.t*n_neighbors:\r\n # # removing the minority point if the number of minority\r\n # # neighbors is less then the threshold\r\n # # to_remove indexes X_min\r\n # if idx not in to_remove:\r\n # to_remove.append(idx)\r\n # # compensating the removal of the minority point\r\n # n_to_sample = n_to_sample + 1\r\n #\r\n # if len(to_remove) == len(X_min):\r\n # _logger.warning(self.__class__.__name__ + \": \" +\r\n # \"all minority samples identified as noise\")\r\n # return X.copy(), y.copy()\r\n # else:\r\n # # otherwise do the sampling\r\n # X_b = X_trans[self.random_state.choice(ind[idx][1:])]\r\n # samples.append(self.sample_between_points(X_min[idx], X_b))\r\n\r\n return (mms.inverse_transform(np.vstack([X_trans[y == self.maj_label],\r\n X_min,\r\n samples])),\r\n np.hstack([np.repeat(self.maj_label,\r\n np.sum(y == self.maj_label)),\r\n np.repeat(self.min_label, len(X_min)),\r\n np.repeat(self.min_label, len(samples))]))", "def samplePositiveImages(images, positiveSample, size=(64, 64), N=200):\n\n for image in images:\n \n rotated = imutils.rotate_bound(image, random.randint(-15,15))\n \n h, w, channels = rotated.shape\n cropped_img = rotated[w//2 - 64//2:w//2 + 64//2, h//2 - 64//2:h//2 + 64//2]\n\n positiveSample.append(image);\n positiveSample.append(cropped_img)\n positiveSample.append(np.fliplr(image))\n positiveSample.append(np.fliplr(cropped_img))\n \n supportList = []\n for img in positiveSample:\n supportList.append(img)\n\n for img in supportList:\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) #convert it to hsv\n hsv = hsv + 10\n img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n positiveSample.append(img)\n \n hsv = hsv - 20\n img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n positiveSample.append(img)\n\n return" ]
[ "0.67314327", "0.6650126", "0.6467092", "0.64232033", "0.63184655", "0.6244714", "0.61122465", "0.6007285", "0.5839796", "0.5838395", "0.5829985", "0.5823634", "0.58136296", "0.580021", "0.5799174", "0.5792084", "0.5783304", "0.576639", "0.5761128", "0.57605505", "0.57274", "0.5718308", "0.5703057", "0.5682935", "0.5673232", "0.56700915", "0.56532764", "0.5651243", "0.561191", "0.5609006" ]
0.75956285
0
A function for denoising an image. The function accepts a noisy gray scale image, denoises the different patches of it and then reconstructs the image.
def denoise_image(Y, model, denoise_function, noise_std, patch_size=(8, 8)):
    (h, w) = np.shape(Y)
    cropped_h = h - patch_size[0] + 1
    cropped_w = w - patch_size[1] + 1
    middle_linear_index = int(
        ((patch_size[0] / 2) * patch_size[1]) + (patch_size[1] / 2))

    # split the image into columns and denoise the columns:
    noisy_patches = im2col(Y, patch_size)
    denoised_patches = denoise_function(noisy_patches, model, noise_std)

    # reshape the denoised columns into a picture:
    x_hat = np.reshape(denoised_patches[middle_linear_index, :],
                       [cropped_h, cropped_w])

    return x_hat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_denoising(image, model, denoise_function,\n noise_range=(0.01, 0.05, 0.1, 0.2), patch_size=(8, 8)):\n h, w = np.shape(image)\n noisy_images = np.zeros((h, w, len(noise_range)))\n denoised_images = []\n cropped_original = crop_image(image, patch_size)\n\n # make the image noisy:\n for i in range(len(noise_range)):\n noisy_images[:, :, i] = image + (\n noise_range[i] * np.random.randn(h, w))\n\n # denoise the image:\n for i in range(len(noise_range)):\n denoised_images.append(\n denoise_image(noisy_images[:, :, i], model, denoise_function,\n noise_range[i], patch_size))\n\n # calculate the MSE for each noise range:\n noisy_mses = {}\n denoised_mses = {}\n for i in range(len(noise_range)):\n print(\"noisy MSE for noise = \" + str(noise_range[i]) + \":\")\n noisy_mse = np.mean((crop_image(noisy_images[:, :, i],\n patch_size) - cropped_original) ** 2)\n noisy_mses[str(noise_range[i])] = noisy_mse\n print(noisy_mse)\n print(\"denoised MSE for noise = \" + str(noise_range[i]) + \":\")\n denoised_mse = np.mean((cropped_original - denoised_images[i]) ** 2)\n denoised_mses[str(noise_range[i])] = denoised_mse\n print(denoised_mse)\n\n plt.figure(figsize=(20, 20))\n plt.axis('off')\n for i in range(len(noise_range)):\n plt.subplot(2, len(noise_range), i + 1, xlabel='Noisy image', xticks=[], yticks=[])\n plt.imshow(noisy_images[:, :, i], cmap='gray')\n plt.subplot(2, len(noise_range), i + 1 + len(noise_range), xlabel='Denoised image', xticks=[], yticks=[])\n plt.imshow(denoised_images[i], cmap='gray')\n plt.show()\n return noisy_mses, denoised_mses", "def denoise(img, h=10, hForColor=None, templateWindowSize=7, searchWindowSize=21):\n\tif hForColor is None:\n\t\thForColor=h\n\ttmp = img.copy()\n\tif len(img.shape) != 3:\n\t\tdst = cv2.fastNlMeansDenoising(tmp, None, h, templateWindowSize, searchWindowSize)\n\telse:\n\t\tdst = cv2.fastNlMeansDenoisingColored(img, None, h, hForColor, templateWindowSize, searchWindowSize)\n\treturn dst", "def _gaussian_for_learn_denosing_model(image):\n return add_gaussian_noise(image, 0, 0.2)", "def make_noisy_images(image):\r\n return apply_poisson_noise(image, random_state=12345)", "def main() -> None:\n\n # Define file name\n file_name = define_file()\n\n # Open chosen image\n img = image.load_img(IMAGES + file_name, color_mode='grayscale')\n\n # Show user image\n plt.imshow(img)\n plt.show()\n\n # Convert image to array\n img_arr = image.img_to_array(img)\n img_arr = np.array([img_arr])\n img_arr = img_arr.astype(\"float32\") / 255.0\n\n # Classify image\n img_class = classification(img_arr)\n\n # Suggest user add noise to original image\n if img_class == ORIGINAL:\n while True:\n command = input('Seems like your image is original. Do you want to add noise? y/n: ')\n if command.strip().lower() in ('y', 'yes'):\n noisy_array = noise(img_arr)\n display(img_arr, noisy_array)\n img = image.array_to_img(noisy_array[0])\n img.save(IMAGES + file_name[:-4] + '_noise' + file_name[-4:])\n main()\n elif command.strip().lower() in ('n', 'no'):\n main()\n else:\n continue\n\n # Suggest user remove noise from noised image\n elif img_class == NOISED:\n while True:\n command = input('Seems like your image has noise. Do you want to remove noise? 
y/n: ')\n if command.strip().lower() in ('y', 'yes'):\n denoise_array = denoise_image(img_arr)\n display(img_arr, denoise_array)\n img = image.array_to_img(denoise_array[0])\n img.save(IMAGES + file_name[:-4] + '_denoised' + file_name[-4:])\n main()\n elif command.strip().lower() in ('n', 'no'):\n main()\n else:\n continue\n\n # Image denoised. Nothing to do\n else:\n print('Seems like your image denoised.')\n main()", "def IMAGE_PREPROCESSING_DEFAULT(img, grayscale_only=False):\n if grayscale_only:\n return cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY)\n else:\n img = cv2.medianBlur(img, 9)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = cv2.bilateralFilter(img, 7, 13, 13)\n return cv2.Canny(img,100,200)", "def _denoise(self, img, weight):\n\n from skimage.filters import denoise_tv_chambolle\n\n img = denoise_tv_chambolle(img, weight=weight) * 255\n\n return img.astype(\"uint8\")", "def dilate(img, kernel = (5,5), iterations = 1):\n\ttmp = grayscale(img)\n\tk = np.ones(kernel, np.uint8)\n\tdilation = cv2.dilate(tmp, k, iterations = iterations)\n\treturn dilation", "def dilate_image(image, dilation=1, passes=1, binarization=None,\n invert=False):\n # if image is all one single color, return it\n if len(np.unique(image)) == 1:\n return image\n # scikit-image needs only 0's and 1's\n mono_image = binarize_image(image, method=binarization) / 255\n if invert:\n mono_image = invert_image(mono_image)\n if dilation:\n dilation = (2 * dilation) + 1\n dilation_kernel = np.ones((dilation, dilation), np.uint8)\n dilated = cv2.morphologyEx(mono_image, cv2.MORPH_DILATE,\n dilation_kernel, iterations=passes)\n else:\n dilated = mono_image\n return dilated.astype(np.ubyte) * 255", "def softing_noise(image, kn):\n\n s_noise = cv2.GaussianBlur(image, (kn, kn), 0)\n\n return s_noise", "def enhance_edges(image):\n working_image = image.copy()\n working_image = cv2.cvtColor(working_image, cv2.COLOR_BGR2GRAY)\n # Blur away fine details.\n working_image = cv2.GaussianBlur(working_image, (5, 5), 0)\n return working_image", "def perform_noise_removal(mask):\n trans1 = cv.dilate(mask, KERNEL, iterations=4)\n trans1 = cv.erode(trans1, KERNEL, iterations=5)\n return cv.dilate(trans1, KERNEL, iterations=7)", "def fft_im_denoise(img: numpy.ndarray, keep_fraction: float = 0.1) -> numpy.ndarray:\n assert 0.0 < keep_fraction < 1.0\n\n im_fft = fftpack.fft2(img)\n\n # In the lines following, we'll make a copy of the original spectrum and\n # truncate coefficients.\n # Call ff a copy of the original transform. 
Numpy arrays have a copy\n # method for this purpose.\n im_fft_cp = im_fft # .copy()\n num_row, num_columns = im_fft_cp.shape\n\n # Set to zero all rows with indices between r*keep_fraction and\n # r*(1-keep_fraction):\n im_fft_cp[int(num_row * keep_fraction) : int(num_row * (1 - keep_fraction))] = 0\n im_fft_cp[\n :, int(num_columns * keep_fraction) : int(num_columns * (1 - keep_fraction))\n ] = 0\n\n # pyplot.figure()\n # plot_spectrum(im_fft)\n # pyplot.title('Fourier transform')\n\n # pyplot.figure()\n # plot_spectrum(im_fft_cp)\n # pyplot.title('Filtered Spectrum')\n\n # Reconstruct the denoised image from the filtered spectrum, keep only the\n # real part for display.\n return fftpack.ifft2(im_fft_cp).real # Inverse / Reconstruction", "def image_preprocessing(image):\n\treturn cv2.GaussianBlur(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), (5,5), 0)", "def _make_noisy(x, the_noise):\n noise_sample = the_noise[np.random.choice(the_noise.shape[0],\n x.shape[0],\n replace=False)]\n return x + noise_sample", "def real_blur_and_noise(image, kernel, sigma_d):\n degraded = filter_image(image, kernel, mode=\"valid\", boundary=\"fill\")\n noise = np.random.normal(0.0, sigma_d, degraded.shape).astype(np.float32)\n degraded = degraded + noise\n initial = np.pad(degraded, ((kernel.shape[0] // 2, kernel.shape[0] // 2),\n (kernel.shape[1] // 2, kernel.shape[1] // 2),\n (0, 0)), mode=\"edge\")\n initial = edgeTaper(initial, kernel)\n return initial", "def denoise_image(image,dest):\n beta = 1e-3\n eta = 2e-4\n argh = 0.0\n kmax = 10\n data = sign(image.getdata(), {0: -1, 255: 1}) # convert to {-1, 1}\n E, localized_E = E_generator(beta, eta, argh)\n temp_dir = os.path.dirname(os.path.realpath(os.path.join('submits', 'img', dest)))\n y = data.reshape(image.size[::-1]) # convert 1-d array to matrix\n result = simulated_annealing(\n y, kmax, E, localized_E, temp_dir)\n result = sign(result, {-1: 0, 1: 255})\n output_image = Image.fromarray(result).convert('1', dither=Image.NONE)\n return output_image", "def denormalize_generate_image(fake_data, normalize_with_sigmoid=True):\n if normalize_with_sigmoid:\n return fake_data * 255.0 # Denormalization\n else:\n return tf.clip_by_value((fake_data + 1) * 127.5, 0, 255)", "def traffic_sign_detection_noisy(img_in):\n img = img_in.copy()\n clean_picture = cv2.fastNlMeansDenoisingColored(\n src=img,\n dst=None,\n templateWindowSize=7,\n searchWindowSize=21,\n h=15,\n hColor=15\n )\n clean_picture = cv2.bilateralFilter(clean_picture, 9, 75, 75)\n return traffic_sign_detection(clean_picture, light_size=(8, 30), light_offset=10)", "def pre_processing_image(img):\n\n #print(img.shape)\n # apply gamma correction and show the images\n #adjusted = adjust_gamma(img, gamma=0.65)\n\n adjusted = exposure.adjust_gamma(img, gamma=1.65)\n #print(adjusted.shape)\n\n # log transform of image\n\n logarithmic_corrected = exposure.adjust_log(adjusted, 1)\n #print(logarithmic_corrected.shape)\n\n # denoising\n #dst2 = cv2.fastNlMeansDenoisingColored(logarithmic_corrected, None, 10, 10, 7, 21)\n #print(dst2.shape)\n dst2 = logarithmic_corrected\n return dst2", "def remove_noise(image):\n filtered = cv2.absdiff(image.astype(np.uint8), 255,\n cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 41)\n kernel = np.ones((1, 1), np.uint8)\n opening = cv2.morphologyEx(filtered, cv2.MORPH_OPEN, kernel)\n closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\n\n img = image_smoothening(image)\n transform = cv2.bitwise_or(img, closing)\n return transform", "def apply_noise(input_image, 
noise_level):\n n,m = input_image.shape\n # generate binomial noise\n ksi = np.random.binomial(size=n*m, n=1, p=noise_level).reshape(input_image.shape)\n noised_image = ksi^input_image\n return noised_image", "def blur_image(im, n, ny=None) :\n g = gauss_kern(n, sizey=ny)\n improc = signal.convolve(im,g, mode='same')\n return(improc)", "def dilate(ary, N, iterations):\n\n kernel = np.zeros((N, N), dtype=np.uint8)\n kernel[(N - 1) // 2, :] = 1 # Bug solved with // (integer division)\n\n dilated_image = cv2.dilate(ary / 255, kernel, iterations=iterations)\n\n kernel = np.zeros((N, N), dtype=np.uint8)\n kernel[:, (N - 1) // 2] = 1 # Bug solved with // (integer division)\n dilated_image = cv2.dilate(dilated_image, kernel, iterations=iterations)\n return dilated_image", "def dilate_image(img, kernelSize=(4,4), iterations=2):\n\n kernel = np.ones(kernelSize, np.uint8)\n return cv.dilate(img,kernel, iterations=iterations)", "def preprocess_images(input_image, soften=None, fill_holes=None):\n ratio = get_scaling_ratio(input_image)\n if soften == None:\n soften = max(soften_amt_deafult * ratio, 1)\n if fill_holes == None:\n fill_holes = round(fill_holes_deafult * ratio)\n fill_holes = max(fill_holes, 1)\n\n # ensure that all points which are transparent have RGB values of 255 (will become white when\n # converted to non-transparent grayscale.)\n input_image = img_as_float32(input_image)\n if len(input_image.shape) == 3 and input_image.shape[2] == 4:\n input_image = rgba2rgb(input_image)\n gray_img = img_as_ubyte(rgb2gray(input_image))\n\n # get the otsu threshold after running a flood fill on the corners, so that those huge clumps of\n # dark pixels don't mess up the statistics too much (we only care about text!)\n thresh = threshold_otsu(\n fill_corners(gray_img, fill_value=255, thresh=5, tol=1, fill_below_thresh=True)\n )\n\n # n.b. here we are setting black pixels from the original image to have a value of 1 (effectively inverting\n # what you would get from a normal binarization, because the math gets easier this way)\n img_bin = img_as_ubyte(gray_img < thresh)\n \n # need to add clipping because of a weird case where the range of the\n # blurred imagewill be from -1 to 1.0000000004\n blurred = np.clip(gaussian(gray_img, soften), -1, 1)\n img_blur_bin = img_as_ubyte(img_as_ubyte(blurred) < thresh)\n\n # now, fill corners of binarized images with black (value 0)\n img_bin = fill_corners(\n img_bin, fill_value=0, thresh=1, tol=1, fill_below_thresh=False\n )\n img_blur_bin = fill_corners(\n img_blur_bin, fill_value=0, thresh=1, tol=1, fill_below_thresh=False\n )\n\n # run smoothing on the blurred-binarized image so we get blobs of text in neat lines\n kernel = np.ones((fill_holes, fill_holes), np.uint8)\n img_cleaned = binary_opening(binary_closing(img_blur_bin, kernel), kernel)\n\n # find rotation angle of cleaned, smoothed image. use that to correct the rotation of the unsmoothed image\n angle = find_rotation_angle(img_cleaned)\n img_cleaned_rot = rotate(img_cleaned, angle, order=0, mode=\"edge\") > 0\n img_bin_rot = rotate(img_bin, angle, order=0, mode=\"edge\") > 0\n\n return img_bin_rot, img_cleaned_rot, angle", "def noise_dither_bayer(img: np.ndarray):\n \n imgtype = img.dtype\n size = img.shape\n\n #Note: these are very slow for large images, must crop first before applying.\n # Bayer works more or less. I think it's missing a part of the image, the\n # dithering pattern is apparent, but the quantized (color palette) is not there. 
\n # Still enough for models to learn dedithering\n bayer_matrix = np.array([[0, 8, 2, 10], [12, 4, 14, 6], [3, 11, 1, 9], [15, 7, 13, 5]]) #/256 #4x4 Bayer matrix\n \n bayer_matrix = bayer_matrix*16\n \n red = img[:,:,2] #/255.\n green = img[:,:,1] #/255.\n blue = img[:,:,0] #/255.\n \n img_split = np.zeros((img.shape[0], img.shape[1], 3), dtype = imgtype)\n \n for values, color, channel in zip((red, green, blue), ('red', 'green', 'blue'), (2,1,0)):\n for i in range(0, values.shape[0]):\n for j in range(0, values.shape[1]):\n x = np.mod(i, 4)\n y = np.mod(j, 4)\n if values[i, j] > bayer_matrix[x, y]:\n img_split[i,j,channel] = 255 #1\n dithered = img_split #*255.\n \n return dithered.astype(imgtype)", "def plot_denoising(fname_raw, fmin=0, fmax=300, tmin=0.0, tmax=60.0,\n proj=False, n_fft=4096, color='blue',\n stim_name=None, event_id=1,\n tmin_stim=-0.2, tmax_stim=0.5,\n area_mode='range', area_alpha=0.33, n_jobs=1,\n title1='before denoising', title2='after denoising',\n info=None, show=True, fnout=None):\n\n from matplotlib import gridspec as grd\n import matplotlib.pyplot as plt\n from mne.time_frequency import psd_welch\n\n fnraw = get_files_from_list(fname_raw)\n\n # ---------------------------------\n # estimate power spectrum\n # ---------------------------------\n psds_all = []\n freqs_all = []\n\n # loop across all filenames\n for fname in fnraw:\n\n # read in data\n raw = mne.io.Raw(fname, preload=True)\n picks = mne.pick_types(raw.info, meg='mag', eeg=False,\n stim=False, eog=False, exclude='bads')\n\n if area_mode not in [None, 'std', 'range']:\n raise ValueError('\"area_mode\" must be \"std\", \"range\", or None')\n\n psds, freqs = psd_welch(raw, picks=picks, fmin=fmin, fmax=fmax,\n tmin=tmin, tmax=tmax, n_fft=n_fft,\n n_jobs=n_jobs, proj=proj)\n psds_all.append(psds)\n freqs_all.append(freqs)\n\n if stim_name:\n n_xplots = 2\n\n # get some infos\n events = mne.find_events(raw, stim_channel=stim_name, consecutive=True)\n\n else:\n n_xplots = 1\n\n fig = plt.figure('denoising', figsize=(16, 6 * n_xplots))\n gs = grd.GridSpec(n_xplots, int(len(psds_all)))\n\n # loop across all filenames\n for idx in range(int(len(psds_all))):\n\n # ---------------------------------\n # plot power spectrum\n # ---------------------------------\n p1 = plt.subplot(gs[0, idx])\n\n # Convert PSDs to dB\n psds = 10 * np.log10(psds_all[idx])\n psd_mean = np.mean(psds, axis=0)\n if area_mode == 'std':\n psd_std = np.std(psds, axis=0)\n hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)\n elif area_mode == 'range':\n hyp_limits = (np.min(psds, axis=0), np.max(psds, axis=0))\n else: # area_mode is None\n hyp_limits = None\n\n p1.plot(freqs_all[idx], psd_mean, color=color)\n if hyp_limits is not None:\n p1.fill_between(freqs_all[idx], hyp_limits[0], y2=hyp_limits[1],\n color=color, alpha=area_alpha)\n\n if idx == 0:\n p1.set_title(title1)\n ylim = [np.min(psd_mean) - 10, np.max(psd_mean) + 10]\n else:\n p1.set_title(title2)\n\n p1.set_xlabel('Freq (Hz)')\n p1.set_ylabel('Power Spectral Density (dB/Hz)')\n p1.set_xlim(freqs_all[idx][0], freqs_all[idx][-1])\n p1.set_ylim(ylim[0], ylim[1])\n\n # ---------------------------------\n # plot signal around stimulus\n # onset\n # ---------------------------------\n if stim_name:\n raw = mne.io.Raw(fnraw[idx], preload=True)\n epochs = mne.Epochs(raw, events, event_id, proj=False,\n tmin=tmin_stim, tmax=tmax_stim, picks=picks,\n preload=True, baseline=(None, None))\n evoked = epochs.average()\n if idx == 0:\n ymin = np.min(evoked.data)\n ymax = 
np.max(evoked.data)\n\n times = evoked.times * 1e3\n p2 = plt.subplot(gs[1, idx])\n p2.plot(times, evoked.data.T, 'blue', linewidth=0.5)\n p2.set_xlim(times[0], times[len(times) - 1])\n p2.set_ylim(1.1 * ymin, 1.1 * ymax)\n\n if (idx == 1) and info:\n plt.text(times[0], 0.9 * ymax, ' ICs: ' + str(info))\n\n # save image\n if fnout:\n fig.savefig(fnout + '.png', format='png')\n\n # show image if requested\n if show:\n plt.show()\n\n plt.close('denoising')\n plt.ion()", "def image_enhancement(self,img,file_name):\n #Creating output directory if it doesnt exist\n dirname = 'output'\n dir_path = os.path.dirname(os.path.realpath(__file__))\n if(os.path.isdir(os.path.join(dir_path, dirname))): \n if(os.path.exists(os.path.join(dir_path, dirname))):\n pass\n else:\n os.mkdir(os.path.join(dir_path, dirname))\n os.mkdir(os.path.join(dir_path, dirname,\"results\"))\n os.mkdir(os.path.join(dir_path, dirname,\"inputs\"))\n #Extracting edges using Canny's Edge Detection\n edges = cv2.Canny(img,80,255)\n cv2.imwrite(os.path.join(dir_path, dirname,'inputs','edges.png'),edges)\n kernel = (3,3)\n #Applying image pyramid technique\n #Applying Gaussian blur filter over the image\n gaussian_blurred_image =self.gaussian_blurring(img,kernel,0)\n cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaussian_blurred_image.png'),gaussian_blurred_image)\n plt.subplot(121),\n plt.xticks([]), plt.yticks([])\n plt.subplot(122),\n plt.xticks([]), plt.yticks([])\n #Downsizing the image to 1/4th of its original size\n coarse_image =self.sampling(gaussian_blurred_image,0.25,0.25) \n cv2.imwrite(os.path.join(dir_path, dirname,'inputs','coarse_image.png'),coarse_image)\n #Upsampling the image to its original size\n up_sampling=self.sampling(coarse_image,4,4)\n cv2.imwrite(os.path.join(dir_path, dirname,'inputs','up_sampling.png'),up_sampling)\n #Applying Gaussian Blur filtering\n gaus=self.gaussian_blurring(up_sampling,kernel,0)\n cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaus2.png'),gaus)\n #Resizing the image for image subtraction\n gaussian_blurred_image=cv2.resize(img,(gaus.shape[1],gaus.shape[0]))\n #Convert into grayscale\n gaus_gray=cv2.cvtColor(gaus,cv2.COLOR_BGR2GRAY)\n cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gausgray.png'),gaus_gray)\n #Converting to grayscale\n dst_gray=cv2.cvtColor(gaussian_blurred_image,cv2.COLOR_BGR2GRAY)\n (score, diff) = compare_ssim(gaus_gray, dst_gray, full=True)\n diff = (diff * 255).astype(\"uint8\")\n #Image Subtraction\n detail_image = cv2.subtract(gaus,gaussian_blurred_image)\n cv2.imwrite(os.path.join(dir_path, dirname,'inputs','detailed.png'),detail_image)\n print(detail_image.shape)\n output_path=self.process_imgdir(os.path.join(dir_path, dirname))\n dehazed_image=cv2.imread(output_path)\n # dehazed_image =self.sampling(dehazed_image,4,4)\n output_path=\"\\\\\".join(output_path.split(\"\\\\\")[:-1])\n print(dehazed_image.shape)\n cv2.imwrite(os.path.join(output_path,'dehazed_image.png'),dehazed_image) \n #Adding two images\n dst = cv2.addWeighted(detail_image,1,dehazed_image,1,0) \n kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])\n dst = cv2.filter2D(dst, -1, kernel)\n #Converting images to lightness,chroma ,hue for increasing the brightness\n lab= cv2.cvtColor(dst, cv2.COLOR_BGR2LAB)\n l, a, b = cv2.split(lab)\n #Applying CLAHE Algorithm for contrast amplification which is limited and to reduce the problem of noise amplification\n clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))\n cl = clahe.apply(l) \n limg = cv2.merge((cl,a,b))\n #Convert 
back to rgb\n final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR) \n psf = np.ones((5, 5)) / 25\n #Applying mean denoising filtering\n dst=cv2.fastNlMeansDenoisingColored(final,None,10,10,7,21)\n edges=cv2.cvtColor(edges,cv2.COLOR_GRAY2BGR)\n print(edges.shape)\n edges=cv2.resize(edges,(dst.shape[1],dst.shape[0]))\n #Increasing the brightness of the image\n hsv = cv2.cvtColor(dst, cv2.COLOR_BGR2HSV)\n h,s,v=cv2.split(hsv)\n value = 30 #whatever value you want to add\n lim=255-value\n \n s[s>lim]=255\n s[s<lim]+=value\n value1=30\n lim1=255-value1\n v[v>lim1]=255\n v[v<lim1]+=value1\n hsv = cv2.merge((h, s, v))\n dst = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n #Writing the output file \n dst = cv2.addWeighted(dst,1,edges,1,0)\n cv2.imwrite(os.path.join(output_path,file_name+'.png'),dst)\n #Resizing the file to compare it with other methods\n resized = cv2.resize(dst, (256,256), interpolation = cv2.INTER_AREA)\n cv2.imwrite(os.path.join(output_path,'result_resized.png'),resized)", "def dilate(image, iterations, kernel_size=(5, 5)):\n kernel = np.ones(kernel_size, np.uint8)\n image = cv2.dilate(image, kernel, iterations=iterations)\n return image" ]
[ "0.6804645", "0.6336191", "0.6276971", "0.61491084", "0.6051832", "0.6048815", "0.6029486", "0.60108566", "0.59982866", "0.5986291", "0.596342", "0.5904856", "0.5863499", "0.5794029", "0.57855695", "0.5770762", "0.572468", "0.5707011", "0.5696118", "0.56886435", "0.5676679", "0.5664853", "0.5636921", "0.56359434", "0.56160444", "0.55988204", "0.5584402", "0.55824393", "0.5580505", "0.5558802" ]
0.66669136
1
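Several of the negative snippets in the row above share the same preprocessing step: a small Gaussian blur (cv2.GaussianBlur with a 5x5 kernel) applied before denoising or edge work. A minimal sketch of that step, assuming OpenCV and NumPy are installed; the synthetic noisy image and its size stand in for a real input and are purely illustrative:

import cv2
import numpy as np

# Synthetic noisy grayscale image; values and size are illustrative only.
rng = np.random.default_rng(0)
noisy = np.clip(rng.normal(128, 40, size=(64, 64)), 0, 255).astype(np.uint8)

# 5x5 Gaussian blur with sigma inferred from the kernel size (sigma=0),
# the same call pattern used by the blur-based snippets above.
smoothed = cv2.GaussianBlur(noisy, (5, 5), 0)
print(smoothed.shape, smoothed.dtype)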
Given a matrix in log space, return the matrix with normalized columns in log space.
def normalize_log_likelihoods(X): h, w = np.shape(X) return X - np.tile(logsumexp(X, axis=0), (h, 1)) # return X - np.matlib.repmat(logsumexp(X, axis=0), h, 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_normalize(log_prob, axis):\n log_sum = logsumexp(log_prob, axis=axis)\n \n if not isinstance(log_sum, np.ndarray):\n log_sum = np.array([log_sum])\n if log_prob.shape[0] == log_sum.shape[0]:\n # column normalize \n return (log_prob.transpose() - log_sum).transpose()\n else:\n # row normalize\n return log_prob - log_sum", "def log_normalize(a, axis=None):\n if axis is not None and a.shape[axis] == 1:\n # Handle single-state GMMHMM in the degenerate case normalizing a\n # single -inf to zero.\n a[:] = 0\n else:\n with np.errstate(under=\"ignore\"):\n a_lse = logsumexp(a, axis, keepdims=True)\n a -= a_lse", "def normalize_columns(mat):\n norm = np.sqrt((mat**2).sum(0))\n return mat / norm", "def normalize(data):\n norm_matrix = np.int_(np.log10(data)**2)\n norm_matrix = map(lambda x: x if x < BOARD_SIZE else BOARD_SIZE, norm_matrix)\n norm_matrix = map(lambda x: x if x > 0 else 0, norm_matrix)\n return norm_matrix", "def normalize_col(input_matrix):\n\n col_sums = np.nan_to_num(input_matrix).sum(axis=0, keepdims=True)\n\n #new_matrix = input_matrix / col_sums if np.isscalar(col_sums) else input_matrix / col_sums[np.newaxis, :]\n new_matrix = np.divide(input_matrix, col_sums)\n return np.nan_to_num(new_matrix)", "def normalize(input_matrix):\n\n row_sums = input_matrix.sum(axis=1)\n try:\n assert (np.count_nonzero(row_sums)==np.shape(row_sums)[0]) # no row should sum to zero\n except Exception:\n raise Exception(\"Error while normalizing. Row(s) sum to zero\")\n new_matrix = input_matrix / row_sums[:, np.newaxis]\n return new_matrix", "def normalize(self,matrix):\n for i in range(self.N):\n matrix[self.N-1][i] = 0\n for i in range(self.n):\n matrix[self.N - 1][self.index(i,i)] = 1\n return matrix", "def normalize_matrix(mat):\n return (mat + abs(mat.min())) / (mat.max() - mat.min())", "def normalize_matrix(matrix, axis=1):\n if len(matrix.shape) == 1:\n # turn vector into matrix with one row\n matrix = matrix[np.newaxis, :]\n divisor = np.linalg.norm(matrix, axis=axis)[:, np.newaxis]\n # only normalize where divisor is not zero\n result = np.divide(matrix, divisor, out=np.zeros(matrix.shape), where=divisor != 0)\n return result", "def _log_matrix_vector(ms, vs):\n return tf.reduce_logsumexp(ms + vs[..., tf.newaxis, :], axis=-1)", "def log_normalize(df):\n\n return df.applymap(log_cust)", "def normalize_matrix(matrix):\n\n nrows = matrix.shape[0]\n for col in xrange(matrix.shape[1]):\n tot = float(sum(matrix[:,col]))\n \n for row in xrange(nrows):\n try:\n matrix[row][col] = matrix[row][col]/tot\n except ZeroDivisionError:\n pass\n return matrix", "def log_norm(log_x):\n c = np.max(log_x)\n\n if np.isinf(c):\n return c\n\n sum_exp = 0\n\n for x in log_x:\n sum_exp += np.exp(x - c)\n\n log_sum_exp = np.log(sum_exp)\n\n log_Z = log_sum_exp + c\n\n return log_Z", "def normalize(self, lam):\n return (lam.T / np.sum(lam, axis=1)).T", "def normalize_matrix(matrix, min_val, max_val):\n return (max_val - min_val) * (matrix - np.min(matrix)) / (np.max(matrix) - np.min(matrix)) + min_val", "def _log_vector_matrix(vs, ms):\n return tf.reduce_logsumexp(vs[..., tf.newaxis] + ms, axis=-2)", "def _xlogx(x):\n y = x.copy()\n if isinstance(y, sparse.csc_matrix) or isinstance(y, sparse.csr_matrix):\n z = y.data\n else:\n z = np.asarray(y) # ensure np.matrix converted to np.array\n nz = z.nonzero()\n z[nz] *= np.log2(z[nz])\n return y", "def lognormalize(x, temp = 1):\n if type(x) is list: x = np.array(x)\n\n x = x - np.max(x)\n # anneal\n xp = np.power(np.exp(x), temp)\n return xp / xp.sum()", "def 
log_normalise(data, vmin, vmax):\n result = np.ma.masked_less_equal(data, 0, copy=False)\n if vmin > vmax:\n raise ValueError(\"minvalue must be less than or equal to maxvalue\")\n elif vmin <= 0:\n raise ValueError(\"values must all be positive\")\n elif vmin == vmax:\n result.fill(0)\n else:\n mask = np.ma.getmask(result)\n result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax), mask=mask)\n\n resdat = result.data\n mask = result.mask\n if mask is np.ma.nomask:\n mask = (resdat <= 0)\n else:\n mask |= (resdat <= 0)\n\n np.log(resdat, resdat)\n resdat -= np.log(vmin)\n resdat /= (np.log(vmax) - np.log(vmin))\n result = np.ma.array(resdat, mask=mask, copy=False)\n\n return result", "def _normalize(weights, axis, log=True):\n if log:\n normalizer = tf.reduce_logsumexp(weights, axis=axis, keepdims=True)\n return weights - normalizer\n normalizer = tf.reduce_sum(weights, axis=axis)\n return weights / normalizer", "def _horizontal_log(self, X: np.ndarray) -> (np.ndarray, np.ndarray):\n ret_p = np.zeros_like(X)\n ret_n = np.zeros_like(X)\n log_p = self.manifold.log(X[:, :-1], X[:, 1:])\n log_n = self.manifold.log(X[:, 1:], X[:, :-1])\n ret_p[:, :-1] = log_p\n ret_n[:, 1:] = log_n\n return ret_p, ret_n", "def norm_by_rows(matrix):\n row_sums = matrix.sum(axis=1)\n return matrix / row_sums.reshape((-1, 1))", "def log(self) -> np.ndarray:\n S = 0.5*(self.A-self.A.T) # Skew-symmetric matrix\n y = np.array([S[2, 1], -S[2, 0], S[1, 0]]) # Axis\n if np.allclose(np.zeros(3), y):\n return np.zeros(3)\n y2 = np.linalg.norm(y)\n return np.arcsin(y2)*y/y2", "def lognormexp(values, dim=0):\n\n log_denominator = torch.logsumexp(values, dim=dim, keepdim=True)\n # log_numerator = values\n return values - log_denominator", "def lognormexp(values, dim=0):\n\n log_denominator = torch.logsumexp(values, dim=dim, keepdim=True)\n # log_numerator = values\n return values - log_denominator", "def normalise(self,data,take_logs:bool=False):\n\n # Normalise vector to sum up to 1\n normalised_vector = data/np.sum(data)\n\n # If take logs is selected, take logs\n if take_logs:\n return np.log(normalised_vector)\n else:\n return normalised_vector", "def _convert_normlogprice(self, series):\n try:\n return np.log(series.div(series[0]))\n except:\n raise TypeError('ERROR: Could not transform prices to log function. Check price history data.')", "def _log_normal_matrix(points, means, cov):\n n_points, dim = points.shape\n n_components, _ = means.shape\n precisions_chol = _compute_precisions_chol(cov)\n log_det_chol = np.log(np.linalg.det(precisions_chol))\n log_prob = np.empty((n_points, n_components))\n for k, (mu, prec_chol) in enumerate(zip(means, precisions_chol)):\n y = np.dot(points, prec_chol) - np.dot(mu, prec_chol)\n log_prob[:, k] = np.sum(np.square(y), axis=1)\n return -.5 * (dim * np.log(2 * np.pi) + log_prob) + log_det_chol", "def logtrace(m: np.ndarray) -> np.ndarray:\n\n \"\"\" note: performance cannot easily be improve by numba.\n `np.diagonal` not supported by numba 0.52.0\n \"\"\"\n\n return np.sum(np.log(np.diagonal(m, axis1=-2, axis2=-1)), axis=-1)", "def fast_logdet(matrix):\n sign, ld = np.linalg.slogdet(matrix)\n if not sign > 0:\n return -np.inf\n return ld" ]
[ "0.71530575", "0.66780686", "0.64958405", "0.633474", "0.63256425", "0.63216084", "0.6304669", "0.62542933", "0.62441796", "0.6148054", "0.6142754", "0.6142461", "0.611145", "0.60670215", "0.59872395", "0.59844184", "0.59607893", "0.5960525", "0.5908303", "0.58673406", "0.58568215", "0.58389884", "0.5831036", "0.5817897", "0.5817897", "0.5815202", "0.5759275", "0.57587004", "0.5742139", "0.572931" ]
0.71583116
0
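A minimal usage sketch of the column normalization defined in this row, assuming NumPy and scipy.special.logsumexp are available; the 3x4 matrix and the random seed are illustrative:

import numpy as np
from scipy.special import logsumexp

def normalize_log_likelihoods(X):
    # Subtract each column's log-sum-exp so every column of exp(X) sums to 1.
    h, w = np.shape(X)
    return X - np.tile(logsumexp(X, axis=0), (h, 1))

X = np.log(np.random.default_rng(1).random((3, 4)))  # a matrix already in log space
X_norm = normalize_log_likelihoods(X)
print(np.exp(X_norm).sum(axis=0))  # each column sums to 1 (up to floating point)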
Given image patches and an MVN model, return the log likelihood of the patches according to the model.
def MVN_log_likelihood(X, model): D, M = X.shape X_normalized = normalize_log_likelihoods(X.copy()) mvn = multivariate_normal(mean=model.mean, cov=model.cov) return mvn.logpdf(X_normalized.T).sum() # log_2pi = D * np.log(2 * np.pi) # log_det = np.log(np.linalg.det(model.cov)) # residuals = calc_residuals(X_normalized, model.mean, "minus") # mahalanobis_distance = np.dot(np.dot(residuals.T, np.linalg.inv(model.cov)), residuals) # return -0.5 * (log_2pi + log_det + mahalanobis_distance).sum()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GSM_log_likelihood(X, model):\n D, M = X.shape\n k = model.mix.shape[0]\n log_likelihood = 0\n for i in range(M):\n logpdf_X = 0\n for j in range(k):\n mvn = multivariate_normal(cov=model.cov[j, :])\n logpdf_X = mvn.logpdf(x=X[:, i]) * model.mix[j]\n log_likelihood += logpdf_X\n return log_likelihood", "def loglike(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n _, cov_re_logdet = np.linalg.slogdet(cov_re)\n\n # The residuals\n expval = np.dot(self.exog, fe_params)\n resid_all = self.endog - expval\n\n likeval = 0.\n\n # Handle the covariance penalty\n if self.cov_pen is not None:\n likeval -= self.cov_pen.func(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty\n if self.fe_pen is not None:\n likeval -= self.fe_pen.func(fe_params)\n\n xvx, qf = 0., 0.\n for k, lab in enumerate(self.group_labels):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n resid = resid_all[self.row_indices[lab]]\n\n # Part 1 of the log likelihood (for both ML and REML)\n ld = _smw_logdet(1., ex_r, ex2_r, cov_re, cov_re_inv,\n cov_re_logdet)\n likeval -= ld / 2.\n\n # Part 2 of the log likelihood (for both ML and REML)\n u = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv, resid)\n qf += np.dot(resid, u)\n\n # Adjustment for REML\n if self.reml:\n mat = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xvx += np.dot(exog.T, mat)\n\n if self.reml:\n likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.\n _,ld = np.linalg.slogdet(xvx)\n likeval -= ld / 2.\n likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.\n likeval += ((self.n_totobs - self.k_fe) *\n np.log(self.n_totobs - self.k_fe) / 2.)\n likeval -= (self.n_totobs - self.k_fe) / 2.\n else:\n likeval -= self.n_totobs * np.log(qf) / 2.\n likeval -= self.n_totobs * np.log(2 * np.pi) / 2.\n likeval += self.n_totobs * np.log(self.n_totobs) / 2.\n likeval -= self.n_totobs / 2.\n\n return likeval", "def evaluate_GMM_log_likelihood(model, x, y):\n y_pred = model.predict(x)\n \n num_datapoints = len(x)\n output_dim = y.shape[-1]\n num_comp = int(y_pred.shape[-1] / (3*output_dim))\n\n mix_comp_logits = y_pred[:, :num_comp]\n mus = y_pred[:, num_comp:(1+output_dim)*num_comp]\n sigmas = y_pred[:, (1+output_dim)*num_comp:]\n \n # convert logits to categorical distribution - need to itterate through all points\n mix_comp = np.zeros((num_datapoints, num_comp))\n for i in range(num_datapoints):\n mix_comp[i,:] = get_mixture_dist(mix_comp_logits[i,:], num_comp)\n \n log_likelihood = 0\n for i in range(num_comp):\n for j in range(output_dim):\n mse = -0.5*np.sum(mix_comp[:,i]*np.square((y[:,j]-mus[:,(i*output_dim)+j])/sigmas[:,(i*output_dim)+j]))\n sigma_trace = -np.sum(mix_comp[:,i]*np.log(sigmas[:,(i*output_dim)+j]))\n log2pi = -np.sum(mix_comp[:,i]*0.5*output_dim*np.log(2*np.pi))\n\n log_likelihood += mse + sigma_trace + log2pi\n \n avg_log_likelihood = np.round(log_likelihood / num_datapoints, 2)\n print(f'Log likelihood: {avg_log_likelihood}')\n return avg_log_likelihood", "def log_likelihood_function(self, instance):\r\n\r\n try:\r\n return self.fit_interferometer_for_instance(\r\n instance=instance\r\n ).figure_of_merit\r\n except (\r\n exc.PixelizationException,\r\n exc.InversionException,\r\n exc.GridException,\r\n OverflowError,\r\n ) as e:\r\n raise 
exc.FitException from e", "def analyse_loglike(test_data, mods):\r\n l1 = list(map(lambda x: x + ' NB', mods.names))\r\n l1.extend(list(map(lambda x: x + ' ZI', mods.names)))\r\n l1.extend(list(map(lambda x: x + ' P', mods.names)))\r\n loglikeNB = np.array(mods.compute_log_likelihood(test_data, 'NB'))\r\n loglikeZI = np.array(mods.compute_log_likelihood(test_data, 'ZI'))\r\n loglikeP = np.array(mods.compute_log_likelihood(test_data, 'P'))\r\n # loglikeG = np.array(mods.compute_log_likelihood_gaussian(test_data))\r\n # loglikegeo = np.array(mods.compute_log_likelihood_geom(test_data))\r\n LL = np.zeros((loglikeNB.shape[0] * 3, loglikeNB.shape[1]))\r\n LL[:loglikeNB.shape[0], :] = loglikeNB\r\n LL[loglikeNB.shape[0]:2 * loglikeNB.shape[0], :] = loglikeZI\r\n LL[2 * loglikeNB.shape[0]:3 * loglikeNB.shape[0], :] = loglikeP\r\n # LL[3 * loglikeNB.shape[0]:4 * loglikeNB.shape[0], :] = loglikeG\r\n # LL[4 * llzi.shape[0]:, :] = np.array(mods.loglikegeo)\r\n print('mean per model', list(zip(np.ma.masked_invalid(LL).sum(axis=1), map(lambda x: x.mod.name, mods.models))))\r\n print('mean per distrib')\r\n print(np.ma.masked_invalid(LL[:loglikeNB.shape[0], :]).mean())\r\n print(np.ma.masked_invalid(LL[loglikeNB.shape[0]:loglikeNB.shape[0] * 2, :]).mean())\r\n print(np.ma.masked_invalid(LL[loglikeNB.shape[0] * 2:loglikeNB.shape[0] * 3, :]).mean())\r\n # print(np.nanmean(LL[1-np.isinf(LL)], axis=1))\r\n # print(np.nanmean(LL[LL != np.inf],axis=1))\r\n LL[np.isnan(LL)] = 0\r\n LL[np.isinf(LL)] = 0\r\n LL[LL == 0] = -np.inf\r\n r = np.argmax(LL, axis=0)\r\n # LL /= mx\r\n print('mean_best', np.mean(np.ma.masked_invalid(LL[r, range(LL.shape[1])])))\r\n mx = np.max(LL, axis=0)\r\n LL = LL / mx\r\n means = test_data.get_miniOD(None)[test_data.get_stations_col(None)].mean(axis=0).to_numpy()\r\n # for i in np.unique(r):\r\n # print(means[r == i].max())\r\n print('mean NB', means[r < loglikeNB.shape[0]].mean())\r\n print('mean ZI', means[(r < 2 * loglikeNB.shape[0]) * (r > loglikeNB.shape[0])].mean())\r\n print('mean poisson', means[(r < 3 * loglikeNB.shape[0]) * (r > 2 * loglikeNB.shape[0])].mean())\r\n # print('mean ga', means[(r < 4 * llzi.shape[0]) * (r > 3 * llzi.shape[0])].mean())\r\n # print('mean Gaussian', means[r > 3 * loglikeNB.shape[0]].mean())\r\n print('model name, mean trips per model, LL/maxLL, N inf')\r\n for i in range(LL.shape[0]):\r\n print(l1[i], means[r == i].mean(), np.mean(np.ma.masked_invalid(LL[i, :])), np.sum(np.isinf(LL[i, :])))\r\n print(np.ma.corrcoef(np.ma.masked_invalid(LL[i, :]), means[:LL.shape[1]])[1, 0])\r\n plt.hist(r, bins=np.arange(-0.5, 3 * len(mods.names) + 1, 1))\r\n\r\n # l1.extend(list(map(lambda x: x + ' geo', mods.names)))\r\n # l1.extend(list(map(lambda x: x + ' G', mods.names)))\r\n plt.xticks(range(len(l1)), l1, rotation='vertical')\r\n plt.show()\r\n\r\n for m in mods.loglike:\r\n print(m)\r\n print(m[np.logical_not(np.isinf(m))].mean())", "def log_likelihood(self):\r\n return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -\r\n 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)", "def figure_of_merit_from(self, parameter_list):\r\n return self.log_likelihood_from(parameter_list=parameter_list)", "def log_likelihood_function(self, instance: af.ModelInstance) -> float:\r\n model_data = self.model_data_from_instance(instance=instance)\r\n fit = self.fit_from_model_data(model_data=model_data)\r\n return fit.log_likelihood", "def compute_log_likelihood(self,params: ndarray) -> float:\n \n pred_mag = 
self._pred_mag(params,self.times)\n sigma_2 = self.sd_mags**2 \n ln_likelihood = -0.5*np.sum((pred_mag - self.mags)**2 / sigma_2+ np.log(sigma_2))\n\n return ln_likelihood", "def log_likelihood(self, data, reward_model, bias_params):", "def _log_likelihood_poisson(self, df, dfo, n_bins=10):\n cond = df[\"selected_jig\"].values == 1\n range = parameter_ranges['uae'], parameter_ranges['rec']\n\n uae_obs = dfo[\"mueff_av\"].values\n rec_obs = dfo[\"rec_arcsec\"].values\n obs, xedges, yedges = np.histogram2d(uae_obs, rec_obs, range=range, bins=n_bins)\n\n uae_mod = df[\"uae_obs_jig\"].values[cond]\n rec_mod = df[\"rec_obs_jig\"].values[cond]\n model, _, _ = np.histogram2d(uae_mod, rec_mod, range=range, bins=n_bins, density=True)\n\n # Rescale model by number of observations\n model = model.astype(\"float\") * dfo.shape[0]\n\n # Calculate Poisson probability for each bin\n obs = obs.reshape(-1).astype(\"float\")\n model = model.reshape(-1)\n probs = stats.poisson(mu=model).pmf(obs)\n\n # Return overall log likelihood\n return np.log(probs).sum()", "def log_likelihood(self, y_list):\n if self.lambda_mat is None:\n raise ValueError(\"Can't compute model likelihood before fitting!\")\n\n # precision prior distribution given precision hyper-parameters\n prec_distr = stats.gamma(a=self.prec_distr[0],\n scale=self.prec_distr[1] ** -1.0)\n\n # likelihood of projection matrix precision priors given\n # precision hyper-parameters\n lambda_logl = np.sum(\n prec_distr.logpdf(self.lambda_mat['alpha']\n / self.lambda_mat['beta'])\n )\n\n # likelihood of projection matrix values given their precision priors\n a_logl = np.sum(\n stats.norm(loc=0, scale=(self.lambda_mat['beta']\n / self.lambda_mat['alpha']))\n .logpdf(self.A_mat['mu'])\n )\n\n # likelihood of latent feature matrix given kernel matrix,\n # projection matrix, and standard deviation hyper-parameter\n h_logl = np.sum(\n stats.norm(loc=self.A_mat['mu'].transpose() @ self.kernel_mat,\n scale=self.sigma_h)\n .logpdf(self.H_mat['mu'])\n )\n\n # likelihood of bias parameter precision priors given\n # precision hyper-parameters\n weight_prior_logl = np.sum(\n prec_distr.logpdf(np.array(self.weight_priors['alpha'])\n / np.array(self.weight_priors['beta']))\n )\n\n # likelihood of bias parameters given their precision priors\n weight_logl = np.sum(\n stats.norm(loc=0, scale=(np.array(self.weight_priors['beta'])\n / np.array(self.weight_priors['alpha'])))\n .logpdf(self.weight_mat['mu'])\n )\n\n # likelihood of predicted outputs given latent features, bias\n # parameters, and latent feature weight parameters\n f_logl = np.sum(\n stats.norm(\n loc=(self.weight_mat['mu'][1:, :].transpose()\n @ self.H_mat['mu']\n + np.vstack(self.weight_mat['mu'][0, :])),\n scale=1).logpdf(self.output_mat['mu'])\n )\n\n # likelihood of actual output labels given class separation margin\n # and predicted output labels\n y_logl = np.sum(self.get_y_logl(y_list))\n\n return (lambda_logl + a_logl + h_logl\n + weight_prior_logl + weight_logl + f_logl + y_logl)", "def _compute_log_likelihood(self, X, S):\n log_likelihood = 0\n for n in range(self.n_col):\n likelihood = 1\n for k in range(self.n_components):\n likelihood *= self.weights[k] \\\n * multivariate_normal(self.means[k], self.covs[k]).pdf(X[n]) \\\n * poisson(self.rates[k]).pmf(S[n])\n log_likelihood += np.log(likelihood)\n\n return log_likelihood", "def log_likelihood(self):\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * 
self.likelihood.Y)\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB))))\r\n D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))\r\n return A + C + D + self.likelihood.Z", "def _compute_likelihood(self, mus, pmfs):\n expected_counts = pmfs.copy()\n for mu, _p_bin_source in zip(mus, expected_counts):\n _p_bin_source *= mu # Works because of numpy view magic...\n expected_total = np.sum(expected_counts, axis=0)\n\n observed_counts = self.data_events_per_bin.histogram\n\n ret = observed_counts * np.log(expected_total) - expected_total - gammaln(observed_counts + 1.).real\n return np.sum(ret)", "def log_likelihood(self):\n return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z", "def log_likelihood_function(self, instance):\r\n\r\n xvalues = np.arange(self.data.shape[0])\r\n model_data = instance.profile_from_xvalues(xvalues=xvalues)\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n log_likelihood = -0.5 * sum(chi_squared_map)\r\n\r\n return log_likelihood", "def _compute_log_likelihood(self, parameters):\n raise NotImplementedError('')", "def log_likelihood(self, params):\n # extract the parameters\n m1 = params['m1']\n m2 = params['m2']\n DL = params['DL']\n Tc = params['Tc']\n iota = params['iota']\n phic = params['phic']\n psi = params['psi']\n thetaS = params['thetaS']\n phiS = params['phiS']\n\n # calculate the model\n model = self._model(time, m1, m2, DL, Tc, iota, phic, psi, thetaS, phiS)\n\n# # normalisation\n# norm = -0.5*self._ndata*LN2PI - self._ndata*self._logsigma\n\n# # chi-squared\n# chisq = np.sum(((self._data - model)/(self._sigma))**2)\n\n return -np.vdot(self._data - model,self._data - model)", "def ICA_log_likelihood(X, model):\n\n # TODO: YOUR CODE HERE", "def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients", "def lnlike(params, observables, nDraws=1000000):\n #print('checking type ({}) and length ({}) of params in lnlikefxn'.format(type(params),len(params)))\n evalData=generateModelData(params, distance_standoffMid, 
nDraws)\n evalHist, evalBinEdges = np.histogram(evalData[:,3], tof_nBins, tof_range,\n density=True)\n logEvalHist = np.log(evalHist)\n #print(logEvalHist)\n # find what TOFs have zero observed data\n # we'll use this to handle cases where we might wind up with -inf*0\n # likelihood is fine if PDF is 0 somewhere where no data is found\n # without checks though, ln(PDF=0)=-inf, -inf*0 = nan\n # however, if PDF is 0 (lnPDF=-inf) where there IS data, lnL should be -inf\n zeroObservedIndices = np.where(observables == 0)[0]\n for idx in zeroObservedIndices:\n if logEvalHist[idx] == -inf:\n logEvalHist[zeroObservedIndices] = 0\n \n loglike = np.dot(logEvalHist,observables)\n return loglike", "def compute_log_posteriors(points, model):\n log_normal_matrix = _log_normal_matrix(points,\n model['means'],\n model['cov'])\n log_product = log_normal_matrix + model['log_weights'][:, np.newaxis].T\n log_prob_norm = scipy.special.logsumexp(log_product, axis=1)\n log_resp = log_product - log_prob_norm[:, np.newaxis]\n return log_resp", "def train_patches(lmks, imgs, ref, psize, ssize, var=1.0, lmbda=1e-6, mu_init=1e-3, nsamples=1000):\n\n if isinstance(psize, int):\n psize = (psize, psize)\n if isinstance(ssize, int):\n ssize = (ssize, ssize)\n\n n = len(ref) // 2\n ximg = psize[1] + ssize[1]\n yimg = psize[0] + ssize[0]\n wsize = (yimg, ximg)\n\n patches = []\n\n # train each patch model\n for i in range(n):\n print('patch', i+1, 'of', n, '...')\n images = []\n for j in range(lmks.shape[1]):\n im = imgs[j]\n pt = lmks[:,j]\n S = calc_simil(pt, ref)\n A = np.empty((2,3))\n A[:2,:2] = S[:2,:2]\n A[0,2] = pt[2*i] - (A[0,0] * (ximg-1)/2 + A[0,1] * (yimg-1)/2)\n A[1,2] = pt[2*i+1] - (A[1,0] * (ximg-1)/2 + A[1,1] * (yimg-1)/2)\n I = cv2.warpAffine(im, A, wsize, flags=cv2.INTER_LINEAR+cv2.WARP_INVERSE_MAP)\n images.append(I)\n\n patch = train_patch(images, psize, var, lmbda, mu_init, nsamples)\n patches.append(patch)\n\n return np.array(patches)", "def loglikelihood(self):\n raise NotImplementedError(\"To be implemented\")", "def nll_calc(model, codec, highlight, text):\n\n\n past = None\n loss = nn.CrossEntropyLoss(reduction = \"sum\")\n with torch.no_grad():\n ## 1) Compute the logits from `model`;\n ## 2) Return the log-likelihood of the `text` string. 
It should be a Python scalar.\n ## NOTE: for simplicity, you can ignore the likelihood of the first token in `text`\n text = codec.encode(text)[:512]\n length = len(text) - 1\n highlight = codec.encode(highlight)\n text = highlight + text\n text = torch.tensor(text, dtype=torch.long).unsqueeze(0)\n logits, past = model(text, past=past)\n logits = logits[:,len(highlight):-1,:].view(-1, 50257)\n text = text[:,len(highlight)+1:].view(-1,)\n nll = float(loss(logits, text))\n # ppl = np.exp(nll / length)\n return nll", "def llf(self):\n return self.model.loglike(self.params)", "def lnlike(theta, model, x, y, yerr):\n return -np.nansum(0.5 * np.log([2 * np.pi] * len(y)))\\\n -np.nansum(np.log(yerr))\\\n -0.5*np.nansum(((y-model(x, *theta))/yerr)**2)", "def likelihood(self):\n\n # assert the Gaussian process is up to date\n self._gp_up_to_date()\n\n noise_penalization_term = -1 / 2 * np.log(\n np.linalg.det(self.cov_matrix))\n\n y = np.linalg.solve(self.cov_matrix, self.list_y)\n y = np.array(self.list_y) @ y\n data_fidelity_term = -1 / 2 * y\n\n nbr_obs_term = - self.n_observation * np.log(2 * np.pi)\n likelihood = (\n noise_penalization_term + data_fidelity_term + nbr_obs_term\n )\n return likelihood", "def compute_likelihood(self, spectrum=None, same=False):\n\n # If no spectrum given compute it.\n if spectrum is None:\n spectrum = self.__call__()\n\n # Evaluate the model image for the spectrum.\n model = self.rebuild(spectrum, same=same)\n\n # Get data and error attributes.\n data = self.data\n error = self.error\n mask = self.mask\n\n # Compute the log-likelihood for the spectrum.\n with np.errstate(divide='ignore'):\n logl = (model - data) / error\n logl = -np.nansum((logl[~mask])**2)\n\n return logl" ]
[ "0.6581772", "0.6201777", "0.6165119", "0.6070179", "0.5969646", "0.5882966", "0.58730745", "0.586947", "0.58435184", "0.5781459", "0.57760274", "0.5753418", "0.57469696", "0.5681554", "0.56661844", "0.5662688", "0.5646431", "0.5612954", "0.56089133", "0.5596493", "0.55937177", "0.55534184", "0.55401975", "0.55339605", "0.5528629", "0.55118984", "0.5509694", "0.5496478", "0.547197", "0.5469451" ]
0.65176845
1
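The MVN likelihood in the row above reduces to scipy's multivariate_normal.logpdf summed over patch columns (the row also normalizes the patches first, which is omitted here for brevity). A small sketch in which the model container, dimensions, and random data are all assumptions made for illustration:

import numpy as np
from collections import namedtuple
from scipy.stats import multivariate_normal

MVNModel = namedtuple("MVNModel", ["mean", "cov"])  # hypothetical stand-in for the row's model object

D, M = 4, 10                                   # patch dimension and number of patches
rng = np.random.default_rng(2)
X = rng.normal(size=(D, M))                    # columns are patches
model = MVNModel(mean=np.zeros(D), cov=np.eye(D))

# Sum of per-patch log densities under the multivariate normal.
mvn = multivariate_normal(mean=model.mean, cov=model.cov)
print(mvn.logpdf(X.T).sum())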
Given image patches and a GSM model, return the log likelihood of the patches according to the model.
def GSM_log_likelihood(X, model): D, M = X.shape k = model.mix.shape[0] log_likelihood = 0 for i in range(M): logpdf_X = 0 for j in range(k): mvn = multivariate_normal(cov=model.cov[j, :]) logpdf_X = mvn.logpdf(x=X[:, i]) * model.mix[j] log_likelihood += logpdf_X return log_likelihood
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loglike(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n _, cov_re_logdet = np.linalg.slogdet(cov_re)\n\n # The residuals\n expval = np.dot(self.exog, fe_params)\n resid_all = self.endog - expval\n\n likeval = 0.\n\n # Handle the covariance penalty\n if self.cov_pen is not None:\n likeval -= self.cov_pen.func(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty\n if self.fe_pen is not None:\n likeval -= self.fe_pen.func(fe_params)\n\n xvx, qf = 0., 0.\n for k, lab in enumerate(self.group_labels):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n resid = resid_all[self.row_indices[lab]]\n\n # Part 1 of the log likelihood (for both ML and REML)\n ld = _smw_logdet(1., ex_r, ex2_r, cov_re, cov_re_inv,\n cov_re_logdet)\n likeval -= ld / 2.\n\n # Part 2 of the log likelihood (for both ML and REML)\n u = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv, resid)\n qf += np.dot(resid, u)\n\n # Adjustment for REML\n if self.reml:\n mat = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xvx += np.dot(exog.T, mat)\n\n if self.reml:\n likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.\n _,ld = np.linalg.slogdet(xvx)\n likeval -= ld / 2.\n likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.\n likeval += ((self.n_totobs - self.k_fe) *\n np.log(self.n_totobs - self.k_fe) / 2.)\n likeval -= (self.n_totobs - self.k_fe) / 2.\n else:\n likeval -= self.n_totobs * np.log(qf) / 2.\n likeval -= self.n_totobs * np.log(2 * np.pi) / 2.\n likeval += self.n_totobs * np.log(self.n_totobs) / 2.\n likeval -= self.n_totobs / 2.\n\n return likeval", "def evaluate_GMM_log_likelihood(model, x, y):\n y_pred = model.predict(x)\n \n num_datapoints = len(x)\n output_dim = y.shape[-1]\n num_comp = int(y_pred.shape[-1] / (3*output_dim))\n\n mix_comp_logits = y_pred[:, :num_comp]\n mus = y_pred[:, num_comp:(1+output_dim)*num_comp]\n sigmas = y_pred[:, (1+output_dim)*num_comp:]\n \n # convert logits to categorical distribution - need to itterate through all points\n mix_comp = np.zeros((num_datapoints, num_comp))\n for i in range(num_datapoints):\n mix_comp[i,:] = get_mixture_dist(mix_comp_logits[i,:], num_comp)\n \n log_likelihood = 0\n for i in range(num_comp):\n for j in range(output_dim):\n mse = -0.5*np.sum(mix_comp[:,i]*np.square((y[:,j]-mus[:,(i*output_dim)+j])/sigmas[:,(i*output_dim)+j]))\n sigma_trace = -np.sum(mix_comp[:,i]*np.log(sigmas[:,(i*output_dim)+j]))\n log2pi = -np.sum(mix_comp[:,i]*0.5*output_dim*np.log(2*np.pi))\n\n log_likelihood += mse + sigma_trace + log2pi\n \n avg_log_likelihood = np.round(log_likelihood / num_datapoints, 2)\n print(f'Log likelihood: {avg_log_likelihood}')\n return avg_log_likelihood", "def analyse_loglike(test_data, mods):\r\n l1 = list(map(lambda x: x + ' NB', mods.names))\r\n l1.extend(list(map(lambda x: x + ' ZI', mods.names)))\r\n l1.extend(list(map(lambda x: x + ' P', mods.names)))\r\n loglikeNB = np.array(mods.compute_log_likelihood(test_data, 'NB'))\r\n loglikeZI = np.array(mods.compute_log_likelihood(test_data, 'ZI'))\r\n loglikeP = np.array(mods.compute_log_likelihood(test_data, 'P'))\r\n # loglikeG = np.array(mods.compute_log_likelihood_gaussian(test_data))\r\n # loglikegeo = np.array(mods.compute_log_likelihood_geom(test_data))\r\n LL = 
np.zeros((loglikeNB.shape[0] * 3, loglikeNB.shape[1]))\r\n LL[:loglikeNB.shape[0], :] = loglikeNB\r\n LL[loglikeNB.shape[0]:2 * loglikeNB.shape[0], :] = loglikeZI\r\n LL[2 * loglikeNB.shape[0]:3 * loglikeNB.shape[0], :] = loglikeP\r\n # LL[3 * loglikeNB.shape[0]:4 * loglikeNB.shape[0], :] = loglikeG\r\n # LL[4 * llzi.shape[0]:, :] = np.array(mods.loglikegeo)\r\n print('mean per model', list(zip(np.ma.masked_invalid(LL).sum(axis=1), map(lambda x: x.mod.name, mods.models))))\r\n print('mean per distrib')\r\n print(np.ma.masked_invalid(LL[:loglikeNB.shape[0], :]).mean())\r\n print(np.ma.masked_invalid(LL[loglikeNB.shape[0]:loglikeNB.shape[0] * 2, :]).mean())\r\n print(np.ma.masked_invalid(LL[loglikeNB.shape[0] * 2:loglikeNB.shape[0] * 3, :]).mean())\r\n # print(np.nanmean(LL[1-np.isinf(LL)], axis=1))\r\n # print(np.nanmean(LL[LL != np.inf],axis=1))\r\n LL[np.isnan(LL)] = 0\r\n LL[np.isinf(LL)] = 0\r\n LL[LL == 0] = -np.inf\r\n r = np.argmax(LL, axis=0)\r\n # LL /= mx\r\n print('mean_best', np.mean(np.ma.masked_invalid(LL[r, range(LL.shape[1])])))\r\n mx = np.max(LL, axis=0)\r\n LL = LL / mx\r\n means = test_data.get_miniOD(None)[test_data.get_stations_col(None)].mean(axis=0).to_numpy()\r\n # for i in np.unique(r):\r\n # print(means[r == i].max())\r\n print('mean NB', means[r < loglikeNB.shape[0]].mean())\r\n print('mean ZI', means[(r < 2 * loglikeNB.shape[0]) * (r > loglikeNB.shape[0])].mean())\r\n print('mean poisson', means[(r < 3 * loglikeNB.shape[0]) * (r > 2 * loglikeNB.shape[0])].mean())\r\n # print('mean ga', means[(r < 4 * llzi.shape[0]) * (r > 3 * llzi.shape[0])].mean())\r\n # print('mean Gaussian', means[r > 3 * loglikeNB.shape[0]].mean())\r\n print('model name, mean trips per model, LL/maxLL, N inf')\r\n for i in range(LL.shape[0]):\r\n print(l1[i], means[r == i].mean(), np.mean(np.ma.masked_invalid(LL[i, :])), np.sum(np.isinf(LL[i, :])))\r\n print(np.ma.corrcoef(np.ma.masked_invalid(LL[i, :]), means[:LL.shape[1]])[1, 0])\r\n plt.hist(r, bins=np.arange(-0.5, 3 * len(mods.names) + 1, 1))\r\n\r\n # l1.extend(list(map(lambda x: x + ' geo', mods.names)))\r\n # l1.extend(list(map(lambda x: x + ' G', mods.names)))\r\n plt.xticks(range(len(l1)), l1, rotation='vertical')\r\n plt.show()\r\n\r\n for m in mods.loglike:\r\n print(m)\r\n print(m[np.logical_not(np.isinf(m))].mean())", "def log_likelihood_function(self, instance):\r\n\r\n try:\r\n return self.fit_interferometer_for_instance(\r\n instance=instance\r\n ).figure_of_merit\r\n except (\r\n exc.PixelizationException,\r\n exc.InversionException,\r\n exc.GridException,\r\n OverflowError,\r\n ) as e:\r\n raise exc.FitException from e", "def phot_logg(Teff,mag0,BCmag,distmod,Mstar=0.75):\n return 4.44 + np.log10(Mstar) + 4*np.log10(Teff/5780) + 0.4 * (mag0 - distmod + BCmag - 4.75)", "def compute_likelihood(self, spectrum=None, same=False):\n\n # If no spectrum given compute it.\n if spectrum is None:\n spectrum = self.__call__()\n\n # Evaluate the model image for the spectrum.\n model = self.rebuild(spectrum, same=same)\n\n # Get data and error attributes.\n data = self.data\n error = self.error\n mask = self.mask\n\n # Compute the log-likelihood for the spectrum.\n with np.errstate(divide='ignore'):\n logl = (model - data) / error\n logl = -np.nansum((logl[~mask])**2)\n\n return logl", "def lgammln(xx):\r\n\r\n coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,\r\n 0.120858003e-2, -0.536382e-5]\r\n x = xx - 1.0\r\n tmp = x + 5.5\r\n tmp = tmp - (x+0.5)*math.log(tmp)\r\n ser = 1.0\r\n for j in range(len(coeff)):\r\n x = x + 
1\r\n ser = ser + coeff[j]/x\r\n return -tmp + math.log(2.50662827465*ser)", "def figure_of_merit_from(self, parameter_list):\r\n return self.log_likelihood_from(parameter_list=parameter_list)", "def glcm1(image, skin_mask, window_size):\n OFFSET = [1]\n THETA = [0, np.pi/4, np.pi/2, 3*np.pi/4]\n N_GREY_LEVELS = 64\n\n k = np.uint8(np.floor(window_size/2))\n skin_mask_r = skin_mask[k:skin_mask.shape[0]-k,\n k:skin_mask.shape[1]-k].ravel()\n\n patches = extract_patches_2d(image, (window_size, window_size))\n\n memmap_folder = './joblib_memmap'\n\n contrast_filename_memmap = os.path.join(memmap_folder, 'constrast_memmap')\n ASM_filename_memmap = os.path.join(memmap_folder, 'ASM_memmap')\n correlation_filename_memmap = os.path.join(memmap_folder, 'correlation_memmap')\n glcm_mean_filename_memmap = os.path.join(memmap_folder, 'glcm_mean_memmap')\n\n contrast = np.memmap(contrast_filename_memmap, dtype=np.float32,\n shape=(len(patches), 4), mode='w+')\n ASM = np.memmap(ASM_filename_memmap, dtype=np.float64,\n shape=(len(patches), 4), mode='w+')\n correlation = np.memmap(correlation_filename_memmap, dtype=np.float32,\n shape=(len(patches), 4), mode='w+')\n glcm_mean = np.memmap(glcm_mean_filename_memmap, dtype=np.float32,\n shape=(len(patches), 4), mode='w+')\n\n Parallel(n_jobs=1, verbose=1)(\n delayed(glcmCal)(i, patch, window_size, skin_mask_r,\n OFFSET, THETA, N_GREY_LEVELS,\n contrast, ASM, correlation, glcm_mean)\n for i, patch in enumerate(patches)\n )\n\n return contrast, ASM, correlation, glcm_mean", "def ICA_log_likelihood(X, model):\n\n # TODO: YOUR CODE HERE", "def _compute_log_likelihood(self, X, S):\n log_likelihood = 0\n for n in range(self.n_col):\n likelihood = 1\n for k in range(self.n_components):\n likelihood *= self.weights[k] \\\n * multivariate_normal(self.means[k], self.covs[k]).pdf(X[n]) \\\n * poisson(self.rates[k]).pmf(S[n])\n log_likelihood += np.log(likelihood)\n\n return log_likelihood", "def compute_log_likelihood(self,params: ndarray) -> float:\n \n pred_mag = self._pred_mag(params,self.times)\n sigma_2 = self.sd_mags**2 \n ln_likelihood = -0.5*np.sum((pred_mag - self.mags)**2 / sigma_2+ np.log(sigma_2))\n\n return ln_likelihood", "def log_likelihood_ratios(self, groundtype):\n\t\tif groundtype == 'Foreground':\n\t\t\tLLR = self.foreground['LLR']\n\t\telif groundtype == 'Background':\n\t\t\tLLR = self.background['LLR']\n\t\t\n\t\treturn LLR", "def _log_likelihood_colour(self, df, dfo):\n pdf = fit_colour_gaussian(df[\"colour_obs\"].values)\n return np.log(pdf(dfo[\"g_r\"].values)).sum()", "def mark2loglikelihood(psr, Aw, Ar, Si):\n Mmat = psr.Mmat\n \n Cov = Aw**2 * np.eye(len(psr.toas)) + \\\n PL_covmat(psr.toas, Ar, alpha=0.5*(3-Si), fL=1.0/(year*20))\n \n cfC = sl.cho_factor(Cov)\n Cinv = sl.cho_solve(cfC, np.eye(len(psr.toas)))\n ldetC = 2 * np.sum(np.log(np.diag(cfC[0])))\n\n MCM = np.dot(Mmat.T, np.dot(Cinv, Mmat))\n cfM = sl.cho_factor(MCM)\n ldetM = 2 * np.sum(np.log(np.diag(cfM[0])))\n \n wr = np.dot(Cinv, psr.residuals)\n rCr = np.dot(psr.residuals, wr)\n MCr = np.dot(Mmat.T, wr)\n \n return -0.5 * rCr + 0.5 * np.dot(MCr, sl.cho_solve(cfM, MCr)) - \\\n 0.5 * ldetC - 0.5 * ldetM -0.5*len(psr.residuals)*np.log(2*np.pi)", "def likelihoods(self, alleles):\n\n models = self.models_dict[len(alleles)]\n\n F = self.joint_frequencies_combo(alleles)\n\n ### BPH ###\n (((A0, A1),((B0,),)),) = models['BPH'][1].items()\n\n BPH = (A0 / A1) * F[B0]\n\n\n BPH += sum( sum(F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['BPH'][2].items())\n\n if 
len(alleles)>2:\n BPH += sum( sum(F[B0] * sum( F[B1] * F[B2] for (B1, B2) in C[B0]) for B0 in C) * A0 / A1\n for (A0, A1), C in models['BPH'][3].items())\n\n ### SPH ###\n (((A0, A1),((B0,),)),) = models['SPH'][1].items()\n SPH = (A0 / A1) * F[B0]\n\n SPH += sum( sum(F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['SPH'][2].items())\n\n ### DIPLOIDY ###\n (((A0, A1),((B0,),)),) = models['DISOMY'][1].items()\n DISOMY = (A0 / A1) * F[B0]\n\n DISOMY += sum( sum( F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['DISOMY'][2].items())\n\n ### MONOSOMY ###\n ((B0,),) = models['MONOSOMY'][1][(1,1)]\n MONOSOMY = F[B0]\n\n result = likelihoods_tuple(MONOSOMY, DISOMY, SPH, BPH)\n return result", "def llf(self):\n return self.model.loglike(self.params)", "def log_likelihood(self, data, reward_model, bias_params):", "def apply_model(gmm_model, speech_array):\n # given 1 speaker (1 file), figure out\n # assume that each time stamp's log-probs are INDEPENDENT\n return np.sum(gmm_model.score(speech_array))", "def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients", "def lnlike(pars, data, massdata=None, bulk_alpha=False, include_ti=False):\n\n pars = np.asarray(pars)\n\n if massdata is not None:\n mgas, mstar, mstarerr = massdata\n\n #Observational data to use in the likelihood determination\n n_obs = len(data['fe_h'])\n\n #Based on the GCE model used, check if the number of parameters is correct\n n_pars = len(pars)\n w = np.where(pars < 0.0)[0]\n if len(w) > 0: return np.inf\n if gce_params.name == 'Zwind':\n if n_pars == 7:\n if pars[6] > 1.0: return np.inf\n else:\n if pars[5] > 1.0: return np.inf\n\n #Run the GCE model for this given set of model parameters\n model,_ = gce.gce_model(pars)\n n_model = len(model['t'])\n\n #Initialize variables to be used in the likelihood calculation\n norm = np.sqrt(2*np.pi)\n likelihood = 0.\n\n #Consider only timesteps after 7 Myr in the galaxy's evolution, and check that\n #there are a sufficient number of finite values in the abundances returned by the\n #GCE 
model\n where_greater = model['t'] > 0.007\n\n where_finite = np.isfinite(model['eps'][:,mg_index]) &\\\n np.isfinite(model['eps'][:,si_index]) &\\\n np.isfinite(model['eps'][:,ca_index]) &\\\n np.isfinite(model['eps'][:,fe_index])\n if include_ti:\n where_finite = where_finite & np.isfinite(model['eps'][:,ti_index])\n\n where_good = where_greater & where_finite\n if len(model['t'][where_good]) < 10:\n return np.inf\n\n #If the model passes the above criteria, then construct the abundance ratios for\n #[Fe/H], [Mg/Fe], [Si/Fe], [Ca/Fe]\n feh = model['eps'][:,fe_index][where_good]\n mgfe = model['eps'][:,mg_index][where_good] - model['eps'][:,fe_index][where_good]\n sife = model['eps'][:,si_index][where_good] - model['eps'][:,fe_index][where_good]\n cafe = model['eps'][:,ca_index][where_good] - model['eps'][:,fe_index][where_good]\n tife = model['eps'][:,ti_index][where_good] - model['eps'][:,ti_index][where_good]\n\n #Timesteps to consider from the model\n t = model['t'][where_good]\n\n #Calculate the probability with respect to time that a star forms\n dp_dt = model['mdot'][where_good]/model['mstar'][n_model-1]\n dp_dt = dp_dt[dp_dt > 0.]\n dp_dt /= simps(dp_dt, t) #note that the original IDL uses Netwon-Cotes integration here\n\n \"\"\"\n #Do not consider the last 3 Myr of the evolution of the galaxy\n maxt = n_model - 3\n\n #Calculate the integrated ejecta in each element\n ejecta = np.zeros(len(model['eps'][0]))\n for j in range(len(model['eps'][0])):\n\n w = np.where((model['mout'][:,j][7:maxt] > 0.)&(np.isfinite(model['mout'][:,j][7:maxt])))[0] + 7\n if len(w) > 50:\n ejecta[j] = simps(model['mout'][:,j][w], model['t'][w])\n else: ejecta[j] = -999.\n\n #Calculate the total outflow based on the ejecta in each element\n total_outflow = np.sum(ejecta[0:1]) + np.sum(ejecta[3:6])*10**(1.31) + ejecta[7]*10**(0.03)\n\n #Now calculate the integrated mass influx\n w = np.where((model['f_in'][7:maxt] > 0.)&(np.isfinite(model['f_in'][7:maxt])))[0] + 7\n if len(w) > 50:\n integrated_mass = simps(model['f_in'][w], model['t'][w]) + model['mgas'][0]\n else: integrated_mass = -999\n \"\"\"\n\n #Calculate the total mass based on the luminosity and the mass-to-light ratio\n #assumed for the observed dwarf galaxies\n #bdm_ratio = integrated_mass/total_mass\n\n #Now loop through the observational data dictionary provided in the input\n #parameters, where i is the index for a given red giant star in the galaxy,\n #and determine the likelihood as compared to the model for each\n #instance based on simultaneously using [Fe/H], [Mg/Fe], [Si/Fe], and [Ca/Fe]\n\n print(massdata, bulk_alpha, include_ti)\n for i in range(n_obs):\n\n feh_dist = ((feh - data['fe_h'][i])/data['e_fe_h'][i])**2.\n\n if bulk_alpha:\n\n if include_ti:\n alphafe = (mgfe + cafe + sife + tife)/4.\n else:\n alphafe = (mgfe + cafe + sife)/3.\n\n alphafe_dist = ((alphafe - data['alpha_fe'][i])/data['e_alpha_fe'][i])**2.\n\n dist_arr = [feh_dist, alphafe_dist]\n err_arr = [data['e_fe_h'][i], data['e_alpha_fe'][i]]\n\n else:\n\n mgfe_dist = ((mgfe - data['mg_fe'][i])/data['e_mg_fe'][i])**2.\n cafe_dist = ((cafe - data['ca_fe'][i])/data['e_ca_fe'][i])**2.\n sife_dist = ((sife - data['si_fe'][i])/data['e_si_fe'][i])**2.\n\n dist_arr = [feh_dist, mgfe_dist, sife_dist, cafe_dist]\n\n err_arr = [data['e_fe_h'][i], data['e_mg_fe'][i], data['e_si_fe'][i],\n data['e_ca_fe'][i]]\n\n if include_ti:\n tife_dist = ((tife - data['ti_fe'][i])/data['e_ti_fe'][i])**2.\n dist_arr.append(tife_dist)\n err_arr.append(data['e_ti_fe'][i])\n\n err_mult = 
np.nanprod(err_arr)\n dist = np.nansum(dist_arr, axis=0)\n\n lfunc = np.exp(-0.5*dist) / (err_mult * norm**4.)\n\n int_i = simps(lfunc*dp_dt, t)\n if (np.isfinite(int_i)) and (int_i >= 0.):\n likelihood -= np.log(int_i)\n else:\n likelihood += 5.\n\n #Now add terms to the likelihood based on the stellar mass in the GCE model and the\n #observed gas mass as described in Eq. 17 of Kirby et al 2011b\n if massdata is not None:\n\n #Now calculate the amount of gas remaining in the galaxy at the end of the GCE model\n remaining_gas = model['mgas'][n_model-1]\n if np.abs(model['mgas'][n_model-2] - remaining_gas) > 0.5*remaining_gas:\n remaining_gas = 0.\n if remaining_gas < 0.: remaining_gas = 0.\n\n z0_mstar_term = 0.5*((model['mstar'][n_model-1] - mstar)/mstarerr)**2.\n delta_mgas = 1.e3\n z0_mgas_term = 0.5*((remaining_gas - mgas)/delta_mgas)**2. #assuming observed gas mass of 0 Msun at z = 0\n\n likelihood += 0.1*n_obs*(z0_mstar_term + z0_mgas_term + np.log(norm**2. * mstarerr * delta_mgas))\n\n if (model['mstar'][n_model-1] < delta_mgas): likelihood += 3.e6\n\n return likelihood", "def calculate_groupLLR(self, groundtype, groupname):\n\t\t#Load in data to compute group LLR for\n\t\tif groundtype == 'Foreground':\n\t\t\tdata = self.foreground[groupname]['data']\n\t\telif groundtype == 'Background':\n\t\t\tdata = self.background[groupname]['data']\n\t\t\n\t\t#Find number of parameters within group\n\t\tn_params = self.signal[groupname]['dimension']\n\t\t\n\t\t#Divide data points into those which need interpolation and those that need extrapolation\n\t\tinterp_array_sig = np.product((data >= self.signal[groupname]['interp range'][:,0]) * (data <= self.signal[groupname]['interp range'][:,1]), axis=-1, dtype=bool)\n\t\textrap_array_sig = ~interp_array_sig\n\t\t\n\t\tinterp_array_noise = np.product((data >= self.noise[groupname]['interp range'][:,0]) * (data <= self.noise[groupname]['interp range'][:,1]), axis=-1, dtype=bool)\n\t\textrap_array_noise = ~interp_array_noise\n\t\t\n\t\t#Initialize arrays to store likelihood values\n\t\tlikelihood_signal = np.zeros(len(data))\n\t\tlikelihood_noise = np.zeros(len(data))\n\t\t\n\t\t#Get necessary coordinates and values\n\t\tcoords_sig = self.signal[groupname]['KDE'][0]\n\t\tvalues_sig = self.signal[groupname]['KDE'][1]\n\t\tcoords_noise = self.noise[groupname]['KDE'][0]\n\t\tvalues_noise = self.noise[groupname]['KDE'][1]\t\t\n\t\t\t\t\n\t\t#Calculate interpolated likelihoods\n\t\tlikelihood_signal[interp_array_sig] = self.interpolate(known_coords=coords_sig, known_values=values_sig, interp_coords=data[interp_array_sig], groupname=groupname)\n\t\tlikelihood_noise[interp_array_noise] = self.interpolate(known_coords=coords_noise, known_values=values_noise, interp_coords=data[interp_array_noise], groupname=groupname)\n\t\t\n\t\t#Calculate extrapolated likelihoods\n\t\tlikelihood_signal[extrap_array_sig] = self.extrapolate(known_coords=coords_sig, known_values=values_sig, extrap_coords=data[extrap_array_sig], groupname=groupname)\n\t\tlikelihood_noise[extrap_array_noise] = self.extrapolate(known_coords=coords_noise, known_values=values_noise, extrap_coords=data[extrap_array_noise], groupname=groupname)\n\t\t\n\t\treturn np.log10(likelihood_signal) - np.log10(likelihood_noise)", "def log_likelihoodJoint(theta, x, y, data, var, size):\n #unpack the parameters\n #[xpos, ypos]*images) +[amplitude, radius, focus])\n images = len(theta[:-5]) / 2\n peak, radius, focus, width_x, width_y = theta[-5:]\n\n lnL = 0.\n for tmp in xrange(images):\n #X and Y are always in 
pairs\n center_x = theta[2*tmp]\n center_y = theta[2*tmp+1]\n\n #1)Generate a model Airy disc\n amplitude = _amplitudeFromPeak(peak, center_x, center_y, radius,\n x_0=int(size[0]/2.-0.5), y_0=int(size[1]/2.-0.5))\n airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)\n adata = airy.eval(x, y, amplitude, center_x, center_y, radius).reshape(size)\n\n #2)Apply Focus, no normalisation as smoothing\n f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)\n focusdata = f.eval(x, y, 1., center_x, center_y, focus, focus, 0.).reshape(size)\n model = signal.convolve2d(adata, focusdata, mode='same')\n\n #3)Apply CCD diffusion, approximated with a Gaussian -- max = 1 as centred\n CCD = models.Gaussian2D(1., size[0]/2.-0.5, size[1]/2.-0.5, width_x, width_y, 0.)\n CCDdata = CCD.eval(x, y, 1., size[0]/2.-0.5, size[1]/2.-0.5, width_x, width_y, 0.).reshape(size)\n model = signal.convolve2d(model, CCDdata, mode='same').flatten()\n\n #lnL += - 0.5 * np.sum((data[tmp].flatten() - model)**2 / var[tmp].flatten())\n #Gary B. said that this should be from the model not data so recompute var (now contains rn**2)\n var = var[tmp] + model.copy()\n lnL += - 0.5 * np.sum((data[tmp].flatten() - model)**2 / var)\n\n return lnL", "def log_likelihood(self, params):\n # extract the parameters\n m1 = params['m1']\n m2 = params['m2']\n DL = params['DL']\n Tc = params['Tc']\n iota = params['iota']\n phic = params['phic']\n psi = params['psi']\n thetaS = params['thetaS']\n phiS = params['phiS']\n\n # calculate the model\n model = self._model(time, m1, m2, DL, Tc, iota, phic, psi, thetaS, phiS)\n\n# # normalisation\n# norm = -0.5*self._ndata*LN2PI - self._ndata*self._logsigma\n\n# # chi-squared\n# chisq = np.sum(((self._data - model)/(self._sigma))**2)\n\n return -np.vdot(self._data - model,self._data - model)", "def train_patches(lmks, imgs, ref, psize, ssize, var=1.0, lmbda=1e-6, mu_init=1e-3, nsamples=1000):\n\n if isinstance(psize, int):\n psize = (psize, psize)\n if isinstance(ssize, int):\n ssize = (ssize, ssize)\n\n n = len(ref) // 2\n ximg = psize[1] + ssize[1]\n yimg = psize[0] + ssize[0]\n wsize = (yimg, ximg)\n\n patches = []\n\n # train each patch model\n for i in range(n):\n print('patch', i+1, 'of', n, '...')\n images = []\n for j in range(lmks.shape[1]):\n im = imgs[j]\n pt = lmks[:,j]\n S = calc_simil(pt, ref)\n A = np.empty((2,3))\n A[:2,:2] = S[:2,:2]\n A[0,2] = pt[2*i] - (A[0,0] * (ximg-1)/2 + A[0,1] * (yimg-1)/2)\n A[1,2] = pt[2*i+1] - (A[1,0] * (ximg-1)/2 + A[1,1] * (yimg-1)/2)\n I = cv2.warpAffine(im, A, wsize, flags=cv2.INTER_LINEAR+cv2.WARP_INVERSE_MAP)\n images.append(I)\n\n patch = train_patch(images, psize, var, lmbda, mu_init, nsamples)\n patches.append(patch)\n\n return np.array(patches)", "def log_likelihood_function(self, instance: af.ModelInstance) -> float:\r\n model_data = self.model_data_from_instance(instance=instance)\r\n fit = self.fit_from_model_data(model_data=model_data)\r\n return fit.log_likelihood", "def gmmloglik(log_emlik, weights):\n N,_ = log_emlik.shape;\n ll = 0;\n for i in range(N):\n ll += logsumexp(log_emlik[i, :] + np.log(weights));\n return ll", "def gmmloglik(log_emlik, weights):", "def gmmloglik(log_emlik, weights):", "def _compute_likelihood(self, mus, pmfs):\n expected_counts = pmfs.copy()\n for mu, _p_bin_source in zip(mus, expected_counts):\n _p_bin_source *= mu # Works because of numpy view magic...\n expected_total = np.sum(expected_counts, axis=0)\n\n observed_counts = self.data_events_per_bin.histogram\n\n ret = observed_counts * 
np.log(expected_total) - expected_total - gammaln(observed_counts + 1.).real\n return np.sum(ret)" ]
[ "0.6422759", "0.6289507", "0.61328065", "0.59302616", "0.58532107", "0.5736071", "0.5675968", "0.56670296", "0.56350833", "0.556734", "0.5563308", "0.55380976", "0.5481087", "0.5438895", "0.54248834", "0.5403001", "0.5399288", "0.539747", "0.53716975", "0.5363187", "0.53582937", "0.53514105", "0.53486663", "0.5346749", "0.5326208", "0.5323081", "0.53219163", "0.53060806", "0.53060806", "0.52909636" ]
0.68105716
0
Given image patches and an ICA model, return the log likelihood of the patches according to the model.
def ICA_log_likelihood(X, model): # TODO: YOUR CODE HERE
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_likelihood_function(self, instance):\r\n\r\n try:\r\n return self.fit_interferometer_for_instance(\r\n instance=instance\r\n ).figure_of_merit\r\n except (\r\n exc.PixelizationException,\r\n exc.InversionException,\r\n exc.GridException,\r\n OverflowError,\r\n ) as e:\r\n raise exc.FitException from e", "def loglike(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n _, cov_re_logdet = np.linalg.slogdet(cov_re)\n\n # The residuals\n expval = np.dot(self.exog, fe_params)\n resid_all = self.endog - expval\n\n likeval = 0.\n\n # Handle the covariance penalty\n if self.cov_pen is not None:\n likeval -= self.cov_pen.func(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty\n if self.fe_pen is not None:\n likeval -= self.fe_pen.func(fe_params)\n\n xvx, qf = 0., 0.\n for k, lab in enumerate(self.group_labels):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n resid = resid_all[self.row_indices[lab]]\n\n # Part 1 of the log likelihood (for both ML and REML)\n ld = _smw_logdet(1., ex_r, ex2_r, cov_re, cov_re_inv,\n cov_re_logdet)\n likeval -= ld / 2.\n\n # Part 2 of the log likelihood (for both ML and REML)\n u = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv, resid)\n qf += np.dot(resid, u)\n\n # Adjustment for REML\n if self.reml:\n mat = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xvx += np.dot(exog.T, mat)\n\n if self.reml:\n likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.\n _,ld = np.linalg.slogdet(xvx)\n likeval -= ld / 2.\n likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.\n likeval += ((self.n_totobs - self.k_fe) *\n np.log(self.n_totobs - self.k_fe) / 2.)\n likeval -= (self.n_totobs - self.k_fe) / 2.\n else:\n likeval -= self.n_totobs * np.log(qf) / 2.\n likeval -= self.n_totobs * np.log(2 * np.pi) / 2.\n likeval += self.n_totobs * np.log(self.n_totobs) / 2.\n likeval -= self.n_totobs / 2.\n\n return likeval", "def GSM_log_likelihood(X, model):\n D, M = X.shape\n k = model.mix.shape[0]\n log_likelihood = 0\n for i in range(M):\n logpdf_X = 0\n for j in range(k):\n mvn = multivariate_normal(cov=model.cov[j, :])\n logpdf_X = mvn.logpdf(x=X[:, i]) * model.mix[j]\n log_likelihood += logpdf_X\n return log_likelihood", "def log_likelihood(self):\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB))))\r\n D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))\r\n return A + C + D + self.likelihood.Z", "def mark2loglikelihood(psr, Aw, Ar, Si):\n Mmat = psr.Mmat\n \n Cov = Aw**2 * np.eye(len(psr.toas)) + \\\n PL_covmat(psr.toas, Ar, alpha=0.5*(3-Si), fL=1.0/(year*20))\n \n cfC = sl.cho_factor(Cov)\n Cinv = sl.cho_solve(cfC, np.eye(len(psr.toas)))\n ldetC = 2 * np.sum(np.log(np.diag(cfC[0])))\n\n MCM = np.dot(Mmat.T, np.dot(Cinv, Mmat))\n cfM = sl.cho_factor(MCM)\n ldetM = 2 * np.sum(np.log(np.diag(cfM[0])))\n \n wr = np.dot(Cinv, psr.residuals)\n rCr = np.dot(psr.residuals, wr)\n MCr = np.dot(Mmat.T, wr)\n \n return -0.5 * rCr + 0.5 * np.dot(MCr, sl.cho_solve(cfM, MCr)) - \\\n 0.5 * ldetC - 0.5 * ldetM -0.5*len(psr.residuals)*np.log(2*np.pi)", "def log_likelihoodJoint(theta, x, y, data, var, size):\n #unpack the 
parameters\n #[xpos, ypos]*images) +[amplitude, radius, focus])\n images = len(theta[:-5]) / 2\n peak, radius, focus, width_x, width_y = theta[-5:]\n\n lnL = 0.\n for tmp in xrange(images):\n #X and Y are always in pairs\n center_x = theta[2*tmp]\n center_y = theta[2*tmp+1]\n\n #1)Generate a model Airy disc\n amplitude = _amplitudeFromPeak(peak, center_x, center_y, radius,\n x_0=int(size[0]/2.-0.5), y_0=int(size[1]/2.-0.5))\n airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)\n adata = airy.eval(x, y, amplitude, center_x, center_y, radius).reshape(size)\n\n #2)Apply Focus, no normalisation as smoothing\n f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)\n focusdata = f.eval(x, y, 1., center_x, center_y, focus, focus, 0.).reshape(size)\n model = signal.convolve2d(adata, focusdata, mode='same')\n\n #3)Apply CCD diffusion, approximated with a Gaussian -- max = 1 as centred\n CCD = models.Gaussian2D(1., size[0]/2.-0.5, size[1]/2.-0.5, width_x, width_y, 0.)\n CCDdata = CCD.eval(x, y, 1., size[0]/2.-0.5, size[1]/2.-0.5, width_x, width_y, 0.).reshape(size)\n model = signal.convolve2d(model, CCDdata, mode='same').flatten()\n\n #lnL += - 0.5 * np.sum((data[tmp].flatten() - model)**2 / var[tmp].flatten())\n #Gary B. said that this should be from the model not data so recompute var (now contains rn**2)\n var = var[tmp] + model.copy()\n lnL += - 0.5 * np.sum((data[tmp].flatten() - model)**2 / var)\n\n return lnL", "def evaluate_GMM_log_likelihood(model, x, y):\n y_pred = model.predict(x)\n \n num_datapoints = len(x)\n output_dim = y.shape[-1]\n num_comp = int(y_pred.shape[-1] / (3*output_dim))\n\n mix_comp_logits = y_pred[:, :num_comp]\n mus = y_pred[:, num_comp:(1+output_dim)*num_comp]\n sigmas = y_pred[:, (1+output_dim)*num_comp:]\n \n # convert logits to categorical distribution - need to itterate through all points\n mix_comp = np.zeros((num_datapoints, num_comp))\n for i in range(num_datapoints):\n mix_comp[i,:] = get_mixture_dist(mix_comp_logits[i,:], num_comp)\n \n log_likelihood = 0\n for i in range(num_comp):\n for j in range(output_dim):\n mse = -0.5*np.sum(mix_comp[:,i]*np.square((y[:,j]-mus[:,(i*output_dim)+j])/sigmas[:,(i*output_dim)+j]))\n sigma_trace = -np.sum(mix_comp[:,i]*np.log(sigmas[:,(i*output_dim)+j]))\n log2pi = -np.sum(mix_comp[:,i]*0.5*output_dim*np.log(2*np.pi))\n\n log_likelihood += mse + sigma_trace + log2pi\n \n avg_log_likelihood = np.round(log_likelihood / num_datapoints, 2)\n print(f'Log likelihood: {avg_log_likelihood}')\n return avg_log_likelihood", "def analyse_loglike(test_data, mods):\r\n l1 = list(map(lambda x: x + ' NB', mods.names))\r\n l1.extend(list(map(lambda x: x + ' ZI', mods.names)))\r\n l1.extend(list(map(lambda x: x + ' P', mods.names)))\r\n loglikeNB = np.array(mods.compute_log_likelihood(test_data, 'NB'))\r\n loglikeZI = np.array(mods.compute_log_likelihood(test_data, 'ZI'))\r\n loglikeP = np.array(mods.compute_log_likelihood(test_data, 'P'))\r\n # loglikeG = np.array(mods.compute_log_likelihood_gaussian(test_data))\r\n # loglikegeo = np.array(mods.compute_log_likelihood_geom(test_data))\r\n LL = np.zeros((loglikeNB.shape[0] * 3, loglikeNB.shape[1]))\r\n LL[:loglikeNB.shape[0], :] = loglikeNB\r\n LL[loglikeNB.shape[0]:2 * loglikeNB.shape[0], :] = loglikeZI\r\n LL[2 * loglikeNB.shape[0]:3 * loglikeNB.shape[0], :] = loglikeP\r\n # LL[3 * loglikeNB.shape[0]:4 * loglikeNB.shape[0], :] = loglikeG\r\n # LL[4 * llzi.shape[0]:, :] = np.array(mods.loglikegeo)\r\n print('mean per model', list(zip(np.ma.masked_invalid(LL).sum(axis=1), 
map(lambda x: x.mod.name, mods.models))))\r\n print('mean per distrib')\r\n print(np.ma.masked_invalid(LL[:loglikeNB.shape[0], :]).mean())\r\n print(np.ma.masked_invalid(LL[loglikeNB.shape[0]:loglikeNB.shape[0] * 2, :]).mean())\r\n print(np.ma.masked_invalid(LL[loglikeNB.shape[0] * 2:loglikeNB.shape[0] * 3, :]).mean())\r\n # print(np.nanmean(LL[1-np.isinf(LL)], axis=1))\r\n # print(np.nanmean(LL[LL != np.inf],axis=1))\r\n LL[np.isnan(LL)] = 0\r\n LL[np.isinf(LL)] = 0\r\n LL[LL == 0] = -np.inf\r\n r = np.argmax(LL, axis=0)\r\n # LL /= mx\r\n print('mean_best', np.mean(np.ma.masked_invalid(LL[r, range(LL.shape[1])])))\r\n mx = np.max(LL, axis=0)\r\n LL = LL / mx\r\n means = test_data.get_miniOD(None)[test_data.get_stations_col(None)].mean(axis=0).to_numpy()\r\n # for i in np.unique(r):\r\n # print(means[r == i].max())\r\n print('mean NB', means[r < loglikeNB.shape[0]].mean())\r\n print('mean ZI', means[(r < 2 * loglikeNB.shape[0]) * (r > loglikeNB.shape[0])].mean())\r\n print('mean poisson', means[(r < 3 * loglikeNB.shape[0]) * (r > 2 * loglikeNB.shape[0])].mean())\r\n # print('mean ga', means[(r < 4 * llzi.shape[0]) * (r > 3 * llzi.shape[0])].mean())\r\n # print('mean Gaussian', means[r > 3 * loglikeNB.shape[0]].mean())\r\n print('model name, mean trips per model, LL/maxLL, N inf')\r\n for i in range(LL.shape[0]):\r\n print(l1[i], means[r == i].mean(), np.mean(np.ma.masked_invalid(LL[i, :])), np.sum(np.isinf(LL[i, :])))\r\n print(np.ma.corrcoef(np.ma.masked_invalid(LL[i, :]), means[:LL.shape[1]])[1, 0])\r\n plt.hist(r, bins=np.arange(-0.5, 3 * len(mods.names) + 1, 1))\r\n\r\n # l1.extend(list(map(lambda x: x + ' geo', mods.names)))\r\n # l1.extend(list(map(lambda x: x + ' G', mods.names)))\r\n plt.xticks(range(len(l1)), l1, rotation='vertical')\r\n plt.show()\r\n\r\n for m in mods.loglike:\r\n print(m)\r\n print(m[np.logical_not(np.isinf(m))].mean())", "def log_likelihood_function(self, instance):\r\n\r\n xvalues = np.arange(self.data.shape[0])\r\n model_data = instance.profile_from_xvalues(xvalues=xvalues)\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n log_likelihood = -0.5 * sum(chi_squared_map)\r\n\r\n return log_likelihood", "def get_ICL(self) -> float:\n assert (\n self.trained_successfully_ == True\n ), \"Model not trained successfully\"\n return (\n self.loglikelihood_\n - (self.n_row_clusters - 1) / 2 * np.log(self._nb_rows)\n - (self.n_column_clusters - 1) / 2 * np.log(self._nb_cols)\n - (self.n_column_clusters * self.n_row_clusters)\n / 2\n * np.log(self._nb_cols * self._nb_rows)\n )", "def _log_likelihood_poisson(self, df, dfo, n_bins=10):\n cond = df[\"selected_jig\"].values == 1\n range = parameter_ranges['uae'], parameter_ranges['rec']\n\n uae_obs = dfo[\"mueff_av\"].values\n rec_obs = dfo[\"rec_arcsec\"].values\n obs, xedges, yedges = np.histogram2d(uae_obs, rec_obs, range=range, bins=n_bins)\n\n uae_mod = df[\"uae_obs_jig\"].values[cond]\n rec_mod = df[\"rec_obs_jig\"].values[cond]\n model, _, _ = np.histogram2d(uae_mod, rec_mod, range=range, bins=n_bins, density=True)\n\n # Rescale model by number of observations\n model = model.astype(\"float\") * dfo.shape[0]\n\n # Calculate Poisson probability for each bin\n obs = obs.reshape(-1).astype(\"float\")\n model = model.reshape(-1)\n probs = stats.poisson(mu=model).pmf(obs)\n\n # Return overall log likelihood\n return np.log(probs).sum()", "def log_likelihood(self):\r\n return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -\r\n 0.5 * self.output_dim * 
self.K_logdet + self._model_fit_term() + self.likelihood.Z)", "def log_likelihood(self):\r\n assert not self.likelihood.is_heteroscedastic\r\n A = -0.5*self.batchsize*self.output_dim*(np.log(2.*np.pi) - np.log(self.likelihood.precision))\r\n B = -0.5*self.likelihood.precision*self.output_dim*self.trace_K\r\n Kmm_logdet = 2.*np.sum(np.log(np.diag(self.Lm)))\r\n C = -0.5*self.output_dim*self.data_prop*(Kmm_logdet-self.q_u_logdet - self.num_inducing)\r\n C += -0.5*np.sum(self.LQL * self.B)\r\n D = -0.5*self.likelihood.precision*self.likelihood.trYYT\r\n E = np.sum(self.V*self.projected_mean)\r\n return (A+B+C+D+E)/self.data_prop", "def log_likelihood(self):\r\n if self.likelihood.is_heteroscedastic:\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self._A))\r\n else:\r\n A = -0.5 * self.num_data * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self._A))\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))\r\n D = 0.5 * self.data_fit\r\n self._A_part, self._B_part, self._C_part, self._D_part = A, B, C, D\r\n return A + B + C + D + self.likelihood.Z", "def log_likelihood_function(self, instance: af.ModelInstance) -> float:\r\n model_data = self.model_data_from_instance(instance=instance)\r\n fit = self.fit_from_model_data(model_data=model_data)\r\n return fit.log_likelihood", "def likelihood(self):\n\n # assert the Gaussian process is up to date\n self._gp_up_to_date()\n\n noise_penalization_term = -1 / 2 * np.log(\n np.linalg.det(self.cov_matrix))\n\n y = np.linalg.solve(self.cov_matrix, self.list_y)\n y = np.array(self.list_y) @ y\n data_fidelity_term = -1 / 2 * y\n\n nbr_obs_term = - self.n_observation * np.log(2 * np.pi)\n likelihood = (\n noise_penalization_term + data_fidelity_term + nbr_obs_term\n )\n return likelihood", "def _compute_log_likelihood(self, X, S):\n log_likelihood = 0\n for n in range(self.n_col):\n likelihood = 1\n for k in range(self.n_components):\n likelihood *= self.weights[k] \\\n * multivariate_normal(self.means[k], self.covs[k]).pdf(X[n]) \\\n * poisson(self.rates[k]).pmf(S[n])\n log_likelihood += np.log(likelihood)\n\n return log_likelihood", "def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = 
params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients", "def morph_CII(**kwargs):\n\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results()\n \n L_line = np.log10(getattr(GR,'L_'+p.line+'_sun'))\n SFR = np.log10(getattr(GR,'SFR'))\n\n fig,ax = plt.subplots(figsize=(20,16))\n\n for i in range(len(L_line)):\n \n im = mpimg.imread('plots/sim_data/stamps/%s%s_G%i.png' % (p.sim_name,p.sim_run,i))\n imbox = OffsetImage(im, zoom=0.02)\n ab = AnnotationBbox(imbox, (SFR[i],L_line[i]), pad=0, frameon=False)\n ax.add_artist(ab)\n\n ax.set_xlabel('log ' + getlabel('SFR'))\n ax.set_ylabel('log ' + getlabel(p.line))\n\n if not p.xlim: p.xlim = np.array([-3,4])\n if not p.ylim: \n p.ylim = [np.median(L_line)-6,np.median(L_line)+4]\n ax.set_xlim(p.xlim)\n ax.set_ylim(p.ylim)\n\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/morph_CII_%s%s' % (p.sim_name,p.sim_run),dpi=350)", "def compute_likelihood(self, spectrum=None, same=False):\n\n # If no spectrum given compute it.\n if spectrum is None:\n spectrum = self.__call__()\n\n # Evaluate the model image for the spectrum.\n model = self.rebuild(spectrum, same=same)\n\n # Get data and error attributes.\n data = self.data\n error = self.error\n mask = self.mask\n\n # Compute the log-likelihood for the spectrum.\n with np.errstate(divide='ignore'):\n logl = (model - data) / error\n logl = -np.nansum((logl[~mask])**2)\n\n return logl", "def likelihood(self, log_occr_array=None):\n\n if log_occr_array is not None:\n # Catch invalid occurrence rates for zero likelihood\n try:\n self.log_occr = log_occr_array\n except InvalidOccurrenceRate:\n return 0.0\n\n # Procedure: calculate N_exp in all cases, calculate detection\n # term if there are detections\n\n # N_exp\n N_exp = self.calc_integral() * self._N_stars\n\n # Product terms\n # TODO:Check that the array broadcasting works here\n # Shape of s_terms should be [N_planets, NR, NP]\n s_terms = self.H_array * self.F_array * self.occr\n\n if tf.is_tensor(self.occr):\n ps_terms = tf.reduce_sum(s_terms, axis=(-1, -2))\n product_term = tf.reduce_prod(ps_terms)\n ll_value = product_term * tf.exp(product_term)\n else:\n product_term = s_terms.sum(axis=(-1, -2)).prod()\n ll_value = product_term * np.exp(-N_exp)\n\n # BUG\n if np.isnan(ll_value):\n warnings.warn(\".likelihood value is nan.\")\n import pdb; pdb.set_trace()\n\n # A nan value is possible when some of the occr are too high\n return ll_value if not np.isnan(ll_value) else 0.0", "def log_likelihood(self, params):\n # extract the parameters\n m1 = params['m1']\n m2 = params['m2']\n DL = params['DL']\n Tc = params['Tc']\n iota = params['iota']\n phic = params['phic']\n psi = params['psi']\n thetaS = params['thetaS']\n phiS = params['phiS']\n\n # calculate the model\n model = self._model(time, m1, m2, DL, Tc, iota, phic, psi, thetaS, phiS)\n\n# # normalisation\n# norm = -0.5*self._ndata*LN2PI - self._ndata*self._logsigma\n\n# # chi-squared\n# 
chisq = np.sum(((self._data - model)/(self._sigma))**2)\n\n return -np.vdot(self._data - model,self._data - model)", "def log_likelihood(theta, x, y, data, var, size):\n #unpack the parameters\n peak, center_x, center_y, radius, focus, width_x, width_y = theta\n\n #1)Generate a model Airy disc\n amplitude = _amplitudeFromPeak(peak, center_x, center_y, radius, x_0=int(size[0]/2.-0.5), y_0=int(size[1]/2.-0.5))\n airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)\n adata = airy.eval(x, y, amplitude, center_x, center_y, radius).reshape(size)\n\n #2)Apply Focus\n f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)\n focusdata = f.eval(x, y, 1., center_x, center_y, focus, focus, 0.).reshape(size)\n model = signal.convolve2d(adata, focusdata, mode='same')\n\n #3)Apply CCD diffusion, approximated with a Gaussian\n CCD = models.Gaussian2D(1., size[0]/2.-0.5, size[1]/2.-0.5, width_x, width_y, 0.)\n CCDdata = CCD.eval(x, y, 1., size[0]/2.-0.5, size[1]/2.-0.5, width_x, width_y, 0.).reshape(size)\n model = signal.convolve2d(model, CCDdata, mode='same').flatten()\n\n #true for Gaussian errors\n #lnL = - 0.5 * np.sum((data - model)**2 / var)\n #Gary B. said that this should be from the model not data so recompute var (now contains rn**2)\n var += model.copy()\n lnL = - 0.5 * np.sum((data - model)**2 / var)\n\n return lnL", "def figure_of_merit_from(self, parameter_list):\r\n return self.log_likelihood_from(parameter_list=parameter_list)", "def log_patches(comet, epoch, batch_img1, batch_img2, labels, cd_preds):\n batch_size = batch_img1.shape[0]\n samples = list(range(0, batch_size, 10))\n for sample in samples:\n sample_img1 = _denorm_image(batch_img1, sample)\n sample_img2 = _denorm_image(batch_img2, sample)\n\n # log cd\n cd_figname = 'epoch_'+str(epoch)+'_cd_sample_'+str(sample)\n log_figure(comet,\n sample_img1,\n sample_img2,\n labels[sample].cpu().numpy(),\n cd_preds[sample].cpu().numpy(),\n fig_name=cd_figname)", "def glcm1(image, skin_mask, window_size):\n OFFSET = [1]\n THETA = [0, np.pi/4, np.pi/2, 3*np.pi/4]\n N_GREY_LEVELS = 64\n\n k = np.uint8(np.floor(window_size/2))\n skin_mask_r = skin_mask[k:skin_mask.shape[0]-k,\n k:skin_mask.shape[1]-k].ravel()\n\n patches = extract_patches_2d(image, (window_size, window_size))\n\n memmap_folder = './joblib_memmap'\n\n contrast_filename_memmap = os.path.join(memmap_folder, 'constrast_memmap')\n ASM_filename_memmap = os.path.join(memmap_folder, 'ASM_memmap')\n correlation_filename_memmap = os.path.join(memmap_folder, 'correlation_memmap')\n glcm_mean_filename_memmap = os.path.join(memmap_folder, 'glcm_mean_memmap')\n\n contrast = np.memmap(contrast_filename_memmap, dtype=np.float32,\n shape=(len(patches), 4), mode='w+')\n ASM = np.memmap(ASM_filename_memmap, dtype=np.float64,\n shape=(len(patches), 4), mode='w+')\n correlation = np.memmap(correlation_filename_memmap, dtype=np.float32,\n shape=(len(patches), 4), mode='w+')\n glcm_mean = np.memmap(glcm_mean_filename_memmap, dtype=np.float32,\n shape=(len(patches), 4), mode='w+')\n\n Parallel(n_jobs=1, verbose=1)(\n delayed(glcmCal)(i, patch, window_size, skin_mask_r,\n OFFSET, THETA, N_GREY_LEVELS,\n contrast, ASM, correlation, glcm_mean)\n for i, patch in enumerate(patches)\n )\n\n return contrast, ASM, correlation, glcm_mean", "def _compute_log_likelihood(self, parameters):\n raise NotImplementedError('')", "def log_likelihood(X, Z, variable_types):\n\tk = Z['pi_unconstrained'].shape[1]+1 # the number of mixture components\n\t## We gather the log probabilities of each indiv in 
batch for each mixture component into\n\t## a matrix of size (B x k), where B is the batch size.\n\tlogps = torch.zeros([len(X), k])\n\t## First insert the mixture weight contribution to the array\n\tlogps += logsoftmax(Z['pi_unconstrained'], dim=-1)\n\t## Next loop over the features and sum the contributions to logps\n\tfor i, (key, z) in enumerate(Z.items()):\n\t\tif key not in ['pi_unconstrained']:\n\t\t\tdata = torch.Tensor(X[key].values).unsqueeze(-1)\n\t\t\tdist = variable_types[key]\n\t\t\tif dist == 'Categorical':\n\t\t\t\talpha = softmax(z, dim=-1, additional=-50.)\n\t\t\t\tlogps += Categorical(probs = alpha).log_prob(data)\n\t\t\telif dist == 'Bernoulli':\n\t\t\t\ttheta = z\n\t\t\t\tlogps += Bernoulli(logits = theta).log_prob(data)\n\t\t\telif dist == 'Beta':\n\t\t\t\talpha, beta = torch.exp(z).transpose(0,1)\n\t\t\t\tlogps += Beta(alpha, beta).log_prob(data)\n\t## Compute logsumexp over the mixture components and return the sum over data elements.\n\tlogp = torch.logsumexp(logps, dim=-1)\n\treturn logp.sum()", "def _calculate_ll(self, x):\n observation_log_probs = self._observation_log_probs(x, mask=None)\n forward_log_probs = self._forward(observation_log_probs)\n log_likelihood = logsumexp(\n forward_log_probs[forward_log_probs.shape[0] - 1, :].numpy())\n return log_likelihood", "def log_likelihood(self):\n return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z" ]
[ "0.61544716", "0.6020137", "0.59118134", "0.5785453", "0.5719369", "0.56867313", "0.5613568", "0.5562878", "0.5560809", "0.55222946", "0.55194604", "0.5514143", "0.55073017", "0.54521865", "0.5437854", "0.5399128", "0.5386203", "0.5381811", "0.5379536", "0.53365344", "0.5330276", "0.53061986", "0.52910316", "0.52902365", "0.52795535", "0.5279085", "0.52700216", "0.5269919", "0.52511954", "0.52349156" ]
0.6737606
0
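The ICA_log_likelihood stub in the record above leaves the computation open. As a rough illustration only, here is one way such a log-likelihood could be evaluated, assuming the ICA model holds an orthogonal mixing matrix P and independent per-coordinate 1-D zero-mean Gaussian-mixture parameters; the names P, mix and variances are assumptions for this sketch and do not come from the source.

import numpy as np
from scipy.special import logsumexp
from scipy.stats import norm

def ica_log_likelihood_sketch(X, P, mix, variances):
    # X: D x M patches; P: assumed orthogonal D x D mixing matrix;
    # mix[d]: (K,) mixture weights and variances[d]: (K,) variances of coordinate d.
    S = P.T @ X  # project patches onto the assumed independent coordinates
    ll = 0.0
    for d in range(S.shape[0]):
        # log density of coordinate d under its 1-D zero-mean Gaussian mixture
        comp_logpdf = np.stack([norm.logpdf(S[d], loc=0.0, scale=np.sqrt(v))
                                for v in variances[d]])          # shape (K, M)
        ll += logsumexp(np.log(mix[d])[:, None] + comp_logpdf, axis=0).sum()
    return ll  # |det P| = 1 for an orthogonal P, so no Jacobian term is added

For a general invertible mixing matrix, the sum would need an additional M * log|det(P^-1)| term.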
Denoise every column in Y, assuming an MVN model and Gaussian white noise. The model assumes that y = x + noise where x is generated by a single 0-mean multivariate normal distribution.
def MVN_Denoise(Y, mvn_model, noise_std): return calc_weiner_filter(Y, mvn_model.mean, mvn_model.cov, noise_std)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GSM_Denoise(Y, gsm_model, noise_std):\n X = np.empty(Y.shape)\n k = gsm_model.mix.shape[0]\n I = np.identity(gsm_model.cov[0, :].shape[0])\n for i in range(k):\n mvn = multivariate_normal(cov=(gsm_model.cov[i, :] + ((noise_std**2) * I)))\n upper_arg = gsm_model.mix[i] * (mvn.logpdf(Y[:, i]))\n lower_arg = 0\n for j in range(k):\n inner_mvn = multivariate_normal(cov=(gsm_model.cov[j] + ((noise_std**2) * I)))\n lower_arg += gsm_model.mix[j] * (inner_mvn.logpdf(Y[:, i]))\n c_i = upper_arg / lower_arg\n weiner_i = calc_weiner_filter(Y, np.zeros(Y.shape[0]), gsm_model.cov[i, :], noise_std)\n X += c_i * weiner_i\n return X", "def denoise_image(Y, model, denoise_function, noise_std, patch_size=(8, 8)):\n (h, w) = np.shape(Y)\n cropped_h = h - patch_size[0] + 1\n cropped_w = w - patch_size[1] + 1\n middle_linear_index = int(\n ((patch_size[0] / 2) * patch_size[1]) + (patch_size[1] / 2))\n\n # split the image into columns and denoise the columns:\n noisy_patches = im2col(Y, patch_size)\n denoised_patches = denoise_function(noisy_patches, model, noise_std)\n\n # reshape the denoised columns into a picture:\n x_hat = np.reshape(denoised_patches[middle_linear_index, :],\n [cropped_h, cropped_w])\n\n return x_hat", "def ICA_Denoise(Y, ica_model, noise_std):\n\n # TODO: YOUR CODE HERE", "def test_denoising(image, model, denoise_function,\n noise_range=(0.01, 0.05, 0.1, 0.2), patch_size=(8, 8)):\n h, w = np.shape(image)\n noisy_images = np.zeros((h, w, len(noise_range)))\n denoised_images = []\n cropped_original = crop_image(image, patch_size)\n\n # make the image noisy:\n for i in range(len(noise_range)):\n noisy_images[:, :, i] = image + (\n noise_range[i] * np.random.randn(h, w))\n\n # denoise the image:\n for i in range(len(noise_range)):\n denoised_images.append(\n denoise_image(noisy_images[:, :, i], model, denoise_function,\n noise_range[i], patch_size))\n\n # calculate the MSE for each noise range:\n noisy_mses = {}\n denoised_mses = {}\n for i in range(len(noise_range)):\n print(\"noisy MSE for noise = \" + str(noise_range[i]) + \":\")\n noisy_mse = np.mean((crop_image(noisy_images[:, :, i],\n patch_size) - cropped_original) ** 2)\n noisy_mses[str(noise_range[i])] = noisy_mse\n print(noisy_mse)\n print(\"denoised MSE for noise = \" + str(noise_range[i]) + \":\")\n denoised_mse = np.mean((cropped_original - denoised_images[i]) ** 2)\n denoised_mses[str(noise_range[i])] = denoised_mse\n print(denoised_mse)\n\n plt.figure(figsize=(20, 20))\n plt.axis('off')\n for i in range(len(noise_range)):\n plt.subplot(2, len(noise_range), i + 1, xlabel='Noisy image', xticks=[], yticks=[])\n plt.imshow(noisy_images[:, :, i], cmap='gray')\n plt.subplot(2, len(noise_range), i + 1 + len(noise_range), xlabel='Denoised image', xticks=[], yticks=[])\n plt.imshow(denoised_images[i], cmap='gray')\n plt.show()\n return noisy_mses, denoised_mses", "def _gaussian_for_learn_denosing_model(image):\n return add_gaussian_noise(image, 0, 0.2)", "def EST_NOISE(images):\n num = images.shape[0]\n m_e_bar = sum(images)/num\n m_sigma = np.sqrt(sum((images - m_e_bar)**2)/(num - 1))\n \n return m_sigma", "def sample_from_model_posterior(self, noisy_samples: tf.Tensor)\\\n -> tf.Tensor:\n n_batch = noisy_samples.shape[0]\n samples = tf.transpose(tf.reshape(noisy_samples, [n_batch, -1]))\n y_vector = samples - tf.reshape(self.computed_mean_function, [-1, 1])\n total_covariance_matrix = self._compute_total_covariance_matrix()\n aux_matrix = tf.linalg.solve(total_covariance_matrix, y_vector)\n mu = 
tf.transpose(tf.matmul(self.total_c_phi, aux_matrix))\n gaussian_noise = tf.random_normal(shape=[self.dimensionality *\n self.n_points,\n n_batch],\n dtype=tf.float64)\n f_var = self.total_c_phi\\\n - tf.matmul(self.total_c_phi,\n tf.linalg.solve(total_covariance_matrix,\n self.total_c_phi))\n chol_var = tf.cholesky(f_var)\n noise = tf.transpose(tf.matmul(chol_var, gaussian_noise))\n return tf.reshape(mu + noise,\n [n_batch, self.dimensionality, self.n_points])", "def model_noise(self, model, model_res=None, num_observations=1):\n\n raise NotImplementedError", "def perform_noise_removal(mask):\n trans1 = cv.dilate(mask, KERNEL, iterations=4)\n trans1 = cv.erode(trans1, KERNEL, iterations=5)\n return cv.dilate(trans1, KERNEL, iterations=7)", "def noise(x: np.ndarray) -> np.ndarray:\n\n return np.random.normal(loc=MEAN, scale=1e-2, size=1)", "def model_gauss_noise(sigma, nx, ny=1, nz=1):\n\te = EMData()\n\te.set_size(nx, ny, nz)\n\te.process_inplace(\"testimage.noise.gauss\", {\"sigma\":sigma})\n\treturn e", "def add_noise(Y, sigma):\r\n return Y + np.random.normal(0, sigma, Y.shape)", "def define_noise(self, ctx, model):\n # Only save the mean/cov if we have foregrounds, and they don't update every iteration (otherwise, get them\n # every iter).\n if self.foreground_cores and not any([fg._updating for fg in self.foreground_cores]):\n if not self.use_analytical_noise:\n mean, covariance = self.numerical_covariance(\n nrealisations=self.nrealisations, nthreads=self._nthreads\n )\n else:\n # Still getting mean numerically for now...\n mean = self.numerical_covariance(nrealisations=self.nrealisations, nthreads=self._nthreads)[0]\n\n covariance = self.analytical_covariance(self.u, self.eta,\n np.median(self.frequencies),\n self.frequencies.max() - self.frequencies.min())\n\n thermal_covariance = self.get_thermal_covariance()\n covariance = [x + y for x, y in zip(covariance, thermal_covariance)]\n\n else:\n # Only need thermal variance if we don't have foregrounds, otherwise it will be embedded in the\n # above foreground covariance... 
BUT NOT IF THE FOREGROUND COVARIANCE IS ANALYTIC!!\n # covariance = self.get_thermal_covariance()\n # mean = np.repeat(self.noise_power_expectation, len(self.eta)).reshape((len(self.u), len(self.eta)))\n mean = 0\n covariance = 0\n\n return [{\"mean\": mean, \"covariance\": covariance}]", "def noiseReduction(self):\n pass", "def test_modeling__mapped_noise_from_model_works(y_data, train_Yvar):\n\n # define temporary class\n class Tmp:\n def __init__(self, y_data, train_Yvar):\n self.y_data = y_data\n self.train_Yvar = train_Yvar\n self.train_Y = torch.from_numpy(y_data.values)\n\n self.dtype = torch.double\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n from greattunes._modeling import _mapped_noise_from_model\n\n cls = Tmp(y_data=y_data, train_Yvar=train_Yvar)\n\n # run the method\n train_Yvar_mapped = cls._mapped_noise_from_model()\n\n # assert size\n assert y_data.shape[0] == train_Yvar_mapped.size()[0]\n\n # assert output type\n assert isinstance(train_Yvar_mapped, torch.DoubleTensor)\n\n # special case for cases which are not functions, to assert level of noise\n if not isinstance(cls.train_Yvar, types.FunctionType):\n if isinstance(cls.train_Yvar, torch.DoubleTensor):\n if len(list(cls.train_Yvar.size())) == 0:\n assert train_Yvar_mapped[0,0].item() == train_Yvar.item()\n elif len(list(cls.train_Yvar.size())) == 1:\n assert train_Yvar_mapped[0,0].item() == train_Yvar[0].item()\n elif isinstance(cls.train_Yvar, float) or isinstance(cls.train_Yvar, int):\n assert train_Yvar_mapped[0,0].item() == float(train_Yvar)", "def filter_denoise(self, x):\n b, a = self.c_notch\n return filtfilt(b, a, x)", "def drop_and_noise(image, sigma_d, percentage=0.8):\n M, N = image.shape[:2]\n n = N * M\n p = m.floor(percentage * n)\n image = np.cast[np.float32](image)\n\n missing_pixels_ind = np.random.permutation(n)[:p]\n\n mask = np.ones((M * N,), dtype=np.bool)\n mask[missing_pixels_ind] = 0\n mask = mask.reshape((M, N, 1))\n\n maskf = np.cast[np.float32](mask)\n y_clean = image * maskf\n\n noise = np.random.normal(loc=0, scale=sigma_d, size=image.shape) * maskf\n y = y_clean + noise\n\n return y, mask", "def preprocess(D, W, nsig_lo=10, nsig_hi=30, vmin=None, vmax=None):\n masked = W == 0\n # Calculate the median unmasked pixel value.\n median_value = np.median(D[~masked])\n # Calculate the median non-zero inverse variance.\n median_ivar = np.median(W[~masked])\n # Calculate the corresponding pixel sigma.\n sigma = 1 / np.sqrt(median_ivar)\n if vmin is None:\n vmin = median_value - nsig_lo * sigma\n if vmax is None:\n vmax = median_value + nsig_hi * sigma\n # Clip values to [vmin, vmax].\n D = np.clip(D, vmin, vmax)\n # Set masked pixel values to nan so they are not plotted.\n D[masked] = np.nan\n return D", "def denoise(img, h=10, hForColor=None, templateWindowSize=7, searchWindowSize=21):\n\tif hForColor is None:\n\t\thForColor=h\n\ttmp = img.copy()\n\tif len(img.shape) != 3:\n\t\tdst = cv2.fastNlMeansDenoising(tmp, None, h, templateWindowSize, searchWindowSize)\n\telse:\n\t\tdst = cv2.fastNlMeansDenoisingColored(img, None, h, hForColor, templateWindowSize, searchWindowSize)\n\treturn dst", "def process_noise_dist(self, dt=0.0):\n Q = self.process_noise_cov(dt)\n return dist.MultivariateNormal(\n torch.zeros(Q.shape[-1], dtype=Q.dtype, device=Q.device), Q\n )", "def noise(self, stddev):\n #add noise to weights\n pass", "def learn_MVN(X):\n return MVN_Model(np.mean(X, axis=-1), np.cov(X))", "def _denoise(self, img, weight):\n\n from skimage.filters import 
denoise_tv_chambolle\n\n img = denoise_tv_chambolle(img, weight=weight) * 255\n\n return img.astype(\"uint8\")", "def generate_noise_vector(self, ):\n self.noise.resize_(\n self.batch_size, int(self.opt.nz), 1, 1).normal_(0, 1)\n self.noisev = Variable(self.noise) # TODO: Add volatile=True???", "def make_noisy_images(image):\r\n return apply_poisson_noise(image, random_state=12345)", "def process_noise_cov(self, dt=0.0):\n raise NotImplementedError", "def process_noise_cov(self, dt=0.0):\n raise NotImplementedError", "def process_noise_cov(self, dt=0.0):\n raise NotImplementedError", "def remove_noise(ground_dem_path, out_path, ignore_value=-99999.0):\n ground_np = np.array(gdal.Open(ground_dem_path).ReadAsArray())\n std = ground_np[ground_np != ignore_value].std()\n mean = ground_np[ground_np != ignore_value].mean()\n threshold_value = mean + 1.5 * std\n ground_np[ground_np >= threshold_value] = -99999.0\n save_array_as_geotif(ground_np, ground_dem_path, out_path)", "def denoise(im,U_init,tolerance=0.1,tau=0.125,tv_weight=100):\n\n m,n = im.shape #size of noisy image\n\n # initialize\n U = U_init\n Px = np.zeros((m, n)) #x-component to the dual field\n Py = np.zeros((m, n)) #y-component of the dual field\n error = 1\n\n while (error > tolerance):\n Uold = U\n\n # gradient of primal variable\n GradUx = np.roll(U,-1,axis=1)-U # x-component of U's gradient\n GradUy = np.roll(U,-1,axis=0)-U # y-component of U's gradient\n\n # update the dual varible\n PxNew = Px + (tau/tv_weight)*GradUx # non-normalized update of x-component (dual)\n PyNew = Py + (tau/tv_weight)*GradUy # non-normalized update of y-component (dual)\n NormNew = np.maximum(1,np.sqrt(PxNew**2+PyNew**2))\n\n Px = PxNew/NormNew # update of x-component (dual)\n Py = PyNew/NormNew # update of y-component (dual)\n\n # update the primal variable\n RxPx = np.roll(Px,1,axis=1) # right x-translation of x-component\n RyPy = np.roll(Py,1,axis=0) # right y-translation of y-component\n\n DivP = (Px-RxPx)+(Py-RyPy) # divergence of the dual field.\n U = im + tv_weight*DivP # update of the primal variable\n\n # update of error\n error = np.linalg.norm(U-Uold)/np.sqrt(n*m);\n\n return U,im-U # denoised image and texture residual" ]
[ "0.7035352", "0.6817909", "0.651164", "0.60872686", "0.599298", "0.5948621", "0.5867001", "0.5618468", "0.5616018", "0.55830455", "0.5568835", "0.5553988", "0.5434292", "0.5406714", "0.53591746", "0.53309554", "0.5318294", "0.53102773", "0.5291699", "0.5256051", "0.5250106", "0.52456176", "0.5214275", "0.5209441", "0.5207002", "0.51941115", "0.51941115", "0.51941115", "0.51842505", "0.51719534" ]
0.780044
0
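The MVN_Denoise record above delegates to a calc_weiner_filter helper whose body is not shown anywhere in this dump. A minimal sketch of what such a helper could look like, assuming y = x + noise with x ~ N(mean, cov) and isotropic Gaussian noise of standard deviation noise_std; the name and signature simply mirror the call in the record and are not confirmed by the source.

import numpy as np

def calc_weiner_filter_sketch(Y, mean, cov, noise_std):
    # Y: D x M noisy columns; returns the posterior-mean (Wiener) estimate
    # (cov^-1 + I/sigma^2)^-1 (cov^-1 @ mean + y/sigma^2) for every column.
    D = Y.shape[0]
    sigma2 = noise_std ** 2
    cov_inv = np.linalg.inv(cov)
    A = cov_inv + np.eye(D) / sigma2
    b = (cov_inv @ mean)[:, None] + Y / sigma2
    return np.linalg.solve(A, b)  # D x M matrix of denoised columns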
Denoise every column in Y, assuming a GSM model and Gaussian white noise. The model assumes that y = x + noise where x is generated by a mixture of 0-mean Gaussian components sharing the same covariance up to a scaling factor.
def GSM_Denoise(Y, gsm_model, noise_std): X = np.empty(Y.shape) k = gsm_model.mix.shape[0] I = np.identity(gsm_model.cov[0, :].shape[0]) for i in range(k): mvn = multivariate_normal(cov=(gsm_model.cov[i, :] + ((noise_std**2) * I))) upper_arg = gsm_model.mix[i] * (mvn.logpdf(Y[:, i])) lower_arg = 0 for j in range(k): inner_mvn = multivariate_normal(cov=(gsm_model.cov[j] + ((noise_std**2) * I))) lower_arg += gsm_model.mix[j] * (inner_mvn.logpdf(Y[:, i])) c_i = upper_arg / lower_arg weiner_i = calc_weiner_filter(Y, np.zeros(Y.shape[0]), gsm_model.cov[i, :], noise_std) X += c_i * weiner_i return X
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ICA_Denoise(Y, ica_model, noise_std):\n\n # TODO: YOUR CODE HERE", "def denoise_image(Y, model, denoise_function, noise_std, patch_size=(8, 8)):\n (h, w) = np.shape(Y)\n cropped_h = h - patch_size[0] + 1\n cropped_w = w - patch_size[1] + 1\n middle_linear_index = int(\n ((patch_size[0] / 2) * patch_size[1]) + (patch_size[1] / 2))\n\n # split the image into columns and denoise the columns:\n noisy_patches = im2col(Y, patch_size)\n denoised_patches = denoise_function(noisy_patches, model, noise_std)\n\n # reshape the denoised columns into a picture:\n x_hat = np.reshape(denoised_patches[middle_linear_index, :],\n [cropped_h, cropped_w])\n\n return x_hat", "def MVN_Denoise(Y, mvn_model, noise_std):\n return calc_weiner_filter(Y, mvn_model.mean, mvn_model.cov, noise_std)", "def _gaussian_for_learn_denosing_model(image):\n return add_gaussian_noise(image, 0, 0.2)", "def test_denoising(image, model, denoise_function,\n noise_range=(0.01, 0.05, 0.1, 0.2), patch_size=(8, 8)):\n h, w = np.shape(image)\n noisy_images = np.zeros((h, w, len(noise_range)))\n denoised_images = []\n cropped_original = crop_image(image, patch_size)\n\n # make the image noisy:\n for i in range(len(noise_range)):\n noisy_images[:, :, i] = image + (\n noise_range[i] * np.random.randn(h, w))\n\n # denoise the image:\n for i in range(len(noise_range)):\n denoised_images.append(\n denoise_image(noisy_images[:, :, i], model, denoise_function,\n noise_range[i], patch_size))\n\n # calculate the MSE for each noise range:\n noisy_mses = {}\n denoised_mses = {}\n for i in range(len(noise_range)):\n print(\"noisy MSE for noise = \" + str(noise_range[i]) + \":\")\n noisy_mse = np.mean((crop_image(noisy_images[:, :, i],\n patch_size) - cropped_original) ** 2)\n noisy_mses[str(noise_range[i])] = noisy_mse\n print(noisy_mse)\n print(\"denoised MSE for noise = \" + str(noise_range[i]) + \":\")\n denoised_mse = np.mean((cropped_original - denoised_images[i]) ** 2)\n denoised_mses[str(noise_range[i])] = denoised_mse\n print(denoised_mse)\n\n plt.figure(figsize=(20, 20))\n plt.axis('off')\n for i in range(len(noise_range)):\n plt.subplot(2, len(noise_range), i + 1, xlabel='Noisy image', xticks=[], yticks=[])\n plt.imshow(noisy_images[:, :, i], cmap='gray')\n plt.subplot(2, len(noise_range), i + 1 + len(noise_range), xlabel='Denoised image', xticks=[], yticks=[])\n plt.imshow(denoised_images[i], cmap='gray')\n plt.show()\n return noisy_mses, denoised_mses", "def model_gauss_noise(sigma, nx, ny=1, nz=1):\n\te = EMData()\n\te.set_size(nx, ny, nz)\n\te.process_inplace(\"testimage.noise.gauss\", {\"sigma\":sigma})\n\treturn e", "def remove_noise(emg):\n def butter_bandstop_filter(data, lowcut, highcut, fs, order=2):\n def butter_bandstop(lowcut, highcut, fs, order=2):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n b, a = butter(order, [low, high], btype='bandstop')\n return b, a\n \n b, a = butter_bandstop(lowcut, highcut, fs, order=order)\n y = lfilter(b, a, data)\n return y\n \n # Remove noise from signal\n for channel in [\"emg1\", \"emg2\", \"emg3\", \"emg4\", \"emg5\", \"emg6\"]:\n emg[channel] = butter_bandstop_filter(emg[channel], 49., 51., EMG_F_SAMPLE, order=2)\n return emg", "def noiseReduction(self):\n pass", "def add_noise(Y, sigma):\r\n return Y + np.random.normal(0, sigma, Y.shape)", "def EST_NOISE(images):\n num = images.shape[0]\n m_e_bar = sum(images)/num\n m_sigma = np.sqrt(sum((images - m_e_bar)**2)/(num - 1))\n \n return m_sigma", "def gaussianise_series(self, train_x):\n\n n_batches = 
train_x.shape[0]\n\n for batch in range(n_batches):\n train_x[batch, :, :] = gaussianise(train_x[batch, :, :], target_sigma=1.0)\n\n return train_x", "def random_gaussian_noise(image ):\n sigma = image.std()\n for ch in range(image.shape[2] ):\n sigma = min(sigma, image[:,:,ch].std() )\n image = random_noise(image, var = sigma**2)\n return image", "def define_noise(self, ctx, model):\n # Only save the mean/cov if we have foregrounds, and they don't update every iteration (otherwise, get them\n # every iter).\n if self.foreground_cores and not any([fg._updating for fg in self.foreground_cores]):\n if not self.use_analytical_noise:\n mean, covariance = self.numerical_covariance(\n nrealisations=self.nrealisations, nthreads=self._nthreads\n )\n else:\n # Still getting mean numerically for now...\n mean = self.numerical_covariance(nrealisations=self.nrealisations, nthreads=self._nthreads)[0]\n\n covariance = self.analytical_covariance(self.u, self.eta,\n np.median(self.frequencies),\n self.frequencies.max() - self.frequencies.min())\n\n thermal_covariance = self.get_thermal_covariance()\n covariance = [x + y for x, y in zip(covariance, thermal_covariance)]\n\n else:\n # Only need thermal variance if we don't have foregrounds, otherwise it will be embedded in the\n # above foreground covariance... BUT NOT IF THE FOREGROUND COVARIANCE IS ANALYTIC!!\n # covariance = self.get_thermal_covariance()\n # mean = np.repeat(self.noise_power_expectation, len(self.eta)).reshape((len(self.u), len(self.eta)))\n mean = 0\n covariance = 0\n\n return [{\"mean\": mean, \"covariance\": covariance}]", "def sigmanorm(y):\n y = y.copy()\n y -= y.mean() # set to zero mean\n y /= y.std() # rescale to units of sigma\n return y", "def transform(self, X, y=None):\n np.random.seed(self.random_state)\n noise = np.random.normal(loc=self.loc, scale=self.scale, size=X.size).reshape(X.shape)\n return np.array(X) + noise", "def perform_noise_removal(mask):\n trans1 = cv.dilate(mask, KERNEL, iterations=4)\n trans1 = cv.erode(trans1, KERNEL, iterations=5)\n return cv.dilate(trans1, KERNEL, iterations=7)", "def add_gaussian_noise(X, mu=0, sigma=0.1):\n noise = np.random.normal(0.0, sigma, size=X.size)\n return X + noise.reshape(X.shape)", "def real_blur_and_noise(image, kernel, sigma_d):\n degraded = filter_image(image, kernel, mode=\"valid\", boundary=\"fill\")\n noise = np.random.normal(0.0, sigma_d, degraded.shape).astype(np.float32)\n degraded = degraded + noise\n initial = np.pad(degraded, ((kernel.shape[0] // 2, kernel.shape[0] // 2),\n (kernel.shape[1] // 2, kernel.shape[1] // 2),\n (0, 0)), mode=\"edge\")\n initial = edgeTaper(initial, kernel)\n return initial", "def noise(x: np.ndarray) -> np.ndarray:\n\n return np.random.normal(loc=MEAN, scale=1e-2, size=1)", "def apply_gaussian(X, sigma):\n return np.array([ndimage.gaussian_filter(x, sigma) for x in X])", "def create_synthetic_noise_dataset(cfg):\n from colorednoise import powerlaw_psd_gaussian\n\n betas = np.linspace(cfg['data.mix_synthetic_noise.min_beta'],\n cfg['data.mix_synthetic_noise.max_beta'],\n num=cfg['data.mix_synthetic_noise.num_samples'])\n sample_rate = cfg['data.sample_rate']\n segment_length = 2 * cfg['data.len_min']\n wavs = [powerlaw_psd_gaussian(beta, sample_rate * segment_length)\n for beta in betas]\n wavs = [audio.normalize(wav, low=-1, high=1) for wav in wavs]\n return NoiseDataset(wavs)", "def apply_gaussian_resolution(self,params,data,fwhm=1,dE=0.01,E_max=100):\n print('\\n################### CONVOLUTION #####################\\n')\n 
print(f'\\n\\tConvolution with Gaussian function, FWHM = {fwhm} meV\\n')\n\n data.fwhm = fwhm\n c = fwhm/2.35482\n\n data.dE = dE\n data.E_max = E_max\n data.spectra_E = np.arange(0,data.E_max+data.dE,data.dE)\n data.spectra_num_E = len(data.spectra_E)\n data.spectra = np.zeros((data.spectra_num_E,params.num_Qpoints))\n data.smooth_spectra = np.zeros((data.spectra_num_E,params.num_Qpoints))\n structure_factors = []\n energies = []\n\n ### sum intensity of degenerate bands\n if params.sum_degenerate_bands == True:\n print('\\n\\tSumming degenerate bands before convolution (using convolution dE as tolerance)\\n')\n for q in range(params.num_Qpoints):\n sfac = data.structure_factors[:,q]\n energy = data.frequencies[f'{q}']\n reduced_energies = []\n summed_sfac = []\n while True:\n if len(energy) == 0:\n break\n test_energy = energy[0]\n reduced_energies.append(test_energy)\n indicies = np.intersect1d(np.argwhere(energy <= (test_energy+data.dE)),\n np.argwhere(energy > (test_energy-data.dE)))\n summed_sfac.append(sfac[indicies].sum())\n sfac = np.delete(sfac,indicies)\n energy = np.delete(energy,indicies)\n energies.append(reduced_energies)\n structure_factors.append(summed_sfac)\n else:\n print('\\n\\tWARNING: You should definitely sum degenerate bands!!!\\n')\n for q in range(params.num_Qpoints):\n energies.append(data.frequencies[f'{q}'])\n structure_factors.append(data.structure_factors[:,q])\n\n ### populate array for heatmap\n ### try statement takes care of negative energies\n for q in range(params.num_Qpoints):\n for b in range(len(structure_factors[q][:])):\n try: # if there are negative modes, argwhere returns an empty vector and the slice crashes\n data.spectra[np.argwhere(data.spectra_E <= \n energies[q][b]).max(),q] = structure_factors[q][b]\n except:\n continue\n\n if params.bose_factor == True:\n print('\\n\\tWARNING: Bose factor isnt verified. 
Need to compare to SNAXS.\\n')\n if params.temperature < 5:\n temperature = 5\n else:\n temperature = params.temperature\n inds = np.argwhere(data.spectra_E <= 0.5)\n tmp_e = np.copy(data.spectra_E)\n tmp_e[inds] = 0.5\n bose = 1+1/(np.exp(tmp_e/(constants.kb*1000*temperature))-1)\n bose = np.tile(bose.reshape((data.spectra_num_E,1)),reps=(1,params.num_Qpoints))\n data.spectra = np.multiply(data.spectra,bose)\n data.spectra = data.spectra/np.max(data.spectra)\n\n ### gaussian convolution using for loops, slow but very little memory utilization\n g_energy = np.append(data.spectra_E-data.spectra_E.max(),data.spectra_E[1:])\n gaussian = np.exp(-0.5*g_energy**2/c**2)/c/np.sqrt(2*np.pi)\n gaussian = np.tile(gaussian.reshape((gaussian.shape[0],1)),(1,data.num_Qpoints))\n tmp = np.append(data.spectra,data.spectra,axis=0)[1:,:]\n for e in range(data.spectra_num_E):\n if e%50 == 0:\n print(f'\\t------ {e}/{data.spectra_num_E} -------')\n data.smooth_spectra[e,:] = np.trapz(tmp*np.roll(gaussian,shift=e,axis=0),g_energy,axis=0)\n print('\\n\\tDone convolving!\\n')\n data.smooth_spectra = data.smooth_spectra/np.max(data.smooth_spectra)\n\n# if params.random_background == True:\n# data.smooth_spectra = data.smooth_spectra+(np.random.normal(0,1,\n# (data.smooth_spectra.shape[0],data.smooth_spectra.shape[1])))*0.001\n \n plt.imshow(data.smooth_spectra,origin='lower',aspect='auto',cmap='hot')\n plt.show()", "def sample_from_model_posterior(self, noisy_samples: tf.Tensor)\\\n -> tf.Tensor:\n n_batch = noisy_samples.shape[0]\n samples = tf.transpose(tf.reshape(noisy_samples, [n_batch, -1]))\n y_vector = samples - tf.reshape(self.computed_mean_function, [-1, 1])\n total_covariance_matrix = self._compute_total_covariance_matrix()\n aux_matrix = tf.linalg.solve(total_covariance_matrix, y_vector)\n mu = tf.transpose(tf.matmul(self.total_c_phi, aux_matrix))\n gaussian_noise = tf.random_normal(shape=[self.dimensionality *\n self.n_points,\n n_batch],\n dtype=tf.float64)\n f_var = self.total_c_phi\\\n - tf.matmul(self.total_c_phi,\n tf.linalg.solve(total_covariance_matrix,\n self.total_c_phi))\n chol_var = tf.cholesky(f_var)\n noise = tf.transpose(tf.matmul(chol_var, gaussian_noise))\n return tf.reshape(mu + noise,\n [n_batch, self.dimensionality, self.n_points])", "def filter_denoise(self, x):\n b, a = self.c_notch\n return filtfilt(b, a, x)", "def fit(self, X, y=None):\n self.pre_filter_ = VarianceThreshold()\n self.filter_ = GenericUnivariateSelect(\n mode=self.filter_method, param=self.alpha\n )\n self.numeric_columns = get_numerical_columns(\n data_frame=X,\n ignore_columns=self.ignore_columns,\n uniqueness_thresshold=self.uniqueness_thresshold,\n )\n\n # Remove zero-variance features.\n subframe = X[self.numeric_columns]\n self.pre_filter_.fit(subframe, y)\n constant_mask = ~self.pre_filter_.get_support(indices=False)\n constant_features = subframe.columns[constant_mask]\n\n # Apply `filter_method` on the remaining columns.\n filtered_subframe = subframe.drop(columns=constant_features)\n self.filter_.fit(filtered_subframe, y)\n filter_mask = ~self.filter_.get_support(indices=False)\n insignificant_features = filtered_subframe.columns[filter_mask]\n\n self.columns_to_remove = list(constant_features) + list(insignificant_features)\n print(\n \"Removing {}/{} numeric columns.\".format(\n len(self.columns_to_remove), len(self.numeric_columns)\n )\n )\n return self", "def drop_and_noise(image, sigma_d, percentage=0.8):\n M, N = image.shape[:2]\n n = N * M\n p = m.floor(percentage * n)\n image = 
np.cast[np.float32](image)\n\n missing_pixels_ind = np.random.permutation(n)[:p]\n\n mask = np.ones((M * N,), dtype=np.bool)\n mask[missing_pixels_ind] = 0\n mask = mask.reshape((M, N, 1))\n\n maskf = np.cast[np.float32](mask)\n y_clean = image * maskf\n\n noise = np.random.normal(loc=0, scale=sigma_d, size=image.shape) * maskf\n y = y_clean + noise\n\n return y, mask", "def gaussian_filter(stddev, array):\n\n return astropy.convolution.convolve(\n array, astropy.convolution.Gaussian2DKernel(stddev))", "def scipy_smooth(img, sigma=5):\n return ndimage.gaussian_filter(img, sigma=sigma)", "def gaussian_white(z, mu: 'normal' = 0, sigma: (0.4, 1) = 0.7):\n return 1 - gaussian_black(z, mu, sigma)", "def remove_silence(y, threshold=-50, nb_sample=4096): \r\n from scipy.ndimage.filters import maximum_filter1d \r\n \r\n if np.max(y) != 1.0:\r\n raise ValueError(\"Input signal is expected to be normalised to 1\")\r\n \r\n # Ignore log(0) warnings\r\n np.seterr(divide = 'ignore') \r\n y_db = 20 * np.log10(np.abs(y))\r\n np.seterr(divide = 'warn') \r\n \r\n y_envelope = maximum_filter1d(y_db, nb_sample) \r\n mask = y_envelope >= threshold\r\n y_out = y[mask]\r\n \r\n return(y_out)" ]
[ "0.66855353", "0.6581606", "0.63768095", "0.61107254", "0.5810138", "0.5789084", "0.56934977", "0.5500701", "0.5492713", "0.54288906", "0.54254895", "0.53467345", "0.5328296", "0.532445", "0.5312437", "0.5300838", "0.5274596", "0.5271104", "0.5264344", "0.52400446", "0.5230267", "0.5194188", "0.51900446", "0.51821303", "0.51796114", "0.516335", "0.5160422", "0.514822", "0.51433825", "0.51391506" ]
0.7913218
0
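The GSM_Denoise record above combines per-component Wiener estimates using posterior responsibilities c_i proportional to mix_i * N(y; 0, cov_i + sigma^2 I). Evaluated directly as ratios of densities, such weights can underflow for high-dimensional patches; a log-space sketch follows, with parameter names and shapes assumed for illustration only.

import numpy as np
from scipy.special import logsumexp
from scipy.stats import multivariate_normal

def gsm_posterior_weights_sketch(Y, mix, covs, noise_std):
    # Y: D x M noisy columns; mix: (K,) weights; covs: (K, D, D) scaled covariances.
    D, M = Y.shape
    noise_cov = (noise_std ** 2) * np.eye(D)
    log_w = np.empty((len(mix), M))
    for k in range(len(mix)):
        log_w[k] = np.log(mix[k]) + multivariate_normal.logpdf(
            Y.T, mean=np.zeros(D), cov=covs[k] + noise_cov)
    # normalise in log space so each column of responsibilities sums to one
    return np.exp(log_w - logsumexp(log_w, axis=0, keepdims=True))  # K x M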
Denoise every column in Y, assuming an ICA model and Gaussian white noise. The model assumes that y = x + noise where x is generated by an ICA 0-mean mixture model.
def ICA_Denoise(Y, ica_model, noise_std): # TODO: YOUR CODE HERE
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def denoise_image(Y, model, denoise_function, noise_std, patch_size=(8, 8)):\n (h, w) = np.shape(Y)\n cropped_h = h - patch_size[0] + 1\n cropped_w = w - patch_size[1] + 1\n middle_linear_index = int(\n ((patch_size[0] / 2) * patch_size[1]) + (patch_size[1] / 2))\n\n # split the image into columns and denoise the columns:\n noisy_patches = im2col(Y, patch_size)\n denoised_patches = denoise_function(noisy_patches, model, noise_std)\n\n # reshape the denoised columns into a picture:\n x_hat = np.reshape(denoised_patches[middle_linear_index, :],\n [cropped_h, cropped_w])\n\n return x_hat", "def GSM_Denoise(Y, gsm_model, noise_std):\n X = np.empty(Y.shape)\n k = gsm_model.mix.shape[0]\n I = np.identity(gsm_model.cov[0, :].shape[0])\n for i in range(k):\n mvn = multivariate_normal(cov=(gsm_model.cov[i, :] + ((noise_std**2) * I)))\n upper_arg = gsm_model.mix[i] * (mvn.logpdf(Y[:, i]))\n lower_arg = 0\n for j in range(k):\n inner_mvn = multivariate_normal(cov=(gsm_model.cov[j] + ((noise_std**2) * I)))\n lower_arg += gsm_model.mix[j] * (inner_mvn.logpdf(Y[:, i]))\n c_i = upper_arg / lower_arg\n weiner_i = calc_weiner_filter(Y, np.zeros(Y.shape[0]), gsm_model.cov[i, :], noise_std)\n X += c_i * weiner_i\n return X", "def MVN_Denoise(Y, mvn_model, noise_std):\n return calc_weiner_filter(Y, mvn_model.mean, mvn_model.cov, noise_std)", "def test_denoising(image, model, denoise_function,\n noise_range=(0.01, 0.05, 0.1, 0.2), patch_size=(8, 8)):\n h, w = np.shape(image)\n noisy_images = np.zeros((h, w, len(noise_range)))\n denoised_images = []\n cropped_original = crop_image(image, patch_size)\n\n # make the image noisy:\n for i in range(len(noise_range)):\n noisy_images[:, :, i] = image + (\n noise_range[i] * np.random.randn(h, w))\n\n # denoise the image:\n for i in range(len(noise_range)):\n denoised_images.append(\n denoise_image(noisy_images[:, :, i], model, denoise_function,\n noise_range[i], patch_size))\n\n # calculate the MSE for each noise range:\n noisy_mses = {}\n denoised_mses = {}\n for i in range(len(noise_range)):\n print(\"noisy MSE for noise = \" + str(noise_range[i]) + \":\")\n noisy_mse = np.mean((crop_image(noisy_images[:, :, i],\n patch_size) - cropped_original) ** 2)\n noisy_mses[str(noise_range[i])] = noisy_mse\n print(noisy_mse)\n print(\"denoised MSE for noise = \" + str(noise_range[i]) + \":\")\n denoised_mse = np.mean((cropped_original - denoised_images[i]) ** 2)\n denoised_mses[str(noise_range[i])] = denoised_mse\n print(denoised_mse)\n\n plt.figure(figsize=(20, 20))\n plt.axis('off')\n for i in range(len(noise_range)):\n plt.subplot(2, len(noise_range), i + 1, xlabel='Noisy image', xticks=[], yticks=[])\n plt.imshow(noisy_images[:, :, i], cmap='gray')\n plt.subplot(2, len(noise_range), i + 1 + len(noise_range), xlabel='Denoised image', xticks=[], yticks=[])\n plt.imshow(denoised_images[i], cmap='gray')\n plt.show()\n return noisy_mses, denoised_mses", "def filter_denoise(self, x):\n b, a = self.c_notch\n return filtfilt(b, a, x)", "def _gaussian_for_learn_denosing_model(image):\n return add_gaussian_noise(image, 0, 0.2)", "def EST_NOISE(images):\n num = images.shape[0]\n m_e_bar = sum(images)/num\n m_sigma = np.sqrt(sum((images - m_e_bar)**2)/(num - 1))\n \n return m_sigma", "def perform_noise_removal(mask):\n trans1 = cv.dilate(mask, KERNEL, iterations=4)\n trans1 = cv.erode(trans1, KERNEL, iterations=5)\n return cv.dilate(trans1, KERNEL, iterations=7)", "def define_noise(self, ctx, model):\n # Only save the mean/cov if we have foregrounds, and they don't update every 
iteration (otherwise, get them\n # every iter).\n if self.foreground_cores and not any([fg._updating for fg in self.foreground_cores]):\n if not self.use_analytical_noise:\n mean, covariance = self.numerical_covariance(\n nrealisations=self.nrealisations, nthreads=self._nthreads\n )\n else:\n # Still getting mean numerically for now...\n mean = self.numerical_covariance(nrealisations=self.nrealisations, nthreads=self._nthreads)[0]\n\n covariance = self.analytical_covariance(self.u, self.eta,\n np.median(self.frequencies),\n self.frequencies.max() - self.frequencies.min())\n\n thermal_covariance = self.get_thermal_covariance()\n covariance = [x + y for x, y in zip(covariance, thermal_covariance)]\n\n else:\n # Only need thermal variance if we don't have foregrounds, otherwise it will be embedded in the\n # above foreground covariance... BUT NOT IF THE FOREGROUND COVARIANCE IS ANALYTIC!!\n # covariance = self.get_thermal_covariance()\n # mean = np.repeat(self.noise_power_expectation, len(self.eta)).reshape((len(self.u), len(self.eta)))\n mean = 0\n covariance = 0\n\n return [{\"mean\": mean, \"covariance\": covariance}]", "def _denoise(self, img, weight):\n\n from skimage.filters import denoise_tv_chambolle\n\n img = denoise_tv_chambolle(img, weight=weight) * 255\n\n return img.astype(\"uint8\")", "def noiseReduction(self):\n pass", "def transform(self, X, y=None):\n np.random.seed(self.random_state)\n noise = np.random.normal(loc=self.loc, scale=self.scale, size=X.size).reshape(X.shape)\n return np.array(X) + noise", "def remove_noise(self):\n kernel = np.ones((5, 5), np.uint8)\n self.frame = cv.morphologyEx(self.frame, cv.MORPH_CLOSE, kernel)\n self.frame = cv.morphologyEx(self.frame, cv.MORPH_OPEN, kernel)", "def denoise(im,U_init,tolerance=0.1,tau=0.125,tv_weight=100):\n\n m,n = im.shape #size of noisy image\n\n # initialize\n U = U_init\n Px = np.zeros((m, n)) #x-component to the dual field\n Py = np.zeros((m, n)) #y-component of the dual field\n error = 1\n\n while (error > tolerance):\n Uold = U\n\n # gradient of primal variable\n GradUx = np.roll(U,-1,axis=1)-U # x-component of U's gradient\n GradUy = np.roll(U,-1,axis=0)-U # y-component of U's gradient\n\n # update the dual varible\n PxNew = Px + (tau/tv_weight)*GradUx # non-normalized update of x-component (dual)\n PyNew = Py + (tau/tv_weight)*GradUy # non-normalized update of y-component (dual)\n NormNew = np.maximum(1,np.sqrt(PxNew**2+PyNew**2))\n\n Px = PxNew/NormNew # update of x-component (dual)\n Py = PyNew/NormNew # update of y-component (dual)\n\n # update the primal variable\n RxPx = np.roll(Px,1,axis=1) # right x-translation of x-component\n RyPy = np.roll(Py,1,axis=0) # right y-translation of y-component\n\n DivP = (Px-RxPx)+(Py-RyPy) # divergence of the dual field.\n U = im + tv_weight*DivP # update of the primal variable\n\n # update of error\n error = np.linalg.norm(U-Uold)/np.sqrt(n*m);\n\n return U,im-U # denoised image and texture residual", "def denoise(image, U_init, tolerance=0.1, tau=0.125, tv_weight=100):\n m, n = image.shape # size of noisy image\n\n # initialize\n U = U_init\n Px = image # x-component to the dual field\n Py = image # y-component to the dual field\n error = 1.0\n\n while (error > tolerance):\n U_old = U\n\n # gradient of primal variable\n grad_Ux = roll(U, -1, axis=1) - U # x-component of U's gradient\n grad_Uy = roll(U, -1, axis=0) - U # y-component of U's gradient\n\n # update the dual variable\n Px_new = Px + (tau / tv_weight) * grad_Ux\n Py_new = Py + (tau / tv_weight) * grad_Uy\n 
norm_new = maximum(1, sqrt(Px_new ** 2 + Py_new ** 2))\n\n Px = Px_new / norm_new # update of x-component (dual)\n Py = Py_new / norm_new # update of y-component (dual)\n\n # update the primal variable\n Rx_Px = roll(Px, 1, axis=1) # right x-translation of x-component\n Ry_Py = roll(Py, 1, axis=1) # right y-translation of y-component\n\n DivP = (Px - Rx_Px) + (Py - Ry_Py) # divergence of the dual field\n\n U = image + (tv_weight * DivP) # update of the primal variable\n\n # update of error\n error = linalg.norm(U - U_old) / sqrt(n * m)\n\n return U, (image - U) # denoised image and texture redisual", "def add_noise(Y, sigma):\r\n return Y + np.random.normal(0, sigma, Y.shape)", "def process_noise_cov(self, dt=0.0):\n raise NotImplementedError", "def process_noise_cov(self, dt=0.0):\n raise NotImplementedError", "def process_noise_cov(self, dt=0.0):\n raise NotImplementedError", "def denoise_image(image,dest):\n beta = 1e-3\n eta = 2e-4\n argh = 0.0\n kmax = 10\n data = sign(image.getdata(), {0: -1, 255: 1}) # convert to {-1, 1}\n E, localized_E = E_generator(beta, eta, argh)\n temp_dir = os.path.dirname(os.path.realpath(os.path.join('submits', 'img', dest)))\n y = data.reshape(image.size[::-1]) # convert 1-d array to matrix\n result = simulated_annealing(\n y, kmax, E, localized_E, temp_dir)\n result = sign(result, {-1: 0, 1: 255})\n output_image = Image.fromarray(result).convert('1', dither=Image.NONE)\n return output_image", "def run_denoising(time_series, hpf_before_regression, inds_to_include, interpolation_method,\n noise_comps, clean_comps, high_pass, low_pass, n_skip_vols, TR, filter_order = 6):\n\n\n initial_dvars = dvars(time_series, np.linspace(0,n_skip_vols - 1,n_skip_vols,dtype=int))\n\n #Load the arrays with the data for both the clean and noise components to be used in regression\n clean_comps_pre_filter = clean_comps\n noise_comps_pre_filter = noise_comps\n\n #Apply an initial HPF to everything if necessary - this does not remove scrubbed timepoints,\n #but does skips the first n_skip_vols (which will be set to 0 and not used in subsequent steps)\n if hpf_before_regression != False:\n\n b, a = imaging_utils.construct_filter('highpass', [hpf_before_regression], TR, filter_order)\n\n #start with the clean comps matrix\n if type(clean_comps_pre_filter) != type(False):\n\n clean_comps_post_filter = np.zeros(clean_comps_pre_filter.shape)\n for clean_dim in range(clean_comps_pre_filter.shape[0]):\n\n clean_comps_post_filter[clean_dim, n_skip_vols:] = imaging_utils.apply_filter(b, a, clean_comps_pre_filter[clean_dim, n_skip_vols:])\n\n #this option for both clean/noise indicates there is no input matrix to filter\n else:\n\n clean_comps_post_filter = False\n\n #Move to the noise comps matrix\n if type(noise_comps_pre_filter) != type(False):\n\n noise_comps_post_filter = np.zeros(noise_comps_pre_filter.shape)\n for noise_dim in range(noise_comps_pre_filter.shape[0]):\n\n noise_comps_post_filter[noise_dim, n_skip_vols:] = imaging_utils.apply_filter(b, a, noise_comps_pre_filter[noise_dim, n_skip_vols:])\n\n else:\n\n noise_comps_post_filter = False\n\n #then filter the original time signal\n filtered_time_series = np.zeros(time_series.shape)\n for original_ts_dim in range(time_series.shape[0]):\n\n filtered_time_series[original_ts_dim, n_skip_vols:] = imaging_utils.apply_filter(b, a, time_series[original_ts_dim, n_skip_vols:])\n\n #If you don't want to apply the initial HPF, then\n #just make a copy of the matrices of interest\n else:\n\n clean_comps_post_filter = 
clean_comps_pre_filter\n noise_comps_post_filter = noise_comps_pre_filter\n filtered_time_series = time_series\n\n\n\n\n #Now create the nuisance regression model. Only do this step if\n #the noise_comps_post_filter isn't false.\n good_timepoint_inds = np.where(inds_to_include == True)[0]\n bad_timepoint_inds = np.where(inds_to_include == False)[0]\n\n if type(noise_comps_post_filter) == type(False):\n\n regressed_time_signal = filtered_time_series\n original_std = None\n\n else:\n\n\n #Calculate the standard deviation of the signal before nuisance regression\n original_std = np.std(filtered_time_series[:,good_timepoint_inds], axis=1)\n\n #Weird thing where I need to swap dimensions here...(implemented correctly)\n\n #First add constant/linear trend to the denoising model\n constant = np.ones((1,filtered_time_series.shape[1]))\n linear_trend = np.linspace(0,filtered_time_series.shape[1],num=filtered_time_series.shape[1])\n linear_trend = np.reshape(linear_trend, (1,filtered_time_series.shape[1]))[0]\n noise_comps_post_filter = np.vstack((constant, linear_trend, noise_comps_post_filter))\n\n regressed_time_signal = np.zeros(filtered_time_series.shape).transpose()\n filtered_time_series_T = filtered_time_series.transpose()\n\n #If there aren't any clean components,\n #do a \"hard\" or \"agressive\" denosing\n if type(clean_comps_post_filter) == type(False):\n\n noise_comps_post_filter_T_to_be_used = noise_comps_post_filter[:,good_timepoint_inds].transpose()\n XT_X_Neg1_XT = np.linalg.pinv(noise_comps_post_filter_T_to_be_used)\n\n for temp_time_signal_dim in range(filtered_time_series.shape[0]):\n regressed_time_signal[good_timepoint_inds,temp_time_signal_dim] = imaging_utils.partial_clean_fast(filtered_time_series_T[good_timepoint_inds,temp_time_signal_dim], XT_X_Neg1_XT, noise_comps_post_filter_T_to_be_used)\n\n\n\n #If there are clean components, then\n #do a \"soft\" denoising\n else:\n\n full_matrix_to_be_used = np.vstack((noise_comps_post_filter, clean_comps_post_filter))[:,good_timepoint_inds].transpose()\n noise_comps_post_filter_T_to_be_used = noise_comps_post_filter[:,good_timepoint_inds].transpose()\n XT_X_Neg1_XT = np.linalg.pinv(full_matrix_to_be_used)\n\n for temp_time_signal_dim in range(filtered_time_series.shape[0]):\n regressed_time_signal[good_timepoint_inds,temp_time_signal_dim] = imaging_utils.partial_clean_fast(filtered_time_series_T[good_timepoint_inds,temp_time_signal_dim], XT_X_Neg1_XT, noise_comps_post_filter_T_to_be_used)\n\n\n #Put back into original dimensions\n regressed_time_signal = regressed_time_signal.transpose()\n\n #Calculate the standard deviation of the signal after the nuisance regression\n post_regression_std = np.std(regressed_time_signal[:,good_timepoint_inds], axis=1)\n\n\n #Now apply interpolation\n interpolated_time_signal = np.zeros(regressed_time_signal.shape)\n\n if interpolation_method == 'spectral':\n\n interpolated_time_signal = spectral_interpolation_fast(inds_to_include, regressed_time_signal, TR)\n\n else:\n for dim in range(regressed_time_signal.shape[0]):\n interpolated_time_signal[dim,:] = interpolate(inds_to_include, regressed_time_signal[dim,:], interpolation_method, TR)\n\n #Now if necessary, apply additional filterign:\n if high_pass == False and low_pass == False:\n\n filtered_time_signal = interpolated_time_signal\n\n else:\n\n if high_pass != False and low_pass == False:\n\n b, a = imaging_utils.construct_filter('highpass', [high_pass], TR, filter_order)\n\n elif high_pass == False and low_pass != False:\n\n b, a = 
imaging_utils.construct_filter('lowpass', [low_pass], TR, filter_order)\n\n elif high_pass != False and low_pass != False:\n\n b, a = imaging_utils.construct_filter('bandpass', [high_pass, low_pass], TR, filter_order)\n\n filtered_time_signal = np.zeros(regressed_time_signal.shape)\n for dim in range(regressed_time_signal.shape[0]):\n\n filtered_time_signal[dim,:] = imaging_utils.apply_filter(b,a,regressed_time_signal[dim,:])\n\n final_dvars = dvars(filtered_time_signal, bad_timepoint_inds)\n\n #Now set all the undefined timepoints to Nan\n cleaned_time_signal = filtered_time_signal\n cleaned_time_signal[:,bad_timepoint_inds] = np.nan\n\n output_dict = {}\n denoising_stats = {}\n\n output_dict['cleaned_timeseries'] = cleaned_time_signal\n\n denoising_stats['dvars_pre_cleaning'] = initial_dvars\n denoising_stats['dvars_post_cleaning'] = final_dvars\n\n dvars_stats = {}\n dvars_stats['mean_dvars_pre_cleaning'] = np.mean(initial_dvars[(initial_dvars > 0)])\n dvars_stats['mean_dvars_post_cleaning'] = np.mean(final_dvars[(final_dvars > 0)])\n dvars_stats['max_dvars_pre_cleaning'] = np.max(initial_dvars)\n dvars_stats['max_dvars_post_cleaning'] = np.max(final_dvars)\n dvars_stats['dvars_remaining_ratio'] = np.mean(final_dvars[(final_dvars > 0)])/np.mean(initial_dvars[(initial_dvars > 0)])\n dvars_stats['def'] = 'DVARS calculated before any denoising steps (or filtering), and also after.\\nBad timepoints not included in any stats.'\n denoising_stats['dvars_stats.json'] = dvars_stats\n\n\n if type(original_std) != type(None):\n\n output_dict['std_before_regression'] = original_std\n output_dict['std_after_regression'] = post_regression_std\n\n output_dict['denoising_stats'] = denoising_stats\n\n\n\n return output_dict", "def make_noisy_images(image):\r\n return apply_poisson_noise(image, random_state=12345)", "def fit(self, X, y=None):\n self.pre_filter_ = VarianceThreshold()\n self.filter_ = GenericUnivariateSelect(\n mode=self.filter_method, param=self.alpha\n )\n self.numeric_columns = get_numerical_columns(\n data_frame=X,\n ignore_columns=self.ignore_columns,\n uniqueness_thresshold=self.uniqueness_thresshold,\n )\n\n # Remove zero-variance features.\n subframe = X[self.numeric_columns]\n self.pre_filter_.fit(subframe, y)\n constant_mask = ~self.pre_filter_.get_support(indices=False)\n constant_features = subframe.columns[constant_mask]\n\n # Apply `filter_method` on the remaining columns.\n filtered_subframe = subframe.drop(columns=constant_features)\n self.filter_.fit(filtered_subframe, y)\n filter_mask = ~self.filter_.get_support(indices=False)\n insignificant_features = filtered_subframe.columns[filter_mask]\n\n self.columns_to_remove = list(constant_features) + list(insignificant_features)\n print(\n \"Removing {}/{} numeric columns.\".format(\n len(self.columns_to_remove), len(self.numeric_columns)\n )\n )\n return self", "def blur_ground(X):\n return img_conv(X, kernel_blur)", "def zca_whiten(X):\n cov = np.cov(X.T)\n U, Sigma, V = np.linalg.svd(cov)\n D = np.diag(np.sqrt(1/Sigma)) # square root inverse of singular value matrix\n W = U @ D @ V # rotation matrix\n centered = X - X.mean(axis=0)\n X_white = np.einsum(\"ij, ...j -> ...i\", W, centered)\n return X_white", "def filter(self):\n self.filter_means = [self.m_0]\n self.filter_covs = [self.P_0]\n self.marginal_covs = []\n for t in range(self.data.shape[0]):\n m_bar, P_bar = self.one_step_prediction(self.filter_means[-1], self.filter_covs[-1])\n\n # Update step\n y = self.data[t]\n if not np.isnan(y).any():\n v = y[:, None] - 
self.observation_matrix @ m_bar\n S = self.observation_matrix @ P_bar @ self.observation_matrix.T + self.observation_cov\n K = P_bar @ self.observation_matrix.T @ np.linalg.inv(S)\n\n m_bar = m_bar + K @ v\n P_bar = P_bar - K @ S @ K.T\n\n self.marginal_covs.append(S)\n\n self.filter_means.append(m_bar)\n self.filter_covs.append(P_bar)\n self.filter_means = self.filter_means[1:]\n self.filter_covs = self.filter_covs[1:]", "def remove_noise(image):\n filtered = cv2.absdiff(image.astype(np.uint8), 255,\n cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 41)\n kernel = np.ones((1, 1), np.uint8)\n opening = cv2.morphologyEx(filtered, cv2.MORPH_OPEN, kernel)\n closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\n\n img = image_smoothening(image)\n transform = cv2.bitwise_or(img, closing)\n return transform", "def kalman_filter(x, x_cov, u, u_cov, Z, z_cov):\n for i in range(len(Z)):\n x, x_cov = predict_new_belief(x, x_cov, u, u_cov)\n x, x_cov = incorporate_measurement(x, x_cov, Z[i], z_cov)\n\n return x, x_cov", "def remove_noise(ground_dem_path, out_path, ignore_value=-99999.0):\n ground_np = np.array(gdal.Open(ground_dem_path).ReadAsArray())\n std = ground_np[ground_np != ignore_value].std()\n mean = ground_np[ground_np != ignore_value].mean()\n threshold_value = mean + 1.5 * std\n ground_np[ground_np >= threshold_value] = -99999.0\n save_array_as_geotif(ground_np, ground_dem_path, out_path)", "def noise_filter(image, selem, out=None, mask=None, shift_x=False,\n shift_y=False):\n\n # ensure that the central pixel in the structuring element is empty\n centre_r = int(selem.shape[0] / 2) + shift_y\n centre_c = int(selem.shape[1] / 2) + shift_x\n # make a local copy\n selem_cpy = selem.copy()\n selem_cpy[centre_r, centre_c] = 0\n\n return _apply(_crank8.noise_filter, None, image, selem_cpy, out=out,\n mask=mask, shift_x=shift_x, shift_y=shift_y)" ]
[ "0.69057566", "0.68267506", "0.64559305", "0.5843955", "0.5648864", "0.55616444", "0.54743546", "0.5460113", "0.5383557", "0.5348004", "0.5341139", "0.53100145", "0.5273911", "0.5260276", "0.5248578", "0.52413625", "0.5236249", "0.5236249", "0.5236249", "0.5203331", "0.5165584", "0.5138767", "0.51355124", "0.51339257", "0.51288724", "0.5106439", "0.5104961", "0.50978106", "0.5080637", "0.5078648" ]
0.78871566
0
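An aside on the bare numbers closing this record: the list of floats above carries one similarity score per negative snippet, the standalone 0.78871566 reads as the score of the paired snippet itself, and the final 0 as its rank among all scored candidates. That reading is an interpretation of the dump rather than something the record states, but it is easy to check against the visible values; a minimal sketch (negative list truncated for brevity):

# Assumption: the standalone float is the paired snippet's similarity score and the
# trailing integer is its rank among the negatives (0 = it outscored all of them).
negative_scores = [0.69057566, 0.68267506, 0.64559305]  # first entries; full list above
document_score = 0.78871566

# Rank of the paired snippet = number of negatives scoring at least as high.
rank = sum(score >= document_score for score in negative_scores)
print(rank)  # 0, matching the value recorded in this row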
Operates on the literal level. Works the same as uniform_crossover, except that constraints are not matched on their (meaningless) index in the individual, but on their syntactic similarity. In a first step, each constraint in each individual is matched with the most syntactically similar (by Hamming distance) unmatched constraint from the other individual. Then, for each pair of matched constraints, each variable's occurrence (positive, negative, or absent) is swapped between the two constraints (in place) with probability 0.5.
def matched_uniform_crossover(ind1, ind2): # We calculate the pairwise match between ind1's and ind2's clauses match_matrix = np.zeros((len(ind1), len(ind2))) for i in range(len(ind1)): clause1 = ind1[i] for j in range(len(ind2)): clause2 = ind2[j] curr_syntactic_match = 0 for k in range(len(clause1)): if k != len(clause1) - 1: if clause1[k] == clause2[k]: curr_syntactic_match += 1 else: curr_syntactic_match += (1 - abs(clause1[k] - clause2[k])) match_matrix[i][j] = curr_syntactic_match # Arg-sort the pairwise clause matches from best to worst match matches_ordered = np.dstack(np.unravel_index(np.argsort(match_matrix.ravel())[::-1], (len(ind1), len(ind2))))[0] # Finally match the clauses, making sure that each clause is only matched once # Then perform uniform crossover on matched clauses ind1_matched_clauses = set() ind2_matched_clauses = set() count = 0 for match in matches_ordered: i = match[0] j = match[1] if match_matrix[i][j] >= len(ind1[0])//2: if i not in ind1_matched_clauses and j not in ind2_matched_clauses: count += 1 # Perform the uniform crossover for k in range(len(ind1[i])): if random.random() < 0.5: temp = ind1[i][k] ind1[i][k] = ind2[j][k] ind2[j][k] = temp ind1_matched_clauses.add(i) ind2_matched_clauses.add(j)
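The listing above mutates both parents in place and returns nothing, so a toy call is the quickest way to see the matching-then-swapping behaviour it describes. The constraint encoding is not spelled out in this record, so the sketch below assumes a simple one (one entry per variable with 1 for a positive literal, -1 for a negated one, 0 for absent, plus a trailing weight-like float compared via 1 - |difference|); the values and the seed are invented for illustration, and matched_uniform_crossover is taken to be the function defined just above.

import random
import numpy as np  # the listing above uses np and random, so both must be in scope

random.seed(0)  # make the 0.5-probability swaps reproducible

# Two individuals of k = 2 constraints over 3 variables, each constraint ending
# in a weight-like float (assumed encoding, see note above).
ind1 = [[1, 0, -1, 0.2], [0, 1, 1, 0.9]]
ind2 = [[1, 0, 1, 0.3], [-1, 1, 0, 0.8]]

matched_uniform_crossover(ind1, ind2)  # defined above; mutates both parents in place

print(ind1)
print(ind2)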
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uniform_clause_crossover(ind1, ind2):\n k = len(ind1)\n for i in range(k):\n if random.random() < 0.5:\n temp = ind1[i]\n ind1[i] = ind2[i]\n ind2[i] = temp", "def scramble_clause_crossover(ind1, ind2):\n all_clauses = ind1 + ind2\n random.shuffle(all_clauses)\n ind1[0:len(ind1)] = all_clauses[0:len(ind1)]\n ind2[0:len(ind2)] = all_clauses[len(ind1):len(ind1) + len(ind2)]", "def smart_clause_crossover_infeasibility(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None):\n allow_duplicates = False # allow_duplicates denotes whether the resulting indivuals may contain duplicate clauses\n ind1_hard_constraints = [constr for constr in ind1 if constr[-2] == True]\n ind2_hard_constraints = [constr for constr in ind2 if constr[-2] == True]\n all_hard_constraints = ind1_hard_constraints + ind2_hard_constraints\n ind1_soft_constraints = [constr for constr in ind1 if constr[-2] == False]\n ind2_soft_constraints = [constr for constr in ind2 if constr[-2] == False]\n all_soft_constraints = ind1_soft_constraints + ind2_soft_constraints\n ind1_hard_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1_hard_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind2_hard_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2_hard_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind1_soft_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1_soft_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind2_soft_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2_soft_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n all_hard_coverage_bitvectors = ind1_hard_coverage_bitvectors + ind2_hard_coverage_bitvectors\n all_soft_coverage_bitvectors = ind1_soft_coverage_bitvectors + ind2_soft_coverage_bitvectors\n\n ind1_num_hard = len([constr for constr in ind1 if constr[-2] == True])\n ind2_num_hard = len([constr for constr in ind2 if constr[-2] == True])\n # num_hard = random.choice([ind1_num_hard, ind2_num_hard])\n if ind1_num_hard <= ind2_num_hard:\n num_hard = random.choice(list(range(ind1_num_hard, ind2_num_hard+1)))\n else:\n num_hard = random.choice(list(range(ind2_num_hard, ind1_num_hard + 1)))\n num_soft = len(ind1) - num_hard\n chosen_hard_clauses = []\n chosen_hard_clause_indices = []\n chosen_soft_clauses = []\n chosen_soft_clause_indices = []\n\n # Choose hard constraints\n for i in range(0, num_hard):\n if i == 0:\n combined_hard_coverage_bitvectors = all_hard_coverage_bitvectors\n else:\n combined_hard_coverage_bitvectors = [combine_coverage_bitvectors_hard_constraints(\n chosen_hard_clauses_bitvector, bitvector, examples) for bitvector in all_hard_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_hard_clause_indices:\n for j in range(len(combined_hard_coverage_bitvectors)):\n if all_hard_constraints[index][:-2] == all_hard_constraints[j][:-2]:\n combined_hard_coverage_bitvectors[j] = [0] * len(examples)\n if greedy:\n combined_hard_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_hard_coverage_bitvectors]\n best_hard_coverage = max(combined_hard_coverages)\n best_hard_indices = [i for i in range(len(combined_hard_coverages)) if combined_hard_coverages[i] == best_hard_coverage]\n chosen_hard_clause_index = random.choice(best_hard_indices)\n else:\n coverages = [sum(x) for x in 
combined_hard_coverage_bitvectors]\n if probability_variant == \"linear\":\n sum_coverages = sum(coverages)\n coverages_to_probabilities = [x / sum_coverages for x in coverages]\n elif probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in coverages]\n sum_coverages_squared = sum(coverages_squared)\n coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in coverages]\n elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_hard_clause_index = np.random.choice(list(range(0, len(all_hard_coverage_bitvectors))),\n p=coverages_to_probabilities)\n chosen_hard_coverage_bitvector = combined_hard_coverage_bitvectors[chosen_hard_clause_index]\n if chosen_hard_clause_index < len(ind1_hard_constraints):\n chosen_hard_clause = ind1_hard_constraints[chosen_hard_clause_index]\n else:\n chosen_hard_clause = ind2_hard_constraints[chosen_hard_clause_index - len(ind1_hard_constraints)]\n\n chosen_hard_clauses.append(chosen_hard_clause)\n chosen_hard_clause_indices.append(chosen_hard_clause_index)\n chosen_hard_clauses_bitvector = chosen_hard_coverage_bitvector\n\n # Choose soft constraints\n for i in range(0, num_soft):\n if i == 0:\n combined_soft_coverage_bitvectors = all_soft_coverage_bitvectors\n else:\n combined_soft_coverage_bitvectors = [combine_coverage_bitvectors_soft_constraints(\n chosen_soft_clauses_bitvector, bitvector, examples) for bitvector in all_soft_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_soft_clause_indices:\n for j in range(len(combined_soft_coverage_bitvectors)):\n if all_soft_constraints[index][:-2] == all_soft_constraints[j][:-2]:\n combined_soft_coverage_bitvectors[j] = [0] * len(examples)\n if greedy:\n combined_soft_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_soft_coverage_bitvectors]\n best_soft_coverage = max(combined_soft_coverages)\n best_soft_indices = [i for i in range(len(combined_soft_coverages)) if combined_soft_coverages[i] == best_soft_coverage]\n chosen_soft_clause_index = random.choice(best_soft_indices)\n else:\n coverages = [sum(x) for x in combined_soft_coverage_bitvectors]\n if probability_variant == \"linear\":\n sum_coverages = sum(coverages)\n coverages_to_probabilities = [x / sum_coverages for x in coverages]\n elif probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in coverages]\n sum_coverages_squared = sum(coverages_squared)\n coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in coverages]\n elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_soft_clause_index = np.random.choice(list(range(0, len(all_soft_coverage_bitvectors))),\n p=coverages_to_probabilities)\n chosen_soft_coverage_bitvector = combined_soft_coverage_bitvectors[chosen_soft_clause_index]\n if chosen_soft_clause_index < len(ind1_soft_constraints):\n chosen_soft_clause = ind1_soft_constraints[chosen_soft_clause_index]\n else:\n 
chosen_soft_clause = ind2_soft_constraints[chosen_soft_clause_index - len(ind1_soft_constraints)]\n\n chosen_soft_clauses.append(chosen_soft_clause)\n chosen_soft_clause_indices.append(chosen_soft_clause_index)\n chosen_soft_clauses_bitvector = chosen_soft_coverage_bitvector\n\n for i in range(len(chosen_hard_clauses)):\n hard_clause = chosen_hard_clauses[i]\n # We can safely set ind1 and ind2 to the same computed smart combination, as only one of them will make it\n # to the next generation\n ind1[i] = hard_clause\n ind2[i] = hard_clause\n\n for i in range(len(chosen_soft_clauses)):\n soft_clause = chosen_soft_clauses[i]\n ind1[num_hard+i] = soft_clause\n ind2[num_hard+i] = soft_clause", "def avoid_duplicate_clauses_scramble_clause_crossover(ind1, ind2):\n ind_length = len(ind1)\n ind1_copy = copy.deepcopy(ind1)\n ind2_copy = copy.deepcopy(ind2)\n\n clauses_both_have = []\n remaining_clauses = []\n for clause in ind1:\n try:\n index = ind2_copy.index(clause)\n clauses_both_have.append(clause)\n del ind2_copy[index]\n except ValueError:\n remaining_clauses.append(clause)\n\n for clause in ind2:\n try:\n index = ind1_copy.index(clause)\n del ind1_copy[index]\n except ValueError:\n remaining_clauses.append(clause)\n\n random.shuffle(remaining_clauses)\n ind1[0:len(clauses_both_have)] = clauses_both_have\n ind2[0:len(clauses_both_have)] = clauses_both_have\n ind1[len(clauses_both_have):] = remaining_clauses[:len(remaining_clauses) // 2]\n ind2[len(clauses_both_have):] = remaining_clauses[len(remaining_clauses) // 2:]\n if len(ind1) != ind_length or len(ind2) != ind_length:\n raise Exception(\"Crossover operator altered the length of an individual\")", "def uniformCrossover(self, cl):\n if cons.env.format_data.discrete_action: #Always crossover condition if the phenotype is discrete (if continuous phenotype, half the time phenotype crossover is performed instead)\n self_specified_atts = copy.deepcopy(self.specified_attributes)\n cl_specified_atts = copy.deepcopy(cl.specified_attributes)\n probability = 0.5 #Equal probability for attribute alleles to be exchanged.\n\n #Make list of attribute references appearing in at least one of the parents.-----------------------------\n combined_atts = []\n for i in self_specified_atts:\n combined_atts.append(i)\n for i in cl_specified_atts:\n if i not in combined_atts:\n combined_atts.append(i)\n elif not cons.env.format_data.attribute_info[i][0]: #Attribute specified in both parents, and the attribute is discrete (then no reason to cross over)\n combined_atts.remove(i)\n combined_atts.sort()\n #--------------------------------------------------------------------------------------------------------\n changed = False;\n for att in combined_atts: #Each condition specifies different attributes, so we need to go through all attributes in the dataset.\n att_info = cons.env.format_data.attribute_info[att]\n #-----------------------------\n ref = 0\n #if att in self.specified_attributes:\n if att in self_specified_atts:\n ref += 1\n #if att in cl.specified_attributes:\n if att in cl_specified_atts:\n ref += 1\n #-----------------------------\n\n if ref == 0: #Attribute not specified in either condition (Attribute type makes no difference)\n print(\"Error: UniformCrossover!\")\n pass\n\n elif ref == 1: #Attribute specified in only one condition - do probabilistic switch of whole attribute state (Attribute type makes no difference)\n if att in self_specified_atts and random.random() > probability:\n i = self.specified_attributes.index(att) #reference to the 
position of the attribute in the rule representation\n cl.condition.append(self.condition.pop(i)) #Take attribute from self and add to cl\n cl.specified_attributes.append(att)\n self.specified_attributes.remove(att)\n changed = True #Remove att from self and add to cl\n\n\n if att in cl_specified_atts and random.random() < probability:\n i = cl.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n self.condition.append(cl.condition.pop(i)) #Take attribute from self and add to cl\n self.specified_attributes.append(att)\n cl.specified_attributes.remove(att)\n changed = True #Remove att from cl and add to self.\n\n\n else: #Attribute specified in both conditions - do random crossover between state alleles. The same attribute may be specified at different positions within either classifier\n #-------------------------------------------------------\n # CONTINUOUS ATTRIBUTE\n #-------------------------------------------------------\n if att_info[0]:\n i_cl1 = self.specified_attributes.index(att) #pairs with self (classifier 1)\n i_cl2 = cl.specified_attributes.index(att) #pairs with cl (classifier 2)\n tmp_key = random.randint(0,3) #Make random choice between 4 scenarios, Swap minimums, Swap maximums, Self absorbs cl, or cl absorbs self.\n if tmp_key == 0: #Swap minimum\n temp = self.condition[i_cl1][0]\n self.condition[i_cl1][0] = cl.condition[i_cl2][0]\n cl.condition[i_cl2][0] = temp\n elif tmp_key == 1: #Swap maximum\n temp = self.condition[i_cl1][1]\n self.condition[i_cl1][1] = cl.condition[i_cl2][1]\n cl.condition[i_cl2][1] = temp\n else: #absorb range\n all_list = self.condition[i_cl1] + cl.condition[i_cl2]\n new_min = min(all_list)\n new_max = max(all_list)\n if tmp_key == 2: #self absorbs cl\n self.condition[i_cl1] = [new_min,new_max]\n #Remove cl\n cl.condition.pop(i_cl2)\n cl.specified_attributes.remove(att)\n else: #cl absorbs self\n cl.condition[i_cl2] = [new_min,new_max]\n #Remove self\n self.condition.pop(i_cl1)\n self.specified_attributes.remove(att)\n #-------------------------------------------------------\n # DISCRETE ATTRIBUTE\n #-------------------------------------------------------\n else:\n pass\n tmp_list1 = copy.deepcopy(self_specified_atts)\n tmp_list2 = copy.deepcopy(cl.specified_attributes)\n tmp_list1.sort()\n tmp_list2.sort()\n if changed and (tmp_list1 == tmp_list2):\n changed = False\n\n if self.action != cl.action and random.random() > probability:\n # Switch phenotypes of 2 classifiers if GA is run in match set\n temp = self.action\n self.action = cl.action\n cl.action = temp\n changed = True\n return changed\n #-------------------------------------------------------\n # CONTINUOUS PHENOTYPE CROSSOVER\n #-------------------------------------------------------\n elif random.random() < 0.5:\n return self.actionCrossover(cl)", "def clause_crossover_1x(ind1, ind2):\n k = len(ind1)\n cx_point = random.randint(1, k - 1)\n temp = ind1[cx_point:]\n ind1[cx_point:] = ind2[cx_point:]\n ind2[cx_point:] = temp", "def test_redundant_vlans(self):\n PUSH, POP = ('PUSH_VLAN', 0x8100), ('POP_VLAN', None)\n VID1, VID2 = ('SET_FIELD', ('VLAN_VID', 1)), ('SET_FIELD', ('VLAN_VID', 2))\n PCP5, OUT = ('SET_FIELD', ('VLAN_PCP', 5)), ('OUTPUT', 1)\n IP = ('SET_FIELD', ('IPV4_SRC', 1))\n MATCH = Match([('IPV4_DST', 1, None)])\n\n # Base case\n n1 = normalise([\n Rule(priority=10, match=MATCH,\n instructions=inst_from_acts([VID1, IP, OUT])),\n Rule(priority=0)\n ])\n # Check that VLAN set fields between PUSH and POP are removed, but not 
others\n n2 = normalise([\n Rule(priority=10, match=MATCH,\n instructions=inst_from_acts([VID2, PUSH, IP, PCP5, POP, VID1, OUT])),\n Rule(priority=0)\n ])\n self.assertTrue(check_equal(n1, n2))\n\n # Check with multiple set fields between\n n3 = normalise([\n Rule(priority=10, match=MATCH,\n instructions=inst_from_acts([IP, PUSH, VID2, VID2, POP, VID1, OUT])),\n Rule(priority=0)\n ])\n self.assertTrue(check_equal(n1, n3))\n\n # Check with nested push pop operations\n n4 = normalise([\n Rule(priority=10, match=MATCH,\n instructions=inst_from_acts([IP, PUSH, PUSH, VID2, POP, VID2, POP, VID1, OUT])),\n Rule(priority=0)\n ])\n self.assertTrue(check_equal(n1, n4))\n\n # Check we don't remove a pop, push case!!\n # A very strict set of conditions would be needed to remove this:\n # 1). All fields are set after the push, that is VID and PCP,\n # and we keep the set fields, (or)\n # - All fields are set back to their original value based on the match\n # and this becomes a nop\n # 2). We can be sure that the push'd VLAN ethertype is the same\n # as the original VLAN ethertype\n # For now we don't account for this case as it is too complex\n n5 = normalise([\n Rule(priority=10, match=MATCH,\n instructions=inst_from_acts([IP, POP, PUSH, VID2, OUT])),\n Rule(priority=0)\n ])\n\n self.assertFalse(check_equal(n1, n5))\n\n # We should remove any set fields before the POP\n n6 = normalise([\n Rule(priority=10, match=MATCH,\n instructions=inst_from_acts([IP, VID1, PCP5, POP, PUSH, VID2, OUT])),\n Rule(priority=0)\n ])\n self.assertTrue(check_equal(n5, n6))\n\n # Check that a double tagging is correct and doesn't remove set_fields\n # push_vlan, vid: 1, push_vlan, vid: 2\n n7 = normalise([\n Rule(priority=10, match=MATCH,\n instructions=inst_from_acts([PUSH, VID1, PUSH, VID2, OUT])),\n Rule(priority=0)\n ])\n\n # != push_vlan, push_vlan, vid:2\n n8 = normalise([\n Rule(priority=10, match=MATCH,\n instructions=inst_from_acts([PUSH, PUSH, VID2, OUT])),\n Rule(priority=0)\n ])\n self.assertFalse(check_equal(n7, n8))", "def smart_clause_crossover(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None):\n allow_duplicates = False # allow_duplicates denotes whether the resulting indivuals may contain duplicate clauses\n number_of_clauses = len(ind1)\n all_clauses = ind1+ind2\n chosen_clauses = []\n chosen_clause_indices = []\n ind1_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1, examples, clause_bitvector_cache=clause_bitvector_cache)\n ind2_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2, examples, clause_bitvector_cache=clause_bitvector_cache)\n all_coverage_bitvectors = ind1_coverage_bitvectors + ind2_coverage_bitvectors\n\n for i in range(0, number_of_clauses):\n if i == 0:\n combined_coverage_bitvectors = all_coverage_bitvectors\n else:\n combined_coverage_bitvectors = [combine_coverage_bitvectors(chosen_clauses_bitvector, bitvector, examples)\n for bitvector in all_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_clause_indices:\n for j in range(len(combined_coverage_bitvectors)):\n if all_clauses[index] == all_clauses[j]:\n combined_coverage_bitvectors[j] = [0] * len(examples)\n combined_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_coverage_bitvectors]\n if greedy:\n best_coverage = max(combined_coverages)\n best_indices = [i for i in range(len(combined_coverages)) if combined_coverages[i] == best_coverage]\n chosen_clause_index = random.choice(best_indices)\n else:\n if 
probability_variant == \"linear\":\n sum_coverages = sum(combined_coverages)\n coverages_to_probabilities = [x / sum_coverages for x in combined_coverages]\n elif probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in combined_coverages]\n sum_coverages_squared = sum(coverages_squared)\n coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in combined_coverages]\n elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(combined_coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in combined_coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_clause_index = np.random.choice(list(range(0, len(all_coverage_bitvectors))),\n p=coverages_to_probabilities)\n chosen_coverage_bitvector = combined_coverage_bitvectors[chosen_clause_index]\n if chosen_clause_index < number_of_clauses:\n chosen_clause = ind1[chosen_clause_index]\n else:\n chosen_clause = ind2[chosen_clause_index - number_of_clauses]\n\n chosen_clauses.append(chosen_clause)\n chosen_clause_indices.append(chosen_clause_index)\n chosen_clauses_bitvector = chosen_coverage_bitvector\n\n for i in range(len(chosen_clauses)):\n clause = chosen_clauses[i]\n # We can safely set ind1 and ind2 to the same computed smart combination, as only one of them will make it\n # to the next generation\n ind1[i] = clause\n ind2[i] = clause", "def uniform_crossover(ind1, ind2):\n k = len(ind1)\n for i in range(k):\n for j in range(len(ind1[i])):\n if random.random() < 0.5:\n temp = ind1[i][j]\n ind1[i][j] = ind2[i][j]\n ind2[i][j] = temp", "def test_tensor_composite_constraints_equal_penalties():\n from pygam.penalties import derivative\n\n def der1(*args, **kwargs):\n kwargs.update({'derivative':1})\n return derivative(*args, **kwargs)\n\n # create a 3D tensor where the penalty should be equal to the constraint\n term = te(0, 1, 2,\n n_splines=[4, 5, 6],\n penalties=der1,\n lam=1,\n constraints='monotonic_inc')\n\n # check all the dimensions\n for i in range(3):\n P = term._build_marginal_penalties(i).A\n C = term._build_marginal_constraints(i,\n -np.arange(term.n_coefs),\n constraint_lam=1,\n constraint_l2=0).A\n\n assert (P == C).all()", "def test_ppt_distinguishability_werner_hiding_pairs():\n dim = 2\n sigma_0 = (np.kron(np.identity(dim), np.identity(dim)) + swap_operator(dim)) / (dim * (dim + 1))\n sigma_1 = (np.kron(np.identity(dim), np.identity(dim)) - swap_operator(dim)) / (dim * (dim - 1))\n\n states = [sigma_0, sigma_1]\n\n expected_val = 1 / 2 + 1 / (dim + 1)\n\n primal_res = ppt_distinguishability(states, probs=None, dist_method=\"min-error\", strategy=True)\n dual_res = ppt_distinguishability(states, probs=None, dist_method=\"min-error\", strategy=False)\n\n np.testing.assert_equal(np.isclose(primal_res, expected_val, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, expected_val, atol=0.001), True)\n\n primal_res = ppt_distinguishability(\n states, probs=None, dist_method=\"unambiguous\", strategy=True\n )\n dual_res = ppt_distinguishability(states, probs=None, dist_method=\"unambiguous\", strategy=False)\n\n np.testing.assert_equal(np.isclose(primal_res, 1 / 3, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, 1 / 3, atol=0.001), True)", "def assign_unconstrained_matches(fx2_to_fx1, fx2_to_dist):\n fx2_match = np.arange(len(fx2_to_fx1), dtype=fx2_to_fx1.dtype)\n fx1_match = 
fx2_to_fx1.T[0]\n fx1_norm = fx2_to_fx1.T[1]\n match_dist = fx2_to_dist.T[0]\n norm_dist = fx2_to_dist.T[1]\n assigntup = fx2_match, fx1_match, fx1_norm, match_dist, norm_dist\n return assigntup", "def uniform_crossover(self, another_individual):\n size = len(another_individual.get_genotype())\n genotype1 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n genotype2 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n mask = numpy.random.choice([True,False], size=size)\n not_mask = numpy.logical_not(mask)\n genotype1[mask] = self.get_genotype()[mask]\n genotype1[not_mask] = another_individual.get_genotype()[not_mask]\n genotype2[mask] = another_individual.get_genotype()[mask]\n genotype2[not_mask] = self.get_genotype()[not_mask]\n\n return optimization.Individual(genotype1, self.fitness_evaluator, self.uniform_crossover, self.mutation_method), optimization.Individual(genotype2, self.fitness_evaluator, self.uniform_crossover, self.mutation_method)", "def solvePostOverlap(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q):\n \"\"\"Contains a dummy target for defenders and attackers\"\"\"\n # Add the extra dummy target\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n placements = getPlacements(defenders, targetNumWithDummies)\n omegaKeys = getOmegaKeys(aTypes, placements, attackerActions)\n\n # Build the model\n model = Model('PrimalWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, ub=1, name=\"w\")\n objectiveFunction = sum([q[lam] * sum([w[s,a,lam] * defenderSocialUtility(s,a,defenders,_dRewards,_dCosts,_dPenalties) for s in placements for a in attackerActions]) for lam in aTypes])\n c1 = [sum([w[s,a,lam] * aUtility(s,a,lam,_aPenalties,_aRewards) for s in placements]) \\\n >= sum([w[s,a,lam] * aUtility(s,b,lam,_aPenalties,_aRewards) for s in placements])\n for lam in aTypes for a in attackerActions for b in attackerActions if a != b]\n c1 = [constraint for constraint in c1 if not isinstance(constraint, bool)]\n c1 = model.add_constraints(c1)\n c2 = model.add_constraints([sum([q[lam] * sum([w[s,a,lam] * utilityM(d,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes]) \\\n >= sum([q[lam] * sum([w[s,a,lam] * utilityM(e,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes])\n for m in defenders for d in targetRange for e in targetRange if d!=e])\n c3 = model.add_constraints([sum([w[(s,a,lam)] for s in placements for a in attackerActions]) == 1 for lam in aTypes])\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n # Now that w contains all the outcomes and their probabilities, sum the attacker utilities up.\n utilityPerAttacker = 0\n for k,v in w.items():\n prob = float(v)\n s,a,lam = k\n utilityPerAttacker += aUtility(s,a,lam,_aPenalties,_aRewards) * prob\n utilityPerAttacker /= len(aTypes)\n utilityPerDefender = model.solution.get_objective_value()\n utilityPerDefender /= len(defenders)\n\n 
return utilityPerDefender, utilityPerAttacker, None", "def mpda_cxPartialyMatched(ind1, ind2):\n size = min(len(ind1), len(ind2))\n p1, p2 = [0] * size, [0] * size\n\n # Initialize the position of each indices in the individuals\n for i in range(size):\n p1[ind1[i]] = i\n p2[ind2[i]] = i\n # Choose crossover points\n cxpoint1 = random.randint(0, size)\n cxpoint2 = random.randint(0, size - 1)\n if cxpoint2 >= cxpoint1:\n cxpoint2 += 1\n else: # Swap the two cx points\n cxpoint1, cxpoint2 = cxpoint2, cxpoint1\n\n # Apply crossover between cx points\n for i in range(cxpoint1, cxpoint2):\n # Keep track of the selected values\n temp1 = ind1[i]\n temp2 = ind2[i]\n # Swap the matched value\n ind1[i], ind1[p1[temp2]] = temp2, temp1\n ind2[i], ind2[p2[temp1]] = temp1, temp2\n # Position bookkeeping\n p1[temp1], p1[temp2] = p1[temp2], p1[temp1]\n p2[temp1], p2[temp2] = p2[temp2], p2[temp1]\n\n return ind1, ind2", "def constraint_test():\n import itertools, sys\n\n show_analysis = False\n #Generated via grammar\n gr = grammar.Grammar('grammars/test_constraints.bnf')\n inputs = ([1 for _ in range(100)], [ i%3 for i in range(100)])\n for _input in inputs: \n output = gr.generate(_input)\n azr = analyser.Analyser('test',output['phenotype'],True)\n try:\n azr.create_graph()\n except ValueError as e:\n print(__name__, \"ERROR\", _input, e)\n continue\n azr.parse_graph()\n azr.apply_stresses()\n azr.create_slf_file()\n azr.test_slf_file()\n azr.parse_results()\n azr.print_stresses()\n if show_analysis:\n azr.show_analysis()\n \n #Fixed generated\n lengths = (1000, 10000)\n levels = (5, 10)\n for length_idx, level_idx in itertools.permutations([0,1]):\n try:\n GRAPH = constrained_offset_graph(lengths[length_idx],\n levels[length_idx])\n except ValueError as e:\n print(__name__, \"ERROR\", lengths[length_idx], levels[length_idx], e)\n continue\n GRAPH.save_graph(\"pylon\")\n print \"nodes:\", GRAPH.number_of_nodes()\n print \"edges\", GRAPH.number_of_edges()\n #will it blend?\n azr = analyser.Analyser('test',\"moo\",True)\n azr.my_graph = GRAPH\n azr.parse_graph()\n azr.apply_stresses()\n azr.create_slf_file()\n azr.test_slf_file()\n azr.parse_results()\n azr.print_stresses()\n if show_analysis:\n azr.show_analysis()", "def match(pos_thresh, neg_thresh, truths, priors, labels, loc_t, conf_t, idx_t, idx, loc_data):\n decoded_priors = point_form(priors)\n\n\n overlaps = jaccard(truths, decoded_priors)\n\n # Size [num_priors] best ground truth for each prior\n overlaps = P.Cast()(overlaps, mindspore.float32)\n best_truth_idx, best_truth_overlap = P.ArgMaxWithValue(0)(overlaps)\n\n\n _, drop_pad_overlap = P.ArgMaxWithValue(1)(overlaps)\n for i in range(overlaps.shape[0]):\n if drop_pad_overlap[i] == 0:\n overlaps = overlaps[:i, :]\n break\n\n for _ in range(overlaps.shape[0]):\n # Find j, the gt with the highest overlap with a prior\n # In effect, this will loop through overlaps.size(0) in a \"smart\" order,\n # always choosing the highest overlap first.\n best_prior_idx, best_prior_overlap = P.ArgMaxWithValue(1)(overlaps)\n cast = P.Cast()\n idx_j, _ = P.ArgMaxWithValue(0)(best_prior_overlap)\n # Find i, the highest overlap anchor with this gt\n\n i = best_prior_idx[idx_j]\n\n # Set all other overlaps with i to be -1 so that no other gt uses it\n overlaps[:, i] = mindspore.ops.ScalarCast()(-1, mindspore.float32)\n # Set all other overlaps with j to be -1 so that this loop never uses j again\n overlaps[idx_j, :] = mindspore.ops.ScalarCast()(-1, mindspore.float32)\n\n best_truth_overlap[i] = 
mindspore.ops.ScalarCast()(2, mindspore.float32)\n\n\n best_truth_idx = cast(best_truth_idx, mindspore.float16)\n new_best_truth_idx = mindspore.ops.expand_dims(best_truth_idx, 0)\n new_best_truth_idx[::, i] = idx_j\n best_truth_idx = mindspore.ops.Squeeze()(new_best_truth_idx)\n\n best_truth_idx = cast(best_truth_idx, mindspore.int32)\n\n matches = truths[best_truth_idx] # Shape: [num_priors,4]\n\n conf = labels[best_truth_idx] + 1 # Shape: [num_priors]\n\n conf[best_truth_overlap < neg_thresh] = 0 # label as background\n\n loc = encode(matches, priors)\n loc_t[idx] = loc # [num_priors,4] encoded offsets to learn\n\n conf_t[idx] = conf # [num_priors] top class label for each prior\n\n best_truth_idx = P.Cast()(best_truth_idx, mindspore.int32)\n idx_t[idx] = best_truth_idx # [num_priors] indices for lookup\n\n return 0", "def test_numbers_can_substitute_scalars(free_alg, full_balance):\n\n dr = free_alg\n p = dr.names\n\n x = IndexedBase('x')\n y = IndexedBase('y')\n r = Range('D', 0, 2)\n i, j, k, l = symbols('i j k l')\n dr.set_dumms(r, [i, j, k, l])\n v = p.v\n\n orig = dr.sum((i, r), x[i] ** 2 * x[j] * y[k] * v[l])\n\n res = orig.subst(x[i], 0, full_balance=full_balance).simplify()\n assert res == 0\n res = orig.subst(x[j], 1, full_balance=full_balance).simplify()\n assert res == dr.sum(2 * y[k] * v[l])\n res = orig.subst(x[k], 2, full_balance=full_balance).simplify()\n assert res == dr.sum(16 * y[k] * v[l])", "def choose_literals(self):\n # reciprocal norm vector for probabilities of positive literals\n normalization_vector = (np.sum(self.probabilities, axis=0) + self.EPSILON) ** -1\n # for each variable decide wheter to take positive or negative literal\n np.random.seed()\n chosen = np.random.rand(len(self.variables)) < (normalization_vector * self.probabilities[0])\n return chosen", "def CrossoverOX1(p1,p2):\n countryNo=len(p1)\n [start,end] = sorted(random.sample(range(1,countryNo),2))\n ch1 = [0]+[-1 for i in range(1,len(p1))]\n ch2 = [0]+[-1 for i in range(1,len(p1))]\n for i in range(1,countryNo):\n if i>=start and i<=end:\n ch1[i]=p1[i]\n ch2[i]=p2[i]\n for i in range(1,countryNo):\n if p2[i] not in ch1:\n ch1[ch1.index(-1)]=p2[i]\n for i in range(1,countryNo):\n if p1[i] not in ch2:\n ch2[ch2.index(-1)]=p1[i]\n return ch1, ch2", "def solvePostNoOverlap(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q):\n \"\"\"Contains as many dummy targets as defenders, for defenders and attackers\"\"\"\n # Add the extra dummy targets\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n # Get the suggestions that occur with no overlap\n overlapPlacements = getPlacements(defenders, targetNumWithDummies)\n placements = list(filter(lambda x: len(set(x)) == len(x), overlapPlacements))\n omegaKeys = getOmegaKeys(aTypes, placements, attackerActions)\n\n # Build the model\n model = Model('PrimalWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, ub=1, name=\"w\")\n objectiveFunction = sum([q[lam] * sum([w[s,a,lam] * defenderSocialUtility(s,a,defenders,_dRewards,_dCosts,_dPenalties) 
for s in placements for a in attackerActions]) for lam in aTypes])\n c1 = [sum([w[s,a,lam] * aUtility(s,a,lam,_aPenalties,_aRewards) for s in placements]) \\\n >= sum([w[s,a,lam] * aUtility(s,b,lam,_aPenalties,_aRewards) for s in placements])\n for lam in aTypes for a in attackerActions for b in attackerActions if a != b]\n c1 = [constraint for constraint in c1 if not isinstance(constraint, bool)]\n c1 = model.add_constraints(c1)\n c2 = model.add_constraints([sum([q[lam] * sum([w[s,a,lam] * utilityM(d,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes]) \\\n >= sum([q[lam] * sum([w[s,a,lam] * utilityM(e,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes])\n for m in defenders for d in targetRange for e in targetRange if d!=e])\n c3 = model.add_constraints([sum([w[(s,a,lam)] for s in placements for a in attackerActions]) == 1 for lam in aTypes])\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n # Now that w contains all the outcomes and their probabilities, sum the attacker utilities up.\n utilityPerAttacker = 0\n for k,v in w.items():\n prob = float(v)\n s,a,lam = k\n utilityPerAttacker += aUtility(s,a,lam,_aPenalties,_aRewards) * prob\n utilityPerAttacker /= len(aTypes)\n utilityPerDefender = model.solution.get_objective_value()\n utilityPerDefender /= len(defenders)\n return utilityPerDefender, utilityPerAttacker, None", "def dual_problem(\n states: list[np.ndarray], probs: list[float] = None, dist_method=\"min-error\"\n) -> float:\n constraints = []\n meas = []\n\n dim_x, _ = states[0].shape\n\n y_var = cvxpy.Variable((dim_x, dim_x), hermitian=True)\n objective = cvxpy.Minimize(cvxpy.trace(cvxpy.real(y_var)))\n\n dim = int(np.log2(dim_x))\n dim_list = [2] * int(np.log2(dim_x))\n sys_list = list(range(1, dim, 2))\n # dim_list = [3, 3]\n\n if dist_method == \"min-error\":\n for i, _ in enumerate(states):\n meas.append(cvxpy.Variable((dim_x, dim_x), PSD=True))\n constraints.append(\n cvxpy.real(y_var - probs[i] * states[i])\n >> partial_transpose(meas[i], sys=sys_list, dim=dim_list)\n )\n\n if dist_method == \"unambiguous\":\n for j, _ in enumerate(states):\n sum_val = 0\n for i, _ in enumerate(states):\n if i != j:\n sum_val += cvxpy.real(cvxpy.Variable()) * probs[i] * states[i]\n meas.append(cvxpy.Variable((dim_x, dim_x), PSD=True))\n constraints.append(\n cvxpy.real(y_var - probs[j] * states[j] + sum_val)\n >> partial_transpose(meas[j], sys=sys_list, dim=dim_list)\n )\n\n meas.append(cvxpy.Variable((dim_x, dim_x), PSD=True))\n constraints.append(\n cvxpy.real(y_var) >> partial_transpose(meas[-1], sys=sys_list, dim=dim_list)\n )\n\n problem = cvxpy.Problem(objective, constraints)\n sol_default = problem.solve()\n\n # print(np.around(y_var.value, decimals=3))\n\n return sol_default", "def test_constant_merge(self):\r\n x = tensor.constant([0, 0])\r\n y = x[1:]\r\n x1 = x - tensor.join(0, y, y)\r\n x1.eval()", "def crossover(self,\n ind1: Dict[str, Union[str, Dict[str, List[int]], Callable]],\n ind2: Dict[str, Union[str, Dict[str, List[int]], Callable]]\n ) -> Dict[str, Union[str, Dict[str, List[int]], Callable]]:\n ind1 = deepcopy(ind1)\n ind2 = deepcopy(ind2)\n\n mask = random.choices(list(self.grammar.keys()),\n k=random.randrange(len(self.grammar) - 2))\n for key in mask:\n ind1[\"genome\"][key], ind2[\"genome\"][key] = ind2[\"genome\"][key], ind1[\"genome\"][key]\n return ind1", "def _apply_crossover(pop, op, pb):\n for i in range(1, 
len(pop), 2):\n if random.random() < pb:\n pop[i - 1], pop[i] = op(pop[i - 1], pop[i])\n del pop[i - 1].fitness.values\n del pop[i].fitness.values\n return pop", "def correlate_assignments(formula, marginals, samples):\n\n # Localize the formula's assignment variable table.\n\n asgvar = formula.assignment_variables\n\n # Iterate over all pairs of signatures\n\n for sig1, sig2 in itertools.combinations(marginals.keys(), 2):\n\n # If these are a geminal pair, ignore.\n\n if sig1.is_geminal(sig2):\n continue\n\n # If either of these are nailed, ignore.\n\n if len(marginals[sig1]) == 1 or len(marginals[sig2]) == 1:\n continue\n\n # Iterate over all pairs of assignments between these two.\n\n for m1_id, m2_id in itertools.product(marginals[sig1].keys(),\n marginals[sig2].keys()):\n\n # If these are the same methyl, ignore.\n\n if m1_id == m2_id:\n continue\n\n # Get the product of the marginal probability of sig1 -> m1 and\n # sig2 -> m2.\n\n independent_prob = marginals[sig1][m1_id] * marginals[sig2][m2_id]\n\n # Count the fraction of samples in which these assignments co-occur\n\n real_prob = 0\n for sample in samples:\n\n if sample[sig1].seqid == m1_id and sample[sig2].seqid == m2_id:\n real_prob += 1/len(samples)\n\n # Get the methyls with the given sequence IDs.\n\n m1s = [m for m in asgvar[sig1].keys() if m.seqid == m1_id]\n\n m2s = [m for m in asgvar[sig2].keys() if m.seqid == m2_id]\n\n if real_prob > 10*independent_prob:\n\n for m1 in m1s:\n clause = [-1*asgvar[sig1][m1]]\n for m2 in m2s:\n clause.append(asgvar[sig2][m2])\n formula.add_clause(clause)\n\n for m2 in m2s:\n clause = [-1*asgvar[sig2][m2]]\n for m1 in m1s:\n clause.append(asgvar[sig1][m1])\n formula.add_clause(clause)\n\n elif real_prob*10 < independent_prob:\n for m1, m2 in itertools.product(m1s, m2s):\n formula.add_clause([-asgvar[sig1][m1], -asgvar[sig2][m2]])", "def test_ex_2_2(self):\n wam = WAM()\n compiler = Compiler()\n X = Variable()\n Y = Variable()\n var_idxes = {}\n a1 = compiler.write_to_heap(Compound('f', X, Compound('g', X, Atom('a'))), wam, var_idxes)\n a2 = compiler.write_to_heap(Compound('f', Atom('b'), Y), wam, var_idxes)\n aX = var_idxes[X]\n aY = var_idxes[Y]\n wam.unify(a1, a2)\n self.assertEqual(wam.get_term_repr(aX), 'b')\n self.assertEqual(wam.get_term_repr(aY), 'g(b, a)')", "def nonuniform_bounds_mutation(random, candidate, args):\n lower_bound = args.get('lower_bound')\n upper_bound = args.get('upper_bound')\n strength = args.setdefault('mutation_strength', 1)\n mutant = copy(candidate)\n for i, (c, lo, hi) in enumerate(zip(candidate, lower_bound, upper_bound)):\n if random.random() <= 0.5:\n new_value = c + (hi - c) * (1.0 - random.random() ** strength)\n else:\n new_value = c - (c - lo) * (1.0 - random.random() ** strength)\n mutant[i] = new_value\n\n return mutant", "def _svm_loss_penalty_dual(name):\n return hp.choice(name, [\n ('hinge', 'l2', True),\n ('squared_hinge', 'l2', True),\n ('squared_hinge', 'l1', False),\n ('squared_hinge', 'l2', False)\n ])", "def test_ppt_distinguishability_yyd_vectors():\n psi_0 = bell(0)\n psi_1 = bell(2)\n psi_2 = bell(3)\n psi_3 = bell(1)\n\n x_1 = np.kron(psi_0, psi_0)\n x_2 = np.kron(psi_1, psi_3)\n x_3 = np.kron(psi_2, psi_3)\n x_4 = np.kron(psi_3, psi_3)\n\n states = [x_1, x_2, x_3, x_4]\n probs = [1 / 4, 1 / 4, 1 / 4, 1 / 4]\n\n primal_res = ppt_distinguishability(states, probs=probs, dist_method=\"min-error\", strategy=True)\n dual_res = ppt_distinguishability(states, probs=probs, dist_method=\"min-error\", strategy=False)\n\n 
np.testing.assert_equal(np.isclose(primal_res, 7 / 8, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, 7 / 8, atol=0.001), True)\n\n primal_res = ppt_distinguishability(\n states, probs=probs, dist_method=\"unambiguous\", strategy=True\n )\n dual_res = ppt_distinguishability(\n states, probs=probs, dist_method=\"unambiguous\", strategy=False\n )\n\n np.testing.assert_equal(np.isclose(primal_res, 3 / 4, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, 3 / 4, atol=0.001), True)" ]
[ "0.6022955", "0.57126135", "0.56022096", "0.55702895", "0.55280787", "0.5495883", "0.5482473", "0.5477625", "0.54446507", "0.54238015", "0.5409056", "0.54083395", "0.5406118", "0.53358424", "0.5321293", "0.5319676", "0.5170372", "0.5108026", "0.5097792", "0.5073639", "0.5059063", "0.5038252", "0.502811", "0.5027751", "0.5017589", "0.5017575", "0.49883327", "0.4960855", "0.49536473", "0.49506637" ]
0.6571927
0
Operates on constraint level. Crosses over two individuals by first collecting all constraints occurring in the individuals in a list of size 2k, randomly scrambling the list, and then assigning the first k constraints in the list to the first individual, and the second k constraints to the second individual. Note that this operator has a larger mixing rate than clause_crossover_1x and uniform clause_crossover.
def scramble_clause_crossover(ind1, ind2):
    all_clauses = ind1 + ind2
    random.shuffle(all_clauses)
    ind1[0:len(ind1)] = all_clauses[0:len(ind1)]
    ind2[0:len(ind2)] = all_clauses[len(ind1):len(ind1) + len(ind2)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uniform_clause_crossover(ind1, ind2):\n k = len(ind1)\n for i in range(k):\n if random.random() < 0.5:\n temp = ind1[i]\n ind1[i] = ind2[i]\n ind2[i] = temp", "def avoid_duplicate_clauses_scramble_clause_crossover(ind1, ind2):\n ind_length = len(ind1)\n ind1_copy = copy.deepcopy(ind1)\n ind2_copy = copy.deepcopy(ind2)\n\n clauses_both_have = []\n remaining_clauses = []\n for clause in ind1:\n try:\n index = ind2_copy.index(clause)\n clauses_both_have.append(clause)\n del ind2_copy[index]\n except ValueError:\n remaining_clauses.append(clause)\n\n for clause in ind2:\n try:\n index = ind1_copy.index(clause)\n del ind1_copy[index]\n except ValueError:\n remaining_clauses.append(clause)\n\n random.shuffle(remaining_clauses)\n ind1[0:len(clauses_both_have)] = clauses_both_have\n ind2[0:len(clauses_both_have)] = clauses_both_have\n ind1[len(clauses_both_have):] = remaining_clauses[:len(remaining_clauses) // 2]\n ind2[len(clauses_both_have):] = remaining_clauses[len(remaining_clauses) // 2:]\n if len(ind1) != ind_length or len(ind2) != ind_length:\n raise Exception(\"Crossover operator altered the length of an individual\")", "def smart_clause_crossover(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None):\n allow_duplicates = False # allow_duplicates denotes whether the resulting indivuals may contain duplicate clauses\n number_of_clauses = len(ind1)\n all_clauses = ind1+ind2\n chosen_clauses = []\n chosen_clause_indices = []\n ind1_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1, examples, clause_bitvector_cache=clause_bitvector_cache)\n ind2_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2, examples, clause_bitvector_cache=clause_bitvector_cache)\n all_coverage_bitvectors = ind1_coverage_bitvectors + ind2_coverage_bitvectors\n\n for i in range(0, number_of_clauses):\n if i == 0:\n combined_coverage_bitvectors = all_coverage_bitvectors\n else:\n combined_coverage_bitvectors = [combine_coverage_bitvectors(chosen_clauses_bitvector, bitvector, examples)\n for bitvector in all_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_clause_indices:\n for j in range(len(combined_coverage_bitvectors)):\n if all_clauses[index] == all_clauses[j]:\n combined_coverage_bitvectors[j] = [0] * len(examples)\n combined_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_coverage_bitvectors]\n if greedy:\n best_coverage = max(combined_coverages)\n best_indices = [i for i in range(len(combined_coverages)) if combined_coverages[i] == best_coverage]\n chosen_clause_index = random.choice(best_indices)\n else:\n if probability_variant == \"linear\":\n sum_coverages = sum(combined_coverages)\n coverages_to_probabilities = [x / sum_coverages for x in combined_coverages]\n elif probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in combined_coverages]\n sum_coverages_squared = sum(coverages_squared)\n coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in combined_coverages]\n elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(combined_coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in combined_coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_clause_index = np.random.choice(list(range(0, len(all_coverage_bitvectors))),\n 
p=coverages_to_probabilities)\n chosen_coverage_bitvector = combined_coverage_bitvectors[chosen_clause_index]\n if chosen_clause_index < number_of_clauses:\n chosen_clause = ind1[chosen_clause_index]\n else:\n chosen_clause = ind2[chosen_clause_index - number_of_clauses]\n\n chosen_clauses.append(chosen_clause)\n chosen_clause_indices.append(chosen_clause_index)\n chosen_clauses_bitvector = chosen_coverage_bitvector\n\n for i in range(len(chosen_clauses)):\n clause = chosen_clauses[i]\n # We can safely set ind1 and ind2 to the same computed smart combination, as only one of them will make it\n # to the next generation\n ind1[i] = clause\n ind2[i] = clause", "def clause_crossover_1x(ind1, ind2):\n k = len(ind1)\n cx_point = random.randint(1, k - 1)\n temp = ind1[cx_point:]\n ind1[cx_point:] = ind2[cx_point:]\n ind2[cx_point:] = temp", "def crossover(new_pop, k):\n shuffle(new_pop)\n for i in range(len(new_pop) // 2):\n points = random.sample(range(1, len(new_pop[i])), k)\n points.sort()\n for fold in range(k):\n x = points[fold]\n tmp = new_pop[2 * i][:x].copy()\n new_pop[2 * i][:x], new_pop[2 * i + 1][:x] = new_pop[2 * i +\n 1][:x], tmp\n return new_pop", "def smart_clause_crossover_infeasibility(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None):\n allow_duplicates = False # allow_duplicates denotes whether the resulting indivuals may contain duplicate clauses\n ind1_hard_constraints = [constr for constr in ind1 if constr[-2] == True]\n ind2_hard_constraints = [constr for constr in ind2 if constr[-2] == True]\n all_hard_constraints = ind1_hard_constraints + ind2_hard_constraints\n ind1_soft_constraints = [constr for constr in ind1 if constr[-2] == False]\n ind2_soft_constraints = [constr for constr in ind2 if constr[-2] == False]\n all_soft_constraints = ind1_soft_constraints + ind2_soft_constraints\n ind1_hard_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1_hard_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind2_hard_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2_hard_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind1_soft_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1_soft_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind2_soft_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2_soft_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n all_hard_coverage_bitvectors = ind1_hard_coverage_bitvectors + ind2_hard_coverage_bitvectors\n all_soft_coverage_bitvectors = ind1_soft_coverage_bitvectors + ind2_soft_coverage_bitvectors\n\n ind1_num_hard = len([constr for constr in ind1 if constr[-2] == True])\n ind2_num_hard = len([constr for constr in ind2 if constr[-2] == True])\n # num_hard = random.choice([ind1_num_hard, ind2_num_hard])\n if ind1_num_hard <= ind2_num_hard:\n num_hard = random.choice(list(range(ind1_num_hard, ind2_num_hard+1)))\n else:\n num_hard = random.choice(list(range(ind2_num_hard, ind1_num_hard + 1)))\n num_soft = len(ind1) - num_hard\n chosen_hard_clauses = []\n chosen_hard_clause_indices = []\n chosen_soft_clauses = []\n chosen_soft_clause_indices = []\n\n # Choose hard constraints\n for i in range(0, num_hard):\n if i == 0:\n combined_hard_coverage_bitvectors = all_hard_coverage_bitvectors\n else:\n combined_hard_coverage_bitvectors = 
[combine_coverage_bitvectors_hard_constraints(\n chosen_hard_clauses_bitvector, bitvector, examples) for bitvector in all_hard_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_hard_clause_indices:\n for j in range(len(combined_hard_coverage_bitvectors)):\n if all_hard_constraints[index][:-2] == all_hard_constraints[j][:-2]:\n combined_hard_coverage_bitvectors[j] = [0] * len(examples)\n if greedy:\n combined_hard_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_hard_coverage_bitvectors]\n best_hard_coverage = max(combined_hard_coverages)\n best_hard_indices = [i for i in range(len(combined_hard_coverages)) if combined_hard_coverages[i] == best_hard_coverage]\n chosen_hard_clause_index = random.choice(best_hard_indices)\n else:\n coverages = [sum(x) for x in combined_hard_coverage_bitvectors]\n if probability_variant == \"linear\":\n sum_coverages = sum(coverages)\n coverages_to_probabilities = [x / sum_coverages for x in coverages]\n elif probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in coverages]\n sum_coverages_squared = sum(coverages_squared)\n coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in coverages]\n elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_hard_clause_index = np.random.choice(list(range(0, len(all_hard_coverage_bitvectors))),\n p=coverages_to_probabilities)\n chosen_hard_coverage_bitvector = combined_hard_coverage_bitvectors[chosen_hard_clause_index]\n if chosen_hard_clause_index < len(ind1_hard_constraints):\n chosen_hard_clause = ind1_hard_constraints[chosen_hard_clause_index]\n else:\n chosen_hard_clause = ind2_hard_constraints[chosen_hard_clause_index - len(ind1_hard_constraints)]\n\n chosen_hard_clauses.append(chosen_hard_clause)\n chosen_hard_clause_indices.append(chosen_hard_clause_index)\n chosen_hard_clauses_bitvector = chosen_hard_coverage_bitvector\n\n # Choose soft constraints\n for i in range(0, num_soft):\n if i == 0:\n combined_soft_coverage_bitvectors = all_soft_coverage_bitvectors\n else:\n combined_soft_coverage_bitvectors = [combine_coverage_bitvectors_soft_constraints(\n chosen_soft_clauses_bitvector, bitvector, examples) for bitvector in all_soft_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_soft_clause_indices:\n for j in range(len(combined_soft_coverage_bitvectors)):\n if all_soft_constraints[index][:-2] == all_soft_constraints[j][:-2]:\n combined_soft_coverage_bitvectors[j] = [0] * len(examples)\n if greedy:\n combined_soft_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_soft_coverage_bitvectors]\n best_soft_coverage = max(combined_soft_coverages)\n best_soft_indices = [i for i in range(len(combined_soft_coverages)) if combined_soft_coverages[i] == best_soft_coverage]\n chosen_soft_clause_index = random.choice(best_soft_indices)\n else:\n coverages = [sum(x) for x in combined_soft_coverage_bitvectors]\n if probability_variant == \"linear\":\n sum_coverages = sum(coverages)\n coverages_to_probabilities = [x / sum_coverages for x in coverages]\n elif probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in coverages]\n sum_coverages_squared = sum(coverages_squared)\n 
coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in coverages]\n elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_soft_clause_index = np.random.choice(list(range(0, len(all_soft_coverage_bitvectors))),\n p=coverages_to_probabilities)\n chosen_soft_coverage_bitvector = combined_soft_coverage_bitvectors[chosen_soft_clause_index]\n if chosen_soft_clause_index < len(ind1_soft_constraints):\n chosen_soft_clause = ind1_soft_constraints[chosen_soft_clause_index]\n else:\n chosen_soft_clause = ind2_soft_constraints[chosen_soft_clause_index - len(ind1_soft_constraints)]\n\n chosen_soft_clauses.append(chosen_soft_clause)\n chosen_soft_clause_indices.append(chosen_soft_clause_index)\n chosen_soft_clauses_bitvector = chosen_soft_coverage_bitvector\n\n for i in range(len(chosen_hard_clauses)):\n hard_clause = chosen_hard_clauses[i]\n # We can safely set ind1 and ind2 to the same computed smart combination, as only one of them will make it\n # to the next generation\n ind1[i] = hard_clause\n ind2[i] = hard_clause\n\n for i in range(len(chosen_soft_clauses)):\n soft_clause = chosen_soft_clauses[i]\n ind1[num_hard+i] = soft_clause\n ind2[num_hard+i] = soft_clause", "def uniform_crossover(ind1, ind2):\n k = len(ind1)\n for i in range(k):\n for j in range(len(ind1[i])):\n if random.random() < 0.5:\n temp = ind1[i][j]\n ind1[i][j] = ind2[i][j]\n ind2[i][j] = temp", "def crossover_two_candidates(self, character_1: int, character_2: int):\n p = [(0, 5), (0, 6), (0, 11), (0, 12), (6, 6), (6, 11), (6, 12), (6, 15), (7, 11), (7, 12), (7, 15),\n (12, 12), (12, 15)]\n\n candidate_1 = self.characters[character_1]\n candidate_2 = self.characters[character_2]\n\n cut = random.choice(p)\n # generate list with values 0\n c_1_list: List = list(np.zeros(16))\n c_2_list: List = list(np.zeros(16))\n\n # fill the list with the correct values\n for i in range(cut[0], cut[1]+1):\n c_1_list[i] = candidate_1.dna_generator.dna.get_dna_value(i)\n c_2_list[i] = candidate_2.dna_generator.dna.get_dna_value(i)\n\n # change the candidates with the list values\n for i in range(cut[0], cut[1]+1):\n candidate_1.dna_generator.dna.set_dna_value(c_2_list[i], i)\n candidate_2.dna_generator.dna.set_dna_value(c_1_list[i], i)\n\n self.characters[character_1] = candidate_1\n self.characters[character_2] = candidate_2", "def seat_model_generator(n1, n2, k_low, k_up, flag=0):\n def order_by_master_list(l, master_list):\n return sorted(l, key=master_list.index)\n\n possible_credits = [5, 10, 15, 20]\n # set up geometric distribution among above possible hospital credits\n probs = np.random.geometric(p=0.10, size=len(possible_credits))\n probs = probs / np.sum(probs)\n\n def get_hosp_credits():\n return list(np.random.choice(possible_credits, size=1, replace=False, p=probs))[0]\n\n def get_hosp_capacity_uniform():\n # returns the average hospital capacity based on resident, hospital credits\n res_cap_sum = 0\n hosp_cred_sum = 0\n for r in g.residents:\n res_cap_sum += r.uq\n for h in g.hospitals:\n hosp_cred_sum += h.credits\n\n return int(np.ceil((1.5 * res_cap_sum) / hosp_cred_sum))\n\n def get_hosp_capacity_non_uniform(cap):\n low = int(np.ceil(0.3*cap))\n high = int(np.ceil(1.7*cap))\n return random.randint(low, 
high)\n\n g = Graph()\n\n # default hospital capacity\n cap = 60\n\n # create the sets R and H, r_1 ... r_n1, h_1 .. h_n2\n R = list('r{}'.format(i) for i in range(1, n1+1))\n H = list('h{}'.format(i) for i in range(1, n2+1))\n\n for res in R:\n g.residents.append(Resident(res))\n\n for hosp in H:\n g.hospitals.append(Hospital(hosp, 0, cap, get_hosp_credits()))\n\n # prepare a master list\n master_list = list(r for r in R)\n random.shuffle(master_list)\n\n # setup a probability distribution over the hospitals\n p = np.random.geometric(p=0.10, size=len(H))\n\n # normalize the distribution\n p = p / np.sum(p) # p is a ndarray, so this operation is correct\n\n prob_dict = dict(zip(H, p))\n master_list_h = sorted(H, key=lambda h: prob_dict[h], reverse=True)\n\n pref_H, pref_R = collections.defaultdict(list), {}\n for r in R:\n # sample hospitals according to the probability distribution and without replacement\n k = random.randint(k_low, k_up)\n pref_R[r] = list(np.random.choice(H, size=min(len(H), k), replace=False, p=p))\n # add these residents to the preference list for the corresponding hospitals\n for h in pref_R[r]:\n pref_H[h].append(r)\n\n for r in R:\n if(flag == 0):\n pref_R[r] = order_by_master_list(pref_R[r], master_list_h)\n else:\n random.shuffle(pref_R[r])\n res = g.get_resident(r)\n for hosp in pref_R[r]:\n res.pref.append(g.get_hospital(hosp))\n\n for h in H:\n if(flag == 0):\n pref_H[h] = order_by_master_list(pref_H[h], master_list)\n else:\n random.shuffle(pref_H[h])\n hosp = g.get_hospital(h)\n for res in pref_H[h]:\n hosp.pref.append(g.get_resident(res))\n\n g.init_resident_capacities()\n\n # get average capacity for hospitals\n cap = get_hosp_capacity_uniform()\n for h in g.hospitals:\n h.uq = get_hosp_capacity_non_uniform(cap)\n\n # initialize class constraints for residents\n g.init_all_resident_class()\n\n # initialize master class constraints applicable to all residents \n g.init_master_classes_disjoint()\n\n return g", "def crossover(self):\n print(' - crossover')\n s = time.time()\n\n # make a list with all index\n tmp_list = list(range(0, self.size))\n while len(tmp_list) > 0:\n candidate_1 = random.choice(tmp_list)\n tmp_list.remove(candidate_1)\n candidate_2 = random.choice(tmp_list)\n tmp_list.remove(candidate_2)\n\n # ceck if the two candidates will crossover\n chance = random.uniform(0, 1)\n if chance <= self.crossover_rate:\n self.crossover_two_candidates(candidate_1, candidate_2)\n\n e = time.time()\n print(\" - time: \", e - s)", "def _apply_crossover(pop, op, pb):\n for i in range(1, len(pop), 2):\n if random.random() < pb:\n pop[i - 1], pop[i] = op(pop[i - 1], pop[i])\n del pop[i - 1].fitness.values\n del pop[i].fitness.values\n return pop", "def matched_uniform_crossover(ind1, ind2):\n # We calculate the pairwise match between ind1's and ind2's clauses\n match_matrix = np.zeros((len(ind1), len(ind2)))\n for i in range(len(ind1)):\n clause1 = ind1[i]\n for j in range(len(ind2)):\n clause2 = ind2[j]\n curr_syntactic_match = 0\n for k in range(len(clause1)):\n if k != len(clause1) - 1:\n if clause1[k] == clause2[k]:\n curr_syntactic_match += 1\n else:\n curr_syntactic_match += (1 - abs(clause1[k] - clause2[k]))\n match_matrix[i][j] = curr_syntactic_match\n\n # Arg-sort the pairwise clause matches from best to worst match\n matches_ordered = np.dstack(np.unravel_index(np.argsort(match_matrix.ravel())[::-1], (len(ind1), len(ind2))))[0]\n\n # Finally match the clauses, making sure that each clause is only matched once\n # Then perform uniform crossover on 
matched clauses\n ind1_matched_clauses = set()\n ind2_matched_clauses = set()\n count = 0\n for match in matches_ordered:\n i = match[0]\n j = match[1]\n if match_matrix[i][j] >= len(ind1[0])//2:\n if i not in ind1_matched_clauses and j not in ind2_matched_clauses:\n count += 1\n # Perform the uniform crossover\n for k in range(len(ind1[i])):\n if random.random() < 0.5:\n temp = ind1[i][k]\n ind1[i][k] = ind2[j][k]\n ind2[j][k] = temp\n ind1_matched_clauses.add(i)\n ind2_matched_clauses.add(j)", "def set_Kk2_uniform_activity(self, clip=True, **kwargs):\n\t\t\n\t\tmatrix_shape = [self.Mm, self.Nn]\n\t\t\n\t\tparams_Kk1 = [self.mu_Kk1, self.sigma_Kk1]\n\t\tself.Kk1 = random_matrix(matrix_shape, params_Kk1, seed=self.seed_Kk1)\n\t\n\t\tmu_Ss0 = self.mu_Ss0\n\t\tmu_eps = self.mu_eps\n\t\tfor key in kwargs:\n\t\t\texec ('%s = kwargs[key]' % key)\n\t\t\n\t\tparams_Kk2 = [self.uniform_activity_lo, self.uniform_activity_hi]\n\t\tself.Kk2 = Kk2_eval_uniform_activity(matrix_shape, params_Kk2, \n\t\t\t\t\t\t\t\t\t\t\tmu_Ss0, mu_eps, \n\t\t\t\t\t\t\t\t\t\t\tself.seed_Kk2)\n\t\t\n\t\tif clip == True:\n\t\t\tarray_dict = clip_array(dict(Kk1 = self.Kk1, Kk2 = self.Kk2))\n\t\t\tself.Kk1 = array_dict['Kk1']\n\t\t\tself.Kk2 = array_dict['Kk2']", "def crossover(v1, v2):\n idx1 = np.random.choice(v1.size, size=int(v1.size/2))\n idx2 = np.random.choice(v2.size, size=int(v2.size/2))\n data = np.array([v1.data[i] for i in idx1] +\n [v2.data[i] for i in idx2])\n idx = np.array([v1.indices[i] for i in idx1] +\n [v2.indices[i] for i in idx2])\n v3 = sp.sparse.csc_matrix((data, (idx, np.zeros(idx.shape, dtype=int))),\n shape=v1.shape)\n return v3", "def gen2_constraint(model):\n return 20, model.g[2], 100", "def set_uniform_Kk(self, clip=True):\n\t\t\n\t\tKk1_los = random_matrix([self.Mm], params=[self.lo_Kk1_hyper_lo, \n\t\t\t\t\t\t\tself.lo_Kk1_hyper_hi], sample_type='uniform',\n\t\t\t\t\t\t\tseed=self.seed_Kk1)\n\t\tKk1_his = random_matrix([self.Mm], params=[self.hi_Kk1_hyper_lo, \n\t\t\t\t\t\t\tself.hi_Kk1_hyper_hi], sample_type='uniform',\n\t\t\t\t\t\t\tseed=self.seed_Kk1)\n\t\tKk2_los = random_matrix([self.Mm], params=[self.lo_Kk2_hyper_lo, \n\t\t\t\t\t\t\tself.lo_Kk2_hyper_hi], sample_type='uniform',\n\t\t\t\t\t\t\tseed=self.seed_Kk2)\n\t\tKk2_his = random_matrix([self.Mm], params=[self.hi_Kk2_hyper_lo, \n\t\t\t\t\t\t\tself.hi_Kk2_hyper_hi], sample_type='uniform',\n\t\t\t\t\t\t\tseed=self.seed_Kk2)\n\t\t\n\t\tself.Kk1 = random_matrix([self.Mm, self.Nn], [Kk1_los, Kk1_his], \n\t\t\t\t\t\t\t\tsample_type='rank2_row_uniform', \n\t\t\t\t\t\t\t\tseed = self.seed_Kk1)\n\t\tself.Kk2 = random_matrix([self.Mm, self.Nn], [Kk2_los, Kk2_his], \n\t\t\t\t\t\t\t\tsample_type='rank2_row_uniform', \n\t\t\t\t\t\t\t\tseed = self.seed_Kk2)\n\t\t\n\t\tif clip == True:\n\t\t\tarray_dict = clip_array(dict(Kk1 = self.Kk1, Kk2 = self.Kk2))\n\t\t\tself.Kk1 = array_dict['Kk1']\n\t\t\tself.Kk2 = array_dict['Kk2']", "def generate_constraints():\n return list(chain(collect_rows(), collect_columns(), collect_blocks()))", "def generate_pn2kc_weights(nb_pn, nb_kc, min_pn=10, max_pn=20, aff_pn2kc=None, nb_trials=100000, baseline=25000,\r\n rnd=np.random.RandomState(2018), dtype=np.float32):\r\n\r\n dispersion = np.zeros(nb_trials)\r\n best_pn2kc = None\r\n\r\n for trial in range(nb_trials):\r\n pn2kc = np.zeros((nb_pn, nb_kc), dtype=dtype)\r\n\r\n if aff_pn2kc is None or aff_pn2kc <= 0:\r\n vaff_pn2kc = rnd.randint(min_pn, max_pn + 1, size=nb_pn)\r\n else:\r\n vaff_pn2kc = np.ones(nb_pn) * aff_pn2kc\r\n\r\n # go through every kenyon cell and 
select a nb_pn PNs to make them afferent\r\n for i in range(nb_pn):\r\n pn_selector = rnd.permutation(nb_kc)\r\n pn2kc[i, pn_selector[:vaff_pn2kc[i]]] = 1\r\n\r\n # This selections mechanism can be used to restrict the distribution of random connections\r\n # compute the sum of the elements in each row giving the number of KCs each PN projects to.\r\n pn2kc_sum = pn2kc.sum(axis=0)\r\n dispersion[trial] = pn2kc_sum.max() - pn2kc_sum.min()\r\n # pn_mean = pn2kc_sum.mean()\r\n\r\n # Check if the number of projections per PN is balanced (min max less than baseline)\r\n # if the dispersion is below the baseline accept the sample\r\n if dispersion[trial] <= baseline: return pn2kc\r\n\r\n # cache the pn2kc with the least dispersion\r\n if best_pn2kc is None or dispersion[trial] < dispersion[:trial].min():\r\n best_pn2kc = pn2kc\r\n\r\n # if non of the samples have dispersion lower than the baseline,\r\n # return the less dispersed one\r\n return best_pn2kc", "def TAoCPpermutation(n,k):\n perms = []\n for subset in itertools.combinations(range(n), k):\n A = []; B = []; C = []; min = 0; j = 0; up = 0\n for i in xrange(n):\n if(j>=k or i != subset[j]):\n B.append(i)\n up +=1\n else:\n up -=1\n j += 1\n if(up < min):\n min = up\n B.append(i)\n else:\n A.append(i)\n C.append(B.pop())\n perms.append(A+B+C)\n return perms", "def crossover(chromosome_1, chromosome_2):\n (x1, y1) = (randrange(col_count), randrange(row_count))\n (x2, y2) = (randrange(x1+1, col_count+1), randrange(y1+1, row_count+1))\n def mate(chromosome_1, chromosome_2):\n used = set(chromosome_1[x+y*col_count] for x in range(x1, x2) for y in range(y1, y2))\n not_used = (allele for allele in chromosome_2 if allele not in used)\n return [chromosome_1[x+y*col_count] if x1 <= x < x2 and y1 <= y < y2 else next(not_used) for y in range(row_count) for x in range(col_count)]\n return (mate(chromosome_1, chromosome_2), mate(chromosome_2, chromosome_1))", "def crossover(NN1, NN2, p_c, p_m):\n if np.random.choice([0, 1], p=[1-p_c, p_c]):\n return nn.mate_neural_nets(NN1, NN2, p_m)\n else:\n return np.random.choice([NN1, NN2])", "def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring", "def smooth_time_constraints(list_constraints):\n\n for constraint_one in list_constraints:\n for constraint_two in list_constraints:\n if check_simultaneity(constraint_one, constraint_two):\n merge_time_constraints(list_constraints, constraint_one, constraint_two)\n return list_constraints", "def cross_over(mating_pool, offspring, generation, chance, types):\n\n\t# create empty list for children\n\tchildren = []\n\tfittest_score = 0\n\tchance_array = []\n\n\t# determine probability\n\tprobability = len(mating_pool)\n\n\t# iterate over mating pool\n\tfor i in range(len(mating_pool)):\n\n\t\t# iterate over probability\n\t\tfor j in range(probability):\n\n\t\t\t# append iteration\n\t\t\tchance_array.append(i)\n\n\t\t# decrease probability\n\t\tprobability -= 1\n\n\t# iterate over offspring\n\tfor i in range(offspring):\n\n\t\t# 
create an empty schedule\n\t\tschedule = create_empty_schedule()\n\n\t\t# amount of activites that have to be scheduled\n\t\tactivities = 125\n\n\t\t# choose a mother and father from the mating pool\n\t\tparents = []\n\t\tparents = mating_pool\n\t\trandom_parent = chance_array[random.randint(0, len(chance_array) - 1)]\n\t\tparent_schedule = parents[random_parent][1]\n\n\t\t# until no activities are left\n\t\twhile activities > 0:\n\n\t\t\t# choose random course from parent schedule\n\t\t\trandom_course = random.randint(0, len(parent_schedule) - 1)\n\n\t\t\t# create empty course list\n\t\t\tcourses = []\n\n\t\t\t# iterate over values in schedule\n\t\t\tfor key, value in schedule.items():\n\n\t\t\t\t# if roomlock is not empty\n\t\t\t\tif value is not None:\n\n\t\t\t\t\t# add to course list\n\t\t\t\t\tcourses.append(value)\n\n\t\t\t# initialize counters\n\t\t\tcounter = 0\n\t\t\tnewparentcounter = 0\n\n\t\t\t# if schedule has no place for the random course, a None value is chosen, or the random course is already chosen\n\t\t\twhile schedule[random_course] is not None or parent_schedule[random_course] is None or parent_schedule[random_course] in courses:\n\n\t\t\t\t# choose new random course from parent schedule\n\t\t\t\trandom_course = random.randint(0, len(parent_schedule) - 1)\n\n\t\t\t\t# increase counter\n\t\t\t\tcounter += 1\n\n\t\t\t\t# if random course still not scheduled\n\t\t\t\tif counter > 100:\n\n\t\t\t\t\t# choose new parent\n\t\t\t\t\tparent_schedule = parents[random.randint(0, len(parents) - 1)][1]\n\n\t\t\t\t\t# reset counter\n\t\t\t\t\tcounter = 0\n\n\t\t\t\t\t# increase parent-counter\n\t\t\t\t\tnewparentcounter += 1\n\n\t\t\t\t\t# if 500 new parents weren't enough\n\t\t\t\t\tif newparentcounter > 500:\n\t\t\t\t\t\tbreak\n\n\t\t\t# schedule random course on same place as parent\n\t\t\tschedule[random_course] = parent_schedule[random_course]\n\n\t\t\t# decrease activities\n\t\t\tactivities -= 1\n\n\t\t# update classes from schedule\n\t\tallcourses, student_list, chambers = update_classes_from_schedule(schedule)\n\n\t\t# create new arrays schedule properties\n\t\ttimetable_info = []\n\t\tscore_info = []\n\n\t\t# add all information about this specific schedule\n\t\tscore_info.append(allcourses)\n\t\tscore_info.append(student_list)\n\t\tscore_info.append(chambers)\n\n\t\t# perform mutatation if chance is higher than probability\n\t\tmutation(schedule, chambers, allcourses, student_list, chance, type)\n\n\t\t# add individual schedule-info to timetable array\n\t\ttimetable_info.append(score_info)\n\t\ttimetable_info.append(schedule)\n\n\t\t# calculate score\n\t\tscore = calc_score(allcourses, student_list, chambers)\n\t\tgen_scores.append([generation, score])\n\n\n\t\t# if score is better than the fittest\n\t\tif score > fittest_score:\n\n\t\t\t# adjust fittest score\n\t\t\tfittest_score = score\n\n\t\t\t# print best new score of the generation to the console\n\t\t\tprint(\"New best found ---> Schedule: {}, generation: {}, score: {}\".format(i, generation, score))\n\n\t\t# add the array with individual timetable-info to the population\n\t\tchildren.append(timetable_info)\n\n\treturn children", "def normal_closure(self, other, k=10):\n if hasattr(other, 'generators'):\n degree = self.degree\n identity = _af_new(list(range(degree)))\n\n if all(g == identity for g in other.generators):\n return other\n Z = PermutationGroup(other.generators[:])\n base, strong_gens = Z.schreier_sims_incremental()\n strong_gens_distr = _distribute_gens_by_base(base, strong_gens)\n basic_orbits, 
basic_transversals = \\\n _orbits_transversals_from_bsgs(base, strong_gens_distr)\n\n self._random_pr_init(r=10, n=20)\n\n _loop = True\n while _loop:\n Z._random_pr_init(r=10, n=10)\n for _ in range(k):\n g = self.random_pr()\n h = Z.random_pr()\n conj = h^g\n res = _strip(conj, base, basic_orbits, basic_transversals)\n if res[0] != identity or res[1] != len(base) + 1:\n gens = Z.generators\n gens.append(conj)\n Z = PermutationGroup(gens)\n strong_gens.append(conj)\n temp_base, temp_strong_gens = \\\n Z.schreier_sims_incremental(base, strong_gens)\n base, strong_gens = temp_base, temp_strong_gens\n strong_gens_distr = \\\n _distribute_gens_by_base(base, strong_gens)\n basic_orbits, basic_transversals = \\\n _orbits_transversals_from_bsgs(base,\n strong_gens_distr)\n _loop = False\n for g in self.generators:\n for h in Z.generators:\n conj = h^g\n res = _strip(conj, base, basic_orbits,\n basic_transversals)\n if res[0] != identity or res[1] != len(base) + 1:\n _loop = True\n break\n if _loop:\n break\n return Z\n elif hasattr(other, '__getitem__'):\n return self.normal_closure(PermutationGroup(other))\n elif hasattr(other, 'array_form'):\n return self.normal_closure(PermutationGroup([other]))", "def crossover (self, p1, p2, p_pop, c1, c2, c_pop) :\n assert self.crossover_count < self.pop_size\n assert self.get_iteration () == self.last_gen\n self.parents.append (p1)\n self.parents.append (p2)\n self.crossover_count += 2\n if self.crossover_count == self.pop_size :\n assert (self.get_iteration () == self.last_gen)\n print (self.get_iteration ())\n sys.stdout.flush ()\n self.build_model (p_pop)\n self.sample_model (c1, c2, c_pop)\n self.crossover_count = 0\n self.parents = []\n self.children = {}\n self.last_gen += 1\n self.clear_cache ()", "def set_mixture_Kk(self, clip=True):\n\t\t\n\t\tassert 0 <= self.Kk1_p <= 1., \"Kk1 Mixture ratio must be between 0 and 1\"\n\t\tassert 0 <= self.Kk2_p <= 1., \"Kk2 Mixture ratio must be between 0 and 1\"\n\t\t\n\t\tself.Kk1 = sp.zeros((self.Mm, self.Nn))\n\t\tself.Kk2 = sp.zeros((self.Mm, self.Nn))\n\t\t\n\t\tnum_comp1 = int(self.Kk1_p*self.Mm)\n\t\tnum_comp2 = self.Mm - num_comp1\n\t\tparams_Kk1_1 = [self.mu_Kk1_1, self.sigma_Kk1_1]\n\t\tparams_Kk1_2 = [self.mu_Kk1_2, self.sigma_Kk1_2]\n\t\tself.Kk1[:num_comp1, :] = random_matrix([num_comp1, self.Nn], \n\t\t\t\t\t\t\t\t\t\tparams_Kk1_1, seed = self.seed_Kk1)\n\t\tself.Kk1[num_comp1:, :] = random_matrix([num_comp2, self.Nn], \n\t\t\t\t\t\t\t\t\t\tparams_Kk1_2, seed = self.seed_Kk1)\n\t\t\n\t\tnum_comp1 = int(self.Kk2_p*self.Mm)\n\t\tnum_comp2 = self.Mm - num_comp1\n\t\tparams_Kk2_1 = [self.mu_Kk2_1, self.sigma_Kk2_1]\n\t\tparams_Kk2_2 = [self.mu_Kk2_2, self.sigma_Kk2_2]\n\t\t\n\t\tself.Kk2[:num_comp1, :] = random_matrix([num_comp1, self.Nn], \n\t\t\t\t\t\t\t\t\t\tparams_Kk2_1, seed = self.seed_Kk2)\t\t\n\t\tself.Kk2[num_comp1:, :] = random_matrix([num_comp2, self.Nn], \n\t\t\t\t\t\t\t\t\t\tparams_Kk2_2, seed = self.seed_Kk2)\n\t\t\n\t\tif clip == True:\n\t\t\tarray_dict = clip_array(dict(Kk1 = self.Kk1, Kk2 = self.Kk2))\n\t\t\tself.Kk1 = array_dict['Kk1']\n\t\t\tself.Kk2 = array_dict['Kk2']", "def rand_clown(n1=100, n2=100, sigma1=1, sigma2=2):\n x0 = np.random.randn(n1)\n x1 = x0 * x0 + sigma1 * np.random.randn(n1)\n x2 = np.vstack([sigma2 * np.random.randn(n2),\n sigma2 * np.random.randn(n2) + 2.])\n res = np.hstack([np.vstack([[x0, x1], 1. * np.ones([1, n1])]),\n np.vstack([x2, 2. 
* np.ones([1, n2])])]).T\n ind = np.arange(res.shape[0])\n np.random.shuffle(ind)\n return np.array(res[ind, :])", "def tensor_choose_k(boolean_mask, rng, k=1, random=False):\n\n mask = boolean_mask\n if mask.ndim > 2:\n raise Exception('Input tensor must be either 1d or 2d.')\n elif mask.ndim == 1:\n mask = mask.dimshuffle('x', 0)\n\n assert T.lt(k, mask.shape[1]), 'k must be < then # of possible choices'\n\n if random is True:\n noise = rng.uniform(mask.shape, low=0, high=mask.shape[1])\n else:\n noise = T.arange(mask.shape[1])[::-1] + 1 # Descending order\n noise = T.cast(noise, theano.config.floatX).dimshuffle('x', 0)\n\n if k == 1:\n return T.argmax(mask*noise, axis=1)\n return T.argsort(mask*noise, axis=1)[:, ::-1][:, :k]", "def main_constraints(self, batch, l_suff_stats):\n\n if self.mode == 'parametric':\n kl = 'i-projection'\n else:\n kl = 'm-projection'\n policy = self.mc.get('policy', target=False)\n t_states = torch.Tensor(batch.states)\n t_policy = self.mc.get('policy', target=True)\n target_suff_stats = t_policy.forward(t_states).detach()\n\n # split constraint if wanted\n if self.split_constraint:\n\n num_dims = target_suff_stats.size()[1]\n constraints = []\n for d in range(num_dims):\n c = (self.v[d] - policy.kl_divergence(batch, l_suff_stats[d+1], kl, 'mean'))\n constraints.append(c)\n\n else:\n constraints = [(self.v[0] - policy.kl_divergence(batch, l_suff_stats[0], kl, 'mean'))]\n\n return constraints" ]
[ "0.6233484", "0.6091983", "0.59595877", "0.59208804", "0.5847458", "0.572482", "0.5696735", "0.55340487", "0.5419363", "0.54179275", "0.541135", "0.53817564", "0.5309946", "0.5270483", "0.5232102", "0.5205794", "0.5137773", "0.5123041", "0.5108216", "0.5081783", "0.50262445", "0.5020657", "0.50206304", "0.50172514", "0.50098807", "0.5000969", "0.49971062", "0.4991316", "0.49822602", "0.4970229" ]
0.64498144
0
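The record above pairs the scramble_clause_crossover description with its implementation. As a reading aid only (not part of the dataset rows), here is a minimal usage sketch in Python; the tuple encoding of clauses, the individual length k = 4, and the fixed random seed are assumptions made purely for illustration, since the rows only require that individuals be equal-length lists of clauses.

# Illustrative usage sketch only; the clause encoding below is assumed, not taken from the dataset.
import random

def scramble_clause_crossover(ind1, ind2):  # copied from the record above for self-containment
    all_clauses = ind1 + ind2
    random.shuffle(all_clauses)
    ind1[0:len(ind1)] = all_clauses[0:len(ind1)]
    ind2[0:len(ind2)] = all_clauses[len(ind1):len(ind1) + len(ind2)]

random.seed(0)
ind1 = [("x1", 1), ("x2", 0), ("x3", 1), ("x4", 0)]   # individual = list of k clauses
ind2 = [("x1", 0), ("x2", 1), ("x3", 0), ("x4", 1)]
scramble_clause_crossover(ind1, ind2)
print(ind1)  # first k clauses of the scrambled 2k-clause pool
print(ind2)  # remaining k clauses; both individuals keep length k

Because any clause from either parent can land in either child, this operator mixes genetic material more aggressively than a single-point or uniform clause crossover, which is the property the query text highlights.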
Operates on constraint level. Works the same as scramble_clause_crossover, except that all occurrences of clauses that occur multiple times in either individual, or that occur in both individuals, are first evenly divided over both individuals.
def avoid_duplicate_clauses_scramble_clause_crossover(ind1, ind2):
    ind_length = len(ind1)
    ind1_copy = copy.deepcopy(ind1)
    ind2_copy = copy.deepcopy(ind2)

    clauses_both_have = []
    remaining_clauses = []
    for clause in ind1:
        try:
            index = ind2_copy.index(clause)
            clauses_both_have.append(clause)
            del ind2_copy[index]
        except ValueError:
            remaining_clauses.append(clause)

    for clause in ind2:
        try:
            index = ind1_copy.index(clause)
            del ind1_copy[index]
        except ValueError:
            remaining_clauses.append(clause)

    random.shuffle(remaining_clauses)
    ind1[0:len(clauses_both_have)] = clauses_both_have
    ind2[0:len(clauses_both_have)] = clauses_both_have
    ind1[len(clauses_both_have):] = remaining_clauses[:len(remaining_clauses) // 2]
    ind2[len(clauses_both_have):] = remaining_clauses[len(remaining_clauses) // 2:]
    if len(ind1) != ind_length or len(ind2) != ind_length:
        raise Exception("Crossover operator altered the length of an individual")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scramble_clause_crossover(ind1, ind2):\n all_clauses = ind1 + ind2\n random.shuffle(all_clauses)\n ind1[0:len(ind1)] = all_clauses[0:len(ind1)]\n ind2[0:len(ind2)] = all_clauses[len(ind1):len(ind1) + len(ind2)]", "def smart_clause_crossover_infeasibility(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None):\n allow_duplicates = False # allow_duplicates denotes whether the resulting indivuals may contain duplicate clauses\n ind1_hard_constraints = [constr for constr in ind1 if constr[-2] == True]\n ind2_hard_constraints = [constr for constr in ind2 if constr[-2] == True]\n all_hard_constraints = ind1_hard_constraints + ind2_hard_constraints\n ind1_soft_constraints = [constr for constr in ind1 if constr[-2] == False]\n ind2_soft_constraints = [constr for constr in ind2 if constr[-2] == False]\n all_soft_constraints = ind1_soft_constraints + ind2_soft_constraints\n ind1_hard_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1_hard_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind2_hard_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2_hard_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind1_soft_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1_soft_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind2_soft_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2_soft_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n all_hard_coverage_bitvectors = ind1_hard_coverage_bitvectors + ind2_hard_coverage_bitvectors\n all_soft_coverage_bitvectors = ind1_soft_coverage_bitvectors + ind2_soft_coverage_bitvectors\n\n ind1_num_hard = len([constr for constr in ind1 if constr[-2] == True])\n ind2_num_hard = len([constr for constr in ind2 if constr[-2] == True])\n # num_hard = random.choice([ind1_num_hard, ind2_num_hard])\n if ind1_num_hard <= ind2_num_hard:\n num_hard = random.choice(list(range(ind1_num_hard, ind2_num_hard+1)))\n else:\n num_hard = random.choice(list(range(ind2_num_hard, ind1_num_hard + 1)))\n num_soft = len(ind1) - num_hard\n chosen_hard_clauses = []\n chosen_hard_clause_indices = []\n chosen_soft_clauses = []\n chosen_soft_clause_indices = []\n\n # Choose hard constraints\n for i in range(0, num_hard):\n if i == 0:\n combined_hard_coverage_bitvectors = all_hard_coverage_bitvectors\n else:\n combined_hard_coverage_bitvectors = [combine_coverage_bitvectors_hard_constraints(\n chosen_hard_clauses_bitvector, bitvector, examples) for bitvector in all_hard_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_hard_clause_indices:\n for j in range(len(combined_hard_coverage_bitvectors)):\n if all_hard_constraints[index][:-2] == all_hard_constraints[j][:-2]:\n combined_hard_coverage_bitvectors[j] = [0] * len(examples)\n if greedy:\n combined_hard_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_hard_coverage_bitvectors]\n best_hard_coverage = max(combined_hard_coverages)\n best_hard_indices = [i for i in range(len(combined_hard_coverages)) if combined_hard_coverages[i] == best_hard_coverage]\n chosen_hard_clause_index = random.choice(best_hard_indices)\n else:\n coverages = [sum(x) for x in combined_hard_coverage_bitvectors]\n if probability_variant == \"linear\":\n sum_coverages = sum(coverages)\n coverages_to_probabilities = [x / sum_coverages for x in coverages]\n elif 
probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in coverages]\n sum_coverages_squared = sum(coverages_squared)\n coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in coverages]\n elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_hard_clause_index = np.random.choice(list(range(0, len(all_hard_coverage_bitvectors))),\n p=coverages_to_probabilities)\n chosen_hard_coverage_bitvector = combined_hard_coverage_bitvectors[chosen_hard_clause_index]\n if chosen_hard_clause_index < len(ind1_hard_constraints):\n chosen_hard_clause = ind1_hard_constraints[chosen_hard_clause_index]\n else:\n chosen_hard_clause = ind2_hard_constraints[chosen_hard_clause_index - len(ind1_hard_constraints)]\n\n chosen_hard_clauses.append(chosen_hard_clause)\n chosen_hard_clause_indices.append(chosen_hard_clause_index)\n chosen_hard_clauses_bitvector = chosen_hard_coverage_bitvector\n\n # Choose soft constraints\n for i in range(0, num_soft):\n if i == 0:\n combined_soft_coverage_bitvectors = all_soft_coverage_bitvectors\n else:\n combined_soft_coverage_bitvectors = [combine_coverage_bitvectors_soft_constraints(\n chosen_soft_clauses_bitvector, bitvector, examples) for bitvector in all_soft_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_soft_clause_indices:\n for j in range(len(combined_soft_coverage_bitvectors)):\n if all_soft_constraints[index][:-2] == all_soft_constraints[j][:-2]:\n combined_soft_coverage_bitvectors[j] = [0] * len(examples)\n if greedy:\n combined_soft_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_soft_coverage_bitvectors]\n best_soft_coverage = max(combined_soft_coverages)\n best_soft_indices = [i for i in range(len(combined_soft_coverages)) if combined_soft_coverages[i] == best_soft_coverage]\n chosen_soft_clause_index = random.choice(best_soft_indices)\n else:\n coverages = [sum(x) for x in combined_soft_coverage_bitvectors]\n if probability_variant == \"linear\":\n sum_coverages = sum(coverages)\n coverages_to_probabilities = [x / sum_coverages for x in coverages]\n elif probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in coverages]\n sum_coverages_squared = sum(coverages_squared)\n coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in coverages]\n elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_soft_clause_index = np.random.choice(list(range(0, len(all_soft_coverage_bitvectors))),\n p=coverages_to_probabilities)\n chosen_soft_coverage_bitvector = combined_soft_coverage_bitvectors[chosen_soft_clause_index]\n if chosen_soft_clause_index < len(ind1_soft_constraints):\n chosen_soft_clause = ind1_soft_constraints[chosen_soft_clause_index]\n else:\n chosen_soft_clause = ind2_soft_constraints[chosen_soft_clause_index - len(ind1_soft_constraints)]\n\n chosen_soft_clauses.append(chosen_soft_clause)\n 
chosen_soft_clause_indices.append(chosen_soft_clause_index)\n chosen_soft_clauses_bitvector = chosen_soft_coverage_bitvector\n\n for i in range(len(chosen_hard_clauses)):\n hard_clause = chosen_hard_clauses[i]\n # We can safely set ind1 and ind2 to the same computed smart combination, as only one of them will make it\n # to the next generation\n ind1[i] = hard_clause\n ind2[i] = hard_clause\n\n for i in range(len(chosen_soft_clauses)):\n soft_clause = chosen_soft_clauses[i]\n ind1[num_hard+i] = soft_clause\n ind2[num_hard+i] = soft_clause", "def smart_clause_crossover(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None):\n allow_duplicates = False # allow_duplicates denotes whether the resulting indivuals may contain duplicate clauses\n number_of_clauses = len(ind1)\n all_clauses = ind1+ind2\n chosen_clauses = []\n chosen_clause_indices = []\n ind1_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1, examples, clause_bitvector_cache=clause_bitvector_cache)\n ind2_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2, examples, clause_bitvector_cache=clause_bitvector_cache)\n all_coverage_bitvectors = ind1_coverage_bitvectors + ind2_coverage_bitvectors\n\n for i in range(0, number_of_clauses):\n if i == 0:\n combined_coverage_bitvectors = all_coverage_bitvectors\n else:\n combined_coverage_bitvectors = [combine_coverage_bitvectors(chosen_clauses_bitvector, bitvector, examples)\n for bitvector in all_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_clause_indices:\n for j in range(len(combined_coverage_bitvectors)):\n if all_clauses[index] == all_clauses[j]:\n combined_coverage_bitvectors[j] = [0] * len(examples)\n combined_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_coverage_bitvectors]\n if greedy:\n best_coverage = max(combined_coverages)\n best_indices = [i for i in range(len(combined_coverages)) if combined_coverages[i] == best_coverage]\n chosen_clause_index = random.choice(best_indices)\n else:\n if probability_variant == \"linear\":\n sum_coverages = sum(combined_coverages)\n coverages_to_probabilities = [x / sum_coverages for x in combined_coverages]\n elif probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in combined_coverages]\n sum_coverages_squared = sum(coverages_squared)\n coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in combined_coverages]\n elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(combined_coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in combined_coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_clause_index = np.random.choice(list(range(0, len(all_coverage_bitvectors))),\n p=coverages_to_probabilities)\n chosen_coverage_bitvector = combined_coverage_bitvectors[chosen_clause_index]\n if chosen_clause_index < number_of_clauses:\n chosen_clause = ind1[chosen_clause_index]\n else:\n chosen_clause = ind2[chosen_clause_index - number_of_clauses]\n\n chosen_clauses.append(chosen_clause)\n chosen_clause_indices.append(chosen_clause_index)\n chosen_clauses_bitvector = chosen_coverage_bitvector\n\n for i in range(len(chosen_clauses)):\n clause = chosen_clauses[i]\n # We can safely set ind1 and ind2 to the same computed smart combination, as only one of them will make it\n # to the next 
generation\n ind1[i] = clause\n ind2[i] = clause", "def matched_uniform_crossover(ind1, ind2):\n # We calculate the pairwise match between ind1's and ind2's clauses\n match_matrix = np.zeros((len(ind1), len(ind2)))\n for i in range(len(ind1)):\n clause1 = ind1[i]\n for j in range(len(ind2)):\n clause2 = ind2[j]\n curr_syntactic_match = 0\n for k in range(len(clause1)):\n if k != len(clause1) - 1:\n if clause1[k] == clause2[k]:\n curr_syntactic_match += 1\n else:\n curr_syntactic_match += (1 - abs(clause1[k] - clause2[k]))\n match_matrix[i][j] = curr_syntactic_match\n\n # Arg-sort the pairwise clause matches from best to worst match\n matches_ordered = np.dstack(np.unravel_index(np.argsort(match_matrix.ravel())[::-1], (len(ind1), len(ind2))))[0]\n\n # Finally match the clauses, making sure that each clause is only matched once\n # Then perform uniform crossover on matched clauses\n ind1_matched_clauses = set()\n ind2_matched_clauses = set()\n count = 0\n for match in matches_ordered:\n i = match[0]\n j = match[1]\n if match_matrix[i][j] >= len(ind1[0])//2:\n if i not in ind1_matched_clauses and j not in ind2_matched_clauses:\n count += 1\n # Perform the uniform crossover\n for k in range(len(ind1[i])):\n if random.random() < 0.5:\n temp = ind1[i][k]\n ind1[i][k] = ind2[j][k]\n ind2[j][k] = temp\n ind1_matched_clauses.add(i)\n ind2_matched_clauses.add(j)", "def uniform_clause_crossover(ind1, ind2):\n k = len(ind1)\n for i in range(k):\n if random.random() < 0.5:\n temp = ind1[i]\n ind1[i] = ind2[i]\n ind2[i] = temp", "def shapley(self, subgraph_nodes):\n num_nodes = self.graph.num_nodes()\n subgraph_nodes = subgraph_nodes.tolist()\n\n # Obtain neighboring nodes of the subgraph g_i, P'.\n local_region = subgraph_nodes\n for _ in range(self.num_hops - 1):\n in_neighbors, _ = self.graph.in_edges(local_region)\n _, out_neighbors = self.graph.out_edges(local_region)\n neighbors = torch.cat([in_neighbors, out_neighbors]).tolist()\n local_region = list(set(local_region + neighbors))\n\n split_point = num_nodes\n coalition_space = list(set(local_region) - set(subgraph_nodes)) + [\n split_point\n ]\n\n marginal_contributions = []\n device = self.feat.device\n for _ in range(self.shapley_steps):\n permuted_space = np.random.permutation(coalition_space)\n split_idx = int(np.where(permuted_space == split_point)[0])\n\n selected_nodes = permuted_space[:split_idx]\n\n # Mask for coalition set S_i\n exclude_mask = torch.ones(num_nodes)\n exclude_mask[local_region] = 0.0\n exclude_mask[selected_nodes] = 1.0\n\n # Mask for set S_i and g_i\n include_mask = exclude_mask.clone()\n include_mask[subgraph_nodes] = 1.0\n\n exclude_feat = self.feat * exclude_mask.unsqueeze(1).to(device)\n include_feat = self.feat * include_mask.unsqueeze(1).to(device)\n\n with torch.no_grad():\n exclude_probs = self.model(\n self.graph, exclude_feat, **self.kwargs\n ).softmax(dim=-1)\n exclude_value = exclude_probs[:, self.target_class]\n include_probs = self.model(\n self.graph, include_feat, **self.kwargs\n ).softmax(dim=-1)\n include_value = include_probs[:, self.target_class]\n marginal_contributions.append(include_value - exclude_value)\n\n return torch.cat(marginal_contributions).mean().item()", "def __no_crossing(self):\n for pos_left_1 in range(self.n):\n for pos_left_2 in range(pos_left_1 + 1, self.n):\n for pos_right_2 in range(self.n):\n for pos_right_1 in range(pos_right_2 + 1, self.n):\n # For all i, j, k, m | k < i and m > j . 
not w(i, j) or not w(k, m)\n self.__clause(-self.preds.w(pos_left_1, pos_right_1),\n -self.preds.w(pos_left_2, pos_right_2))", "def squeeze_accept(partition):\n Write a function that\n - Sort districts by most Democratic heavy and most Republican heavy\n\n - Assign a base value of competitiveness for each district\n - Run chain, accept only if districts satisfy values under or order\n \"\"\"\n\n#--- CONSTRAINTS\n\n\"\"\"", "def clause_crossover_1x(ind1, ind2):\n k = len(ind1)\n cx_point = random.randint(1, k - 1)\n temp = ind1[cx_point:]\n ind1[cx_point:] = ind2[cx_point:]\n ind2[cx_point:] = temp", "def uniformCrossover(self, cl):\n if cons.env.format_data.discrete_action: #Always crossover condition if the phenotype is discrete (if continuous phenotype, half the time phenotype crossover is performed instead)\n self_specified_atts = copy.deepcopy(self.specified_attributes)\n cl_specified_atts = copy.deepcopy(cl.specified_attributes)\n probability = 0.5 #Equal probability for attribute alleles to be exchanged.\n\n #Make list of attribute references appearing in at least one of the parents.-----------------------------\n combined_atts = []\n for i in self_specified_atts:\n combined_atts.append(i)\n for i in cl_specified_atts:\n if i not in combined_atts:\n combined_atts.append(i)\n elif not cons.env.format_data.attribute_info[i][0]: #Attribute specified in both parents, and the attribute is discrete (then no reason to cross over)\n combined_atts.remove(i)\n combined_atts.sort()\n #--------------------------------------------------------------------------------------------------------\n changed = False;\n for att in combined_atts: #Each condition specifies different attributes, so we need to go through all attributes in the dataset.\n att_info = cons.env.format_data.attribute_info[att]\n #-----------------------------\n ref = 0\n #if att in self.specified_attributes:\n if att in self_specified_atts:\n ref += 1\n #if att in cl.specified_attributes:\n if att in cl_specified_atts:\n ref += 1\n #-----------------------------\n\n if ref == 0: #Attribute not specified in either condition (Attribute type makes no difference)\n print(\"Error: UniformCrossover!\")\n pass\n\n elif ref == 1: #Attribute specified in only one condition - do probabilistic switch of whole attribute state (Attribute type makes no difference)\n if att in self_specified_atts and random.random() > probability:\n i = self.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n cl.condition.append(self.condition.pop(i)) #Take attribute from self and add to cl\n cl.specified_attributes.append(att)\n self.specified_attributes.remove(att)\n changed = True #Remove att from self and add to cl\n\n\n if att in cl_specified_atts and random.random() < probability:\n i = cl.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n self.condition.append(cl.condition.pop(i)) #Take attribute from self and add to cl\n self.specified_attributes.append(att)\n cl.specified_attributes.remove(att)\n changed = True #Remove att from cl and add to self.\n\n\n else: #Attribute specified in both conditions - do random crossover between state alleles. 
The same attribute may be specified at different positions within either classifier\n #-------------------------------------------------------\n # CONTINUOUS ATTRIBUTE\n #-------------------------------------------------------\n if att_info[0]:\n i_cl1 = self.specified_attributes.index(att) #pairs with self (classifier 1)\n i_cl2 = cl.specified_attributes.index(att) #pairs with cl (classifier 2)\n tmp_key = random.randint(0,3) #Make random choice between 4 scenarios, Swap minimums, Swap maximums, Self absorbs cl, or cl absorbs self.\n if tmp_key == 0: #Swap minimum\n temp = self.condition[i_cl1][0]\n self.condition[i_cl1][0] = cl.condition[i_cl2][0]\n cl.condition[i_cl2][0] = temp\n elif tmp_key == 1: #Swap maximum\n temp = self.condition[i_cl1][1]\n self.condition[i_cl1][1] = cl.condition[i_cl2][1]\n cl.condition[i_cl2][1] = temp\n else: #absorb range\n all_list = self.condition[i_cl1] + cl.condition[i_cl2]\n new_min = min(all_list)\n new_max = max(all_list)\n if tmp_key == 2: #self absorbs cl\n self.condition[i_cl1] = [new_min,new_max]\n #Remove cl\n cl.condition.pop(i_cl2)\n cl.specified_attributes.remove(att)\n else: #cl absorbs self\n cl.condition[i_cl2] = [new_min,new_max]\n #Remove self\n self.condition.pop(i_cl1)\n self.specified_attributes.remove(att)\n #-------------------------------------------------------\n # DISCRETE ATTRIBUTE\n #-------------------------------------------------------\n else:\n pass\n tmp_list1 = copy.deepcopy(self_specified_atts)\n tmp_list2 = copy.deepcopy(cl.specified_attributes)\n tmp_list1.sort()\n tmp_list2.sort()\n if changed and (tmp_list1 == tmp_list2):\n changed = False\n\n if self.action != cl.action and random.random() > probability:\n # Switch phenotypes of 2 classifiers if GA is run in match set\n temp = self.action\n self.action = cl.action\n cl.action = temp\n changed = True\n return changed\n #-------------------------------------------------------\n # CONTINUOUS PHENOTYPE CROSSOVER\n #-------------------------------------------------------\n elif random.random() < 0.5:\n return self.actionCrossover(cl)", "def constraint_test():\n import itertools, sys\n\n show_analysis = False\n #Generated via grammar\n gr = grammar.Grammar('grammars/test_constraints.bnf')\n inputs = ([1 for _ in range(100)], [ i%3 for i in range(100)])\n for _input in inputs: \n output = gr.generate(_input)\n azr = analyser.Analyser('test',output['phenotype'],True)\n try:\n azr.create_graph()\n except ValueError as e:\n print(__name__, \"ERROR\", _input, e)\n continue\n azr.parse_graph()\n azr.apply_stresses()\n azr.create_slf_file()\n azr.test_slf_file()\n azr.parse_results()\n azr.print_stresses()\n if show_analysis:\n azr.show_analysis()\n \n #Fixed generated\n lengths = (1000, 10000)\n levels = (5, 10)\n for length_idx, level_idx in itertools.permutations([0,1]):\n try:\n GRAPH = constrained_offset_graph(lengths[length_idx],\n levels[length_idx])\n except ValueError as e:\n print(__name__, \"ERROR\", lengths[length_idx], levels[length_idx], e)\n continue\n GRAPH.save_graph(\"pylon\")\n print \"nodes:\", GRAPH.number_of_nodes()\n print \"edges\", GRAPH.number_of_edges()\n #will it blend?\n azr = analyser.Analyser('test',\"moo\",True)\n azr.my_graph = GRAPH\n azr.parse_graph()\n azr.apply_stresses()\n azr.create_slf_file()\n azr.test_slf_file()\n azr.parse_results()\n azr.print_stresses()\n if show_analysis:\n azr.show_analysis()", "def gen2_constraint(model):\n return 20, model.g[2], 100", "def smart_clause_crossover_dispatch(ind1, ind2, examples, greedy=True, 
probability_variant=None, temperature=1, clause_bitvector_cache=None, use_infeasibility=False):\n if use_infeasibility:\n smart_clause_crossover_infeasibility(ind1, ind2, examples, greedy=greedy, probability_variant=probability_variant, temperature=temperature, clause_bitvector_cache=clause_bitvector_cache)\n else:\n smart_clause_crossover(ind1, ind2, examples, greedy=greedy, probability_variant=probability_variant, temperature=temperature, clause_bitvector_cache=clause_bitvector_cache)", "def c_test__cross_inp(self, old_population, population_weighting, run_locals):\r\n return 1", "def _apply_crossover(pop, op, pb):\n for i in range(1, len(pop), 2):\n if random.random() < pb:\n pop[i - 1], pop[i] = op(pop[i - 1], pop[i])\n del pop[i - 1].fitness.values\n del pop[i].fitness.values\n return pop", "def test_tensor_composite_constraints_equal_penalties():\n from pygam.penalties import derivative\n\n def der1(*args, **kwargs):\n kwargs.update({'derivative':1})\n return derivative(*args, **kwargs)\n\n # create a 3D tensor where the penalty should be equal to the constraint\n term = te(0, 1, 2,\n n_splines=[4, 5, 6],\n penalties=der1,\n lam=1,\n constraints='monotonic_inc')\n\n # check all the dimensions\n for i in range(3):\n P = term._build_marginal_penalties(i).A\n C = term._build_marginal_constraints(i,\n -np.arange(term.n_coefs),\n constraint_lam=1,\n constraint_l2=0).A\n\n assert (P == C).all()", "def SetPRCatConstraint(self, model ) :\n tot = np.multiply(self.wish, self.dispo)\n for line in tot :\n for val in line :\n if not val : continue\n if self.bound>0 : model += val <= self.valBound\n elif self.bound<0 : model += val >= self.valBound", "def main_constraints(self, batch, l_suff_stats):\n\n if self.mode == 'parametric':\n kl = 'i-projection'\n else:\n kl = 'm-projection'\n policy = self.mc.get('policy', target=False)\n t_states = torch.Tensor(batch.states)\n t_policy = self.mc.get('policy', target=True)\n target_suff_stats = t_policy.forward(t_states).detach()\n\n # split constraint if wanted\n if self.split_constraint:\n\n num_dims = target_suff_stats.size()[1]\n constraints = []\n for d in range(num_dims):\n c = (self.v[d] - policy.kl_divergence(batch, l_suff_stats[d+1], kl, 'mean'))\n constraints.append(c)\n\n else:\n constraints = [(self.v[0] - policy.kl_divergence(batch, l_suff_stats[0], kl, 'mean'))]\n\n return constraints", "def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring", "def uniform_crossover(self, another_individual):\n size = len(another_individual.get_genotype())\n genotype1 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n genotype2 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n mask = numpy.random.choice([True,False], size=size)\n not_mask = numpy.logical_not(mask)\n genotype1[mask] = self.get_genotype()[mask]\n genotype1[not_mask] = another_individual.get_genotype()[not_mask]\n genotype2[mask] = another_individual.get_genotype()[mask]\n genotype2[not_mask] = 
self.get_genotype()[not_mask]\n\n return optimization.Individual(genotype1, self.fitness_evaluator, self.uniform_crossover, self.mutation_method), optimization.Individual(genotype2, self.fitness_evaluator, self.uniform_crossover, self.mutation_method)", "def gen1_constraint(model):\n return 10, model.g[1], 50", "def single_crossover(self, original1, original2):\n point=self.r.uniform(0.1,0.6)\n cut1=int(point*len(original1))\n cut2=int(point*len(original2))\n child1=original1[:cut1]+original2[cut2:]\n child2=original2[:cut2]+original1[cut1:]\n return child1, child2", "def create_cont_constraint_mat_separable(H,v1s,v2s,nSides,nConstraints,nC,\n dim_domain,dim_range,tess):\n if dim_domain != 2:\n raise ValueError\n if dim_range not in [1,2]:\n raise ValueError\n nHomoCoo=dim_domain+1 \n length_Avee = dim_range*nHomoCoo\n L1 = np.zeros((nConstraints/2,nC*nHomoCoo))\n\n \n\n nPtsInSide = 2 # Since, in 2D, the side is always a line joining 2 pts.\n# if nSides != nConstraints/(nPtsInSide*dim_domain):\n# raise ValueError(nSides,nConstraints)\n \n if nSides != nConstraints/(nPtsInSide*dim_range):\n print \" print nSides , nConstraints/(nPtsInSide*dim_range):\"\n print nSides , nConstraints/(nPtsInSide*dim_range)\n ipshell('stop')\n raise ValueError( nSides , (nConstraints,nPtsInSide,dim_range))\n\n \n if nSides != H.shape[0]:\n raise ValueError(nSides,H.shape)\n\n\n# M = nPtsInSide*dim_range\n M = nPtsInSide\n if dim_range == 1:\n raise NotImplementedError\n for i in range(nSides): \n v1 = v1s[i]\n v2 = v2s[i]\n \n h = H[i]\n a,b = h.nonzero()[0] # idx for the relevant As \n # s stands for start\n # e stands for end \n s1 = a*length_Avee \n e1 = s1+nHomoCoo \n s2 = b*length_Avee\n e2 = s2+nHomoCoo \n \n # Constraint 1: \n L[i*M,s1:e1]= v1 \n L[i*M,s2:e2]= -v1 \n # Constraint 2: \n L[i*M+1,s1:e1]= v2 \n L[i*M+1,s2:e2]= -v2 \n \n \n elif dim_range==2:\n for i in range(nSides): \n v1 = v1s[i]\n v2 = v2s[i]\n\n if np.allclose(v1,v2):\n raise ValueError(v1,v2)\n\n\n \n \n \n h = H[i]\n a,b = h.nonzero()[0] # idx for the relevant As \n \n\n # L1 is acting on columns of the following form:\n # [ a_1 b_1 c_1 d_1 a_2 b_2 c_2 d_2 ... 
a_Nc b_Nc c_Nc d_Nc] \n # s stands for start\n # e stands for end \n s1 = a*nHomoCoo\n e1 = s1+nHomoCoo \n s2 = b*nHomoCoo\n e2 = s2+nHomoCoo \n \n \n try: \n # Constraint 1: \n row = np.zeros(L1.shape[1])\n row[s1:e1]=v1\n row[s2:e2]=-v1 \n # x component \n L1[i*M]=row \n except:\n ipshell('fail')\n raise \n\n # Constraint 2: \n row = np.zeros(L1.shape[1])\n row[s1:e1]=v2\n row[s2:e2]=-v2 \n # x component \n L1[i*M+1]=row\n \n\n \n \n \n \n \n else:\n raise ValueError(dim_range)\n\n \n return L1", "def double_crossover(self, original1, original2):\n point1=self.r.uniform(0.1,0.3)\n point2=self.r.uniform(0.6,0.8)\n len1=len(original1)\n len2=len(original2)\n cut11=int(point1*len1)\n cut12=int(point2*len1)\n cut21=int(point1*len2)\n cut22=int(point2*len2)\n child1=original1[:cut11]+original2[cut21:cut22]+original1[cut12:]\n child2=original2[:cut21]+original1[cut11:cut12]+original2[cut22:]\n return child1, child2", "def crossover(parent1, parent2):\n child = parent1.clone()\n for k in range(parent1.num_input + parent1.num_output):\n if np.random.randint(2) == 1:\n child.identifiers[k] = parent2.identifiers[k]\n child.inhibitors[k] = parent2.inhibitors[k]\n child.enhancers[k] = parent2.enhancers[k]\n\n child.identifiers = child.identifiers[:(child.num_input +\n child.num_output)]\n child.inhibitors = child.inhibitors[:(child.num_input + child.num_output)]\n child.enhancers = child.enhancers[:(child.num_input + child.num_output)]\n\n p1range = list(range(parent1.num_input + parent1.num_output,\n parent1.size()))\n random.shuffle(p1range)\n p2range = list(range(parent2.num_input + parent2.num_output,\n parent2.size()))\n random.shuffle(p2range)\n\n p1remaining = deepcopy(p1range)\n\n # Crossing regulatory\n p1_gene_count = 0\n p2_gene_count = 0\n for p1idx in p1range:\n min_dist = config.CROSSOVER_THRESHOLD\n paired_idx = None\n for p2idx in p2range:\n gdist = parent1.protein_distance(parent2, p1idx, p2idx)\n if gdist < min_dist:\n min_dist = gdist\n paired_idx = p2idx\n if paired_idx is not None:\n if np.random.randint(2) == 0:\n chosen_parent = parent1\n chosen_idx = p1idx\n p1_gene_count += 1\n else:\n chosen_parent = parent2\n chosen_idx = p2idx\n p2_gene_count += 1\n child.identifiers = np.append(\n child.identifiers, chosen_parent.identifiers[chosen_idx])\n child.inhibitors = np.append(\n child.inhibitors, chosen_parent.inhibitors[chosen_idx])\n child.enhancers = np.append(\n child.enhancers, chosen_parent.enhancers[chosen_idx])\n # Remove from consideration again\n p2range = list(set(p2range) - set([p2idx]))\n p1remaining = list(set(p1remaining) - set([p1idx]))\n\n # Add remaining material\n if child.size() == (child.num_input + child.num_output):\n prob = 0.5\n else:\n prob = p1_gene_count / (p1_gene_count + p2_gene_count)\n\n chosen_parent = parent2\n chosen_range = p2range\n if np.random.random() < prob:\n chosen_parent = parent1\n chosen_range = p1remaining\n\n for idx in chosen_range:\n child.identifiers = np.append(child.identifiers,\n chosen_parent.identifiers[idx])\n child.inhibitors = np.append(child.inhibitors,\n chosen_parent.inhibitors[idx])\n child.enhancers = np.append(child.enhancers,\n chosen_parent.enhancers[idx])\n\n child.num_regulatory = child.size() - (child.num_input + child.num_output)\n\n # Cross dynamics\n if np.random.random() < 0.5:\n child.beta = parent1.beta\n else:\n child.beta = parent2.beta\n\n if np.random.random() < 0.5:\n child.delta = parent1.delta\n else:\n child.delta = parent2.delta\n\n return child", "def inversion_crossover(self, pop):\n children, 
tmpNonComb, used = ([] for i in range(3))\n for i in range(0, int(len(pop) * self.fracElite), 1):\n r = int(rand() * len(pop))\n while r == i:\n r = int(rand() * len(pop))\n\n if sum(self.cID + self.dID + self.iID) != 0:\n nonComb1 = pop[i][:np.where(self.cID + self.dID + self.iID == 1)[0][(-1)] + 1]\n nonComb2 = pop[r][:np.where(self.cID + self.dID + self.iID == 1)[0][(-1)] + 1]\n if sum(self.xID) != 0:\n comb1 = pop[i][:np.where(self.xID == 1)[0][(-1)] + 1]\n comb2 = pop[r][:np.where(self.xID == 1)[0][(-1)] + 1]\n if sum(self.cID + self.dID + self.iID) != 0:\n c = int(rand() * len(nonComb1))\n if rand() > 0.5:\n tmpNonComb.append(np.array(nonComb1[0:c + 1].tolist() + nonComb2[c + 1:].tolist()))\n else:\n tmpNonComb.append(np.array(nonComb2[0:c + 1].tolist() + nonComb1[c + 1:].tolist()))\n used.append(i)\n if sum(self.xID) != 0:\n c = int(rand() * len(comb1))\n for c1 in range(c, len(comb1), 1):\n d2 = (contains_sublist(comb2, comb1[c1]) + 1) % len(comb1)\n d1 = contains_sublist(comb1, comb2[d2])\n c2 = contains_sublist(comb2, comb1[((d1 + 1) % len(comb1))]) % len(comb1)\n tmp1 = cp.copy(comb1)\n if c1 < d1:\n tmp1[(c1 + 1):(d1 + 1)] = list(reversed(tmp1[c1 + 1:d1 + 1]))\n else:\n tmp1[d1:c1] = list(reversed(tmp1[d1:c1]))\n tmp2 = cp.copy(comb2)\n if c2 < d2:\n tmp2[c2:d2] = list(reversed(tmp2[c2:d2]))\n else:\n tmp2[(d2 + 1):(c2 + 1)] = list(reversed(tmp2[d2 + 1:c2 + 1]))\n if sum(self.cID + self.dID + self.iID) == 0 and sum(self.xID) != 0:\n children.append(tmp1)\n children.append(tmp2)\n elif sum(self.cID + self.dID + self.iID) != 0 and sum(self.xID) != 0:\n children.append(np.concatenate(tmpNonComb[(-1)], tmp1))\n children.append(np.concatenate(tmpNonComb[(-1)], tmp2))\n used.append(i)\n used.append(r)\n\n if sum(self.cID + self.dID + self.iID) != 0 and sum(self.xID) == 0:\n children = tmpNonComb\n return (\n children, used)", "def _need_parens(outer, inner, adjust):\n return _OP_ORDER[outer.__class__] >= _OP_ORDER[inner.__class__] + adjust", "def generateArithmeticConstraint(board, simpleConstraint):\n func = simpleConstraint[0]\n result = simpleConstraint[1]\n coordinates = []\n for x,y in simpleConstraint[2]:\n coordinates.append(board.getCoordinate(x, y))\n return constraint.ArithmeticConstraint(func, result, coordinates)", "def np_simul_integerizer_ortools(\n sub_int_weights,\n parent_countrol_importance,\n parent_relax_ge_upper_bound,\n sub_countrol_importance,\n sub_float_weights,\n sub_resid_weights,\n lp_right_hand_side,\n parent_hh_constraint_ge_bound,\n sub_incidence,\n parent_incidence,\n total_hh_right_hand_side,\n relax_ge_upper_bound,\n parent_lp_right_hand_side,\n hh_constraint_ge_bound,\n parent_resid_weights,\n total_hh_sub_control_index,\n total_hh_parent_control_index):\n\n from ortools.linear_solver import pywraplp\n\n STATUS_TEXT = {\n pywraplp.Solver.OPTIMAL: STATUS_OPTIMAL,\n pywraplp.Solver.FEASIBLE: STATUS_FEASIBLE,\n pywraplp.Solver.INFEASIBLE: 'INFEASIBLE',\n pywraplp.Solver.UNBOUNDED: 'UNBOUNDED',\n pywraplp.Solver.ABNORMAL: 'ABNORMAL',\n pywraplp.Solver.NOT_SOLVED: 'NOT_SOLVED',\n }\n CBC_TIMEOUT_IN_SECONDS = 60\n\n sample_count, sub_control_count = sub_incidence.shape\n _, parent_control_count = parent_incidence.shape\n sub_zone_count, _ = sub_float_weights.shape\n\n # setting indexes to -1 prevents creation of hh_controls relaxation variables\n # setting hh_control importance to zero eliminates them from the objective function\n # the latter approach is used by the cvx version\n # total_hh_sub_control_index = -1\n # total_hh_parent_control_index = -1\n 
sub_countrol_importance[total_hh_sub_control_index] = 0\n parent_countrol_importance[total_hh_parent_control_index] = 0\n\n # - Instantiate a mixed-integer solver\n solver = pywraplp.Solver('SimulIntegerizeCbc', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)\n solver.EnableOutput()\n solver.set_time_limit(CBC_TIMEOUT_IN_SECONDS * 1000)\n\n # constraints = [\n # x >= 0.0,\n # x <= x_max,\n #\n # relax_le >= 0.0,\n # relax_le <= lp_right_hand_side,\n # relax_ge >= 0.0,\n # relax_ge <= relax_ge_upper_bound,\n #\n # parent_relax_le >= 0.0,\n # parent_relax_le <= parent_lp_right_hand_side,\n # parent_relax_ge >= 0.0,\n # parent_relax_ge <= parent_relax_ge_upper_bound,\n # ]\n\n # x_max is 1.0 unless resid_weights is zero, in which case constrain x to 0.0\n x_max = (~(sub_float_weights == sub_int_weights)).astype(float)\n\n # - Create resid weight variables\n x = {}\n for z in range(sub_zone_count):\n for hh in range(sample_count):\n x[z, hh] = solver.NumVar(0.0, x_max[z, hh], 'x[%s,%s]' % (z, hh))\n\n # - Create positive continuous constraint relaxation variables\n relax_le = {}\n relax_ge = {}\n for z in range(sub_zone_count):\n for c in range(sub_control_count):\n # no relaxation for total households control\n if c == total_hh_sub_control_index:\n continue\n relax_le[z, c] = \\\n solver.NumVar(0.0, lp_right_hand_side[z, c], 'relax_le[%s,%s]' % (z, c))\n relax_ge[z, c] = \\\n solver.NumVar(0.0, relax_ge_upper_bound[z, c], 'relax_ge[%s,%s]' % (z, c))\n\n parent_relax_le = {}\n parent_relax_ge = {}\n for c in range(parent_control_count):\n parent_relax_le[c] = \\\n solver.NumVar(0.0, parent_lp_right_hand_side[c], 'parent_relax_le[%s]' % c)\n parent_relax_ge[c] = \\\n solver.NumVar(0.0, parent_relax_ge_upper_bound[c], 'parent_relax_ge[%s]' % c)\n\n LOG_OVERFLOW = -725\n log_resid_weights = np.log(np.maximum(sub_resid_weights, np.exp(LOG_OVERFLOW)))\n assert not np.isnan(log_resid_weights).any()\n\n log_parent_resid_weights = \\\n np.log(np.maximum(parent_resid_weights, np.exp(LOG_OVERFLOW)))\n assert not np.isnan(log_parent_resid_weights).any()\n\n # objective = cvx.Maximize(\n # cvx.sum_entries(cvx.mul_elemwise(log_resid_weights, cvx.vec(x))) +\n # cvx.sum_entries(cvx.mul_elemwise(log_parent_resid_weights, cvx.vec(cvx.sum_entries(x, axis=0)))) - # nopep8\n # cvx.sum_entries(relax_le * sub_countrol_importance) -\n # cvx.sum_entries(relax_ge * sub_countrol_importance) -\n # cvx.sum_entries(cvx.mul_elemwise(parent_countrol_importance, parent_relax_le)) -\n # cvx.sum_entries(cvx.mul_elemwise(parent_countrol_importance, parent_relax_ge))\n # )\n\n z = solver.Sum(x[z, hh] * log_resid_weights[z, hh]\n for z in range(sub_zone_count)\n for hh in range(sample_count)) + \\\n solver.Sum(x[z, hh] * log_parent_resid_weights[hh]\n for hh in range(sample_count)\n for z in range(sub_zone_count)) - \\\n solver.Sum(relax_le[z, c] * sub_countrol_importance[c]\n for z in range(sub_zone_count)\n for c in range(sub_control_count) if c != total_hh_sub_control_index) - \\\n solver.Sum(relax_ge[z, c] * sub_countrol_importance[c]\n for z in range(sub_zone_count)\n for c in range(sub_control_count) if c != total_hh_sub_control_index) - \\\n solver.Sum(parent_relax_le[c] * parent_countrol_importance[c]\n for c in range(parent_control_count)) - \\\n solver.Sum(parent_relax_ge[c] * parent_countrol_importance[c]\n for c in range(parent_control_count))\n\n objective = solver.Maximize(z)\n\n # constraints = [\n # # - sub inequality constraints\n # (x * sub_incidence) - relax_le >= 0,\n # (x * sub_incidence) - relax_le <= 
lp_right_hand_side,\n # (x * sub_incidence) + relax_ge >= lp_right_hand_side,\n # (x * sub_incidence) + relax_ge <= hh_constraint_ge_bound,\n # ]\n\n # - sub inequality constraints\n sub_constraint_ge = {}\n sub_constraint_le = {}\n for z in range(sub_zone_count):\n for c in range(sub_control_count):\n\n # don't add inequality constraints for total households control\n if c == total_hh_sub_control_index:\n continue\n\n sub_constraint_le[z, c] = \\\n solver.Constraint(0, lp_right_hand_side[z, c])\n for hh in range(sample_count):\n sub_constraint_le[z, c].SetCoefficient(x[z, hh], sub_incidence[hh, c])\n sub_constraint_le[z, c].SetCoefficient(relax_le[z, c], -1.0)\n\n sub_constraint_ge[z, c] = \\\n solver.Constraint(lp_right_hand_side[z, c], hh_constraint_ge_bound[z, c])\n for hh in range(sample_count):\n sub_constraint_ge[z, c].SetCoefficient(x[z, hh], sub_incidence[hh, c])\n sub_constraint_ge[z, c].SetCoefficient(relax_ge[z, c], 1.0)\n\n # constraints = [\n # # - equality constraint for the total households control\n # cvx.sum_entries(x, axis=1) == total_hh_right_hand_side,\n # ]\n\n # - equality constraint for the total households control\n constraint_eq = {}\n for z in range(sub_zone_count):\n total_hh_constraint = total_hh_right_hand_side[z]\n\n constraint_eq[z] = solver.Constraint(total_hh_constraint, total_hh_constraint)\n for hh in range(sample_count):\n constraint_eq[z].SetCoefficient(x[z, hh], 1.0)\n\n # constraints = [\n # cvx.vec(cvx.sum_entries(x, axis=0) * parent_incidence) - parent_relax_le >= 0, # nopep8\n # cvx.vec(cvx.sum_entries(x, axis=0) * parent_incidence) - parent_relax_le <= parent_lp_right_hand_side, # nopep8\n # cvx.vec(cvx.sum_entries(x, axis=0) * parent_incidence) + parent_relax_ge >= parent_lp_right_hand_side, # nopep8\n # cvx.vec(cvx.sum_entries(x, axis=0) * parent_incidence) + parent_relax_ge <= parent_hh_constraint_ge_bound, # nopep8\n # ]\n # - sub inequality constraints\n parent_constraint_le = {}\n parent_constraint_ge = {}\n for c in range(parent_control_count):\n\n if c == total_hh_parent_control_index:\n continue\n\n parent_constraint_le[c] = \\\n solver.Constraint(0, parent_lp_right_hand_side[c])\n parent_constraint_ge[c] = \\\n solver.Constraint(parent_lp_right_hand_side[c], parent_hh_constraint_ge_bound[c])\n\n for z in range(sub_zone_count):\n for hh in range(sample_count):\n parent_constraint_le[c].SetCoefficient(x[z, hh], parent_incidence[hh, c])\n parent_constraint_le[c].SetCoefficient(parent_relax_le[c], -1.0)\n\n parent_constraint_ge[c].SetCoefficient(x[z, hh], parent_incidence[hh, c])\n parent_constraint_ge[c].SetCoefficient(parent_relax_ge[c], 1.0)\n\n result_status = solver.Solve()\n\n status_text = STATUS_TEXT[result_status]\n\n if status_text in STATUS_SUCCESS:\n resid_weights_out = np.zeros(sub_resid_weights.shape)\n\n for z in range(sub_zone_count):\n for hh in range(sample_count):\n resid_weights_out[z, hh] = x[z, hh].solution_value()\n\n resid_weights_out = resid_weights_out.astype(np.float64)\n else:\n resid_weights_out = sub_resid_weights\n\n return resid_weights_out, status_text", "def phenotypeCrossover(self, cl):\n changed = False\n if self.action[0] == cl.action[0] and self.action[1] == cl.action[1]:\n return changed\n else:\n tmp_key = random.random() < 0.5 #Make random choice between 4 scenarios, Swap minimums, Swap maximums, Children preserve parent phenotypes.\n if tmp_key: #Swap minimum\n temp = self.action[0]\n self.action[0] = cl.action[0]\n cl.action[0] = temp\n changed = True\n elif tmp_key: #Swap maximum\n temp = 
self.action[1]\n self.action[1] = cl.action[1]\n cl.action[1] = temp\n changed = True\n\n return changed" ]
[ "0.61003923", "0.5727573", "0.5701313", "0.55589473", "0.5536318", "0.5445367", "0.5374365", "0.53616214", "0.525692", "0.51231486", "0.51078594", "0.5093244", "0.5026292", "0.49627778", "0.49534568", "0.4926632", "0.4865228", "0.48644775", "0.4859577", "0.48368546", "0.47880554", "0.47693235", "0.4764745", "0.4748204", "0.47481164", "0.4745066", "0.47359946", "0.47233436", "0.47194445", "0.47160304" ]
0.619435
0
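The row that ends here, like the others in this dump, pairs a query with a positive document and a list of scored negatives, and the metadata lines declare a (query, document, negatives) triplet objective. A minimal sketch of how such a row could be turned into (query, positive, negative) training triplets follows; the row_to_triplets name, the 0.6 score threshold, and the assumption that each row is already parsed into a dict of the fields shown are illustrative and not part of the dataset.

# Illustrative only: assumes a row is a dict with the fields rendered above
# ("query", "document", "negatives", "negative_scores"). The function name and
# the 0.6 cutoff are assumptions, not defined by the dataset itself.
def row_to_triplets(row, max_negative_score=0.6):
    query = row["query"]
    positive = row["document"]
    triplets = []
    for negative, score in zip(row["negatives"], row["negative_scores"]):
        # Scores are stored as strings in this dump; very high-scoring
        # negatives are skipped as likely false negatives for a triplet loss.
        if float(score) < max_negative_score:
            triplets.append((query, positive, negative))
    return triplets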
Dispatches to the appropriate smart clause crossover operator, depending on whether the distinction should be made between infeasible and suboptimal negative examples
def smart_clause_crossover_dispatch(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None, use_infeasibility=False):
    if use_infeasibility:
        smart_clause_crossover_infeasibility(ind1, ind2, examples, greedy=greedy, probability_variant=probability_variant, temperature=temperature, clause_bitvector_cache=clause_bitvector_cache)
    else:
        smart_clause_crossover(ind1, ind2, examples, greedy=greedy, probability_variant=probability_variant, temperature=temperature, clause_bitvector_cache=clause_bitvector_cache)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def smart_clause_crossover(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None):\n allow_duplicates = False # allow_duplicates denotes whether the resulting indivuals may contain duplicate clauses\n number_of_clauses = len(ind1)\n all_clauses = ind1+ind2\n chosen_clauses = []\n chosen_clause_indices = []\n ind1_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1, examples, clause_bitvector_cache=clause_bitvector_cache)\n ind2_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2, examples, clause_bitvector_cache=clause_bitvector_cache)\n all_coverage_bitvectors = ind1_coverage_bitvectors + ind2_coverage_bitvectors\n\n for i in range(0, number_of_clauses):\n if i == 0:\n combined_coverage_bitvectors = all_coverage_bitvectors\n else:\n combined_coverage_bitvectors = [combine_coverage_bitvectors(chosen_clauses_bitvector, bitvector, examples)\n for bitvector in all_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_clause_indices:\n for j in range(len(combined_coverage_bitvectors)):\n if all_clauses[index] == all_clauses[j]:\n combined_coverage_bitvectors[j] = [0] * len(examples)\n combined_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_coverage_bitvectors]\n if greedy:\n best_coverage = max(combined_coverages)\n best_indices = [i for i in range(len(combined_coverages)) if combined_coverages[i] == best_coverage]\n chosen_clause_index = random.choice(best_indices)\n else:\n if probability_variant == \"linear\":\n sum_coverages = sum(combined_coverages)\n coverages_to_probabilities = [x / sum_coverages for x in combined_coverages]\n elif probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in combined_coverages]\n sum_coverages_squared = sum(coverages_squared)\n coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in combined_coverages]\n elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(combined_coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in combined_coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_clause_index = np.random.choice(list(range(0, len(all_coverage_bitvectors))),\n p=coverages_to_probabilities)\n chosen_coverage_bitvector = combined_coverage_bitvectors[chosen_clause_index]\n if chosen_clause_index < number_of_clauses:\n chosen_clause = ind1[chosen_clause_index]\n else:\n chosen_clause = ind2[chosen_clause_index - number_of_clauses]\n\n chosen_clauses.append(chosen_clause)\n chosen_clause_indices.append(chosen_clause_index)\n chosen_clauses_bitvector = chosen_coverage_bitvector\n\n for i in range(len(chosen_clauses)):\n clause = chosen_clauses[i]\n # We can safely set ind1 and ind2 to the same computed smart combination, as only one of them will make it\n # to the next generation\n ind1[i] = clause\n ind2[i] = clause", "def smart_clause_crossover_infeasibility(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None):\n allow_duplicates = False # allow_duplicates denotes whether the resulting indivuals may contain duplicate clauses\n ind1_hard_constraints = [constr for constr in ind1 if constr[-2] == True]\n ind2_hard_constraints = [constr for constr in ind2 if constr[-2] == True]\n all_hard_constraints = ind1_hard_constraints + ind2_hard_constraints\n 
ind1_soft_constraints = [constr for constr in ind1 if constr[-2] == False]\n ind2_soft_constraints = [constr for constr in ind2 if constr[-2] == False]\n all_soft_constraints = ind1_soft_constraints + ind2_soft_constraints\n ind1_hard_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1_hard_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind2_hard_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2_hard_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind1_soft_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1_soft_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind2_soft_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2_soft_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n all_hard_coverage_bitvectors = ind1_hard_coverage_bitvectors + ind2_hard_coverage_bitvectors\n all_soft_coverage_bitvectors = ind1_soft_coverage_bitvectors + ind2_soft_coverage_bitvectors\n\n ind1_num_hard = len([constr for constr in ind1 if constr[-2] == True])\n ind2_num_hard = len([constr for constr in ind2 if constr[-2] == True])\n # num_hard = random.choice([ind1_num_hard, ind2_num_hard])\n if ind1_num_hard <= ind2_num_hard:\n num_hard = random.choice(list(range(ind1_num_hard, ind2_num_hard+1)))\n else:\n num_hard = random.choice(list(range(ind2_num_hard, ind1_num_hard + 1)))\n num_soft = len(ind1) - num_hard\n chosen_hard_clauses = []\n chosen_hard_clause_indices = []\n chosen_soft_clauses = []\n chosen_soft_clause_indices = []\n\n # Choose hard constraints\n for i in range(0, num_hard):\n if i == 0:\n combined_hard_coverage_bitvectors = all_hard_coverage_bitvectors\n else:\n combined_hard_coverage_bitvectors = [combine_coverage_bitvectors_hard_constraints(\n chosen_hard_clauses_bitvector, bitvector, examples) for bitvector in all_hard_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_hard_clause_indices:\n for j in range(len(combined_hard_coverage_bitvectors)):\n if all_hard_constraints[index][:-2] == all_hard_constraints[j][:-2]:\n combined_hard_coverage_bitvectors[j] = [0] * len(examples)\n if greedy:\n combined_hard_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_hard_coverage_bitvectors]\n best_hard_coverage = max(combined_hard_coverages)\n best_hard_indices = [i for i in range(len(combined_hard_coverages)) if combined_hard_coverages[i] == best_hard_coverage]\n chosen_hard_clause_index = random.choice(best_hard_indices)\n else:\n coverages = [sum(x) for x in combined_hard_coverage_bitvectors]\n if probability_variant == \"linear\":\n sum_coverages = sum(coverages)\n coverages_to_probabilities = [x / sum_coverages for x in coverages]\n elif probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in coverages]\n sum_coverages_squared = sum(coverages_squared)\n coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in coverages]\n elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_hard_clause_index = np.random.choice(list(range(0, len(all_hard_coverage_bitvectors))),\n 
p=coverages_to_probabilities)\n chosen_hard_coverage_bitvector = combined_hard_coverage_bitvectors[chosen_hard_clause_index]\n if chosen_hard_clause_index < len(ind1_hard_constraints):\n chosen_hard_clause = ind1_hard_constraints[chosen_hard_clause_index]\n else:\n chosen_hard_clause = ind2_hard_constraints[chosen_hard_clause_index - len(ind1_hard_constraints)]\n\n chosen_hard_clauses.append(chosen_hard_clause)\n chosen_hard_clause_indices.append(chosen_hard_clause_index)\n chosen_hard_clauses_bitvector = chosen_hard_coverage_bitvector\n\n # Choose soft constraints\n for i in range(0, num_soft):\n if i == 0:\n combined_soft_coverage_bitvectors = all_soft_coverage_bitvectors\n else:\n combined_soft_coverage_bitvectors = [combine_coverage_bitvectors_soft_constraints(\n chosen_soft_clauses_bitvector, bitvector, examples) for bitvector in all_soft_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_soft_clause_indices:\n for j in range(len(combined_soft_coverage_bitvectors)):\n if all_soft_constraints[index][:-2] == all_soft_constraints[j][:-2]:\n combined_soft_coverage_bitvectors[j] = [0] * len(examples)\n if greedy:\n combined_soft_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_soft_coverage_bitvectors]\n best_soft_coverage = max(combined_soft_coverages)\n best_soft_indices = [i for i in range(len(combined_soft_coverages)) if combined_soft_coverages[i] == best_soft_coverage]\n chosen_soft_clause_index = random.choice(best_soft_indices)\n else:\n coverages = [sum(x) for x in combined_soft_coverage_bitvectors]\n if probability_variant == \"linear\":\n sum_coverages = sum(coverages)\n coverages_to_probabilities = [x / sum_coverages for x in coverages]\n elif probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in coverages]\n sum_coverages_squared = sum(coverages_squared)\n coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in coverages]\n elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_soft_clause_index = np.random.choice(list(range(0, len(all_soft_coverage_bitvectors))),\n p=coverages_to_probabilities)\n chosen_soft_coverage_bitvector = combined_soft_coverage_bitvectors[chosen_soft_clause_index]\n if chosen_soft_clause_index < len(ind1_soft_constraints):\n chosen_soft_clause = ind1_soft_constraints[chosen_soft_clause_index]\n else:\n chosen_soft_clause = ind2_soft_constraints[chosen_soft_clause_index - len(ind1_soft_constraints)]\n\n chosen_soft_clauses.append(chosen_soft_clause)\n chosen_soft_clause_indices.append(chosen_soft_clause_index)\n chosen_soft_clauses_bitvector = chosen_soft_coverage_bitvector\n\n for i in range(len(chosen_hard_clauses)):\n hard_clause = chosen_hard_clauses[i]\n # We can safely set ind1 and ind2 to the same computed smart combination, as only one of them will make it\n # to the next generation\n ind1[i] = hard_clause\n ind2[i] = hard_clause\n\n for i in range(len(chosen_soft_clauses)):\n soft_clause = chosen_soft_clauses[i]\n ind1[num_hard+i] = soft_clause\n ind2[num_hard+i] = soft_clause", "def avoid_duplicate_clauses_scramble_clause_crossover(ind1, ind2):\n ind_length = len(ind1)\n ind1_copy = copy.deepcopy(ind1)\n ind2_copy = copy.deepcopy(ind2)\n\n clauses_both_have 
= []\n remaining_clauses = []\n for clause in ind1:\n try:\n index = ind2_copy.index(clause)\n clauses_both_have.append(clause)\n del ind2_copy[index]\n except ValueError:\n remaining_clauses.append(clause)\n\n for clause in ind2:\n try:\n index = ind1_copy.index(clause)\n del ind1_copy[index]\n except ValueError:\n remaining_clauses.append(clause)\n\n random.shuffle(remaining_clauses)\n ind1[0:len(clauses_both_have)] = clauses_both_have\n ind2[0:len(clauses_both_have)] = clauses_both_have\n ind1[len(clauses_both_have):] = remaining_clauses[:len(remaining_clauses) // 2]\n ind2[len(clauses_both_have):] = remaining_clauses[len(remaining_clauses) // 2:]\n if len(ind1) != ind_length or len(ind2) != ind_length:\n raise Exception(\"Crossover operator altered the length of an individual\")", "def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring", "def matched_uniform_crossover(ind1, ind2):\n # We calculate the pairwise match between ind1's and ind2's clauses\n match_matrix = np.zeros((len(ind1), len(ind2)))\n for i in range(len(ind1)):\n clause1 = ind1[i]\n for j in range(len(ind2)):\n clause2 = ind2[j]\n curr_syntactic_match = 0\n for k in range(len(clause1)):\n if k != len(clause1) - 1:\n if clause1[k] == clause2[k]:\n curr_syntactic_match += 1\n else:\n curr_syntactic_match += (1 - abs(clause1[k] - clause2[k]))\n match_matrix[i][j] = curr_syntactic_match\n\n # Arg-sort the pairwise clause matches from best to worst match\n matches_ordered = np.dstack(np.unravel_index(np.argsort(match_matrix.ravel())[::-1], (len(ind1), len(ind2))))[0]\n\n # Finally match the clauses, making sure that each clause is only matched once\n # Then perform uniform crossover on matched clauses\n ind1_matched_clauses = set()\n ind2_matched_clauses = set()\n count = 0\n for match in matches_ordered:\n i = match[0]\n j = match[1]\n if match_matrix[i][j] >= len(ind1[0])//2:\n if i not in ind1_matched_clauses and j not in ind2_matched_clauses:\n count += 1\n # Perform the uniform crossover\n for k in range(len(ind1[i])):\n if random.random() < 0.5:\n temp = ind1[i][k]\n ind1[i][k] = ind2[j][k]\n ind2[j][k] = temp\n ind1_matched_clauses.add(i)\n ind2_matched_clauses.add(j)", "def __no_crossing(self):\n for pos_left_1 in range(self.n):\n for pos_left_2 in range(pos_left_1 + 1, self.n):\n for pos_right_2 in range(self.n):\n for pos_right_1 in range(pos_right_2 + 1, self.n):\n # For all i, j, k, m | k < i and m > j . 
not w(i, j) or not w(k, m)\n self.__clause(-self.preds.w(pos_left_1, pos_right_1),\n -self.preds.w(pos_left_2, pos_right_2))", "def _apply_crossover(pop, op, pb):\n for i in range(1, len(pop), 2):\n if random.random() < pb:\n pop[i - 1], pop[i] = op(pop[i - 1], pop[i])\n del pop[i - 1].fitness.values\n del pop[i].fitness.values\n return pop", "def _cross_over(self,mp,cross_rate,eta):", "def uniformCrossover(self, cl):\n if cons.env.format_data.discrete_action: #Always crossover condition if the phenotype is discrete (if continuous phenotype, half the time phenotype crossover is performed instead)\n self_specified_atts = copy.deepcopy(self.specified_attributes)\n cl_specified_atts = copy.deepcopy(cl.specified_attributes)\n probability = 0.5 #Equal probability for attribute alleles to be exchanged.\n\n #Make list of attribute references appearing in at least one of the parents.-----------------------------\n combined_atts = []\n for i in self_specified_atts:\n combined_atts.append(i)\n for i in cl_specified_atts:\n if i not in combined_atts:\n combined_atts.append(i)\n elif not cons.env.format_data.attribute_info[i][0]: #Attribute specified in both parents, and the attribute is discrete (then no reason to cross over)\n combined_atts.remove(i)\n combined_atts.sort()\n #--------------------------------------------------------------------------------------------------------\n changed = False;\n for att in combined_atts: #Each condition specifies different attributes, so we need to go through all attributes in the dataset.\n att_info = cons.env.format_data.attribute_info[att]\n #-----------------------------\n ref = 0\n #if att in self.specified_attributes:\n if att in self_specified_atts:\n ref += 1\n #if att in cl.specified_attributes:\n if att in cl_specified_atts:\n ref += 1\n #-----------------------------\n\n if ref == 0: #Attribute not specified in either condition (Attribute type makes no difference)\n print(\"Error: UniformCrossover!\")\n pass\n\n elif ref == 1: #Attribute specified in only one condition - do probabilistic switch of whole attribute state (Attribute type makes no difference)\n if att in self_specified_atts and random.random() > probability:\n i = self.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n cl.condition.append(self.condition.pop(i)) #Take attribute from self and add to cl\n cl.specified_attributes.append(att)\n self.specified_attributes.remove(att)\n changed = True #Remove att from self and add to cl\n\n\n if att in cl_specified_atts and random.random() < probability:\n i = cl.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n self.condition.append(cl.condition.pop(i)) #Take attribute from self and add to cl\n self.specified_attributes.append(att)\n cl.specified_attributes.remove(att)\n changed = True #Remove att from cl and add to self.\n\n\n else: #Attribute specified in both conditions - do random crossover between state alleles. 
The same attribute may be specified at different positions within either classifier\n #-------------------------------------------------------\n # CONTINUOUS ATTRIBUTE\n #-------------------------------------------------------\n if att_info[0]:\n i_cl1 = self.specified_attributes.index(att) #pairs with self (classifier 1)\n i_cl2 = cl.specified_attributes.index(att) #pairs with cl (classifier 2)\n tmp_key = random.randint(0,3) #Make random choice between 4 scenarios, Swap minimums, Swap maximums, Self absorbs cl, or cl absorbs self.\n if tmp_key == 0: #Swap minimum\n temp = self.condition[i_cl1][0]\n self.condition[i_cl1][0] = cl.condition[i_cl2][0]\n cl.condition[i_cl2][0] = temp\n elif tmp_key == 1: #Swap maximum\n temp = self.condition[i_cl1][1]\n self.condition[i_cl1][1] = cl.condition[i_cl2][1]\n cl.condition[i_cl2][1] = temp\n else: #absorb range\n all_list = self.condition[i_cl1] + cl.condition[i_cl2]\n new_min = min(all_list)\n new_max = max(all_list)\n if tmp_key == 2: #self absorbs cl\n self.condition[i_cl1] = [new_min,new_max]\n #Remove cl\n cl.condition.pop(i_cl2)\n cl.specified_attributes.remove(att)\n else: #cl absorbs self\n cl.condition[i_cl2] = [new_min,new_max]\n #Remove self\n self.condition.pop(i_cl1)\n self.specified_attributes.remove(att)\n #-------------------------------------------------------\n # DISCRETE ATTRIBUTE\n #-------------------------------------------------------\n else:\n pass\n tmp_list1 = copy.deepcopy(self_specified_atts)\n tmp_list2 = copy.deepcopy(cl.specified_attributes)\n tmp_list1.sort()\n tmp_list2.sort()\n if changed and (tmp_list1 == tmp_list2):\n changed = False\n\n if self.action != cl.action and random.random() > probability:\n # Switch phenotypes of 2 classifiers if GA is run in match set\n temp = self.action\n self.action = cl.action\n cl.action = temp\n changed = True\n return changed\n #-------------------------------------------------------\n # CONTINUOUS PHENOTYPE CROSSOVER\n #-------------------------------------------------------\n elif random.random() < 0.5:\n return self.actionCrossover(cl)", "def squeeze_accept(partition):\n Write a function that\n - Sort districts by most Democratic heavy and most Republican heavy\n\n - Assign a base value of competitiveness for each district\n - Run chain, accept only if districts satisfy values under or order\n \"\"\"\n\n#--- CONSTRAINTS\n\n\"\"\"", "def test_cost_mixed():\n # boundaries:\n vm_max = 1.05\n vm_min = 0.95\n\n # create net\n net = pp.create_empty_network()\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_gen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50,\n min_q_kvar=-50)\n pp.create_ext_grid(net, 0)\n pp.create_load(net, 1, p_kw=20, controllable=False, max_q_kvar=50, max_p_kw=100, min_p_kw=50,\n min_q_kvar=-50)\n pp.create_line_from_parameters(net, 0, 1, 50, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=100 * 690)\n\n # testing some combinations\n pp.create_polynomial_cost(net, 0, \"gen\", np.array([0, 1, 0]))\n pp.runopp(net, verbose=False)\n assert net[\"OPF_converged\"]\n assert net.res_cost == - net.res_gen.p_kw.values\n\n net.polynomial_cost.c.at[0] = np.array([[1, 0, 0]])\n pp.runopp(net, verbose=False)\n assert net[\"OPF_converged\"]\n assert net.res_cost - net.res_gen.p_kw.values**2 < 1e-5\n\n net.polynomial_cost.c.at[0] = np.array([[1, 0, 1]])\n pp.runopp(net, 
verbose=False)\n assert net[\"OPF_converged\"]\n assert net.res_cost - net.res_gen.p_kw.values**2 - 1 < 1e-5\n\n net.load.controllable.at[0] = True\n pp.runopp(net, verbose=False)\n assert net.res_cost - net.res_gen.p_kw.values ** 2 - 1 < 1e-5\n\n pp.create_piecewise_linear_cost(net, 0, \"load\", np.array([[0, 0], [100, 100]]), type=\"p\")\n pp.runopp(net, verbose=False)\n assert net.res_cost - net.res_gen.p_kw.values ** 2 - 1 - net.res_load.p_kw.values < 1e-5", "def test_ppt_distinguishability_werner_hiding_pairs():\n dim = 2\n sigma_0 = (np.kron(np.identity(dim), np.identity(dim)) + swap_operator(dim)) / (dim * (dim + 1))\n sigma_1 = (np.kron(np.identity(dim), np.identity(dim)) - swap_operator(dim)) / (dim * (dim - 1))\n\n states = [sigma_0, sigma_1]\n\n expected_val = 1 / 2 + 1 / (dim + 1)\n\n primal_res = ppt_distinguishability(states, probs=None, dist_method=\"min-error\", strategy=True)\n dual_res = ppt_distinguishability(states, probs=None, dist_method=\"min-error\", strategy=False)\n\n np.testing.assert_equal(np.isclose(primal_res, expected_val, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, expected_val, atol=0.001), True)\n\n primal_res = ppt_distinguishability(\n states, probs=None, dist_method=\"unambiguous\", strategy=True\n )\n dual_res = ppt_distinguishability(states, probs=None, dist_method=\"unambiguous\", strategy=False)\n\n np.testing.assert_equal(np.isclose(primal_res, 1 / 3, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, 1 / 3, atol=0.001), True)", "def scramble_clause_crossover(ind1, ind2):\n all_clauses = ind1 + ind2\n random.shuffle(all_clauses)\n ind1[0:len(ind1)] = all_clauses[0:len(ind1)]\n ind2[0:len(ind2)] = all_clauses[len(ind1):len(ind1) + len(ind2)]", "def uniform_clause_crossover(ind1, ind2):\n k = len(ind1)\n for i in range(k):\n if random.random() < 0.5:\n temp = ind1[i]\n ind1[i] = ind2[i]\n ind2[i] = temp", "def crossover(self):\n print(' - crossover')\n s = time.time()\n\n # make a list with all index\n tmp_list = list(range(0, self.size))\n while len(tmp_list) > 0:\n candidate_1 = random.choice(tmp_list)\n tmp_list.remove(candidate_1)\n candidate_2 = random.choice(tmp_list)\n tmp_list.remove(candidate_2)\n\n # ceck if the two candidates will crossover\n chance = random.uniform(0, 1)\n if chance <= self.crossover_rate:\n self.crossover_two_candidates(candidate_1, candidate_2)\n\n e = time.time()\n print(\" - time: \", e - s)", "def crossover(NN1, NN2, p_c, p_m):\n if np.random.choice([0, 1], p=[1-p_c, p_c]):\n return nn.mate_neural_nets(NN1, NN2, p_m)\n else:\n return np.random.choice([NN1, NN2])", "def _make_soft_copy_ops(tau, target_vars, online_vars):\n return [_make_soft_copy_op(tau, target_vars[var_name], online_vars[var_name])\n for var_name in target_vars.keys()]", "def _make_soft_copy_ops(tau, target_vars, online_vars):\n return [_make_soft_copy_op(tau, target_vars[var_name], online_vars[var_name])\n for var_name in target_vars.keys()]", "def rule_exclusivity(self, p1, p2, domain):\r\n\r\n if p1.type == self.PT.implies and p2.type == self.PT.negation:\r\n if p2.v1.type == self.PT.conjunction:\r\n if p1.v1.type == self.PT.atomic and p1.v2.type == self.PT.atomic:\r\n if p2.v1.v1.type == self.PT.atomic and p2.v1.v2.type == self.PT.atomic:\r\n if p1.v1.v1.arg_id == p1.v2.v1.arg_id and p2.v1.v1.v1.arg_id == p2.v1.v2.v1.arg_id:\r\n if not p1.v1.v1.is_name and not p1.v2.v1.is_name and not p2.v1.v1.v1.is_name and not p2.v1.v2.v1.is_name:\r\n if p1.v2.v1.predicate == p2.v1.v1.v1.predicate:\r\n i = 
self.get_fresh_id()\r\n p = self.Prop(self.PT.negation,\r\n self.Prop(self.PT.conjunction,\r\n self.atom_prop_replace_properties(p1.v1,\r\n i),\r\n self.atom_prop_replace_properties(\r\n p2.v1.v2, i)),\r\n None)\r\n if not self.contains_isomorphic_proposition(domain, p):\r\n return [p]\r\n return []", "def uniform_crossover(self, another_individual):\n size = len(another_individual.get_genotype())\n genotype1 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n genotype2 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n mask = numpy.random.choice([True,False], size=size)\n not_mask = numpy.logical_not(mask)\n genotype1[mask] = self.get_genotype()[mask]\n genotype1[not_mask] = another_individual.get_genotype()[not_mask]\n genotype2[mask] = another_individual.get_genotype()[mask]\n genotype2[not_mask] = self.get_genotype()[not_mask]\n\n return optimization.Individual(genotype1, self.fitness_evaluator, self.uniform_crossover, self.mutation_method), optimization.Individual(genotype2, self.fitness_evaluator, self.uniform_crossover, self.mutation_method)", "def c_test__cross_inp(self, old_population, population_weighting, run_locals):\r\n return 1", "def item_collab_filter_cosine_predicate(interactions_df, books_df, obs_interactions, target_interactions, truth_interactions, fold, setting):\n\n k = 10\n\n print(\"predicate_construction: item_collab_filter_cosine_predicate:\")\n\n def write(s, p):\n print(\"predicate_construction: item_collab_filter_cosine_predicate: writing: \" + \n './goodreads/' + str(fold) + '/' + setting + '/item_collab_filter_cosine_' + p + '.txt') \n s.to_csv('./goodreads/' + str(fold) + '/' + setting + '/item_collab_filter_cosine_' + p + '.txt',\n sep='\\t', header=False, index=True)\n\n # observed predicates\n partition = 'obs'\n observed_interactions_df = interactions_df.loc[obs_interactions, :]\n observed_books_df = books_df.loc[obs_interactions.get_level_values(1).unique()]\n\n # similarity based on shelves (<- chosen since it the most dense value)\n # TODO: incorporate sparser and less noising signals from user\n\n # index: user_ids columns: book_ids values: 1 if shelved by user 0 o.w.\n user_book_shelves = pd.Series(data=1, index=obs_interactions).unstack().fillna(0)\n # index: book_ids columns: book_ids\n item_item_sim = pd.DataFrame(data=0, index=user_book_shelves.columns, columns=user_book_shelves.columns)\n\n # block similarity calculations by language and by genre heuristics\n # use unique values of language codes to block similarity calculations: generalize by adding \n # support for clusters of languages, i.e. 
eng and can-en can be grouped together\n # use top-1 informative shelf name: generalize by using top-n informative shelf names\n\n # create genre to book data frame, genre is from shelf names by users\n # index: book_id, columns: genre\n genre_series = pd.Series(index=observed_books_df.index)\n print(\"predicate_construction: item_collab_filter_cosine_predicate: building genre series\")\n for book_id, book in tqdm(observed_books_df.iterrows()):\n for i in range(len(book.popular_shelves)):\n # note that popular_shelves is sorted\n if book.popular_shelves[i]['name'] != 'to-read':\n genre_series.loc[book.name] = book.popular_shelves[i]['name']\n break\n\n # add genre series as column of observed_books_df\n observed_books_df.loc[:, 'genre'] = genre_series\n\n # block by both 'genre' and 'language_code' and calculate similarity for books within block\n print(\"predicate_construction: item_collab_filter_cosine_predicate: item_item_sim matrix\")\n for _, book_group in tqdm(observed_books_df.groupby(['genre', 'language_code'])):\n similarity_matrix = cosine_similarity(user_book_shelves.loc[:, book_group.index].transpose())\n item_item_sim.loc[book_group.index, book_group.index] = similarity_matrix\n \n # Very sparse, thus very small cosine sim values\n # approach: for each book use k most similar books to it\n book_top_k_sim = item_item_sim.apply(pd.Series.nlargest, n=5).stack()\n item_item_series = pd.Series(data=1, index=book_top_k_sim.index)\n write(item_item_series, partition)", "def _make_soft_copy_op(tau, target, online):\n return target.assign_sub(tau * (target - online))", "def _make_soft_copy_op(tau, target, online):\n return target.assign_sub(tau * (target - online))", "def nonsymmetric_hardclipping_evaluation(input_generator,branches,iden_method,Plot,reference=None):\n t1 = range(8,11)\n t2 = range(8,11)\n for th1, th2 in itertools.product(t1, t2):\n th1 = th1 / 10.0\n th2 = th2 / 10.0\n thresholds = [-th1,th2]\n input_signal = input_generator.GetOutput()\n ref_nlsystem = sumpf.modules.ClipSignal(thresholds=thresholds)\n ref_nlsystem.SetInput(input_signal)\n\n found_filter_spec, nl_functions = iden_method(input_generator,ref_nlsystem.GetOutput(),branches)\n iden_nlsystem = nlsp.HammersteinGroupModel_up(input_signal=input_signal,\n nonlinear_functions=nl_functions,\n filter_irs=found_filter_spec,\n max_harmonics=range(1,branches+1))\n if reference is not None:\n ref_nlsystem.SetInput(reference)\n iden_nlsystem.SetInput(reference)\n if Plot is True:\n plot.relabelandplot(sumpf.modules.FourierTransform(ref_nlsystem.GetOutput()).GetSpectrum(),\"Reference System\",show=False)\n plot.relabelandplot(sumpf.modules.FourierTransform(iden_nlsystem.GetOutput()).GetSpectrum(),\"Identified System\",show=False)\n print \"SNR between Reference and Identified output for non symmetric hardclipping(thresholds:%r): %r\" %(thresholds,nlsp.snr(ref_nlsystem.GetOutput(),\n iden_nlsystem.GetOutput()))", "def clause_crossover_1x(ind1, ind2):\n k = len(ind1)\n cx_point = random.randint(1, k - 1)\n temp = ind1[cx_point:]\n ind1[cx_point:] = ind2[cx_point:]\n ind2[cx_point:] = temp", "def combine(sv, nod, O, oldnatA, oldnatB, oldnatres):\r\n newnatA, newnatB, newnatres = set(), set(), set()\r\n oldsetA, oldsetB, oldsetres=set(oldnatA), set(oldnatB), set(oldnatres)\r\n \r\n for allowA, a1, allowB, a2, allowres in Allowed[O]: # test compatibility of hypotheses\r\n # simple operands without distributivity\r\n setA=set(allowA) & oldsetA \r\n setB=set(allowB) & oldsetB\r\n setres=set(allowres) & oldsetres \r\n if 
(setres and setA and (setB or O in Unary)): # hypothesis is valid\r\n newnatA.update(setA) # add to list of possible natures\r\n newnatB.update(setB)\r\n newnatres.update(setres)\r\n\r\n # left distributivity (add list as a possible nature) \r\n if not (O in Non_distributive1) and Lst[0] in oldnatA and Lst[0] in oldnatres: \r\n newnatA.add(Lst[0]) \r\n newnatB.update(setB) \r\n newnatres.add(Lst[0])\r\n\r\n # right distributivity (add list as a possible nature) \r\n if not (O in Non_distributive2) and not (O in Unary) \\\r\n and Lst[0] in oldnatB and Lst[0] in oldnatres: \r\n newnatA.update(setA)\r\n newnatB.add(Lst[0])\r\n newnatres.add(Lst[0])\r\n \r\n # check compatibility\r\n if not (newnatres and newnatA and (newnatB or O in Unary)): \r\n print(\"\\n\", Err_incomp_nat) # ***Error: incompatible nature *** \r\n print(O, oldnatA, oldnatB)\r\n if nod.once: print(\"condition must be an event:\", nod.name)\r\n raise ReferenceError\r\n \r\n return list(newnatA), list(newnatB), list(newnatres)", "def twoPointCrossover(self, cl):\n points = []\n changed = False\n points.append( int( random.random() * ( cons.env.format_data.numb_attributes + 1 ) ) )\n secondPoint = int( random.random() * ( cons.env.format_data.numb_attributes + 1 ) )\n if points[0] > secondPoint:\n tempPoint = points[0]\n points[0] = secondPoint\n points.append( tempPoint )\n else:\n points.append( secondPoint )\n if cons.env.format_data.discrete_action:\n self_specified_atts = copy.deepcopy(self.specified_attributes)\n cl_specified_atts = copy.deepcopy(cl.specified_attributes)\n for i in range( points[1] ):\n if i >= points[0]:\n if i in self_specified_atts:\n if i not in cl_specified_atts:\n index = self.specified_attributes.index(i)\n cl.condition.append(self.condition.pop(index))\n cl.specified_attributes.append(i)\n self.specified_attributes.remove(i)\n changed = True #Remove att from self and add to cl\n elif i in cl_specified_atts:\n index = cl.specified_attributes.index(i) #reference to the position of the attribute in the rule representation\n self.condition.append(cl.condition.pop(index)) #Take attribute from self and add to cl\n self.specified_attributes.append(i)\n cl.specified_attributes.remove(i)\n changed = True\n return changed", "def solvePostNoOverlap(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q):\n \"\"\"Contains as many dummy targets as defenders, for defenders and attackers\"\"\"\n # Add the extra dummy targets\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n # Get the suggestions that occur with no overlap\n overlapPlacements = getPlacements(defenders, targetNumWithDummies)\n placements = list(filter(lambda x: len(set(x)) == len(x), overlapPlacements))\n omegaKeys = getOmegaKeys(aTypes, placements, attackerActions)\n\n # Build the model\n model = Model('PrimalWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, ub=1, name=\"w\")\n objectiveFunction = sum([q[lam] * sum([w[s,a,lam] * defenderSocialUtility(s,a,defenders,_dRewards,_dCosts,_dPenalties) for s in placements for a in 
attackerActions]) for lam in aTypes])\n c1 = [sum([w[s,a,lam] * aUtility(s,a,lam,_aPenalties,_aRewards) for s in placements]) \\\n >= sum([w[s,a,lam] * aUtility(s,b,lam,_aPenalties,_aRewards) for s in placements])\n for lam in aTypes for a in attackerActions for b in attackerActions if a != b]\n c1 = [constraint for constraint in c1 if not isinstance(constraint, bool)]\n c1 = model.add_constraints(c1)\n c2 = model.add_constraints([sum([q[lam] * sum([w[s,a,lam] * utilityM(d,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes]) \\\n >= sum([q[lam] * sum([w[s,a,lam] * utilityM(e,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes])\n for m in defenders for d in targetRange for e in targetRange if d!=e])\n c3 = model.add_constraints([sum([w[(s,a,lam)] for s in placements for a in attackerActions]) == 1 for lam in aTypes])\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n # Now that w contains all the outcomes and their probabilities, sum the attacker utilities up.\n utilityPerAttacker = 0\n for k,v in w.items():\n prob = float(v)\n s,a,lam = k\n utilityPerAttacker += aUtility(s,a,lam,_aPenalties,_aRewards) * prob\n utilityPerAttacker /= len(aTypes)\n utilityPerDefender = model.solution.get_objective_value()\n utilityPerDefender /= len(defenders)\n return utilityPerDefender, utilityPerAttacker, None", "def phenotypeCrossover(self, cl):\n changed = False\n if self.action[0] == cl.action[0] and self.action[1] == cl.action[1]:\n return changed\n else:\n tmp_key = random.random() < 0.5 #Make random choice between 4 scenarios, Swap minimums, Swap maximums, Children preserve parent phenotypes.\n if tmp_key: #Swap minimum\n temp = self.action[0]\n self.action[0] = cl.action[0]\n cl.action[0] = temp\n changed = True\n elif tmp_key: #Swap maximum\n temp = self.action[1]\n self.action[1] = cl.action[1]\n cl.action[1] = temp\n changed = True\n\n return changed" ]
[ "0.6515465", "0.63803405", "0.5759596", "0.56984735", "0.5618533", "0.550331", "0.5501639", "0.5486603", "0.5480942", "0.544881", "0.54109794", "0.5244485", "0.5237865", "0.52373815", "0.52060795", "0.51889914", "0.5187329", "0.5187329", "0.51770586", "0.5146552", "0.5139957", "0.5108544", "0.5100386", "0.5100386", "0.5090024", "0.50896513", "0.507844", "0.50557566", "0.5053187", "0.5039291" ]
0.67763466
0
Operates on the constraint level. This function does not make use of the distinction between suboptimal and infeasible negative examples. Performs a smart crossover on the two given individuals and produces one individual (both individuals will be changed into this single resulting individual). Makes use of a heuristic that values constraints and combinations of constraints based on their coverage.
def smart_clause_crossover(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None):
    allow_duplicates = False  # allow_duplicates denotes whether the resulting individuals may contain duplicate clauses
    number_of_clauses = len(ind1)
    all_clauses = ind1 + ind2
    chosen_clauses = []
    chosen_clause_indices = []
    ind1_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1, examples, clause_bitvector_cache=clause_bitvector_cache)
    ind2_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2, examples, clause_bitvector_cache=clause_bitvector_cache)
    all_coverage_bitvectors = ind1_coverage_bitvectors + ind2_coverage_bitvectors
    for i in range(0, number_of_clauses):
        if i == 0:
            combined_coverage_bitvectors = all_coverage_bitvectors
        else:
            combined_coverage_bitvectors = [combine_coverage_bitvectors(chosen_clauses_bitvector, bitvector, examples)
                                            for bitvector in all_coverage_bitvectors]
        if not allow_duplicates:
            for index in chosen_clause_indices:
                for j in range(len(combined_coverage_bitvectors)):
                    if all_clauses[index] == all_clauses[j]:
                        combined_coverage_bitvectors[j] = [0] * len(examples)
        combined_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_coverage_bitvectors]
        if greedy:
            best_coverage = max(combined_coverages)
            best_indices = [i for i in range(len(combined_coverages)) if combined_coverages[i] == best_coverage]
            chosen_clause_index = random.choice(best_indices)
        else:
            if probability_variant == "linear":
                sum_coverages = sum(combined_coverages)
                coverages_to_probabilities = [x / sum_coverages for x in combined_coverages]
            elif probability_variant == "squared":
                coverages_squared = [x ** 2 for x in combined_coverages]
                sum_coverages_squared = sum(coverages_squared)
                coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in combined_coverages]
            elif probability_variant == "softmax":
                # Softmax with normalization to prevent overflow
                coverages_max = max(combined_coverages)
                coverages_for_softmax = [a_coverage - coverages_max for a_coverage in combined_coverages]
                coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(
                    np.exp(np.asarray(coverages_for_softmax) / temperature))
            chosen_clause_index = np.random.choice(list(range(0, len(all_coverage_bitvectors))), p=coverages_to_probabilities)
        chosen_coverage_bitvector = combined_coverage_bitvectors[chosen_clause_index]
        if chosen_clause_index < number_of_clauses:
            chosen_clause = ind1[chosen_clause_index]
        else:
            chosen_clause = ind2[chosen_clause_index - number_of_clauses]
        chosen_clauses.append(chosen_clause)
        chosen_clause_indices.append(chosen_clause_index)
        chosen_clauses_bitvector = chosen_coverage_bitvector
    for i in range(len(chosen_clauses)):
        clause = chosen_clauses[i]
        # We can safely set ind1 and ind2 to the same computed smart combination, as only one of them will make it
        # to the next generation
        ind1[i] = clause
        ind2[i] = clause
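The crossover above leans entirely on compute_clause_coverage_bitvectors and combine_coverage_bitvectors, neither of which appears in this record. The following is a minimal sketch of what they are assumed to compute; the clause encoding, the (assignment, is_positive) example format, the _clause_satisfied helper and the element-wise AND combination rule are all assumptions made purely for illustration, not the project's actual implementation.

def _clause_satisfied(clause, assignment):
    # Assumed encoding: a clause is a sequence of literals in {-1, 0, 1} over boolean
    # variables; it is satisfied if at least one non-zero literal matches the assignment.
    return any((lit == 1 and value) or (lit == -1 and not value)
               for lit, value in zip(clause, assignment) if lit != 0)

def compute_clause_coverage_bitvectors(clauses, examples, clause_bitvector_cache=None):
    # One bitvector per clause: entry k is 1 if the clause "covers" example k, assumed here
    # to mean that the clause's verdict agrees with the example's positive/negative label.
    # Examples are assumed to be (assignment, is_positive) pairs.
    bitvectors = []
    for clause in clauses:
        key = tuple(clause)
        if clause_bitvector_cache is not None and key in clause_bitvector_cache:
            bitvectors.append(clause_bitvector_cache[key])
            continue
        bitvector = [1 if _clause_satisfied(clause, assignment) == is_positive else 0
                     for assignment, is_positive in examples]
        if clause_bitvector_cache is not None:
            clause_bitvector_cache[key] = bitvector
        bitvectors.append(bitvector)
    return bitvectors

def combine_coverage_bitvectors(chosen_bitvector, candidate_bitvector, examples):
    # Coverage of the already chosen clauses together with one candidate clause; assumed here
    # to be an element-wise AND (an example counts only if the whole combination covers it).
    # The real project may well treat positive and negative examples differently.
    return [a & b for a, b in zip(chosen_bitvector, candidate_bitvector)]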
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def smart_clause_crossover_infeasibility(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None):\n allow_duplicates = False # allow_duplicates denotes whether the resulting indivuals may contain duplicate clauses\n ind1_hard_constraints = [constr for constr in ind1 if constr[-2] == True]\n ind2_hard_constraints = [constr for constr in ind2 if constr[-2] == True]\n all_hard_constraints = ind1_hard_constraints + ind2_hard_constraints\n ind1_soft_constraints = [constr for constr in ind1 if constr[-2] == False]\n ind2_soft_constraints = [constr for constr in ind2 if constr[-2] == False]\n all_soft_constraints = ind1_soft_constraints + ind2_soft_constraints\n ind1_hard_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1_hard_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind2_hard_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2_hard_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind1_soft_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1_soft_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n ind2_soft_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2_soft_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)\n all_hard_coverage_bitvectors = ind1_hard_coverage_bitvectors + ind2_hard_coverage_bitvectors\n all_soft_coverage_bitvectors = ind1_soft_coverage_bitvectors + ind2_soft_coverage_bitvectors\n\n ind1_num_hard = len([constr for constr in ind1 if constr[-2] == True])\n ind2_num_hard = len([constr for constr in ind2 if constr[-2] == True])\n # num_hard = random.choice([ind1_num_hard, ind2_num_hard])\n if ind1_num_hard <= ind2_num_hard:\n num_hard = random.choice(list(range(ind1_num_hard, ind2_num_hard+1)))\n else:\n num_hard = random.choice(list(range(ind2_num_hard, ind1_num_hard + 1)))\n num_soft = len(ind1) - num_hard\n chosen_hard_clauses = []\n chosen_hard_clause_indices = []\n chosen_soft_clauses = []\n chosen_soft_clause_indices = []\n\n # Choose hard constraints\n for i in range(0, num_hard):\n if i == 0:\n combined_hard_coverage_bitvectors = all_hard_coverage_bitvectors\n else:\n combined_hard_coverage_bitvectors = [combine_coverage_bitvectors_hard_constraints(\n chosen_hard_clauses_bitvector, bitvector, examples) for bitvector in all_hard_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_hard_clause_indices:\n for j in range(len(combined_hard_coverage_bitvectors)):\n if all_hard_constraints[index][:-2] == all_hard_constraints[j][:-2]:\n combined_hard_coverage_bitvectors[j] = [0] * len(examples)\n if greedy:\n combined_hard_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_hard_coverage_bitvectors]\n best_hard_coverage = max(combined_hard_coverages)\n best_hard_indices = [i for i in range(len(combined_hard_coverages)) if combined_hard_coverages[i] == best_hard_coverage]\n chosen_hard_clause_index = random.choice(best_hard_indices)\n else:\n coverages = [sum(x) for x in combined_hard_coverage_bitvectors]\n if probability_variant == \"linear\":\n sum_coverages = sum(coverages)\n coverages_to_probabilities = [x / sum_coverages for x in coverages]\n elif probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in coverages]\n sum_coverages_squared = sum(coverages_squared)\n coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in coverages]\n 
elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_hard_clause_index = np.random.choice(list(range(0, len(all_hard_coverage_bitvectors))),\n p=coverages_to_probabilities)\n chosen_hard_coverage_bitvector = combined_hard_coverage_bitvectors[chosen_hard_clause_index]\n if chosen_hard_clause_index < len(ind1_hard_constraints):\n chosen_hard_clause = ind1_hard_constraints[chosen_hard_clause_index]\n else:\n chosen_hard_clause = ind2_hard_constraints[chosen_hard_clause_index - len(ind1_hard_constraints)]\n\n chosen_hard_clauses.append(chosen_hard_clause)\n chosen_hard_clause_indices.append(chosen_hard_clause_index)\n chosen_hard_clauses_bitvector = chosen_hard_coverage_bitvector\n\n # Choose soft constraints\n for i in range(0, num_soft):\n if i == 0:\n combined_soft_coverage_bitvectors = all_soft_coverage_bitvectors\n else:\n combined_soft_coverage_bitvectors = [combine_coverage_bitvectors_soft_constraints(\n chosen_soft_clauses_bitvector, bitvector, examples) for bitvector in all_soft_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_soft_clause_indices:\n for j in range(len(combined_soft_coverage_bitvectors)):\n if all_soft_constraints[index][:-2] == all_soft_constraints[j][:-2]:\n combined_soft_coverage_bitvectors[j] = [0] * len(examples)\n if greedy:\n combined_soft_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_soft_coverage_bitvectors]\n best_soft_coverage = max(combined_soft_coverages)\n best_soft_indices = [i for i in range(len(combined_soft_coverages)) if combined_soft_coverages[i] == best_soft_coverage]\n chosen_soft_clause_index = random.choice(best_soft_indices)\n else:\n coverages = [sum(x) for x in combined_soft_coverage_bitvectors]\n if probability_variant == \"linear\":\n sum_coverages = sum(coverages)\n coverages_to_probabilities = [x / sum_coverages for x in coverages]\n elif probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in coverages]\n sum_coverages_squared = sum(coverages_squared)\n coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in coverages]\n elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_soft_clause_index = np.random.choice(list(range(0, len(all_soft_coverage_bitvectors))),\n p=coverages_to_probabilities)\n chosen_soft_coverage_bitvector = combined_soft_coverage_bitvectors[chosen_soft_clause_index]\n if chosen_soft_clause_index < len(ind1_soft_constraints):\n chosen_soft_clause = ind1_soft_constraints[chosen_soft_clause_index]\n else:\n chosen_soft_clause = ind2_soft_constraints[chosen_soft_clause_index - len(ind1_soft_constraints)]\n\n chosen_soft_clauses.append(chosen_soft_clause)\n chosen_soft_clause_indices.append(chosen_soft_clause_index)\n chosen_soft_clauses_bitvector = chosen_soft_coverage_bitvector\n\n for i in range(len(chosen_hard_clauses)):\n hard_clause = chosen_hard_clauses[i]\n # We can safely set ind1 and ind2 to the same computed 
smart combination, as only one of them will make it\n # to the next generation\n ind1[i] = hard_clause\n ind2[i] = hard_clause\n\n for i in range(len(chosen_soft_clauses)):\n soft_clause = chosen_soft_clauses[i]\n ind1[num_hard+i] = soft_clause\n ind2[num_hard+i] = soft_clause", "def uniform_crossover(self, another_individual):\n size = len(another_individual.get_genotype())\n genotype1 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n genotype2 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n mask = numpy.random.choice([True,False], size=size)\n not_mask = numpy.logical_not(mask)\n genotype1[mask] = self.get_genotype()[mask]\n genotype1[not_mask] = another_individual.get_genotype()[not_mask]\n genotype2[mask] = another_individual.get_genotype()[mask]\n genotype2[not_mask] = self.get_genotype()[not_mask]\n\n return optimization.Individual(genotype1, self.fitness_evaluator, self.uniform_crossover, self.mutation_method), optimization.Individual(genotype2, self.fitness_evaluator, self.uniform_crossover, self.mutation_method)", "def matched_uniform_crossover(ind1, ind2):\n # We calculate the pairwise match between ind1's and ind2's clauses\n match_matrix = np.zeros((len(ind1), len(ind2)))\n for i in range(len(ind1)):\n clause1 = ind1[i]\n for j in range(len(ind2)):\n clause2 = ind2[j]\n curr_syntactic_match = 0\n for k in range(len(clause1)):\n if k != len(clause1) - 1:\n if clause1[k] == clause2[k]:\n curr_syntactic_match += 1\n else:\n curr_syntactic_match += (1 - abs(clause1[k] - clause2[k]))\n match_matrix[i][j] = curr_syntactic_match\n\n # Arg-sort the pairwise clause matches from best to worst match\n matches_ordered = np.dstack(np.unravel_index(np.argsort(match_matrix.ravel())[::-1], (len(ind1), len(ind2))))[0]\n\n # Finally match the clauses, making sure that each clause is only matched once\n # Then perform uniform crossover on matched clauses\n ind1_matched_clauses = set()\n ind2_matched_clauses = set()\n count = 0\n for match in matches_ordered:\n i = match[0]\n j = match[1]\n if match_matrix[i][j] >= len(ind1[0])//2:\n if i not in ind1_matched_clauses and j not in ind2_matched_clauses:\n count += 1\n # Perform the uniform crossover\n for k in range(len(ind1[i])):\n if random.random() < 0.5:\n temp = ind1[i][k]\n ind1[i][k] = ind2[j][k]\n ind2[j][k] = temp\n ind1_matched_clauses.add(i)\n ind2_matched_clauses.add(j)", "def solve(num_wizards, num_constraints, wizards, constraints):\n\n # print(num_wizards)\n # print(num_constraints)\n # print(wizards)\n # print(constraints)\n # node_set = set(wizards)\n \n\n\n def cost(sol,num_constraints,constraints):\n constraints_satisfied = 0\n constraints_failed = []\n output_ordering_map = {k: v for v, k in enumerate(sol)}\n for c in constraints:\n\n m = output_ordering_map # Creating an alias for easy reference\n\n wiz_a = m[c[0]]\n wiz_b = m[c[1]]\n wiz_mid = m[c[2]]\n\n if (wiz_a < wiz_mid < wiz_b) or (wiz_b < wiz_mid < wiz_a):\n constraints_failed.append(c)\n else:\n constraints_satisfied += 1\n return num_constraints - constraints_satisfied\n\n def neighbors(sol):\n wiz1 = random.randint(0,num_wizards-1)\n wiz2 = random.randint(0,num_wizards-1)\n\n new_sol = copy.copy(sol)\n temp = new_sol[wiz1]\n new_sol[wiz1] = new_sol[wiz2]\n new_sol[wiz2] = temp\n \n return new_sol\n\n def acceptance_probability(old_cost,new_cost,T):\n exponent = (old_cost - new_cost) / T\n \n try:\n ans = math.exp(exponent)\n except OverflowError:\n ans = float('inf')\n return ans\n\n\n def anneal(solution, num_constraints, 
constraints):\n old_cost = 0\n new_cost = 0\n old_cost = cost(solution,num_constraints,constraints)\n T = 1.0\n T_min = 0.000001\n alpha = 0.98\n while T > T_min:\n i = 1\n while i <= 1000:\n new_solution = neighbors(solution)\n new_cost = cost(new_solution,num_constraints,constraints)\n if new_cost == 0:\n return new_solution,new_cost\n ap = acceptance_probability(old_cost, new_cost, T)\n if ap > random.random():\n solution = new_solution\n old_cost = new_cost\n i += 1\n T = T*alpha\n return solution, old_cost\n\n s = copy.copy(wizards)\n random.shuffle(s)\n ret = anneal(s,num_constraints,constraints)\n \n for i in range(10):\n if ret[1] == 0:\n break\n random.shuffle(s)\n new_ret = anneal(s,num_constraints,constraints)\n print(i)\n if new_ret[1] < ret[1]:\n ret = new_ret\n print(\"constraints failed: {0}\".format(ret[1]))\n return ret[0]", "def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring", "def test_cost_mixed():\n # boundaries:\n vm_max = 1.05\n vm_min = 0.95\n\n # create net\n net = pp.create_empty_network()\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_gen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50,\n min_q_kvar=-50)\n pp.create_ext_grid(net, 0)\n pp.create_load(net, 1, p_kw=20, controllable=False, max_q_kvar=50, max_p_kw=100, min_p_kw=50,\n min_q_kvar=-50)\n pp.create_line_from_parameters(net, 0, 1, 50, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=100 * 690)\n\n # testing some combinations\n pp.create_polynomial_cost(net, 0, \"gen\", np.array([0, 1, 0]))\n pp.runopp(net, verbose=False)\n assert net[\"OPF_converged\"]\n assert net.res_cost == - net.res_gen.p_kw.values\n\n net.polynomial_cost.c.at[0] = np.array([[1, 0, 0]])\n pp.runopp(net, verbose=False)\n assert net[\"OPF_converged\"]\n assert net.res_cost - net.res_gen.p_kw.values**2 < 1e-5\n\n net.polynomial_cost.c.at[0] = np.array([[1, 0, 1]])\n pp.runopp(net, verbose=False)\n assert net[\"OPF_converged\"]\n assert net.res_cost - net.res_gen.p_kw.values**2 - 1 < 1e-5\n\n net.load.controllable.at[0] = True\n pp.runopp(net, verbose=False)\n assert net.res_cost - net.res_gen.p_kw.values ** 2 - 1 < 1e-5\n\n pp.create_piecewise_linear_cost(net, 0, \"load\", np.array([[0, 0], [100, 100]]), type=\"p\")\n pp.runopp(net, verbose=False)\n assert net.res_cost - net.res_gen.p_kw.values ** 2 - 1 - net.res_load.p_kw.values < 1e-5", "def solvePostOverlap(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q):\n \"\"\"Contains a dummy target for defenders and attackers\"\"\"\n # Add the extra dummy target\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n 
_dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n placements = getPlacements(defenders, targetNumWithDummies)\n omegaKeys = getOmegaKeys(aTypes, placements, attackerActions)\n\n # Build the model\n model = Model('PrimalWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, ub=1, name=\"w\")\n objectiveFunction = sum([q[lam] * sum([w[s,a,lam] * defenderSocialUtility(s,a,defenders,_dRewards,_dCosts,_dPenalties) for s in placements for a in attackerActions]) for lam in aTypes])\n c1 = [sum([w[s,a,lam] * aUtility(s,a,lam,_aPenalties,_aRewards) for s in placements]) \\\n >= sum([w[s,a,lam] * aUtility(s,b,lam,_aPenalties,_aRewards) for s in placements])\n for lam in aTypes for a in attackerActions for b in attackerActions if a != b]\n c1 = [constraint for constraint in c1 if not isinstance(constraint, bool)]\n c1 = model.add_constraints(c1)\n c2 = model.add_constraints([sum([q[lam] * sum([w[s,a,lam] * utilityM(d,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes]) \\\n >= sum([q[lam] * sum([w[s,a,lam] * utilityM(e,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes])\n for m in defenders for d in targetRange for e in targetRange if d!=e])\n c3 = model.add_constraints([sum([w[(s,a,lam)] for s in placements for a in attackerActions]) == 1 for lam in aTypes])\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n # Now that w contains all the outcomes and their probabilities, sum the attacker utilities up.\n utilityPerAttacker = 0\n for k,v in w.items():\n prob = float(v)\n s,a,lam = k\n utilityPerAttacker += aUtility(s,a,lam,_aPenalties,_aRewards) * prob\n utilityPerAttacker /= len(aTypes)\n utilityPerDefender = model.solution.get_objective_value()\n utilityPerDefender /= len(defenders)\n\n return utilityPerDefender, utilityPerAttacker, None", "def smart_clause_crossover_dispatch(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None, use_infeasibility=False):\n if use_infeasibility:\n smart_clause_crossover_infeasibility(ind1, ind2, examples, greedy=greedy, probability_variant=probability_variant, temperature=temperature, clause_bitvector_cache=clause_bitvector_cache)\n else:\n smart_clause_crossover(ind1, ind2, examples, greedy=greedy, probability_variant=probability_variant, temperature=temperature, clause_bitvector_cache=clause_bitvector_cache)", "def test_penalty_compute(self):\n objective = {'G': numpy.zeros((9)),\n 'H': numpy.diag((1,)*9),\n 'X': 1}\n for penalty in self.penalties:\n result = penalty.compute([1]*self.np, objective)\n assert isinstance(result,tuple)\n # more tests go here", "def crossover(parent1, parent2):\n child = parent1.clone()\n for k in range(parent1.num_input + parent1.num_output):\n if np.random.randint(2) == 1:\n child.identifiers[k] = parent2.identifiers[k]\n child.inhibitors[k] = parent2.inhibitors[k]\n child.enhancers[k] = parent2.enhancers[k]\n\n child.identifiers = child.identifiers[:(child.num_input +\n child.num_output)]\n child.inhibitors = child.inhibitors[:(child.num_input + child.num_output)]\n child.enhancers = child.enhancers[:(child.num_input + child.num_output)]\n\n p1range = list(range(parent1.num_input + 
parent1.num_output,\n parent1.size()))\n random.shuffle(p1range)\n p2range = list(range(parent2.num_input + parent2.num_output,\n parent2.size()))\n random.shuffle(p2range)\n\n p1remaining = deepcopy(p1range)\n\n # Crossing regulatory\n p1_gene_count = 0\n p2_gene_count = 0\n for p1idx in p1range:\n min_dist = config.CROSSOVER_THRESHOLD\n paired_idx = None\n for p2idx in p2range:\n gdist = parent1.protein_distance(parent2, p1idx, p2idx)\n if gdist < min_dist:\n min_dist = gdist\n paired_idx = p2idx\n if paired_idx is not None:\n if np.random.randint(2) == 0:\n chosen_parent = parent1\n chosen_idx = p1idx\n p1_gene_count += 1\n else:\n chosen_parent = parent2\n chosen_idx = p2idx\n p2_gene_count += 1\n child.identifiers = np.append(\n child.identifiers, chosen_parent.identifiers[chosen_idx])\n child.inhibitors = np.append(\n child.inhibitors, chosen_parent.inhibitors[chosen_idx])\n child.enhancers = np.append(\n child.enhancers, chosen_parent.enhancers[chosen_idx])\n # Remove from consideration again\n p2range = list(set(p2range) - set([p2idx]))\n p1remaining = list(set(p1remaining) - set([p1idx]))\n\n # Add remaining material\n if child.size() == (child.num_input + child.num_output):\n prob = 0.5\n else:\n prob = p1_gene_count / (p1_gene_count + p2_gene_count)\n\n chosen_parent = parent2\n chosen_range = p2range\n if np.random.random() < prob:\n chosen_parent = parent1\n chosen_range = p1remaining\n\n for idx in chosen_range:\n child.identifiers = np.append(child.identifiers,\n chosen_parent.identifiers[idx])\n child.inhibitors = np.append(child.inhibitors,\n chosen_parent.inhibitors[idx])\n child.enhancers = np.append(child.enhancers,\n chosen_parent.enhancers[idx])\n\n child.num_regulatory = child.size() - (child.num_input + child.num_output)\n\n # Cross dynamics\n if np.random.random() < 0.5:\n child.beta = parent1.beta\n else:\n child.beta = parent2.beta\n\n if np.random.random() < 0.5:\n child.delta = parent1.delta\n else:\n child.delta = parent2.delta\n\n return child", "def solveExOverlap(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q):\n \"\"\"Contains a dummy target for defenders and attackers\"\"\"\n \"\"\"In this game the attacker and defender reason ex-ante\n (they choose to follow signals or not before a signal is sent).\"\"\"\n # Add the extra dummy target\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n placements = getPlacements(defenders, targetNumWithDummies)\n omegaKeys = getOmegaKeys(aTypes, placements, attackerActions)\n\n # Build the model\n model = Model('ExAnteWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, ub=1, name=\"w\")\n objectiveFunction = sum([q[lam] * sum([w[sd,sa,lam] * defenderSocialUtility(sd,sa,defenders,_dRewards,_dCosts,_dPenalties) for sd in placements for sa in attackerActions]) for lam in aTypes])\n # Define the constraints\n c1 = [sum([w[sd,sa,lam] * aUtility(sd,sa,lam,_aPenalties,_aRewards) for sd in placements for sa in attackerActions]) \\\n >= sum([w[sd,sa,lam] * aUtility(sd,tPrime,lam,_aPenalties,_aRewards) for sd 
in placements for sa in attackerActions])\n for lam in aTypes for tPrime in targetRange]\n c1 = [constraint for constraint in c1 if not isinstance(constraint, bool)]\n c2 = [sum([q[lam] * sum([w[sd,sa,lam] * utilityM(sd[d],sd,sa,d,_dRewards,_dPenalties,_dCosts) for sd in placements for sa in attackerActions]) for lam in aTypes]) \\\n >= sum([q[lam] * sum([w[sd,sa,lam] * utilityM(tPrime,sd,sa,d,_dRewards,_dPenalties,_dCosts) for sd in placements for sa in attackerActions]) for lam in aTypes]) \\\n for d in defenders for tPrime in targetRange]\n c3 = [sum([w[sd,sa,lam] for sd in placements for sa in attackerActions]) == 1\n for lam in aTypes]\n # Add the constraints\n c1 = model.add_constraints(c1)\n c2 = model.add_constraints(c2)\n c3 = model.add_constraints(c3)\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n # Now that w contains all the outcomes and their probabilities, sum the attacker utilities up.\n utilityPerAttacker = 0\n for k,v in w.items():\n prob = float(v)\n s,a,lam = k\n utilityPerAttacker += aUtility(s,a,lam,_aPenalties,_aRewards) * prob\n utilityPerAttacker /= len(aTypes)\n utilityPerDefender = model.solution.get_objective_value()\n utilityPerDefender /= len(defenders)\n return utilityPerDefender, utilityPerAttacker, None", "def one_point_crossover(self, another_individual):\n size = len(another_individual.get_genotype())\n genotype1 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n genotype2 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n idx = numpy.random.randint(1, size)\n numpy.put(genotype1, range(0, idx), another_individual.get_genotype()[0:idx])\n numpy.put(genotype1, range(idx, size), self.get_genotype()[idx:size])\n numpy.put(genotype2, range(0, idx), self.get_genotype()[0:idx])\n numpy.put(genotype2, range(idx, size), another_individual.get_genotype()[idx:size])\n\n return optimization.Individual(genotype1, self.fitness_evaluator, self.crossover_method, self.mutation_method), optimization.Individual(genotype2, self.fitness_evaluator, self.crossover_method, self.mutation_method)", "def discretise_solution(solution, objective, *constraints, neighbourhood=0) -> Tuple[int, Dict[str, int]]:\n floors = {var: int(solution[var]) for var in solution}\n headroom = BUDGET - sum(floors.values())\n\n candidates = itertools.product(*(range(floor - neighbourhood, floor + neighbourhood + headroom + 1) for floor in floors.values()))\n cand_maps = (dict(zip(solution.keys(), cand)) for cand in candidates)\n constrained_candidates = filter(lambda cand: all(cons.subs(cand) == 0 for cons in constraints), cand_maps)\n cand_scores = ((objective.subs(cand), cand) for cand in constrained_candidates)\n\n return max(cand_scores, key=lambda x: x[0])", "def _compute_imprimitivity(self):\n m = floor(self._.d / 2)\n self._.antipodal = all(full_simplify(\n self._.b[i] - self._.c[self._.d - i]) == 0\n for i in range(self._.d) if i != m)\n self._.bipartite = all(a == 0 for a in self._.a)\n if self._.antipodal:\n try:\n self._.r = integralize(\n 1 + self._.b[m] / self._.c[self._.d - m])\n except TypeError:\n raise InfeasibleError(\"covering index not integral\")\n if self._.d >= 2:\n if self._.d == 2:\n b = [self._.b[0]/(self._.b[1]+1)]\n c = [Integer(1)]\n else:\n b = self._.b[:m]\n c = list(self._.c[1:m+1])\n if is_divisible(self._.d, 2):\n c[-1] *= self._.r\n scheme = self._get_class()(tuple(b), tuple(c))\n else:\n scheme = ASParameters(P=[[1]])\n self._.antipodal_subscheme = self.add_subscheme(scheme,\n self.ANTIPODAL)\n if 
self._.bipartite:\n if self._.d >= 2:\n b = tuple(self._.b[2*i]*self._.b[2*i+1]/self._.c[2]\n for i in range(m))\n c = tuple(self._.c[2*i+1]*self._.c[2*i+2]/self._.c[2]\n for i in range(m))\n scheme = self._get_class()(b, c)\n else:\n scheme = ASParameters(P=[[1]])\n self._.bipartite_subscheme = self.add_subscheme(scheme,\n self.BIPARTITE)", "def gen2_constraint(model):\n return 20, model.g[2], 100", "def constraint_test():\n import itertools, sys\n\n show_analysis = False\n #Generated via grammar\n gr = grammar.Grammar('grammars/test_constraints.bnf')\n inputs = ([1 for _ in range(100)], [ i%3 for i in range(100)])\n for _input in inputs: \n output = gr.generate(_input)\n azr = analyser.Analyser('test',output['phenotype'],True)\n try:\n azr.create_graph()\n except ValueError as e:\n print(__name__, \"ERROR\", _input, e)\n continue\n azr.parse_graph()\n azr.apply_stresses()\n azr.create_slf_file()\n azr.test_slf_file()\n azr.parse_results()\n azr.print_stresses()\n if show_analysis:\n azr.show_analysis()\n \n #Fixed generated\n lengths = (1000, 10000)\n levels = (5, 10)\n for length_idx, level_idx in itertools.permutations([0,1]):\n try:\n GRAPH = constrained_offset_graph(lengths[length_idx],\n levels[length_idx])\n except ValueError as e:\n print(__name__, \"ERROR\", lengths[length_idx], levels[length_idx], e)\n continue\n GRAPH.save_graph(\"pylon\")\n print \"nodes:\", GRAPH.number_of_nodes()\n print \"edges\", GRAPH.number_of_edges()\n #will it blend?\n azr = analyser.Analyser('test',\"moo\",True)\n azr.my_graph = GRAPH\n azr.parse_graph()\n azr.apply_stresses()\n azr.create_slf_file()\n azr.test_slf_file()\n azr.parse_results()\n azr.print_stresses()\n if show_analysis:\n azr.show_analysis()", "def solveExNoOverlap(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q):\n \"\"\"Contains as many dummy targets as defenders, for defenders and attackers\"\"\"\n \"\"\"In this game the attacker and defender reason ex-ante\n (they choose to follow signals or not before a signal is sent).\"\"\"\n # Add the extra dummy targets\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n # Get the suggestions that occur with no overlap\n overlapPlacements = getPlacements(defenders, targetNumWithDummies)\n placements = list(filter(lambda x: len(set(x)) == len(x), overlapPlacements))\n omegaKeys = getOmegaKeys(aTypes, placements, attackerActions)\n\n # Build the model\n model = Model('ExAnteWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, ub=1, name=\"w\")\n objectiveFunction = sum([q[lam] * sum([w[sd,sa,lam] * defenderSocialUtility(sd,sa,defenders,_dRewards,_dCosts,_dPenalties) for sd in placements for sa in attackerActions]) for lam in aTypes])\n # Define the constraints\n c1 = [sum([w[sd,sa,lam] * aUtility(sd,sa,lam,_aPenalties,_aRewards) for sd in placements for sa in attackerActions]) \\\n >= sum([w[sd,sa,lam] * aUtility(sd,tPrime,lam,_aPenalties,_aRewards) for sd in placements for sa in attackerActions])\n for lam in aTypes for tPrime in targetRange]\n c1 = [constraint for constraint 
in c1 if not isinstance(constraint, bool)]\n c2 = [sum([q[lam] * sum([w[sd,sa,lam] * utilityM(sd[d],sd,sa,d,_dRewards,_dPenalties,_dCosts) for sd in placements for sa in attackerActions]) for lam in aTypes]) \\\n >= sum([q[lam] * sum([w[sd,sa,lam] * utilityM(tPrime,sd,sa,d,_dRewards,_dPenalties,_dCosts) for sd in placements for sa in attackerActions]) for lam in aTypes]) \\\n for d in defenders for tPrime in targetRange]\n c3 = [sum([w[sd,sa,lam] for sd in placements for sa in attackerActions]) == 1\n for lam in aTypes]\n # Add the constraints\n c1 = model.add_constraints(c1)\n c2 = model.add_constraints(c2)\n c3 = model.add_constraints(c3)\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n # Now that w contains all the outcomes and their probabilities, sum the attacker utilities up.\n utilityPerAttacker = 0\n for k,v in w.items():\n prob = float(v)\n s,a,lam = k\n utilityPerAttacker += aUtility(s,a,lam,_aPenalties,_aRewards) * prob\n utilityPerAttacker /= len(aTypes)\n utilityPerDefender = model.solution.get_objective_value()\n utilityPerDefender /= len(defenders)\n return utilityPerDefender, utilityPerAttacker, None", "def smooth_input(xs, ys, L):\n n = len(xs)\n\n # obj = [1 for i in range(n)]\n # for i in range(2 * n):\n # obj.append(0)\n\n # Create the model\n model = LpProblem(name=\"small-problem\", sense=LpMinimize)\n ws = [LpVariable(name=\"w_{}\".format(i), lowBound=0, upBound=1) for i in range(n)]\n ls = [LpVariable(name=\"L_{}\".format(i), lowBound=0) for i in range(n)]\n zs = [LpVariable(name=\"z_{}\".format(i)) for i in range(n)]\n\n # objective\n model += lpSum(ws)\n\n # constraint 1:\n # sum of Li <= L\n model += (lpSum(ls) <= L * n, \"sum of Li <= L\")\n\n # Constraint 2:\n # w_i >= |z_i - y_i|\n for i in range(n):\n model += (ws[i] + zs[i] >= ys[i], \"C2.a_{}\".format(i))\n model += (ws[i] - zs[i] >= -ys[i], \"C2.b_{}\".format(i))\n\n # Constraint 3\n # |z_i - z_j| <= L_i * dist(x_i, x_j)\n for i in range(n):\n for j in range(n):\n if i != j:\n model += (zs[i] - zs[j] - abs(xs[i] - xs[j]) * ls[i] <= 0, \"C3.a_{}_{}\".format(i, j))\n model += (zs[j] - zs[i] - abs(xs[i] - xs[j]) * ls[i] <= 0, \"C3.b_{}_{}\".format(i, j))\n\n if model.solve() == 1:\n print(\n \"------------------------------------\\nFound solution for the linear program\\n------------------------------------\\n\")\n return [[xs[i], zs[i].value()] for i in range(n)]\n # return [zi.value() for zi in zs], [li.value() for li in ls]\n\n print(\"Linear program: no solution found\")\n exit(1)\n return -1", "def Example_2():\r\n print \"\\n** Example_2: Finding the minimum of the Rosenbrock function with 2 variables under constraints **\"\r\n\r\n Ex = optim_wrapper()\r\n X0 = np.zeros(2)\r\n lim = [(-2.0, 2.0)]*2\r\n Ex.set_X0(X0)\r\n Ex.set_lim(lim)\r\n Ex.set_penalties_func(pen)\r\n Ex.set_norm_count(200)\r\n Ex.set_nb_best(100)\r\n Ex.set_obj_func(obj)\r\n Ex.set_wrapper()\r\n Ex.launch_multi_opti()\r\n print Ex\r\n\r\n X_solution = [1.0, 1.0]\r\n res_string = \"Results of the optimisation: {:03.4f}, expected results: {:03.4f}\".format(obj(Ex.get_res()), obj(X_solution))\r\n print res_string\r\n print \"*\" * len(res_string)", "def minimize(self, evaluate, constrainToLower=False, constrainToUpper=False):\n improved = array([0,0,0])\n #------------------------------------------------\n for index, member in enumerate(self.population):\n #------------------------------------------------\n source = self.population[randrange(len(self.population))]\n x = member.copyAndModify(self.maxMutations, 
self.scale, source, self.maxIndexes)\n if constrainToLower:\n x = maximum(self.lowerDomain, x)\n if constrainToUpper:\n x = minimum(self.upperDomain, x)\n #------------------------------------------------\n loss = evaluate(x)\n #------------------------------------------------\n if index == self.diversityIndex:\n self.diversity.update(x, loss)\n self.diversityLoss = loss\n #------------------------------------------------\n if loss < self.eliteLoss:\n member.update(x, loss)\n self.eliteIndex = index\n self.eliteLoss = loss\n improved[0] += 1\n else:\n slot = randrange(len(self.population))\n slotMember = self.population[slot]\n if (slot != self.diversityIndex) and (loss <= slotMember.loss):\n # --------------------------------------------------\n slotMember.update(x, loss)\n improved[1] += 1\n # --------------------------------------------------\n elif (index != self.diversityIndex) and (loss <= member.loss):\n # --------------------------------------------------\n member.update(x, loss)\n improved[2] += 1\n # --------------------------------------------------\n #------------------------------------------------\n # --------------------------------------------------\n # reduce the scale if there were less than 'self.minImprovements' \n # improved members in the population.\n if sum(improved) < self.minImprovements:\n self.scale *= self.gamma\n # --------------------------------------------------\n self.improvements += improved", "def crossoverIndividuals(father, mother, bwsFitnessFunction, highIsGood):\n\n #choose depth of crossover point at random\n crossoverDepth = round(random.uniform(1,father.getDepth()))\n\n #get all subtrees of father and mother at that layer of deepness\n fatherNodesAtLayer = father.getNodesAtDepth(crossoverDepth)\n motherNodesAtLayer = mother.getNodesAtDepth(crossoverDepth)\n\n numberOfNodesinLayer = pow(2, crossoverDepth)\n\n #if no fitnessfunction is supplied, use random crossover\n if bwsFitnessFunction is None:\n indexM = round(random.uniform(0,numberOfNodesinLayer - 1))\n indexF = round(random.uniform(0,numberOfNodesinLayer - 1))\n\n #if bws (Best-Worst-Subtree) crossover is used, at crossoverDepth\n #find the best subtree from father and the worst from mother\n else:\n fitnessValuesOfFatherNodes = list(map(bwsFitnessFunction, fatherNodesAtLayer))\n fitnessValuesOfMotherNodes = list(map(bwsFitnessFunction, motherNodesAtLayer))\n\n if highIsGood:\n indexF = fitnessValuesOfFatherNodes.index(max(fitnessValuesOfFatherNodes))\n indexM = fitnessValuesOfMotherNodes.index(min(fitnessValuesOfMotherNodes))\n else:\n indexF = fitnessValuesOfFatherNodes.index(min(fitnessValuesOfFatherNodes))\n indexM = fitnessValuesOfMotherNodes.index(max(fitnessValuesOfMotherNodes))\n\n fatherCrossOverNode = copy.deepcopy(fatherNodesAtLayer[indexF])\n\n #exchange identified crossover nodes\n child = copy.deepcopy(mother)\n child.updateSubTree(crossoverDepth, indexM, fatherCrossOverNode)\n\n return child", "def calculate_sub_cost(source, target, sub_cost = 2):\r\n \r\n if source == target:\r\n return 0\r\n else:\r\n return sub_cost", "def selection(self,parents,popSize):\n for i in range(popSize):\n idx1 = np.random.randint(0,popSize)\n idx2 = np.random.randint(0,popSize)\n if parents.individuals[idx1].violationSum < parents.individuals[idx2].violationSum:\n self.individuals[i] = parents.individuals[idx1]\n elif parents.individuals[idx1].violationSum > parents.individuals[idx2].violationSum:\n self.individuals[i] = parents.individuals[idx2]\n elif 
parents.individuals[idx1].objectiveFunction[0] < parents.individuals[idx2].objectiveFunction[0]:\n self.individuals[i] = parents.individuals[idx1]\n else:\n self.individuals[i] = parents.individuals[idx2]\n \"\"\"\n print(\"Offsprings(self) Impresso dentro de selection (FIM).\")\n self.printPopulation(popSize)\n print(\"Parents Impresso dentro de selection (FIM).\")\n parents.printPopulation(popSize)\n \"\"\"", "def solvePostNoOverlap(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q):\n \"\"\"Contains as many dummy targets as defenders, for defenders and attackers\"\"\"\n # Add the extra dummy targets\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n # Get the suggestions that occur with no overlap\n overlapPlacements = getPlacements(defenders, targetNumWithDummies)\n placements = list(filter(lambda x: len(set(x)) == len(x), overlapPlacements))\n omegaKeys = getOmegaKeys(aTypes, placements, attackerActions)\n\n # Build the model\n model = Model('PrimalWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, ub=1, name=\"w\")\n objectiveFunction = sum([q[lam] * sum([w[s,a,lam] * defenderSocialUtility(s,a,defenders,_dRewards,_dCosts,_dPenalties) for s in placements for a in attackerActions]) for lam in aTypes])\n c1 = [sum([w[s,a,lam] * aUtility(s,a,lam,_aPenalties,_aRewards) for s in placements]) \\\n >= sum([w[s,a,lam] * aUtility(s,b,lam,_aPenalties,_aRewards) for s in placements])\n for lam in aTypes for a in attackerActions for b in attackerActions if a != b]\n c1 = [constraint for constraint in c1 if not isinstance(constraint, bool)]\n c1 = model.add_constraints(c1)\n c2 = model.add_constraints([sum([q[lam] * sum([w[s,a,lam] * utilityM(d,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes]) \\\n >= sum([q[lam] * sum([w[s,a,lam] * utilityM(e,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes])\n for m in defenders for d in targetRange for e in targetRange if d!=e])\n c3 = model.add_constraints([sum([w[(s,a,lam)] for s in placements for a in attackerActions]) == 1 for lam in aTypes])\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n # Now that w contains all the outcomes and their probabilities, sum the attacker utilities up.\n utilityPerAttacker = 0\n for k,v in w.items():\n prob = float(v)\n s,a,lam = k\n utilityPerAttacker += aUtility(s,a,lam,_aPenalties,_aRewards) * prob\n utilityPerAttacker /= len(aTypes)\n utilityPerDefender = model.solution.get_objective_value()\n utilityPerDefender /= len(defenders)\n return utilityPerDefender, utilityPerAttacker, None", "def uniformCrossover(self, cl):\n if cons.env.format_data.discrete_action: #Always crossover condition if the phenotype is discrete (if continuous phenotype, half the time phenotype crossover is performed instead)\n self_specified_atts = copy.deepcopy(self.specified_attributes)\n cl_specified_atts = copy.deepcopy(cl.specified_attributes)\n probability = 0.5 #Equal 
probability for attribute alleles to be exchanged.\n\n #Make list of attribute references appearing in at least one of the parents.-----------------------------\n combined_atts = []\n for i in self_specified_atts:\n combined_atts.append(i)\n for i in cl_specified_atts:\n if i not in combined_atts:\n combined_atts.append(i)\n elif not cons.env.format_data.attribute_info[i][0]: #Attribute specified in both parents, and the attribute is discrete (then no reason to cross over)\n combined_atts.remove(i)\n combined_atts.sort()\n #--------------------------------------------------------------------------------------------------------\n changed = False;\n for att in combined_atts: #Each condition specifies different attributes, so we need to go through all attributes in the dataset.\n att_info = cons.env.format_data.attribute_info[att]\n #-----------------------------\n ref = 0\n #if att in self.specified_attributes:\n if att in self_specified_atts:\n ref += 1\n #if att in cl.specified_attributes:\n if att in cl_specified_atts:\n ref += 1\n #-----------------------------\n\n if ref == 0: #Attribute not specified in either condition (Attribute type makes no difference)\n print(\"Error: UniformCrossover!\")\n pass\n\n elif ref == 1: #Attribute specified in only one condition - do probabilistic switch of whole attribute state (Attribute type makes no difference)\n if att in self_specified_atts and random.random() > probability:\n i = self.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n cl.condition.append(self.condition.pop(i)) #Take attribute from self and add to cl\n cl.specified_attributes.append(att)\n self.specified_attributes.remove(att)\n changed = True #Remove att from self and add to cl\n\n\n if att in cl_specified_atts and random.random() < probability:\n i = cl.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n self.condition.append(cl.condition.pop(i)) #Take attribute from self and add to cl\n self.specified_attributes.append(att)\n cl.specified_attributes.remove(att)\n changed = True #Remove att from cl and add to self.\n\n\n else: #Attribute specified in both conditions - do random crossover between state alleles. 
The same attribute may be specified at different positions within either classifier\n #-------------------------------------------------------\n # CONTINUOUS ATTRIBUTE\n #-------------------------------------------------------\n if att_info[0]:\n i_cl1 = self.specified_attributes.index(att) #pairs with self (classifier 1)\n i_cl2 = cl.specified_attributes.index(att) #pairs with cl (classifier 2)\n tmp_key = random.randint(0,3) #Make random choice between 4 scenarios, Swap minimums, Swap maximums, Self absorbs cl, or cl absorbs self.\n if tmp_key == 0: #Swap minimum\n temp = self.condition[i_cl1][0]\n self.condition[i_cl1][0] = cl.condition[i_cl2][0]\n cl.condition[i_cl2][0] = temp\n elif tmp_key == 1: #Swap maximum\n temp = self.condition[i_cl1][1]\n self.condition[i_cl1][1] = cl.condition[i_cl2][1]\n cl.condition[i_cl2][1] = temp\n else: #absorb range\n all_list = self.condition[i_cl1] + cl.condition[i_cl2]\n new_min = min(all_list)\n new_max = max(all_list)\n if tmp_key == 2: #self absorbs cl\n self.condition[i_cl1] = [new_min,new_max]\n #Remove cl\n cl.condition.pop(i_cl2)\n cl.specified_attributes.remove(att)\n else: #cl absorbs self\n cl.condition[i_cl2] = [new_min,new_max]\n #Remove self\n self.condition.pop(i_cl1)\n self.specified_attributes.remove(att)\n #-------------------------------------------------------\n # DISCRETE ATTRIBUTE\n #-------------------------------------------------------\n else:\n pass\n tmp_list1 = copy.deepcopy(self_specified_atts)\n tmp_list2 = copy.deepcopy(cl.specified_attributes)\n tmp_list1.sort()\n tmp_list2.sort()\n if changed and (tmp_list1 == tmp_list2):\n changed = False\n\n if self.action != cl.action and random.random() > probability:\n # Switch phenotypes of 2 classifiers if GA is run in match set\n temp = self.action\n self.action = cl.action\n cl.action = temp\n changed = True\n return changed\n #-------------------------------------------------------\n # CONTINUOUS PHENOTYPE CROSSOVER\n #-------------------------------------------------------\n elif random.random() < 0.5:\n return self.actionCrossover(cl)", "def crossover(x1,x2):\n for chromo in x1.chromosomes:\n result_chromos = [np.zeros((chromo.shape))]\n #result_chromos = [np.zeros((chromo.shape)) for chromo in x1.chromosomes]\n i = 0\n for j in range(len(x1.chromosomes[i])):\n for k in range(len(x1.chromosomes[i][j])):\n if(np.random.rand(1) < 0.5):\n result_chromos[i][j][k] = x1.chromosomes[i][j][k]\n else:\n result_chromos[i][j][k] = x2.chromosomes[i][j][k]\n if(np.random.rand(1)< 0.8):#at 0.3 very agressive\n result_chromos[i][j][k] += -0.05 + np.random.rand(1)*0.1\n return result_chromos", "def crossover(parent1: Individual, parent2: Individual, root_individual: RootIndividual,\n **kwargs) -> Tuple[Individual, Individual]:\n sieve = np.random.randint(2, size=len(parent1.params)) # Array of 0's and 1's\n not_sieve = sieve ^ 1 # Complement of sieve\n\n child1 = Individual(list(parent1.params * sieve + parent2.params * not_sieve), root_individual=root_individual)\n child2 = Individual(list(parent1.params * not_sieve + parent2.params * sieve), root_individual=root_individual)\n\n return child1, child2", "def method2(automaton, level):\r\n\r\n old_bad_twin = automaton\r\n i = 1\r\n while i <= level:\r\n new_bad_twin = generate_bad_twin(old_bad_twin, i)\r\n c2 = condition_C2(new_bad_twin)\r\n c3 = condition_C3(new_bad_twin)\r\n if not(c2 or c3):\r\n good_twin = generate_good_twin(new_bad_twin)\r\n synchronized, ambiguous_transitions = synchronize_1(new_bad_twin, good_twin)\r\n c1 = 
condition_C1(ambiguous_transitions)\r\n if not c1:\r\n for src_name, dst_name in ambiguous_transitions:\r\n states = synchronized.get_states()\r\n if find_loops(states[dst_name], {src_name}):\r\n return i - 1\r\n old_bad_twin = new_bad_twin\r\n i += 1\r\n return True", "def generate_connectivity_constraint_all(problem):\n\n if problem.num_vars == None:\n problem.compute_num_var()\n\n ret = Constraint()\n\n # Iterator over all (v, t) subsets in the graph\n for b, b_r in enumerate(problem.src):\n # Convert each set in the iterator to (v,t) format\n add_S = map(\n lambda S: list(map(problem.get_time_augmented_n_t, S)),\n problem.powerset_exclude_agent(b_r),\n )\n ret &= generate_connectivity_constraint(problem, [b], add_S)\n\n return ret", "def solve(targets, \n payoff,\n defender_resources:int=1, \n attacker_resources:int=1, \n ptype:str=\"MILP\", \n minimax:str=\"maximize\"):\n # Need a big number. Will lower bound later\n M = 9999\n\n p = cplex.Cplex()\n if ptype in (\"milp\", \"MILP\"):\n p.set_problem_type(cplex.Cplex.problem_type.MILP)\n else:\n print(\"Problem type:\",ptype,\"is not currently supported\")\n exit(1)\n\n if minimax in (\"max\",\"maximize\"):\n p.objective.set_sense(p.objective.sense.maximize)\n elif minimax in (\"min\",\"minimize\"):\n p.objective.set_sense(p.objective.sense.minimize)\n else:\n print(\"Only solves maximization or minimization problems\")\n\n num_targets = len(targets)\n # v is the z's, x's, v_def, and v_att\n v = [\"z\"+str(t) for t in range(num_targets)] \\\n + [\"x\"+str(t) for t in range(num_targets)] \\\n + [\"v_def\",\"v_att\"] \n num_variables = len(v)\n obj = np.zeros(num_variables)\n for i in range(num_variables):\n if v[i] == \"v_def\":\n obj[i] = 1.\n lb = np.zeros(num_variables)\n ub = np.ones(num_variables)\n for i in range(num_variables):\n if v[i] in (\"v_def\",\"v_att\"):\n ub[i] = cplex.infinity\n lb[i] = -1*cplex.infinity\n\n p.variables.add(obj = obj, # Objective function\n lb = lb, # Lower bound\n ub = ub, # Upper bound\n names = v) # Variable names\n # z_i \\in {0,1} Set all z_i to integer values\n [p.variables.set_types([(\"z\"+str(t),p.variables.type.integer)]) for t in range(num_targets)]\n # x_i \\in [0,1] Set all x_i to continuous values\n [p.variables.set_types([(\"x\"+str(t),p.variables.type.continuous)]) for t in range(num_targets)]\n # Also set for attacker and defender\n p.variables.set_types([(\"v_def\",p.variables.type.continuous)])\n p.variables.set_types([(\"v_att\",p.variables.type.continuous)])\n\n util_du = [M+payoff[i][2] for i in range(num_targets)]\n util_dc = [payoff[i][3] for i in range(num_targets)]\n util_ac = [M+payoff[i][3] for i in range(num_targets)]\n init_params = np.array([1.,defender_resources])\n rhs = np.hstack((init_params, util_du, util_dc, util_ac))\n\n senses = [\"E\",\"L\"] \\\n + [\"L\" for i in range(num_targets)] \\\n + [\"G\" for i in range(num_targets)]\\\n + [\"L\" for i in range(num_targets)]\n \n\n constraints = []\n zl = []\n zc = []\n xl = []\n xc = []\n for t in range(num_targets):\n zl.append(\"z\"+str(t))\n zc.append(1.)\n xl.append(\"x\"+str(t))\n xc.append(1.)\n constraints.append([zl,zc])\n constraints.append([xl,xc])\n\n # Defender's utility\n # Interleave vars and coefficients\n # Easier doing it this way that inline loops\n def_util_vars = []#np.zeros(num_targets*3)\n def_util_coef = []#np.zeros(num_targets*3)\n def_util = []\n for i in range(num_targets):\n def_util_vars = ([\"v_def\", \"x\"+str(i), \"z\"+str(i)])\n def_util_coef = ([1., (payoff[i][2] - payoff[i][1]), M])\n 
constraints.append([def_util_vars, def_util_coef])\n\n\n\n # Attacker strats\n att_strat_vars = []\n att_strat_coef = []\n att_strat = []\n for i in range(num_targets):\n att_strat_vars = ([\"v_att\", \"x\"+str(i)])\n att_strat_coef = ([1., payoff[i][3] - payoff[i][4]])\n constraints.append([att_strat_vars,att_strat_coef])\n\n\n # Attacker utility\n att_util_vars = []\n att_util_coef = []\n att_util = []\n for i in range(num_targets):\n att_util_vars = ([\"v_att\", \"x\"+str(i), \"z\"+str(i)])\n att_util_coef = ([1., payoff[i][3] - payoff[i][4], M])\n constraints.append([att_util_vars, att_util_coef])\n\n # Throw them all together\n constraint_names = [\"r\"+str(i) for i in range(len(constraints))]\n\n p.linear_constraints.add(lin_expr = constraints,\n senses = senses,\n rhs = rhs,\n names = constraint_names)\n p.solve()\n return p.solution.get_values()" ]
[ "0.6610924", "0.5716931", "0.5647401", "0.5542738", "0.5517727", "0.55064774", "0.548928", "0.5414949", "0.5401225", "0.539947", "0.53967386", "0.53481674", "0.53454256", "0.5297864", "0.5291615", "0.5256472", "0.525462", "0.52351964", "0.52085125", "0.51946014", "0.51941335", "0.5182893", "0.5179354", "0.5177707", "0.5156935", "0.51505363", "0.514926", "0.5136981", "0.5136792", "0.51346636" ]
0.606364
1
Operates on the constraint level. This function makes use of the distinction between suboptimal and infeasible negative examples. Performs a smart crossover on the two given individuals and produces one individual (both individuals will be changed into this single resulting individual). Makes use of a heuristic that values constraints and combinations of constraints based on their coverage.
def smart_clause_crossover_infeasibility(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None):
    allow_duplicates = False  # allow_duplicates denotes whether the resulting individuals may contain duplicate clauses
    ind1_hard_constraints = [constr for constr in ind1 if constr[-2] == True]
    ind2_hard_constraints = [constr for constr in ind2 if constr[-2] == True]
    all_hard_constraints = ind1_hard_constraints + ind2_hard_constraints
    ind1_soft_constraints = [constr for constr in ind1 if constr[-2] == False]
    ind2_soft_constraints = [constr for constr in ind2 if constr[-2] == False]
    all_soft_constraints = ind1_soft_constraints + ind2_soft_constraints
    ind1_hard_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1_hard_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)
    ind2_hard_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2_hard_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)
    ind1_soft_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1_soft_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)
    ind2_soft_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2_soft_constraints, examples, use_infeasibility=True, clause_bitvector_cache=clause_bitvector_cache)
    all_hard_coverage_bitvectors = ind1_hard_coverage_bitvectors + ind2_hard_coverage_bitvectors
    all_soft_coverage_bitvectors = ind1_soft_coverage_bitvectors + ind2_soft_coverage_bitvectors
    ind1_num_hard = len([constr for constr in ind1 if constr[-2] == True])
    ind2_num_hard = len([constr for constr in ind2 if constr[-2] == True])
    # num_hard = random.choice([ind1_num_hard, ind2_num_hard])
    if ind1_num_hard <= ind2_num_hard:
        num_hard = random.choice(list(range(ind1_num_hard, ind2_num_hard+1)))
    else:
        num_hard = random.choice(list(range(ind2_num_hard, ind1_num_hard + 1)))
    num_soft = len(ind1) - num_hard
    chosen_hard_clauses = []
    chosen_hard_clause_indices = []
    chosen_soft_clauses = []
    chosen_soft_clause_indices = []

    # Choose hard constraints
    for i in range(0, num_hard):
        if i == 0:
            combined_hard_coverage_bitvectors = all_hard_coverage_bitvectors
        else:
            combined_hard_coverage_bitvectors = [combine_coverage_bitvectors_hard_constraints(
                chosen_hard_clauses_bitvector, bitvector, examples) for bitvector in all_hard_coverage_bitvectors]
        if not allow_duplicates:
            for index in chosen_hard_clause_indices:
                for j in range(len(combined_hard_coverage_bitvectors)):
                    if all_hard_constraints[index][:-2] == all_hard_constraints[j][:-2]:
                        combined_hard_coverage_bitvectors[j] = [0] * len(examples)
        if greedy:
            combined_hard_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_hard_coverage_bitvectors]
            best_hard_coverage = max(combined_hard_coverages)
            best_hard_indices = [i for i in range(len(combined_hard_coverages)) if combined_hard_coverages[i] == best_hard_coverage]
            chosen_hard_clause_index = random.choice(best_hard_indices)
        else:
            coverages = [sum(x) for x in combined_hard_coverage_bitvectors]
            if probability_variant == "linear":
                sum_coverages = sum(coverages)
                coverages_to_probabilities = [x / sum_coverages for x in coverages]
            elif probability_variant == "squared":
                coverages_squared = [x ** 2 for x in coverages]
                sum_coverages_squared = sum(coverages_squared)
                coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in coverages]
            elif probability_variant == "softmax":
                # Softmax with normalization to prevent overflow
                coverages_max = max(coverages)
                coverages_for_softmax = [a_coverage - coverages_max for a_coverage in coverages]
                coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(
                    np.exp(np.asarray(coverages_for_softmax) / temperature))
            chosen_hard_clause_index = np.random.choice(list(range(0, len(all_hard_coverage_bitvectors))),
                                                        p=coverages_to_probabilities)
        chosen_hard_coverage_bitvector = combined_hard_coverage_bitvectors[chosen_hard_clause_index]
        if chosen_hard_clause_index < len(ind1_hard_constraints):
            chosen_hard_clause = ind1_hard_constraints[chosen_hard_clause_index]
        else:
            chosen_hard_clause = ind2_hard_constraints[chosen_hard_clause_index - len(ind1_hard_constraints)]
        chosen_hard_clauses.append(chosen_hard_clause)
        chosen_hard_clause_indices.append(chosen_hard_clause_index)
        chosen_hard_clauses_bitvector = chosen_hard_coverage_bitvector

    # Choose soft constraints
    for i in range(0, num_soft):
        if i == 0:
            combined_soft_coverage_bitvectors = all_soft_coverage_bitvectors
        else:
            combined_soft_coverage_bitvectors = [combine_coverage_bitvectors_soft_constraints(
                chosen_soft_clauses_bitvector, bitvector, examples) for bitvector in all_soft_coverage_bitvectors]
        if not allow_duplicates:
            for index in chosen_soft_clause_indices:
                for j in range(len(combined_soft_coverage_bitvectors)):
                    if all_soft_constraints[index][:-2] == all_soft_constraints[j][:-2]:
                        combined_soft_coverage_bitvectors[j] = [0] * len(examples)
        if greedy:
            combined_soft_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_soft_coverage_bitvectors]
            best_soft_coverage = max(combined_soft_coverages)
            best_soft_indices = [i for i in range(len(combined_soft_coverages)) if combined_soft_coverages[i] == best_soft_coverage]
            chosen_soft_clause_index = random.choice(best_soft_indices)
        else:
            coverages = [sum(x) for x in combined_soft_coverage_bitvectors]
            if probability_variant == "linear":
                sum_coverages = sum(coverages)
                coverages_to_probabilities = [x / sum_coverages for x in coverages]
            elif probability_variant == "squared":
                coverages_squared = [x ** 2 for x in coverages]
                sum_coverages_squared = sum(coverages_squared)
                coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in coverages]
            elif probability_variant == "softmax":
                # Softmax with normalization to prevent overflow
                coverages_max = max(coverages)
                coverages_for_softmax = [a_coverage - coverages_max for a_coverage in coverages]
                coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(
                    np.exp(np.asarray(coverages_for_softmax) / temperature))
            chosen_soft_clause_index = np.random.choice(list(range(0, len(all_soft_coverage_bitvectors))),
                                                        p=coverages_to_probabilities)
        chosen_soft_coverage_bitvector = combined_soft_coverage_bitvectors[chosen_soft_clause_index]
        if chosen_soft_clause_index < len(ind1_soft_constraints):
            chosen_soft_clause = ind1_soft_constraints[chosen_soft_clause_index]
        else:
            chosen_soft_clause = ind2_soft_constraints[chosen_soft_clause_index - len(ind1_soft_constraints)]
        chosen_soft_clauses.append(chosen_soft_clause)
        chosen_soft_clause_indices.append(chosen_soft_clause_index)
        chosen_soft_clauses_bitvector = chosen_soft_coverage_bitvector

    for i in range(len(chosen_hard_clauses)):
        hard_clause = chosen_hard_clauses[i]
        # We can safely set ind1 and ind2 to the same computed smart combination, as only one of them will make it
        # to the next generation
        ind1[i] = hard_clause
        ind2[i] = hard_clause
    for i in range(len(chosen_soft_clauses)):
        soft_clause = chosen_soft_clauses[i]
        ind1[num_hard+i] = soft_clause
        ind2[num_hard+i] = soft_clause
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def smart_clause_crossover(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None):\n allow_duplicates = False # allow_duplicates denotes whether the resulting indivuals may contain duplicate clauses\n number_of_clauses = len(ind1)\n all_clauses = ind1+ind2\n chosen_clauses = []\n chosen_clause_indices = []\n ind1_coverage_bitvectors = compute_clause_coverage_bitvectors(ind1, examples, clause_bitvector_cache=clause_bitvector_cache)\n ind2_coverage_bitvectors = compute_clause_coverage_bitvectors(ind2, examples, clause_bitvector_cache=clause_bitvector_cache)\n all_coverage_bitvectors = ind1_coverage_bitvectors + ind2_coverage_bitvectors\n\n for i in range(0, number_of_clauses):\n if i == 0:\n combined_coverage_bitvectors = all_coverage_bitvectors\n else:\n combined_coverage_bitvectors = [combine_coverage_bitvectors(chosen_clauses_bitvector, bitvector, examples)\n for bitvector in all_coverage_bitvectors]\n if not allow_duplicates:\n for index in chosen_clause_indices:\n for j in range(len(combined_coverage_bitvectors)):\n if all_clauses[index] == all_clauses[j]:\n combined_coverage_bitvectors[j] = [0] * len(examples)\n combined_coverages = [sum(coverage_bitvector) for coverage_bitvector in combined_coverage_bitvectors]\n if greedy:\n best_coverage = max(combined_coverages)\n best_indices = [i for i in range(len(combined_coverages)) if combined_coverages[i] == best_coverage]\n chosen_clause_index = random.choice(best_indices)\n else:\n if probability_variant == \"linear\":\n sum_coverages = sum(combined_coverages)\n coverages_to_probabilities = [x / sum_coverages for x in combined_coverages]\n elif probability_variant == \"squared\":\n coverages_squared = [x ** 2 for x in combined_coverages]\n sum_coverages_squared = sum(coverages_squared)\n coverages_to_probabilities = [x ** 2 / sum_coverages_squared for x in combined_coverages]\n elif probability_variant == \"softmax\":\n # Softmax with normalization to prevent overflow\n coverages_max = max(combined_coverages)\n coverages_for_softmax = [a_coverage - coverages_max for a_coverage in combined_coverages]\n coverages_to_probabilities = np.exp(np.asarray(coverages_for_softmax) / temperature) / sum(\n np.exp(np.asarray(coverages_for_softmax) / temperature))\n\n chosen_clause_index = np.random.choice(list(range(0, len(all_coverage_bitvectors))),\n p=coverages_to_probabilities)\n chosen_coverage_bitvector = combined_coverage_bitvectors[chosen_clause_index]\n if chosen_clause_index < number_of_clauses:\n chosen_clause = ind1[chosen_clause_index]\n else:\n chosen_clause = ind2[chosen_clause_index - number_of_clauses]\n\n chosen_clauses.append(chosen_clause)\n chosen_clause_indices.append(chosen_clause_index)\n chosen_clauses_bitvector = chosen_coverage_bitvector\n\n for i in range(len(chosen_clauses)):\n clause = chosen_clauses[i]\n # We can safely set ind1 and ind2 to the same computed smart combination, as only one of them will make it\n # to the next generation\n ind1[i] = clause\n ind2[i] = clause", "def solve(num_wizards, num_constraints, wizards, constraints):\n\n # print(num_wizards)\n # print(num_constraints)\n # print(wizards)\n # print(constraints)\n # node_set = set(wizards)\n \n\n\n def cost(sol,num_constraints,constraints):\n constraints_satisfied = 0\n constraints_failed = []\n output_ordering_map = {k: v for v, k in enumerate(sol)}\n for c in constraints:\n\n m = output_ordering_map # Creating an alias for easy reference\n\n wiz_a = m[c[0]]\n wiz_b = m[c[1]]\n wiz_mid = m[c[2]]\n\n if 
(wiz_a < wiz_mid < wiz_b) or (wiz_b < wiz_mid < wiz_a):\n constraints_failed.append(c)\n else:\n constraints_satisfied += 1\n return num_constraints - constraints_satisfied\n\n def neighbors(sol):\n wiz1 = random.randint(0,num_wizards-1)\n wiz2 = random.randint(0,num_wizards-1)\n\n new_sol = copy.copy(sol)\n temp = new_sol[wiz1]\n new_sol[wiz1] = new_sol[wiz2]\n new_sol[wiz2] = temp\n \n return new_sol\n\n def acceptance_probability(old_cost,new_cost,T):\n exponent = (old_cost - new_cost) / T\n \n try:\n ans = math.exp(exponent)\n except OverflowError:\n ans = float('inf')\n return ans\n\n\n def anneal(solution, num_constraints, constraints):\n old_cost = 0\n new_cost = 0\n old_cost = cost(solution,num_constraints,constraints)\n T = 1.0\n T_min = 0.000001\n alpha = 0.98\n while T > T_min:\n i = 1\n while i <= 1000:\n new_solution = neighbors(solution)\n new_cost = cost(new_solution,num_constraints,constraints)\n if new_cost == 0:\n return new_solution,new_cost\n ap = acceptance_probability(old_cost, new_cost, T)\n if ap > random.random():\n solution = new_solution\n old_cost = new_cost\n i += 1\n T = T*alpha\n return solution, old_cost\n\n s = copy.copy(wizards)\n random.shuffle(s)\n ret = anneal(s,num_constraints,constraints)\n \n for i in range(10):\n if ret[1] == 0:\n break\n random.shuffle(s)\n new_ret = anneal(s,num_constraints,constraints)\n print(i)\n if new_ret[1] < ret[1]:\n ret = new_ret\n print(\"constraints failed: {0}\".format(ret[1]))\n return ret[0]", "def solvePostOverlap(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q):\n \"\"\"Contains a dummy target for defenders and attackers\"\"\"\n # Add the extra dummy target\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n placements = getPlacements(defenders, targetNumWithDummies)\n omegaKeys = getOmegaKeys(aTypes, placements, attackerActions)\n\n # Build the model\n model = Model('PrimalWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, ub=1, name=\"w\")\n objectiveFunction = sum([q[lam] * sum([w[s,a,lam] * defenderSocialUtility(s,a,defenders,_dRewards,_dCosts,_dPenalties) for s in placements for a in attackerActions]) for lam in aTypes])\n c1 = [sum([w[s,a,lam] * aUtility(s,a,lam,_aPenalties,_aRewards) for s in placements]) \\\n >= sum([w[s,a,lam] * aUtility(s,b,lam,_aPenalties,_aRewards) for s in placements])\n for lam in aTypes for a in attackerActions for b in attackerActions if a != b]\n c1 = [constraint for constraint in c1 if not isinstance(constraint, bool)]\n c1 = model.add_constraints(c1)\n c2 = model.add_constraints([sum([q[lam] * sum([w[s,a,lam] * utilityM(d,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes]) \\\n >= sum([q[lam] * sum([w[s,a,lam] * utilityM(e,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes])\n for m in defenders for d in targetRange for e in targetRange if d!=e])\n c3 = model.add_constraints([sum([w[(s,a,lam)] for s in placements for a in 
attackerActions]) == 1 for lam in aTypes])\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n # Now that w contains all the outcomes and their probabilities, sum the attacker utilities up.\n utilityPerAttacker = 0\n for k,v in w.items():\n prob = float(v)\n s,a,lam = k\n utilityPerAttacker += aUtility(s,a,lam,_aPenalties,_aRewards) * prob\n utilityPerAttacker /= len(aTypes)\n utilityPerDefender = model.solution.get_objective_value()\n utilityPerDefender /= len(defenders)\n\n return utilityPerDefender, utilityPerAttacker, None", "def test_cost_mixed():\n # boundaries:\n vm_max = 1.05\n vm_min = 0.95\n\n # create net\n net = pp.create_empty_network()\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)\n pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)\n pp.create_gen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50,\n min_q_kvar=-50)\n pp.create_ext_grid(net, 0)\n pp.create_load(net, 1, p_kw=20, controllable=False, max_q_kvar=50, max_p_kw=100, min_p_kw=50,\n min_q_kvar=-50)\n pp.create_line_from_parameters(net, 0, 1, 50, name=\"line2\", r_ohm_per_km=0.876,\n c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=100 * 690)\n\n # testing some combinations\n pp.create_polynomial_cost(net, 0, \"gen\", np.array([0, 1, 0]))\n pp.runopp(net, verbose=False)\n assert net[\"OPF_converged\"]\n assert net.res_cost == - net.res_gen.p_kw.values\n\n net.polynomial_cost.c.at[0] = np.array([[1, 0, 0]])\n pp.runopp(net, verbose=False)\n assert net[\"OPF_converged\"]\n assert net.res_cost - net.res_gen.p_kw.values**2 < 1e-5\n\n net.polynomial_cost.c.at[0] = np.array([[1, 0, 1]])\n pp.runopp(net, verbose=False)\n assert net[\"OPF_converged\"]\n assert net.res_cost - net.res_gen.p_kw.values**2 - 1 < 1e-5\n\n net.load.controllable.at[0] = True\n pp.runopp(net, verbose=False)\n assert net.res_cost - net.res_gen.p_kw.values ** 2 - 1 < 1e-5\n\n pp.create_piecewise_linear_cost(net, 0, \"load\", np.array([[0, 0], [100, 100]]), type=\"p\")\n pp.runopp(net, verbose=False)\n assert net.res_cost - net.res_gen.p_kw.values ** 2 - 1 - net.res_load.p_kw.values < 1e-5", "def solveExOverlap(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q):\n \"\"\"Contains a dummy target for defenders and attackers\"\"\"\n \"\"\"In this game the attacker and defender reason ex-ante\n (they choose to follow signals or not before a signal is sent).\"\"\"\n # Add the extra dummy target\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n placements = getPlacements(defenders, targetNumWithDummies)\n omegaKeys = getOmegaKeys(aTypes, placements, attackerActions)\n\n # Build the model\n model = Model('ExAnteWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, ub=1, name=\"w\")\n objectiveFunction = sum([q[lam] * sum([w[sd,sa,lam] * defenderSocialUtility(sd,sa,defenders,_dRewards,_dCosts,_dPenalties) for sd in placements for sa in attackerActions]) for lam in aTypes])\n # Define the constraints\n c1 = [sum([w[sd,sa,lam] * 
aUtility(sd,sa,lam,_aPenalties,_aRewards) for sd in placements for sa in attackerActions]) \\\n >= sum([w[sd,sa,lam] * aUtility(sd,tPrime,lam,_aPenalties,_aRewards) for sd in placements for sa in attackerActions])\n for lam in aTypes for tPrime in targetRange]\n c1 = [constraint for constraint in c1 if not isinstance(constraint, bool)]\n c2 = [sum([q[lam] * sum([w[sd,sa,lam] * utilityM(sd[d],sd,sa,d,_dRewards,_dPenalties,_dCosts) for sd in placements for sa in attackerActions]) for lam in aTypes]) \\\n >= sum([q[lam] * sum([w[sd,sa,lam] * utilityM(tPrime,sd,sa,d,_dRewards,_dPenalties,_dCosts) for sd in placements for sa in attackerActions]) for lam in aTypes]) \\\n for d in defenders for tPrime in targetRange]\n c3 = [sum([w[sd,sa,lam] for sd in placements for sa in attackerActions]) == 1\n for lam in aTypes]\n # Add the constraints\n c1 = model.add_constraints(c1)\n c2 = model.add_constraints(c2)\n c3 = model.add_constraints(c3)\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n # Now that w contains all the outcomes and their probabilities, sum the attacker utilities up.\n utilityPerAttacker = 0\n for k,v in w.items():\n prob = float(v)\n s,a,lam = k\n utilityPerAttacker += aUtility(s,a,lam,_aPenalties,_aRewards) * prob\n utilityPerAttacker /= len(aTypes)\n utilityPerDefender = model.solution.get_objective_value()\n utilityPerDefender /= len(defenders)\n return utilityPerDefender, utilityPerAttacker, None", "def discretise_solution(solution, objective, *constraints, neighbourhood=0) -> Tuple[int, Dict[str, int]]:\n floors = {var: int(solution[var]) for var in solution}\n headroom = BUDGET - sum(floors.values())\n\n candidates = itertools.product(*(range(floor - neighbourhood, floor + neighbourhood + headroom + 1) for floor in floors.values()))\n cand_maps = (dict(zip(solution.keys(), cand)) for cand in candidates)\n constrained_candidates = filter(lambda cand: all(cons.subs(cand) == 0 for cons in constraints), cand_maps)\n cand_scores = ((objective.subs(cand), cand) for cand in constrained_candidates)\n\n return max(cand_scores, key=lambda x: x[0])", "def matched_uniform_crossover(ind1, ind2):\n # We calculate the pairwise match between ind1's and ind2's clauses\n match_matrix = np.zeros((len(ind1), len(ind2)))\n for i in range(len(ind1)):\n clause1 = ind1[i]\n for j in range(len(ind2)):\n clause2 = ind2[j]\n curr_syntactic_match = 0\n for k in range(len(clause1)):\n if k != len(clause1) - 1:\n if clause1[k] == clause2[k]:\n curr_syntactic_match += 1\n else:\n curr_syntactic_match += (1 - abs(clause1[k] - clause2[k]))\n match_matrix[i][j] = curr_syntactic_match\n\n # Arg-sort the pairwise clause matches from best to worst match\n matches_ordered = np.dstack(np.unravel_index(np.argsort(match_matrix.ravel())[::-1], (len(ind1), len(ind2))))[0]\n\n # Finally match the clauses, making sure that each clause is only matched once\n # Then perform uniform crossover on matched clauses\n ind1_matched_clauses = set()\n ind2_matched_clauses = set()\n count = 0\n for match in matches_ordered:\n i = match[0]\n j = match[1]\n if match_matrix[i][j] >= len(ind1[0])//2:\n if i not in ind1_matched_clauses and j not in ind2_matched_clauses:\n count += 1\n # Perform the uniform crossover\n for k in range(len(ind1[i])):\n if random.random() < 0.5:\n temp = ind1[i][k]\n ind1[i][k] = ind2[j][k]\n ind2[j][k] = temp\n ind1_matched_clauses.add(i)\n ind2_matched_clauses.add(j)", "def solveExNoOverlap(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q):\n 
\"\"\"Contains as many dummy targets as defenders, for defenders and attackers\"\"\"\n \"\"\"In this game the attacker and defender reason ex-ante\n (they choose to follow signals or not before a signal is sent).\"\"\"\n # Add the extra dummy targets\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n # Get the suggestions that occur with no overlap\n overlapPlacements = getPlacements(defenders, targetNumWithDummies)\n placements = list(filter(lambda x: len(set(x)) == len(x), overlapPlacements))\n omegaKeys = getOmegaKeys(aTypes, placements, attackerActions)\n\n # Build the model\n model = Model('ExAnteWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, ub=1, name=\"w\")\n objectiveFunction = sum([q[lam] * sum([w[sd,sa,lam] * defenderSocialUtility(sd,sa,defenders,_dRewards,_dCosts,_dPenalties) for sd in placements for sa in attackerActions]) for lam in aTypes])\n # Define the constraints\n c1 = [sum([w[sd,sa,lam] * aUtility(sd,sa,lam,_aPenalties,_aRewards) for sd in placements for sa in attackerActions]) \\\n >= sum([w[sd,sa,lam] * aUtility(sd,tPrime,lam,_aPenalties,_aRewards) for sd in placements for sa in attackerActions])\n for lam in aTypes for tPrime in targetRange]\n c1 = [constraint for constraint in c1 if not isinstance(constraint, bool)]\n c2 = [sum([q[lam] * sum([w[sd,sa,lam] * utilityM(sd[d],sd,sa,d,_dRewards,_dPenalties,_dCosts) for sd in placements for sa in attackerActions]) for lam in aTypes]) \\\n >= sum([q[lam] * sum([w[sd,sa,lam] * utilityM(tPrime,sd,sa,d,_dRewards,_dPenalties,_dCosts) for sd in placements for sa in attackerActions]) for lam in aTypes]) \\\n for d in defenders for tPrime in targetRange]\n c3 = [sum([w[sd,sa,lam] for sd in placements for sa in attackerActions]) == 1\n for lam in aTypes]\n # Add the constraints\n c1 = model.add_constraints(c1)\n c2 = model.add_constraints(c2)\n c3 = model.add_constraints(c3)\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n # Now that w contains all the outcomes and their probabilities, sum the attacker utilities up.\n utilityPerAttacker = 0\n for k,v in w.items():\n prob = float(v)\n s,a,lam = k\n utilityPerAttacker += aUtility(s,a,lam,_aPenalties,_aRewards) * prob\n utilityPerAttacker /= len(aTypes)\n utilityPerDefender = model.solution.get_objective_value()\n utilityPerDefender /= len(defenders)\n return utilityPerDefender, utilityPerAttacker, None", "def uniform_crossover(self, another_individual):\n size = len(another_individual.get_genotype())\n genotype1 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n genotype2 = numpy.zeros(size, dtype=another_individual.get_genotype().dtype)\n mask = numpy.random.choice([True,False], size=size)\n not_mask = numpy.logical_not(mask)\n genotype1[mask] = self.get_genotype()[mask]\n genotype1[not_mask] = another_individual.get_genotype()[not_mask]\n genotype2[mask] = another_individual.get_genotype()[mask]\n genotype2[not_mask] = self.get_genotype()[not_mask]\n\n return optimization.Individual(genotype1, self.fitness_evaluator, self.uniform_crossover, 
self.mutation_method), optimization.Individual(genotype2, self.fitness_evaluator, self.uniform_crossover, self.mutation_method)", "def test_penalty_compute(self):\n objective = {'G': numpy.zeros((9)),\n 'H': numpy.diag((1,)*9),\n 'X': 1}\n for penalty in self.penalties:\n result = penalty.compute([1]*self.np, objective)\n assert isinstance(result,tuple)\n # more tests go here", "def _compute_imprimitivity(self):\n m = floor(self._.d / 2)\n self._.antipodal = all(full_simplify(\n self._.b[i] - self._.c[self._.d - i]) == 0\n for i in range(self._.d) if i != m)\n self._.bipartite = all(a == 0 for a in self._.a)\n if self._.antipodal:\n try:\n self._.r = integralize(\n 1 + self._.b[m] / self._.c[self._.d - m])\n except TypeError:\n raise InfeasibleError(\"covering index not integral\")\n if self._.d >= 2:\n if self._.d == 2:\n b = [self._.b[0]/(self._.b[1]+1)]\n c = [Integer(1)]\n else:\n b = self._.b[:m]\n c = list(self._.c[1:m+1])\n if is_divisible(self._.d, 2):\n c[-1] *= self._.r\n scheme = self._get_class()(tuple(b), tuple(c))\n else:\n scheme = ASParameters(P=[[1]])\n self._.antipodal_subscheme = self.add_subscheme(scheme,\n self.ANTIPODAL)\n if self._.bipartite:\n if self._.d >= 2:\n b = tuple(self._.b[2*i]*self._.b[2*i+1]/self._.c[2]\n for i in range(m))\n c = tuple(self._.c[2*i+1]*self._.c[2*i+2]/self._.c[2]\n for i in range(m))\n scheme = self._get_class()(b, c)\n else:\n scheme = ASParameters(P=[[1]])\n self._.bipartite_subscheme = self.add_subscheme(scheme,\n self.BIPARTITE)", "def constraint_test():\n import itertools, sys\n\n show_analysis = False\n #Generated via grammar\n gr = grammar.Grammar('grammars/test_constraints.bnf')\n inputs = ([1 for _ in range(100)], [ i%3 for i in range(100)])\n for _input in inputs: \n output = gr.generate(_input)\n azr = analyser.Analyser('test',output['phenotype'],True)\n try:\n azr.create_graph()\n except ValueError as e:\n print(__name__, \"ERROR\", _input, e)\n continue\n azr.parse_graph()\n azr.apply_stresses()\n azr.create_slf_file()\n azr.test_slf_file()\n azr.parse_results()\n azr.print_stresses()\n if show_analysis:\n azr.show_analysis()\n \n #Fixed generated\n lengths = (1000, 10000)\n levels = (5, 10)\n for length_idx, level_idx in itertools.permutations([0,1]):\n try:\n GRAPH = constrained_offset_graph(lengths[length_idx],\n levels[length_idx])\n except ValueError as e:\n print(__name__, \"ERROR\", lengths[length_idx], levels[length_idx], e)\n continue\n GRAPH.save_graph(\"pylon\")\n print \"nodes:\", GRAPH.number_of_nodes()\n print \"edges\", GRAPH.number_of_edges()\n #will it blend?\n azr = analyser.Analyser('test',\"moo\",True)\n azr.my_graph = GRAPH\n azr.parse_graph()\n azr.apply_stresses()\n azr.create_slf_file()\n azr.test_slf_file()\n azr.parse_results()\n azr.print_stresses()\n if show_analysis:\n azr.show_analysis()", "def solveExCompact(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q):\n #\n # NEEDS ATTACKER AVG PER ATTACKER\n #\n \"\"\"In this game the attacker and defender reason ex-ante\n (they choose to follow signals or not before a signal is sent).\"\"\"\n # Add the extra dummy targets\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n 
targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n # Get the suggestions that occur with no overlap\n omegaKeys = [(t,d,tPrime,lam) for t in targetRange for d in defenders for tPrime in targetRange for lam in aTypes]\n omegaKeys2 = [(t,lam) for t in targetRange for lam in aTypes]\n\n # Build the model\n model = Model('ExAnteWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, name=\"w\")\n w2 = model.continuous_var_dict(keys=omegaKeys2, lb=0, name=\"w2\")\n objectiveFunction = sum([q[lam] * sum([w[t,d,t,lam] * _dRewards[d][t] for t in targetRange for d in defenders]) for lam in aTypes]) \\\n + sum([q[lam] * sum([(w2[t,lam] - sum([w[t,d,t,lam] for d in defenders])) * sum([_dPenalties[d][t] for d in defenders]) for t in targetRange]) for lam in aTypes]) \\\n + sum([q[lam] * sum([sum([w[t,d,tPrime,lam] for t in targetRange]) * _dCosts[d][tPrime] for tPrime in targetRange for d in defenders]) for lam in aTypes])\n\n # Define the constraints\n # Attacker\n attackerConstraints = [sum([w[t,d,t,lam] * _aPenalties[lam][t] for t in targetRange for d in defenders]) \\\n + sum([(w2[t,lam] - sum([w[t,d,t,lam] for d in defenders])) * _aRewards[lam][t] for t in targetRange]) \\\n >= sum([w[t,d,tPrime,lam] * _aPenalties[lam][tPrime] for t in targetRange for d in defenders]) \\\n + sum([(w2[t,lam] - sum([w[t,d,tPrime,lam] for d in defenders])) * _aRewards[lam][tPrime] for t in targetRange]) \\\n for lam in aTypes for tPrime in targetRange]\n # Defender\n defenderConstraints = [sum([q[lam] * sum([w[t,d,t,lam] * _dRewards[d][t] for t in targetRange]) for lam in aTypes]) \\\n + sum([q[lam] * sum([(w2[t,lam] - sum([w[t,d,t,lam] for d in defenders])) * _dPenalties[d][t] for t in targetRange]) for lam in aTypes]) \\\n + sum([q[lam] * sum([sum([w[t,d,tPrimePrime,lam] for t in targetRange]) * _dCosts[d][tPrimePrime] for tPrimePrime in targetRange]) for lam in aTypes]) \\\n >= sum([q[lam] * w2[tPrime,lam] * _dRewards[d][tPrime] for lam in aTypes]) \\\n + sum([q[lam] * sum([sum([w[t,dPrime,t,lam] for dPrime in defenders if dPrime != d]) * _dRewards[d][t] for t in targetRange if t != tPrime]) for lam in aTypes]) \\\n + sum([q[lam] * sum([w[t,d,t,lam] * _dPenalties[d][t] for t in targetRange if t != tPrime]) for lam in aTypes]) \\\n + sum([q[lam] * sum([(w2[t,lam] - sum([w[t,dPrime,t,lam] for dPrime in defenders])) * _dPenalties[d][t] for t in targetRange if t != tPrime]) for lam in aTypes]) \\\n + _dCosts[d][tPrime] \\\n for d in defenders for tPrime in targetRange]\n # Proposition constraints\n p11Constraints = [sum([w2[t,lam] for t in targetRange]) == 1 for lam in aTypes]\n p12Constraints = [sum([w[t,d,tPrime,lam] for tPrime in targetRange]) == w2[t,lam] for lam in aTypes for d in defenders for t in targetRange]\n p13Constraints = [sum([w[t,d,tPrime,lam] for d in defenders]) <= w2[t,lam] for lam in aTypes for tPrime in targetRange for t in targetRange]\n # Add the constraints\n attackerConstraints = model.add_constraints(attackerConstraints)\n defenderConstraints = model.add_constraints(defenderConstraints)\n p11Constraints = model.add_constraints(p11Constraints)\n p12Constraints = model.add_constraints(p12Constraints)\n p13Constraints = model.add_constraints(p13Constraints)\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n print(model.get_solve_status())\n return model.solution.get_objective_value(), model, None", "def solvePostNoOverlap(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, 
aRewards, aPenalties, q):\n \"\"\"Contains as many dummy targets as defenders, for defenders and attackers\"\"\"\n # Add the extra dummy targets\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n # Get the suggestions that occur with no overlap\n overlapPlacements = getPlacements(defenders, targetNumWithDummies)\n placements = list(filter(lambda x: len(set(x)) == len(x), overlapPlacements))\n omegaKeys = getOmegaKeys(aTypes, placements, attackerActions)\n\n # Build the model\n model = Model('PrimalWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, ub=1, name=\"w\")\n objectiveFunction = sum([q[lam] * sum([w[s,a,lam] * defenderSocialUtility(s,a,defenders,_dRewards,_dCosts,_dPenalties) for s in placements for a in attackerActions]) for lam in aTypes])\n c1 = [sum([w[s,a,lam] * aUtility(s,a,lam,_aPenalties,_aRewards) for s in placements]) \\\n >= sum([w[s,a,lam] * aUtility(s,b,lam,_aPenalties,_aRewards) for s in placements])\n for lam in aTypes for a in attackerActions for b in attackerActions if a != b]\n c1 = [constraint for constraint in c1 if not isinstance(constraint, bool)]\n c1 = model.add_constraints(c1)\n c2 = model.add_constraints([sum([q[lam] * sum([w[s,a,lam] * utilityM(d,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes]) \\\n >= sum([q[lam] * sum([w[s,a,lam] * utilityM(e,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes])\n for m in defenders for d in targetRange for e in targetRange if d!=e])\n c3 = model.add_constraints([sum([w[(s,a,lam)] for s in placements for a in attackerActions]) == 1 for lam in aTypes])\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n # Now that w contains all the outcomes and their probabilities, sum the attacker utilities up.\n utilityPerAttacker = 0\n for k,v in w.items():\n prob = float(v)\n s,a,lam = k\n utilityPerAttacker += aUtility(s,a,lam,_aPenalties,_aRewards) * prob\n utilityPerAttacker /= len(aTypes)\n utilityPerDefender = model.solution.get_objective_value()\n utilityPerDefender /= len(defenders)\n return utilityPerDefender, utilityPerAttacker, None", "def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring", "def smart_clause_crossover_dispatch(ind1, ind2, examples, greedy=True, probability_variant=None, temperature=1, clause_bitvector_cache=None, use_infeasibility=False):\n if use_infeasibility:\n smart_clause_crossover_infeasibility(ind1, ind2, examples, 
greedy=greedy, probability_variant=probability_variant, temperature=temperature, clause_bitvector_cache=clause_bitvector_cache)\n else:\n smart_clause_crossover(ind1, ind2, examples, greedy=greedy, probability_variant=probability_variant, temperature=temperature, clause_bitvector_cache=clause_bitvector_cache)", "def calculate_sub_cost(source, target, sub_cost = 2):\r\n \r\n if source == target:\r\n return 0\r\n else:\r\n return sub_cost", "def smooth_input(xs, ys, L):\n n = len(xs)\n\n # obj = [1 for i in range(n)]\n # for i in range(2 * n):\n # obj.append(0)\n\n # Create the model\n model = LpProblem(name=\"small-problem\", sense=LpMinimize)\n ws = [LpVariable(name=\"w_{}\".format(i), lowBound=0, upBound=1) for i in range(n)]\n ls = [LpVariable(name=\"L_{}\".format(i), lowBound=0) for i in range(n)]\n zs = [LpVariable(name=\"z_{}\".format(i)) for i in range(n)]\n\n # objective\n model += lpSum(ws)\n\n # constraint 1:\n # sum of Li <= L\n model += (lpSum(ls) <= L * n, \"sum of Li <= L\")\n\n # Constraint 2:\n # w_i >= |z_i - y_i|\n for i in range(n):\n model += (ws[i] + zs[i] >= ys[i], \"C2.a_{}\".format(i))\n model += (ws[i] - zs[i] >= -ys[i], \"C2.b_{}\".format(i))\n\n # Constraint 3\n # |z_i - z_j| <= L_i * dist(x_i, x_j)\n for i in range(n):\n for j in range(n):\n if i != j:\n model += (zs[i] - zs[j] - abs(xs[i] - xs[j]) * ls[i] <= 0, \"C3.a_{}_{}\".format(i, j))\n model += (zs[j] - zs[i] - abs(xs[i] - xs[j]) * ls[i] <= 0, \"C3.b_{}_{}\".format(i, j))\n\n if model.solve() == 1:\n print(\n \"------------------------------------\\nFound solution for the linear program\\n------------------------------------\\n\")\n return [[xs[i], zs[i].value()] for i in range(n)]\n # return [zi.value() for zi in zs], [li.value() for li in ls]\n\n print(\"Linear program: no solution found\")\n exit(1)\n return -1", "def solve(targets, \n payoff,\n defender_resources:int=1, \n attacker_resources:int=1, \n ptype:str=\"MILP\", \n minimax:str=\"maximize\"):\n # Need a big number. 
Will lower bound later\n M = 9999\n\n p = cplex.Cplex()\n if ptype in (\"milp\", \"MILP\"):\n p.set_problem_type(cplex.Cplex.problem_type.MILP)\n else:\n print(\"Problem type:\",ptype,\"is not currently supported\")\n exit(1)\n\n if minimax in (\"max\",\"maximize\"):\n p.objective.set_sense(p.objective.sense.maximize)\n elif minimax in (\"min\",\"minimize\"):\n p.objective.set_sense(p.objective.sense.minimize)\n else:\n print(\"Only solves maximization or minimization problems\")\n\n num_targets = len(targets)\n # v is the z's, x's, v_def, and v_att\n v = [\"z\"+str(t) for t in range(num_targets)] \\\n + [\"x\"+str(t) for t in range(num_targets)] \\\n + [\"v_def\",\"v_att\"] \n num_variables = len(v)\n obj = np.zeros(num_variables)\n for i in range(num_variables):\n if v[i] == \"v_def\":\n obj[i] = 1.\n lb = np.zeros(num_variables)\n ub = np.ones(num_variables)\n for i in range(num_variables):\n if v[i] in (\"v_def\",\"v_att\"):\n ub[i] = cplex.infinity\n lb[i] = -1*cplex.infinity\n\n p.variables.add(obj = obj, # Objective function\n lb = lb, # Lower bound\n ub = ub, # Upper bound\n names = v) # Variable names\n # z_i \\in {0,1} Set all z_i to integer values\n [p.variables.set_types([(\"z\"+str(t),p.variables.type.integer)]) for t in range(num_targets)]\n # x_i \\in [0,1] Set all x_i to continuous values\n [p.variables.set_types([(\"x\"+str(t),p.variables.type.continuous)]) for t in range(num_targets)]\n # Also set for attacker and defender\n p.variables.set_types([(\"v_def\",p.variables.type.continuous)])\n p.variables.set_types([(\"v_att\",p.variables.type.continuous)])\n\n util_du = [M+payoff[i][2] for i in range(num_targets)]\n util_dc = [payoff[i][3] for i in range(num_targets)]\n util_ac = [M+payoff[i][3] for i in range(num_targets)]\n init_params = np.array([1.,defender_resources])\n rhs = np.hstack((init_params, util_du, util_dc, util_ac))\n\n senses = [\"E\",\"L\"] \\\n + [\"L\" for i in range(num_targets)] \\\n + [\"G\" for i in range(num_targets)]\\\n + [\"L\" for i in range(num_targets)]\n \n\n constraints = []\n zl = []\n zc = []\n xl = []\n xc = []\n for t in range(num_targets):\n zl.append(\"z\"+str(t))\n zc.append(1.)\n xl.append(\"x\"+str(t))\n xc.append(1.)\n constraints.append([zl,zc])\n constraints.append([xl,xc])\n\n # Defender's utility\n # Interleave vars and coefficients\n # Easier doing it this way that inline loops\n def_util_vars = []#np.zeros(num_targets*3)\n def_util_coef = []#np.zeros(num_targets*3)\n def_util = []\n for i in range(num_targets):\n def_util_vars = ([\"v_def\", \"x\"+str(i), \"z\"+str(i)])\n def_util_coef = ([1., (payoff[i][2] - payoff[i][1]), M])\n constraints.append([def_util_vars, def_util_coef])\n\n\n\n # Attacker strats\n att_strat_vars = []\n att_strat_coef = []\n att_strat = []\n for i in range(num_targets):\n att_strat_vars = ([\"v_att\", \"x\"+str(i)])\n att_strat_coef = ([1., payoff[i][3] - payoff[i][4]])\n constraints.append([att_strat_vars,att_strat_coef])\n\n\n # Attacker utility\n att_util_vars = []\n att_util_coef = []\n att_util = []\n for i in range(num_targets):\n att_util_vars = ([\"v_att\", \"x\"+str(i), \"z\"+str(i)])\n att_util_coef = ([1., payoff[i][3] - payoff[i][4], M])\n constraints.append([att_util_vars, att_util_coef])\n\n # Throw them all together\n constraint_names = [\"r\"+str(i) for i in range(len(constraints))]\n\n p.linear_constraints.add(lin_expr = constraints,\n senses = senses,\n rhs = rhs,\n names = constraint_names)\n p.solve()\n return p.solution.get_values()", "def dual_problem(\n states: 
list[np.ndarray], probs: list[float] = None, dist_method=\"min-error\"\n) -> float:\n constraints = []\n meas = []\n\n dim_x, _ = states[0].shape\n\n y_var = cvxpy.Variable((dim_x, dim_x), hermitian=True)\n objective = cvxpy.Minimize(cvxpy.trace(cvxpy.real(y_var)))\n\n dim = int(np.log2(dim_x))\n dim_list = [2] * int(np.log2(dim_x))\n sys_list = list(range(1, dim, 2))\n # dim_list = [3, 3]\n\n if dist_method == \"min-error\":\n for i, _ in enumerate(states):\n meas.append(cvxpy.Variable((dim_x, dim_x), PSD=True))\n constraints.append(\n cvxpy.real(y_var - probs[i] * states[i])\n >> partial_transpose(meas[i], sys=sys_list, dim=dim_list)\n )\n\n if dist_method == \"unambiguous\":\n for j, _ in enumerate(states):\n sum_val = 0\n for i, _ in enumerate(states):\n if i != j:\n sum_val += cvxpy.real(cvxpy.Variable()) * probs[i] * states[i]\n meas.append(cvxpy.Variable((dim_x, dim_x), PSD=True))\n constraints.append(\n cvxpy.real(y_var - probs[j] * states[j] + sum_val)\n >> partial_transpose(meas[j], sys=sys_list, dim=dim_list)\n )\n\n meas.append(cvxpy.Variable((dim_x, dim_x), PSD=True))\n constraints.append(\n cvxpy.real(y_var) >> partial_transpose(meas[-1], sys=sys_list, dim=dim_list)\n )\n\n problem = cvxpy.Problem(objective, constraints)\n sol_default = problem.solve()\n\n # print(np.around(y_var.value, decimals=3))\n\n return sol_default", "def minimize(self, evaluate, constrainToLower=False, constrainToUpper=False):\n improved = array([0,0,0])\n #------------------------------------------------\n for index, member in enumerate(self.population):\n #------------------------------------------------\n source = self.population[randrange(len(self.population))]\n x = member.copyAndModify(self.maxMutations, self.scale, source, self.maxIndexes)\n if constrainToLower:\n x = maximum(self.lowerDomain, x)\n if constrainToUpper:\n x = minimum(self.upperDomain, x)\n #------------------------------------------------\n loss = evaluate(x)\n #------------------------------------------------\n if index == self.diversityIndex:\n self.diversity.update(x, loss)\n self.diversityLoss = loss\n #------------------------------------------------\n if loss < self.eliteLoss:\n member.update(x, loss)\n self.eliteIndex = index\n self.eliteLoss = loss\n improved[0] += 1\n else:\n slot = randrange(len(self.population))\n slotMember = self.population[slot]\n if (slot != self.diversityIndex) and (loss <= slotMember.loss):\n # --------------------------------------------------\n slotMember.update(x, loss)\n improved[1] += 1\n # --------------------------------------------------\n elif (index != self.diversityIndex) and (loss <= member.loss):\n # --------------------------------------------------\n member.update(x, loss)\n improved[2] += 1\n # --------------------------------------------------\n #------------------------------------------------\n # --------------------------------------------------\n # reduce the scale if there were less than 'self.minImprovements' \n # improved members in the population.\n if sum(improved) < self.minImprovements:\n self.scale *= self.gamma\n # --------------------------------------------------\n self.improvements += improved", "def run(self, no_improv_gen):\r\n bestvalue = min(self.cost_populations)\r\n no_improvement_tries = 0\r\n starttime = timeit.default_timer()\r\n\r\n while no_improvement_tries < no_improv_gen:\r\n endtime = timeit.default_timer()\r\n print(f\"Best value: {bestvalue}, no improvement tries: {no_improvement_tries}, time:{endtime - starttime}\")\r\n\r\n 
self.improve_population()\r\n self.sort_values()\r\n self.make_parents()\r\n self.parents_loop()\r\n \r\n # add best of the old population to the population\r\n while len(self.district_population) < self.population_size:\r\n index = self.best_costs.index(min(self.best_costs))\r\n self.cost_populations.append(self.best_costs[index])\r\n self.district_population.append(self.best_districts[index])\r\n del self.best_costs[index]\r\n del self.best_districts[index]\r\n\r\n if min(self.cost_populations) < bestvalue:\r\n bestvalue = min(self.cost_populations)\r\n no_improvement_tries = 0\r\n else:\r\n no_improvement_tries += 1\r\n \r\n self.best_districts = []\r\n self.best_costs = []\r\n self.worst_districts = []\r\n \r\n bestdistrict = self.cost_populations.index(bestvalue)\r\n return self.district_population[bestdistrict]", "def Example_2():\r\n print \"\\n** Example_2: Finding the minimum of the Rosenbrock function with 2 variables under constraints **\"\r\n\r\n Ex = optim_wrapper()\r\n X0 = np.zeros(2)\r\n lim = [(-2.0, 2.0)]*2\r\n Ex.set_X0(X0)\r\n Ex.set_lim(lim)\r\n Ex.set_penalties_func(pen)\r\n Ex.set_norm_count(200)\r\n Ex.set_nb_best(100)\r\n Ex.set_obj_func(obj)\r\n Ex.set_wrapper()\r\n Ex.launch_multi_opti()\r\n print Ex\r\n\r\n X_solution = [1.0, 1.0]\r\n res_string = \"Results of the optimisation: {:03.4f}, expected results: {:03.4f}\".format(obj(Ex.get_res()), obj(X_solution))\r\n print res_string\r\n print \"*\" * len(res_string)", "def gen2_constraint(model):\n return 20, model.g[2], 100", "def anneal(solution):\n old_cost = cost(solution)\n T = 1.0\n T_min = 0.00001\n ALPHA = 0.9\n\n while T > T_min:\n i = 1\n while i <= 100:\n new_solution = neighbor(solution)\n new_cost = cost(new_solution)\n ap = acceptance_probability(old_cost, new_cost, T)\n if ap > random.random():\n solution = new_solution\n old_cost = new_cost\n i += 1\n T = T * ALPHA\n\n return solution, old_cost", "def test_cost_consideration():\n # input\n net = create_test_net()\n idx = pp.create_sgen(net, 1, 1.3, index=2)\n pp.create_poly_cost(net, idx, \"sgen\", 2.3, index=4)\n pp.runpp(net)\n assert all(net.sgen.index.values == np.array([0, 5, 2]))\n assert all(net.poly_cost.element == np.array([0, 0, 5, 2]))\n\n for cost_type in [\"poly_cost\", \"pwl_cost\"]:\n\n if cost_type == \"pwl_cost\":\n for poly in net.poly_cost.itertuples():\n net.poly_cost.drop(poly.Index, inplace=True)\n pp.create_pwl_cost(net, poly.element, poly.et, [[0, 20, 1]], index=poly.Index)\n\n # eq generation\n boundary_buses = [0, 2]\n internal_buses = [1]\n eq_net1 = pp.grid_equivalents.get_equivalent(net, \"rei\", boundary_buses, internal_buses)\n eq_net2 = pp.grid_equivalents.get_equivalent(net, \"rei\", boundary_buses, internal_buses,\n return_internal=False)\n\n # check elements\n check_elements_amount(eq_net1, {\"bus\": 6, \"load\": 3, \"sgen\": 3, \"shunt\": 5, \"ext_grid\": 1,\n \"line\": 2, \"impedance\": 10, cost_type: 4},\n check_all_pp_elements=True)\n check_elements_amount(eq_net2, {\"bus\": 5, \"load\": 3, \"sgen\": 2, \"shunt\": 5, \"ext_grid\": 1,\n \"impedance\": 10, cost_type: 3},\n check_all_pp_elements=True)\n assert all(eq_net1.sgen.index.values == np.array([0, 1, 2])) # simple create_sgen()\n # without index=... 
expected\n assert all(eq_net2.sgen.index.values == np.array([0, 1]))\n\n # --- check poly cost\n # eq_net1\n assert np.all(net[cost_type].loc[net[cost_type].et == \"ext_grid\"].values ==\n eq_net1[cost_type].loc[eq_net1[cost_type].et == \"ext_grid\"])\n for i in range(3):\n idx_net = net.sgen.sort_values(\"p_mw\").index[i]\n idx_eq_net = eq_net1.sgen.sort_values(\"p_mw\").index[i]\n assert np.all(net[cost_type].loc[(net[cost_type].element == idx_net) &\n (net[cost_type].et == \"sgen\")].drop(\n columns=[\"element\"]).values ==\n eq_net1[cost_type].loc[(eq_net1[cost_type].element == idx_eq_net) &\n (eq_net1[cost_type].et == \"sgen\")].drop(\n columns=[\"element\"]).values)\n\n # eq_net2\n assert np.all(net[cost_type].loc[net[cost_type].et == \"ext_grid\"].values ==\n eq_net2[cost_type].loc[eq_net2[cost_type].et == \"ext_grid\"])\n for i in range(2):\n idx_net = net.sgen.loc[~net.sgen.bus.isin(boundary_buses+internal_buses)].sort_values(\n \"p_mw\").index[i]\n idx_eq_net = eq_net2.sgen.sort_values(\"p_mw\").index[i]\n assert np.all(net[cost_type].loc[(net[cost_type].element == idx_net) &\n (net[cost_type].et == \"sgen\")].drop(\n columns=[\"element\"]).values ==\n eq_net2[cost_type].loc[(eq_net2[cost_type].element == idx_eq_net) &\n (eq_net2[cost_type].et == \"sgen\")].drop(\n columns=[\"element\"]).values)", "def uniformCostSearch(problem):\r\n\t\"*** YOUR CODE HERE ***\"\r\n\r\n\r\n\tutil.raiseNotDefined()", "def crossover(parent1, parent2):\n child = parent1.clone()\n for k in range(parent1.num_input + parent1.num_output):\n if np.random.randint(2) == 1:\n child.identifiers[k] = parent2.identifiers[k]\n child.inhibitors[k] = parent2.inhibitors[k]\n child.enhancers[k] = parent2.enhancers[k]\n\n child.identifiers = child.identifiers[:(child.num_input +\n child.num_output)]\n child.inhibitors = child.inhibitors[:(child.num_input + child.num_output)]\n child.enhancers = child.enhancers[:(child.num_input + child.num_output)]\n\n p1range = list(range(parent1.num_input + parent1.num_output,\n parent1.size()))\n random.shuffle(p1range)\n p2range = list(range(parent2.num_input + parent2.num_output,\n parent2.size()))\n random.shuffle(p2range)\n\n p1remaining = deepcopy(p1range)\n\n # Crossing regulatory\n p1_gene_count = 0\n p2_gene_count = 0\n for p1idx in p1range:\n min_dist = config.CROSSOVER_THRESHOLD\n paired_idx = None\n for p2idx in p2range:\n gdist = parent1.protein_distance(parent2, p1idx, p2idx)\n if gdist < min_dist:\n min_dist = gdist\n paired_idx = p2idx\n if paired_idx is not None:\n if np.random.randint(2) == 0:\n chosen_parent = parent1\n chosen_idx = p1idx\n p1_gene_count += 1\n else:\n chosen_parent = parent2\n chosen_idx = p2idx\n p2_gene_count += 1\n child.identifiers = np.append(\n child.identifiers, chosen_parent.identifiers[chosen_idx])\n child.inhibitors = np.append(\n child.inhibitors, chosen_parent.inhibitors[chosen_idx])\n child.enhancers = np.append(\n child.enhancers, chosen_parent.enhancers[chosen_idx])\n # Remove from consideration again\n p2range = list(set(p2range) - set([p2idx]))\n p1remaining = list(set(p1remaining) - set([p1idx]))\n\n # Add remaining material\n if child.size() == (child.num_input + child.num_output):\n prob = 0.5\n else:\n prob = p1_gene_count / (p1_gene_count + p2_gene_count)\n\n chosen_parent = parent2\n chosen_range = p2range\n if np.random.random() < prob:\n chosen_parent = parent1\n chosen_range = p1remaining\n\n for idx in chosen_range:\n child.identifiers = np.append(child.identifiers,\n chosen_parent.identifiers[idx])\n child.inhibitors 
= np.append(child.inhibitors,\n chosen_parent.inhibitors[idx])\n child.enhancers = np.append(child.enhancers,\n chosen_parent.enhancers[idx])\n\n child.num_regulatory = child.size() - (child.num_input + child.num_output)\n\n # Cross dynamics\n if np.random.random() < 0.5:\n child.beta = parent1.beta\n else:\n child.beta = parent2.beta\n\n if np.random.random() < 0.5:\n child.delta = parent1.delta\n else:\n child.delta = parent2.delta\n\n return child", "def uniform_cost_search(problem):\n fringe = util.PriorityQueueWithFunction(lambda x: x.get_cost())\n return general_search(problem, fringe)", "def _constraints_utility(self):\n\n def rule(model):\n total = summation(self.utilities, model.A)\n return model.A_total == total\n\n self.model.constrain_A_total = Constraint(rule=rule)\n\n def rule(model):\n total = 2 * summation(self.utilities, model.A2)\n return model.A2_total == total\n\n self.model.constrain_A2_total = Constraint(rule=rule)\n\n def rule(model):\n total = 3 * summation(self.utilities, model.A3)\n return model.A3_total == total\n\n self.model.constrain_A3_total = Constraint(rule=rule)\n\n def rule(model):\n total = 4 * summation(self.utilities, model.A4)\n return model.A4_total == total\n\n self.model.constrain_A4_total = Constraint(rule=rule)\n\n def rule(model):\n completion_bonus = self.task_completion_bonus * self.task_duration\n total = summation(completion_bonus, model.T_total)\n return model.Completion_total == total\n\n self.model.constrain_completion_total = Constraint(rule=rule)\n\n def rule(model):\n scaling = 0.2\n affinity = np.outer(c.AFFINITY_COGNITIVE, self.task_cognitive_load)\n\n # TODO(cathywu) replace this code when \"simple slicing\" is clarified\n zeros1 = np.zeros((1, self.num_tasks))\n zeros2 = np.zeros((2, self.num_tasks))\n zeros3 = np.zeros((3, self.num_tasks))\n\n total = summation(affinity, model.A)\n total += summation(affinity, model.A2)\n total += summation(affinity, model.A3)\n total += summation(affinity, model.A4)\n\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A2)\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A3)\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A4)\n\n total += summation(np.vstack((affinity[2:, :], zeros2)), model.A3)\n total += summation(np.vstack((affinity[2:, :], zeros2)), model.A4)\n\n total += summation(np.vstack((affinity[3:, :], zeros3)), model.A4)\n total *= scaling\n\n return model.Affinity_cognitive_total == total\n\n self.model.constrain_affinity_cognitive_total = Constraint(rule=rule)" ]
[ "0.6040425", "0.57220644", "0.5651253", "0.5630593", "0.5616365", "0.5534202", "0.5531391", "0.5526905", "0.54685897", "0.54388416", "0.54252905", "0.5397082", "0.53935945", "0.5393315", "0.53874266", "0.53383327", "0.5333689", "0.53286886", "0.5300002", "0.52907956", "0.5286129", "0.5266891", "0.5242797", "0.5226825", "0.52266717", "0.5217497", "0.52111536", "0.5209541", "0.52073073", "0.51903504" ]
0.6755689
0
Data { get; } > int
def Data(self) -> int:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def value(self) -> int:\n return self._data", "def getInteger(self):", "def getInteger(self):", "def getValue(self) -> int:\n ...", "def data(self, value):\n if not isinstance(value, int):\n raise TypeError(\"data must be an integer\")\n else:\n self.__data = value", "def getInteger(self):\n pass", "def data(self, value):\n if not isinstance(value, int):\n raise TypeError(\"data must be an integer\")\n self.__data = value", "def value(self) -> int:\n return self.length", "def data(self, value):\n if type(value) != int:\n raise TypeError(\"data must be an integer\")\n else:\n self.__size = value", "def __int__(self):\n\n return self.value", "def data(self, value):\n if isinstance(data, int) is False:\n raise TypeError(\"data must be an integer\")\n self.__data = value", "def __int__(self):\n return self.get_raw_int()", "def value(self):\n return self.__n", "def __index__(self):\n return int(self)", "def get_value(self):\r\n return 0", "def Get(self):\n value=0\n return value", "def value(self) -> int:\n return self._value", "def __len__(self):\n # type: () -> int\n return len(self.data)", "def z(self) -> int:", "def value(self):\n return 0", "def getNumData(self):\n return len(self.data)", "def __len__(self):\n return int(self.total)", "def Value(self) -> _n_0_t_14:", "def __len__(self):\n return(self.data_len)", "def get_data_idx(self)->list:\n return self.__data_idx", "def __len__(self) -> int:\n if self.data is None:\n return 0\n return len(self.data)", "def OffsetToStringData(self) -> int:", "def getData(self):\n return struct.unpack(\"!d\",self.data)[0]", "def value(self, p_int): # real signature unknown; restored from __doc__\n pass", "def __len__(self) -> int:\n return len(self.data)" ]
[ "0.7560098", "0.6998426", "0.6998426", "0.69836044", "0.68988633", "0.6810876", "0.68026674", "0.6762897", "0.6762579", "0.6620448", "0.6615782", "0.65521204", "0.6511656", "0.6465311", "0.6452893", "0.643525", "0.6413299", "0.6396933", "0.63801414", "0.6359766", "0.6356796", "0.6325206", "0.6322338", "0.6273856", "0.6273769", "0.62498045", "0.6246462", "0.62460715", "0.62434304", "0.62270236" ]
0.86933905
0
Item { get; set; } > bool
def Item(self) -> bool:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __bool__(self):\n return bool(self._items)", "def __bool__(self):\n return bool(self._items)", "def get_bool(self, item: str) -> bool:\n return as_bool(self[item])", "def __bool__(self):\n return bool(self.get_value())", "def value(self) -> bool:", "def __contains__ (self, item):\n return False", "def bool(self, item, default=None):\n try:\n item = self.__getattr__(item)\n except AttributeError as err:\n if default is not None:\n return default\n raise err\n\n if isinstance(item, (bool, int)):\n return bool(item)\n\n if (isinstance(item, str) and\n item.lower() in ('n', 'no', 'false', 'f', '0')):\n return False\n\n return True if item else False", "def __bool__(self):\n return bool(self.obj)", "def has(self, item):\n return item in self.mut", "def __bool__(self):\r\n return self.valid", "def __bool__(self):\n return bool(self._value)", "def __contains__(self, item: OidValue) -> bool:\n item = to_int_tuple(item)\n return self.value == item[0 : len(self.value)]", "def __contains__(self, item):\n return self.contains(item)", "def __bool__(self):\n return self is TRUE", "def bool(self, obj):\n return True", "def bool(self, obj):\n return True", "def __contains__(self, item):\n pass", "def is_satisfied(self, item: Any) -> bool:", "def _apply_item(self, item: Item) -> bool:\n return False", "def __contains__(self, item):\n\n if self[item]:\n return True\n return False", "def test_bool_field():", "def __bool__(self):\n raise ValueError(\"bool() not permitted\")", "def IsItemChecked(self, item):\r\n\r\n return item.IsChecked()", "def __bool__(self):\n\t\treturn not self.empty()", "def __contains__(self, item):", "def has_item(self, item):\n return item in self.set", "def has_item(self, item):\n return item in self.set", "def get_bool2(self):\n pass", "def __contains__(self, item):\n\t\treturn item in self.__dict__.values()", "def __contains__(self, item: Any) -> bool:\n return item in self.item_to_index" ]
[ "0.76140684", "0.76140684", "0.71920353", "0.7042644", "0.70174855", "0.6897622", "0.68056196", "0.67807496", "0.67592233", "0.6713015", "0.66944444", "0.66852814", "0.6675724", "0.6628342", "0.6621324", "0.6621324", "0.6600947", "0.6575022", "0.6563937", "0.65453804", "0.65210843", "0.6519773", "0.6511947", "0.65072644", "0.64827454", "0.6481432", "0.6481432", "0.6477744", "0.6452274", "0.6437997" ]
0.8566267
0
Mask { get; } > int
def Mask(self) -> int:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mask(self):\n return self.mask_index", "def mask(self):\n return ((2**(self.width) - 1) << self.lsb)", "def mask_index(self) -> int:\n return self._mask_index", "def mask(self):", "def mask(self) -> list[int]:\n return self._mask", "def mask(self):\n return self._mask", "def mask(self):\n return self._mask", "def mask(self):\n return self._mask", "def mask(self):\n return self._mask", "def mask_id(self) -> int:\n return self.tokenizer.get_command('MASK').Id", "def get_bitmask(self):\r\n return self.__bitmask__", "def mask(self) -> str:\n return self.tokenizer.get_command('MASK').Id", "def mask(n):\n if n >= 0:\n return 2**n - 1\n else:\n return 0", "def mask(n):\n if n >= 0:\n return 2**n - 1\n else:\n return 0", "def _get_maskLength(self):\n return self.__maskLength", "def mask_id(self):\n m = 2 * self.mask_full()\n m[0:self.size, 0:self.size] = self.id * self.mask()\n return m.astype(np.int16)", "def mask(self):\n return type(self)(self.data.mask, self.bset)", "def bitmask(n: int) -> int:\n if n >= 0:\n return (1 << n) - 1\n else:\n return -1 << -n", "def __int__(self):\n\n return self.bitflags", "def indicator(self):\n return (~self.mask).astype(numpy.int_)", "def _mask(self):\n if self.__mask is None:\n # need this to be *exactly* the numpy boolean False\n return nomask\n return self.__mask", "def get_sample_mask(self):", "def zero_mask(self):\n accum = 0\n for i in range(self.data.itemsize):\n accum += (0x55 << (i << 3))\n return accum", "def mask(self):\n return np.ones((self.size, self.size))", "def mask(self):\n return list(self._mask_generator())", "def pos_mask(row: int, col: int) -> int:\n assert 0 <= row < 8\n assert 0 <= col < 8\n return 0x8000000000000000 >> col >> row * 8", "def shiftr_bitmask(self):\r\n self.__bitmask__ = self.__bitmask__ >> 1", "def one_mask(self):\n accum = 0\n for i in range(self.data.itemsize):\n accum += (0xAA << (i << 3))\n return accum", "def mask(self, mask):\n return MaskedDistribution(self, mask)", "def get_mask_offset(mask):\n # use ctypes to truncate the result to a uint32\n cmask = ctypes.c_uint32(mask).value\n return _bruijn32lookup[ctypes.c_uint32((mask & -mask) * 0x077cb531).value >> 27]" ]
[ "0.7837592", "0.76170206", "0.75914097", "0.75121796", "0.74010247", "0.7349037", "0.7349037", "0.7349037", "0.7349037", "0.71888405", "0.70815897", "0.7005188", "0.6910063", "0.6910063", "0.68605787", "0.68557954", "0.68479794", "0.67024934", "0.6688607", "0.66692984", "0.66562295", "0.6576119", "0.64902174", "0.64855975", "0.64763117", "0.64638096", "0.64333326", "0.6420627", "0.6413732", "0.6405303" ]
0.8851983
0
Offset { get; } > int
def Offset(self) -> int:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def offset(self):\n return self.__offset", "def offset(self):\n return self.__offset", "def offset(self):\r\n return self._get_instantiation()[3]", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\n return self._offset", "def offset(self):\r\n return self.buf[0].unib[9:11]", "def offset(self):\n self._fetch_if_needed()\n return self._offset", "def get_offset(self):\n return self.offset", "def offset(self, offset):\n raise NotImplementedError(\"This should have been implemented.\")", "def tell(self):\n return self.offset", "def top_offset(self):\n raise NotImplementedError", "def offset(self):\n\n return self._offset", "def offset_id(offset: int, id_: int) -> int:\n return id_ if id_ > 0 else offset - id_", "def offset(self):\n return self.unpack_dword(0x0)", "def _get_next_offset(self):\n return self.__offset", "def GetOffset(self, *args, **kwargs):\n pass", "def smpte_offset(self) -> int:\n return self.__smpte_offset", "def offset(self):\n return self.query.offset", "def min_offset(self):\n return self.offset", "def ReturnOffset(offset):\r\n return _hiew.ReturnOffset(offset)", "def find_offset(self,value):\n return self.header.find_offset(value)", "def elemoffset(self):\n return self.offset // self.itemsize", "def OffsetToStringData(self) -> int:", "def getOffset(self):\n return _libsbml.Unit_getOffset(self)", "def byteIndex(self):\n return self.offset" ]
[ "0.7361396", "0.7361396", "0.72346246", "0.7174349", "0.7174349", "0.7174349", "0.7174349", "0.7174349", "0.7174349", "0.7174349", "0.71212727", "0.708579", "0.70526683", "0.7044136", "0.70365083", "0.70010835", "0.6850568", "0.6844158", "0.6821544", "0.6786369", "0.6754752", "0.67042303", "0.65987337", "0.65828437", "0.6574491", "0.6532103", "0.65231407", "0.64974785", "0.64839745", "0.64813876" ]
0.8861596
0
Keys { get; } > NameObjectCollectionBase.KeysCollection
def Keys(self) -> NameObjectCollectionBase.KeysCollection:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_keys(self):\r\n return self._keys", "def keys(self) -> List:\n pass", "def keys(self):\r\n return [k for k in self]", "def keys(self):\n raise NotImplementedError", "def keys(self):\n raise NotImplementedError('keys() should have been replaced by a metaclass')", "def keys(self):\n return [ x for x in self ]", "def keys(self):\n return _keys(self)", "def keys(self):\n return _keys(self)", "def Keys(self) -> _n_1_t_4:", "def keys(self) -> List[str]:\n raise NotImplementedError", "def keys(self):\n return", "def keys(self):\n return list(self.__iter__())", "def keys(self) -> Sequence[str]:\n raise NotImplementedError", "def keys(self):\n return self.__keys", "def getkeys(self):\n return list(self.keys)", "def keys(self):\n return self.keys", "def keys(self):\n return list(self.iterkeys())", "def keys(self):\n return self._keys", "def keys(self):\n return self._keys", "def keys(self) -> t.List[str]: # type: ignore[override]\n return list(self.__keys)", "def keys(self):\r\n return [key for key, value in self.iteritems()]", "def keys(self):\n\n return self.keys_set", "def iterkeys(self):\n return self.__iter__()", "def iterkeys(self):", "def iterkeys(self):", "def keys(self):\n if self._keys is not None:\n return self._keys\n self._set_keys()\n return self._keys", "def AllKeys(self) -> _n_0_t_1[str]:", "async def keys(self) -> Iterable[str]:", "def keys(self):\n\n return list(self.iterkeys())", "def keys():" ]
[ "0.8030493", "0.79718614", "0.78185", "0.771581", "0.76575226", "0.76478493", "0.7614883", "0.7614883", "0.75922537", "0.758795", "0.758302", "0.75540566", "0.75408614", "0.753722", "0.75090593", "0.74985516", "0.7471102", "0.7463831", "0.7463831", "0.7461818", "0.7459962", "0.7421005", "0.7359799", "0.73576933", "0.73576933", "0.7347095", "0.7340029", "0.7326453", "0.7251409", "0.7239305" ]
0.93455327
0
AllKeys { get; } > Array
def AllKeys(self) -> _n_0_t_1[str]:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def keys(self) -> List:\n pass", "def keysAll():", "def keys(self):\r\n return [k for k in self]", "def get_keys(self):\r\n return self._keys", "def keys(self):\n return [ x for x in self ]", "def getkeys(self):\n return list(self.keys)", "def keys(self):\n return", "def list_all_keys(self):\n \n return self.keys", "def keyValues(self): # real signature unknown; restored from __doc__\n return []", "def keys(self):\n return self.keys", "def keys(self):\n return _keys(self)", "def keys(self):\n return _keys(self)", "def Keys(self) -> _n_1_t_4:", "def keys(self):\n return list(self.iterkeys())", "def keys(self):\n raise NotImplementedError", "def keys():", "def keys(self):\n return list(self.__iter__())", "def keys(self):\n return self.__keys", "def keys(self):\r\n return [key for key, value in self.iteritems()]", "async def keys(self) -> Iterable[str]:", "def keys(self):\n return self._keys", "def keys(self):\n return self._keys", "def keys(self):\n\n return list(self.iterkeys())", "def keys(self):\n return [key for key, value in self.items()]", "def keys(self) -> t.List[str]: # type: ignore[override]\n return list(self.__keys)", "def keys(self) -> List[str]:\n raise NotImplementedError", "def keys(self):\n return self.ArrayNames", "def Keys(self) -> NameObjectCollectionBase.KeysCollection:", "def keys(self):\n return self._sequence[:]", "def array_keys(item):\n return item.keys()" ]
[ "0.824508", "0.8210384", "0.811888", "0.80539304", "0.79637027", "0.79416925", "0.78825855", "0.786728", "0.7850918", "0.78212106", "0.7814491", "0.7814491", "0.7811005", "0.77953106", "0.77004904", "0.76996964", "0.76911926", "0.76835346", "0.7661703", "0.76457775", "0.7628854", "0.7628854", "0.7620842", "0.76141065", "0.75752276", "0.75557315", "0.75382555", "0.7520632", "0.75066495", "0.750071" ]
0.83905303
0
Item { get; set; } > str
def Item(self) -> str:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return str(self.item)", "def __str__(self):\r\n return str(self._items)", "def __str__(self):\r\n return str(self._items)", "def Value(self) -> str:", "def __str__(self):\n return str(self._items)", "def string(self):\n return self._my_string", "def __str__(self):\n return str(self.GetString())", "def string(self):\n return f'y = {self.a.item()}'", "def test_str(self):\n item = self.item\n\n self.assertEqual(str(item), self.item_raw['name'])", "def __str__(self) -> str:\n return str(self.getvalue())", "def __str__(self) -> str:\n return str(self.getvalue())", "def __str__(self) -> str:\n return str(self.getvalue())", "def test_string_representation(self) -> None:\n item = Item(text=\"some text\")\n self.assertEqual(str(item), \"some text\")", "def __str__(self): # pragma: nocover\n return str(self.value)", "def mapper(item: Union[str, object]) -> str:\n return str(item)", "def __str__(self):\n return ' '.join([str(item) for item in self])", "def __str__(self):\r\n return str(self.value())", "def __str__(self):\n return str(self.value())", "def simple_str(self):\n pass", "def valueToString():", "def __str__(self):\r\n return self.value", "def __str__(self) -> str:\n return str(self.value)", "def __str__(self):\n return self.string", "def __str__(self):\r\n\t\treturn \"({}, {})\".format(self.type, self.value)", "def format_item(self,obj):\n return unicode(obj)", "def value(self):\n return self.string", "def get_string(self):\n return self.__str", "def __str__(self):\n\n\t\treturn str(self.__value)", "def __str__(self) -> str:\n return self.value", "def __str__(self) -> str:\n return self.value" ]
[ "0.74461406", "0.70737", "0.70737", "0.6902582", "0.6873929", "0.68218076", "0.67909247", "0.67569864", "0.67167395", "0.66953707", "0.66953707", "0.66953707", "0.6675318", "0.66375804", "0.6594026", "0.65922886", "0.6584816", "0.654529", "0.6544038", "0.65272146", "0.6521561", "0.65086627", "0.64882094", "0.64810306", "0.6477088", "0.6474", "0.644895", "0.64472777", "0.6440771", "0.6440771" ]
0.8269495
1
Action { get; } > NotifyCollectionChangedAction
def Action(self) -> NotifyCollectionChangedAction:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_collection_updated(self, obj, state, old_name):\n icon_files = {\n \"gerber\": self.app.resource_location + \"/flatcam_icon16.png\",\n \"excellon\": self.app.resource_location + \"/drill16.png\",\n \"cncjob\": self.app.resource_location + \"/cnc16.png\",\n \"geometry\": self.app.resource_location + \"/geometry16.png\",\n \"script\": self.app.resource_location + \"/script_new16.png\",\n \"document\": self.app.resource_location + \"/notes16_1.png\"\n }\n\n if state == 'append':\n for act in self.app.ui.menuobjects.actions():\n try:\n act.triggered.disconnect()\n except TypeError:\n pass\n self.app.ui.menuobjects.clear()\n\n gerber_list = []\n exc_list = []\n cncjob_list = []\n geo_list = []\n script_list = []\n doc_list = []\n\n for name in self.get_names():\n obj_named = self.get_by_name(name)\n if obj_named.kind == 'gerber':\n gerber_list.append(name)\n elif obj_named.kind == 'excellon':\n exc_list.append(name)\n elif obj_named.kind == 'cncjob':\n cncjob_list.append(name)\n elif obj_named.kind == 'geometry':\n geo_list.append(name)\n elif obj_named.kind == 'script':\n script_list.append(name)\n elif obj_named.kind == 'document':\n doc_list.append(name)\n\n def add_act(o_name):\n obj_for_icon = self.get_by_name(o_name)\n menu_action = QtWidgets.QAction(parent=self.app.ui.menuobjects)\n menu_action.setCheckable(True)\n menu_action.setText(o_name)\n menu_action.setIcon(QtGui.QIcon(icon_files[obj_for_icon.kind]))\n menu_action.triggered.connect(\n lambda: self.set_active(o_name) if menu_action.isChecked() is True else\n self.set_inactive(o_name))\n self.app.ui.menuobjects.addAction(menu_action)\n\n for name in gerber_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in exc_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in cncjob_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in geo_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in script_list:\n add_act(name)\n self.app.ui.menuobjects.addSeparator()\n\n for name in doc_list:\n add_act(name)\n\n self.app.ui.menuobjects.addSeparator()\n self.app.ui.menuobjects_selall = self.app.ui.menuobjects.addAction(\n QtGui.QIcon(self.app.resource_location + '/select_all.png'),\n _('Select All')\n )\n self.app.ui.menuobjects_unselall = self.app.ui.menuobjects.addAction(\n QtGui.QIcon(self.app.resource_location + '/deselect_all32.png'),\n _('Deselect All')\n )\n self.app.ui.menuobjects_selall.triggered.connect(lambda: self.on_objects_selection(True))\n self.app.ui.menuobjects_unselall.triggered.connect(lambda: self.on_objects_selection(False))\n\n elif state == 'delete':\n for act in self.app.ui.menuobjects.actions():\n if act.text() == obj.options['name']:\n try:\n act.triggered.disconnect()\n except TypeError:\n pass\n self.app.ui.menuobjects.removeAction(act)\n break\n elif state == 'rename':\n for act in self.app.ui.menuobjects.actions():\n if act.text() == old_name:\n add_action = QtWidgets.QAction(parent=self.app.ui.menuobjects)\n add_action.setText(obj.options['name'])\n add_action.setIcon(QtGui.QIcon(icon_files[obj.kind]))\n add_action.triggered.connect(\n lambda: self.set_active(obj.options['name']) if add_action.isChecked() is True else\n self.set_inactive(obj.options['name']))\n\n self.app.ui.menuobjects.insertAction(act, add_action)\n\n try:\n act.triggered.disconnect()\n except TypeError:\n pass\n self.app.ui.menuobjects.removeAction(act)\n break\n elif state == 'delete_all':\n for act in self.app.ui.menuobjects.actions():\n 
try:\n act.triggered.disconnect()\n except TypeError:\n pass\n self.app.ui.menuobjects.clear()\n\n self.app.ui.menuobjects.addSeparator()\n self.app.ui.menuobjects_selall = self.app.ui.menuobjects.addAction(\n QtGui.QIcon(self.app.resource_location + '/select_all.png'),\n _('Select All')\n )\n self.app.ui.menuobjects_unselall = self.app.ui.menuobjects.addAction(\n QtGui.QIcon(self.app.resource_location + '/deselect_all32.png'),\n _('Deselect All')\n )\n self.app.ui.menuobjects_selall.triggered.connect(lambda: self.on_objects_selection(True))\n self.app.ui.menuobjects_unselall.triggered.connect(lambda: self.on_objects_selection(False))", "def notifyObservers(self):", "def _action(self):\n pass", "def action(self,item):\r\n pass", "def _refreshActionTriggeredSlot(self):\r\n \r\n self._controller.model.refresh(self._controller.model.activeIndex)", "def subscribeViewerChanged(self, action: Callable) -> int:\n self.sub_id += 1\n self._viewer_changed_subscribers[self.sub_id] = action\n return self.sub_id", "def __facilityChanged(self):\n self.removeAllItems()\n self._update()", "def on_clicked(self, func):\n return self._observers.connect('clicked', func)", "def action(self, action):\n self._action = action", "def comboBoxChanged(self):\n new_action = self.ui.comboBoxAction.currentText().lower()\n self.parameters['action'] = new_action\n\n self.changed.emit()", "def action(self):\n pass", "def action(self):\n pass", "def on_changed(self, func):\n return self._observers.connect('changed', lambda val: func(val))", "def on_changed(self, func):\n return self._observers.connect('changed', lambda val: func(val))", "def on_add(self):\n self.notify(on_add())", "def on_update(self, action):\n self._last_foreign.on_update(action)\n return self", "def on_clicked(self, func):\n cid = self.cnt\n self.observers[cid] = func\n self.cnt += 1\n return cid", "def on_clicked(self, func):\n cid = self.cnt\n self.observers[cid] = func\n self.cnt += 1\n return cid", "def action(self, action):\n\n self._action = action", "def receiveAction(self, action):\n self.action = action", "def on_clicked(self, func):\n return self._observers.connect('clicked', lambda event: func(event))", "def actions(self):\n raise NotImplementedError", "def action(self):\n return self._action", "def action(self):\n return self._action", "def action(self):\n return self._action", "def action(self):\n return self._action", "def action(self):\n return self._action", "def action(self):\n return self._action", "def _list_items_changed_handler ( self, name, not_used, event ):\n arg_lists = self._get_instance_handlers( name[:-6] )\n\n for item in event.removed:\n for args in arg_lists:\n item.on_trait_change( remove = True, *args )\n\n for item in event.added:\n for args in arg_lists:\n item.on_trait_change( *args )", "def setAction(self, func):\n\t\tself.action = func" ]
[ "0.57026577", "0.55472505", "0.5491581", "0.5480743", "0.5450638", "0.53421706", "0.53055394", "0.5241962", "0.5201908", "0.5185991", "0.51797247", "0.51797247", "0.51709735", "0.51709735", "0.51529616", "0.51474416", "0.51144034", "0.51144034", "0.5096343", "0.5094186", "0.5075942", "0.5035513", "0.5017384", "0.5017384", "0.5017384", "0.5017384", "0.5017384", "0.5017384", "0.4988361", "0.49652767" ]
0.92143404
0
NewItems { get; } > IList
def NewItems(self) -> _n_1_t_7:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def OldItems(self) -> _n_1_t_7:", "def items(self) -> List:\n pass", "def new():\n list_new()", "def add_item(self, new_item):\n [self.item_list.append(new_item) for item in self.item_list\n if new_item not in self.item_list]", "def handle_list_items(self, object, name, old, new):\n raise NotImplementedError", "def new_plist(self):\n # Not implemented at this time.\n pass", "def __call__(self, items: List[Item]) -> List[Item]:", "def items(self):", "def add(self, item):", "def append (self, item):\n pass", "def append(self, new):\n new = HistoryItem(new)\n list.append(self, new)\n new.idx = len(self)", "def __append_to_item_list(self):\n Item.get_item_list().append(self)", "def handle_list(self, object, name, old, new):\n raise NotImplementedError", "def get_items(self):\n return []", "def test_adding_item_to_list(create_shopping_item, create_shopping_list):\n shopping_list = create_shopping_list\n items_before = shopping_list.items.values_list().count()\n new_item = create_shopping_item\n shopping_list.items.add(new_item)\n items_after = shopping_list.items.values_list().count()\n assert items_after > items_before\n assert items_before == 0\n assert items_after == 1", "def __init__(self):\n self.item_list = []", "def handle_list_items(self, object, name, old, new):\n self.handle_list(object, name, new.removed, new.added)", "def Push(self, item):\n self.list.append(item)", "def push(self, new_item):\n self.items.append(new_item)", "def __getitem__(self, item):\n return self.getList()", "def addItems(*args):", "def append(self, item: Any) -> BaseList:\n super().append(item)\n return self", "def get_items_to_index(self):\n\t\treturn []", "def add_new_item():\n\n lst = item_list()\n return render_template('index.html', sell_flag=1, items=lst)", "def DoAdd(self,event):\r\n newItem = self.data.add()\r\n if newItem and newItem not in self.items:\r\n self.items = self.data.getItemList()\r\n index = self.items.index(newItem)\r\n self.list.InsertItems([newItem],index)", "def __init__(self):\r\n self.items = []", "def set_new(self, new_key, new_value):\r\n hashed_key = self.hash_key(new_key) \r\n\r\n self._items.insert(hashed_key, new_value)\r\n \r\n return self.get_items()", "def __init__(self):\r\n self._items = [[] for _ in range(20)]", "def _list(self):\n raise NotImplementedError", "def items():" ]
[ "0.7087581", "0.68237215", "0.66904354", "0.66464525", "0.6514788", "0.64900506", "0.63648415", "0.62899405", "0.6236077", "0.6183304", "0.6155255", "0.6122164", "0.60757506", "0.60671026", "0.6063697", "0.60569775", "0.60379", "0.60241634", "0.6001961", "0.59869635", "0.59740776", "0.59614974", "0.5956404", "0.5928447", "0.5913109", "0.5871558", "0.5869302", "0.58652014", "0.58623415", "0.585866" ]
0.81301445
0
NewStartingIndex { get; } > int
def NewStartingIndex(self) -> int:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def OldStartingIndex(self) -> int:", "def index(self) -> int:", "def _index(self) -> int:\n return -1", "def get_next_position(self):", "def up_index(index):\n return 2 * index", "def index(self, x) -> int:\n pass", "def start_index(self):\r\n return (self.per_page * (self.page_number - 1)) + 1", "def Offset(self) -> int:", "def Offset(self) -> int:", "def Offset(self) -> int:", "def _left(self, index):\r\n return 2*index + 1", "def down_index(index):\n return 2 * index + 1", "def rindex(self, sub) -> int:\n pass", "def generate_reverse_index(self):", "def get_new_head_node_index(old_head_node_index: int) -> int:\n # '-1' means that there is no more new node not visited.\n new_head_index = -1\n if old_head_node_index < self.length - 1:\n for index in range(old_head_node_index + 1, self.length):\n if len(self.hashTable[index].keys) > 0:\n new_head_index = index\n break\n return new_head_index", "def get_new_index(selected_index):\n new_index = []\n cur_index = 0\n for jj in range(len(selected_index)):\n\n if selected_index[jj] == cur_index + 1:\n cur_index += 1\n new_index.append(cur_index)\n\n else:\n new_index.append(cur_index)\n new_index = np.array(new_index)\n return new_index", "def __start_index(self):\n # Special case, return zero if no items.\n if self.paginator.count == 0:\n return 0\n return (self.paginator.per_page_limit * (self.number - 1)) + 1", "def insert_index(self):\n pass", "def _normalize_index(self, index: int):\n if index < 0:\n return len(self) + index\n else:\n return index", "def setDefaultIndex():\r\n return 0", "def fix_index(self):\n if self.record_size <= self.size:\n self.record_size += 1\n if self.index % self.size == 0:\n self.isFull = True if len(self._storage) == self.size else False\n if self.replace_flag:\n self.index = 1\n return self.index\n else:\n sys.stderr.write('Experience replay buff is full and replace is set to FALSE!\\n')\n return -1\n else:\n self.index += 1\n return self.index", "def getNextIndex (self, gameState, currIndex):\n nextIndex = currIndex + 1\n if (nextIndex >= gameState.getNumAgents()):\n nextIndex = 0\n return nextIndex", "def index(self) -> int:\r\n return self._index", "def index(self, value, start=None, stop=None): # real signature unknown; restored from __doc__\n return 0", "def _set_index(self):\n self.index = 0\n # If offset is negative, target window might start before 0\n self.index = -min(0, self._get_target_index())", "def parent_idx(idx):\n return (idx - 1) >> 1", "def west_index(self, index):\n if index % self.size == 0:\n return -1\n else:\n return index - 1", "def reset_index(self):\n self.increments = 0", "def north_index(self, index):\n return index - self.size", "def determineIndexOffset(i):\n n = 1\n triangleSum = 0\n while i > triangleSum:\n triangleSum = triangleSum + n\n n = n + 1\n return n - 1" ]
[ "0.8996522", "0.6921661", "0.685216", "0.6768644", "0.6754769", "0.657122", "0.64902514", "0.6434296", "0.6434296", "0.6434296", "0.6347615", "0.63349503", "0.6308652", "0.62746763", "0.62228894", "0.6207351", "0.617466", "0.6152202", "0.61070955", "0.60914516", "0.6084145", "0.60780823", "0.6077503", "0.60633755", "0.60597825", "0.60349435", "0.59742147", "0.59722835", "0.5944334", "0.594383" ]
0.91030383
0
OldItems { get; } > IList
def OldItems(self) -> _n_1_t_7:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeOldItems(self):\n pass", "def GetOldItem(self):\r\n\r\n return self._itemOld", "def handle_list_items(self, object, name, old, new):\n raise NotImplementedError", "def NewItems(self) -> _n_1_t_7:", "def handle_list_items(self, object, name, old, new):\n self.handle_list(object, name, new.removed, new.added)", "def update_cloud_watch_obj_list(old_list, new_list):\n\n # Add new.\n for new_item in new_list:\n if new_item not in old_list:\n new_item.added = True\n old_list.append(new_item)\n\n # Remove deleted.\n for old_item in old_list:\n if old_item not in new_list:\n old_list.remove(old_item)\n\n return old_list", "def items(self) -> List:\n pass", "def _items_updated(self, change):\n if self.root:\n # The whole list changed.\n if change['type'] == 'update':\n added = set(change['value']) - set(change['oldvalue'])\n removed = set(change['oldvalue']) - set(change['value'])\n for item in removed:\n self._item_removed(item)\n for item in added:\n self._item_added(item)\n\n # An operation has been performed on the list.\n elif change['type'] == 'container':\n op = change['operation']\n\n # itemren have been added\n if op in ('__iadd__', 'append', 'extend', 'insert'):\n if 'item' in change:\n self._item_added(change['item'])\n if 'items' in change:\n for item in change['items']:\n self._item_added(item)\n\n # itemren have been removed.\n elif op in ('__delitem__', 'remove', 'pop'):\n if 'item' in change:\n self._item_removed(change['item'])\n if 'items' in change:\n for item in change['items']:\n self._item_removed(item)\n\n # One item was replaced.\n elif op in ('__setitem__'):\n old = change['olditem']\n if isinstance(old, list):\n for item in old:\n self._item_removed(item)\n else:\n self._item_removed(old)\n\n new = change['newitem']\n if isinstance(new, list):\n for item in new:\n self._item_added(item)\n else:\n self._item_added(new)\n\n self._recompute_indexes()", "def getOldCodeList(self):\n tmp = []\n for child in self.children:\n tmp.extend(child.getOldCodeList())\n return tmp", "def handle_list(self, object, name, old, new):\n raise NotImplementedError", "def __call__(self, items: List[Item]) -> List[Item]:", "def get_items_to_index(self):\n\t\treturn []", "def reconcile_list(host: Component, key: str, old: List, new: List) -> List:\n zipped = zip_longest(old, new, fillvalue=None)\n reconciled_list = [\n reconcile(host, key, ndx, old_item, new_item)\n for ndx, (old_item, new_item) in enumerate(zipped)\n ]\n return [r for r in reconciled_list if r is not None]", "def test_list_inplace_update(self):\r\n vm = List.value_manager(None, None, [1,2,3])\r\n assert not vm.changed\r\n vm.value.append(4)\r\n assert vm.changed", "def replaced(L, old, new):\n return [x if x != old else new for x in L]", "def add_item(self, new_item):\n [self.item_list.append(new_item) for item in self.item_list\n if new_item not in self.item_list]", "def get_changed_primitive_list(old_objects, new_objects):\n\n changed_objects = {}\n\n # Try and detect which items have changed\n for old_object in old_objects:\n if old_object not in new_objects:\n if old_object not in changed_objects:\n changed_objects[old_object] = {'old': old_object}\n else:\n changed_objects[old_object]['old'] = old_object\n\n for new_object in new_objects:\n if new_object not in old_objects:\n if new_object not in changed_objects:\n changed_objects[new_object] = {'new': new_object}\n else:\n changed_objects[new_object]['new'] = new_object\n\n return changed_objects", "def items(self) -> 'ItemsView[str, str]':\n return 
_EntityFixupItems(self)", "def create_list(oldList):\n\n #If list is empty...\n #return empty list\n if(oldList == []):\n return []\n\n #Index is assumed to be last value in list\n index = len(oldList[0])-1\n\n #Create new list\n testList = []\n for value in oldList:\n testList.append(value[index])\n\n return testList", "def extended(self) -> List:\n raise NotImplementedError", "def get_items_changed(self, base_ref='HEAD'):\n command = ['diff-index', '--name-only',\n '--cached', base_ref]\n res = self.run(command)\n items = res.split('\\n') if res else []\n return items", "def getItemList(self):\r\n raise AbstractError\r\n return []", "def test_list_update(self):\r\n vm = List.value_manager(None, None, [1,2,3])\r\n assert not vm.changed\r\n vm.value = [4,5,6]\r\n assert vm.changed", "def items(self):", "def item_diffs(old_items=None, new_items=None):\n\n if not old_items:\n old_items = {}\n\n if not new_items:\n new_items = {}\n\n new_ids = set(new_items.keys())\n old_ids = set(old_items.keys())\n added = [new_items[x] for x in new_ids.difference(old_ids)]\n removed = [old_items[x] for x in old_ids.difference(new_ids)]\n intersected_ids = new_ids.intersection(old_ids)\n updated = [new_items[x] for x in [x for x in intersected_ids if new_items[x] != old_items[x]]]\n\n return {\n 'added': added,\n 'removed': removed,\n 'updated': updated\n }", "def SetOldItem(self, item):\r\n \r\n self._itemOld = item", "def collate(items):\n # return batch items as a list\n return items", "def get_items(self):\n return []", "def handle_list_items_special(self, object, name, old, new):\n wh = self.wrapped_handler_ref()\n if wh is not None:\n wh(object, name, new.removed, new.added)", "def get_items(self):\n return self.item_list" ]
[ "0.70305353", "0.6830201", "0.663443", "0.6420602", "0.63441193", "0.62663555", "0.6243105", "0.61490303", "0.6047346", "0.6038509", "0.60280555", "0.6027139", "0.6003214", "0.5964436", "0.5814353", "0.5791283", "0.5747276", "0.5704636", "0.5686104", "0.5667873", "0.56523323", "0.56231374", "0.5594943", "0.55891913", "0.55887765", "0.5585668", "0.5581286", "0.5546795", "0.5534581", "0.55270773" ]
0.8244279
0
OldStartingIndex { get; } > int
def OldStartingIndex(self) -> int:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def NewStartingIndex(self) -> int:", "def _index(self) -> int:\n return -1", "def index(self) -> int:", "def start_index(self):\r\n return (self.per_page * (self.page_number - 1)) + 1", "def get_next_position(self):", "def _left(self, index):\r\n return 2*index + 1", "def Offset(self) -> int:", "def Offset(self) -> int:", "def Offset(self) -> int:", "def up_index(index):\n return 2 * index", "def get_new_head_node_index(old_head_node_index: int) -> int:\n # '-1' means that there is no more new node not visited.\n new_head_index = -1\n if old_head_node_index < self.length - 1:\n for index in range(old_head_node_index + 1, self.length):\n if len(self.hashTable[index].keys) > 0:\n new_head_index = index\n break\n return new_head_index", "def north_index(self, index):\n return index - self.size", "def startIndex(self):\n return self._startIndex", "def down_index(index):\n return 2 * index + 1", "def __start_index(self):\n # Special case, return zero if no items.\n if self.paginator.count == 0:\n return 0\n return (self.paginator.per_page_limit * (self.number - 1)) + 1", "def index(self, x) -> int:\n pass", "def parent_idx(idx):\n return (idx - 1) >> 1", "def get_parent_index(self):\n return (self.index - 1) // 2", "def west_index(self, index):\n if index % self.size == 0:\n return -1\n else:\n return index - 1", "def default_start_index(self):\n return self._default_start_index", "def minimum_index(self):\n return self._minidx", "def get_left_child_index(self):\n return (2 * self.index) + 1", "def current_index(self) -> int:\n return self._current_index", "def index(self) -> int:\r\n return self._index", "def reset_index(self):\n self.increments = 0", "def _get_prev_correlation_index(self, index: int) -> int:\n pass", "def _get_target_index(self):\n return (self.index + self.source_window * (not self.overlapping) +\n self.offset)", "def get_left_index(i):\n pos = i + 1\n left_pos = 2 * pos\n left_index = left_pos - 1\n return left_index", "def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi", "def MinimumIndex(self):\n return self._minIndex" ]
[ "0.86372566", "0.68770224", "0.66470605", "0.65928936", "0.65164727", "0.65080565", "0.6458375", "0.6458375", "0.6458375", "0.6396853", "0.6364111", "0.6346274", "0.6303965", "0.62449104", "0.6215478", "0.6213587", "0.61976075", "0.6184642", "0.61814433", "0.614664", "0.6142431", "0.61381346", "0.61261624", "0.611256", "0.60995156", "0.6082139", "0.60786897", "0.6073317", "0.6069181", "0.6059895" ]
0.93224186
0
Count { get; } > int
def Count(self) -> int:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(self):\n return int()", "def count(self):\n return int()", "def count() -> int:\n pass", "def count(self):\n # TODO not implemented yet\n return 0", "def get_count(self):\r\n return self.count", "def count(self):\n\n raise NotImplementedError", "def __len__(self):\n return self.count", "def count(self, value): # real signature unknown; restored from __doc__\n return 0", "def count(self):\n return self._lift(\"count\")", "def count():", "def count(self) -> int:\n return self.__count", "def __len__(self):\n return self._count", "def __len__(self):\n return self._count", "def count(self):\n return len(self)", "def count(self):\n return self.size()", "def __len__(self, count=0):\n return len(self.next(), count+1) if self.next() else count", "def count(self):\n return self.get_count()", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def __len__(self):\n return self._count()", "def count(self):\n return self.properties.get('count')", "def get_count(self):\n return self.count", "def get_count(self):\n return self.count", "def counter(self) -> int:", "def counter(self) -> int:", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count" ]
[ "0.8202064", "0.8202064", "0.8039796", "0.7807312", "0.7733252", "0.77277046", "0.77222186", "0.77221006", "0.77153176", "0.77069134", "0.76832217", "0.7601363", "0.7601363", "0.7588302", "0.75655687", "0.751755", "0.7516682", "0.75138134", "0.75138134", "0.75138134", "0.74922985", "0.74822724", "0.7463599", "0.7463599", "0.7457906", "0.7457906", "0.7453633", "0.7453633", "0.7453633", "0.7453633" ]
0.85905355
0
IsSynchronized { get; } > bool
def IsSynchronized(self) -> bool:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interlocked(self):\n return not bool(self.interlock.get())", "def is_concurrent(self):\n return self.concurrent", "def is_synchronized(self):\r\n sync_state = True\r\n \r\n for particle in self.population:\r\n sync_state = (sync_state and particle.sync)\r\n \r\n if not sync_state:\r\n break;\r\n \r\n return sync_state", "def isSync(self):\n return False", "def is_atomic(self) -> bool:\n return False", "def is_synchronized(self):\n up = self.upper_binary_tree()\n down = self.lower_binary_tree()\n return down.canopee() == up.canopee()", "def Locked(self) -> bool:", "def is_locked(self):\r\n pass", "def __bool__(self):\n return self.wait(0)", "def is_unlocked(self):\r\n return self._lock.is_unlocked()", "def is_unlocked(self):\r\n return self._lock_fd is None", "def locked(self):\n return self.counter <= 0", "def is_async(self) -> bool:", "def isVolatile(self) -> bool:\n ...", "def __bool__(self) -> bool:\n return self._rpc is not None", "def is_locked(self):\n if not hasattr(self, \"_memo_init\"):\n return False\n else:\n return self._locked", "def is_locked(self):\n return self._is_locked", "def is_locked(self):\n ret_val = self._is_locked()\n return ret_val", "def __bool__(self) -> bool:\n return self._connected_event.is_set()", "def is_locked(self):\n return self.lock_obj.is_locked()", "def check_sync(self):\r\n if not self.awaiting_sync:\r\n return True\r\n self.check_ack_queue()\r\n return not self.awaiting_sync", "def is_locked(self):\n return self._unit_got == False", "def locked(self) -> bool:\n return self.__locked", "def locked(self):\n if self._locked == None:\n return False\n return self._locked", "def is_locked(self) -> bool | None:\n return self.instrument.is_locked", "def locked(self):\n return self.is_locked", "def is_first_synced(self):\n return True", "def _has_thread(self) -> bool:\n with self._condition:\n return not self._is_disposed and self._thread is not None", "def __bool__(self):\n return self is TRUE", "def Blocking(self) -> bool:" ]
[ "0.72966087", "0.72377276", "0.7227178", "0.7179509", "0.71317625", "0.6986824", "0.6875304", "0.6810426", "0.6744867", "0.67237324", "0.65174085", "0.65084296", "0.6480448", "0.647981", "0.64671266", "0.6462374", "0.64194214", "0.63950187", "0.6380033", "0.63241255", "0.63210654", "0.6304741", "0.6292868", "0.62898195", "0.62799406", "0.6264917", "0.622589", "0.62252545", "0.62051797", "0.6163513" ]
0.9197473
0
Item { get; set; } > str
def Item(self) -> str:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return str(self.item)", "def __str__(self):\r\n return str(self._items)", "def __str__(self):\r\n return str(self._items)", "def Value(self) -> str:", "def __str__(self):\n return str(self._items)", "def string(self):\n return self._my_string", "def __str__(self):\n return str(self.GetString())", "def string(self):\n return f'y = {self.a.item()}'", "def test_str(self):\n item = self.item\n\n self.assertEqual(str(item), self.item_raw['name'])", "def __str__(self) -> str:\n return str(self.getvalue())", "def __str__(self) -> str:\n return str(self.getvalue())", "def __str__(self) -> str:\n return str(self.getvalue())", "def test_string_representation(self) -> None:\n item = Item(text=\"some text\")\n self.assertEqual(str(item), \"some text\")", "def __str__(self): # pragma: nocover\n return str(self.value)", "def mapper(item: Union[str, object]) -> str:\n return str(item)", "def __str__(self):\n return ' '.join([str(item) for item in self])", "def __str__(self):\r\n return str(self.value())", "def __str__(self):\n return str(self.value())", "def simple_str(self):\n pass", "def valueToString():", "def __str__(self):\r\n return self.value", "def __str__(self) -> str:\n return str(self.value)", "def __str__(self):\n return self.string", "def __str__(self):\r\n\t\treturn \"({}, {})\".format(self.type, self.value)", "def format_item(self,obj):\n return unicode(obj)", "def value(self):\n return self.string", "def get_string(self):\n return self.__str", "def __str__(self):\n\n\t\treturn str(self.__value)", "def __str__(self) -> str:\n return self.value", "def __str__(self) -> str:\n return self.value" ]
[ "0.74461406", "0.70737", "0.70737", "0.6902582", "0.6873929", "0.68218076", "0.67909247", "0.67569864", "0.67167395", "0.66953707", "0.66953707", "0.66953707", "0.6675318", "0.66375804", "0.6594026", "0.65922886", "0.6584816", "0.654529", "0.6544038", "0.65272146", "0.6521561", "0.65086627", "0.64882094", "0.64810306", "0.6477088", "0.6474", "0.644895", "0.64472777", "0.6440771", "0.6440771" ]
0.8269495
0
SyncRoot { get; } > object
def SyncRoot(self) -> object:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSyncObj(self):\n \n return self.sync_obj", "def get_root(self) -> object:", "def getRoot(obj):", "def get_sync_attrs(self):\n return self._sync_attrs", "def obj(self) -> object:\n pass", "def obj(self):\n if not self._obj:\n self._get()\n return self._obj", "def obj(self):\r\n return self._obj", "def sync(self):\n return self._sync", "def obj(self):\n return self._obj", "def async_object(self):\n return self._async_object or self._object", "def root(self):\n return self.__root__", "def root(self):\n return self._root", "def get_root(self):\n return self._root", "def root(self):\n return self[0]", "def root(self):\n\t\treturn self._root", "def root(self):\n\t\treturn self._root", "def object(self):\n return self._object", "def root(self):\n return self._root()", "def root(self):\n return self._root", "def root(self):\n return self._root", "def root(self):\n return self._root", "def SyncClockMaster(self):\n if self.force_auto_sync:\n self.get('SyncClockMaster')\n return self._SyncClockMaster", "def get_obj(self):\n assert self._obj is not None, \"Object XML tree has not been generated yet!\"\n return self._obj", "def getRoot(self):\n return self.__root", "def getObject(self):\n return self.base.get(\"object\", [])", "def get_object(self):\n return self._object", "def SyncClockRef(self):\n if self.force_auto_sync:\n self.get('SyncClockRef')\n return self._SyncClockRef", "def toJSON(self):\n\t\treturn json.dumps(self.root, default=lambda o: o.__dict__)", "def data_object(self) -> any:\r\n\r\n return self.__data_object", "def root(self) -> Root:\n root = self.open(Root.type).signed\n if not isinstance(root, Root):\n raise RuntimeError(\"Unexpected root type\")\n return root" ]
[ "0.7451063", "0.6630365", "0.65472794", "0.6129156", "0.6124222", "0.6085068", "0.60512507", "0.6035903", "0.59617585", "0.59338695", "0.58426213", "0.5795738", "0.56910855", "0.55769795", "0.5556127", "0.5556127", "0.5536175", "0.553319", "0.55188656", "0.55188656", "0.55188656", "0.55045617", "0.54970175", "0.54937494", "0.54793185", "0.5471053", "0.54564977", "0.54529697", "0.5448675", "0.54464215" ]
0.83236295
0
Current { get; } > str
def Current(self) -> str:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current(self) -> str:\n return self.s[self.pos]", "def string_val(self) -> str:\n return self.current_token", "def __str__(self) -> str:\n\n return str(self.current_obj)", "def get_string(self):\n return self.__str", "def Value(self) -> str:", "def get_string(self):\n current = self\n l = []\n while current:\n l.append(current.value)\n current = current.next\n return \"\".join(l)", "def current(self):\r\n return self.string[self.index:]", "def state(self):\r\n return str(self)", "def string(self):\n return self._my_string", "def __pout__(self):\n return self.__str__()", "def state(self) -> str:", "def current(self):\n return self.name", "def state(self):\n\n\t\treturn str(self)", "def state(self):\n return str(self)", "def __str__(self):\n\n\t\treturn str(self.__value)", "def getCurrentString(self):\r\n if self.getTurn() == RED:\r\n return 'red'\r\n return 'white'", "def __str__(self):\r\n return str(self.value())", "def __str__(self) -> str:\n return self.value", "def __str__(self) -> str:\n return self.value", "def __str__(self) -> str:\n return self.value", "def __str__(self) -> str:\n return self.value", "def __str__(self) -> str:\n return self.value", "def __str__(self) -> str:\n return str(self.getvalue())", "def __str__(self) -> str:\n return str(self.getvalue())", "def __str__(self) -> str:\n return str(self.getvalue())", "def __str__(self):\n return str(self.value())", "def string(self):\n return f'y = {self.a.item()}'", "def __repr__(self) -> str:\n return self.value", "def __str__(self):\r\n return self.value", "def current(self):\n pass" ]
[ "0.7923747", "0.7441225", "0.7267023", "0.7167032", "0.71462184", "0.70835453", "0.70250505", "0.6957958", "0.6817078", "0.67957383", "0.679172", "0.6759429", "0.6736939", "0.66951215", "0.6673996", "0.6666931", "0.66513854", "0.66341764", "0.66341764", "0.66341764", "0.66341764", "0.66341764", "0.662976", "0.662976", "0.662976", "0.6622222", "0.6571685", "0.6541164", "0.6538053", "0.65349406" ]
0.8545434
0
Get the profit_currency price of asset in the given timestamp
def get_rate_in_profit_currency(self, asset: Asset, timestamp: Timestamp) -> Price: if asset == self.profit_currency: rate = Price(ONE) else: rate = PriceHistorian().query_historical_price( from_asset=asset, to_asset=self.profit_currency, timestamp=timestamp, ) return rate
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_coin_price(asset, time=None):\n url = 'https://rest.coinapi.io/v1/exchangerate/{}/USD'.format(asset)\n if time is not None:\n url = url + '?time={}'.format(time)\n headers = {'X-CoinAPI-Key': os.environ.get('COIN_API_KEY', '')}\n r = requests.get(url, headers=headers)\n if r.status_code / 100 == 2:\n price = {\"price\": r.json()['rate']}\n return price\n else:\n return {\"error\": r.content.decode('utf-8')}", "def get_stock_price(stock):\n pass", "def get_price(data):\n return data[\"summaryDetail\"][\"regularMarketPreviousClose\"][\"raw\"]", "def price_for_at(self, symbol, timestamp, tf=60):\n\t\ttf = 60*60\n\t\tmapped_tf = self.TF_MAP[tf]\n\t\ttimestamp = int(timestamp / tf) * tf\n\n\t\tif self._session:\n\t\t\treturn self._session.get_klines(symbol=symbol, interval=mapped_tf, startTime=int(timestamp*1000), endTime=int((timestamp+tf)*1000))\n\n\t\treturn None", "def getPrice(coin,cur):\n price = 'https://api.coinmarketcap.com/v1/ticker/' + coin\n json = requests.get(price).json()\n value = json[0]['price_' + str(cur)]\n return value", "def get_price(horizon_host, pair):\n print \"fetching latest price for:\" + pair[\"name\"]\n params = make_trade_params(pair)\n res = requests.get(horizon_host + \"/trades\", params).json()\n try:\n trade_record = res[\"_embedded\"][\"records\"][0]\n except IndexError:\n return DatedPrice(date=datetime.utcfromtimestamp(0), price=0)\n price = float(trade_record[\"price\"][\"n\"]) / float(trade_record[\"price\"][\"d\"])\n timestamp = parser.parse(trade_record[\"ledger_close_time\"])\n return DatedPrice(date=timestamp, price=price)", "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def price_from_vol( self, vol ):\n if self._vol_type == \"LogNormal\":\n S = self._deal_terms[ \"underlyer\" ].spot_value\n K = self._deal_terms[ \"payoff\" ].payoff_terms[ \"strike\" ]\n time_to_mat = self._deal_terms[ \"maturity\" ] - self._pricing_date\n r = CSA_map[ self._deal_terms[ \"CSA\" ] ].short_rate\n d1 = 1 / ( vol * np.sqrt( time_to_mat ) ) * ( np.log( S / K ) + ( r + 0.5 * vol ** 2 ) * time_to_mat )\n d2 = d1 - vol * np.sqrt( time_to_mat ) \n CallPrice = S * norm.cdf( d1 ) - K * np.exp( -r * time_to_mat ) * norm.cdf( d2 ) \n\n if self._deal_terms[ \"payoff\" ].payoff_name == \"European Call\":\n return CallPrice\n elif self._deal_terms[ \"payoff\" ].payoff_name == \"European Put\":\n return CallPrice + K * np.exp( -r * time_to_mat ) - S \n else:\n raise NameError( \"Unsupported vol type : \" + self._deal_terms[ \"Payoff\" ].payoff_name )\n else:\n raise NameError( \"Unsupported vol type : \" + self._vol_type )", "def get_market_price(self, exchange, pair, type):\n return self.ccxt.get_market_price(exchange, pair, type)", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://api.kraken.com/0/public/Ticker\"\n requestUrl = uri + \"?pair=\" + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"result\"][pair][\"c\"]\n return currentPrice", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n jsonResponse = self.getJson(\"https://poloniex.com/public?command=returnTicker\")\n currentPrice = jsonResponse[pair][\"last\"]\n return currentPrice", "def compute_time_price(supplier_with_transaction):\n supplier_item = supplier_with_transaction.get('supplier_detail')\n transaction_item = supplier_with_transaction.get('supplier_transaction')\n # Check if there is time prices or not\n 
if supplier_with_transaction.get('time_price'):\n # Check if we will compute in complex or simple\n if not supplier_item.get('has_complex_minute_price'):\n # start to calculate the simple version for time price\n charging_start = transaction_item.get('charging_start')\n charging_end = transaction_item.get('charging_end')\n if charging_start and charging_end:\n charging_start_obj = datetime.strptime(charging_start, '%Y-%m-%dT%H:%M:%S')\n charging_end_obj = datetime.strptime(charging_end, '%Y-%m-%dT%H:%M:%S')\n duration_in_minutes = (charging_end_obj - charging_start_obj).total_seconds() / 60\n # Check for min duration\n if supplier_item.get('min_duration') and duration_in_minutes < supplier_item.get('min_duration'):\n duration_in_minutes = supplier_item.get('min_duration')\n price = supplier_item.get('simple_minute_price')\n total_price = price * duration_in_minutes\n return total_price\n else:\n # start calculate the complex version for time price\n total_price = 0\n if supplier_item.get('interval') == 'start':\n for start_rec in supplier_item.get('time_price'):\n timeframe = start_rec.get('billing_each_timeframe') * 60\n if start_rec.get('hour_from', 0) > start_rec.get('hour_to', 0):\n duration = (start_rec.get('hour_to') - start_rec.get('hour_from')) * 60\n else:\n duration = (start_rec.get('hour_to') - (24 - start_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration + duration_after_timeframe\n total_price += total_duration * start_rec.get('minute_price')\n else:\n for end_rec in supplier_item.get('time_price'):\n timeframe = end_rec.get('billing_each_timeframe') * 60\n if end_rec.get('hour_from', 0) > end_rec.get('hour_to', 0):\n duration = (end_rec.get('hour_to') - end_rec.get('hour_from')) * 60\n else:\n duration = (end_rec.get('hour_to') - (24 - end_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration - (timeframe - duration_after_timeframe)\n total_price += total_duration * end_rec.get('minute_price')\n\n return total_price\n else:\n total_price = 0\n return total_price", "async def get_price(self) -> PairPrice:\n try:\n data = await self._network.get_response_content_from_get_request(\n url=self._BTC_FEED_URL, format=DataFormat.JSON\n )\n except NetworkError as e:\n msg = f\"Error getting BTC feed from {self._BTC_FEED_URL}\"\n log.exception(msg)\n raise BtcFeedError() from e\n\n try:\n price = 1 / float(data[\"price\"])\n except KeyError as e:\n msg = f\"Missing price field in BTC feed from {self._BTC_FEED_URL}\"\n log.exception(msg)\n raise BtcFeedError() from e\n except ValueError as e:\n msg = f\"Error value in price field in BTC feed from {self._BTC_FEED_URL}: {data['price']}\"\n log.exception(msg)\n raise BtcFeedError() from e\n\n return PairPrice(pair=(self._btc, self._market), price=price)", "def get_current_price(self):\n URL = config.coin['price_hist_url'] + self.ticker.lower()\n try:\n r = requests.get(URL)\n data = json.loads(r.text)\n value = data['last']\n timestamp = data['timestamp']\n self.current_price = value\n self.current_datetime = timestamp\n except Exception as err:\n logger.error(err)", "def get_product_price(product):\n return latest_product_version(product).price", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitfinex.com/v2/ticker/t\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[0]\n return currentPrice", "def get_coin_price(df: 
pd.DataFrame, coin_name: str) -> float:\n try:\n return df[(df['coin_name'] == coin_name)]['rates'].values[0]\n except(IndexError):\n print('Error: Could not find the coin.')\n return None", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitstamp.net/api/v2/ticker/\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"last\"]\n return currentPrice", "def price(self) -> float:\n if self.product:\n price = self.product.prices.filter(active=True).first()\n return int(price.unit_amount / 100)\n return -1", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://bittrex.com/api/v1.1/public/getticker?market=\"+pair\n jsonResponse = self.getJson(uri)\n currentPrice = jsonResponse[\"result\"][\"Last\"]\n return currentPrice", "def get_price():\n return uniform(1.0, 350.0)", "def poll_price_data():\n resp = requests.get(COINDESK_ENDPOINT) # Powered by CoinDesk\n if resp.status_code == 200:\n logging.info(\"GET request succeeded\")\n data = resp.json()\n data_dict = {\n \"id\": str(uuid.uuid1()),\n \"time\": data['time']['updated'],\n \"currency\": data['bpi']['USD']['code'],\n \"price\": data['bpi']['USD']['rate']\n }\n return data_dict\n else:\n logging.error(\"GET request failed\")", "def get_price(self, spot, t = 0, k = 1):\n if k == 0:\n return self.fv * np.exp(- spot * (self.maturity - t))\n else:\n return self.fv / np.power(1 + spot / k, (self.maturity - t) * k)", "def price(self, tf=None):\n if self._ticks:\n return (self._ticks[-1][1] + self._ticks[-1][2]) * 0.5\n else:\n candles = None\n if tf and self._candles.get(tf):\n candles = self._candles[tf]\n elif self._candles.get(Instrument.TF_SEC):\n candles = self._candles[Instrument.TF_SEC]\n elif self._candles.get(Instrument.TF_MIN):\n candles = self._candles[Instrument.TF_MIN]\n\n if candles:\n return candles[-1].close\n\n return None", "def volatility_efficiency(asset, strategy):\n perf = performances(asset, strategy)\n strat_perf = np.cumsum(perf)\n\n buy_hold_perf = np.cumsum(asset['forward_returns'])\n\n strat_v_bh = strat_perf[-1] / buy_hold_perf[-1]\n\n return strat_v_bh / time_in_market(strategy)", "def get_price_for_volume_at(conn, sticker, limit_price, volume, is_back, timestamp):\n tick = get_last_tick_before(conn, sticker, timestamp)\n rets = get_volume_at_price([tick], limit_price, volume, is_back)\n return rets[0]", "def get_price(item):\n return float(item[1])", "def get_cost(self, symbol) -> float:\n if len(symbol) <= 6:\n search = self.trader.stock_positions + self.trader.crypto_positions\n for p in search:\n if p['symbol'] == symbol:\n return p['avg_price']\n return None\n else:\n for p in self.trader.option_positions:\n if p['occ_symbol'] == symbol:\n return p['avg_price']", "def curProfitResponse(curPrice, prevPrice, coff):\n\treturn curProfit(curPrice, prevPrice, demandIntcpt, k1, k2, aPrInc, bPrDec, unitCost, coff)", "def get_price(self, pair='XBTZAR'):\n data = {'pair': pair}\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, None, 'ticker', query_string))\n if r.status_code == 200:\n return r.json()" ]
[ "0.70943767", "0.64979786", "0.6457114", "0.6435026", "0.6293523", "0.6200089", "0.61181283", "0.6104213", "0.6102069", "0.6028417", "0.59867626", "0.5976813", "0.5961364", "0.5941147", "0.59125704", "0.58891153", "0.58765477", "0.5851118", "0.5827735", "0.5793684", "0.57936484", "0.576881", "0.5737797", "0.57022053", "0.56954336", "0.5649099", "0.5648825", "0.56301916", "0.5622134", "0.56124854" ]
0.7192842
0
Add an asset acquisition event for the pot and count it in PnL if needed. If a custom price for the asset should be used it can be passed here via given_price. Price is always in profit currency during accounting.
def add_acquisition( self, # pylint: disable=unused-argument event_type: AccountingEventType, notes: str, location: Location, timestamp: Timestamp, asset: Asset, amount: FVal, taxable: bool, given_price: Optional[Price] = None, extra_data: Optional[dict] = None, **kwargs: Any, # to be able to consume args given by add_asset_change_event ) -> None: if amount == ZERO: # do nothing for zero acquisitions return if given_price is not None: price = given_price else: try: price = self.get_rate_in_profit_currency(asset=asset, timestamp=timestamp) except (PriceQueryUnsupportedAsset, RemoteError): price = ZERO_PRICE except NoPriceForGivenTimestamp as e: # In the case of NoPriceForGivenTimestamp when we got rate limited we let # it propagate so the user can take action after the report is made if e.rate_limited is True: raise price = ZERO_PRICE prefork_events = handle_prefork_asset_acquisitions( cost_basis=self.cost_basis, location=location, timestamp=timestamp, asset=asset, amount=amount, price=price, ignored_asset_ids=self.ignored_asset_ids, starting_index=len(self.processed_events), ) for prefork_event in prefork_events: self._add_processed_event(prefork_event) if taxable is True: taxable_amount = amount free_amount = ZERO else: taxable_amount = ZERO free_amount = amount event = ProcessedAccountingEvent( type=event_type, notes=notes, location=location, timestamp=timestamp, asset=asset, taxable_amount=taxable_amount, free_amount=free_amount, price=price, pnl=PNL(), # filled out later cost_basis=None, index=len(self.processed_events), ) if extra_data: event.extra_data = extra_data self.cost_basis.obtain_asset(event) # count profit/losses if we are inside the query period if timestamp >= self.query_start_ts and taxable: self.pnls[event_type] += event.calculate_pnl( count_entire_amount_spend=False, count_cost_basis_pnl=True, ) self._add_processed_event(event)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buy_asset(self, asset):\n from Game.models import Asset, Transaction\n asset_comms = ACommunication(settings.API_URL)\n asset = asset_comms.get_asset_quote(asset)\n total = (asset.buy * asset.quantity)\n buy = asset.buy\n sell = asset.sell\n quantity = asset.quantity\n name = asset.name\n type = asset.type\n\n if quantity <= 0:\n return {\"error\": True,\n \"message\": \"You need to buy at least one asset\"}\n\n if self.liquid_with_loans >= total:\n asset = Asset.safe_get(name=asset.name)\n # if not asset then create one\n if not asset:\n asset = Asset(name=name,\n type=type)\n asset.save()\n\n asset.quantity = quantity\n asset.buy = buy\n asset.sell = sell\n\n self.create_or_update_ownership(asset, quantity)\n\n Transaction(wallet=self, asset=asset, asset_price_buy=asset.buy,\n asset_price_sell=asset.sell,\n date=datetime.datetime.now(), quantity=quantity,\n is_purchase=True, visibility=False).save()\n\n self.liquid -= total\n self.liquid = round(self.liquid, 3)\n self.save()\n return {\"error\": False, \"message\": \"Purchase has been successful\"}\n else:\n return {\"error\": True, \"message\": \"Not enough cash\"}", "def add_spend(\n self,\n event_type: AccountingEventType,\n notes: str,\n location: Location,\n timestamp: Timestamp,\n asset: Asset,\n amount: FVal,\n taxable: bool,\n given_price: Optional[Price] = None,\n taxable_amount_ratio: FVal = ONE,\n count_entire_amount_spend: bool = True,\n count_cost_basis_pnl: bool = True,\n extra_data: Optional[dict[str, Any]] = None,\n ) -> tuple[FVal, FVal]:\n if amount == ZERO: # do nothing for zero spends\n return ZERO, ZERO\n\n if asset.is_fiat() and event_type == AccountingEventType.TRADE:\n taxable = False # for buys with fiat do not count it as taxable\n\n handle_prefork_asset_spends(\n cost_basis=self.cost_basis,\n asset=asset,\n amount=amount,\n timestamp=timestamp,\n )\n if given_price is not None:\n price = given_price\n else:\n price = self.get_rate_in_profit_currency(\n asset=asset,\n timestamp=timestamp,\n )\n\n if asset == A_KFEE:\n count_cost_basis_pnl = False\n taxable = False\n\n spend_cost = None\n if count_cost_basis_pnl:\n spend_cost = self.cost_basis.spend_asset(\n location=location,\n timestamp=timestamp,\n asset=asset,\n amount=amount,\n rate=price,\n taxable_spend=taxable,\n )\n taxable_amount = taxable_amount_ratio * amount\n free_amount = amount - taxable_amount\n if spend_cost:\n taxable_amount = spend_cost.taxable_amount\n free_amount = amount - spend_cost.taxable_amount\n\n spend_event = ProcessedAccountingEvent(\n type=event_type,\n notes=notes,\n location=location,\n timestamp=timestamp,\n asset=asset,\n taxable_amount=taxable_amount,\n free_amount=free_amount,\n price=price,\n pnl=PNL(), # filled out later\n cost_basis=spend_cost,\n index=len(self.processed_events),\n )\n if extra_data:\n spend_event.extra_data = extra_data\n # count profit/losses if we are inside the query period\n if timestamp >= self.query_start_ts and taxable:\n self.pnls[event_type] += spend_event.calculate_pnl(\n count_entire_amount_spend=count_entire_amount_spend,\n count_cost_basis_pnl=count_cost_basis_pnl,\n )\n\n self._add_processed_event(spend_event)\n return free_amount, taxable_amount", "def perform_trading(self, event : event.EventMarket)-> event.EventFilled:\n pass", "def transact_asset(self, txn):\n if txn.dt < self.current_dt:\n raise ValueError(\n 'Transaction datetime (%s) is earlier than '\n 'current portfolio datetime (%s). Cannot '\n 'transact assets.' 
% (txn.dt, self.current_dt)\n )\n self.current_dt = txn.dt\n\n txn_share_cost = txn.price * txn.quantity\n txn_total_cost = txn_share_cost + txn.commission\n\n if txn_total_cost > self.cash:\n if settings.PRINT_EVENTS:\n print(\n 'WARNING: Not enough cash in the portfolio to '\n 'carry out transaction. Transaction cost of %s '\n 'exceeds remaining cash of %s. Transaction '\n 'will proceed with a negative cash balance.' % (\n txn_total_cost, self.cash\n )\n )\n\n self.pos_handler.transact_position(txn)\n\n self.cash -= txn_total_cost\n\n # Form Portfolio history details\n direction = \"LONG\" if txn.direction > 0 else \"SHORT\"\n description = \"%s %s %s %0.2f %s\" % (\n direction, txn.quantity, txn.asset.upper(),\n txn.price, datetime.datetime.strftime(txn.dt, \"%d/%m/%Y\")\n )\n if direction == \"LONG\":\n pe = PortfolioEvent(\n dt=txn.dt, type='asset_transaction',\n description=description,\n debit=round(txn_total_cost, 2), credit=0.0,\n balance=round(self.cash, 2)\n )\n self.logger.info(\n '(%s) Asset \"%s\" transacted LONG in portfolio \"%s\" '\n '- Debit: %0.2f, Balance: %0.2f' % (\n txn.dt.strftime(settings.LOGGING[\"DATE_FORMAT\"]),\n txn.asset, self.portfolio_id,\n round(txn_total_cost, 2), round(self.cash, 2)\n )\n )\n else:\n pe = PortfolioEvent(\n dt=txn.dt, type='asset_transaction',\n description=description,\n debit=0.0, credit=-1.0 * round(txn_total_cost, 2),\n balance=round(self.cash, 2)\n )\n self.logger.info(\n '(%s) Asset \"%s\" transacted SHORT in portfolio \"%s\" '\n '- Credit: %0.2f, Balance: %0.2f' % (\n txn.dt.strftime(settings.LOGGING[\"DATE_FORMAT\"]),\n txn.asset, self.portfolio_id,\n -1.0 * round(txn_total_cost, 2), round(self.cash, 2)\n )\n )\n self.history.append(pe)", "async def onBought( # type: ignore[override]\n self, event: Event, strategy: Optional[EventHandler]\n ) -> None:\n pass", "def aveVolumeCalc(ins, date):\n cal = ins.Currency().Calendar()\n enddate = cal.AdjustBankingDays(date, 0)\n startdate = cal.AdjustBankingDays(date, AVERAGING_PERIOD)\n\n prices=[]\n histprices = acm.FPrice.Select(\"instrument = %s and market = '%s' \\\n and day > '%s' and day <='%s'\" % \n (ins.Oid(), DAILY_MARKET, startdate, enddate))\n \n for price in histprices:\n settle = price.Settle()\n if settle >= 0:\n prices.append(settle)\n \n #upgrade 2013 fix for failure during run - acm.Math().AverageOf seems buggy\n try:\n avgprice = (sum(prices)/len(prices))\n except ZeroDivisionError:\n avgprice = 0\n \n #avgprice = acm.Math().AverageOf(prices, None)\n \n #Overwrite today's price if you find it \n newPrice = acm.FPrice.Select01(\"instrument = %s and market = '%s' and day = %s\" % \n (ins.Oid(), THREE_MONTH_MARKET, enddate),\n 'NaN')\n if not newPrice:\n newPrice = acm.FPrice()\n newPrice.Instrument(ins)\n newPrice.Day(enddate)\n newPrice.Market(THREE_MONTH_MARKET)\n newPrice.Currency(ins.Currency())\n\n newPrice.Settle(avgprice)\n try:\n newPrice.Commit()\n print 'INFO: %s price for %s was created on %s' %(THREE_MONTH_MARKET, ins.Name(), date)\n except Exception, err:\n print 'ERROR: %s price for %s did not commit: %s' %(THREE_MONTH_MARKET, ins.Name(), str(err))\n \n return newPrice", "def api_asset_add(char_code: str, name: str, capital: str, interest: str):\n capital, interest = float(capital), float(interest)\n asset = Asset(char_code=char_code, name=name, capital=capital, interest=interest)\n\n if app.bank.contains(asset):\n return f\"Asset '{name}' already exists\", 403\n\n app.bank.add(asset)\n return f\"Asset '{name}' was successfully added\", 200", "def 
cp_asset_sale(self, amt: float) -> str:\n raise NotImplementedError", "def m_ts_OrderAdded(self, sender, e):\r\n print(\"Order was added with price of {0}.\".format(e.Order.LimitPrice))", "def __init__(self, asset, orderType, positionSize, entryPrice, entryBarNum, entryTime=None, exitPrice=None, exitTime = None, exitBarNum=None,enableCommission=True):\n self.asset = asset\n self.orderType = str(orderType).lower()\n self.positionSize = positionSize\n self.entryPrice = entryPrice\n self.entryTime = entryTime\n self.entryBarNum = entryBarNum\n self.exitPrice = exitPrice\n self.exitTime = exitTime\n self.exitBarNum = exitBarNum\n self.enableCommission = enableCommission", "def add_assets(char_code, name, capital, interest):\n try:\n capital = float(capital)\n interest = float(interest)\n except:\n redirect(url_for(\"page_not_found\"))\n if name in app.bank:\n abort(403)\n app.bank[name] = Asset(name, char_code, capital, interest)\n return f\"Asset '{name}' was successfully added\", 200", "def m_req_Update(self, sender, e):\r\n if e.Instrument != None and e.Error == None:\r\n # Instrument was found\r\n print(\"Found: {0}\".format(e.Instrument.Name))\r\n # Subscribe for Inside Market Data\r\n self.m_ps = ttapi.PriceSubscription(e.Instrument, ttapi.Dispatcher.Current)\r\n self.m_ps.Settings = ttapi.PriceSubscriptionSettings(ttapi.PriceSubscriptionType.InsideMarket)\r\n self.m_ps.FieldsUpdated += self.m_ps_FieldsUpdated\r\n self.m_ps.Start()\r\n elif e.IsFinal:\r\n # Instrument was not found and TT API has given up looking for it\r\n print(\"Cannot find instrument: {0}\".format(e.Error.Message))\r\n self.Dispose()", "def slot_trade(self, gox, (date, price, volume, typ, own)):\r\n pass", "def assets_publish(ctx, metadata, brizo, price, service_endpoint, timeout):\n from .api.assets import create\n response = create(metadata,\n secret_store=not brizo,\n price=price,\n service_endpoint=service_endpoint,\n timeout=timeout,\n ocean=ctx.obj['ocean'])\n echo(response)", "def test_set_asset_license(self):\n\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n asset = create_html_asset(type='text', title='Test Asset', \n body='Test content')\n story.assets.add(asset)\n story.save()\n self.assertNotEqual(story.license, 'CC BY-NC-SA')\n self.assertEqual(asset.license, '')\n story.license = 'CC BY-NC-SA'\n set_asset_license(sender=Story, instance=story)\n asset = Asset.objects.get(pk=asset.pk)\n self.assertEqual(asset.license, story.license)", "def on_trade(self, trade: TradeData):\n self.put_event()", "def on_trade(self, trade: TradeData):\n self.put_event()", "def on_trade(self, trade: TradeData):\n self.put_event()", "def on_trade(self, trade: TradeData):\n self.put_event()", "def event1920():\n header(1920)\n end_if_event_flag_on(51810980) # Malevolence obtained.\n end_if_event_flag_on(11810619) # Snuggly trade event.\n\n skip_if_event_flag_off(4, EVENT.XanthousCrownDropped)\n wait(3.0)\n flag.disable(50006770)\n flag.disable(EVENT.XanthousCrownDropped)\n item.award_item_to_host_only(ITEMLOT.JeremiahReward)\n\n if_player_owns_armor(0, ARMOR.CrownOfGold)\n if_player_does_not_own_armor(1, ARMOR.CrownOfGold)\n if_event_flag_off(1, 51810980) # Malevolence trade not picked up.\n if_condition_true(0, 1)\n flag.enable(EVENT.XanthousCrownDropped)", "async def onSold( # type: ignore[override]\n self, event: Event, strategy: Optional[EventHandler]\n ) -> None:\n pass", "def on_trade(self, trade: TradeData):\n # print(\"on_trade\")\n # print(trade)\n 
self.put_event()", "def adosc(portfolio_item,transaction_volume, buy_threshold_difference=2, sell_threshold_difference=2, period='5d',\n fastperiod=3, slowperiod=10):\n from yahooquery import Ticker\n from time import sleep\n from math import floor\n import talib\n from .TradeHistoryItem import log_trade\n from API.Help import pct_change, initialize_alpaca\n\n alpaca = initialize_alpaca()\n ticker = str(portfolio_item)\n yahoo_ticker = Ticker(ticker)\n history = yahoo_ticker.history(period=period, interval=portfolio_item.portfolio.get_trading_frequency())\n ticker_adosc = talib.ADOSC(high=history['high'], low=history['low'], close=history['close'],\n volume=history['volume'], fastperiod=fastperiod, slowperiod=slowperiod)\n ticker_adosc_pct = pct_change(ticker_adosc)\n\n # Buy when in the bottom of a dip in the chalking oscillator graph\n if ticker_adosc_pct[-2] < 0 and \\\n abs(ticker_adosc_pct[-2] - ticker_adosc_pct[-1]) > buy_threshold_difference and \\\n ticker_adosc_pct[-1] > 0 and portfolio_item.transaction_status != portfolio_item.BUY:\n if portfolio_item.transaction_status == 2: # only buy to cover if stock has been shorted before\n print('buying to cover {} shares of {}'.format(transaction_volume, ticker))\n alpaca.submit_order(ticker, transaction_volume, 'buy', 'market', 'day')\n portfolio_item.buy_to_cover(transaction_volume=transaction_volume)\n log_trade(portfolio_item=portfolio_item, transaction_volume=transaction_volume, transaction_type=2)\n sleep(1) # hopefully combats 403 alpaca error\n print('buying {} shares of {}'.format(transaction_volume, ticker))\n alpaca.submit_order(ticker, transaction_volume, 'buy', 'market', 'day')\n portfolio_item.buy(transaction_volume=transaction_volume)\n log_trade(portfolio_item=portfolio_item, transaction_volume=transaction_volume, transaction_type=0)\n\n # Sell at a tip in chaikin oscillator\n elif ticker_adosc_pct[-2] > 0 and \\\n abs(ticker_adosc_pct[-2] - ticker_adosc_pct[-1]) > sell_threshold_difference and \\\n ticker_adosc_pct[-1] < 0:\n if portfolio_item.transaction_status == portfolio_item.BUY: # making sure stock exists before selling it\n print('selling {} shares of {}'.format(transaction_volume, ticker))\n alpaca.submit_order(ticker, transaction_volume, 'sell', 'market', 'day')\n portfolio_item.sell(transaction_volume=transaction_volume)\n log_trade(portfolio_item=portfolio_item, transaction_volume=transaction_volume, transaction_type=1)\n sleep(1)\n if portfolio_item.transaction_status != portfolio_item.SHORT: # make sure we dont short twice in a row\n transaction_volume = floor(portfolio_item.cash_allocated / (\n portfolio_item.ticker.price_now * 1.1)) # gives us a 10% buffer if the stock goes the other way\n print('shorting {} shares of {}'.format(transaction_volume, ticker))\n alpaca.submit_order(ticker, transaction_volume, 'sell', 'market', 'day')\n portfolio_item.short(transaction_volume=transaction_volume)\n log_trade(portfolio_item=portfolio_item, transaction_volume=transaction_volume, transaction_type=3)\n # Add other indicators to aid this oscillator, correlation between this and aroon, fall at the same time there is\n # actually a dip\n # MFI, combined with chaikin shows good opportunity to buy", "def can_i_afford_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n\n slots = handler_input.request_envelope.request.intent.slots\n print(f\"Slots: {slots}\")\n purchase = slots['purchase'].value.lower()\n print(f\"purchase: {purchase}\")\n\n monzo = MonzoGetter(ACCESS_TOKEN)\n monthly_spend = 
monzo.get_monthly_spend_pounds()\n\n try:\n price = price_lookup_pounds[purchase]\n if price > (MONTHLY_BUDGET - monthly_spend):\n speech_text = f\"Sorry, you can't afford this. A {purchase} \" \\\n f\"costs about {price} pounds. You've already spent \" \\\n f\"{monthly_spend} pounds this month.\"\n else:\n remaining = MONTHLY_BUDGET - monthly_spend - price\n speech_text = f\"You can afford that. A {purchase} costs about \" \\\n f\"{price} pounds. If you buy it your remaining \" \\\n f\"monthly budget will be {remaining}\"\n except KeyError:\n # Just in case....\n speech_text = \"Sorry, we couldn't find a price for that product.\" \\\n f\"You have {MONTHLY_BUDGET - monthly_spend} pounds\" \\\n \" left to spend this month\"\n\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Hello World\", speech_text)).set_should_end_session(\n False)\n return handler_input.response_builder.response", "def _initalize_portfolio_with_cash(self):\n self.cash = copy.copy(self.starting_cash)\n\n if self.starting_cash > 0.0:\n self.history.append(\n PortfolioEvent.create_subscription(\n self.current_dt, self.starting_cash, self.starting_cash\n )\n )\n\n self.logger.info(\n '(%s) Funds subscribed to portfolio \"%s\" '\n '- Credit: %0.2f, Balance: %0.2f' % (\n self.current_dt.strftime(settings.LOGGING[\"DATE_FORMAT\"]),\n self.portfolio_id,\n round(self.starting_cash, 2),\n round(self.starting_cash, 2)\n )\n )", "async def on_trade_accept(self, trade: \"steam.TradeOffer\") -> None:", "def add_appliance(itemcode, description, marketprice, rentalprice):\n\n itembrand = input(\"Enter item brand: \")\n itemvoltage = input(\"Enter item voltage: \")\n newitem = ElectricAppliances \\\n (itemcode, description, marketprice, rentalprice,\n itembrand, itemvoltage)\n\n FULLINVENTORY[itemcode] = newitem.returnasdictionary()\n print(\"New inventory item added\")", "def before_trading_start(context, data):\r\n context.output = pipeline_output('pipeline')\r\n\r\n # sort by earning yield\r\n context.output = context.output.sort(\r\n columns='Free Cash Flow', ascending=False)\r\n\r\n # get top 20 stocks as security list\r\n context.eligible_assets = context.output.iloc[:19]", "def buy(self, price, volume):\r\n self.order(\"bid\", price, volume)", "def what_if(self, account_name, asset_name, delta):\n account = self.get_account(account_name)\n asset = account.get_asset(asset_name)\n asset.what_if(delta)\n # We take the money out of account.\n account.add_cash(-delta)" ]
[ "0.5709984", "0.5651042", "0.512311", "0.511978", "0.5055525", "0.50209546", "0.49926344", "0.4990619", "0.49385113", "0.4801994", "0.47719896", "0.47582433", "0.4751332", "0.47279665", "0.47041407", "0.4679356", "0.4679356", "0.4679356", "0.4679356", "0.46743053", "0.46616942", "0.46587616", "0.46549225", "0.46359172", "0.46166578", "0.46093658", "0.46020818", "0.45995733", "0.4597147", "0.45874307" ]
0.68399614
0
Add an asset spend event for the pot and count it in PnL if needed. If a custom price for the asset should be used it can be passed here via given_price. Price is always in profit currency during accounting. If taxable_amount_ratio is given then this is how we initialize the taxable and free amounts in the case of missing cost basis. By default it's all taxable. If count_entire_amount_spend is True then the entire amount is counted as a spend, which means an expense (negative pnl). If count_cost_basis_pnl is True then we also count any profit/loss the asset may have had compared to when it was acquired. Returns (free, taxable) amounts.
def add_spend( self, event_type: AccountingEventType, notes: str, location: Location, timestamp: Timestamp, asset: Asset, amount: FVal, taxable: bool, given_price: Optional[Price] = None, taxable_amount_ratio: FVal = ONE, count_entire_amount_spend: bool = True, count_cost_basis_pnl: bool = True, extra_data: Optional[dict[str, Any]] = None, ) -> tuple[FVal, FVal]: if amount == ZERO: # do nothing for zero spends return ZERO, ZERO if asset.is_fiat() and event_type == AccountingEventType.TRADE: taxable = False # for buys with fiat do not count it as taxable handle_prefork_asset_spends( cost_basis=self.cost_basis, asset=asset, amount=amount, timestamp=timestamp, ) if given_price is not None: price = given_price else: price = self.get_rate_in_profit_currency( asset=asset, timestamp=timestamp, ) if asset == A_KFEE: count_cost_basis_pnl = False taxable = False spend_cost = None if count_cost_basis_pnl: spend_cost = self.cost_basis.spend_asset( location=location, timestamp=timestamp, asset=asset, amount=amount, rate=price, taxable_spend=taxable, ) taxable_amount = taxable_amount_ratio * amount free_amount = amount - taxable_amount if spend_cost: taxable_amount = spend_cost.taxable_amount free_amount = amount - spend_cost.taxable_amount spend_event = ProcessedAccountingEvent( type=event_type, notes=notes, location=location, timestamp=timestamp, asset=asset, taxable_amount=taxable_amount, free_amount=free_amount, price=price, pnl=PNL(), # filled out later cost_basis=spend_cost, index=len(self.processed_events), ) if extra_data: spend_event.extra_data = extra_data # count profit/losses if we are inside the query period if timestamp >= self.query_start_ts and taxable: self.pnls[event_type] += spend_event.calculate_pnl( count_entire_amount_spend=count_entire_amount_spend, count_cost_basis_pnl=count_cost_basis_pnl, ) self._add_processed_event(spend_event) return free_amount, taxable_amount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_acquisition(\n self, # pylint: disable=unused-argument\n event_type: AccountingEventType,\n notes: str,\n location: Location,\n timestamp: Timestamp,\n asset: Asset,\n amount: FVal,\n taxable: bool,\n given_price: Optional[Price] = None,\n extra_data: Optional[dict] = None,\n **kwargs: Any, # to be able to consume args given by add_asset_change_event\n ) -> None:\n if amount == ZERO: # do nothing for zero acquisitions\n return\n\n if given_price is not None:\n price = given_price\n else:\n try:\n price = self.get_rate_in_profit_currency(asset=asset, timestamp=timestamp)\n except (PriceQueryUnsupportedAsset, RemoteError):\n price = ZERO_PRICE\n except NoPriceForGivenTimestamp as e:\n # In the case of NoPriceForGivenTimestamp when we got rate limited we let\n # it propagate so the user can take action after the report is made\n if e.rate_limited is True:\n raise\n price = ZERO_PRICE\n\n prefork_events = handle_prefork_asset_acquisitions(\n cost_basis=self.cost_basis,\n location=location,\n timestamp=timestamp,\n asset=asset,\n amount=amount,\n price=price,\n ignored_asset_ids=self.ignored_asset_ids,\n starting_index=len(self.processed_events),\n )\n for prefork_event in prefork_events:\n self._add_processed_event(prefork_event)\n\n if taxable is True:\n taxable_amount = amount\n free_amount = ZERO\n else:\n taxable_amount = ZERO\n free_amount = amount\n event = ProcessedAccountingEvent(\n type=event_type,\n notes=notes,\n location=location,\n timestamp=timestamp,\n asset=asset,\n taxable_amount=taxable_amount,\n free_amount=free_amount,\n price=price,\n pnl=PNL(), # filled out later\n cost_basis=None,\n index=len(self.processed_events),\n )\n if extra_data:\n event.extra_data = extra_data\n self.cost_basis.obtain_asset(event)\n # count profit/losses if we are inside the query period\n if timestamp >= self.query_start_ts and taxable:\n self.pnls[event_type] += event.calculate_pnl(\n count_entire_amount_spend=False,\n count_cost_basis_pnl=True,\n )\n\n self._add_processed_event(event)", "def envisaged_profit(self):\n profit = round(\n self.calcul_buy_nb_action() * self.take_profit - self.investment_price(),\n 2,\n )\n percent_profit = round(profit * 100 / self.capital, 2)\n return profit, percent_profit", "def buy_cost(self, buy_price, count):\n fee = 20 if math.floor(count*buy_price*1000*self.fee_count*self.handling_fee) <= 20 else math.ceil(count*buy_price*1000*self.fee_count*self.handling_fee)\n return int(buy_price*1000*count+fee)", "def profit_per_item(self, pk=None):\n total_profit = 0\n total_cost = self.item_cost + self.shipping_cost + self.listing_fee + self.final_value_fee\n total_paid = self.shipping_paid + self.item_paid\n total_profit = total_paid - total_cost\n return total_profit", "def buy_asset(self, asset):\n from Game.models import Asset, Transaction\n asset_comms = ACommunication(settings.API_URL)\n asset = asset_comms.get_asset_quote(asset)\n total = (asset.buy * asset.quantity)\n buy = asset.buy\n sell = asset.sell\n quantity = asset.quantity\n name = asset.name\n type = asset.type\n\n if quantity <= 0:\n return {\"error\": True,\n \"message\": \"You need to buy at least one asset\"}\n\n if self.liquid_with_loans >= total:\n asset = Asset.safe_get(name=asset.name)\n # if not asset then create one\n if not asset:\n asset = Asset(name=name,\n type=type)\n asset.save()\n\n asset.quantity = quantity\n asset.buy = buy\n asset.sell = sell\n\n self.create_or_update_ownership(asset, quantity)\n\n Transaction(wallet=self, asset=asset, asset_price_buy=asset.buy,\n 
asset_price_sell=asset.sell,\n date=datetime.datetime.now(), quantity=quantity,\n is_purchase=True, visibility=False).save()\n\n self.liquid -= total\n self.liquid = round(self.liquid, 3)\n self.save()\n return {\"error\": False, \"message\": \"Purchase has been successful\"}\n else:\n return {\"error\": True, \"message\": \"Not enough cash\"}", "def transact_shares(self, action, quantity, price, commission, bid=None, ask=None):\n if bid is None: \n bid = price\n if ask is None:\n ask = price\n\n if action is None:\n return\n\n self.total_commission += commission\n\n # Adjust total bought and sold\n if action == \"BOT\":\n self.avg_bot = (self.avg_bot * self.buys + price * quantity) / (self.buys + quantity)\n\n if self.net < 0:\n self.realised_pnl += min(quantity, abs(self.net)) * (self.avg_price - price) - commission # Adjust realised PNL\n commission = 0 # assume commission is all in realised_pnl\n # Increasing long position\n self.avg_price = (self.avg_price * self.net + price * quantity + commission) / (self.net + quantity)\n self.buys += quantity\n self.total_bot = self.buys * self.avg_bot\n\n # action == \"SLD\"\n else:\n self.avg_sld = (self.avg_sld * self.sells + price * quantity) / (self.sells + quantity)\n\n if self.net > 0:\n self.realised_pnl += min(quantity, abs(self.net)) * (price - self.avg_price) - commission # Adjust realised PNL\n commission = 0 # assume commission is all in realised_pnl\n\n self.avg_price = (self.avg_price * self.net - price * quantity - commission) / (self.net - quantity)\n self.sells += quantity\n self.total_sld = self.sells * self.avg_sld\n\n # Adjust net values, including commissions\n self.net = self.buys - self.sells\n self.net_total = self.total_sld - self.total_bot\n self.net_incl_comm = self.net_total - self.total_commission\n self.cost_basis = self.net * self.avg_price\n\n self.update_market_value(bid, ask)", "def profit_per_item_percentage(self, pk=None):\n total_profit_percentage = 0\n total_cost = self.item_cost + self.shipping_cost + self.listing_fee + self.final_value_fee\n total_paid = self.shipping_paid + self.item_paid\n total_profit_percentage = round(100*((total_paid - total_cost) / total_cost), 2)\n return total_profit_percentage", "def calculate_profit(self):", "def calculate_buy_price(price: float):\n return round(price / (1 + CONF.trade_advantage_in_percent / 100), 1)", "def update_profitability(self, currency):\n jobmanager = self.jobmanagers[currency]\n last_job = jobmanager.latest_job\n pscore, ratio, _ = self.price_data[currency]\n # We can't update if we don't have a job and profit data\n if last_job is None or pscore is None:\n return False\n\n max_blockheight = jobmanager.config['max_blockheight']\n if max_blockheight is not None and last_job.block_height >= max_blockheight:\n self.profit_data[currency] = 0\n self.logger.debug(\n \"{} height {} is >= the configured maximum blockheight of {}, \"\n \"setting profitability to 0.\"\n .format(currency, last_job.block_height, max_blockheight))\n return True\n\n block_value = last_job.total_value / 100000000.0\n diff = bits_to_difficulty(hexlify(last_job.bits))\n\n self.profit_data[currency] = (block_value * float(pscore) / diff) * ratio * 1000000\n self.logger.debug(\n \"Updating {} profit data;\\n\\tblock_value {};\\n\\tavg_price {:,.8f}\"\n \";\\n\\tdiff {};\\n\\tratio {};\\n\\tresult {}\"\n .format(currency, block_value, float(pscore), diff,\n ratio, self.profit_data[currency]))\n self.manager.log_event(\"{name}.profitability.{curr}:{metric}|g\"\n 
.format(name=self.manager.config['procname'],\n curr=currency,\n metric=self.profit_data[currency]))\n return True", "def get_sell_amount_from_buy_amount(\n self, prices, fee, arith_traits\n ):\n buy_token_price = prices[self.buy_token]\n sell_token_price = prices[self.sell_token]\n\n if buy_token_price and sell_token_price:\n xrate = F(buy_token_price, sell_token_price)\n return arith_traits.compute_sell_from_buy_amount(\n buy_amount=self.buy_amount,\n xrate=xrate,\n buy_token_price=buy_token_price,\n fee=fee\n )\n else:\n assert self.buy_amount == 0\n return 0", "def amount_to_pay_in_period(self):\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n period_start, period_end = self.get_current_period()\n price_per_day = (\n self.get_price_for_full_period() / (period_end - period_start).days\n )\n days_not_used = 30 * self.frequency - (date.today() - period_start).days\n return int(price_per_day * days_not_used)", "def pay_costs(self):\r\n cost = self.cost\r\n if cost:\r\n self.owner.player.char_ob.pay_money(cost)\r\n self.owner.player.msg(\"You pay %s coins for the event.\" % cost)", "def sell_cost(self, sell_price, count):\n\n g_cost = math.floor(self.g_fee * sell_price * 1000 * count)\n handling_cost = math.ceil(self.handling_fee * self.fee_count * sell_price * 1000 * count)\n new_fee = g_cost + handling_cost\n print(sell_price, self.g_fee, self.handling_fee, self.fee_count, new_fee)\n return int(sell_price*1000*count-new_fee)", "def shareholder_equity_to_total_assets(self):\n balance_sheet = self.stock.balance_sheet_dict\n\n # Check for Null values first\n # TODO: make the note more specific\n if 'Total Assets' not in balance_sheet or 'Total Liabilities' not in balance_sheet:\n self.stock.append_calc_result('At least 50% equity to assets ratio?', 'N/A', 'N/A', 'Not enough data found')\n return\n\n value = (balance_sheet['Total Assets'] - balance_sheet['Total Liabilities']) / balance_sheet['Total Assets']\n criteria_passed = ''\n if value >= 0.5:\n criteria_passed = 'Yes'\n elif value < 0.5:\n criteria_passed = 'No'\n\n self.stock.append_calc_result('At least 50% equity to assets ratio?', round(value, 2), criteria_passed, '')", "def amount_already_paid_in_period(self):\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n period_start, period_end = self.get_current_period()\n price_per_day = (\n self.get_price_for_full_period() / (period_end - period_start).days\n )\n days_already_used = (date.today() - period_start).days\n amount = int(price_per_day * days_already_used)\n if amount > self.get_price_for_full_period():\n amount = self.get_price_for_full_period()\n if amount < 0:\n amount = 0\n return amount", "def aggregate_offer_qty(self, trade_price):\n qty = 0\n for i in range(len(self.offer)):\n # print(\"trade_price = {} offer[{}] = {}\".format(trade_price, i, self.offer[i].price))\n if self.offer[i].price <= trade_price:\n qty += self.offer[i].qty\n # print(\"Running qty = {}\".format(qty))\n return qty", "def cp_asset_sale(self, amt: float) -> str:\n raise NotImplementedError", "def calc_annual_heating_savings (self):\n price = (self.diesel_prices + self.cd['heating fuel premium'])\n\n #~ self.base_heating_cost =\n\n #~ self.proposed_heating_cost =\n\n\n\n\n self.annual_heating_savings = self.reduction_diesel_used * price\n #~ print 'self.annual_heating_savings',self.annual_heating_savings", "def calc_profit(self, assignment):\n return sum([self.profit(agent, task)\n for agent, tasks in assignment.items() \n for task 
in tasks])", "def volatility_efficiency(asset, strategy):\n perf = performances(asset, strategy)\n strat_perf = np.cumsum(perf)\n\n buy_hold_perf = np.cumsum(asset['forward_returns'])\n\n strat_v_bh = strat_perf[-1] / buy_hold_perf[-1]\n\n return strat_v_bh / time_in_market(strategy)", "def _compute_amount(self, base_amount, price_unit, quantity=1.0, product=None, partner=None):\n\n self.ensure_one()\n\n if self.amount_type != 'margin':\n return super(AccountTax, self)._compute_amount(\n base_amount,\n price_unit,\n quantity=quantity,\n product=product,\n partner=partner\n )\n\n return base_amount - (base_amount / (1 + self.amount / 100))", "def trade_offer(self, price):\n if self.is_sold:\n raise AlreadySoldError(\"Electricity product already sold\")\n\n if self.remaining_slots <= 0:\n raise LeadtimePassedError(\"Lead time passed\")\n\n succesful_trade = random.random() < self.selling_chance(price)\n self.remaining_slots -= 1\n\n if succesful_trade:\n profit = price - self.product_price\n self.is_sold = True\n elif self.remaining_slots == 0:\n profit = -self.product_price\n else:\n profit = 0\n\n return (profit, self.is_sold)", "def get_rate_in_profit_currency(self, asset: Asset, timestamp: Timestamp) -> Price:\n if asset == self.profit_currency:\n rate = Price(ONE)\n else:\n rate = PriceHistorian().query_historical_price(\n from_asset=asset,\n to_asset=self.profit_currency,\n timestamp=timestamp,\n )\n return rate", "def simulate_trading(prices, actions, cost_per_trade=0.02):\n pnl = 0\n position = 0\n market_price = prices.market_price.values\n buy_price = np.maximum(prices.bid_price, prices.ask_price).values\n sell_price = np.minimum(prices.bid_price, prices.ask_price).values\n\n for i in range(len(actions)):\n if i > 0:\n pnl += position * (market_price[i] - market_price[i - 1])\n\n if actions[i] == 1:\n pnl -= cost_per_trade\n pnl -= buy_price[i]\n pnl += market_price[i]\n position += 1\n elif actions[i] == -1:\n pnl -= cost_per_trade\n pnl += sell_price[i]\n pnl -= market_price[i]\n position -= 1\n\n return pnl / len(actions)", "def total(self, desired_period: int = 12):\n self._trigger_gather()\n result = Decimal(0)\n for item in self.elements:\n result += item.income.amount(desired_period)\n return(Decimal(result))", "def total(self, desired_period: int = 12):\n self._trigger_gather()\n result = Decimal(0)\n for item in self.elements:\n result += item.income.amount(desired_period)\n return(Decimal(result))", "def buy(self,\n currency_pair,\n rate,\n amount):\n pass", "def total_spent(self):\n total_sum = Order.objects.filter(\n email=self.email).aggregate(\n Sum('total_price')\n ).get('total_price__sum')\n return round(total_sum, 4) if total_sum else 0", "def _update_profit(self, cost: float):\n\n if cost > 0:\n self._total_debit += cost\n else:\n self._total_credit += -(cost)" ]
[ "0.5035324", "0.49876693", "0.49540687", "0.493301", "0.49177417", "0.49021614", "0.4851298", "0.48422468", "0.4785735", "0.47805077", "0.4779018", "0.47449985", "0.4704121", "0.46818066", "0.46495992", "0.46223575", "0.46067038", "0.46018207", "0.4594187", "0.4564389", "0.45502788", "0.4536954", "0.45172995", "0.44971567", "0.44947317", "0.44647115", "0.44647115", "0.44468454", "0.44388247", "0.44366133" ]
0.74859834
0
Calculates the prices for assets going in and out of a swap/trade.
def get_prices_for_swap( self, timestamp: Timestamp, amount_in: FVal, asset_in: Asset, amount_out: FVal, asset_out: Asset, fee_info: Optional[tuple[FVal, Asset]], ) -> Optional[tuple[Price, Price]]: if ZERO in (amount_in, amount_out): log.error( f'At get_prices_for_swap got a zero amount. {asset_in=} {amount_in=} ' f'{asset_out=} {amount_out=}. Skipping ...') return None try: out_price = self.get_rate_in_profit_currency( asset=asset_out, timestamp=timestamp, ) except (PriceQueryUnsupportedAsset, NoPriceForGivenTimestamp, RemoteError): out_price = None try: in_price = self.get_rate_in_profit_currency( asset=asset_in, timestamp=timestamp, ) except (PriceQueryUnsupportedAsset, RemoteError): in_price = None except NoPriceForGivenTimestamp as e: in_price = None if e.rate_limited is True and out_price is None: raise # in_price = out_price = None -> notify user if fee_info is not None: try: fee_price = self.get_rate_in_profit_currency( asset=fee_info[1], timestamp=timestamp, ) except (PriceQueryUnsupportedAsset, RemoteError): fee_price = None # Determine whether to use `out_price` or `in_price` for calculations price_to_use: Literal['in', 'out'] if asset_out.is_fiat() and asset_out is not None: price_to_use = 'out' # Use `out_price` if `asset_out` is fiat elif asset_in.is_fiat() and asset_in is not None: price_to_use = 'in' # Use `in_price` if `asset_in` is fiat elif out_price is not None: price_to_use = 'out' # Prefer `out_price` over `in_price` elif in_price is not None: price_to_use = 'in' else: # Can't proceed if there is no price known return None if price_to_use == 'in': total_paid = amount_in * in_price # type: ignore[operator] # in_price is not None else: total_paid = amount_out * out_price # type: ignore[operator] # out_price is not None if asset_in.is_fiat(): if fee_info is not None and fee_price is not None: total_paid -= fee_price * fee_info[0] # Subtract fee from cost basis calculated_out_price = Price(total_paid / amount_out) if price_to_use == 'in': calculated_in_price = in_price else: calculated_in_price = Price((amount_out * out_price) / amount_in) # type: ignore[operator] # out_price is not None # noqa: E501 else: # if asset_out is fiat or both assets are crypto or both are fiat if fee_info is not None and fee_price is not None: total_paid += fee_price * fee_info[0] # Add fee to cost basis calculated_in_price = Price(total_paid / amount_in) if price_to_use == 'out': calculated_out_price = out_price # type: ignore[assignment] # out_price is not None # noqa: E501 else: calculated_out_price = Price((amount_in * in_price) / amount_out) # type: ignore[operator] # in_price is not None # noqa: E501 return (calculated_out_price, calculated_in_price) # type: ignore[return-value] # calculated_in_price is not None # noqa: E501
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_market_prices(prices):\n denom = prices.bid_volume + prices.ask_volume\n numer = (prices.bid_price * prices.ask_volume +\n prices.ask_price * prices.bid_volume)\n mask = denom == 0\n denom[mask] = 2\n numer[mask] = prices.bid_price[mask] + prices.ask_price[mask]\n prices = prices.copy()\n prices['market_price'] = numer / denom\n return prices", "def _calc_return(self, order_original, perf_df):\r\n\r\n order = order_original.copy()\r\n no_sec = len(self.perf_data)\r\n price_names = np.array(['price_' + str(i) for i in xrange(1, no_sec + 1)])\r\n ret = np.zeros((np.shape(order)[0], no_sec))\r\n\r\n transaction_cost = 0\r\n\r\n # buy_list vs sell_list contains order bought vs sold that cannot be matched yet to determine the return\r\n # For example when something has been bought, but nothing or not enough has been sold yet, the residue will be\r\n # listed in these lists.\r\n buy_shares = np.zeros((np.shape(order)[0], no_sec))\r\n buy_price = np.zeros((np.shape(order)[0], no_sec))\r\n sell_shares = np.zeros((np.shape(order)[0], no_sec))\r\n sell_price = np.zeros((np.shape(order)[0], no_sec))\r\n\r\n # bl_first vs sl_first indicates which row in buy_list vs sell_list can be used to \"match\" bought/sold shares.\r\n # It automatically points to the oldest row with still outstanding shares. Initial value is -1\r\n # bl_last vs sl_last indicates which row in buy_list vs sell_list can be used to write outstanding shares to.\r\n bl_first = np.ones(no_sec).astype(int) * -1\r\n bl_last = np.zeros(no_sec).astype(int)\r\n sl_first = np.ones(no_sec).astype(int) * -1\r\n sl_last = np.zeros(no_sec).astype(int)\r\n\r\n for ind in range(0, np.shape(order)[0]):\r\n bl_first[(bl_first == -1) & (bl_last > 0)] = 0\r\n sl_first[(sl_first == -1) & (sl_last > 0)] = 0\r\n\r\n # Three situations, per type: buy, sell, nothing\r\n # If nothing, skip to next day\r\n # Only returns made on one day are determined, later they will be accumulated.\r\n\r\n # Situation A.A: Sell order & outstanding buys larger than sell order\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) > -order[ind, :])\r\n if sum(col_to_change) != 0:\r\n share_cumsum = np.cumsum(buy_shares, 0)\r\n share_compl = (share_cumsum < -order[ind, :]) & col_to_change\r\n numb_shares = sum(buy_shares * share_compl, 0)[col_to_change]\r\n ret[ind, col_to_change] += numb_shares * perf_df.loc[ind, price_names[col_to_change]] \\\r\n - sum(buy_shares * buy_price * share_compl, 0)[col_to_change]\r\n buy_shares[share_compl] = 0\r\n bl_first += sum(share_compl)\r\n order[col_to_change] += numb_shares\r\n\r\n ret[ind, col_to_change] += perf_df.loc[ind, price_names[col_to_change]] * -order[ind, col_to_change] * (1 - transaction_cost) \\\r\n - buy_price[bl_first[col_to_change], col_to_change] \\\r\n * -order[ind, col_to_change] * (1 + transaction_cost)\r\n buy_shares[bl_first[col_to_change], col_to_change] += order[ind, col_to_change]\r\n order[ind, col_to_change] = 0\r\n\r\n # Situation A.B: Sell order & outstanding buys smaller than or equal to sell order\r\n # --> just fill out all outstanding buys, and change order. 
This order will be added to sell list in A.C\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) > 0) \\\r\n & (np.sum(buy_shares, 0) <= -order[ind, :])\r\n if sum(col_to_change) != 0:\r\n numb_shares = buy_shares[:, col_to_change]\r\n price_shares = buy_price[:, col_to_change]\r\n ret[ind, col_to_change] += np.sum(numb_shares, 0) * \\\r\n perf_df.loc[ind, price_names[col_to_change]].values * (1 - transaction_cost) \\\r\n - np.sum(numb_shares * price_shares, 0) * (1 + transaction_cost)\r\n order[ind, col_to_change] += np.sum(numb_shares, 0)\r\n buy_shares[:, col_to_change] = 0\r\n bl_first[col_to_change] = bl_last[col_to_change] - 1\r\n\r\n # Situation A.C: Sell order & no outstanding buys\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) == 0)\r\n if sum(col_to_change) != 0:\r\n row_to_change = bl_last[col_to_change]\r\n sell_shares[row_to_change, col_to_change] = -order[ind, col_to_change]\r\n sell_price[row_to_change, col_to_change] = perf_df.loc[ind, price_names[col_to_change]]\r\n sl_last[col_to_change] += 1\r\n\r\n # Situation B.A: Buy order & outstanding sells larger than buy order\r\n col_to_change = (order[ind, :] > 0) & (np.sum(sell_shares, 0) > order[ind, :])\r\n if sum(col_to_change) != 0:\r\n share_cumsum = np.cumsum(sell_shares, 0)\r\n share_compl = (share_cumsum < order[ind, :]) & col_to_change\r\n numb_shares = sum(sell_shares * share_compl, 0)[col_to_change]\r\n ret[ind, col_to_change] += sum(sell_shares * sell_price * share_compl, 0)[col_to_change] * (1 - transaction_cost)\\\r\n - numb_shares * perf_df.loc[ind, price_names[col_to_change]] * (1 + transaction_cost)\r\n sell_shares[share_compl] = 0\r\n sl_first += sum(share_compl)\r\n order[col_to_change] += -numb_shares\r\n\r\n ret[ind, col_to_change] += sell_price[sl_first[col_to_change], col_to_change] * order[ind, col_to_change] * (1 - transaction_cost)\\\r\n - perf_df.loc[ind, price_names[col_to_change]] * order[ind, col_to_change] * (1 + transaction_cost)\r\n sell_shares[sl_first[col_to_change], col_to_change] += -order[ind, col_to_change]\r\n order[ind, col_to_change] = 0\r\n\r\n # Situation B.B: Buy order & outstanding sells smaller than buy order\r\n # --> just fill out all outstanding sells, and change order. 
This order will be added to buy list in B.C\r\n col_to_change = (order[ind, :] > 0) & \\\r\n (np.sum(sell_shares, 0) > 0) & (np.sum(sell_shares, 0) <= order[ind, :])\r\n if sum(col_to_change) != 0:\r\n numb_shares = sell_shares[:, col_to_change]\r\n price_shares = sell_price[:, col_to_change]\r\n ret[ind, col_to_change] += np.sum(numb_shares * price_shares, 0) * (1 - transaction_cost) \\\r\n - np.sum(numb_shares, 0) * perf_df.loc[ind, price_names[col_to_change]] * (1 + transaction_cost)\r\n order[ind, col_to_change] += -np.sum(numb_shares, 0)\r\n sell_shares[:, col_to_change] = 0\r\n sl_first[col_to_change] = sl_last[col_to_change] - 1\r\n\r\n # Situation B.C: Buy order & no outstanding sells\r\n col_to_change = (order[ind, :] > 0) & (np.sum(sell_shares, 0) == 0)\r\n if sum(col_to_change) != 0:\r\n row_to_change = bl_last[col_to_change]\r\n buy_shares[row_to_change, col_to_change] = order[ind, col_to_change]\r\n buy_price[row_to_change, col_to_change] = perf_df.loc[ind, price_names[col_to_change]]\r\n bl_last[col_to_change] += 1\r\n\r\n ret_abs = np.array([sum(ret[:r]) for r in range(1, len(ret) + 1)])\r\n returns_abs = np.sum(ret_abs, 1)\r\n returns_rel = [i / self.context['max_notional'] + 1 for i in returns_abs]\r\n\r\n return returns_rel, returns_abs, ret_abs", "def ma_vs_buyandhold(n1, n2, prices, starting_money):\n\n # Moving averages\n ma1 = moving_average(prices, n1)\n ma2 = moving_average(prices, n2)\n\n # Crossovers\n cos = cross_overs(ma1, ma2)\n\n # Make trades using crossover strategy, get list of values\n ma_values = make_trades(starting_money, prices, cos)\n\n # Get buy and hold strategy values\n first_value = prices[n2 - 1] # start trading at the same point in time as ma strategy\n \n # Buy and hold value: start trading from period n2\n # List comprehension for convenient looping\n bh_values = [starting_money for p in prices[:n2]] # Before trading starts\n bh_values += [starting_money * p / first_value for p in prices[n2:]] # After trading starts \n\n print(\"Buy and hold: \" + str(bh_values[-1]))\n print(\"Crossover MA: \" + str(ma_values[-1]))\n\n return [bh_values, ma_values]", "def __calculate_portfolio_returns(self):\n\n p_bar = tqdm(range(1), desc=\" Calculating returns\", leave=False)\n\n trade_data = self.historical_trade_data\n\n # Helper functions to calculate cash inflow and outflow\n def f_min(x):\n return x.apply(lambda x: min(x, 0))\n\n def f_max(x):\n return x.apply(lambda x: max(x, 0))\n\n # Calculate cash inflow and outflow\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash inflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = -1 * trade_data[\"Investment delta\"][:].apply(lambda x: f_min(x), axis=0)\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash outflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = trade_data[\"Investment delta\"][:].apply(lambda x: f_max(x), axis=1)\n\n # Calculate period return\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period absolute return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) - (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n )\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period percentage return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) / (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n ) - 1\n\n trade_data[\"Period percentage return\"].fillna(0, inplace=True)\n\n 
self.historical_trade_data = trade_data\n\n self.portfolio_returns = self.historical_trade_data[\"Period percentage return\"][\n \"Total\"\n ]\n\n p_bar.n += 1\n p_bar.refresh()", "def entrycalc(self, lows, o):\n price = float(self.price)\n \n #print(nextTrade==price,nextTradeSeller==price)\n for i in range(2, self.entries + 1):\n if len(self.entryprices) > 0:\n avgentryprice = sum(self.entryprices) / len(self.entryprices)\n #if previous entry has been placed and current hasn't and other args are met\n if self.dentry[\"placedOrder\" + str(i - 1) + self.chartnumber] and price < avgentryprice and float(price) < lows[-2] and float(price) < float(o) and not self.dentry[\"placedOrder\" + str(i) + self.chartnumber]:\n self.dentry[\"placedOrder\" + str(i) + self.chartnumber] = True\n #add these to dict\n print(\"trade number\",str(i))\n self.dentry[\"tradeEntries\" + str(i) + self.chartnumber] += 1\n #self.totalentries += 1\n \n #I changed these from price to nextTrade\n self.dentry[\"orderPrice\" + str(i) + self.chartnumber] = price\n #self.dentry[\"orderPrice\" + str(i) + chartnumber] = self.nextTrade\n \n #altbuy = int(self.dentry[\"buy\" + str(i) + chartnumber] / price)\n altbuy = int(self.dentry[\"buy\" + str(i) + self.chartnumber] / self.nextTrade)\n \n #self.availablebase -= altbuy * price\n self.availablebase -= altbuy * self.nextTrade\n altbuy -= altbuy * .001\n self.amtofalt += altbuy\n ###HOW LONG TO WE WANT ENTRYPRICES TO BE??\n \n #self.entryprices.append(price)\n self.entryprices.append(self.nextTrade)\n if self.graphics:\n self.graph.buy(self.masterDick[\"currentPrice\" + self.chartnumber], self.masterDick[\"count\" + self.chartnumber], self.chartnumber, i)\n #print(\"Fun:\",self.amtofalt)\n print(\"Buy\" + str(i),self.dentry[\"buy\" + str(i) + self.chartnumber])\n break", "def calculate_prices(self, good=None):\n\n stock = self.calculate_init_stock(good)\n buy = self.buying_price()\n\n if stock == 0:\n sell = 0\n buy = buy + (buy * 0.5)\n\n elif stock < 500:\n # mild bug: stock, without selling price\n sell = self.selling_price()\n elif stock >= 500:\n # higher production, lower prices\n sell = self.selling_price() / 2\n buy = buy - (buy * 0.5)\n\n return [buy, sell, stock]", "def aveVolumeCalc(ins, date):\n cal = ins.Currency().Calendar()\n enddate = cal.AdjustBankingDays(date, 0)\n startdate = cal.AdjustBankingDays(date, AVERAGING_PERIOD)\n\n prices=[]\n histprices = acm.FPrice.Select(\"instrument = %s and market = '%s' \\\n and day > '%s' and day <='%s'\" % \n (ins.Oid(), DAILY_MARKET, startdate, enddate))\n \n for price in histprices:\n settle = price.Settle()\n if settle >= 0:\n prices.append(settle)\n \n #upgrade 2013 fix for failure during run - acm.Math().AverageOf seems buggy\n try:\n avgprice = (sum(prices)/len(prices))\n except ZeroDivisionError:\n avgprice = 0\n \n #avgprice = acm.Math().AverageOf(prices, None)\n \n #Overwrite today's price if you find it \n newPrice = acm.FPrice.Select01(\"instrument = %s and market = '%s' and day = %s\" % \n (ins.Oid(), THREE_MONTH_MARKET, enddate),\n 'NaN')\n if not newPrice:\n newPrice = acm.FPrice()\n newPrice.Instrument(ins)\n newPrice.Day(enddate)\n newPrice.Market(THREE_MONTH_MARKET)\n newPrice.Currency(ins.Currency())\n\n newPrice.Settle(avgprice)\n try:\n newPrice.Commit()\n print 'INFO: %s price for %s was created on %s' %(THREE_MONTH_MARKET, ins.Name(), date)\n except Exception, err:\n print 'ERROR: %s price for %s did not commit: %s' %(THREE_MONTH_MARKET, ins.Name(), str(err))\n \n return newPrice", "def 
_compute_bundle_price(self, bundle):\n details = ''\n # 1. Build a mapping from resource-specific info to price\n res_to_price_map = self._res_man.get_res_to_price_mapping()\n if self._use_price_token and bundle.has_price_token():\n # Use token if enabled & provided. If token has expired, we don't\n # change the res_to_price_map, which causes the most recent prices\n # to be used instead\n token = bundle.get_price_token()\n try:\n with self._history_lock:\n price_mapping = self._price_history[token]\n res_to_price_map = price_mapping\n logger.debug(f'Using prices saved with token {token}')\n except KeyError:\n details = f'token {token} has expired!'\n logger.debug(details)\n # 2. Zones\n zone_price = 0\n zones = bundle.copy_zones()\n for zone in zones:\n zone_id = zone.zone_id\n for resource in zone.resources:\n res_type = resource.get_res_type()\n qty = resource.get_value()\n price = res_to_price_map[zone_id][res_type]\n zone_price += qty * price\n # 3. Links\n link_price = 0\n links = bundle.copy_links()\n for link in links:\n src_zone_id, dst_zone_id = link.get_src_and_dst()\n link_price += qty * res_to_price_map[src_zone_id][dst_zone_id]\n price = res_to_price_map[src_zone_id][dst_zone_id]\n # 4. Account for bundle duration\n duration = bundle.duration\n price = zone_price + link_price\n full_price = price * duration\n logger.debug(\n f'Bundle $ = {full_price} for duration {duration}. '\n f'Per time unit: price = {price} '\n f'(zones = {zone_price}, links = {link_price})'\n )\n return full_price, details", "def _get_book_prices(self):\n for k in self.orders.keys():\n if self.orders[k].type == 'ask':\n self.ask_prices.append(self.orders[k].price)\n self.ask_snapshot[k] = self.orders[k]\n elif self.orders[k].type == 'bid':\n self.bid_prices.append(self.orders[k].price)\n self.bid_snapshot[k] = self.orders[k]\n # Sorting and removing dubbing\n self.ask_prices = list(dict.fromkeys(sorted(self.ask_prices)))\n self.bid_prices = list(dict.fromkeys(sorted(self.bid_prices, reverse=True)))", "def calc_energy_and_price(self) -> (float, float):\n\n cost_sum = 0\n energy_sum = 0\n for pump_id in self.pumps:\n pump_energy, pump_cost = self.pumps[pump_id].calculate_energy_and_cost()\n cost_sum += pump_cost\n energy_sum += pump_energy\n\n pump_id.append_index = 0\n\n assert energy_sum >= 0, \"The pumping energy cant be negative!\"\n assert cost_sum >= 0, \"The pumping cost cant be negative!\"\n return energy_sum, cost_sum", "def calc_b_a(self, data):\n # Calculate our expected bid / ask\n mkt_bid = data['Bid'].values[-1]\n mkt_ask = data['Ask'].values[-1]\n last_trade = data['Trade Price'].values[-1]\n shares = self.calc_shares(data=data, last_trade=last_trade)\n\n k = (mkt_bid - mkt_ask) / (mkt_bid + mkt_ask) * -100\n\n our_bid = np.average(data['Bid'], weights=self.weights) - k\n our_ask = np.average(data['Ask'], weights=self.weights) + k\n\n self.portfoolio['Unrealized_PnL'] = self.portfoolio['Shares'] * last_trade - self.portfoolio['Avg_Cost'] * self.portfoolio['Shares']\n\n if shares == 0:\n # Skip 0 share orders\n pass\n elif abs(self.portfoolio['Cost']) >= self.max_pos * .75:\n # If position size at or above 95% of max, reduce position\n self.risk_control(bid=mkt_bid, ask=mkt_ask, last_trade=last_trade)\n\n elif our_bid >= mkt_bid:\n # Buy at bid\n self.trade(shares=shares, price=mkt_bid, last_trade=last_trade)\n elif our_ask <= mkt_ask:\n # Sell at ask\n self.trade(shares=-shares, price=mkt_ask, last_trade=last_trade)\n else:\n print('No order placed')", "def _calc_ptf_asset_vals( self, 
ptf_asset_vals, asset_rtns, chg_wts=None ): \n if chg_wts is None:\n chg_wts = np.zeros_like(asset_rtns)\n else:\n assert np.isclose( chg_wts.sum(), 0 ), 'Changes in weights must sum to 0.'\n \n # Obtain the new weights from the old, and deduct transaction costs from cash\n tot_ptf_val = ptf_asset_vals.sum()\n ptf_asset_vals_ex_cost = ptf_asset_vals + chg_wts * tot_ptf_val\n ptf_asset_vals_ex_cost[0] -= np.sum( self.trans_costs * np.abs(chg_wts[1:]) ) * tot_ptf_val\n return ptf_asset_vals_ex_cost.ravel() * (1 + asset_rtns.ravel() )", "def calc_earning(self, data=None):\n result = Result()\n if data is None:\n data = self.security\n self.calcDecision()\n first_purchase_method = self.check_first_purchase_method()\n for i in np.arange(len(data['Close'])):\n if data['FinalDecision'].iloc[i] is None:\n pass\n elif data['FinalDecision'].iloc[i] == TransactionType.BUY:\n if data['FinalDecision'].iloc[i-1] == TransactionType.BUY:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n if first_purchase_method == FirstTransactionType.INIT_CAPITAL:\n self.shares_own = int((self.init_capital/data['Close'].iloc[i]))\n self.buys_made += 1\n elif first_purchase_method == FirstTransactionType.STOCK_QUANTITY:\n self.shares_own = self.stock_quantity\n self.buys_made += 1\n else:\n self.shares_own = int(self.final_capital / data['Close'].iloc[i])\n self.final_capital = self.final_capital % data['Close'].iloc[i]\n #print(self.shares_own)\n\n elif data['FinalDecision'].iloc[i] == TransactionType.SELL:\n if data['FinalDecision'].iloc[i-1] == TransactionType.SELL:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n pass\n else:\n self.final_capital += self.shares_own * data['Close'].iloc[i]\n self.shares_own = 0\n self.sells_made +=1\n #Checar si es el momento mas alto o bajo de ganancias\n if self.shares_own == 0:\n if (self.highest_point is None\n or self.highest_point < self.final_capital):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > self.final_capital\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n else:\n if (self.highest_point is None\n or self.highest_point < (self.shares_own * data['Close'].iloc[i])):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > (self.shares_own * data['Close'].iloc[i])\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n self.calcRealFinalCapital()\n self.calcDiferencePercentage()", "def calc_performance(self):\n for symbol in self.portfolio.assets.keys():\n\n # Total the Performance of all the trades\n start = self.portfolio.trades[symbol].index[0]\n end = self.portfolio.trades[symbol].index[-1]\n trades = len(self.record[symbol])\n profit = self.record[symbol]['profit'].sum()\n loss = self.record[symbol]['loss'].sum()\n # Total or average the trade info for all the trades\n try:\n wins = len(self.record[symbol].groupby('win/loose').groups['w'])\n except (ValueError, KeyError):\n wins = 0\n try:\n losses = len(self.record[symbol].groupby('win/loose').groups['l'])\n except (ValueError, KeyError):\n losses = 0\n try:\n washes = len(self.record[symbol].groupby('win/loose').groups['-'])\n except (ValueError, KeyError):\n washes = 0\n max_drawdown = self.record[symbol]['drawdown'].max()\n average_drawdown = self.record[symbol]['drawdown'].mean()\n max_drawdown_time = self.record[symbol]['drawdown days'].max()\n average_drawdown_time = self.record[symbol]['drawdown days'].mean()\n # Average the risk and market comparisons 
for all trades\n vol_risk = self.record[symbol]['volatility'].mean()\n beta = self.record[symbol]['beta'].mean()\n lpm_risk = self.record[symbol]['lpm'].mean()\n e_r = self.record[symbol]['expected_return'].mean()\n # Calculate Risk measures\n treynor_ratio = (e_r - self.risk_free_return) / beta\n sharpe_ratio = (e_r - self.risk_free_return) / vol_risk\n # Package up the data for each symbol\n self.performance[symbol] = {\n 'start': start,\n 'end': end,\n 'trades': trades,\n 'wins': wins,\n 'losses': losses,\n 'washes': washes,\n 'profit': profit,\n 'loss': loss,\n 'net_profit': profit - loss,\n 'profit_factor': profit / loss if loss != 0 else 1.0,\n 'percent_profitable': wins / trades if trades != 0 else 0.0,\n 'average_trade_net_profit' : (profit - loss) / trades if trades != 0 else 0.0,\n 'max_drawdown' : max_drawdown,\n 'average_drawdown' : average_drawdown,\n 'max_drawdown_days' : max_drawdown_time,\n 'average_drawdown_days' : average_drawdown_time,\n 'volatility_risk' : vol_risk,\n 'beta' : beta,\n 'lower_partial_moment_risk' : lpm_risk,\n 't_r' : treynor_ratio,\n 's_r' : sharpe_ratio\n }\n\n return self", "def get_swapbuy( database, date_start, date_end, currency, services ):\n query = string.Template( QUERY_SWAPBUYS ).substitute( dict(\n date_start = date_start,\n date_end = date_end,\n currency = currency,\n services = ','.join( [ ''.join( [ '\\'', service, '\\'' ] )\n for service in services ] ) ) )\n cursor = database.cursor()\n cursor.execute( query )\n fetched = cursor.fetchall()\n cursor.close()\n\n result = {}\n for row in fetched:\n result.setdefault(\n row[ 'service_name' ], {} )[\n# row[ 'data_date' ] ] = row[ 'swapbuy' ]\n row[ 'data_date' ] ] = float( row[ 'swapbuy' ] ) / row[ 'days' ]\n\n return result", "def get_transaction_prices(self):\n cleaned_data = self.cleaned_data()\n supplier_cleaned_data = cleaned_data.get('cleaned_supplier_data')\n transaction_cleaned_data = cleaned_data.get('cleaned_transaction_data')\n merged_data = self.merge_supplier_transaction(supplier_cleaned_data, transaction_cleaned_data)\n calculated_data = self.calculate_prices(merged_data)\n self.export_calculated_prices(calculated_data)\n return calculated_data", "def get_price_changes(area, data_path):\n\n block_info = pd.read_csv(os.path.join(data_path, 'block_info.csv'))\n area = block_info.loc[block_info['PaidParkingArea'] == area]\n\n price_changes = defaultdict(list)\n time_changes = defaultdict(list)\n\n for key in area['ElementKey'].unique():\n block = area.loc[area['ElementKey'] == key]\n block = block.dropna(subset=['WeekdayRate1'])\n block = block.sort_values(by='EffectiveStartDate')\n\n prices = block.loc[:, ['WeekdayRate1', 'WeekdayRate2', 'WeekdayRate3', \n 'SaturdayRate1', 'SaturdayRate2', 'SaturdayRate3']].values\n times = block.loc[:, ['StartTimeWeekday', 'EndTimeWeekday', \n 'StartTimeSaturday', 'EndTimeSaturday']].values\n dates = block.loc[:, ['EffectiveStartDate', 'EffectiveEndDate']].values\n\n for i in xrange(len(block)-1):\n if not np.array_equal(prices[i], prices[i+1]):\n price_changes[(dates[i+1,0], dates[i,0])].append((key, prices[i]-prices[i+1]))\n if not np.array_equal(times[i], times[i+1]):\n time_changes[(dates[i+1,0], dates[i,0])].append((key, times[i], times[i+1]))\n \n return price_changes, time_changes", "def calculate_reserves(self):\n # TODO: Add back cash dividends and deduct exchange costs\n console.print(\"Still has to be build.\")", "def compute_portvals(start_date, end_date, trades_df, start_val):\n # SETTING UP ORDERS DATAFRAME\n # Read orders file 
into a dataframe http://pandas.pydata.org/pandas-docs/stable/io.html#io-read-csv-table \n orders = trades_df\n symbols = np.unique(orders['Symbol']).tolist() # List of all the symbols used in orders\n\n # SETTING UP PRICES DATAFRAME\n # Read in adjusted closing prices for given symbols, date range... drop non-trading days... add cash column\n dates = pd.date_range(start_date, end_date)\n prices = get_data(symbols, dates, addSPY=False).dropna()\n prices['cash'] = 1.00\n\n # SETTING UP TRADES DATAFRAME\n # Daily snapshot of portfolio changes (+ = Buy Order, - = Sell Order) with cash adjustments\n trades = pd.DataFrame(0.00, index=prices.index, columns=symbols)\n trades['cash'] = 0.00\n\n for row_index, row in orders.iterrows():\n try:\n if row.Order == 'SELL':\n trades.ix[row.Date,row.Symbol] += (-1 * row.Shares) # Subtract ShareAmount for Sell \n trades.ix[row.Date,'cash'] += (row.Shares * prices.ix[row.Date, row.Symbol]) #adjust cash value for Sell\n elif row.Order == 'BUY':\n trades.ix[row.Date,row.Symbol] += (row.Shares) # Add ShareAmount for Buy\n trades.ix[row.Date,'cash'] += (-1 * row.Shares * prices.ix[row.Date, row.Symbol]) #adjust cash value for Buy\n else:\n print 'ERROR: order type not recognized, looking for BUY or SELL'\n except:\n print 'Unknown Error:'\n\n\n # SETTING UP HOLDINGS DATAFRAME \n # accumulating trades into holdings dataframe, snapshot of shares and cash for given day\n holdings = pd.DataFrame(0.00, index=prices.index, columns=symbols)\n holdings['cash'] = 0.00\n holdings.ix[start_date,'cash'] = start_val # add starting cash value\n previous_row = holdings.iloc[0]\n for row_index, row in holdings.iterrows():\n holdings.ix[row_index] = previous_row + trades.ix[row_index] #previous day's value + trades\n previous_row = row\n\n #SETTING UP VALUES DATAFRAME\n # convert shares into their respective dollar amounts\n values = pd.np.multiply(holdings, prices)\n #DAILY VALUE OF THE PORTFOLIO\n portvals = values.sum(axis=1)\n return portvals", "def market_value(self, ref_prices, suspensions=None):\n # TODO some securities could not be able to be traded\n if suspensions is None:\n suspensions = []\n \n market_value_float = 0.0\n market_value_frozen = 0.0 # suspended or high/low limit\n for sec in self.holding_securities:\n size = self.get_position(sec).current_size\n # TODO PortfolioManager object should not access price\n price = ref_prices[sec]\n mv_sec = price * size\n if sec in suspensions:\n market_value_frozen += mv_sec\n else:\n market_value_float += mv_sec\n \n return market_value_float, market_value_frozen", "def solve_prices(self):\n return None", "def get_prices(self):\n pass", "def compute_portvals(start_date, end_date, orders_file, start_val):\n \n #Read order file\n orders = pd.read_csv( orders_file, parse_dates = [0])\n \n #Get symbols making up the portfolio\n stock_symbols = list( set( orders[\"Symbol\"] ) )\n dates = pd.date_range(start_date, end_date)\n \n #Read stock prices\n stock_prices = get_data(stock_symbols, dates)\n \n #Create a portfolio keeping track of positions, \n #_CASH column indicates cash position, _VALUE total portfolio value\n #_LEVERAGE the leverage of portfolio when we allow for short selling\n symbols = stock_symbols[:] #Shallow copy of the list\n symbols.append(\"_CASH\")\n symbols.append(\"_VALUE\")\n symbols.append(\"_LEVERAGE\")\n \n #Index contains only business days, same dates as stock prices\n portfolio = pd.DataFrame(index=stock_prices.index, columns = symbols )\n portfolio.fillna(0) \n portfolio[\"_CASH\"][0] = start_val\n 
portfolio[\"_VALUE\"][0] = start_val\n \n #Snapshot of a portfolio at any time. To avoid using numerical indexes\n portfolio_snapshot = dict.fromkeys ( symbols, 0 )\n portfolio_snapshot[\"_CASH\"] = start_val\n portfolio[\"_VALUE\"] = start_val\n \n #Now calcualte portfolio day by day\n for date in portfolio.index:\n #Check transactions for the day\n day_orders = orders[ orders[\"Date\"] == date ] \n \n for ord in day_orders.iterrows():\n symbol = ord[1][ \"Symbol\"] \n stock_price = stock_prices[ symbol ][ date ]\n shares = ord[1][\"Shares\" ]\n side = ord[1][\"Order\"]\n \n if side == \"BUY\":\n portfolio_snapshot[ \"_CASH\" ] -= stock_price * shares\n portfolio_snapshot[ symbol ] += shares \n elif side == \"SELL\":\n portfolio_snapshot[ \"_CASH\" ] += stock_price * shares\n portfolio_snapshot[ symbol ] -= shares\n else:\n raise \"Order not recognized.\"\n \n #Compute portfolio value\n portfolio_snapshot[ \"_VALUE\" ] = portfolio_snapshot[ \"_CASH\" ]\n shorts = longs = 0\n for symbol in stock_symbols: \n stock_price = stock_prices[ symbol ][ date ]\n shares = portfolio_snapshot[ symbol ]\n notional = stock_price*shares\n if shares > 0:\n longs += notional\n else:\n shorts += notional\n \n portfolio_snapshot[ \"_VALUE\" ] += notional\n \n #Compute leverage\n leverage = (longs+shorts)/(longs-shorts + portfolio_snapshot[ \"_CASH\" ] )\n portfolio_snapshot[ \"_LEVERAGE\" ] = leverage\n \n #Assert we never achieve a leverage > 2.0\n if leverage > 2:\n raise \"Leverage > 2.0 achieved\"\n \n #Update portfolio from the daily snapshot\n #TODO: Is this causing performance issues?\n for symbol in portfolio.keys():\n portfolio[ symbol ][ date ] = portfolio_snapshot[ symbol ]\n \n return portfolio", "def _trading_cost(self, current_weights, prev_weights):\n delta_weight = current_weights - prev_weights\n delta_weight = delta_weight[:-1] # No costs associated with risk-free asset\n trading_cost = self.kappa1 * cp.abs(delta_weight) + self.kappa2 * cp.square(delta_weight) # Vector of trading costs per asset\n\n return cp.sum(trading_cost)", "def _set_current_prices(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n eth_price = strategy.contract_status.get(\n \"priceprovider_get_latest_answer\", None\n )\n btc_price = strategy.contract_status.get(\n \"btcpriceprovider_get_latest_answer\", None\n )\n\n if eth_price is None or btc_price is None:\n self.context.logger.info(\"No price to store.....\")\n return\n self._current_price = {\n ETH: self._get_price(eth_price),\n BTC: self._get_price(btc_price),\n }", "def shareholder_equity_to_total_assets(self):\n balance_sheet = self.stock.balance_sheet_dict\n\n # Check for Null values first\n # TODO: make the note more specific\n if 'Total Assets' not in balance_sheet or 'Total Liabilities' not in balance_sheet:\n self.stock.append_calc_result('At least 50% equity to assets ratio?', 'N/A', 'N/A', 'Not enough data found')\n return\n\n value = (balance_sheet['Total Assets'] - balance_sheet['Total Liabilities']) / balance_sheet['Total Assets']\n criteria_passed = ''\n if value >= 0.5:\n criteria_passed = 'Yes'\n elif value < 0.5:\n criteria_passed = 'No'\n\n self.stock.append_calc_result('At least 50% equity to assets ratio?', round(value, 2), criteria_passed, '')", "def calc_returns(prices):\n returns = []\n for i in range(len(prices) - 1):\n ret = (prices[i + 1] - prices[i]) / prices[i]\n returns.append(ret)\n return returns", "def get_price_of_shopping_cart(price_in, price_out):\n return price_in + price_out", "def trading_alg(self,table_name = 
None, buy_now = False, strategy_name = \"sma9\"):\n \n self.bs.buyed_stocks = 0\n self.bs.money = self.bs.startCredit\n spy_stocks = self.load_data(table_name = table_name, symbols = [\"SPY\"])\n spy_stocks = FinI.add_indicators(spy_stocks)\n \n if self.symbols:\n symbols = self.symbols\n else:\n symbols = self.db.get_symbols()\n\n # symbols = [\"INTC\",\"BYND\",\"ZM\",\"NKE\",\"HIMX\",\"JKS\",\"ENPH\",\"DUK\",\"GE\",\"DIS\",\"LEVI\",\"NVAX\",\"SLCA\",\"GPS\"]\n \n for symbol in symbols:\n print(\"symbol: \" + str(symbol))\n \n sub_data = self.load_data(table_name = table_name, symbols = symbol)\n if len(sub_data) < 1:\n break\n\n self.bt_stocks = FinI.add_indicators(sub_data)\n self.bt_stocks = FinI.add_fib(self.bt_stocks)\n # print(self.bt_stocks)\n print(self.bt_stocks[\"sma30\"])\n print(\"calculating percent change:\" + str(symbol))\n # sub_data = self.stocks.loc[self.stocks.sym ==symbol[0]].sort_values(by='index')\n \n self.symbols = symbol[0]\n \n # self.prev_stock = sub_data.iloc[0]\n # self.bt_stocks.iloc[0] = sub_data.iloc[0]\n\n # self.sell_marks = self.sell_marks.iloc[0:0]\n # self.buy_marks = self.buy_marks.iloc[0:0]\n self.bs.transactions = 0\n self.bs.profit_perc = 0\n \n # trend_indicator = \n # TODO mechanism for select strategies\n # self.ts_boll(buy_now = buy_now, at_settings = None, symbol = symbol, spy_stocks = spy_stocks)\n self.ts_eval(buy_now = buy_now, at_settings = None, symbol = symbol, spy_stocks = spy_stocks, strategy_logic = strategy_name)\n\n # call the method with passed and assembled name\n # method = getattr(self, 'ts_' + strategy_name)\n # method(buy_now = buy_now, at_settings = None, symbol = symbol, spy_stocks = spy_stocks, strategy_name = strategy_name)", "def get_holding_returns(self, prices, holding_period):\n holding_returns = prices.pct_change(periods=holding_period).shift(-holding_period).fillna(0)\n return holding_returns" ]
[ "0.6414581", "0.5868107", "0.5832748", "0.5781452", "0.57402825", "0.56914604", "0.56871104", "0.5682011", "0.567011", "0.5665571", "0.5613298", "0.5596176", "0.5581445", "0.5539411", "0.55184567", "0.5488983", "0.54832494", "0.54776424", "0.54453236", "0.5431624", "0.5424075", "0.5421954", "0.54190904", "0.5396943", "0.53938955", "0.53717065", "0.53676325", "0.5363312", "0.5351987", "0.5340909" ]
0.69301
0
Predict response for given x based on fitted coefficients.
def predict(self, x: np.ndarray): return self.fit_function(x, self.coefficients)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, x):\n \"\"\"containing values for the independent variables.\"\"\"\n _linear_predictor_typecheck(x, self._coeff)\n return dot(self._coeff, x) + self._intercept", "def predict(self, x):\n if(self.__trained == False):\n raise ModelNotTrainedException(self.predict.__name__)\n \n x = np.array(x, 'float64')\n return (self.coef_[1] * x) + self.coef_[0]", "def predict(self, fit_result, x):\n raise NotImplementedError()", "def predict(self, X) :\n if self.coef_ is None :\n raise Exception(\"Model not initialized. Perform a fit first.\")\n\n X = self.generate_polynomial_features(X) # map features\n\n ### ========== TODO : START ========== ###\n # part c: predict y\n # for this we first get the single value of feature vector, then X in the transposed form and then we have to multiply by Theta\n\n y = np.dot(X, self.coef_)#coef is the coef matrix\n ### ========== TODO : END ========== ###\n\n\n return y", "def predict(self, x, **kwargs):\n return self.__multi_layer_perceptron.predict(x)", "def predict(self, X): \n return self.f(X, self.coefficients)", "def predict(self, x: np.ndarray) -> np.ndarray:\n return self.descent.predict(x)", "def predict(self, x):\n return self.model.predict(x)", "def predict(self, x, **kwargs):\n return self.tuner.get_best_models(1)[0].predict(x, **kwargs)", "def predict(self, x):\n y_hat = (self.model['b1'] * x) + self.model['b0']\n return y_hat", "def predict(self,x):\n return self._model.predict(x)", "def predict(self, X):\n check_is_fitted(self, \"coef_\")\n X_reduced_test = self._pca.transform(X)\n predictions = self._model.predict(X_reduced_test)\n return predictions", "def predict(self, x):\n new_x = np.array(self.transform(x.reshape(1, -1)).flatten())\n return self.clf.predict(new_x.T)", "def predict(self, x_test):\n return self.lin_reg.predict(self.poly_reg.fit_transform(x_test))", "def predict(self, x):\r\n x = np.array(x)\r\n t = np.arange(0, self.__InputLength, 1)\r\n t_pred = np.arange(self.__InputLength, self.__InputLength + self.__OutputLength, 1)\r\n\r\n pred = []\r\n\r\n for dim in range(x.shape[1]):\r\n f = np.polyfit(t, x[-self.__InputLength:,dim], 3)\r\n poly3 = np.poly1d(f)\r\n\r\n pred.append(poly3(t_pred))\r\n\r\n pred = np.asarray(pred).transpose()\r\n\r\n return pred", "def predict(self, x):\n raise NotImplementedError(\"Please Implement this method\")", "def predict(self, x: np.ndarray):\n return self.model.predict(x)", "def fit_predict(self, X, y):\n return self.fit(X, y).predict(X)", "def fit_predict(self, X, y):\n return self.fit(X, y).predict(X)", "def fit_predict(self, X, y):\n return self.fit(X, y).predict(X)", "def fit_predict(self, X, y):\n return self.fit(X, y).predict(X)", "def prediction(self, x):\n t = self.model.predict(x)\n return t", "def predict(self, x):\n return self.layers[0].predict(x.astype(self.dtype))", "def predict(self, x):\n \n\n return predictions", "def predict(self, x):\n\n pred = self.decision_function(x)\n return pred", "def predict(self, x):\n pred = x\n for m in self.models:\n pred = m.predict(pred)\n\n return pred", "def predict(self, x):\n\n y_pred = self._fm.predict(x)\n e_pred = self._rm.predict(x)\n return y_pred + e_pred", "def predict(self, x):\n features = self._get_features(x)\n\n y_pred = self.classifier.predict(features)\n\n return y_pred", "def fit_predict(self, X, y=None):\n return self.fit(X, y).predict(X)", "def predict(self, x):\n return self._root.predict(x)" ]
[ "0.81227326", "0.79453236", "0.7797504", "0.76598966", "0.7611417", "0.75813264", "0.74996483", "0.74753344", "0.74670166", "0.7459472", "0.7450515", "0.744085", "0.74362046", "0.7429248", "0.7394951", "0.7328513", "0.73182887", "0.7303986", "0.7303986", "0.7303986", "0.7303986", "0.7271431", "0.7263847", "0.72587335", "0.7211382", "0.7211164", "0.71933925", "0.7190798", "0.71847767", "0.71785" ]
0.8447172
0
Get residual after fit.
def get_residual(self) -> np.ndarray: return self._calculate_residual(self.coefficients)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def residuals(self):\r\n return self.__residuals", "def residuals(self) -> npt.NDArray[np.float64]:\n return self.data - self.theory", "def residuals_(self):\n return self._residuals", "def _residuals(self):\n if self.model_fit is None:\n self._uvts_cls_logger.error(\"No model has been fitted, residuals cannot be computed!\")\n sys.exit(\"STOP\")\n\n try:\n # use fittedvalues to fill in the model dictionary\n self.residuals = pd.Series(np.asarray(self._train_dt['y']) - np.asarray(self.fittedvalues).flatten(),\n index=self._train_dt['y'].index)\n self.upper_whisker_res = self.residuals.mean() + 1.5 * (\n self.residuals.quantile(0.75) - self.residuals.quantile(0.25))\n except (KeyError, AttributeError):\n self._uvts_cls_logger.exception(\"Exception occurred: Model was not fitted or ts has other structure\")\n\n return self", "def postfit_residuals(self) -> NONEARRAY:\n pass", "def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through intensity, not form factor.\n #data = self.F\n #model = np.absolute(self._model())\n #return (data - model) ", "def residuals(self, p, data, X):\n err = data - self.fitfunc(X,p)\n return err", "def postfit_residuals(self) -> NONEARRAY:\n if self._successful:\n return self._postfit_residuals\n else:\n return None", "def resid(self):\n # GH#5255\n return self.model.endog - self.fittedvalues", "def residuals(self, X=None, y=None) -> np.ndarray:\n if y is None:\n return self.model.data.y.unnormalized_y - self.predict(X)\n else:\n return y - self.predict(X)", "def get_residual(self, beta: ndarray) -> ndarray:\n return self.data.weight*(self.data.obs -\n self.fevar.mapping(beta))", "def _get_residuals(self, model: Model) -> np.ndarray:\n try:\n # pyre-fixme[16]: `Model` has no attribute `model`.\n return model.model.resid.values\n except Exception:\n fcst = model.predict(steps=1, freq=\"D\", include_history=True)\n # pyre-fixme[16]: `None` has no attribute `merge`.\n # pyre-fixme[16]: `Optional` has no attribute `to_dataframe`.\n merge = fcst.merge(model.data.to_dataframe(), on=\"time\")\n for col in merge.columns:\n if col != \"time\" and (\"fcst\" not in col):\n return merge[col].values - merge[\"fcst\"].values\n raise ValueError(\"Couldn't find residual or forecast values in model\")", "def residual(self, y,r):\n u,v,tt = self.split(y)\n fiu,fiv,fitt = self.problem.internal_forces(u,v,tt)\n R = np.concatenate((fiu,fiv,fitt))\n R = self.residualApplyBCs(R,y,r)\n return R", "def _compute_residuals(self):\n residuls = self.I - self.E\n return residuls", "def residual(self,name):\n state = self.getstate(name)\n m = self.hit.vec \n x = state.vec\n res = m - self.hmatrix*x\n debug('kfnode.residual',(name,res))\n return res", "def _residual_lattice(self, params):\n model = np.sqrt(self.calc_q_square())\n data = np.absolute(self.q)\n return (model[self.mask] -data[self.mask])", "def residuals(self,x=None,y=None,retdata=False):\n if x is None or y is None:\n if self.data is None:\n raise ValueError('must either specify data or save fitted data')\n x,y,weights = self.data\n\n if self(x).shape != y.shape:\n raise ModelTypeError('y array does not match output of model for input x')\n if retdata:\n return x,y,y-self(x)\n else:\n return y-self(x)", "def _residual(function, p, x, y, y_err):\n return (y - function(p, x)) / y_err", "def residual ( 
self , dataset , **kwargs ) :\n hdata = self.make_histo ( **kwargs )\n dataset.project ( hdata , ( self.yvar.name , self.xvar.name ) )\n return self.residual_histo ( hdata )", "def residual(params, model_func, x, data, min_x_param=None, max_x_param=None,\n eps=None):\n # Crop the X data according to a fit parameter\n if min_x_param is not None or max_x_param is not None:\n min_x = params.get(min_x_param, None)\n max_x = params.get(max_x_param, None)\n x, data = crop_x_y(x, data, min_x=min_x, max_x=max_x,\n include_bounds=False)\n\n # Calculate data according to the model function\n model = model_func(x, **params)\n\n # Calculate the residuals of the model and the given data\n if eps is None:\n return model - data\n return (model - data) / eps", "def compute_residuals(self):\n\n r = self.rsdl()\n adapt_tol = self.opt['RelStopTol']\n\n if self.opt['AutoStop', 'Enabled']:\n adapt_tol = self.tau0 / (1. + self.k)\n\n return r, adapt_tol", "def residual(us):\n return self.h_S(z0, us) - h_P", "def _residual(self, x):\n h = x\n h = self.b1(h)\n h = self.activation(h)\n h = self._upsample_conv(h, self.c1) if self.upsample else self.c1(h)\n h = self.b2(h)\n h = self.activation(h)\n h = self.c2(h)\n\n return h", "def compute_residuals(r):\n global conv_residuals\n conv_residuals.append(r)\n return", "def _residual(self, x):\n h = x\n h = self.c1(h)\n h = self.activation(h)\n h = self.c2(h)\n h = F.avg_pool2d(h, 2)\n\n return h", "def residuals(x, y, filename):\n empirical_data = y\n #print(x)\n # call convert function\n\n ans = (empirical_data - run_model(x, filename))/empirical_data * 100\n #print(ans)\n return ans", "def residualNorm(self):\n return math.sqrt(self.residualNorm2())", "def residual_of(self, z):\n raise \"Not implemented yet\"\n return z - dot(self._H, self._x)", "def _residual(self, x):\n h = x\n h = self.activation(h)\n h = self.c1(h)\n h = self.activation(h)\n h = self.c2(h)\n if self.downsample:\n h = F.avg_pool2d(h, 2)\n\n return h", "def getResidual(self, data, Xp):\n return data - Xp" ]
[ "0.74578893", "0.7413885", "0.740772", "0.7328536", "0.7270871", "0.711375", "0.71051645", "0.7091559", "0.7005274", "0.69731915", "0.69583935", "0.69441044", "0.68942314", "0.6860036", "0.68399817", "0.6828115", "0.68206584", "0.68188107", "0.6794833", "0.6759753", "0.67291135", "0.6689804", "0.66772145", "0.6671117", "0.6665436", "0.66094637", "0.659484", "0.65499395", "0.6517026", "0.6345352" ]
0.77138394
0
Get estimated response vector based on fit.
def get_estimate(self) -> np.ndarray: return self.fit_function(self.x, self.coefficients)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fittedvalues(self):\n return self.model.predict(self.params)\n # TODO: GH#5255 is this necessarily equivalent to self.predict()?", "def predict(self, fit_result, x):\n raise NotImplementedError()", "def estimate(self, x, y):\n\n self.regressor.fit(x, y)\n y_predict = self.regressor.predict(x)\n\n return y_predict", "def adjusted_response(X, y, coef, intercept=0):\n Xbeta = intercept + np.dot(X, coef)\n w = np.exp(Xbeta)\n z = Xbeta + (y / w) - 1\n return w, z", "def yfit(self):\n return np.dot(self.amatrix,self.acoeff)", "def predict(self, x: np.ndarray):\n return self.fit_function(x, self.coefficients)", "def fit(self,vec):\n\t\tif self.base_vecs is None or self.inv_vecs is None:\n\t\t\traise OrderError('Fit','Invalid Order: Run fitter.PrepFit before' \\\n\t\t\t\t+ ' running fitter.Fit')\n\n\t\tfinal = [np.dot(vec,base_vec) for base_vec in self.base_vecs]\n\n\t\treturn np.transpose(np.dot(self.inv_vecs,np.transpose(final)))", "def predict(self, X): \n return self.f(X, self.coefficients)", "def get_fit_intercept(self):\n return self.fit_intercept", "def fit_predict(self, X, y):\n return self.fit(X, y).predict(X)", "def fit_predict(self, X, y):\n return self.fit(X, y).predict(X)", "def fit_predict(self, X, y):\n return self.fit(X, y).predict(X)", "def fit_predict(self, X, y):\n return self.fit(X, y).predict(X)", "def get_intercepts(fit, y):\n x = fit[0] * (y * y) + fit[1] * y + fit[2]\n return x", "def predict(self):\n RV = np.zeros((self.N,self.P))\n for term_i in range(self.n_terms):\n RV+=np.dot(self.Fstar()[term_i],np.dot(self.B()[term_i],self.Astar()[term_i]))\n return RV", "def evaluate(self):\n RV = -self.predict()\n RV += self.Ystar()\n return RV", "def fit_predict(self, X, y=None):\n return self.fit(X, y).predict(X)", "def fit(self, X):", "def fit(self, x):\n pass", "def get_fit_x(self, y):\n if self.line_fit_m.size == 0:\n return np.empty(y.shape)\n fit = self.line_fit\n return np.array(fit[0] * y ** 2 + fit[1] * y + fit[2]).astype(\"int\")", "def fit():\n pass", "def calc_error(self):\n if self._fit_data.y is not None and self._fit_data.y_fit is not None:\n self._fit_data.error_vector = self._fit_data.y - self._fit_data.y_fit", "def predict(self, X_test):\n if self.basis_func is not None:\n X_transformed = self.basis_func(X_test)\n else:\n X_transformed = X_test\n\n # Marginalise predictions over hyperparameters\n mu = np.zeros([len(self.hypers), X_transformed.shape[0]])\n var = np.zeros([len(self.hypers), X_transformed.shape[0]])\n\n for i, h in enumerate(self.hypers):\n mu[i] = np.dot(self.models[i][0].T, X_transformed.T)\n var[i] = 1. / h[1] + np.diag(np.dot(np.dot(X_transformed, self.models[i][1]), X_transformed.T))\n\n m = mu.mean(axis=0)\n v = var.mean(axis=0)\n # Clip negative variances and set them to the smallest\n # positive float value\n if v.shape[0] == 1:\n v = np.clip(v, np.finfo(v.dtype).eps, np.inf)\n else:\n v = np.clip(v, np.finfo(v.dtype).eps, np.inf)\n v[np.where((v < np.finfo(v.dtype).eps) & (v > -np.finfo(v.dtype).eps))] = 0\n\n return m, v", "def predict_response_variable(self, **kwargs):\n pass", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):", "def fit_predict(self, X, y=None):\n return super().fit_predict(X, y)", "def fit_predict(self):\n raise AttributeError", "def svm_predict(self, x) -> np.array:\r\n if self.svmModel is None:\r\n print(\"svm not trained, please run svm_fit first!\")\r\n return None\r\n else:\r\n return self.svmModel.predict(x)" ]
[ "0.63615495", "0.62313896", "0.5925463", "0.59242433", "0.58122206", "0.580785", "0.577574", "0.57283205", "0.57026815", "0.56927335", "0.56927335", "0.56927335", "0.56927335", "0.5685302", "0.56588197", "0.5631761", "0.5619938", "0.55874497", "0.5569511", "0.55256623", "0.5521549", "0.5518398", "0.55179137", "0.5504535", "0.54873085", "0.54873085", "0.54873085", "0.5484089", "0.54733074", "0.5463189" ]
0.65292436
0
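The three fit-accessor rows above (predict, get_residual, get_estimate) all read from an object that stores x, coefficients and a fit_function set during fitting. The sketch below is a minimal, self-contained fitter in that shape; the SimpleFitter name, the polynomial fit_function and the np.polyfit fitting step are illustrative assumptions rather than anything taken from the dataset rows.

```python
import numpy as np

class SimpleFitter:
    """Illustrative fitter exposing predict / get_residual / get_estimate
    in the same shape as the snippets above (assumed polynomial model)."""

    def __init__(self, fit_function):
        self.fit_function = fit_function      # callable: f(x, coefficients)
        self.x = None
        self.y = None
        self.coefficients = None

    def fit(self, x: np.ndarray, y: np.ndarray, degree: int = 2):
        # Assumption: ordinary polynomial least squares stands in for
        # whatever fitting routine the original class actually uses.
        self.x, self.y = x, y
        self.coefficients = np.polyfit(x, y, degree)
        return self

    def predict(self, x: np.ndarray):
        return self.fit_function(x, self.coefficients)

    def _calculate_residual(self, coefficients) -> np.ndarray:
        return self.y - self.fit_function(self.x, coefficients)

    def get_residual(self) -> np.ndarray:
        return self._calculate_residual(self.coefficients)

    def get_estimate(self) -> np.ndarray:
        return self.fit_function(self.x, self.coefficients)


if __name__ == "__main__":
    poly = lambda x, c: np.polyval(c, x)
    x = np.linspace(0.0, 1.0, 20)
    y = 3.0 * x ** 2 - x + 0.5
    fitter = SimpleFitter(poly).fit(x, y)
    print(fitter.predict(np.array([0.25, 0.75])))
    print(np.abs(fitter.get_residual()).max())   # ~0 for noise-free data
```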
Open a csv file and return the data in a dictionary where each key is a corresponding csv column name and each value for the key correspond to a list of the windows data. Each data windows is a list.
def getCSV(filename, reference_column="time", windows_separator=""): reader = csv.DictReader(open(filename)) csv_data = {} for row in reader: for col, value in row.iteritems(): csv_data.setdefault(col, []).append(value) if reference_column not in csv_data.keys(): raise ValueError('Reference column name {} not in the {} csv file. Aborting.'.format(reference_column, filename)) # get windows separation line numbers. reference_points = [i for i, v in enumerate(csv_data[reference_column]) if v == windows_separator] if not reference_points: raise ValueError('Windows separator has not been found in {} csv file. Aborting.'.format(reference_column, filename)) result = defaultdict(list) for i, r in enumerate(reference_points): for col in csv_data.keys(): if i == 0: result[col].append([csv_data[col][v] for v in range(r)]) else: result[col].append([csv_data[col][v] for v in range(reference_points[i - 1] + 1, r)]) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_some_data(the_file_name: str) -> dict:\n\n result: dict = open_csv(file_name=the_file_name)\n return result", "def _csv_to_dict(name):\n csv_path = _get_csv_path(name)\n result = []\n with open(csv_path) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n result.append(row)\n return result", "def load_data(self):\n df = pandas.read_csv(self.path)\n self.data_dict = df.to_dict(orient=\"list\")\n return self.data_dict", "def csv_to_dict(filename):\n data_list = []\n \n with open(filename, 'rb') as datafile:\n data_reader = csv.DictReader(datafile, delimiter = ',')\n for row in data_reader:\n data_list.append(row)\n\n return data_list", "def _read_csv(file_name):\n with open(file_name) as boards:\n rows = csv.DictReader(boards, delimiter=',', quotechar='\"')\n formatted_data = []\n for row in rows:\n formatted_data.append(dict(row))\n return formatted_data", "def csv2dicts(csvfile, names=None):\n data = []\n for row_index, row in enumerate(csvfile):\n if row_index == 0:\n if names:\n keys = names\n else:\n keys = row\n print(keys)\n continue\n data.append({key: value for key, value in zip(keys, row)})\n return data", "def csvToDict(filepath):\n data = []\n with open(getcwd() + filepath, 'r') as dataset:\n assert csv.Sniffer().has_header(dataset.read(9999)), 'No headers'\n dataset.seek(0)\n dialect = csv.Sniffer().sniff(dataset.read(99999))\n dataset.seek(0)\n reader = csv.DictReader(dataset, dialect=dialect)\n headers = reader.fieldnames\n for row in reader:\n data.append(row)\n\n data = assert_data_format(data)[0]\n\n return data, headers", "def readcsv(csvfile):\n logger = log.getLogger('obslog.readcsv')\n\n if not os.path.exists(csvfile):\n logger.error('Cannot access %s', csvfile)\n raise SystemExit\n\n data = {}\n with open(csvfile, mode='r') as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n data[row['FITSFILE']] = row\n logger.debug('Data: %s', data)\n return data", "def get_csv_data(file_name: str) -> Iterator[list]:\n with open(file_name) as f:\n # creating a reader instance that can iterate over rows of file.\n reader = DictReader(f)\n\n # iterating over rows:\n for row in reader:\n yield dict(row) # returning the dicts for each row in the dataset.", "def load_data(filename='KSI.csv'):\r\n d = []\r\n with open(filename) as csv_file:\r\n # csv_reader = csv.reader(csv_file, delimiter=',')\r\n csv_reader = csv.DictReader(csv_file, delimiter=',')\r\n for line_count, row in enumerate(csv_reader):\r\n if line_count == 0:\r\n print(f'Column names are \\n{\", \".join(row)}')\r\n # column_names = row\r\n else:\r\n d.append(row)\r\n # print(f'Processed {line_count} lines.')\r\n return d", "def load_csv(file):\n with open(file) as csvfile:\n reader = csv.DictReader(csvfile)\n return [dict(row) for row in reader]", "def read_csv_as_dicts(csv_input_file_name):\n input_table = read_csv_as_table(csv_input_file_name, skip_first_line=False)\n\n # first line should contain headers\n header = input_table[0]\n # rest lines would contain actual data\n data = input_table[1:]\n\n output = []\n # process all lines with data\n for input_line in data:\n record = {}\n for i in range(len(header)):\n record[header[i]] = input_line[i]\n output.append(record)\n return output", "def get_dictionary_from_csv(file):\n csv_file = file[:-4] # avoid .txt extension\n csv_file += \"_dico.csv\"\n dic = pd.read_csv(csv_file, delimiter=',')\n return list(dic.columns)", "def read_from_csv(self, csv_file):\n data = []\n with codecs.open(csv_file, 'r', encoding='utf-8') as 
csvfile:\n header = None\n for i, line in enumerate(csvfile):\n line_split = [x.strip() for x in line.split(\"|\")]\n line_data = [x for x in line_split if len(x) > 0]\n if i == 0:\n header = line_data\n else:\n entry = {}\n for i,datum in enumerate(line_data):\n entry[header[i]] = datum\n data.append(entry)\n print \"Loaded %d entries from %s\" % (len(data), csv_file)\n return data", "def create_waves_dict(csv_file):\n with open(csv_file) as file:\n reader = csv.DictReader(file)\n waves_dict = {row[\"Date\"]: row[\"Wave Height\"] for row in reader}\n return waves_dict", "def readData(filename):\r\n data_d = {}\r\n with open(filename) as f:\r\n df = pd.read_csv(f, header=0, dtype='str',sep=';')\r\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')]\r\n df_dict = df.to_dict(orient='index')\r\n for i,val in df_dict.iteritems(): \r\n clean_row = [(k, p.proc(v)) for (k, v) in val.iteritems()]\r\n row_id = val['line_nr']\r\n data_d[row_id] = dict(clean_row)\r\n return data_d\r\n return df", "def creating_dict_from_csv(self) -> dict:\n dictionary = {}\n for row in self.__read_csv():\n if dictionary.get(row[0]):\n dictionary[row[0]].append((row[1], row[2]))\n else:\n dictionary[row[0]] = [(row[1], row[2])]\n\n for key, value in dictionary.items():\n dictionary[key] = sorted(value, key=lambda x: x[1], reverse=True)\n\n return dictionary", "def read_csv(gw):\n\n data = {}\n\n with open(csv_file_name.format(gw=gw)) as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n\n for i, r in enumerate(reader):\n if i < 3:\n continue\n data[r[2]] = {'points': int(r[7]), 'rank': int(r[0])}\n return data", "def load_csv_to_dict(filename):\n row_len = list()\n result = dict()\n with open(filename, 'r') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n key = row[0].strip()\n values = [v.strip() for v in row[1:]]\n result[key] = values\n row_len.append(len(values))\n return result, max(row_len)", "def read_stats_csv(filename):\n\n df_dict = {}\n df = pd.read_csv(filename, header=[0, 1, 2])\n\n # Check if End column data type is datetime - if so use start date as index, otherwise use file number;\n # Use start date as index - Note: df[\"End\"] is interpreted as a dataframe here not a series as in hdf5\n if df[\"End\"].dtypes.all() == pd.Timestamp:\n # Drop redundant columns\n if \"File Number\" in df.columns:\n df = df.drop(\"File Number\", axis=1, level=0)\n df = df.drop(\"End\", axis=1, level=0)\n df = df.set_index(df.columns[0])\n df.index.name = \"Date\"\n\n # Convert timestamps to datetime\n try:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d %H:%M:%S\")\n except:\n try:\n # Timestamp will likely be in local (UK) format if csv file has been subsequently edited and saved\n df.index = pd.to_datetime(df.index, format=\"%d/%m/%Y %H:%M\")\n except:\n raise\n # Use file number as index\n else:\n df = df.drop([\"Start\", \"End\"], axis=1, level=0)\n df = df.set_index(df.columns[0])\n df.index.name = \"File Number\"\n\n df.columns.rename([\"channels\", \"stats\", \"units\"], inplace=True)\n logger = filename.split(\"Statistics_\")[-1].split(\".\")[0]\n df_dict[logger] = df\n\n return df_dict", "def load_csv(file):\n import csv\n reader = csv.reader(open(file, 'r'))\n columns = reader.next()\n c2i = dict((columns[i], i) for i in range(len(columns)))\n data = {}\n excluded = set([REP_CSV_HED_TIM, REP_CSV_HED_HER])\n for row in reader:\n \n # get relevant info from the line\n time = float(row[c2i[REP_CSV_HED_TIM]])\n hero = row[c2i[REP_CSV_HED_HER]]\n other = dict((c, 
REP_CSV_HANDLERS.get(c, REP_CSV_DEFHANDLER)(row[c2i[c]])) for c in columns if c not in excluded)\n \n # add to the data dictionary\n if hero not in data: data[hero] = []\n data[hero].append([time] + [other])\n \n return data", "def DictData(self):\n reader = csv.DictReader( open( self.file, \"rU\" ), dialect = \"excel\" )\n return reader", "def get_data():\n data = {}\n with open(app.config['DATA_CSV'], 'r') as csvfile:\n presence_reader = csv.reader(csvfile, delimiter=',')\n for i, row in enumerate(presence_reader):\n if len(row) != 4:\n # ignore header and footer lines\n continue\n\n try:\n user_id = int(row[0])\n date = datetime.strptime(row[1], '%Y-%m-%d').date()\n start = datetime.strptime(row[2], '%H:%M:%S').time()\n end = datetime.strptime(row[3], '%H:%M:%S').time()\n except (ValueError, TypeError):\n log.debug('Problem with line %d: ', i, exc_info=True)\n\n data.setdefault(user_id, {})[date] = {'start': start, 'end': end}\n return data", "def csv_to_dict(self):\n log = logger.configure(\"default\")\n try: \n df = pd.read_csv(self.__csv_path)\n except IOError as e:\n # file not found\n log.error('Could not import {}. Got error {}'.format(self.__csv_path, e))\n raise \n else:\n cols = list(df.columns)\n metafield_cols = [col for col in cols if 'metafields' in col]\n if metafield_cols == [] or 'Handle' not in cols:\n # relevant columns don't exist\n log.error('{} does not contain `Handle` or `metafields` named columns'.format(self.__csv_path))\n raise\n else:\n new_cols = ['Handle'] + metafield_cols\n df = df[new_cols].set_index('Handle')\n df = df[~df.index.duplicated(keep='first')]\n return df.to_dict('index')", "def read_names_into_dict():\n d = dict()\n with open(\"SP_500_firms.csv\") as csvfile:\n input_file = csv.DictReader(csvfile)\n for row in input_file:\n #print(row)\n d[row['Symbol']] = [row['Name'],row['Sector']]\n return d", "def _read_csv_to_dictionary_list(file_name):\n catalog_list = []\n with open(file_name) as csvfile:\n reader = csv.DictReader(csvfile)\n for item in reader:\n catalog_list.append(item)\n return catalog_list", "def read_csv(path):\r\n data = []\r\n csv_file = open(path)\r\n for row in csv.DictReader(csv_file):\r\n data.append(row)\r\n csv_file.close() \r\n return data", "def csv_dict_reader(file_path):\r\n with open(file_path, 'r') as file_obj:\r\n\r\n reader = csv.DictReader(file_obj, delimiter=',')\r\n for line in reader:\r\n #print(line[\"variable_name \"]),\r\n print(line[\"dataset\"])", "def save_csv_into_dictionary(csv_file):\n\n dictionary = OrderedDict()\n with open(csv_file, newline='') as file:\n reader = csv.reader(file)\n for row in reader:\n dictionary[row[0]] = row[1]\n return dictionary", "def parse_trick_ascii(csv_file):\n data_file = csv.DictReader(open(csv_file))\n single_run_data_dict = {'altitude' : [0.0],\n 'latitude' : [0.0],\n 'longitude' : [0.0]}\n # Your code here\n # ...\n # return the dict\n return single_run_data_dict" ]
[ "0.6284877", "0.62292904", "0.622039", "0.62021005", "0.61573863", "0.6139601", "0.613843", "0.6113844", "0.6068469", "0.6050086", "0.6049344", "0.60448176", "0.6007195", "0.6001365", "0.59572005", "0.5940141", "0.5926632", "0.5885316", "0.58824843", "0.58735824", "0.5866467", "0.5866078", "0.58575785", "0.5825311", "0.57955366", "0.57698756", "0.57667977", "0.57613355", "0.57456213", "0.5722469" ]
0.6242013
1
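The getCSV row above splits every column into windows wherever the reference column equals the separator value (an empty string by default). Below is a rough, self-contained illustration of that grouping behaviour, written for Python 3 (row.items() stands in for the Py2 row.iteritems() used in the original); the column names and sample values are made up.

```python
import csv
import io
from collections import defaultdict

# Two windows of "time"/"value" rows, each terminated by a row whose "time"
# field is empty -- the windows_separator described above.
raw = "time,value\n0,1.0\n1,1.1\n,\n0,2.0\n1,2.2\n,\n"

reader = csv.DictReader(io.StringIO(raw))
csv_data = defaultdict(list)
for row in reader:
    for col, value in row.items():          # items() instead of Py2 iteritems()
        csv_data[col].append(value)

# Split each column into windows at the separator rows (empty "time" values).
cuts = [i for i, v in enumerate(csv_data["time"]) if v == ""]
result = defaultdict(list)
prev = 0
for cut in cuts:
    for col in csv_data:
        result[col].append(csv_data[col][prev:cut])
    prev = cut + 1

print(dict(result))
# {'time': [['0', '1'], ['0', '1']], 'value': [['1.0', '1.1'], ['2.0', '2.2']]}
```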
Creates an aiohttp session
async def create_session(self): # Creating a session under an async function is recommended self.session = aiohttp.ClientSession()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def create_session() -> aiohttp.ClientSession:\n\n headers = generate_header()\n\n client_session = aiohttp.ClientSession(headers=headers)\n return client_session", "def make_session():\n import aiohttp\n conn = aiohttp.TCPConnector(limit_per_host=int(\n os.getenv('AIO_CONN_LIMIT', 10)))\n timeout = aiohttp.ClientTimeout(\n total=int(os.getenv('AIO_TOTAL_TIMEOUT', 80)),\n connect=int(os.getenv('AIO_CONN_TIMEOUT', 15)),\n sock_read=int(os.getenv('AOI_READ_TIMEOUT', 30)),\n sock_connect=int(os.getenv('AIO_CONN_TIMEOUT', 15)),\n )\n s = aiohttp.ClientSession(connector=conn, timeout=timeout)\n return s", "async def websession() -> AsyncGenerator[ClientSession, None]:\n async with ClientSession() as aiohttp_session:\n yield aiohttp_session\n\n closed_event = create_aiohttp_closed_event(aiohttp_session)\n await aiohttp_session.close()\n await closed_event.wait()", "def build_session():\n return requests.Session()", "def create(self):\n if self._session:\n self.close()\n\n if not self._session:\n self._session = requests.Session()\n self._session.mount('http://', ra.HTTPAdapter(max_retries=self._max_retries))\n self._session.mount('https://', ra.HTTPAdapter(max_retries=self._max_retries))\n\n msg = u'Created internal requests Session instance {0:#0x}'\n utils.log_with_debug_info(logging.DEBUG, msg.format(id(self._session)))", "def create(self):\n\t\tif self._session:\n\t\t\tself.close()\n\n\t\tif not self._session:\n\t\t\tself._session = requests.Session()\n\t\t\tself._session.mount('http://', ra.HTTPAdapter(max_retries=self._max_retries))\n\t\t\tself._session.mount('https://', ra.HTTPAdapter(max_retries=self._max_retries))\n\n\t\t\tmsg = u'Created internal requests Session instance {0:#0x}'\n\t\t\tlog_with_debug_info(logging.DEBUG, msg.format(id(self._session)))", "def _create_session(self):\n self.session = requests.Session() # pragma: no cover\n self.session.headers[\"Accept\"] = \"application/json\" # pragma: no cover\n if self.user: # pragma: no cover\n self.session.auth = (self.user, self.cred) # pragma: no cover", "def create_session(self, loop):\n session = ClientSession(loop=loop, json_serialize=json_dumps)\n # Setting directly on `session` will raise deprecation warning\n object.__setattr__(session, \"_request\", self.match_request)\n return session", "def session():\n def session():\n return BaseUrlSession()\n return session", "async def _mk_http_connection(self) -> ClientSession:\n if self._ssl_context is not None:\n connector = TCPConnector(ssl=self._ssl_context)\n base_url = f'https://{self._netloc}/'\n else:\n connector = TCPConnector()\n base_url = f'http://{self._netloc}/'\n\n return ClientSession(base_url, connector=connector, timeout=ClientTimeout(self._socket_timeout))", "def session():\n s = requests.Session()\n retries = Retry(total=5, backoff_factor=0.5)\n s.mount(\"http://\", HTTPAdapter(max_retries=retries))\n return s", "def create_session(obj):\n session = requests.Session()\n if obj.user is not None and obj.password is not None:\n session.auth = (obj.user, obj.password)\n\n # Proxy setup\n if obj.proxy is not None:\n proxy = '%s://%s:%s' % (translate_proxy_scheme(obj.proxy_type),\n obj.proxy_host, obj.proxy_port)\n session.proxies = {'http': proxy, 'https': proxy}\n\n # Emulate curl's way of handling SSL\n if obj.cainfo is not None:\n # CA certificates\n session.verify = obj.cainfo\n if obj.sslcert is not None:\n # Client certificate\n session.cert = obj.sslcert\n if obj.verifypeer is not None and not obj.verifypeer:\n # Disable certificate validation\n 
session.verify = False\n if obj.verifyhost is not None and not obj.verifyhost:\n # Check the certificate, but do not verify that the hostname matches it.\n session.mount('https://', HostNameIgnoringAdapter())\n else:\n # Setup the retry strategy\n session.mount('https://', HTTPAdapter(max_retries=retries))\n # setup retry strategy for http connections\n session.mount('http://', HTTPAdapter(max_retries=retries))\n\n return session", "def create_session(self):\n self._session = self.create_scoped_session()\n self.session = self._session()", "async def start_session(self):\n\t\t...", "def _create_session(self) -> Session:\n session = Session()\n\n # Sets the client side and server side SSL cert verification, if provided as properties.\n if ssl_config := self.properties.get(SSL):\n if ssl_ca_bundle := ssl_config.get(CA_BUNDLE): # type: ignore\n session.verify = ssl_ca_bundle\n if ssl_client := ssl_config.get(CLIENT): # type: ignore\n if all(k in ssl_client for k in (CERT, KEY)):\n session.cert = (ssl_client[CERT], ssl_client[KEY])\n elif ssl_client_cert := ssl_client.get(CERT):\n session.cert = ssl_client_cert\n\n # If we have credentials, but not a token, we want to fetch a token\n if TOKEN not in self.properties and CREDENTIAL in self.properties:\n self.properties[TOKEN] = self._fetch_access_token(session, self.properties[CREDENTIAL])\n\n # Set Auth token for subsequent calls in the session\n if token := self.properties.get(TOKEN):\n session.headers[AUTHORIZATION_HEADER] = f\"{BEARER_PREFIX} {token}\"\n\n # Set HTTP headers\n session.headers[\"Content-type\"] = \"application/json\"\n session.headers[\"X-Client-Version\"] = ICEBERG_REST_SPEC_VERSION\n session.headers[\"User-Agent\"] = f\"PyIceberg/{__version__}\"\n\n # Configure SigV4 Request Signing\n if str(self.properties.get(SIGV4, False)).lower() == \"true\":\n self._init_sigv4(session)\n\n return session", "def refresh_session(self):\n if self.session:\n try:\n yield from self.session.close()\n except:\n # we don't care if closing the session does nothing\n pass \n\n self.session = aiohttp.ClientSession()\n self._session_start = time.time()", "def __init__(\n self, host, password, username='smile', port=80, timeout=DEFAULT_TIMEOUT, websession=None, legacy_anna=False,\n ):\n\n if websession is None:\n async def _create_session():\n return aiohttp.ClientSession()\n\n loop = asyncio.get_event_loop()\n self.websession = loop.run_until_complete(_create_session())\n else:\n self.websession = websession\n\n self._auth=aiohttp.BasicAuth(username, password=password)\n\n self._legacy_anna = legacy_anna\n self._timeout = timeout\n self._endpoint = \"http://\" + host + \":\" + str(port)\n self._throttle_time = None\n self._throttle_all_time = None\n self._domain_objects = None", "def init():\n # make sure pool is initialized\n global pool\n if not pool:\n pool = aiohttp.ClientSession(\n connector=aiohttp.TCPConnector(limit=config.MAX_PARALLEL_REQUESTS),\n raise_for_status=False,\n trust_env=True,\n auth=aiohttp.BasicAuth( config.CACHE_USERNAME, config.CACHE_PASSWORD ),\n )", "def mock_aiohttp_client():\n mocker = AiohttpClientMocker()\n\n def create_session(hass, *args, **kwargs):\n session = mocker.create_session(hass.loop)\n\n async def close_session(event):\n \"\"\"Close session.\"\"\"\n await session.close()\n\n hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, close_session)\n\n return session\n\n with mock.patch(\n \"homeassistant.helpers.aiohttp_client._async_create_clientsession\",\n side_effect=create_session,\n ):\n yield mocker", 
"def _new_session(self):\n try:\n self._session.close()\n except (AttributeError,TypeError):\n pass\n self._session = requests.Session()\n return self._session", "def _create_nb_session(self):\n header = {\"Authorization\": \"Token {}\".format(settings.NB_API_KEY)}\n session = requests.Session()\n session.headers.update(header)\n self.nb_session = session\n log.info(\"Created new HTTP Session for NetBox.\")\n return session", "def get_session():\n\n jwt_secret = base64.urlsafe_b64decode(os.getenv('AUTH0_CLIENT_SECRET'))\n claims = {\n 'sub': 'rf|airflow-user',\n 'iat': datetime.utcnow(),\n 'exp': datetime.utcnow() + timedelta(hours=3)\n }\n encoded_jwt = jwt.encode(claims, jwt_secret, algorithm='HS256')\n session = requests.Session()\n\n session.headers.update({'Authorization': 'Bearer {}'.format(encoded_jwt)})\n return session", "def mbta_session() -> BaseUrlSession:\n cfg = config()\n session = BaseUrlSession(cfg.api_root)\n if cfg.api_key:\n session.headers.update({\"x-api-key\": cfg.api_key})\n return session", "def create_session(url):\n # type: (str) -> qi.Session\n application = qi.Application([NAOqiBackend.__name__, \"--qi-url={}\".format(url)])\n try: application.start()\n except RuntimeError as e:\n raise RuntimeError(\"Couldn't connect to robot @ {}\\n\\tOriginal Error: {}\".format(url, e))\n return application.session", "def _initialize_session(self):\n session = requests.Session()\n session.auth = (self.login, self.password)\n session.verify = False\n session.headers.update({'Accept': 'application/json'})\n session.headers.update({'Content-type': 'application/json'})\n return session", "def _getHttpSession(self):\n\n if self.httpSession is None:\n self.httpSession = requests.Session()\n return self.httpSession", "def _session(self):\n if self.session is None:\n self.session = create_session(self.config, self.auth)\n return self.session", "def _create_redash_session():\n session = requests.Session()\n session.headers.update({'Authorization': 'Key {}'.format(API_KEY)})\n return session", "def session_client(session_app):\n yield Client(session_app)", "def create_session(self, **params):\n raise NotImplementedError('Should be implemented by a sub-class.')" ]
[ "0.8836229", "0.83560354", "0.71402645", "0.7037424", "0.7029477", "0.70231855", "0.6983279", "0.69093186", "0.68392366", "0.6755684", "0.67054135", "0.6637041", "0.66357857", "0.66224635", "0.6607673", "0.65447205", "0.65434366", "0.6540857", "0.6540471", "0.6527224", "0.6521137", "0.6483489", "0.64512336", "0.64209837", "0.6418811", "0.63861704", "0.6374237", "0.63108456", "0.62788755", "0.6256165" ]
0.8627391
1
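The create_session coroutine above only constructs an aiohttp.ClientSession and stores it on self; it neither uses nor closes it. A minimal usage sketch under those assumptions is shown below; the Fetcher class, the fetch_status helper and the example URL are illustrative, and the session is closed explicitly because aiohttp expects that.

```python
import asyncio
import aiohttp

class Fetcher:
    async def create_session(self):
        # Creating the session inside a coroutine, as in the snippet above.
        self.session = aiohttp.ClientSession()

    async def fetch_status(self, url: str) -> int:
        async with self.session.get(url) as resp:
            return resp.status

    async def close(self):
        await self.session.close()

async def main():
    fetcher = Fetcher()
    await fetcher.create_session()
    try:
        print(await fetcher.fetch_status("https://example.com"))
    finally:
        await fetcher.close()

asyncio.run(main())
```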
Output a section header
def section_header(text): print "---- %s ----" % text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_section_header(title, color):\n block = \"#\" * (len(title) + 2)\n print(\"\\n\" + color + Style.BRIGHT + block)\n print(\"#\", title)\n print(block + \"\\n\" + Style.RESET_ALL)", "def print_section_header(title, color):\n\tblock = \"#\" * (len(title) + 2)\n\tprint(color + Style.BRIGHT + block)\n\tprint(\"#\", title)\n\tprint(block + \"\\n\" + Style.RESET_ALL)", "def print_section_header(title, COLOR):\n\tblock = \"#\" * (len(title) + 2)\n\tprint(COLOR + Style.BRIGHT + block)\n\tprint(\"#\", title)\n\tprint(block + \"\\n\" + Style.RESET_ALL)", "def _write_section_start(section_name, fobj):\n\n fobj.write(string.capwords(section_name, '_') + '\\n')", "def header_print(output):\n print(\"\\n----------------------------------------------------------------\")\n print(output)\n print(\"----------------------------------------------------------------\")", "def print_header(name, texfile):\n texfile.write('\\n')\n texfile.write('%--------------------\\n')\n texfile.write('%---' + name.upper() + ('-' * (17 - len(name))) + '\\n')\n texfile.write('%--------------------\\n')", "def header(name, value):\n print '%s: %s\\n' % (name, value)", "def print_header_information():\n\t\tprint \"Elijah Molloy\"\n\t\tprint \"70-510 - Spring 1 - 2018\"\n\t\tprint \"PROGRAMMING ASSIGNMENT #4\\n\"", "def print_header():\n print('------------------------------------')\n print(' Lesson04')\n print(' Kata Fourteen Assignment')\n print('------------------------------------\\n')", "def fill_header_section():\n section = _SectionData(\"Header\")\n section.props.append((\"FormatVersion\", 1))\n section.props.append((\"Source\", get_combined_ver_str()))\n section.props.append((\"Type\", \"Configuration\"))\n section.props.append((\"Note\", \"User settings of SCS Blender Tools\"))\n author = bpy.context.user_preferences.system.author\n if author:\n section.props.append((\"Author\", str(author)))\n section.props.append((\"ConfigStoragePlace\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.config_storage_place)))\n section.props.append((\"DumpLevel\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.dump_level)))\n return section", "def get_hierarchy_section_header(self, path):\n section_index = len(path) - 1\n section = path[section_index][0]\n html = f\"Section {section.roman_numeral}: {section.title.capitalize()}\"\n\n return html", "def print_header():\n \n print_from_file(\"html/header.html\")", "def show_header():\n return {};", "def section(sect_name, width=TERMINAL_CHARS):\n\n true_length = width - len(sect_name) - 1\n print(\"\\n_%s\" % (sect_name.upper() + \"_\" * true_length))", "def _sectionheader(self):\n header = SectionHeader()\n header.crc = self.reader.readint(2)\n header.id = self.reader.readint(2)\n header.len = self.reader.readint(4)\n header.versnr = self.reader.readint(1)\n header.protnr = self.reader.readint(1)\n header.reserved = self.reader.reads(6)\n if header.reserved:\n header.reserved = header.reserved.replace('\\x00', '')\n\n return header", "def print_section(self, s):\n section = s.upper()\n\n self.print_newline()\n self.print_newline()\n self._write('%s\\n' % section)\n self._write('%s\\n' % ('-' * len(section)))\n self.print_newline()", "def generate_header(self, header=None):\n if header is None:\n header = self.header\n\n lines = [self.PREFIX_HEAD + '!b']\n for k, v in header.items():\n if k in ('labels', 'categories'):\n v = ', '.join(v)\n elif k == 'draft':\n v = repr(v)\n lines.append(self.HEADER_FMT % (k, v))\n lines.append(self.PREFIX_END)\n return 
'\\n'.join([_f for _f in lines if _f]) + '\\n'", "def write_header(self):\r\n if self.arguments['--out']:\r\n self.file = open(self.arguments['--out'], \"w+\")\r\n self.file.write(self.version)\r\n for list_item in self.list_of_header_objects:\r\n self.file.write(list_item.line)\r\n self.file.write(self.body_header_line.line)\r\n self.file.close()\r\n else:\r\n for list_item in self.list_of_header_objects:\r\n print(list_item.line)\r\n print(self.body_header_line.line)", "def print_header():\n print(\"STEM Center Temperature Project\")\n print(\"Shaotong Wen\")", "def WriteHeader(self):\n return", "def write_header(self, total_blocks):\n self.write_string('ASEF')\n self.write('2H', (1, 0))\n self.write('i', total_blocks)", "def header(self):\n ...", "def header(self, **args):\n return self.pageConfig['header'] % self.pageConfig", "def format_header(self, text: str, anchor: Optional[str] = None) -> str:", "def print_header(module, fd):\n module_name = str(module.arg)\n header = OrderedDict()\n header['swagger'] = '2.0'\n header['info'] = {\n 'description': '%s API generated from %s' % (\n module_name, module.pos.ref.rsplit('/')[-1]),\n 'version': '1.0.0',\n 'title': str(module_name + ' API')\n }\n header['host'] = 'localhost:8080'\n # TODO: introduce flexible base path. (CLI options?)\n header['basePath'] = '/restconf'\n header['schemes'] = ['http']\n return header", "def Show_Headers( self ):\r\n self.system.Change_Seq( \"Header\" )", "def print_section(section_name, width=120):\n section_name = ' ' + section_name + ' '\n print('{:=^{ }}'.format(section_name, width))", "def WriteHeader(self, output_mediator):\n self.WriteText('{')\n self._event_counter = 0", "def pp_file_header(self):\n self.separator()\n for item in self.file_header:\n print(item.ljust(27, ' ') + \": {}\".format(self.file_header[item]))\n \n self.separator()", "def format_header(self, header):\n raise NotImplementedError()" ]
[ "0.7780516", "0.77521163", "0.7658305", "0.7110185", "0.71099603", "0.70462835", "0.70025337", "0.69644576", "0.6949137", "0.6932552", "0.6805164", "0.6789609", "0.677836", "0.67727095", "0.67724276", "0.67541957", "0.67465717", "0.67429996", "0.6727082", "0.6685526", "0.66713095", "0.66709054", "0.6640218", "0.6597265", "0.65964735", "0.6585167", "0.6584502", "0.65507865", "0.6531619", "0.6514606" ]
0.83694506
0
Return the first 10 digits of the sum
def first_10_digits_of_sum(): sum_ = sum([int(i) for i in read_data()]) return str(sum_)[:10]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sumDigit():", "def sum_of_digits(n):\n rest_of_num, last_num = split(n)\n if rest_of_num < 10:\n \treturn last_num + rest_of_num\n return last_num + sum_of_digits(rest_of_num)", "def digit_sum(x):\n s = 0\n while x>0:\n s = s+(x%10)\n x = x//10\n\n return s", "def sum_digits(n):\n if (n < 10):\n return n\n else:\n all_but_last, last = split(n)\n return sum_digits(all_but_last) + last", "def digit_sum(n):\n s = 0\n while n:\n s += n % 10\n n //= 10\n return s", "def sum_digits(n):\n num = n\n incTen = 1\n summy = 0\n if num > 10:\n while incTen * 10 < num:\n incTen = incTen * 10\n while incTen >= 10:\n summy += num // incTen\n num = num % incTen\n incTen = incTen // 10\n summy += num\n return summy\n elif num == 10:\n return 1\n else:\n return num", "def sum_digits(n):\n \"*** YOUR CODE HERE ***\"\n count=0\n length=len(str(n))\n last=0\n sum=0\n while count<length:\n last=n%10\n n//=10\n sum+=last\n count+=1\n return sum", "def digit_sum(n):\n\treturn sum(int(c) for c in str(n))", "def digital_sum(n):\n r = 0\n while n:\n r, n = r + n % 10, n // 10\n return r", "def digitSum ( n ) :\n return sum ( map ( int , str ( n ) ) )", "def first_n_digits(num, n):\n return num // 10 ** (int(math.log(num, 10)) - n + 1)", "def fn(n):\n digits = [int(x) for x in str(n)]\n for i in reversed(range(len(digits)//2+1)): \n if digits[i] < 9: break \n else: return 10*n + 11\n digits[i] = digits[~i] = digits[i] + 1\n for ii in range(i): \n digits[~ii] = digits[ii]\n for ii in range(i+1, len(digits)//2+1): \n digits[ii] = digits[~ii] = 0\n return int(\"\".join(map(str, digits)))", "def sum_digits(n):\n sum = 0\n while n > 0:\n num = n % 10\n sum += num\n n //= 10\n return sum", "def sum_of_digits(n):\n return sum(int(c) for c in str(n))", "def add_digits(n):\n return sum([int(d) for d in str(n)])", "def digit_sum(n):\n sum_of_digits = 0\n for c in str(n):\n sum_of_digits += int(c)\n return sum_of_digits", "def get_first_digit(x):\n x = int(x)\n if x < 0:\n return 0\n x = str(x)\n if len(x) == 1: # less than 10 ?\n return 0\n else:\n return int(x[0])", "def sum_digits(n):\n digits = [int(i) for i in str(n)]\n return sum(digits)", "def plus_one(digits):\n result = 0\n power = 0\n for index in range(len(digits) - 1, -1, -1):\n result += (digits[index] * (10 ** power))\n power += 1\n\n result += 1\n return list(map(lambda x: int(x), str(result)))", "def min_digit(x):\n \"\"\" GRAPSTE TON KWDIKA SAS APO KATW \"\"\"\n s = 10\n while(x>0):\n if(s>(x%10)):\n s = x%10\n x = x//10\n return s", "def sum_numbers_one_to_ten():\n sum=0\n for num in range(1,11):\n sum=sum+num\n return sum\n pass", "def sum_of_digits_in_number(n: int) -> int:\n return sum(int(digit) for digit in str(n))", "def keep_summing(x):\n \"\"\" GRAPSTE TON KWDIKA SAS APO KATW \"\"\"\n s=0\n k=0\n while(x>0):\n s = s+(x%10)\n x = x//10\n while(s>9):\n while(s>0):\n k = k + (s%10)\n s = s//10\n s = k\n return s", "def firstTen(self, num):\n\t\tif num <= 10:\n\t\t\treturn 1 \n\t\treturn 0", "def get_sum_of_digits(number):\n return sum(int(digit) for digit in str(number))", "def calculate_digits_sum(number: int) -> int:\n\n # Return value\n ret = 0\n\n while number != 0:\n # Extract the last digit number and add it to ret\n ret += number % 10\n\n # Delete the last digit of the number\n number //= 10\n\n return ret", "def val( digits ):\n v= 0\n p= 1\n for d in digits:\n v += d*p\n p *= 10\n return v", "def round_to_ten(number):\n count = len(str(int(number)))\n if number == 1:\n count = 0\n elif number % 10 == 0:\n count = count - 1\n return 10 ** 
count", "def problem_48():\n\n return int(str(sum(x**x for x in range(1, 1001)))[-10:])", "def sum_string_digits(my_str):\n\n return 0 # this is a placeholder. remove it." ]
[ "0.7978776", "0.7390683", "0.7385932", "0.72872734", "0.7234461", "0.71717376", "0.7130944", "0.6975113", "0.691324", "0.6908647", "0.6887424", "0.68067276", "0.667946", "0.6674701", "0.66520053", "0.6575619", "0.65425557", "0.64808357", "0.64773303", "0.64495385", "0.6437903", "0.64058644", "0.6401748", "0.63677657", "0.63262403", "0.63102776", "0.62910616", "0.6235431", "0.6198735", "0.6194377" ]
0.88592225
0
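first_10_digits_of_sum above relies on a read_data() helper that is not shown, so the sketch below passes the numbers in directly (an assumption) to show that slicing str(total)[:10] yields the leading ten digits of the sum.

```python
def first_10_digits_of_sum(numbers):
    total = sum(int(n) for n in numbers)
    return str(total)[:10]

# Two 15-digit numbers standing in for the data file.
numbers = [
    "371072875339021",
    "463769376774900",
]
print(sum(int(n) for n in numbers))        # 834842252113921
print(first_10_digits_of_sum(numbers))     # '8348422521'
```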
Print ninja code block
def ninja_block(): return ''' ```sh # In build directory ninja ```'''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def repl_print_statements():\n pass", "def ninja_simulate_block():\n return '''\n```sh\n# In build directory\nninja && ./simulate\n```'''", "def example():\n print \"\"\"\n \"\"\"", "def example():\n print \"\"\"\n \"\"\"", "def print_out():\n pass", "def debug():\n def _debug(x):\n return e.String(x.as_source())\n yield (\"(λ any . str)\", _debug)", "def code():", "def sequential_print_statements():\n pass", "def print_post():\n print('| | |'),", "def display_message():\n\tprint(\"Learnt to write functions, which are named blocks of code that are designed to do one specific job.\")", "def print_blocks(bril):\n import briltxt\n\n func = bril['functions'][0] # We only process one function.\n for block in form_blocks(func['instrs']):\n # Mark the block.\n leader = block[0]\n if 'label' in leader:\n print('block \"{}\":'.format(leader['label']))\n block = block[1:] # Hide the label, for concision.\n else:\n print('anonymous block:')\n\n # Print the instructions.\n for instr in block:\n print(' {}'.format(briltxt.instr_to_string(instr)))", "async def print_code(self):\n print(\"Current code:\", self.bot.get_code())\n await self.bot.reply('k')", "def debug(node):\n print \"%r\" % node", "def print_in_block(message):\n print(\"|\", message)", "def print_code(func):\n print(inspect.getsource(func))", "def show_code(code):\n\n print('The code was: '+str(code))", "def printGeneration(tree):\n for mod in tree:\n if mod.param != []:\n print(str(mod.symbol) + str(mod.param).replace(\"[\",\"(\").replace(\"]\",\")\"),end=\"\")\n else:\n print(str(mod.symbol),end=\"\")\n print(\"\")", "def summarize(self):\n info(\"Running \" + self.title + \" generator\")", "def print_block():\n do_twice(do_block)\n print_column()", "def print_in_block(message):\n print(\"=\"*4, message)", "def showInstructions():\n print(\"\"\"\n RPG Game\n ========\n Commands:\n go [direction]\n get [item]\n\n\t\"\"\")", "def simulate_block():\n return '''\n```sh\n# In build directory\n./simulate\n```'''", "def verbose(self, block: Block):\n print('\\n\\n==============================')\n print('Hash:\\t\\t', block.hash.hexdigest())\n print('Previous Hash:\\t', block.previous_hash.hexdigest())\n print('Nounce:\\t\\t', block.nonce)\n print('Data:\\t\\t', block.data)\n print('\\n\\n==============================')", "def print_block():\n do_twice(do_block)\n do_twice(do_block)\n print_column()", "def main():\n print \"Printing Sample Status\"", "def eval_python_blocks(req, body):\n localsdict = {\"request\": req}\n globalsdict = {}\n\n old_stdout = sys.stdout\n old_stderr = sys.stderr\n\n try:\n start = 0\n while body.find(\"<%\", start) != -1:\n start = body.find(\"<%\")\n end = body.find(\"%>\", start) \n\n if start != -1 and end != -1:\n codeblock = body[start+2:end].lstrip()\n\n sys.stdout = StringIO.StringIO()\n sys.stderr = StringIO.StringIO()\n\n try:\n exec codeblock in localsdict, globalsdict\n\n except Exception, e:\n print \"ERROR in processing: %s\" % e\n\n output = sys.stdout.getvalue() + sys.stderr.getvalue()\n body = body[:start] + output + body[end+2:]\n\n finally:\n sys.stdout = old_stdout\n sys.stderr = old_stderr\n\n return body", "def useful():\n\n print('I do something.')", "def print_debug(context: str = \"\") -> None:\r\n print(context)\r\n print(\"This is the current board\")\r\n print(example)\r\n print(\"This is the conflict space\")\r\n print(conflict_space)\r\n print(\"This is the safeboard\")\r\n print(safeboard)", "def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text", "def 
test_03_pass_print(self):\n print('Hello World!')" ]
[ "0.6483528", "0.63958025", "0.63629156", "0.63629156", "0.62017804", "0.6197296", "0.60597914", "0.6041047", "0.5984415", "0.5972134", "0.5945607", "0.58953977", "0.58189815", "0.5808244", "0.58080333", "0.5800939", "0.5800462", "0.57823473", "0.57628834", "0.57350373", "0.5716457", "0.5700911", "0.56960815", "0.5695031", "0.5693696", "0.5678247", "0.564635", "0.5629804", "0.5627878", "0.5626034" ]
0.68514085
0
Print simulate and ninja code block
def ninja_simulate_block(): return ''' ```sh # In build directory ninja && ./simulate ```'''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def simulate_block():\n return '''\n```sh\n# In build directory\n./simulate\n```'''", "def sequential_print_statements():\n pass", "def repl_print_statements():\n pass", "def print_out():\n pass", "def run(self):\r\n self.inst.write(':RUN')", "def main():\n print \"Printing Sample Status\"", "def test_03_pass_print(self):\n print('Hello World!')", "def code():", "def display_message():\n\tprint(\"Learnt to write functions, which are named blocks of code that are designed to do one specific job.\")", "def useful():\n\n print('I do something.')", "def part_1():\n print(\"You finally get out of the forest\")\n time.sleep(1)\n print(\"You see a giant frost spider in the distance\")\n print(r\"\"\"\n (\n )\n (\n /\\ .-\" \"-. /\\\n //\\\\/ ,,, \\//\\\\\n |/\\| ,;;;;;, |/\\|\n //\\\\\\;-\" \"-;///\\\\\n // \\/ . \\/ \\\\\n (| ,-_| \\ | / |_-, |)\n //`__\\.-.-./__`\\\\\n // /.-(() ())-.\\ \\\\\n (\\ |) '---' (| /)\n ` (| |) `\n \\) (/)\"\"\")", "async def print_code(self):\n print(\"Current code:\", self.bot.get_code())\n await self.bot.reply('k')", "def print_block():\n do_twice(do_block)\n do_twice(do_block)\n print_column()", "def print_block():\n do_twice(do_block)\n print_column()", "def do_block():\n print_column()\n print_rows()", "def do_block():\n print_column()\n print_rows()", "def test(): # TO BE DELETED WHEN PROGRAM COMPLETED\n print('methode test')", "def example():\n print \"\"\"\n \"\"\"", "def example():\n print \"\"\"\n \"\"\"", "def printMe():\n\n print(\"meeeeeee\")", "def summarize(self):\n info(\"Running \" + self.title + \" generator\")", "def print_in_block(message):\n print(\"=\"*4, message)", "def printMe():\n\n print(\"Meeeeeee!\")", "def showInstructions():\n print(\"\"\"\n RPG Game\n ========\n Commands:\n go [direction]\n get [item]\n\n\t\"\"\")", "def printMe():\n print(\"meeeeeee\")", "def test_print_end(self):\n response = support.create_project(self, 'madison')\n self.assertFalse(\n response.failed,\n Message('should have created project', response=response)\n )\n\n print_string = string.ascii_lowercase\n\n code = '\\n'.join([\n 'import cauldron as cd',\n 'cd.display.text(\"Hello World\")',\n 'print(\"{}\")'.format(print_string)\n ])\n\n support.add_step(self, contents=code)\n\n response = support.run_command('run -f')\n self.assertFalse(\n response.failed,\n Message('should have run step', response=response)\n )\n\n project = cauldron.project.get_internal_project()\n dom = project.steps[1].dom # type: str\n\n self.assertEqual(\n dom.count(print_string),\n 2,\n 'should have printed ascii lowercase'\n )", "def main():\n load()\n\n print(generate())", "def test1():\n print('This is a test1')", "def postloop() -> None:\n print(\"\\n\")", "def test_JupyterNotebooks1(self):\n\n self.delayDisplay(\"Starting the test\")\n\n # TODO: implement test\n\n self.delayDisplay('Test passed!')" ]
[ "0.6855271", "0.6472742", "0.64571154", "0.6344158", "0.62682426", "0.6237703", "0.620284", "0.6160858", "0.6158303", "0.6095411", "0.6083449", "0.6054962", "0.60332555", "0.60188013", "0.5985015", "0.5985015", "0.59843993", "0.5969625", "0.5969625", "0.5918395", "0.59150785", "0.5854311", "0.5851309", "0.58417755", "0.5833123", "0.58209854", "0.58202004", "0.5803819", "0.58038014", "0.5769021" ]
0.69238406
0
Extract attributes from a raw data set which has the class in the first column
def extractAttrs(data): return [instance[1:] for instance in data]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fields(data):\n return data['train'][data['train'].keys()[0]].attrs.keys()", "def extract(self, data):", "def preprocess_soy(soy_path):\n\n print('[ INFO ]: Preprocessing soy data...')\n\n # Rename headers of data frame\n soy_data = pd.read_csv(soy_path, header=None)\n soy_data.columns = ['attr_{}'.format(i) for i in range(0,len(soy_data.columns))]\n\n # Place classes into list\n classes = soy_data[soy_data.columns[-1]].unique().tolist()\n\n return soy_data, classes", "def separate_feature_class(data):\n data_c = data.copy()\n y = data_c.reindex(columns=['class'])\n X = data_c.drop(columns='class')\n return X,y", "def getDataAttributes(self):\n asRet = [];\n asAttrs = dir(self);\n for sAttr in asAttrs:\n if sAttr[0] == '_' or sAttr[0] == 'k':\n continue;\n if sAttr in self.kasInternalAttributes:\n continue;\n oValue = getattr(self, sAttr);\n if callable(oValue):\n continue;\n asRet.append(sAttr);\n return asRet;", "def extract_attribute(self, cls, attr_name):\r\n\r\n def extract(value, name):\r\n\r\n try:\r\n return getattr(value, name)\r\n except (AttributeError, IndexError):\r\n return None\r\n\r\n attributes = [\r\n extract(value, attr_name)\r\n for galaxy in self.galaxies\r\n for value in galaxy.__dict__.values()\r\n if isinstance(value, cls)\r\n ]\r\n\r\n if attributes == []:\r\n return None\r\n elif isinstance(attributes[0], float):\r\n return values.ValuesIrregular(values=attributes)\r\n elif isinstance(attributes[0], tuple):\r\n return grid_2d_irregular.Grid2DIrregular(grid=attributes)", "def get_data_class_attr_list(self, o):\n alist = None # Attributes to store\n ff = None # Load filter function\n for cl in self.data_classes:\n if isinstance(o, cl):\n alist = self.data_classes[cl][0]\n ff = self.data_classes[cl][1]\n break\n return (alist, ff)", "def meta(cls):\n if getattr(cls, '__from_class__', None) is not None:\n cls = cls.__from_class__\n attribute_info = {}\n for name, value in cls.__table__.columns.items():\n attribute_info[name] = str(value.type).lower()\n\n return {cls.__name__: attribute_info}", "def attributes(self):", "def parse_attributes(cls):\n cls._fields = []\n cls._tables = []\n for attr_k in dir(cls):\n try:\n attr = object.__getattribute__(cls, attr_k)\n except AttributeError:\n continue\n if issubclass(attr.__class__, ReferenceManyField):\n cls._tables.append(attr_k)\n elif issubclass(attr.__class__, Field):\n cls._fields.append(attr_k)", "def thrift_attrs(obj_or_cls) -> List[str]:\n return [v[1] for v in obj_or_cls.thrift_spec.values()]", "def get_dataclass_attributes(cls) -> Dict[str, Tuple[Any, str]]:\n fields = cls.__dataclass_fields__.values()\n attrs = {}\n for field in fields:\n if field.type != InitVar:\n attrs[field.name] = field.type, \"\"\n return attrs", "def _select_attributes(self, item_data):\r\n return item_data", "def extract(self):\n pass", "def get_attributes(cls):\r\n return []", "def load_classes():\n \tfnm = \"../datasets/bbc/bbc.classes\"\n \tconverters = { \"docid\": toInt, \"docid\":toInt}\n \tX = pandas.read_table(fnm, header=None, sep=\" \", skiprows=4, comment=\"%\", names= [\"docid\", \"classid\"], converters=converters)\n \treturn X", "def _get_class_attributes(attributes, cls, properties, mapper_attrs, table_attributes):\n for attr in attributes:\n if attr.startswith('_'):\n continue\n if attr.endswith('_id'):\n continue\n if attr in ('metadata', 'query'):\n continue\n var = getattr(cls, attr)\n if callable(var):\n continue\n if isinstance(var, property):\n try:\n attr = '_' + attr\n var = getattr(cls, attr)\n 
properties.append(attr)\n except AttributeError as err:\n print('could not determin corresponding attribute for property {}'.format(var))\n continue\n if isinstance(var, InstrumentedAttribute):\n var = cast(InstrumentedAttribute, var)\n table_attributes[attr] = mapper_attrs[attr]", "def get_extra(self):\n\t\tselect = []\n\t\tfor cls in range(1, NCLASSES + 1):\n\t\t\tselect.append(where(self.labels == cls)[0][:self.limits[cls - 1]])\n\t\tfilter = concatenate(select)\n\t\treturn self.data[filter, :], self.labels[filter]", "def attributes(self):\n return { k: getattr(self, k) for k in self.__class__.columns().keys() }", "def _read_attributes(root):\n output_list = []\n for _, value in enumerate(root[0][2]):\n attr = Attribute(value)\n output_list.append(attr)\n return output_list", "def getfield(self, pkt, s):\n class_id = getattr(pkt, self._entity_class)\n attribute_mask = getattr(pkt, self._attributes_mask)\n entity_class = omci_entities.entity_id_to_class_map[class_id]\n indices = entity_class.attribute_indices_from_mask(attribute_mask)\n data = {}\n table_attribute_mask = 0\n for index in indices:\n try:\n fld = entity_class.attributes[index].field\n except IndexError as e:\n log.error(\"attribute-decode-failure\", attribute_index=index,\n entity_class=entity_class, e=e)\n continue\n try:\n s, value = fld.getfield(pkt, s)\n except Exception as _e:\n raise\n if isinstance(pkt, OmciGetResponse) and isinstance(fld, OmciTableField):\n data[fld.name + '_size'] = value\n table_attribute_mask = table_attribute_mask | (1 << (16 - index))\n else:\n data[fld.name] = value\n if table_attribute_mask:\n data['table_attribute_mask'] = table_attribute_mask\n return s, data", "def get_attributes(self) -> Dict[str, str]:\n pass", "def getAttributes(self):\n pass", "def getfield(self, pkt, s):\n class_id = getattr(pkt, self._entity_class)\n entity_class = omci_entities.entity_id_to_class_map.get(class_id)\n data = {}\n for attribute in entity_class.attributes:\n if AttributeAccess.SetByCreate not in attribute.access:\n continue\n if attribute.field.name == 'managed_entity_id':\n continue\n fld = attribute.field\n s, value = fld.getfield(pkt, s)\n data[fld.name] = value\n return s, data", "def reaction_class_from_data(class_typ, class_spin,\n class_radrad, class_isc):\n return (class_typ, class_spin, class_radrad, class_isc)", "def class2df(cl):\n\n attrs = vars(cl)\n cl = {item[0]: item[1] for item in attrs.items()}\n\n return pd.Series(cl)", "def get_class_attr_list(self, o):\n alist = None # Attributes to store\n ff = None # Load filter function\n for cl in self.classes:\n if isinstance(o, cl):\n alist = self.classes[cl][0]\n ff = self.classes[cl][1]\n break\n if isinstance(o, Block._ComponentDataClass):\n # If you're here you are trying to serialize an element of an\n # indexed block at the top level. 
We do want to allow that, so\n # we'll pretend it's a block.\n alist = self.classes[Block][0]\n ff = self.classes[Block][1]\n return (alist, ff)", "def extract_data(self):\r\n self.parse()\r\n lst = []\r\n for i in self.table.text.split(\"\\n\")[3:]:\r\n if i != \"\" and bool(re.search(r'\\d', i)):\r\n lst.append(i.replace(u'\\xa0', ''))\r\n single = lst.pop(-3)\r\n lst = [i + \" \" + j for i, j in zip(lst[::2], lst[1::2])]\r\n lst.append(single)\r\n return lst[0:28]", "def get_class_attributes(cls) -> Dict[str, Tuple[Any, str]]:\n try:\n source = inspect.getsource(cls.__init__) or \"\"\n if not source:\n return {}\n except TypeError:\n return {}\n source = utils.join(source.split(\"\\n\"))\n node = ast.parse(source)\n\n attr_list: List[Tuple] = []\n module = importlib.import_module(cls.__module__)\n globals = dict(inspect.getmembers(module))\n for x in ast.walk(node):\n if isinstance(x, _ast.AnnAssign):\n attr, lineno, type_str = parse_annotation_assign(x)\n type = eval(type_str, globals)\n attr_list.append((attr, lineno, type))\n if isinstance(x, _ast.Attribute) and isinstance(x.ctx, _ast.Store):\n attr_list.append(parse_attribute_with_lineno(x))\n attr_list = sorted(attr_list, key=lambda x: x[1])\n\n attrs: Dict[str, Tuple[Any, str]] = {}\n lines = source.split(\"\\n\")\n for name, lineno, *type in attr_list:\n if name.startswith(\"self.\"):\n name = name[5:]\n desc = get_description(lines, lineno)\n if type:\n attrs[name] = type[0], desc # Assignment with type annotation wins.\n elif name not in attrs:\n attrs[name] = None, desc\n return attrs", "def _generate_rowklass(self):\n header = six.next(self.resolved)\n clean = []\n for h in header:\n underscoreless = h.strip().lower().replace(' ', '_').replace('.', '_')\n specialless = underscoreless.replace('(', '').replace(')', '').replace('?', '').replace('-', '')\n if specialless == '':\n clean.append(specialless)\n continue\n try:\n num = int(specialless[0])\n numbers = {1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five',\n 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten'}\n numless = numbers[num] + specialless[1:]\n cleaned = numless\n except ValueError:\n cleaned = specialless\n\n more = 1\n while cleaned in clean:\n more += 1\n cleaned += str(more)\n\n clean.append(cleaned)\n\n for i, v in enumerate(clean):\n if v == '':\n clean[i] = 'field_' + str(i)\n self.rowklass = collections.namedtuple('RowKlass', clean)" ]
[ "0.623917", "0.62341034", "0.5990778", "0.59630466", "0.5815131", "0.576771", "0.5712347", "0.5694353", "0.5662337", "0.56433815", "0.5634265", "0.55878204", "0.55013424", "0.5436354", "0.541804", "0.5418011", "0.5416972", "0.541447", "0.541183", "0.5401354", "0.5395833", "0.5392", "0.53875273", "0.538666", "0.53809005", "0.5365485", "0.5352007", "0.5351021", "0.5347687", "0.53208905" ]
0.71550965
0
Remove instances in duplicates from data. If remove_all then remove all instances, else remove all but the first instance
def removeDuplicates(data, duplicates, remove_all): matches = [{'instance':instance, 'number':0} for instance in duplicates] out = [] for instance in data: is_duplicate = False for m in matches: if m['instance'] == instance: if m['number'] > 0 or remove_all: is_duplicate = True m['number'] = m['number'] + 1 if not is_duplicate: out.append(instance) return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def without_duplicates(self) -> \"SampleDataSet\":\n return SampleDataSet(self._data.drop_duplicates())", "def _remove_duplicates(self):\n self.search_query = remove_duplicates(self.search_query)", "def clean_duplicate(self):\r\n self.elements = list(set(self.elements))\r\n self.elements = [e for e in self.elements if e != '']", "def dedupe(self):\n elems = []\n for x in self.elems:\n if x not in elems:\n elems.append(x)\n return _coconut_tail_call(self.__class__, *elems)", "def removeDuplicatesInList(self, data):\n newDataList = []\n for i in data:\n if newDataList.count(i) == 0:\n newDataList.append(i)\n data.clear()\n data += newDataList", "def remove_duplicates(self) -> bool:\n return self._remove_duplicates", "def rm_duplicates(self):\n # get uniq representation of existing detection documents\n existing = set(ed.uniq_data for ed in self.existing_detections)\n # remove duplicates\n for idx in xrange(len(self.new_detections)-1, -1, -1):\n nd = self.new_detections[idx]\n if nd.uniq_data in existing:\n self.new_detections.pop(idx)", "def removeDuplicate(self,permutations=True):\n ind,ok = self.testDuplicate(permutations)\n return self[ind[ok]]", "def remove_duplicates(data):\n already_used_items = {}\n return_data = []\n\n for item in data:\n # Yes, I know that I can find used items in the return_data,\n # but HW requires this logic.\n if not already_used_items.get(item):\n return_data.append(item)\n already_used_items[item] = True\n\n return return_data", "def _purge_duplicates(f):\n @functools.wraps(f)\n def wrapper(*args, **kwds):\n ret_val = f(*args, **kwds)\n new_list = []\n for item in ret_val:\n if item in new_list:\n continue\n new_list.append(item)\n return new_list\n return wrapper", "def remove_duplicates_orderly(cls, list_with_duplicates: list, preserve_first_encounter: bool = True,\n\t\t\t\t\t\t\t\t preserve_original_list: bool = False) -> list:\n\t\tlist_set = set(list_with_duplicates)\n\t\tlist_new = list_with_duplicates.copy() if preserve_original_list else list_with_duplicates\n\t\tif len(list_new) == len(list_set): # No extra\n\t\t\treturn list_new\n\t\tif preserve_first_encounter:\n\t\t\tlist_new.reverse()\n\t\tfor index in range(len(list_new) - 1, -1, -1):\n\t\t\titem = list_new[index]\n\t\t\tif item in list_set:\n\t\t\t\tlist_set.remove(item)\n\t\t\telse:\n\t\t\t\tlist_new.pop(index)\n\t\tif preserve_first_encounter:\n\t\t\tlist_new.reverse()\n\t\treturn list_new", "def removeDuplicates(list):\n\treturn Set((item for item in list))", "def remove_duplicates(somelist):\n return set(somelist)", "def remove_identical(list):\n seen = set()\n seen_add = seen.add\n return [x for x in list if not (x in seen or seen_add(x))]", "def remove_duplicates(self):\n cur = self.head\n prev = None\n\n dup_values = dict()\n\n while cur:\n if cur.data in dup_values:\n # Remove node:\n prev.next = cur.next\n else:\n # Have not encountered element before.\n dup_values[cur.data] = 1\n prev = cur\n cur = prev.next", "def remove_duplicates(self,list_):\r\n ret =[]\r\n\r\n for item in list_:\r\n if item not in ret:\r\n ret.append(item)\r\n removed = len(list_)-len(ret)\r\n logger.info('%d duplicate%s removed.' %(removed,plural_or_not(removed)))\r\n return ret", "def __delete_duplicates(self):\n log = logging.getLogger()\n log.debug(\"\\n---> Duplicate check <---\")\n\n chromosomes = list(set(self.chromosomes))\n diff = self.size - len(chromosomes)\n\n if diff > 0:\n log.debug(\"---> Duplicate(s) found! 
<---\")\n for i in range(diff):\n chromosomes.append(\n Chromosome(self.__generate_random_gene_sequence(),\n self.environment))\n else:\n log.debug(\"---> No duplicates found! <---\")\n\n self.chromosomes = chromosomes", "def removeDuplicates(list):\n\treturn set((item for item in list))", "def deduplicate(items):\n seen = set()\n for item in items:\n if not item.id in seen:\n seen.add(item.id)\n yield item\n else:\n logging.debug(\"Removing duplicated sample %s\", item.id)", "def remove_sorted_duplicates(self):\n cur = self.head\n while cur is not None and cur.next is not None:\n if cur.next.data == cur.data:\n cur.next = cur.next.next\n else:\n cur = cur.next\n return self.head", "def _trim_duplicates(all_matches):\n trimmed_list = IndexedSet()\n for match in all_matches:\n if (\n match\n and match not in trimmed_list\n and match[::-1] not in trimmed_list\n ):\n trimmed_list.add(match)\n return trimmed_list", "def remove_duplicates(self, objects: list):\n # Filter list removing duplicates\n result = [\n item\n for index, item in enumerate(objects)\n if item not in objects[index + 1 :]\n ]\n return result", "def drop_duplicates(self, unique_id='identity', keep='first'):\n if self.duplicates(unique_id):\n cases_before = self._data.shape[0]\n self._data.drop_duplicates(subset=unique_id, keep=keep, inplace=True)\n if self._verbose_infos:\n cases_after = self._data.shape[0]\n droped_cases = cases_before - cases_after\n msg = '%s duplicated case(s) dropped, %s cases remaining'\n print(msg % (droped_cases, cases_after))\n return None", "def remove_duplicates_badSolution( li ):\n newli=[]\n seen = set()\n for item in li:\n if item not in seen:\n seen.add( item )\n newli.append(item)\n\n return newli", "def remove_duplicates(self, hits):\n\t\tseen = set()\n\t\tkeep = []\n\n\t\tfor i in range(len(hits)):\n\t\t\tif hits[i][\"Text\"] not in seen:\n\t\t\t\tseen.add(hits[i][\"Text\"])\n\t\t\t\tkeep.append(hits[i])\n\n\t\treturn keep", "def _remove_dupes(recs, input, bad_movies, hist_list=[], feedback_list=[]):\n all_rated = input + bad_movies + hist_list + feedback_list\n nonlocal dupes\n dupes = [x for x in recs if x[0] in input]\n return [x for x in recs if x[0] not in all_rated]", "def distinct(self):\n memory = set()\n\n def _distinct(iterator):\n while True:\n item = next(iterator)\n if item in memory:\n continue\n memory.add(item)\n return item\n return self.__class__(self, _distinct)", "def removeDuplicates(seq):\r\n seen = set()\r\n seen_add = seen.add\r\n return [x for x in seq if not (x in seen or seen_add(x))]", "def drop_duplicate_rows(self):\n if self._pandas_flag:\n self.data_frame = self.data_frame.drop_duplicates()\n else:\n self.data_frame = self.data_frame.dropDuplicates()", "def remove_duplicates(mylist):\n return list(set(mylist))" ]
[ "0.69091046", "0.6639899", "0.6555161", "0.64862686", "0.6403717", "0.640026", "0.63934326", "0.62803054", "0.62781584", "0.6226226", "0.6102793", "0.60704607", "0.6057449", "0.60571265", "0.60397804", "0.6036129", "0.6002325", "0.5980732", "0.59714365", "0.5960172", "0.5935085", "0.59166193", "0.5877667", "0.5867117", "0.58578855", "0.58521456", "0.5846234", "0.5845836", "0.58449036", "0.58261615" ]
0.76257706
0
Append a description to file path made up of dir and file_name
def appendDescription(dir, file_name, description): path = os.path.join(dir, file_name) base, ext = os.path.splitext(path) return base + '.' + description + ext
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildPath(dir, file_name, ext, description = None):\n\tpath = os.path.join(dir, file_name)\n\tbase, _ = os.path.splitext(path)\n\tif not description == None:\n\t\tbase = base + '.' + description\n\treturn base + ext", "def format_filename(title: str, id: Any, ext: str = \".\", dirFormat=None):\r\n ...", "def set_new_filedetails(self, name, path):\n File.filename(name)\n File.filepath(path)", "def set_file_path_name(self):\n self.file_path_name = self.get_file_path() + self.get_file_name()", "def add_path(self, widget, title, file_text):\r\n\r\n # Decide default directory\r\n default_dir = get_default_dir()\r\n\r\n # File picker\r\n options = QFileDialog.Options()\r\n options |= QFileDialog.DontUseNativeDialog\r\n filename = QFileDialog.getOpenFileName(self, title, default_dir, file_text, options=options)\r\n\r\n if filename[0]:\r\n widget.setText(filename[0].replace(\"/\",\"\\\\\"))", "def show_path(\n r_f_location,\n description,\n in_out,\n usage,\n isDir=False):\n location = \"root['FILES']\" + r_f_location\n path=str(eval(location))\n basename = os.path.basename(path)\n OMFITx.Label(\n text=in_out.upper() + \": \" + basename + \" - \" + description + \", at \" +r_f_location,\n# text=in_out.upper() + \": \" + basename + \" - \" + r_f_location,\n# text=in_out.upper() + \": \" + basename + \" - \" + description,\n align='left'\n ,\n bg=in_out_colors[in_out]\n )", "def opath ( dir_name, file_name = None ):\n if file_name:\n return os.path.join(output_path, dir_name, file_name)\n return os.path.join(output_path, dir_name)", "def build_file_path(dir_name, file_name, ext):\n return os.path.join(dir_name, os.path.extsep.join((file_name, ext)))", "def add_file(self, filename):\n # If absolute path\n if filename[0] == '/':\n self.filenames.append(filename)\n else:\n filename = self.current_path + filename\n self.filenames.append(filename)", "def path(self, args):\n dir_path = self.dir_path_(*args)\n return os.path.join(dir_path, self.file_name)", "def path(self, f):\n\t\treturn os.path.join(self.directory, f)", "def _formatPath(directoryPath, filePath):\n return directoryPath + \"\\\\\" + filePath", "def _add_path(dir_name, payload_info_list):\n path_count_dict = {}\n for payload_info_dict in payload_info_list:\n file_name = payload_info_dict[\"filename\"] or payload_info_dict[\"pid\"]\n path = d1_common.utils.filesystem.gen_safe_path(dir_name, \"data\", file_name)\n path_count_dict.setdefault(path, 0)\n path_count_dict[path] += 1\n if path_count_dict[path] > 1:\n path_base, path_ext = os.path.splitext(path)\n path = \"{}({}){}\".format(path_base, path_count_dict[path], path_ext)\n payload_info_dict[\"path\"] = path", "def create_file_name(self):\n # create a unique id for the file name\n index = self.helpers.alpha_uuid()\n\n filename = self.form['FieldStorage'][self.image_cid].filename\n extension = guess_extension(guess_type(filename)[0])\n return ( # concatenates the following data\n self.articleData.get('directory') + # directory\n '/' + # slash\n self.articleData.get('article_name') + # the article name\n '-' + # hyphen character\n index + # the id of the image\n extension\n )", "def append_to_path(path, name):\n if path[-1] == '/' or path[-1] == ':':\n return path + name\n else:\n return str(path) + str('/') + str(name)", "def print_path(file_name, file_path):\r\n# Get the current directory path and add to it the file name for full path.\r\n\tfile_path = (os.path.join(os.getcwd(), file_path)) \r\n\tprint(file_name, file_path)", "def get_aug_path(file_path: str) -> str:\n return 
\"/files%s\" % file_path", "def add_file(self, path):\n pass", "def append_to_filename(filepath: str, name_suffix: str, new_ext: Optional[str] = None) -> str:\n ext = new_ext or filepath_ext(filepath)\n name = filepath_name_only(filepath)\n return str(pathlib.Path(filepath).with_name(name+name_suffix).with_suffix(ext))", "def out_filename(self, filetype, dir, format='old'):\n filename = self.filename(filetype=filetype, format=format)\n return Path(dir) / filename", "def file_name(id, title, kind=\"src\"):\n fn_template = conf.template_source_file_name\n if kind == \"tst\":\n fn_template = conf.template_test_file_name\n\n return fn_template.format(id=id, title=title.replace(\"-\", \"_\"))", "def Directory(self) -> str:", "def add_path(self):\n name = self._get_path(\"Music File or Directory: \")\n if name != None:\n self._clear_window()\n self.player.add(name)\n self.refresh_window()", "def __str__(self): #XXX Cambiar a __str__(self)\n return _('PPC-Project file') + \" \" + \"\".join(['(', ', '.join(self.filenamePatterns()), ')'])", "def change_dir(filename):", "def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))", "def append_subfiles(self, di, lines, lang):\n if lang==\"en\":lang_index=1\n elif lang==\"it\":lang_index=2\n try:\n #if di[0] in self.name_stripped:\n for key in TREE.keys():\n if di[0]==key and di[0] in self.name_stripped:\n deco=[\"&nbsp;&nbsp;|-&nbsp;\" for i in range(len(TREE[key])-1)]\n deco.append(\"&nbsp;&nbsp;`-&nbsp;\")\n for d, fi in zip(deco, TREE[key]):\n bough=\"%s%s\" % (d,fi[lang_index])\n path=\"%s%s/%s/%s\" % (self.backstring, lang, di[0], fi[0])\n lines.append(\"%s %s\" % (path, bough))\n except IndexError:\n pass\n return lines", "def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SAFESentinel1:\n component = fn_comps[2]\n if len(component) < 4: \n resolution = 'N/A'\n else:\n resolution = component[-1]\n \n extra_metadata['product_info']['Resolution'] = resolution\n \n # Add file/scan name \n extra_metadata['product_info']['Name'] = os.path.splitext(file_name)[0]\n \n # Add Satellite and Mission from the file path\n comp_1 = fn_comps[0].upper()\n extra_metadata['platform']['Mission'] = \"Sentinel-%s\" % comp_1[1]\n extra_metadata['platform']['Satellite'] = \"Sentinel-%s\" % comp_1[1:]", "def _create_readme(self, name, summary, description):\n return \"\"\"\n %(header_bar)s\n %(header)s\n %(header_bar)s\n\n %(content)s\n \"\"\" % {\n 'header': name,\n 'header_bar': '=' * len(name),\n 'content': '\\n\\n'.join(\n content\n for content in (summary, description)\n if content\n ) or 'Describe your extension.',\n }", "def collected_filename(cfg, collect_dir, i=None):\n if i is not None:\n file = cfg[\"files\"][i]\n else:\n file = cfg[\"file\"]\n ext = path.splitext(file)[1]\n name = cfg[\"id\"]\n if i is not None:\n name += \"_\" + str(i)\n return path.join(collect_dir, name + ext)" ]
[ "0.69394153", "0.611801", "0.60918874", "0.6038287", "0.5996476", "0.59092975", "0.5773148", "0.5747158", "0.5734673", "0.5732687", "0.5712149", "0.57088184", "0.5703568", "0.56297904", "0.56046903", "0.56017125", "0.55617553", "0.55592865", "0.55451745", "0.5540674", "0.5525307", "0.55093503", "0.5505034", "0.55025935", "0.547304", "0.54561925", "0.54439574", "0.54213876", "0.54119974", "0.54113054" ]
0.8635285
0
Build a path from a dir, file_name and ext, and optionally a description
def buildPath(dir, file_name, ext, description = None): path = os.path.join(dir, file_name) base, _ = os.path.splitext(path) if not description == None: base = base + '.' + description return base + ext
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_file_path(dir_name, file_name, ext):\n return os.path.join(dir_name, os.path.extsep.join((file_name, ext)))", "def appendDescription(dir, file_name, description):\n\tpath = os.path.join(dir, file_name)\n\tbase, ext = os.path.splitext(path)\n\treturn base + '.' + description + ext", "def construct_filename(output_dir,\n file_descriptor,\n extension,\n *args,\n **kwargs):\n if len(args) == 0 and len(kwargs) == 0:\n return Path(output_dir,\n '{}{}'.format(file_descriptor, extension))\n elif len(args) == 0:\n return Path(output_dir,\n '{}_{}{}'.format('_'.join([f'{k}{v}' for k, v in kwargs.items()\n if v is not None]),\n file_descriptor,\n extension))\n elif len(kwargs) == 0:\n return Path(output_dir,\n '{}_{}{}'.format('_'.join([ar for ar in args if ar is not None]),\n file_descriptor,\n extension))\n else:\n return Path(output_dir,\n '{}_{}_{}{}'.format('_'.join([ar for ar in args if ar is not None]),\n '_'.join([f'{k}{v}' for k, v in kwargs.items()\n if v is not None]),\n file_descriptor,\n extension))", "def _make_fname(song, ext=None, av=None, subdir=None):\n # pylint: disable=E1103\n # Instance of 'bool' has no 'extension' member (some types not inferable)\n ddir = os.path.join(Config.DDIR.get, subdir) if subdir else Config.DDIR.get\n if not os.path.exists(ddir):\n os.makedirs(ddir)\n\n if ext:\n extension = ext\n\n else:\n stream = streams.select(streams.get(song),\n audio=av == \"audio\", m4a_ok=True)\n extension = stream['ext']\n\n # filename = song.title[:59] + \".\" + extension\n filename = song.title + \".\" + extension\n filename = os.path.join(ddir, mswinfn(filename.replace(\"/\", \"-\")))\n filename = filename.replace('\"', '')\n return filename", "def get_path(dir_name, file_format, args):\n fname = \"{exp}-excl-{excl}\".format(exp=args.exp,\n excl=args.feature_set)\n path = os.path.join(SAVE_ROOT, dir_name, args.rbp,\n fname + file_format)\n\n # make the directory if it doesn't exist_ok\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n return path", "def path(self, args):\n dir_path = self.dir_path_(*args)\n return os.path.join(dir_path, self.file_name)", "def create_file_path(fname, direc=\"data/result/\"):\n path = os.path.join(TOP_LEVEL, direc, fname)\n return path", "def opath ( dir_name, file_name = None ):\n if file_name:\n return os.path.join(output_path, dir_name, file_name)\n return os.path.join(output_path, dir_name)", "def format_filename(title: str, id: Any, ext: str = \".\", dirFormat=None):\r\n ...", "def build_dirname(f: mutagen.FileType) -> pathlib.Path:\n d = dict(f)\n rel_pth = [_lib_directory]\n # albumartist with fallback of track artist\n artist = d.get(\"albumartist\", d.get(\"artist\", None))\n # confirm\n artist_prompt = \"Enter album artist\" +\\\n (\"\" if artist is None else \" (default {0})\".format(*artist)) + \": \"\n artist_inpt = input(artist_prompt)\n rel_pth.append(artist_inpt if artist_inpt else artist[0])\n # album\n album = d.get(\"album\", None)\n album_prompt = \"Enter album\" +\\\n (\"\" if album is None else \" (default {0})\".format(*album)) + \": \"\n album_inpt = input(album_prompt)\n rel_pth.append(album_inpt if album_inpt else album[0])\n return pathlib.Path(os.path.join(*rel_pth))", "def get_full_filename(dirname, name, ext, tmstamp=False):\n fill = '_' + str_current_time() if tmstamp else ''\n fmt = '/{}{}{}' if ext.startswith('.') else '/{}{}.{}'\n return resolve(dirname) + fmt.format(name, fill, ext)", "def file_path(instance, filename):\n hashcode = hash(filename)\n mask = 255 # bitmask\n # use the first and 
second bytes of the hash code represented as\n # zero-padded hex numbers as directory names\n # provides 256 * 256 = 65536 of possible directory combinations\n dir1 = \"{:0>2x}\".format(hashcode & mask)\n dir2 = \"{:0>2x}\".format((hashcode >> 8) & mask)\n # Galaxy doesn't process names with parentheses in them\n filename = re.sub('[()]', '_', filename)\n return os.path.join(dir1, dir2, filename)", "def get_full_path(file_extension=True) -> str:\n return get_directory() + \"/\" + get_filename(file_extension=file_extension)", "def built_file_path(self, name, type=None, **kw):\n raise NotImplementedError", "def abs_path(file_name: str, directory: str) -> str:\r\n return os.path.join(directory, file_name)", "def _make_path_list(cfg, dir_name, file_name, rank=None):\n if not cfg.DATASET.IS_ABSOLUTE_PATH:\n assert len(dir_name) == 1 or len(dir_name) == len(file_name)\n if len(dir_name) == 1:\n file_name = [os.path.join(dir_name[0], x) for x in file_name]\n else:\n file_name = [os.path.join(dir_name[i], file_name[i])\n for i in range(len(file_name))]\n\n if cfg.DATASET.LOAD_2D: # load 2d images\n temp_list = copy.deepcopy(file_name)\n file_name = []\n for x in temp_list:\n suffix = x.split('/')[-1]\n if suffix in ['*.png', '*.tif']:\n file_name += sorted(glob.glob(x, recursive=True))\n else: # complete filename is specified\n file_name.append(x)\n\n file_name = _distribute_data(cfg, file_name, rank)\n return file_name", "def out_filename(self, filetype, dir, format='old'):\n filename = self.filename(filetype=filetype, format=format)\n return Path(dir) / filename", "def make_file_name(name):\n expanded_path = os.path.expandvars(make_fp_rel(name))\n return expanded_path", "def path(filename: str) -> str:\n path = os.path.dirname(sys.argv[0])\n if not path:\n path = '.'\n return path + '/' + filename", "def generate_file_path(directory: str, file_name: str):\n return os.path.join(os.getcwd(), directory, file_name)", "def _create_file_paths(folder):\n debut = \"chroma-nnls\"\n instrument = [\"piano\", \"orchestra\"]\n style = [\"baroque\", \"classical\", \"romantic\", \"modern\", \"addon\"]\n file_names = [\"_\".join([debut, i, s]) for i in instrument for s in style]\n # file_names = [\"test0\"]\n\n return [folder + fn + \".csv\" for fn in file_names]", "def __call__(self, components: Sequence[Text]) -> Text:\n return os.path.join(self._dirpath, *components)", "def make_path(self, filename):\n return os.path.join(self.root_path, filename)", "def convert_to_path(name):\n if name.startswith('.'):\n remainder = name.lstrip('.')\n dot_count = (len(name) - len(remainder))\n prefix = '../'*(dot_count-1)\n else:\n remainder = name\n dot_count = 0\n prefix = ''\n filename = prefix + os.path.join(*remainder.split('.'))\n return (filename, dot_count)", "def __make_path(self, filename):\n return self.__path() + os.sep + filename", "def build_output_file_path(dir, role, id):\n clean_dir = dir.rstrip(\"/\")\n punct = string.punctuation\n junk = punct + \" \"\n trantab = string.maketrans(junk, \"_________________________________\")\n clean_role = role.encode(\"ascii\", \"replace\").strip(junk).translate(trantab)\n path = (clean_dir + \"/\" + clean_version + \"-\" + clean_role + \"-\" +\n str(id) + \".json\")\n return re.sub(r'(_)\\1+', r'\\1', path)", "def file_path(cls, folder_path, file_title, file_extension):\n return cls(os.path.join(str(folder_path), f\"{file_title}.{file_extension}\"))", "def join_path(base, name, ext):\n return os.path.join(base, name + ext)", "def _create_dir_name(date, 
dir_structure='ymd', is_exif=True):\n if is_exif:\n date_split = date.split(' ')[0].split(':')\n else:\n date_split = date.split(' ')[0].split('-')\n dir_name = '\\\\'\n if 'y' in dir_structure:\n dir_name += date_split[0] + '\\\\'\n if 'm' in dir_structure:\n dir_name += '_'.join(d for d in date_split[:2]) + '\\\\'\n if 'd' in dir_structure:\n dir_name += '_'.join(d for d in date_split[:3]) + '\\\\'\n return dir_name", "def file_path(file_name, path):\n return path.rstrip('\\/') + \"/{0}\".format(file_name) if path else os.getcwd() + \"/{0}\".format(file_name)" ]
[ "0.75661904", "0.6550482", "0.60422546", "0.6027538", "0.59308547", "0.59081006", "0.5825218", "0.582117", "0.58164984", "0.57852477", "0.57776", "0.57530975", "0.5721652", "0.56592196", "0.5570957", "0.5562757", "0.55584544", "0.55302304", "0.55194324", "0.5516335", "0.55006784", "0.5468124", "0.5463953", "0.5460081", "0.5458572", "0.54548377", "0.54244137", "0.54204756", "0.53972363", "0.53603727" ]
0.8475875
0
Parse an attributes file
def parseAttrs(file_name): lines = file(file_name).read().strip().split('\n') lines = [x.strip() for x in lines if len(x.strip()) > 0] return [parseAttrLine(x) for x in lines]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_attributes(filename):\n attributes = {}\n with open(filename) as f:\n for line in f:\n # Split line into student, college, year, major\n fields = line.split()\n student = int(fields[0])\n college = int(fields[1])\n year = int(fields[2])\n major = int(fields[3])\n \n # Store student in the dictionary\n attributes[student] = {'college': college,\n 'year': year,\n 'major': major}\n return attributes", "def load_attributes():\n\n # <attribute_id> <attribute_name>\n attributes_file = open(PROJECT_ROOT +'/data/attributes.txt').readlines()\n attributes_file = [i.strip().split() for i in attributes_file]\n\n # <certainty_id> <certainty_name>\n certainties_file = open(PROJECT_ROOT +'/data/CUB_200_2011/attributes/certainties.txt').readlines()\n certainties_file = [i.strip().split() for i in certainties_file]\n\n # <image_id> <attribute_id> <is_present> <certainty_id> <time>\n labels_file = open(PROJECT_ROOT +'/data/CUB_200_2011/attributes/image_attribute_labels.txt').readlines()\n labels_file = [i.strip().split() for i in labels_file]\n\n attribute_ids = {}\n for i in attributes_file:\n attribute_ids[i[1]] = int(i[0])\n\n certainty_ids = {}\n for i in certainties_file:\n certainty_ids[i[1]] = int(i[0])\n\n label_ids = {}\n for i in labels_file:\n label_ids[(int(i[0]), int(i[1]))] = list(map(lambda x:int(float(x)), i[2:]))\n\n return attribute_ids, certainty_ids, labels_file, label_ids", "def _parse_attributes(self, attributes):\n\n var_value_pairs = attributes.split()\n\n self.logger.debug('attributes:{} pairs:{}'.format(attributes, var_value_pairs))\n\n for var_value_pair in var_value_pairs:\n (var, separator, value) = var_value_pair.partition('=')\n value = value.replace('\"', '')\n self._current_element.add_attribute(var, value)", "def read_atts(self, file_name):\n\n match = re.match(self.regex_pattern, file_name)\n\n return match.groupdict()", "def parseAttrLine(line):\n\tpre, post = line.strip().split(':')\n\tnumber, attr = pre.strip().split('.')\n\tattr = attr.strip().replace('%','').replace(' ', '-')\n\tvals = [clean(x) for x in post.strip().strip('.').split(',')]\n\treturn {'num':int(number), 'attr':clean(attr), 'vals':vals}", "def read_attribs(self):\n\n attribs = {}\n while self.index < self.length:\n self.ignore_whitespaces()\n if self.xtext[self.index] == '>':\n break\n name = self.read_until('=')\n self.index += 1\n self.read_until('\"')\n self.index += 1\n value = self.read_until('\"')\n self.index += 1\n\n attribs[name] = value\n\n return attribs", "def _parse_attributes(self, attributes, node):\n for attr in attributes:\n if attr.value.ByteSize() > self.MAX_NODE_ATTRIBUTE_VALUE_BYTES:\n message = f\"The attribute value of node({node.name}) \" \\\n f\"is over {self.MAX_NODE_ATTRIBUTE_VALUE_BYTES} Bytes, will ignore.\"\n logger.warning(message)\n continue\n if attr.name in ('input_is_dynamic_shape', 'output_is_dynamic_shape') and not \\\n node.is_dynamic_shape_node and attr.value.bool_val:\n node.is_dynamic_shape_node = True\n node.add_attr({attr.name: str(attr.value)})", "def __init__(self, fileName):\n reader = pcssTools.PcssFileReader(fileName)\n lines = reader.getLines()\n self._attributes = {}\n inputCounter = 0\n outputCounter = 0\n for line in lines:\n\n [name, attributeType, optional, niceName, featureClass, io] = line.split('\\t')\n att = PcssFileAttribute(name, attributeType, optional, niceName, featureClass, io)\n if (att.isInputAttribute()):\n att.setInputOrder(inputCounter)\n inputCounter += 1\n if (att.isOutputAttribute()):\n 
att.setOutputOrder(outputCounter)\n outputCounter += 1\n self.setFileAttribute(att)", "def read_attr_type_file():\n with open(args.attr_type_file_path, 'r') as f:\n content = f.readlines()\n\n # Strip lines of newline/return characters in csv file\n content = [x.strip(' \\t\\n\\r') for x in content]\n\n # Generate dictionary of types and their count\n attribute_type_dict = {}\n for item in content:\n key, value = item.split('|')\n attribute_type_dict[key.strip()] = value.strip()\n \n return attribute_type_dict", "def readAttributesFile(self, filepath):\n raw_data = np.genfromtxt(filepath, skip_header=1, delimiter=\",\", filling_values=0, dtype=None)\n data = [list(item)[1:] for item in raw_data]\n\n self.attributeMatrix = np.asmatrix(data)\n n = self.attributeMatrix.shape[1]\n self.location = self.attributeMatrix[:, 0:2]\n self.location = self.location.astype('float')\n self.pop = self.attributeMatrix[:, 2:n].astype('int')\n # self.pop[np.where(self.pop < 0)[0], np.where(self.pop < 0)[1]] = 0\n self.n_group = n-2\n self.n_location = self.attributeMatrix.shape[0]\n self.pop_sum = np.sum(self.pop, axis=1)\n self.tract_id = np.asarray([x[0] for x in raw_data]).astype(str)\n self.tract_id = self.tract_id.reshape((self.n_location, 1))\n\n return self.attributeMatrix", "def _read_attributes(root):\n output_list = []\n for _, value in enumerate(root[0][2]):\n attr = Attribute(value)\n output_list.append(attr)\n return output_list", "def _parseAttributeString(self, line):\n attribute, value = line.partition(' ')[::2]\n self._setAttribute(attribute, value)", "def parse(self,att_line):\r\n att_line = {item.split('=')[0]:item.split('=')[1] for item in att_line.split(';')}\r\n if 'ID' in att_line:\r\n self.id = att_line.pop('ID',None)\r\n if 'Name' in att_line:\r\n self.name = att_line.pop('Name',None)\r\n if 'Alias' in att_line:\r\n self.alias = att_line.pop('Alias',None)\r\n if 'Parent' in att_line:\r\n self.parent = att_line.pop('Parent',None)\r\n if 'Target' in att_line:\r\n self.target = att_line.pop('Target',None)\r\n if 'Gap' in att_line:\r\n self.gap = att_line.pop('Gap',None)\r\n if 'Derives_from' in att_line:\r\n self.derives_from = att_line.pop('Derives_from',None)\r\n if 'Note' in att_line:\r\n self.note = att_line.pop('Note',None)\r\n if 'Dbxref' in att_line:\r\n self.dbxref = att_line.pop('Dbxref',None)\r\n if 'Ontology_term' in att_line:\r\n self.ontology_term = att_line.pop('Ontology_term',None)\r\n if 'Is_circular' in att_line:\r\n self.is_circular = att_line.pop('Is_circular',None)\r\n if self.allele_key in att_line:\r\n self.allele = att_line.pop(self.allele_key,None).split(',')\r\n if self.discovered_key in att_line:\r\n discovered_val = att_line.pop(self.discovered_key,None)\r\n if discovered_val in self.enabled:\r\n self.discovered = True\r\n elif discovered_val in self.disabled:\r\n self.discovered = False\r\n if self.validated_key in att_line:\r\n validated_val = att_line.pop(self.validated_key,None)\r\n if validated_val in self.enabled:\r\n self.validated = True\r\n elif validated_val in self.disabled:\r\n self.validated = False\r\n if self.active_key in att_line:\r\n active_val = att_line.pop(self.active_key,None)\r\n if active_val in self.enabled:\r\n self.active = False\r\n elif active_val in self.disabled:\r\n self.active = True\r\n else:\r\n pass#print(\"{} is not a valid entry for {}\".format(active_val,self.active_key))\r\n else:\r\n #TODO this will make every element withought existing value active.\r\n self.active = True\r\n rest_dict = {}\r\n for key,value in 
att_line.items():\r\n if self.history_key in key:\r\n parts = att_line[key].split(',')\r\n if len(parts) == 3:\r\n self.add_history(*parts)\r\n else:\r\n reason = '.'.join(parts[2:])\r\n self.add_history(parts[0],parts[1],reason)\r\n else:\r\n rest_dict[key] = value\r\n if rest_dict != {}:\r\n #TODO: maybe add this to a logger instead\r\n #print(\"attributes put in other: {}\".format(', '.join(att_line.keys())))\r\n self.other = att_line\r\n return self", "def attributes(self, *args):\n kwargs = {}\n if args:\n kwargs[\"attributenames\"] = args\n\n r = self._token_id_request(urljoin(self._url, Client._attribute_resource), **kwargs)\n\n # parse contennt looking for all attributes\n attributes = []\n for line in r.text.splitlines():\n r = re.match(\"(userdetails\\.attribute\\.name)=(.*)\", line)\n if r:\n name = r.groups()[1]\n attributes.append([name, None])\n continue # next line\n\n r = re.match(\"(userdetails\\.attribute\\.value)=(.*)\", line)\n if r:\n value = r.groups()[1]\n # last name parsed is where it has to\n # be stacked\n if attributes[-1][1] == None:\n attributes[-1][1] = value\n if isinstance(attributes[-1][1], list):\n attributes[-1][1].append(value)\n else:\n # cast to list\n attributes[-1].append([attributes[-1][1], value])\n\n return dict([(item[0], item[1]) for item in attributes])", "def _initialize_attributes(self, string_as_file):\n for row in string_as_file:\n first = row[0]\n second = row[1]\n third = row[3]\n match first:\n case 'quadrat':\n self.quadrat = { 'id': second, 'comment': third }\n case 'waypoint':\n self.waypoint = { 'name': second, 'comment': third }", "def parse(self, filehandle):\n l = filehandle.readline()\n if l.split()[0] != '##maf':\n return\n else:\n self.setpar(l.split()[1:])\n\n l=filehandle.readline()\n while l:\n la = l.split()\n## print la\n if(len(la)==0 or la[0]=='#'):\n## print \"skipping\"\n 1\n elif(la[0]=='a'):\n## print \"reading alignment\"\n self.readalign(la[1:], filehandle)\n else:\n## print \"end of records\"\n return\n\n l=filehandle.readline()", "def _parse_filename(filename, metadata):\n\n file_noext = os.path.splitext(filename)[0]\n fname = file_noext.split(\"_\")\n\n metadata[\"scene_id\"] = fname[1]\n metadata[\n \"beam_mode\"] = sat_properties.radarsat_product_characteristics[\n fname[2]]\n metadata[\"product_type\"] = fname[-1]\n try:\n metadata[\n \"product_description\"] = sat_properties.radarsat_1_data_products[\n fname[-1][:3]]['description']\n except Exception:\n metadata[\"product_description\"] = \"\"\n\n metadata[\"scene_mean_time\"] = datetime.datetime.strptime(\n fname[3] + fname[4], \"%Y%m%d%H%M%S\")\n\n return metadata", "def parse_attributes(self, attr):\n result = {}\n annotations = []\n # Sanitize and split attributes up\n split_attr = attr.strip(' \\t\\n;').split(';')\n for pair in split_attr:\n splitpair = pair.split('=')\n if len(splitpair) != 2:\n continue\n if splitpair[0] == \"ID\":\n result['identifier'] = splitpair[1]\n elif splitpair[0] == \"Name\":\n result['name'] = splitpair[1]\n elif splitpair[0] == \"Parent\":\n result['parent_id'] = splitpair[1]\n elif splitpair[0] == \"Dbxref\" or splitpair[0] == \"Ontology_term\":\n annotations.append(splitpair)\n # Make sure we found an ID\n if \"identifier\" not in result:\n return {}\n # Add annotations if we found any\n if annotations:\n result[\"annotations\"] = annotations\n return result", "def _parse_aqara_attributes(self, value):\n attributes = {}\n attribute_names = {\n 1: BATTERY_VOLTAGE_MV,\n 3: TEMPERATURE,\n 4: XIAOMI_ATTR_4,\n 5: 
XIAOMI_ATTR_5,\n 6: XIAOMI_ATTR_6,\n 10: PATH,\n }\n result = {}\n while value:\n skey = int(value[0])\n svalue, value = foundation.TypeValue.deserialize(value[1:])\n result[skey] = svalue.value\n for item, val in result.items():\n key = (\n attribute_names[item]\n if item in attribute_names\n else \"0xff01-\" + str(item)\n )\n attributes[key] = val\n if BATTERY_VOLTAGE_MV in attributes:\n attributes[BATTERY_LEVEL] = int(\n self._calculate_remaining_battery_percentage(\n attributes[BATTERY_VOLTAGE_MV]\n )\n )\n return attributes", "def readAttributes(self, *args):\n return _libsbml.ASTBasePlugin_readAttributes(self, *args)", "def readAttributes(self, *args):\n return _libsbml.MultiASTPlugin_readAttributes(self, *args)", "def __init__(self):\n\n try:\n # read attributes from attributes file\n with open(const.Storage.ATTRIBUTES) as attributes_file:\n # read the file and parse it to JSON data\n json_data = attributes_file.read()\n attributes = json.loads(json_data)\n\n # set attributes\n self.id = str(attributes[\"id\"])\n self.length = float(attributes[\"length\"])\n self.width = float(attributes[\"width\"])\n except OSError:\n raise OSError(\"The attributes file could not be opened.\")", "def load_attribute_data():\n global attr_value_counts, attr_counts, value_counts, \\\n attr_value_ratios, attrs\n\n print \"Loading extraction data...\"\n with open('./data/common_extractions.json') as f:\n place_data = json.loads(f.read())\n for place in place_data:\n for attr in place_data[place]:\n if attr not in attr_value_counts:\n attrs.add(attr)\n attr_value_counts[attr] = {}\n attr_counts[attr] = 0\n for value in place_data[place][attr]:\n c = place_data[place][attr][value]\n value_counts[value] = value_counts.get(value, 0) + c\n attr_counts[attr] += c\n attr_value_counts[attr][value] = \\\n attr_value_counts[attr].get(value, 0) + c\n \n for attr in attrs:\n attr_value_ratios[attr] = {}\n for value in attr_value_counts[attr]:\n attr_value_ratios[attr][value] = float(attr_value_counts[attr][value]) \\\n / attr_counts[attr]", "def parse_attributes(self):\n attrs = {}\n error = False\n for header, attr in self.app.config['SSO_ATTRIBUTE_MAP'].items():\n required, name = attr\n value = request.environ.get(header, None)\n\n attrs[name] = value\n if not value or value == '':\n if required:\n error = True\n return attrs, error", "def parseAttributes(self, block, scanf_list, special_list={}, skip_list=[]):\n\n # remove trailing newlines\n block = block.strip('\\n')\n\n # create a hash of the attributes for easy lookup\n scanf_map, order = self._createAttributeFormattingMap(scanf_list)\n\n # loop over the block line by line\n index = 0\n rest = []\n lines = block.split('\\n')\n while index < len(lines):\n\n # grab line and increment\n line = lines[index]\n index += 1\n\n # gather up indented child lines\n children = []\n while (index < len(lines)) and re.match('^(?:\\t| )', lines[index]):\n children.append(lines[index])\n index += 1\n\n # add children to line\n children.insert(0, line)\n line = \"\\n\".join(children)\n\n # use proper seperator to grab the attribute name\n attribute = re.split('\\s', line)[0]\n\n # skip attribute\n if attribute in skip_list:\n #print \"skip_list-> \", attribute, line\n rest.append(line)\n\n # use special formatter\n elif attribute in special_list:\n #print \"special_list-> \", attribute, line\n special_list[attribute](line)\n\n # use scanf formatter\n elif attribute in scanf_map:\n #print \"scanf-> \", attribute, line\n value = self._parseAttributeScanf(line, 
scanf_map[attribute])\n\n # remove from tuple if single value\n if len(value) == 1:\n value = value[0]\n\n # set attribute\n self._setAttribute(attribute, value)\n\n # attribute not found\n else:\n #print \"rest-> \", attribute, line\n rest.append(line)\n\n # add default entires for missing attibutes\n for attribute in scanf_map.keys():\n if not hasattr(self, attribute):\n setattr(self, attribute, None)\n\n # return unused lines\n return \"\\n\".join(rest) + \"\\n\"", "def parse(self):\n count = [] #count for trainset_size\n with open(self.file) as f:\n for line in f:\n data = line.split(\" \")[0]\n filename = data[:-1]\n id = data[-1:]\n if (filename not in count):\n count.append(filename)\n\n acid = \"\"\n structure = \"\"\n with open(self.directory+\"/\"+filename+\".dssp\") as dssp:\n for i in range(28): #skip lines we don't need\n next(dssp)\n for line in dssp:\n if (line[9] != \" \" and line[10] == \" \" and line[11] == id and line[13] not in (\"*\",\"!\",\"B\",\"Z\",\"X\")):\n #amino acid sequence\n if (line[13].islower()):\n acid += \"C\"\n else:\n acid += line[13]\n\n #sequence of the structure\n if (line[16] in (\"H\",\"G\",\"I\")):\n structure += \"H\"\n elif (line[16] in (\"E\",\"B\")):\n structure += \"E\"\n else:\n structure += \"C\"\n\n if (len(count) > self.trainset_size):\n self.testset.append((acid,structure))\n else:\n self.trainset.append((acid,structure))", "def set_file_attr(self):\n if self.resolution == 1000:\n satellite_type = ['AQUA', 'TERRA']\n if self.satellite in satellite_type:\n try:\n h4r = SD(self.in_file, SDC.READ)\n self.file_attr = attrs2dict(h4r.attributes())\n except Exception as e:\n print(str(e))\n else:\n raise ValueError(\n 'Cant read this satellite`s data.: {}'.format(self.satellite))\n else:\n raise ValueError(\n \"Cant handle this resolution: \".format(self.resolution))", "def _parse_attr(self, attr_proto):\n attrs = {}\n for a in attr_proto:\n for f in ['f', 'i', 's']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['floats', 'ints', 'strings']:\n if list(getattr(a, f)):\n assert a.name not in attrs, \"Only one type of attr is allowed\"\n attrs[a.name] = tuple(getattr(a, f))\n for f in ['t', 'g']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['tensors', 'graphs']:\n if list(getattr(a, f)):\n raise NotImplementedError(\"Filed {} is not supported in mxnet.\".format(f))\n if a.name not in attrs:\n raise ValueError(\"Cannot parse attribute: \\n{}\\n.\".format(a))\n return attrs", "def parse_attrs(buf):\r\n attrs = []\r\n while buf:\r\n t = ord(buf[0])\r\n l = ord(buf[1])\r\n if l < 2:\r\n break\r\n d, buf = buf[2:l], buf[l:]\r\n attrs.append((t, d))\r\n return attrs", "def parse_file(axmlfile, **kwargs):\n adm = ADM()\n from .common_definitions import load_common_definitions\n load_common_definitions(adm)\n load_axml_file(adm, axmlfile, **kwargs)\n return adm" ]
[ "0.68706465", "0.6828883", "0.6731427", "0.66256315", "0.6585804", "0.6552646", "0.6546952", "0.6446353", "0.639881", "0.63499784", "0.6308809", "0.62747127", "0.62701064", "0.62394685", "0.61743546", "0.6151755", "0.61405575", "0.6116673", "0.60649294", "0.605982", "0.60079515", "0.5972718", "0.5944008", "0.5938555", "0.58934975", "0.5861414", "0.5842905", "0.58239704", "0.58144474", "0.57565635" ]
0.80092627
0
Parse a classes file
def parseClasses(file_name): lines = file(file_name).read().strip().split('\n') lines = [x.strip() for x in lines if len(x.strip()) > 0] classes = [] for l in lines: classes = classes + [clean(x) for x in l.split(',')] return classes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetClassesFromFile(self,file_path):\n classes = []\n try:\n fl = open(file_path,\"r\")\n for line in fl.readlines():\n if \"class\" in line and \":\" in line:\n line = line.strip(\"class \")\n line2 = \"\"\n for i in line:\n if i!=\":\": line2+=i\n\n classes.append(line2)\n if classes:\n return classes\n else:\n return False\n fl.close()\n except:\n return False", "def classes_description_from_file(self, content_as_lines : list, root_path:str)-> list: # pure function # -> list of ClassDescription\n\n class_started = False\n classes_information = []\n current_class = None\n class_description.ClassDescription.root_path = root_path\n for index, line in enumerate(content_as_lines):\n check_result = self.line_parser('class', line)\n if check_result == 'start':\n if class_started: # need to take into account, that start can represent another class start, i.e. end\n current_class.line_end = index - 1\n classes_information.append(current_class)\n class_started = False\n class_started = True\n class_name = self.name_extractor('class', line)\n start, end = self.extract_brackets_position(line)\n parent_name = line[start + 1:end].strip()\n current_class = class_description.ClassDescription(class_name=class_name,\n file_name=relative_filename,\n line_start=index,\n child_of=parent_name)\n elif check_result == 'end' and class_started:\n current_class.line_end = index - 1\n classes_information.append(current_class)\n class_started = False\n print(\"This condition works!\")\n\n return classes_information", "def parseFile(self,filename):\n\n name = '[0-9a-zA-Z_]+'\n string = '\\\\\"(.+)\\\\\"'\n\n testclass = None\n functionName = None\n\n fin = open(filename, 'r')\n for line in fin:\n # testclass starts\n res = re.match('class ('+name+')', line)\n if res != None:\n testclass = res.group(1)\n\n # end of testclass \n if re.match('};', line) != None:\n testclass = None\n\n # function start\n res = re.match('\\\\s+void ('+name+')\\\\(\\\\)', line)\n if res != None:\n functionName = res.group(1)\n\n elif re.match('\\\\s+}', line) != None:\n functionName = None\n\n if functionName == None:\n continue\n\n # check\n res = re.match('\\s+check.*\\('+string, line)\n if res != None:\n code = res.group(1)\n\n # code..\n res = re.match('\\\\s+'+string, line)\n if res != None:\n code = code + res.group(1)\n\n # assert\n res = re.match('\\\\s+ASSERT_EQUALS\\\\(\\\\\"([^\"]*)\\\\\",', line)\n if res != None and len(code) > 10:\n node = { 'testclass':testclass,\n 'functionName':functionName,\n 'code':code,\n 'expected':res.group(1) }\n self.nodes.append(node)\n code = ''\n\n # close test file\n fin.close()", "def parse_code_classes(self):\n # Step1 : Gather XML files list\n if not self._xml_files_list:\n self.parse_code_files(store_xml_files_list=True)\n\n # Step 2: Parse all corresponding XML files.\n classes, classes_per_file = parse_xml_files_list(ClassLevelParser, self._xml_files_list)\n return classes, classes_per_file", "def read_classes_from_file(self, class_file):\n items = []\n with open(class_file) as f:\n for cl in f.readlines():\n # c:code, d:description\n item = [{'value': c, 'text': f'{c}: ' + d.replace('\\n','')} for c, d in [cl.split(',')]]\n items+=item\n \n return items", "def getClasses(classesFile):\n classes = None\n with open(classesFile, 'rt') as f:\n classes = f.read().rstrip('\\n').split('\\n')\n return classes", "def process_class_list(self, module, classes):", "def load_classes(path):\n with open(path, 'r') as f:\n names = f.read().split(\"\\n\")\n # Filter removes empty strings (such as 
last line)\n return list(filter(None, names))", "def list_classes(filename, output_file):\n file_pointer = open(filename)\n file_split = filename.replace(\"/\",\".\")\n file_split = file_split.split(\".\")\n\n class_re = re.compile(\"^class ([A-Za-z]+[^\\(:]*)\")\n method_re = re.compile(\"^ def ([a-z][a-z_]*)\")\n # remove_self_re = re.compile(r\"self(, )?\")\n first = True\n\n for line in file_pointer:\n\n class_names = class_re.findall(line)\n if len(class_names) > 0:\n if first:\n first = False\n output_file.write(\"Classes\\n\")\n output_file.write(\"^^^^^^^\\n\")\n output_file.write(\"- \")\n module = file_split[4]\n class_name = class_names[0]\n output_file.write(f\":class:`~arcade.{module}.{class_name}`\")\n output_file.write(\"\\n\")\n\n method_names = method_re.findall(line)\n for method_name in method_names:\n # method_name = name[2]\n output_file.write(f\" - :func:`~arcade.{module}.{class_name}.{method_name}`\\n\")\n # name = remove_self_re.sub(\"\", name)\n\n if not first:\n output_file.write(\"\\n\")", "def compile_class(self):\r\n self.tokenizer.advance() # ignore 'class' keyword\r\n self.class_name = self.tokenizer.identifier()\r\n self.tokenizer.advance()\r\n self.tokenizer.advance() # ignore '{' symbol\r\n while self.tokenizer.curtok < len(self.tokenizer.tokens) - 1:\r\n dec = self.tokenizer.key_word()\r\n if dec == \"field\" or dec == \"static\":\r\n self.compile_var_dec()\r\n else:\r\n self.compile_subroutine()\r\n self.tokenizer.advance()", "def CompileClass(self):\n\n ## Go to first token\n self.Tokenizer.advance()\n\n ## Expecting class keyword\n self._eat('class')\n self._write_opener('class')\n self._write_entry('keyword','class')\n\n ## Now handle the identifier\n\n if not self.Tokenizer.currentTokenType == \"IDENTIFIER\":\n raise ValueError(\"ERROR_UNEXPECTED_TOKEN: \" + self.Tokenizer.currentTokenType + \" \" + self.Tokenizer.currentToken )\n else:\n self._write_entry(self.Tokenizer.currentTokenType.lower(), self.Tokenizer.currentToken)\n\n self.Tokenizer.advance()\n\n ## Now opening curly bracket\n self._eat('{')\n self._write_entry('symbol','{')\n\n #self.Tokenizer.advance()\n\n\n # Now expecting 0 or more classVarDec\n\n # self.Tokenizer.advance()\n #\n # if self.Tokenizer.currentTokenType == \"KEYWORD\" and self.Tokenizer.currentToken in [\"static\", \"field\"]:\n # self._write_closer('class')\n # self.outputFile.close()\n\n\n ## Finally the closing brace\n try:\n self._eat('}')\n self._write_entry('symbol', '}')\n self._write_closer('class')\n except:\n print(\"waah\")\n\n self.outputFile.close()", "def parse(self, infile):\r\n raise NotImplementedError()", "def load_classes(path):\n fp = open(path, \"r\")\n names = fp.read().split(\"\\n\")[:-1]\n # -1까지 하는 이유 마지막에 공백이 있다.\n print(\"Load Class Nums : \",len(names))\n return names", "def _parse(self, infile):\n raise NotImplementedError()", "def load_class_ck(path):\n fp = open(path, \"r\",encoding=\"utf-8\")\n names = fp.read().split(\"\\n\")[:-1]\n return names", "def __parse(self):\n # raw/objects: detect name, type, use major tag for type as parent node\n # raw/graphics: as object raw, but add TILE_PAGE\n # init: usually flat file, except\n # embark_profiles.txt: [PROFILE] is parent\n # interface.txt: [BIND] is parent (legacy will be flat)\n # world_gen.txt: [WORLD_GEN] is parent\n # Non-raw files (unsupported): init/arena.txt, subdirs of raw/objects\n parse_raw(self, self.read(self.filename))", "def read_classes(file, class_list):\n\n if 'PSB' not in file.readline().strip():\n raise ('Not a valid PSB 
classification header', ImportError)\n\n _, num_models = file.readline().strip().split()\n modelcount = 0\n class_dict = {}\n\n while modelcount < int(num_models):\n line = file.readline().strip().split()\n if len(line) == 0:\n pass \n elif len(line) > 2 and line[2] == '0': # empty class label\n pass\n elif len(line) > 2:\n class_name = str(line[0])\n # if the class not in the class_list add it\n if class_name not in class_list:\n class_list.append(class_name)\n else: # add the class to the number of the model\n class_id = class_list.index(class_name) # give class id based on class_list index\n class_dict[line[0]] = (class_id, class_name)\n modelcount += 1\n\n return class_dict, class_list", "def parse_parameters(filename):\n\n # read in the parameters\n mainInput = ParserClass.Parser(filename)\n if 'LogFile' in mainInput['Inputs']:\n if mainInput['Inputs']['LogFileUsePID']:\n logger = Logging.Logger(mainInput['Inputs']['LogFile']+'_{}'.format(os.getpid()))\n else:\n logger = Logging.Logger(mainInput['Inputs']['LogFile'])\n \n else:\n logger = print\n\n # Generate a filelist to loop over\n filelist = np.loadtxt(mainInput['Inputs']['filelist'],dtype=str,ndmin=1)\n if isinstance(mainInput['Inputs']['data_dir'], type(None)):\n filelist = [filename for filename in filelist]\n else:\n filelist = ['{}/{}'.format(mainInput['Inputs']['data_dir'],\n filename.split('/')[-1]) for filename in filelist]\n \n # Some items should always be a list\n if not isinstance(mainInput['Inputs']['pipeline'], list):\n mainInput['Inputs']['pipeline'] = [mainInput['Inputs']['pipeline']]\n # Get the class names (modulename, classname)\n jobnames = [c for c in mainInput['Inputs']['pipeline']]\n\n logger('Running: '+' '.join(mainInput['Inputs']['pipeline']))\n\n\n prejobnames = [c for c in mainInput['Inputs']['preamble']]\n\n\n # Read the class parameter file\n classInput = ParserClass.Parser(mainInput['Inputs']['classParameters'])\n\n # Initalise the classes : classInput are the kwargs to initiate classes\n jobs = []\n for job in jobnames:\n jobs += [getClass(job)(logger=logger,**classInput[job])]\n\n # Initalise the classes : classInput are the kwargs to initiate classes\n prejobs = []\n for prejob in prejobnames:\n prejobs += [getClass(prejob)(logger=logger,**classInput[prejob])]\n\n\n return jobs,prejobs, filelist, mainInput, classInput, logger", "def input_file_parser(cls):\n \n # Loop through the file and store lines in an appropriate list that is passed to other class functions\n with open(cls.infile_name,'r') as infile:\n for line in infile: # Loop through the whole file\n if '$molecule' in line: # Search for a section header\n for line in infile: # Enter second loop over the lines in the section\n if '$end' in line: # If you find $end, stop loop as the section is finished\n break\n else: # Otherwise add the line to a list\n cls.molecule_lines.append(line.strip())\n if '$connection' in line: # Continue for other sections...\n for line in infile:\n if '$end' in line:\n break\n else:\n cls.connector_lines.append(line.strip())\n if '$options' in line: # Continue for other sections...\n for line in infile:\n if '$end' in line:\n break\n else:\n cls.options_lines.append(line.strip())\n\n return None", "def _load_classes(self):\n classdocs = self._docset.get_classes()\n for classdoc in classdocs:\n files = [self._docmap[filedoc] for filedoc in classdoc.get_files()]\n classobj = Class(classdoc, files)\n self._docmap[classdoc] = classobj\n self._classes.add(classobj)", "def parse_classes(classes_data):\n classes = {}\n 
course_metadata = {}\n for course in classes_data['linked']['courses.v1']:\n course_metadata[course['id']] = course\n\n sorted_sessions = sorted(classes_data['linked']['v1Sessions.v1'],\n key=lambda x: x['id'], reverse=True)\n for session in sorted_sessions:\n course_id = session['courseId']\n session['startDateString'] = get_start_date_string(session)\n if course_id not in classes:\n classes[course_id] = course_metadata[course_id].copy()\n if 'sessions' not in classes[course_id]:\n classes[course_id]['sessions'] = []\n classes[course_id]['sessions'].append(session)\n return classes", "def parse(self):\n print_DBG(\"Parsing master file: \"+self.tokenizer.get_file_information()[0])\n for token_line in self.tokenizer.next_tokenized_line():\n if not token_line[0].isspace():\n if token_line[0] == pu.INCLUDE_FILE_SYM:\n self.tokenizer.open_file(token_line[1])\n print_DBG(\"Parsing file: \"+self.tokenizer.get_file_information()[0])\n self.stats[\"#files\"] += 1\n else:\n self._parse_declaration_initiator(token_line)\n self._expecting_rule = True\n self.stats[\"#declarations\"] += 1\n self._expected_indentation = None\n else:\n self._parse_rule(token_line)\n self._expecting_rule = False # Not expecting but still allowed\n self.stats[\"#rules\"] += 1\n self.tokenizer.close_files()\n print_DBG(\"Parsing finished!\")", "def parse(self, filename):\n infile = file(filename)\n for line in infile:\n self.parseLine(line)", "def get_class_names_in_files(path: str) -> List[str]:\n with open(path) as file:\n module = ast.parse(file.read())\n return [node.name for node in module.body if isinstance(node, ast.ClassDef)]", "def read_class_names(class_file_name):\n \n # open class text file\n with open(class_file_name, 'r') as f:\n \n # intialise empty list to store names\n names = []\n \n # iterate over class names\n for name in f:\n \n # append class name from each line\n names.append(name.strip('\\n'))\n\n return names", "def compile_class(self) -> None:\n self._consume('class')\n if self.tokenizer.token_type() != TokenTypes.IDENTIFIER:\n raise CompilationEngineError(f\"{self._get_current_token()} is an invalid token at this point. 
Expected a \"\n f\"class name.\")\n\n self.class_name = self._get_current_token()\n self._consume(TokenTypes.IDENTIFIER)\n self._consume('{')\n\n while self._get_current_token() != '}':\n if self._get_current_token() in CompilationEngine.CLASS_VAR_DEC_TOKENS:\n self.compile_class_var_dec()\n elif self._get_current_token() in CompilationEngine.SUBROUTINE_TOKENS:\n self.compile_subroutine_dec()\n else:\n raise CompilationEngineError(f\"{self._get_current_token()} is an expected token at this point\")\n\n self._consume('}')", "def visit_ClassDef(self, node):\n if node in self.manager.found_classes:\n return\n\n self.manager.found_classes.add(node)\n self.manager.found[\"classes\"].append({\"name\":node.name,\n \"lineno\":node.lineno,\n \"namespace\":\".\".join(self.parent)})\n\n # Keep checking all nodes in this class.\n for my_node in node.body:\n self.manager._explorer(self.manager, self.parent + [node.name]).visit(my_node)", "def parse(klass, f):\n members = []\n for line in f:\n line = line.strip()\n if not line:\n continue # skip empty lines\n members.append(PedigreeMember.parse_line(line))\n return Pedigree(members)", "def parse(klass, f):\n members = []\n for line in f:\n line = line.strip()\n if not line:\n continue # skip empty lines\n members.append(PedigreeMember.parse_line(line))\n return Pedigree(members)", "def _parse_class(self):\n first_pos = self.start_pos\n token_type, cname = self.next()\n if token_type != tokenize.NAME:\n debug.warning(\"class: syntax err, token is not a name@%s (%s: %s)\"\n % (self.start_pos[0], tokenize.tok_name[token_type], cname))\n return None\n\n cname = pr.Name(self.module, [(cname, self.start_pos)], self.start_pos,\n self.end_pos)\n\n super = []\n token_type, _next = self.next()\n if _next == '(':\n super = self._parse_parentheses()\n token_type, _next = self.next()\n\n if _next != ':':\n debug.warning(\"class syntax: %s@%s\" % (cname, self.start_pos[0]))\n return None\n\n # because of 2 line class initializations\n scope = pr.Class(self.module, cname, super, first_pos)\n if self.user_scope and scope != self.user_scope \\\n and self.user_position > first_pos:\n self.user_scope = scope\n return scope" ]
[ "0.70792365", "0.6988579", "0.68228126", "0.68027467", "0.67254263", "0.6457001", "0.64242077", "0.6291333", "0.6243986", "0.61837405", "0.61154544", "0.60808533", "0.6032137", "0.59897196", "0.5954794", "0.5947654", "0.5940507", "0.58991724", "0.5892846", "0.5872633", "0.5864711", "0.58500695", "0.58452624", "0.5799943", "0.5787262", "0.57737756", "0.57604814", "0.57556015", "0.57556015", "0.5754683" ]
0.76714414
0
Make a header from attrs. class label goes at start
def makeHeaderRow(attrs): return ['class'] + [attrs[i]['attr'] for i in range(len(attrs))]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_header(self): \n \n # create the individual labels\n hdr_bits = [hb.format(hdr) for hb, hdr in zip(self.row_base, self.headers)]\n \n # stick it all together and return with hdr_sep underneath\n hdr_str = f\"|{'|'.join(hdr_bits)}|\\n\"\n return hdr_str + self.hdr_sep * (len(hdr_str)-1) + \"\\n\"", "def __make_header__(self):\n header = lashead.Header(point_format=0)\n return header", "def create_header(numValues):\n\n header = []\n for value in range(numValues):\n header.append(\"att{}\".format(value))\n return header", "def generate_header(self, header=None):\n if header is None:\n header = self.header\n\n lines = [self.PREFIX_HEAD + '!b']\n for k, v in header.items():\n if k in ('labels', 'categories'):\n v = ', '.join(v)\n elif k == 'draft':\n v = repr(v)\n lines.append(self.HEADER_FMT % (k, v))\n lines.append(self.PREFIX_END)\n return '\\n'.join([_f for _f in lines if _f]) + '\\n'", "def header(self):\n ...", "def header_style(self):\n ...", "def _horizontal_header(self):\n return self.header()", "def _horizontal_header(self):\n return self.header()", "def create_headers(klass, attributes):\n\n if 'organization_id' in attributes:\n return {'x-contentful-organization': attributes['organization_id']}\n return {}", "def get_export_header(self):\n\n name = self.get_name()\n\n if (self.name == \"input::nodes\"):\n\n name = \"user-specified\"\n\n grp_string = self.get_grp_string()\n\n if grp_string != \"\":\n\n grp_string = \" \" + grp_string\n\n return \"\\n!*!Label \" + self.path[1] + \" ..\" + grp_string + \" .. \" + name + \"\\n\"", "def write_header(self):\n lines = [\"\"]\n\n for key in self._header_keys:\n value = self.get_attr_from_name(key)\n if isinstance(value, list):\n value = \",\".join([f\"{v:.1f}\" for v in value])\n elif isinstance(value, (float)):\n value = f\"{value:.7f}\"\n elif isinstance(value, (int)):\n value = f\"{value:.0f}\"\n\n key = (\n key.replace(\"_\", \" \")\n .title()\n .replace(\" \", \"\")\n .replace(\"MTEdit.\", \"MTEdit:\")\n )\n\n lines.append(f\"${key}={value.capitalize()}\")\n\n return lines", "def _create_hdr_obj(self, pix_len, pix_scale):\n hdr = astropy.io.fits.Header()\n hdr['NAXIS'] = 2\n hdr['NAXIS1'] = pix_len\n hdr['NAXIS2'] = pix_len\n hdr['CTYPE1'] = 'RA---TAN'\n hdr['CRVAL1'] = float(self.ra_ctr)\n hdr['CRPIX1'] = (pix_len / 2.) * 1.\n hdr['CDELT1'] = -1.0 * pix_scale\n hdr['CTYPE2'] = 'DEC--TAN'\n hdr['CRVAL2'] = float(self.dec_ctr)\n hdr['CRPIX2'] = (pix_len / 2.) 
* 1.\n hdr['CDELT2'] = pix_scale\n hdr['EQUINOX'] = 2000\n return hdr", "def csv_make_header(self, fileobj, title, comment=\"\"):\n #Line of header info\n \n fileobj.write(csv_line( ['Notes'] + [x.name for x in self.angles] + ['Wait For/n', 'Value'] ) )", "def __init__(\n self,\n arg=None,\n align=None,\n alignsrc=None,\n fill=None,\n font=None,\n format=None,\n formatsrc=None,\n height=None,\n line=None,\n prefix=None,\n prefixsrc=None,\n suffix=None,\n suffixsrc=None,\n values=None,\n valuessrc=None,\n **kwargs\n ):\n super(Header, self).__init__(\"header\")\n\n # Validate arg\n # ------------\n if arg is None:\n arg = {}\n elif isinstance(arg, self.__class__):\n arg = arg.to_plotly_json()\n elif isinstance(arg, dict):\n arg = _copy.copy(arg)\n else:\n raise ValueError(\n \"\"\"\\\nThe first argument to the plotly.graph_objs.table.Header \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.table.Header`\"\"\"\n )\n\n # Handle skip_invalid\n # -------------------\n self._skip_invalid = kwargs.pop(\"skip_invalid\", False)\n\n # Import validators\n # -----------------\n from plotly.validators.table import header as v_header\n\n # Initialize validators\n # ---------------------\n self._validators[\"align\"] = v_header.AlignValidator()\n self._validators[\"alignsrc\"] = v_header.AlignsrcValidator()\n self._validators[\"fill\"] = v_header.FillValidator()\n self._validators[\"font\"] = v_header.FontValidator()\n self._validators[\"format\"] = v_header.FormatValidator()\n self._validators[\"formatsrc\"] = v_header.FormatsrcValidator()\n self._validators[\"height\"] = v_header.HeightValidator()\n self._validators[\"line\"] = v_header.LineValidator()\n self._validators[\"prefix\"] = v_header.PrefixValidator()\n self._validators[\"prefixsrc\"] = v_header.PrefixsrcValidator()\n self._validators[\"suffix\"] = v_header.SuffixValidator()\n self._validators[\"suffixsrc\"] = v_header.SuffixsrcValidator()\n self._validators[\"values\"] = v_header.ValuesValidator()\n self._validators[\"valuessrc\"] = v_header.ValuessrcValidator()\n\n # Populate data dict with properties\n # ----------------------------------\n _v = arg.pop(\"align\", None)\n self[\"align\"] = align if align is not None else _v\n _v = arg.pop(\"alignsrc\", None)\n self[\"alignsrc\"] = alignsrc if alignsrc is not None else _v\n _v = arg.pop(\"fill\", None)\n self[\"fill\"] = fill if fill is not None else _v\n _v = arg.pop(\"font\", None)\n self[\"font\"] = font if font is not None else _v\n _v = arg.pop(\"format\", None)\n self[\"format\"] = format if format is not None else _v\n _v = arg.pop(\"formatsrc\", None)\n self[\"formatsrc\"] = formatsrc if formatsrc is not None else _v\n _v = arg.pop(\"height\", None)\n self[\"height\"] = height if height is not None else _v\n _v = arg.pop(\"line\", None)\n self[\"line\"] = line if line is not None else _v\n _v = arg.pop(\"prefix\", None)\n self[\"prefix\"] = prefix if prefix is not None else _v\n _v = arg.pop(\"prefixsrc\", None)\n self[\"prefixsrc\"] = prefixsrc if prefixsrc is not None else _v\n _v = arg.pop(\"suffix\", None)\n self[\"suffix\"] = suffix if suffix is not None else _v\n _v = arg.pop(\"suffixsrc\", None)\n self[\"suffixsrc\"] = suffixsrc if suffixsrc is not None else _v\n _v = arg.pop(\"values\", None)\n self[\"values\"] = values if values is not None else _v\n _v = arg.pop(\"valuessrc\", None)\n self[\"valuessrc\"] = valuessrc if valuessrc is not None else _v\n\n # Process unknown kwargs\n # ----------------------\n self._process_kwargs(**dict(arg, 
**kwargs))\n\n # Reset skip_invalid\n # ------------------\n self._skip_invalid = False", "def header(self, text, level, raw=None):\n return [[MdStyleInstructionCell('h{}'.format(level))] + text]", "def first_header():\n return \"\"\"\n<th>Target\n<th>Date\n<th colspan=\"2\">UT\n<th>Exp\n<th>Cycle\n<th>No. of\n<th>Filters\n<th>XxY\n<th>Speed\n<th>NX1xNY1\n<th>X1\n<th>Y1\n<th>NX2xNY2\n<th>X2\n<th>Y2\n<th>Grat.\n<th>Slit\n<th>Slit\n<th>ID\n<th>PI\n<th align=\"left\">Comment\n\"\"\"", "def csv_make_header(self, fileobj, title, comment=\"\"):\n #Line of header info\n fileobj.write(csv_line( ['Comment'] + [x.name.lower() for x in self.angles] + ['Wait For', 'Value'] ) )", "def header(self, **args):\n return self.pageConfig['header'] % self.pageConfig", "def second_header():\n return \"\"\"\n<th>\n<th>start\n<th>start\n<th>end\n<th>(secs)\n<th>time\n<th>frames\n<th>\n<th>bin\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>width\n<th>angle\n<th>\n<th>\n<th>\n<th>\n\"\"\"", "def csv_make_header(self, fileobj, title, comment=\"\"):\n fileobj.write(csv_line( [\"#Title:\", title] ) )\n fileobj.write(csv_line( [\"#Comment:\", comment] ) )\n #Any other useful comment s trings?\n fileobj.write('#\"First column is the sample phi motor rotation, in radians\"\\n' )\n fileobj.write('#\"Next 6 columns are the XY leg positions in mm, relative to the central (neutral) position.\"\\n' )\n fileobj.write('#\"Next are 2 columns for the stopping criterion parameters.\"\\n' )\n #Line of header info\n fileobj.write(csv_line( ['Phi', 'LegA_X', 'LegA_Y', 'LegB_X', 'LegB_Y', 'LegC_X', 'LegC_Y', 'CountFor', 'CountValue', 'Comment'] ) )", "def make_header(args):\n header = os.path.join(args.output_dir,'header.sam')\n args.header = header\n header_handle = open(header,'w')\n header_handle.write('@HD\\tVN:1.4\\n')\n joined_sam = open(os.path.join(args.output_dir, 'watson_joinedAligned.out.sam'))\n merged_sam = open(os.path.join(args.output_dir, 'watson_mergedAligned.out.sam'))\n for line in joined_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n else:\n break\n for line in merged_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n elif not line.startswith('@HD'):\n header_handle.write(line)\n else:\n break\n header_handle.close()\n in_files = {'header':os.path.join(args.output_dir,'header.sam')}\n addRG(in_files, args)\n return args", "def make_headers():\n headers = [\"agent_ident\", \"chro\"]\n for i in range(10):\n for j in range(5):\n s = \"d\" + str(i) + \"e\" + str(j)\n headers.append(s + \"_gene ident\")\n headers.append(s + \"_weight\")\n headers.append(s + \"_mut\")\n headers.append(s + \"_dom\")\n for j in range(5):\n s = \"d\" + \"a\" + \"e\" + str(j)\n headers.append(s + \"_gene ident\")\n headers.append(s + \"_weight\")\n headers.append(s + \"_mut\")\n headers.append(s + \"_dom\")\n for j in range(5):\n s = \"d\" + \"b\" + \"e\" + str(j)\n headers.append(s + \"_gene ident\")\n headers.append(s + \"_weight\")\n headers.append(s + \"_mut\")\n headers.append(s + \"_dom\")\n for i in range(6):\n for j in range(5):\n s = \"s\" + str(i) + \"e\" + str(j)\n headers.append(s + \"_gene ident\")\n headers.append(s + \"_weight\")\n headers.append(s + \"_mut\")\n headers.append(s + \"_dom\")\n for i in range(5):\n for j in range(6):\n s = \"e\" + str(i) + \"a\" + str(j)\n headers.append(s + \"_gene ident\")\n headers.append(s + \"_weight\")\n headers.append(s + \"_mut\")\n headers.append(s + \"_dom\")\n return headers", "def 
format_report_header(self):", "def add_header( name, value ):", "def __init__(self, headers):\n print headers\n print", "def header(name, value):\n print '%s: %s\\n' % (name, value)", "def make_header(text, size=80, symbol=\"-\"):\n header = symbol * size + \"\\n\"\n header += \"%s\\n\" % text\n header += symbol * size + \"\\n\"\n return header", "def hdrval(cls):\n\n hdr = {'Itn': 'Iter'}\n hdr.update(cls.hdrval_objfun)\n hdr.update({'Rsdl': 'Rsdl', 'F': 'F_Btrack', 'Q': 'Q_Btrack',\n 'It_Bt': 'IterBTrack', 'L': 'L'})\n\n return hdr", "def get_header(self, title):\n self.header = '<!DOCTYPE html>' \\\n '<html>' \\\n '<head>' \\\n '<title>Harm Brugge - ' + title + '</title>' \\\n '<link rel=\"icon\" href=\"../resources/img/dna.png\"/>' \\\n '<link href=\"../resources/css/bootstrap.min.css\" rel=\"stylesheet\">' \\\n '<link href=\"../resources/css/main.css\" rel=\"stylesheet\">' \\\n '<script type=\"text/javascript\" src=\"../resources/js/jquery.js\"></script>' \\\n '<script src=\"../resources/js/bootstrap.min.js\"></script>' \\\n '<script type=\"text/javascript\" src=\"../resources/js/bootbox.min.js\"></script>' \\\n '</head>' \\\n '<body>' \\\n '<div class=\"container shadow\">' \\\n '<div class=\"logo\">' \\\n '<h1></h1>' \\\n '</div>' \\\n '<br/>' \\\n '<div class=\"row content\">' \\\n '<div class=\"content-main\">' \\\n '<br/>' \\\n '<p class=\"lead content-title\">' + title + '</p>'\n return self.header", "def _horizontal_header(self):\n return self.horizontalHeader()" ]
[ "0.69086003", "0.6790904", "0.6636566", "0.641912", "0.6298546", "0.62713695", "0.62139636", "0.62139636", "0.62044215", "0.6202156", "0.61957175", "0.61957115", "0.61588085", "0.61331064", "0.6132515", "0.6127647", "0.6101991", "0.6100927", "0.6078027", "0.6056748", "0.60401607", "0.5974528", "0.59656376", "0.5963161", "0.5959081", "0.5952065", "0.5934469", "0.59270185", "0.59046143", "0.58842623" ]
0.770182
0
Write a Weka .arff file
def writeArff(file_name, relation, classes, attrs, data): print 'writeArff:', file_name, len(data), len(data[0]) f = file(file_name, 'w') f.write('%\n') f.write('%% %s \n' % os.path.basename(file_name)) f.write('%\n') f.write('% Created by ' + os.path.basename(sys.argv[0]) + ' on ' + datetime.date.today().strftime("%A, %d %B %Y") + '\n') f.write('% Code at http://bit.ly/b7Kkqt\n') f.write('%\n') f.write('% Constructed from raw data in http://archive.ics.uci.edu/ml/machine-learning-databases/soybean/\n') f.write('%% %d instances\n' % len(data)) f.write('%% %d attributes + 1 class = %d columns\n' % (len(data[0]) - 1, len(data[0]))) f.write('\n') f.write('@RELATION ' + relation + '\n\n') f.write('@ATTRIBUTE %-15s {%s}\n' % ('class', ','.join([x for x in classes if not x == '?']))) for a in attrs: f.write('@ATTRIBUTE %-15s {%s}\n' % (a['attr'], ','.join([x for x in a['vals'] if not x == '?']))) f.write('\n@DATA\n\n') for instance in data: f.write(', '.join(instance) + '\n') f.close() """ Copy .arff files to .arff.txt so they can be viewed from Google docs """ print 'writeArff:', file_name + '.txt', '-- duplicate' shutil.copyfile(file_name, file_name + '.txt')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def arff(features, path):\n out = open(path, 'w')\n\n # Header\n out.write(\"@RELATION music_speech\\n\")\n for i in range(features.shape[1]-1):\n out.write(\"@ATTRIBUTE MFCC_%i NUMERIC\\n\" % i)\n out.write(\"@ATTRIBUTE class {music,speech}\\n\\n@DATA\\n\")\n\n # Data\n for mfcc in features:\n for i in xrange(len(mfcc)-1):\n out.write(\"%f,\" % mfcc[i])\n out.write(\"%s\\n\" % ('music' if mfcc[-1] == 1 else 'speech'))\n\n out.close()", "def arff_file(data,attributes,relation,description,output_dir=\"./\",filename=\"tmp\"):\n x = []\n for k in attributes:\n x.append(k[0])\n data_write = {}\n data_write['data'] = manip.dic_to_list(data,order=x)[1:]\n data_write['attributes'] = [tuple(l) for l in attributes]\n data_write['relation'] = unicode(relation)\n data_write['description'] = unicode(description)\n data_final = arf.dumps(data_write)\n #print data_final\n fil = open(output_dir + filename + '.arff', \"w\")\n fil.write(data_final)\n fil.close()\n\n return None", "def create_arff(fname, mat, gold):\n \n gold = [int(g.split('.')[-1]) for g in gold]\n \n g = set(gold)\n\n gold = np.matrix(gold)\n \n c = np.concatenate((mat.todense(), gold.T), axis=1)\n\n ncol = mat.shape[1]\n #FIXME: pathi relative yap\n out = \"/home/tyr/Desktop/local.weka/\" + fname + '.arff'\n f = open(out, 'w')\n f.write(\"@relation %s\\n\\n\" % fname)\n for i in xrange(ncol):\n f.write(\"@attribute a%d numeric\\n\" % i)\n s = ','.join(map(str, g))\n f.write(\"@attribute class {%s}\\n\\n\" % s)\n f.write(\"@data\\n\")\n #FIXME: Avoid writing two times\n np.savetxt(f, c, delimiter=',', fmt='%5f')\n f.close()\n lines = open(out).readlines()\n f = open(out, 'w')\n for line in lines:\n if line[0] != '@' and len(line) != 1:\n line = line.split(',')\n tag = line[-1].strip()\n tag = str(int(float(tag))) + '\\n'\n line[-1] = tag\n f.write(','.join(line))\n f.write('\\n')\n else:\n f.write(line)\n f.close()", "def to_file(self, file_path, smirnoff_data):\n pass", "def save_to_arff(file_path, interactions, labels, selection,\n vectorizer=None, unlabelled=False, meka=True, use_bzip=True):\n if use_bzip:\n zipper = bz2\n else:\n zipper = gzip\n\n if vectorizer is None:\n vectorizer = CountVectorizer(lowercase=False, binary=True)\n\n X, y = interactions_to_Xy_format(interactions, selection)\n mlb = MultiLabelBinarizer(classes=sorted(labels), sparse_output=False)\n if not unlabelled:\n y = mlb.fit_transform(y)\n X = vectorizer.fit_transform(X)\n\n if meka:\n header = \"@relation 'PTMs: -C %d'\\n\\n\" % (len(labels))\n else:\n header = \"@relation PTMs\\n\\n\"\n\n for label in labels:\n header += \"@attribute %s {0,1}\\n\" % (label)\n for feature in (rename(x) for x in vectorizer.get_feature_names()):\n header += \"@attribute %s numeric\\n\" % (feature)\n\n header += \"\\n@data\\n\\n\"\n\n with zipper.open(file_path, 'wb') as fp:\n X = X.todense()\n if unlabelled:\n X = X.astype(str)\n y = y.astype(str)\n y[:, :] = '?'\n vec = np.hstack([y, X])\n np.savetxt(\n fp, X=vec, fmt='%s', delimiter=',', comments='', header=header\n )", "def create_training_file(D_RAT):\r\n return create_arff_file(D_RAT, 0)", "def writeWaveformToFile(self, filename, header='', binary=False):\n if filename == \"\": fo = sys.stdout # use stdout if no filename\n else: fo = open(filename, 'w')\n self._writeWaveform(fo, header, binary)\n fo.close()", "def _writeWaveform(self, fo, header='', binary=False):\n # TODO: Write channel data to file\n pass", "def to_file(self, file_path, smirnoff_data):\n xml_string = self.to_string(smirnoff_data)\n with open(file_path, 
\"w\") as of:\n of.write(xml_string)", "def write_data(values, cols, dataset):\n file = open(WRITEPATH+dataset, \"w\")\n weka = open(WEKAPATH+dataset[0:len(dataset)-3]+\"arff\", \"w\")\n\n weka.write(\"@relation emotion\\n\")\n weka.write(\"\\n\")\n\n\n for i in range(len(cols)-1):\n weka.write(\"@attribute \" + cols[i] + \" numeric\\n\")\n file.write(cols[i]+\",\")\n\n weka.write(\"@attribute \" + cols[-1] + \"{Positive,Negative,Neutral}\\n\")\n file.write(cols[-1]+\"\\n\")\n\n weka.write(\"\\n@data\\n\")\n\n for v in values:\n l = np.sum(v[0:len(v)-1])\n if l != 0:\n for i in range(len(v)-1):\n weka.write(str(v[i]) + \",\")\n file.write(str(v[i]) + \",\")\n weka.write(str(v[-1]) + \"\\n\")\n file.write(str(v[-1]) + \"\\n\")\n file.close()\n weka.close()", "def save(self, filename):\n target = open(filename, 'w')\n target.write(\"\\\\data\\\\\\n\")\n target.write(\"ngram 1=\" + str(len(self.f1)) + \"\\n\\n\")\n target.write(\"\\\\1-grams:\\n\")\n for w,p in sorted(self.f1.items()): \n target.write(str(p) + \" \" + w + \"\\n\")\n target.write(\"\\\\end\\\\\\n\")\n target.close()", "def write(self, f):\n if self.best_mhc_align:\n mhc_align_str = self.best_mhc_align.subject_str()\n mhc_score_str = str(self.best_mhc_align.bit_score)\n else:\n mhc_align_str = \".\"\n mhc_score_str = \"0\"\n\n if self.best_non_mhc_align:\n non_mhc_align_str = self.best_non_mhc_align.subject_str()\n non_mhc_score_str = str(self.best_non_mhc_align.bit_score)\n else:\n non_mhc_align_str = \".\"\n non_mhc_score_str = \"0\"\n \n f.write(\"\\t\".join([self.locus, self.short_samp_id, self.name,\n str(self.length), mhc_align_str, non_mhc_align_str,\n mhc_score_str, non_mhc_score_str,\n str(self.n_mhc_align), str(self.n_non_mhc_align)]) + \"\\n\")", "def save_audio(ndarray, feature_name, out_path, x, y, new_labels, filename=None, sr=SR):\n # this is kind-of standard\n filename = filename or FeatureExtractor.get_file_name(x, feature_name, 'wav')\n librosa.output.write_wav(out_path / filename, ndarray, sr=sr, norm=True)\n new_labels.append([filename, y])\n print('info: {} transformed and saved!'.format(filename))\n return filename", "def extract_feats_to_file(npy_path, audio_path, featurizer):\n # Returns a (time, feature) NumPy array\n data = featurizer.file_to_feats(audio_path)\n np.save(npy_path, data)", "def write_predictions_to_file(predictor, testDataFname, enc, outputFname, features=None):\n\n testData, _, testDataIds, _ = make_data(testDataFname, features=features, enc=enc)\n\n dt = datetime.now()\n predictions = predictor.predict(testData)\n print 'predicting took', datetime.now() - dt\n\n featureSelectionOutput = np.transpose(np.vstack((testDataIds, predictions.round().astype(int))))\n\n with open(outputFname, 'wb') as outputFile:\n writer = csv.writer(outputFile)\n writer.writerow(['id', 'loss'])\n writer.writerows(featureSelectionOutput)", "def writeFastaFile(filename,sequences):\n fhw=open(filename,\"w\")\n for id in sequences:\n fhw.write(\">\"+id+\"\\n\"+sequences[id]+\"\\n\")\n fhw.close()", "def write_pred_kaggle_file(cls, outfname, speech):\n yp = cls.predict(speech.test_doc_vec)\n labels = speech.le.inverse_transform(yp)\n f = codecs.open(outfname, 'w')\n f.write(\"FileIndex,Category\\n\")\n for i in range(len(speech.test_fnames)):\n fname = speech.test_fnames[i]\n f.write(fname + ',' + labels[i] + '\\n')\n f.close()", "def to_file(self, filename):\n self.header['n'] = self.n\n save_gyre(filename, self.header, self.data)", "def write(filename):\n print(uc.write(filename))", "def write_filepath(self, 
filename, file_format='FASTA', zipout=False):\n \n file_obj = open_with_intermediates(filename,'w')\n if zipout:\n file_obj.close()\n file_obj = StringIO()\n self.write(file_obj, file_format=file_format)\n if zipout:\n import gzip\n file_obj_gz = gzip.open(filename, \"wb\", 6)\n file_obj_gz.write(str.encode(file_obj.getvalue()))\n file_obj_gz.close()\n file_obj.close()", "def write_filepath(self, filename, file_format='FASTA', zipout=False):\n \n file_obj = open_with_intermediates(filename,'w')\n if zipout:\n file_obj.close()\n file_obj = StringIO()\n self.write(file_obj, file_format=file_format)\n if zipout:\n import gzip\n file_obj_gz = gzip.open(filename, \"wb\", 6)\n file_obj_gz.write(str.encode(file_obj.getvalue()))\n file_obj_gz.close()\n file_obj.close()", "def build_file(corpus_dir, feature_module, relation_name='author_detection',\n file_name='feature.arff', class_name='author'):\n input_vals = author_reader.iter_text(corpus_dir)\n module = __import__(feature_module)\n return arff_writer.write_feature_file(\n file_name, relation_name, module, input_vals, class_name)", "def to_file(self, fn):\n store.store_dict(fn, 'trainalgorithm', self.to_dict())", "def writeVOC(b, ld, f):\n of = os.path.join(ld, f[: f.rfind('.')] + \".txt\")\n with open(of, \"w\") as fh:\n dname = os.path.dirname(os.path.dirname(of))\n fh.write(\"\"\"<annotation>\n <folder>%s</folder>\n <filename>%s</filename>\n <source>\n <database>The NVIDIA AI City 2017 dataset</database>\n <annotation>PASCAL VOC2007</annotation>\n <image>keyframes</image>\n </source>\n <size>\n <width>%d</width>\n <height>%d</height>\n <depth>3</depth>\n </size>\n <segmented>0</segmented>\n\"\"\" % (dname, f, nwidth, nheight))\n for r in b:\n fh.write(\"\"\" <object>\n <name>%s</name>\n <bndbox>\n <xmin>%d</xmin>\n <ymin>%d</ymin>\n <xmax>%d</xmax>\n <ymax>%d</ymax>\n </bndbox>\n </object>\n\"\"\" % ( r[0], int(r[1]*dw), int(r[2]*dh), int(r[3]*dw), int(r[4]*dh) ))\n fh.write(\"</annotation>\")", "def save_elem_file(self, output):\n with open(output, 'wb') as fid:\n self._write_elem_header(fid)\n self._write_nodes(fid)\n self._write_elements(fid)\n self._write_neighbors(fid)", "def save_to(self, f: BinaryIO):\n raise NotImplementedError", "def merge_arff(indir, outfilename):\n utils.print_success(\"Preprocessing ARFFs\")\n indir = utils.abs_path_dir(indir)\n tmpfilename = \"tmp_arff.txt\"\n os.system(\"ls \" + indir + \" > \" + tmpfilename)\n with open(tmpfilename, 'r') as filenames:\n outfn = open(outfilename, 'w')\n cpt_invalid_fn = 0\n # Write first lines of ARFF template file\n for filename in filenames:\n filename = validate_arff(indir + \"/\" + filename[:-1])\n if filename:\n with open(filename, 'r') as template:\n nb_line = 77\n for line in template:\n if not nb_line:\n break\n nb_line -= 1\n outfn.write(line)\n break\n else:\n cpt_invalid_fn += 1\n # Append all arff file to the output file\n cur_file_num = 1\n for filename in filenames:\n filename = validate_arff(indir + \"/\" + filename[:-1])\n if filename:\n cur_file_num = cur_file_num + 1\n sys.stdout.write(\"\\r\\tAnalysing file\\t\" + str(cur_file_num))\n sys.stdout.flush()\n fname = open(filename,'r')\n outfn.write(\"\".join(fname.readlines()[74:77]))\n fname.close()\n else:\n cpt_invalid_fn += 1\n sys.stdout.write('\\n')\n sys.stdout.flush()\n outfn.close()\n os.remove(tmpfilename)\n if cpt_invalid_fn:\n utils.print_warning(str(cpt_invalid_fn) + \" ARFF with errors found\")\n utils.print_success(\"Preprocessing done\")\n return outfilename", "def writeToFile(fil, aks, 
tid):\r\n\r\n f = open(\"processed_\"+fil, 'w')\r\n \r\n f.write(\"Aks Tid\")\r\n for i in range(len(aks)):\r\n f.write(f\"\\n{aks[i]} {tid[i]}\")\r\n f.close()", "def write_megam_file(train_toks, encoding, stream, bernoulli: bool = ..., explicit: bool = ...):\n ...", "def write_train_data(lafs, ltf_dir, enc, trainf):\n with open(trainf, 'w') as f:\n\n A_vals = set()\n B_vals = set()\n G_vals = set()\n ltfs = []\n\n for laf in lafs:\n # Check that the LTF and LAF are valid.\n bn = os.path.basename(laf);\n ltf = os.path.join(ltf_dir, bn.replace('.laf.xml', '.ltf.xml'));\n ltfs.append(ltf)\n\n A_vals, B_vals, G_vals = get_ABG_value_sets(ltfs, logger)\n\n\n# laf_doc = load_doc(laf, LAFDocument, logger);\n# ltf_doc = load_doc(ltf, LTFDocument, logger);\n# if laf_doc is None or ltf_doc is None:\n# continue;\n \n # Extract features/targets.\n# try:\n # Extract tokens.\n# try:\n# tokens, token_ids, token_onsets, token_offsets, token_As, token_Bs, token_Gs = ltf_doc.tokenizedWithABG();\n# except:\n# tokens, token_ids, token_onsets, token_offsets = ltf_doc.tokenized();\n# token_As = token_Bs = token_Gs = None;\n# if token_As != None:\n# A_vals.update(token_As)\n# if token_Bs != None:\n# B_vals.update(token_Bs)\n# if token_Gs != None:\n# G_vals.update(token_Gs)\n# except:\n# logger.warn('ABG values not found for %s. Skipping.' % laf);\n# continue;\n\n print(\"Found the following number of values for ABG:\\nA: {}\\nB: {}\\nG: {}\\n\".format(len(A_vals), len(B_vals), len(G_vals)))\n\n for laf in lafs:\n # Check that the LTF and LAF are valid.\n bn = os.path.basename(laf);\n ltf = os.path.join(ltf_dir, bn.replace('.laf.xml', '.ltf.xml'));\n laf_doc = load_doc(laf, LAFDocument, logger);\n ltf_doc = load_doc(ltf, LTFDocument, logger);\n if laf_doc is None or ltf_doc is None:\n continue;\n \n # Extract features/targets.\n try:\n # Extract tokens.\n try:\n tokens, token_ids, token_onsets, token_offsets, token_nums, token_As, token_Bs, token_Gs, token_Fs, token_Js = ltf_doc.tokenizedWithABG();\n except:\n tokens, token_ids, token_onsets, token_offsets, token_nums = ltf_doc.tokenized();\n token_As = token_Bs = token_Gs = token_Fs = token_Js = None;\n \n # Convert mentions to format expected by the encoder; that is,\n # (tag, token_onset, token_offset).\n mentions = laf_doc.mentions();\n if len(mentions) == 0:\n mentions_ = [];\n else:\n # Map to the minimal enclosing span of tokens in the\n # supplied LTF.\n entity_ids, tags, extents, char_onsets, char_offsets = zip(*mentions);\n mention_onsets, mention_offsets = convert_extents(char_onsets, char_offsets,\n token_onsets, token_offsets);\n mentions_ = list(zip(tags, mention_onsets, mention_offsets));\n\n # Eliminate overlapping mentions, retaining whichever\n # is first when sorted in ascending order by (onset, offset).\n sort_mentions(mentions_);\n prev_mention_offset = -1;\n temp_mentions_ = [];\n for tag, mention_onset, mention_offset in mentions_:\n if mention_onset > prev_mention_offset:\n temp_mentions_.append([tag, mention_onset, mention_offset]);\n prev_mention_offset = mention_offset;\n mentions_ = temp_mentions_;\n\n feats, targets = enc.get_feats_targets(tokens, mentions_, token_nums, token_As, token_Bs, token_Gs, token_Fs, token_Js, A_vals, B_vals, G_vals);\n\n except:\n logger.warn('Feature extraction failed for %s. Skipping.' % laf);\n continue;\n\n # Write to file.\n write_crfsuite_file(f, feats, targets);" ]
[ "0.65214306", "0.64201504", "0.6406658", "0.6093845", "0.59766054", "0.59047896", "0.58643204", "0.58590144", "0.57665783", "0.5628115", "0.5613803", "0.5608763", "0.5576813", "0.55577165", "0.5512611", "0.55023175", "0.5419931", "0.53972125", "0.53967565", "0.5383743", "0.5383743", "0.53652483", "0.5351774", "0.5347746", "0.5337704", "0.5331098", "0.53294307", "0.53177947", "0.5284545", "0.52697843" ]
0.7022959
0
Tries to return a relative path from staging
def get_relative_path_from_staging(staging_path): staging_dir = settings.EXPORT_STAGING_ROOT.lstrip(os.path.sep).rstrip(os.path.sep) staging_path = staging_path.lstrip(os.path.sep) if staging_dir in staging_path: return staging_path.replace(staging_dir, "") return staging_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _abs_path(rel_path):\n return os.path.join(BASE_DIR, rel_path)", "def _get_local_src(self, path: Path) -> Path:\n src = \"\"\n\n if str(path).startswith(\"~\"):\n path = Path(str(path).replace(\"~/\", \"\"))\n\n if self.category == \"global\":\n src = f\"{self.local_base}/global{path}\"\n elif self.category == \"local\":\n src = f\"{self.local_base}/local/{path}\"\n else:\n src = f\"{self.local_base}/custom/{path}\"\n\n return Path(src)", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def relpath(targpath: str, basepath: str='') -> str:\n pass", "def _absPath(self, relpath):\n\n # Pass through URIs and absolute paths.\n if self.isUrl(relpath) or relpath[0] == '/':\n return relpath\n\n # This won't deal with ~user/ syntax, but it's much less\n # common anyway.\n if relpath.startswith('~/') and 'HOME' in os.environ:\n return os.path.join(os.environ['HOME'], relpath[2:])\n\n if self._configFileStack:\n relativeTo = os.path.dirname(self._configFileStack[-1])\n else:\n relativeTo = os.getcwd()\n\n if self.isUrl(relativeTo):\n parts = urlparse.urlsplit(relativeTo)\n return urlparse.urlunsplit((parts.scheme, parts.netloc, os.path.normpath(os.path.join(parts.path, relpath)), parts.query, parts.fragment))\n return os.path.normpath(os.path.join(relativeTo, relpath))", "def _get_local_dest(self, path: Path) -> Path:\n dest = \"\"\n\n if str(path).startswith(\"~\"):\n path = path.relative_to(\"~\")\n\n if self.category == \"global\":\n dest = f\"{self.local_base}/global/{path}\"\n elif self.category == \"local\":\n dest = f\"{self.local_base}/local/{path}\"\n else:\n dest = f\"{self.local_base}/custom/{path}\"\n\n return Path(dest)", "def srcdir(path):\n if not workflow.included_stack:\n return None\n return workflow.current_basedir.join(path).get_path_or_uri()", "def local_path(self):\n if self.repo_path:\n return self.repo_path\n tmpdir = PurePath(tempfile.gettempdir())\n return str(tmpdir.joinpath('harvest', self.org, self.repo))", "def resource_path(relative_path):\n return os.path.join(BASEPATH, relative_path)", "def fake_full_path(self) -> PurePath:\n if self.category:\n # Giza wrote out yaml file artifacts under a directory. e.g. 
steps-foo.yaml becomes\n # steps/foo.rst\n return self.source_path.parent.joinpath(\n PurePath(self.category), self.output_filename\n )\n return self.source_path", "def __relative_path(self, p4file):\n return self.ctx.depot_path(p4file.depot_path).to_gwt()", "def get_path():\n return path.abspath(path.dirname(path.dirname(__file__)))", "def relative_path(__file__, path):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), path))", "def output_path():\n folder = path.join(path.curdir, \"stages\")\n folder = path.abspath(folder)\n return ensure_path(folder)", "def _fullpath(self, path):\n splitpath = path.split(self._baseurl, 2)\n if len(splitpath) == 1:\n result = os.path.join(self._baseurl, path)\n else:\n result = path # path contains baseurl already\n return result", "def getAbsolutePath(relPath):\n currDir = os.path.dirname(__file__)\n return os.path.join(currDir, relPath)", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def get_absolute_pathname(self):\n return os.path.join(settings.PRIVATE_STORAGE_ROOT, self.get_relative_pathname())", "def _get_relative_path(self, abs_path):\r\n relative_path = os.path.relpath(abs_path, settings.PROJECT_ROOT)\r\n return relative_path", "def _path(self):\n if self.target[-1] != \"/\":\n self.target += \"/\"\n\n if \"/\" in self.source:\n self.path = self.target + self.source.split(\"/\")[-1]\n else:\n raise NotImplementedError(\"This software is not done for Windows\")\n if self.method == \"git\":\n self.path = self.path.replace(\".git\", \"\")", "def GetPath(path_from_src):\n path = os.path.join(os.path.dirname(__file__), '../..', path_from_src)\n if not os.path.isfile(path):\n print 'WARNING: %s does not exist. Maybe moved or renamed?' % path\n return path", "def GetSrc():\n return os.path.abspath(os.path.join(_THIS_DIR, os.pardir, os.pardir,\n os.pardir))", "def _GetRelPath(self, filename):\r\n absname = os.path.join(self.repo_dir, filename)\r\n return os.path.relpath(absname)", "def relative_path(base, target):\r\n common, base_tail, target_tail = split_common(base, target)\r\n #print \"common:\", common\r\n #print \"base_tail:\", base_tail\r\n #print \"target_tail:\", target_tail\r\n r = len(base_tail) * [os.pardir] + target_tail\r\n if r:\r\n return os.path.join(*r)\r\n else:\r\n return os.curdir", "def path(cls, relpath=None):\r\n base = os.getcwd() if not ParseContext._active else cls.locate().current_buildfile.parent_path\r\n return os.path.abspath(os.path.join(base, relpath) if relpath else base)", "def _resolve_relative_path(filepath: str):\n if not filepath:\n return None\n\n inf_path = os.path.join(os.path.dirname(__file__), filepath)\n\n return inf_path", "def test_relativise_src_under():\n src = pathlib.Path(\"/tmp/foo/bar/baz/src.txt\")\n dst = pathlib.Path(\"/tmp/foo/dst.txt\")\n rel = relativise(src, dst)\n assert rel == pathlib.Path(\"../../dst.txt\")", "def abspath(self, ref):\n \n directory, path = get_location(self.directory, ref.strip(),\n current=dirname(self.relative))\n path = join_fb_root(join(directory, path))\n return path", "def _get_resource_path(filename, path=Path.TEST):\n return os.path.normpath(os.path.join(path.value, filename))", "def path(x):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), x))" ]
[ "0.6901522", "0.6555474", "0.64813983", "0.6473201", "0.6391527", "0.636764", "0.63647974", "0.63283", "0.62761253", "0.6239532", "0.6201204", "0.61716205", "0.61697423", "0.61479324", "0.6136124", "0.613316", "0.6131473", "0.610699", "0.6102893", "0.60969174", "0.60914755", "0.6090929", "0.6078979", "0.6059856", "0.60569876", "0.6043589", "0.6036521", "0.60296583", "0.6014791", "0.6010492" ]
0.81900954
0
Stages the data in the `fileobj` for subsequent committal under the given `class_` and `key`.
def put(self, class_, key, fileobj): stageobj = StringIO() shutil.copyfileobj(fileobj, stageobj) fileobj.seek(0) stageobj.seek(0) idx = self._idx(class_, key) self.stage[idx] = (class_, key, stageobj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self,fileobj_out,fileobj_in):\n pass", "def __init__(self):\n self.keyingMethod=fileSize\n self.keyToFile=dict()", "def commit(self):\n if self.keys:\n # If there used to be some keys, there must exist an old dictionary blob somewhere in the database. It should be deallocated after a successful commit to disk.\n self.file.seek(0)\n headerdump = self.file.read(16)\n if sum(headerdump):\n dictat,dictlen = struct.unpack(\"<QQ\", headerdump)\n self.awaitingpunch.append((dictat,dictlen))\n\n # All buffered (modified but uncommited) values get serialized and sent to disk.\n for key,value in self.buffered.items():\n valuedump = pickle.dumps(value)\n valueat = self.file.safeappend(valuedump, 16)\n self.keys[key] = (valueat,len(valuedump))\n self.buffered.clear()\n\n # A new dictionary blob gets serialized and sent to disk.\n dictdump = pickle.dumps(self.keys)\n dictat = self.file.safeappend(dictdump, 16)\n\n # Finally, the header gets overwritten atomically and orderly.\n headerdump = struct.pack(\"<QQ\", dictat, len(dictdump))\n self.file.fsync()\n self.file.writep(0, headerdump)\n self.file.fsync()\n\n # Whatever value blobs and dictionary blobs are no longer being pointed to, they can be safely deallocated.\n for (punchat,punchlen) in self.awaitingpunch:\n self.file.fallocate(2|1, punchat, punchlen)\n self.awaitingpunch.clear()\n\n self.buffered.clear()\n\n else:\n self.awaitingpunch.clear()\n self.file.fsync()\n self.file.truncate(0)\n self.file.fsync()", "def save_to_fileobj(self, fileobj):\n writetags(fileobj, self.__dxftags__(), self.ENCODING)", "def file_upload_to_obj():\n\n temp = []\n file_content = pd.read_excel(INPUT_FILE_NAME).fillna(0).to_dict('records')\n sorted_content = sorted(file_content, key=itemgetter(\n 'filedbentity.file_extension'))\n for item in file_content:\n\n raw_date = item.get('filedbentity.file_date')\n if raw_date:\n temp_date = raw_date.strftime('%Y-%m-%d')\n raw_date = datetime.strptime(temp_date, \"%Y-%m-%d\").date()\n else:\n raw_date = datetime.now().date()\n\n raw_status = item.get('dbentity.status')\n if raw_status == 'Archive':\n raw_status = 'Archived'\n \n obj = {\n 'path': item.get('EBS path'),\n 'display_name': item.get('dbentity.display_name'),\n 'status': raw_status,\n 'source': item.get('dbentity.source'),\n 'topic_edam_id': item.get('topic edam_id').upper().replace('TOPIC', 'EDAM').strip(),\n 'data_edam_id': item.get('data edam_id').upper().replace('DATA', 'EDAM').strip(),\n 'format_edam_id': item.get('format edam_id').upper().replace('FORMAT', 'EDAM').strip(),\n 'file_extension': item.get('filedbentity.file_extension'),\n 'file_date': raw_date,\n 'is_public': (item.get('filedbentity.is_public') == '1'),\n 'is_in_spell': item.get('filedbentity.is_in_spell'),\n 'is_in_browser': (item.get('filedbentity.is_in_browser') == '1'),\n 'readme_name': item.get('readme name'),\n 'description': item.get('filedbentity.description'),\n 'pmids': item.get('pmids (|)'),\n 'keywords': item.get('keywords (|)')\n }\n temp.append(obj)\n\n if len(temp) > 0:\n return temp\n return None", "def __setitem__(self, fname, obj):\n if isinstance(obj, Tree):\n for i in obj:\n obj[i.name] = obj[i]\n\n else:\n with open(os.path.join(self.name, fname), 'wb') as openfile:\n raw = obj.read()\n openfile.write(raw.encode() if isinstance(raw, str) else raw)\n\n self._refresh()", "def load_class(self):\n if not os.path.exists(self.savefile):\n self.save_class()\n\n with open(self.savefile, \"r\") as f:\n data = json.load(f)\n for key, value in data.items():\n # set every 
dict key to an atribute of the class\n setattr(self, key, value) # self.key = value", "def add_file(self, key, dict, data):\n try:\n # If new file (aka, data passed in, write file)\n path = os.path.join(self.file_path, '%s.xoj' % key)\n f = open( path, 'w' )\n f.write(data)\n f.close()\n\n self.fileList[key] = dict\n except:\n print \"Error writing file\", path\n\n self.save()", "def upload_fileobj(self, bucket_name, file_obj, key):\n self._client.upload_fileobj(Fileobj=file_obj, Bucket=bucket_name, Key=key)", "def _write_hy_tran_vcont_kv(self, f_obj, layer):\n if self.laycon[layer] in [0, 2]:\n f_obj.write(self.tran[layer].get_file_entry())\n else:\n f_obj.write(self.hy[layer].get_file_entry())\n\n if (self.ikvflag == 0) and layer < (self.parent.nlay - 1):\n f_obj.write(self.vcont[layer].get_file_entry())\n elif (self.ikvflag == 1) and (self.parent.nlay > 1):\n f_obj.write(self.kv[layer].get_file_entry())", "def process_one_file(guid, key, env):\n logging.info(f'Processing file: {key}')\n try:\n query_seq = read_s3_file(key).seq\n results = []\n for name, reference_seq in REFERENCE_RECORDS.items():\n offset = reference_seq.seq.find(query_seq)\n if offset != -1:\n result = {'filename': name,\n 'offset': offset,\n 'name': reference_seq.name,\n 'desc': reference_seq.description\n }\n results.append(result)\n logging.info(f'found in {name} at {offset}')\n update_database(guid, 'done', env, results)\n logging.info(f'Update succeeded for guid={guid} in env={env}')\n except Exception as err:\n report = {'time': str(datetime.utcnow()),\n 'guid': guid,\n 'env': env,\n 'key': key,\n 'trace' : traceback.format_exc()\n }\n results = [{'error' : report}]\n update_database(guid, 'error', env, results)\n raise", "def _put(self, key, data):\n path = self._get_key_path(key)\n with open(path, \"wb\") as pickle_file:\n pickle.dump(data, pickle_file)", "def transfer(file_obj):", "def s3_process(self, payload, classifier):\n s3_file_lines = StreamPreParsers.pre_parse_s3(payload.raw_record)\n for line in s3_file_lines:\n data = line.rstrip()\n payload.refresh_record(data)\n self.process_alerts(classifier, payload, data)", "def map_file_data(file_obj, file_events):\n file_as_dict = {\n \"premis:originalName\": file_obj.currentlocation,\n \"original_name\": escape(file_obj.originallocation),\n # needs investigation\n \"sanitized_file_name\": get_sanitized_file_name(\n get_file_name_cleanup(file_events)\n ),\n \"prov:generatedAtTime\": file_obj.modificationtime.strftime(\n \"%Y-%m-%dT%H:%M:%SZ\"\n ),\n \"premis:fixity\": {\n \"checksum_type\": convert_to_premis_hash_function(file_obj.checksumtype),\n \"Checksum\": file_obj.checksum,\n },\n \"premis:identifier\": file_obj.uuid,\n \"premis:size\": file_obj.size,\n \"file_name\": file_obj.label,\n # not sure if this is the file name or if we should stick with\n \"dct:FileFormat\": map_file_format_info(\n get_file_format_event(file_events), get_file_validation_event(file_events)\n ),\n \"file_validation\": map_file_validation_info(\n get_file_validation_event(file_events)\n ),\n \"file_normalization\": map_file_normalization_info(\n get_file_normalization_event(file_events)\n ),\n \"events\": list_file_events(file_events),\n }\n return file_as_dict", "def import_project_dump(self, key):", "def set_blob ( self, object_class_id, object_instance_id, attribute_name, blob_file, file_name ) :\n try :\n inputs = []\n inputs.append(open(blob_file, 'rb'))\n for input in inputs:\n binary_data = input.read()\n blobfile = self.oracle_cursor.var(cx_Oracle.BLOB)\n 
blobfile.setvalue(0, binary_data)\n self.oracle_cursor.callproc(\"sdb_interface_pck.setBlob\", [object_class_id, object_instance_id, attribute_name, file_name, blobfile ])\n except Exception, err:\n print \"Error storing BLOB: ERROR: \" + str(err)\n raise", "def _process_fileobj(self,\n tar_file_obj: IO[bytes],\n object_node: graph.COSObject) -> None:\n with tarfile.open(fileobj=tar_file_obj) as tar_file:\n gz_file_names = [f.name for f in tar_file.getmembers()\n if f.name.endswith('.gz')]\n for gz_file_name in gz_file_names:\n gz_file_obj = tar_file.extractfile(gz_file_name)\n if gz_file_obj:\n oem_data = self._parse_oem_data(gz_file_obj)\n aso_id = self._get_aso_id_from_file_name(gz_file_name)\n self._save_oem(oem_data, aso_id, object_node)", "def do_update(self, line):\n if line:\n args = shlex.split(line)\n if len(args) < 2:\n print(\"** instance id missing **\")\n return False\n elif len(args) < 3:\n print(\"** attribute name missing **\")\n return False\n elif len(args) == 3:\n print(\"** value missing **\")\n return False\n else:\n obj_name, obj_id, obj_attr, obj_value = args\n obj_repr = \"{}.{}\".format(obj_name, obj_id)\n data = FileStorage()\n data.reload()\n data_loaded = data.all()\n for key, value in data_loaded.items():\n if key == obj_repr:\n obj = eval(obj_name)(**value.to_dict())\n if obj_name in obj.__dict__.keys():\n obj[obj_name] = obj_value\n else:\n setattr(obj, obj_attr, obj_value)\n d = {}\n for s_key, s_value in data_loaded.items():\n d[s_key] = s_value.to_dict()\n with open(data.path(), mode='w', encoding=\"utf-8\") as file:\n file.write(json.dumps(d))\n break\n else:\n print(\"** class doesn't exist **\")\n else:\n print(\"** class name missing **\")", "def build(self, file_number, data):\n pass", "def manage_afterPUT(self, data, marshall_data, file, context, mimetype,\n filename, REQUEST, RESPONSE):\n file.seek(0)\n self.setImage(file)", "def commit(self, key, data):\n try:\n fobj = open(os.path.join(settings.SVN_WC_PATH, key), 'w')\n except IOError:\n #parent directory seems to be missing\n self.initial(os.path.dirname(os.path.join(settings.SVN_WC_PATH, key)))\n return self.commit(key, data)\n fobj.write(data)\n fobj.close()\n c = pysvn.Client()\n try:\n #svn add will throw an error, if the file is already under version control\n c.add(os.path.join(settings.SVN_WC_PATH, key))\n except:\n #but we don't care ...\n pass\n c.checkin(os.path.join(settings.SVN_WC_PATH, key), log_message=\"auto checkin from django\")\n c.update(settings.SVN_WC_PATH)", "def store_harvest(self, file_prefix, data):\n compressed = bz2.compress(data)\n k = f\"{self.harvest_key_prefix}/{self.harvest_date}/{file_prefix}.bz2\"\n self.s3_client.put_object(\n Body=compressed,\n Bucket=self.s3_bucket,\n Key=k,\n )", "def save_file(app, file_json, item_model, proj_model, fy_model, casc_model):\n # casc_model = app.casc.query.filter_by(id=casc_model).first()\n # fy_model = app.FiscalYear.query.filter_by(id=fy_model).first()\n # proj_model = app.Project.query.filter_by(id=proj_model).first()\n # item_model = app.Item.query.filter_by(id=item_model).first()\n # Since there is not science base id for a file, url is best to find it:\n sb_file = app.db.session.query(app.SbFile).filter(\n app.SbFile.url == file_json[\"url\"]).first()\n # sb_file = app.Item.query.filter_by(url=file_json[\"url\"]).first()\n if sb_file is None: # The Fiscal Year was not found in the db\n print(\"\\t\\t---------SQL--------- [SbFile] Could not find \" +\n \"{} in database...\".format(file_json[\"name\"].encode('utf-8')))\n 
sb_file = app.SbFile(url=file_json[\"url\"],\n name=file_json[\"name\"],\n # Convert bytes to megabytes:\n size=(file_json[\"size\"]/1000000),\n content_type=file_json[\"contentType\"])\n # Many-to-many relationship definitions:\n sb_file.cascs.append(casc_model)\n sb_file.fiscal_years.append(fy_model)\n sb_file.projects.append(proj_model)\n sb_file.items.append(item_model)\n app.db.session.add(sb_file)\n else:\n print(\"\\t\\t---------SQL--------- [SbFile] Found {} in database...\"\n .format(file_json[\"name\"].encode('utf-8')))\n if sb_file.name != file_json[\"name\"]:\n sb_file.name = file_json[\"name\"]\n if sb_file.url != file_json[\"url\"]:\n sb_file.url = file_json[\"url\"]\n if sb_file.size != file_json[\"size\"]:\n sb_file.size = file_json[\"size\"]\n if sb_file.content_type != file_json[\"contentType\"]:\n sb_file.content_type = file_json[\"contentType\"]\n\n # Many-to-many relationships (need db model):\n if not (any(casc.id == casc_model.id for casc in sb_file.cascs)):\n sb_file.cascs.append(casc_model)\n if not (any(fy.id == fy_model.id for fy in sb_file.fiscal_years)):\n sb_file.fiscal_years.append(fy_model)\n if not (any(proj.id == proj_model.id for proj in sb_file.projects)):\n sb_file.projects.append(proj_model)\n if not (any(item.id == item_model.id for item in sb_file.items)):\n sb_file.items.append(item_model)\n\n # Add new timestamp\n sb_file.timestamp = datetime.utcnow()\n\n app.db.session.commit()\n print(\"\\t\\t---------SQL--------- [SbFile] Done with {}.\"\n .format(sb_file.name.encode('utf-8')))\n return sb_file", "def __init__(self, pool_guid, bptxg, fileobj, mtime=None) :\n super(ZFSFileModifyEvent, self).__init__(pool_guid, bptxg, \\\n \"mtime\", self.DATA_TYPE, mtime)\n self.fileobj = fileobj", "def export_project_dump(self, key):", "def feed(self, fileobj):\n self._parser.feed(fileobj)", "def process_and_save(db: Broker, uid, tiff_path: str, data_key: str) -> None:\n run = db[uid]\n dk_uid = run.start.get(\"sc_dk_field_uid\", \"\")\n dk_run = db[dk_uid] if dk_uid else None\n dk_image = _mean(dk_run.data(data_key)) if dk_run else None\n image = _mean(run.data(data_key))\n image -= dk_image\n tw = TiffWriter(tiff_path)\n tw.write(image)\n return", "def _write_current_buffer_for_group_key(self, key):\n write_info = self.write_buffer.pack_buffer(key)\n self.write(write_info.get('file_path'),\n self.write_buffer.grouping_info[key]['membership'])\n self.write_buffer.clean_tmp_files(write_info)\n self.write_buffer.add_new_buffer_for_group(key)", "def _write(self, out_file):\n #\n # I know this function is long, but the FRD block is long as well...\n # Splitting this into multiple functions would not help in my opinion.\n # Therefore -> shut up pylint\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-statements\n #\n out_file.write(' '.encode()) # pad byte\n out_file.write('{:4d}'.format(self.key).encode())\n out_file.write(self.code.encode())\n out_file.write(self.setname.ljust(6).encode())\n out_file.write('{:12.5E}'.format(self.value).encode())\n out_file.write('{:12d}'.format(self.numnod).encode())\n out_file.write(self.text.ljust(20).encode())\n out_file.write('{:2d}'.format(self.ictype).encode())\n out_file.write('{:5d}'.format(self.numstep).encode())\n out_file.write(self.analys.ljust(10).encode())\n out_file.write('{:2d}'.format(self.format).encode())\n out_file.write('\\n'.encode())\n\n out_file.write(' '.encode()) # pad byte\n out_file.write('-4'.encode()) # key = -4\n out_file.write((' '*2).encode()) # pad bytes\n 
out_file.write(self.name.ljust(8).encode())\n if self.entities[0].ictype == 2 and self.ncomps == 3:\n out_file.write('{:5d}'.format(self.ncomps + 1).encode())\n else:\n out_file.write('{:5d}'.format(self.ncomps).encode())\n out_file.write('{:5d}'.format(self.irtype).encode())\n out_file.write('\\n'.encode()) # eol\n\n for entity in self.entities:\n out_file.write(' '.encode()) # pad byte\n out_file.write('-5'.encode())\n out_file.write((' '*2).encode()) # pad bytes\n out_file.write(entity.name.ljust(8).encode())\n out_file.write('{:5d}'.format(entity.menu).encode())\n out_file.write('{:5d}'.format(entity.ictype).encode())\n out_file.write('{:5d}'.format(entity.icind1).encode())\n if entity.ictype == 4:\n out_file.write('{:5d}'.format(entity.icind2).encode())\n elif entity.ictype == 2 and entity is self.entities[-1]:\n out_file.write('{:5d}'.format(entity.icind2).encode())\n out_file.write('{:5d}'.format(entity.iexist).encode())\n out_file.write(entity.icname.encode())\n else:\n out_file.write('{:5d}'.format(entity.iexist).encode())\n out_file.write('\\n'.encode()) # eol\n\n for result in self.results:\n if self.format < 2:\n num_lines = int(self.ncomps/(6 + 1)) + 1\n for j in range(num_lines):\n if j == 0:\n out_file.write(' -1'.encode()) # pad byte and key = -1\n if self.format == 0:\n out_file.write(\n '{:5d}'.format(result.node).encode())\n else:\n out_file.write(\n '{:10d}'.format(result.node).encode())\n else:\n out_file.write(' -2'.encode()) # pad byte and key = -2\n out_file.write(' '*(5*(self.format+1)).encode())\n k_start = j*6\n k_end = min(self.ncomps - k_start, (j+1)*6)\n for k in range(k_start, k_end):\n out_file.write(\n '{:12.5E}'.format(result.data[k]).encode())\n out_file.write('\\n'.encode()) # eol\n else:\n out_file.write(struct.pack('i', result.node))\n out_file.write(struct.pack('f'*self.ncomps, *result.data))\n\n if self.format < 2:\n out_file.write(' -3\\n'.encode()) # last record for ascii only" ]
[ "0.5468729", "0.5315217", "0.52870375", "0.5223776", "0.52032954", "0.5127768", "0.51020163", "0.50726545", "0.5038371", "0.50330997", "0.4992969", "0.49860293", "0.4952587", "0.49325135", "0.49083364", "0.4891398", "0.48681134", "0.48669374", "0.4853784", "0.48504323", "0.48431307", "0.48156464", "0.47671524", "0.47629976", "0.47552377", "0.47335625", "0.47219655", "0.47104535", "0.46941286", "0.4688331" ]
0.73409903
0
writes HSRL data in CfRadial netcdf format output_filename = where to write the data start_dt = datetime.datetime object first time to retrieve. minutes = how many minutes to process timeres_s = time resolution in seconds (native would be 2.5) altres_m = altitude resolution in meters maxtimeslice_td = datetime.timedelta object for amount of data processed (safe is 1 or 2 hours) instrument = hsrl id string (e.g. 'ahsrl','gvhsrl','nshsrl','mf2hsrl'). min_alt_m = minimum altitude in meters to display max_alt_m = maximum altitude in meters to display.
def write_cfradial(output_filename, start_dt, minutes, timeres_s = 5, altres_m =60, maxtimeslice_td=datetime.timedelta(seconds=30*60), instrument='gvhsrl', min_alt_m=0, max_alt_m=5000,store_calibrations=False): cdl = locate_file('hsrl_cfradial.cdl', forModule=lgtb) print 'CDL = ', cdl timeres_td = datetime.timedelta(seconds=timeres_s) netcdf = Dataset(output_filename, 'w', clobber=True) delta = datetime.timedelta(minutes=minutes) timeres_delta = datetime.timedelta(seconds=timeres_s) end_dt = start_dt + delta gen = dpl_hsrl(instrument) if store_calibrations: # to store calibrations, newer actors are needed, as well as the precall methods (FIXME better design) import maestro.netcdf_precall as npc args=[] kwargs=dict(output=netcdf,template=cdl,usecfradial=True,basetime=start_dt) x=npc.addConstantsToParms(npc.addCalibrationsToNetCDF()) hsrlnar=gen(start_dt, end_dt, timeres_timedelta=timeres_delta, min_alt_m=min_alt_m, max_alt_m=max_alt_m, altres_m=altres_m) x(hsrlnar,args,kwargs) nar=artists.dpl_netcdf_artist(hsrlnar,*args,**kwargs) #framestream,template,outputfilename=None,format=None,usecfradial=None,selected_bindings=None,output=None,forModule=None,withUnlimited=None,basetime=None,addAttributes={}): for x in nar: pass else: v = None try: # store each lidar record for tzg in gen(start_dt, end_dt, timeres_timedelta=timeres_delta, min_alt_m=min_alt_m, max_alt_m=max_alt_m, altres_m=altres_m): if v == None: v = cfr.DplCreateCfradial(cdl, netcdf, tzg) v.append_data(tzg) v.close() except RuntimeError, msg: print msg traceback.print_exc() print 'write_cfradial: could not process data for %s starting at %s' % \ (instrument, start_dt.strftime('%Y-%m-%d %H:%M:%S'))
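For reference, a hedged usage sketch of the write_cfradial function defined in the document field above (not part of the dataset row): the output file name, start time, and availability of 'gvhsrl' data are assumptions for illustration only, and the call requires the same hsrl/dpl libraries that the function itself imports.

import datetime

# hypothetical first time to retrieve (UTC); any date with available gvhsrl data would do
start_dt = datetime.datetime(2015, 6, 1, 12, 0, 0)

write_cfradial('gvhsrl_20150601_1200.nc',              # hypothetical output CfRadial netCDF file
               start_dt,
               minutes=60,                              # process one hour of data
               timeres_s=5,                             # 5 s time resolution (native is 2.5 s)
               altres_m=60,                             # 60 m altitude resolution
               maxtimeslice_td=datetime.timedelta(hours=1),
               instrument='gvhsrl',
               min_alt_m=0,
               max_alt_m=5000,
               store_calibrations=False)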
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createnc(ncfout,xlat,xlon,times=None,zvals=None,wsvals=None,\\\n wdvals=None,olvals=None,attbts=None,ftype=\"timeseries\",dims=[7,180,180]):\n nc_out=nc.Dataset(ncfout,'w',clobber=True)\n\n # Set Attributes to the File\n if attbts is not None:\n final_attbts={}\n # Define projection\n proj_lcc = pj_lcc = Proj(\"+proj=lcc +lat_1={TRUELAT1} +lat_2={TRUELAT2} +lat_0={MOAD_CEN_LAT} +lon_0={STAND_LON} +x_0=0 +y_0=0 +a=6370000 +b=6370000\".format(**attbts))\n\n # Get x&y of domain center\n xcen, ycen = pj_lcc(attbts['CEN_LON'], attbts['CEN_LAT'])\n\n for key in attbts:\n if str(key).find(\"STAG\") <= 0 : # Remove Staggered Grid Information\n final_attbts.update({key:attbts[key]})\n nc_out.setncatts(final_attbts)\n # Create a CRS Variable for the Projection (GIS Readability)\n crsv=nc_out.createVariable('crs','c')\n crsv.semi_major_axis = 6370000.0\n crsv.inverse_flattening = 0.0\n crsv.grid_mapping_name = \"lambert_conformal_conic\"\n crsv.longitude_of_central_meridian = attbts[\"STAND_LON\"]\n crsv.false_easting = 0.0\n crsv.false_northing = 0.0\n crsv.latitude_of_projection_origin = attbts[\"MOAD_CEN_LAT\"]\n crsv.standard_parallel = [attbts[\"TRUELAT1\"],attbts[\"TRUELAT2\"]]\n crsv.longitude_of_prime_meridian = 0.0\n crsv.proj = proj_lcc.srs\n\n\n\n # Override Institution and Experiment\n nc_out.INSTITUTION=INSTITUTION\n nc_out.EXPERIMENT=EXPERIMENT\n nc_out.Conventions=\"CF-1.6\"\n\n # Create Dimensions First\n if ftype==\"timeseries\":\n nc_out.TITLE='Timeseries of the New European Wind Atlas from WRF V3.8.1'\n nc_out.createDimension('time',None)\n nc_out.createDimension('DateStrLen',19)\n nc_out.createDimension('height',dims[0])\n nc_out.createDimension('south_north',dims[1])\n nc_out.createDimension('west_east',dims[2])\n # Create Time Vector as Integer\n timesn = nc_out.createVariable('time','i8',('time',))\n timesn.units = \"minutes since 1900-01-01 00:00:00.0\"\n timesn.calendar = \"gregorian\"\n timesn.long_name = \"Time\"\n timesn.standard_name = \"time\"\n timesn[:] = nc.date2num(createdatv(times),units=timesn.units,calendar=timesn.calendar)\n # Create additional Time Vector as Character\n timesc = nc_out.createVariable('Times', 'c', ('time','DateStrLen'))\n timesc.format = \"YYYY-MM-DD_HH:MM:SS\"\n timesc.long_name = \"Time\"\n timesc[:] = times[:]\n # Height\n hgts = nc_out.createVariable('height','f4',('height',))\n hgts.units=\"m\"\n hgts.long_name=\"Height above Ground\"\n hgts.standard_name=\"height\"\n hgts[:] = zvals\n # y\n south_north = nc_out.createVariable('south_north','f4',('south_north',))\n south_north.long_name = \"y-coordinate in Cartesian system\"\n south_north.units = \"m\"\n\n dy = attbts[\"DY\"]\n ny = attbts[\"SOUTH-NORTH_PATCH_END_UNSTAG\"]\n ymin = ycen - dy * (ny - 1) / 2\n s_n = np.linspace(0, ny-1, ny) * dy + ymin\n south_north[:] = s_n\n\n # x\n west_east = nc_out.createVariable('west_east','f4',('west_east',))\n west_east.long_name = \"x-coordinate in Cartesian system\"\n west_east.units = \"m\"\n\n dx = attbts[\"DX\"]\n nx = attbts[\"WEST-EAST_PATCH_END_UNSTAG\"]\n xmin = xcen - dx * (nx - 1) / 2\n e_w = np.linspace(0, nx-1, nx) * dx + xmin\n west_east[:] = e_w\n\n elif ftype==\"roughness\":\n nc_out.title='NEWA Roughness'\n nc_out.createDimension('south_north',dims[0])\n nc_out.createDimension('west_east',dims[1])\n\n elif ftype==\"tabfile\":\n nc_out.title='NEWA WasP Tab File'\n nc_out.createDimension('south_north',dims[0])\n nc_out.createDimension('west_east',dims[1])\n nc_out.createDimension('sector',dims[2])\n 
nc_out.createDimension('wind',dims[3])\n nc_out.createDimension('stab',dims[4])\n\n # Wind Speed Class\n wscl = nc_out.createVariable('wspdCl','f4',('wind',))\n wscl.units=\"ms-1\"\n wscl.long_name=\"Velocity of bin centre\"\n wscl[:] = wsvals\n\n # Wind Speed Class\n wdcl = nc_out.createVariable('wdirCl','f4',('sector',))\n wdcl.units=\"ms-1\"\n wdcl.long_name=\"Velocity of bin centre\"\n wdcl[:] = wdvals\n\n # Stability\n lcl = nc_out.createVariable('Ltypical','f4',('stab',))\n lcl.units=\"m\"\n lcl.long_name=\"L typical\"\n lcl[:] = olvals\n\n # Lat and Lon\n lats = nc_out.createVariable(\"XLAT\", 'f4', ('south_north','west_east'), zlib=True,complevel=9)\n lats[:] = xlat[:]\n lats.units=\"degree_north\"\n lats.long_name=\"Center Latitude of Grid Cell\"\n lats.standard_name=\"latitude\"\n lons = nc_out.createVariable(\"XLON\", 'f4', ('south_north','west_east'), zlib=True,complevel=9)\n lons[:] = xlon[:]\n lons.units=\"degree_east\"\n lons.long_name=\"Center Longitude of Grid Cell\"\n lons.standard_name=\"longitude\"\n nc_out.close()\n return(None)", "def write_flat_netcdf(outFile,time,frac,uh,x,y,xc,yc,inGlobs,inAttrs):\n f = Dataset(outFile, 'w', format='NETCDF4')\n\n # set dimensions\n times = f.createDimension('time', len(time))\n npoints = f.createDimension('npoints', len(frac))\n \n # initialize variables\n times = f.createVariable('time','f8',('time',))\n fracs = f.createVariable('fraction','f8',('npoints',))\n xis = f.createVariable('xi','i4',('npoints',))\n yis = f.createVariable('yi','i4',('npoints',))\n xcs = f.createVariable('xc','f8',('npoints',))\n ycs = f.createVariable('yc','f8',('npoints',))\n uhs = f.createVariable('unit_hydrograph','f8',('time','npoints',))\n \n # deal with attributes\n f.description = 'Flattened uh/fraction grid file'\n f.history = 'Created ' + tm.ctime(tm.time())\n f.velocity = inGlobs['velocity']\n f.diffusion = inGlobs['diffusion']\n f.outlet_lon = inGlobs['outlet_lon']\n f.outlet_lat = inGlobs['outlet_lat']\n f.outlet_y = inGlobs['outlet_y']\n f.outlet_x = inGlobs['outlet_x']\n try:\n f.includes = inGlobs['includes']\n except:\n pass\n \n times.standard_name = inAttrs['time']['standard_name']\n times.units = inAttrs['time']['units']\n times.calendar = inAttrs['time']['calendar']\n \n try:\n fracs.units = inAttrs['fraction']['units']\n except:\n fracs.units = '%'\n fracs.description = inAttrs['fraction']['description']\n \n uhs.units = inAttrs['unit_hydrograph']['units']\n uhs.description = inAttrs['unit_hydrograph']['description']\n \n xis.standard_name = 'x_ind'\n xis.description = 'x index location'\n \n yis.standard_name = 'y_ind'\n yis.description = 'y index location'\n \n xcs.standard_name =inAttrs['xc']['standard_name']\n xcs.long_name = inAttrs['xc']['long_name']\n xcs.units =inAttrs['xc']['units']\n \n ycs.standard_name =inAttrs['yc']['standard_name']\n ycs.long_name = inAttrs['yc']['long_name']\n ycs.units =inAttrs['yc']['units']\n \n times[:] = time\n fracs[:] = frac\n uhs[:,:] = uh\n xis[:] = x\n yis[:] = y\n xcs[:] = xc\n ycs[:] = yc\n\n f.close()\n \n return", "def write_netcdf(file, lons, lats, times, hydrographs, fractions, loc, grid_id,\n inds, Flist, velocity, diffusion, fill_value, verbose):\n f = Dataset(file,'w', format='NETCDF4')\n\n # set dimensions\n time = f.createDimension('time', None)\n lon = f.createDimension('lon', (len(lons)))\n lat = f.createDimension('lat', (len(lats)))\n\n # initialize variables\n time = f.createVariable('time','f8',('time',))\n lon = f.createVariable('lon','f8',('lon',))\n lat = 
f.createVariable('lat','f8',('lat',))\n fraction = f.createVariable('fraction','f8',('lat','lon',),fill_value=fill_value)\n UHS = f.createVariable('unit_hydrograph','f8',('time','lat','lon',),fill_value=fill_value)\n\n # write attributes for netcdf\n f.description = 'Aggregated UH_S and Fraction Vars'\n f.history = 'Created: {}\\n'.format(tm.ctime(tm.time()))\n f.history += ' '.join(sys.argv) + '\\n'\n f.source = sys.argv[0] # prints the name of script used\n f.velocity = velocity\n f.diffusion = diffusion\n f.outlet_id = str(grid_id.astype(np.int64))\n f.outlet_y= str(inds[0].astype(np.int64))\n f.outlet_x = str(inds[1].astype(np.int64)) # this is change is a cdo work around. Othewise cdo removes the attribute. \n f.outlet_lat = loc[0]\n f.outlet_lon = loc[1]\n f.includes = ', '.join(Flist)\n\n lat.long_name = 'latitude coordinate'\n lat.standard_name = 'latitude'\n lat.units = 'degrees_north'\n\n lon.long_name = 'longitude coordinate'\n lon.standard_name = 'longitude'\n lon.units = 'degrees_east'\n\n time.units = 'seconds since 0001-1-1 0:0:0'\n time.calendar = 'noleap'\n time.longname = 'time'\n time.type_prefered = 'float'\n time.description = 'Seconds since initial impulse'\n\n UHS.units = 'unitless'\n UHS.description = 'unit hydrograph for each grid cell with respect to downstream grid location'\n \n fraction.units = 'unitless'\n fraction.description = 'fraction of grid cell contributing to guage location'\n\n # write data to variables initialized above\n time[:]= times\n lon[:] = lons\n lat[:] = lats\n UHS[:,:,:] = hydrographs\n fraction[:,:]= fractions\n f.close()", "def write_netcdf(file,xc,xc_bnd,yc,yc_bnd,times,hydrographs,fractions,loc,Flist,velocity,diffusion,NODATA,verbose):\n \n f = Dataset(file,'w', format='NETCDF4')\n\n # set dimensions\n time = f.createDimension('time', None)\n x = f.createDimension('x',xc.shape[1])\n y = f.createDimension('y',xc.shape[0])\n nv4 = f.createDimension('nv4',4)\n\n # initialize variables\n time = f.createVariable('time','f8',('time',))\n xcs = f.createVariable('xc','f8',('y','x',))\n ycs = f.createVariable('yc','f8',('y','x',))\n xc_bnds = f.createVariable('xc_bnds','f8',('y','x','nv4',))\n yc_bnds = f.createVariable('yc_bnds','f8',('y','x','nv4',))\n fraction = f.createVariable('fraction','f8',('y','x',),fill_value=NODATA)\n UHS = f.createVariable('unit_hydrograph','f8',('time','y','x',),fill_value=NODATA)\n\n # write attributes for netcdf\n f.description = 'Aggregated UH_S and Fraction Vars for full RASM domain'\n f.history = 'Created: {}\\n'.format(tm.ctime(tm.time()))\n f.history += ' '.join(sys.argv) + '\\n'\n f.source = sys.argv[0] # prints the name of script used\n f.velocity = velocity\n f.diffusion = diffusion\n f.outlet_lon = loc[0]\n f.outlet_lat = loc[1]\n f.includes = str(len(Flist))+' files'\n\n ycs.long_name = 'latitude of grid cell center'\n ycs.standard_name = 'latitude'\n ycs.units = 'degrees_north'\n ycs._CoordinateAxisType = 'Lat'\n ycs.bounds = 'yc_bnds'\n\n xcs.long_name = 'longitude of grid cell center'\n xcs.standard_name = 'longitude'\n xcs.units = 'degrees_east'\n xcs._CoordinateAxisType = 'Lon'\n xcs.bounds = 'xc_bnds'\n\n time.standard_name = 'time'\n time.units = 'seconds'\n time.description = 'Seconds since initial impulse'\n time.calendar = 'proleptic_gregorian'\n\n UHS.units = 'unitless'\n UHS.description = 'unit hydrograph for each grid cell with respect to basin outlet location'\n \n fraction.units = 'unitless'\n fraction.description = 'fraction of grid cell contributing to guage location'\n\n # write data to 
variables initialized above\n time[:]= times\n xcs[:,:] = xc\n ycs[:,:] = yc\n xc_bnds[:,:,:] = xc_bnd\n yc_bnds[:,:,:] = yc_bnd\n UHS[:,:,:] = hydrographs\n fraction[:,:]= fractions\n f.close()\n\n return", "def ncwrt_retrieval_obs_s1(retr_setup, outname=None):\n\n #-- set name of file to be generated\n act_outname = outname if outname!=None else 'obs_s1.nc'\n msg = \"Start writing configuration file ***{}***...\".format(act_outname)\n FileLogger.info(msg)\n\n #-- compression settings\n zlev = retr_setup.zlev\n use_zlib = retr_setup.use_zlib\n\n #-- retrieval settings\n s1_table = retr_setup.obs_dct['S1']\n timepts = s1_table.geom.date_utc\n npts = len(timepts)\n s1_satid = np.array(s1_table.sat_id_lst, dtype=str)\n s1_data = s1_table.data\n s1_dataunc = s1_table.dataunc\n nt,npol = s1_data.shape\n\n #-- temporal settings, create time-values (time-since)\n time_start, time_end = datelst_get_month_aligned_bounds(timepts)\n time_coverage_start = time_start.strftime('%Y-%m-%dT%H:%M:%S')\n time_coverage_end = time_end.strftime('%Y-%m-%dT%H:%M:%S')\n ref_time = dt.datetime(timepts[0].year,1,1) #January1st in year of first point in time\n time_unit = 'seconds since {}'.format(ref_time.strftime('%Y-%m-%dT%H:%M:%S'))\n time_values = nc4.date2num(timepts, time_unit)\n\n #-- ensure directory exists\n mkdirp_smart(os.path.dirname(act_outname))\n\n #-- open file pointer\n ncfp = nc4.Dataset(act_outname, 'w')\n #-- add dimensions\n d1 = ncfp.createDimension('npoints',npts)\n d2 = ncfp.createDimension('npol',npol)\n\n #-- time-value\n ncvar = ncfp.createVariable( 'time', np.float64, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('standard_name','time')\n ncvar.setncattr('long_name','time')\n ncvar.setncattr('units', time_unit)\n ncvar[:] = time_values[:]\n\n unit_one = np.float64(1)\n # backscatter\n ncvar = ncfp.createVariable( 'backscatter', np.float64, ('npoints','npol'),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'backscatter in VH and VV polarisation')\n comment = \"VH is associated to npol=0, VV to npol=1.\"\n comment += \" linear units are used (not [dB]).\"\n ncvar.setncattr('comment', comment)\n ncvar.setncattr('units',unit_one)\n ncvar.setncattr('missing_value', retr_setup.obs_fill_value)\n ncvar[:,:] = s1_data[:,:]\n\n # backscatter uncertainty\n ncvar = ncfp.createVariable( 'backscatter_unc', np.float64, ('npoints','npol'),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'backscatter uncertainty in VH and VV polarisation')\n comment = \"uniform uncertainty of {} [dB] was applied on the observed backscatter\".format(\n retr_setup.s1_unc_db)\n ncvar.setncattr('comment', comment)\n ncvar.setncattr('units',unit_one)\n ncvar.setncattr('missing_value', retr_setup.obs_fill_value)\n ncvar[:,:] = s1_dataunc[:,:]\n\n # satellite identifier\n ncvar = ncfp.createVariable( 'satellite_id', str, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'satellite identifer')\n ncvar[:] = s1_satid[:]\n\n #-- global attributes\n ncfp.setncattr('creator_name',\"The Inversion Lab, Hamburg, Germany\")\n ncfp.setncattr('creator_email', \"Michael.Vossbeck(at)Inversion-Lab.com\")\n ncfp.setncattr('netcdf_libversion',\"{}\".format(nc4.__netcdf4libversion__))\n ncfp.setncattr('date_created',\"{}\".format(dt.datetime.utcnow().isoformat()))\n ncfp.setncattr('time_coverage_start',time_coverage_start)\n ncfp.setncattr('time_coverage_end',time_coverage_end)\n\n #-- close file pointer\n ncfp.close()\n\n # logging\n msg = \"...writing ***{}*** 
DONE\".format(act_outname)\n FileLogger.info(msg)", "def write_netcdf(ncinfo):\r\n\t# ========== Create new netcdf ==========\r\n\tNAME=nc.netcdf_file(ncinfo.fname,'w')\r\n\t\r\n\t# ========== Set up the Dimensions ==========\r\n\tNAME.createDimension('time', None) #Question: Shouldn't time be unlimited?\r\n\t# NAME.createDimension('lev',11)\r\n\tNAME.createDimension('lat',ncinfo.lat)\r\n\tNAME.createDimension('lon',ncinfo.lon)\r\n\t\r\n\t# ========== Setup the Variables ==========\r\n\ttime=NAME.createVariable('time',np.float64,('time',))\r\n\t# lev=NAME.createVariable('lev',np.int32,('lev',))\r\n\tlat=NAME.createVariable('lat',np.float64,('lat',))\r\n\tlon=NAME.createVariable('lon',np.float64,('lon',))\r\n\t# VAR=NAME.createVariable(str(VAR),np.float64,('time','lev','lat','lon'),)\r\n\tVAR=NAME.createVariable(ncinfo.var_name,np.float64,('time','lat','lon'),)\r\n\t# setting the missing value is super important for the file to be cdo readable\r\n\tsetattr(VAR,'missing_value',ncinfo.fill)\r\n\tsetattr(VAR, 'standard_name', ncinfo.var_lname) \r\n\t\r\n\t# ========== Set the units ==========\r\n\ttime.units= 'day as %Y%m%d'\r\n\t# lev.units = '-'\r\n\tlat.units = 'degrees_north'\r\n\tlon.units = 'degrees_east'\r\n\tVAR.units = ncinfo.units\r\n\r\n\t# ========== Add data ==========\r\n\t\r\n\t# creates time vector using the date_range function\r\n\t# time[:]=[t for t in date_range('20110101.5','20111231.5')] \r\n\t# lev[:]=PFT_vector\r\n\tlat[:] = ncinfo.latitudes\r\n\tlon[:] = ncinfo.longitudes\r\n\t# THis is a Bodge for singe variable data\r\n\tVAR[:] = ncinfo.data\r\n\r\n\t#Add global attributes\r\n\tNAME.description = ncinfo.description\r\n\tNAME.history = ncinfo.history\r\n\r\n\t# WHATS MISSING\r\n\t# metadata a whole bunch of metadata\r\n\t# the standard_name and long_name of the variables\r\n\r\n\t# ========== Close the netcdf ==========\r\n\tNAME.close()", "def ncwrt_retrieval_config( retr_setup, outname=None ):\n\n #-- set name of file to be generated\n act_outname = outname if outname!=None else 'retrconfig.nc'\n msg = \"Start writing configuration file ***{}***...\".format(act_outname)\n FileLogger.info(msg)\n\n\n #-- compression settings\n zlev = retr_setup.zlev\n use_zlib = retr_setup.use_zlib\n\n #--\n schedule_dct = retr_setup.schedule_dct\n statevector = retr_setup.prstate\n #-- turn list into array\n sim_typ = np.array(schedule_dct['sim_typ'], dtype=np.int32)\n timepts = schedule_dct['date_utc']\n nstvar,npts = statevector.shape\n #-- overpass geometries SZA,SAA,VZA,VAA\n ivgeom = np.empty((npts,4), dtype=np.float64)\n ivgeom[:,0] = schedule_dct['sza']\n ivgeom[:,1] = schedule_dct['saa']\n ivgeom[:,2] = schedule_dct['vza']\n ivgeom[:,3] = schedule_dct['vaa']\n\n #-- temporal settings, create time-values (time-since)\n time_start, time_end = datelst_get_month_aligned_bounds(timepts)\n time_coverage_start = time_start.strftime('%Y-%m-%dT%H:%M:%S')\n time_coverage_end = time_end.strftime('%Y-%m-%dT%H:%M:%S')\n ref_time = dt.datetime(timepts[0].year,1,1) #January1st in year of first point in time\n time_unit = 'seconds since {}'.format(ref_time.strftime('%Y-%m-%dT%H:%M:%S'))\n time_values = nc4.date2num(timepts, time_unit)\n\n #-- ensure directory exists\n mkdirp_smart(os.path.dirname(act_outname))\n\n #-- open file pointer\n ncfp = nc4.Dataset(act_outname, 'w')\n #-- add dimensions\n d1 = ncfp.createDimension('npoints',npts)\n d2 = ncfp.createDimension('ngeo',4)\n\n #-- time-value\n ncvar = ncfp.createVariable( 'time', statevector.dtype, ('npoints',),\n zlib=use_zlib, 
complevel=zlev )\n ncvar.setncattr('standard_name','time')\n ncvar.setncattr('long_name','time')\n ncvar.setncattr('units', time_unit)\n ncvar[:] = time_values[:]\n\n #-- simulation type\n ncvar = ncfp.createVariable( 'sim_typ', sim_typ.dtype, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar[:] = sim_typ[:]\n ncvar.setncattr('long_name','simulation_type')\n ncvar.setncattr('comment', 'integer value which is to be bit-interpreted')\n ncvar.setncattr('nobits_set', 'time-point with other state')\n ncvar.setncattr('bit0_is_set', 'time-point for S1 simulation')\n ncvar.setncattr('bit1_is_set', 'time-point for S2 simulation')\n ncvar.setncattr('bit2_is_set', 'time-point for S1A simulation')\n ncvar.setncattr('bit3_is_set', 'time-point for S1B simulation')\n ncvar.setncattr('bit4_is_set', 'time-point for S2A simulation')\n ncvar.setncattr('bit5_is_set', 'time-point for S2B simulation')\n \n #-- illumination-view geometry\n ncvar = ncfp.createVariable( 'ivgeom', ivgeom.dtype, ('npoints','ngeo'),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('sza','igeo: 0')\n ncvar.setncattr('saa','igeo: 1')\n ncvar.setncattr('vza','igeo: 2')\n ncvar.setncattr('vaa','igeo: 3')\n ncvar[:,:] = ivgeom[:,:]\n \n #-- global attributes\n ncfp.setncattr('creator_name',\"The Inversion Lab, Hamburg, Germany\")\n ncfp.setncattr('creator_email', \"Michael.Vossbeck(at)Inversion-Lab.com\")\n ncfp.setncattr('netcdf_libversion',\"{}\".format(nc4.__netcdf4libversion__))\n ncfp.setncattr('date_created',\"{}\".format(dt.datetime.utcnow().isoformat()))\n ncfp.setncattr('time_coverage_start',time_coverage_start)\n ncfp.setncattr('time_coverage_end',time_coverage_end)\n\n #-- close file pointer\n ncfp.close()\n\n # logging\n msg = \"...writing ***{}*** DONE\".format(act_outname)\n FileLogger.info(msg)", "def ncwrt_retrieval_obs_s2(retr_setup, outname=None):\n\n #-- set name of file to be generated\n act_outname = outname if outname!=None else 'obs_s2.nc'\n msg = \"Start writing configuration file ***{}***...\".format(act_outname)\n FileLogger.info(msg)\n\n #-- compression settings\n zlev = retr_setup.zlev\n use_zlib = retr_setup.use_zlib\n\n #-- retrieval settings\n s2_table = retr_setup.obs_dct['S2']\n timepts = s2_table.geom.date_utc\n npts = len(timepts)\n s2_satid = np.array(s2_table.sat_id_lst, dtype=str)\n s2_data = s2_table.data\n s2_dataunc = s2_table.dataunc\n nt,nbands = s2_data.shape\n\n #-- temporal settings, create time-values (time-since)\n time_start, time_end = datelst_get_month_aligned_bounds(timepts)\n time_coverage_start = time_start.strftime('%Y-%m-%dT%H:%M:%S')\n time_coverage_end = time_end.strftime('%Y-%m-%dT%H:%M:%S')\n ref_time = dt.datetime(timepts[0].year,1,1) #January1st in year of first point in time\n time_unit = 'seconds since {}'.format(ref_time.strftime('%Y-%m-%dT%H:%M:%S'))\n time_values = nc4.date2num(timepts, time_unit)\n\n #-- ensure directory exists\n mkdirp_smart(os.path.dirname(act_outname))\n\n #-- open file pointer\n ncfp = nc4.Dataset(act_outname, 'w')\n #-- add dimensions\n d1 = ncfp.createDimension('npoints',npts)\n d2 = ncfp.createDimension('nbands',nbands)\n\n #-- time-value\n ncvar = ncfp.createVariable( 'time', np.float64, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('standard_name','time')\n ncvar.setncattr('long_name','time')\n ncvar.setncattr('units', time_unit)\n ncvar[:] = time_values[:]\n\n #-- unit (in correct type)\n unit_one = np.array([1]).astype(s2_data.dtype)[0]\n\n # BRF\n ncvar = ncfp.createVariable( 'brf', np.float64, 
('npoints','nbands'),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'BRF top-of-canopy reflectances')\n ncvar.setncattr('units',unit_one)\n ncvar.setncattr('missing_value', retr_setup.obs_fill_value)\n ncvar[:,:] = s2_data[:,:]\n\n # BRF uncertainty\n ncvar = ncfp.createVariable( 'brf_unc', np.float64, ('npoints','nbands'),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'BRF top-of-canopy reflectances')\n ncvar.setncattr('units',unit_one)\n ncvar.setncattr('missing_value', retr_setup.obs_fill_value)\n comment = \"BRF uncertainties are derived as {:.2f}[%] relative uncertainty \".format(\n 100.*retr_setup.s2_relunc)\n comment += \"and an uncertainty floor value of {:.4f} is applied.\".format(retr_setup.s2_uncfloor)\n ncvar.setncattr('comment', comment)\n ncvar[:,:] = s2_dataunc[:,:]\n\n # satellite identifier\n ncvar = ncfp.createVariable( 'satellite_id', str, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'satellite identifer')\n ncvar[:] = s2_satid[:]\n\n #-- global attributes\n ncfp.setncattr('creator_name',\"The Inversion Lab, Hamburg, Germany\")\n ncfp.setncattr('creator_email', \"Michael.Vossbeck(at)Inversion-Lab.com\")\n ncfp.setncattr('netcdf_libversion',\"{}\".format(nc4.__netcdf4libversion__))\n ncfp.setncattr('date_created',\"{}\".format(dt.datetime.utcnow().isoformat()))\n ncfp.setncattr('time_coverage_start',time_coverage_start)\n ncfp.setncattr('time_coverage_end',time_coverage_end)\n\n #-- close file pointer\n ncfp.close()\n\n # logging\n msg = \"...writing ***{}*** DONE\".format(act_outname)\n FileLogger.info(msg)", "def _write_nemo_hr_file(rpn_hr_ds_path, nemo_hr_ds_path):\n with xarray.open_dataset(rpn_hr_ds_path) as rpn_hr:\n logging.debug(\n f\"calculating specific humidity & incoming longwave radiation from {rpn_hr_ds_path}\"\n )\n qair, ilwr, rh = _calc_qair_ilwr(rpn_hr)\n u_out, v_out = _rotate_winds(rpn_hr)\n data_vars = {\n \"nav_lon\": rpn_hr.nav_lon,\n \"nav_lat\": rpn_hr.nav_lat,\n # [:, 0] drops z dimension that NEMO will not tolerate\n \"qair\": qair[:, 0],\n \"RH_2maboveground\": rh[:, 0],\n \"therm_rad\": ilwr[:, 0],\n \"u_wind\": u_out[:, 0],\n \"v_wind\": v_out[:, 0],\n # \"LHTFL_surface\": ** needs to be calculated**,\n }\n nemo_rpn_vars = (\n (\"atmpres\", \"PN\"),\n (\"percentcloud\", \"NT\"),\n (\"PRATE_surface\", \"RT\"),\n (\"precip\", \"PR\"),\n (\"solar\", \"FB\"),\n (\"tair\", \"TT\"),\n )\n missing_vars = \"\"\n for nemo_var, rpn_var in nemo_rpn_vars:\n try:\n # [:, 0] drops z dimension that NEMO will not tolerate\n data_vars.update({nemo_var: getattr(rpn_hr, rpn_var)[:, 0]})\n except AttributeError:\n # Variable is missing from RPN dataset, so provide a placeholder DataArray\n # full of NaNs that we will deal with later via interpolation\n data_vars.update(\n {nemo_var: xarray.DataArray(numpy.full_like(qair[:, 0], numpy.nan))}\n )\n missing_vars = (\n \", \".join((missing_vars, nemo_var)) if missing_vars else nemo_var\n )\n logging.warning(f\"missing RPN variable {rpn_var} from {rpn_hr_ds_path}\")\n nemo_hr = xarray.Dataset(\n data_vars=data_vars, coords=rpn_hr.coords, attrs=rpn_hr.attrs\n )\n nemo_hr.attrs[\"history\"] += (\n f\"\\n{arrow.now().format('ddd MMM DD HH:mm:ss YYYY')}: \"\n f\"Add specific and relative humidity and incoming longwave radiation variables from \"\n f\"correlations\"\n )\n if missing_vars:\n nemo_hr.attrs[\"missing_variables\"] = missing_vars\n _add_vars_metadata(nemo_hr)\n _write_netcdf_file(nemo_hr, nemo_hr_ds_path)", "def 
time_calibration(input_file):\n original_path = os.getcwd()\n save_path = input_file['save_path']\n #change to save data reduction directory\n os.chdir(save_path)\n print '\\n Reading the list of images ....\\n'\n planet = input_file['exoplanet'] #set exoplanet name\n images = sorted(glob.glob('AB'+planet+'*.fits'))\n print images\n #include de RA,DEC and epoch of the exoplanet\n RA,DEC,epoch = input_file['RA'],input_file['DEC'],input_file['epoch']\n #obtain ST JD using iraf task and introduce in the header\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n if int(split(hdr['UT'],':')[0]) < int(hdr['timezone']):\n new_date = use.yesterday(hdr['date-obs'])\n #print images[i], new_date\n else:\n new_date = hdr['date-obs']\n year,month,day = split(new_date,'-')\n iraf.asttimes(year=year,month=month,day=day,time=hdr['loctime'],obs=input_file['observatory'])\n JD = iraf.asttimes.jd #obtain julian date\n LMST = iraf.asttimes.lmst #obtain the sideral time\n LMST = use.sexagesimal_format(LMST) #convert sideral time in sexagesimal format\n iraf.hedit(images[i],'ST',LMST,add='yes',verify='no',show='no',update='yes') #create the ST keyword in the header\n iraf.ccdhedit(images[i],'LMST',LMST,type='string') #include the mean sideral time in the header\n iraf.ccdhedit(images[i],'JD',JD,type='string') #include de julian date in the header\n #include RA, and DEC of the object in your header\n iraf.ccdhedit(images[i],\"RA\",RA,type=\"string\") #include right ascention in the header\n iraf.ccdhedit(images[i],\"DEC\",DEC,type=\"string\") #include declination in the header\n iraf.ccdhedit(images[i],\"epoch\",epoch,type=\"string\") #include epoch in the header\n # use.update_progress((i+1.)/len(images))\n print '\\n Setting airmass ....\\n'\n for i in range(len(images)):\n print '# ',images[i]\n #iraf.hedit(images[i],'airmass',airmass,add='yes')\n #iraf.hedit(images[i],'HJD',HJD,add='yes')\n iraf.setairmass.observatory = input_file['observatory']\n iraf.setairmass(images[i])\n iraf.setjd.time = 'ut'\n iraf.setjd(images[i])\n print '\\n.... 
done.\\n'\n #export information\n hjd, jd, airmass, st = [],[],[],[]\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n hjd.append(hdr['HJD'])\n jd.append(hdr['JD'])\n airmass.append(hdr['airmass'])\n st.append(hdr['st'])\n #saving the data\n data = DataFrame([list(hjd),list(jd),list(st),list(airmass)]).T\n data.columns = ['HJD','JD','ST','Airmass']\n data.to_csv('results_iraf_calibrations.csv')\n #change to workings directory\n os.chdir(original_path)\n return", "def output_netcdf(forecast,proj_dict,grid_dict,start_hour,end_hour,\n stride,size,run_date,target_dataset,smoothing,config):\n for d,date in enumerate(run_date):\n date_outpath = config.forecast_out_path+'20{0}/netcdf/'.format(\n date)\n \n if not os.path.exists(date_outpath):\n os.makedirs(date_outpath)\n \n map_data = make_proj_grids(proj_dict,grid_dict)\n lons = map_data[\"lon\"]\n lats = map_data[\"lat\"]\n \n filtered_forecast = gaussian_filter(forecast[d],smoothing,mode='constant')\n \n filename = date_outpath + \"{0}_{6}_Hail_{1}_Cali_NMEP_{2}mm_{3}_Hours_{4}-{5}.nc\".format(\n config.ensemble_name,\n target_dataset,\n size,\n date,\n start_hour,end_hour,config.forecast_model_names)\n\n \n out_file = Dataset(filename, \"w\")\n out_file.createDimension(\"x\", filtered_forecast.shape[0])\n out_file.createDimension(\"y\", filtered_forecast.shape[1])\n out_file.createVariable(\"Longitude\", \"f4\", (\"x\", \"y\"))\n out_file.createVariable(\"Latitude\", \"f4\",(\"x\", \"y\"))\n out_file.createVariable(\"Data\", \"f4\", (\"x\", \"y\"))\n out_file.variables[\"Longitude\"][:,:] = lons\n out_file.variables[\"Latitude\"][:,:] = lats\n out_file.variables[\"Data\"][:,:] = filtered_forecast\n out_file.projection = proj_dict[\"proj\"]\n out_file.lon_0 = proj_dict[\"lon_0\"]\n out_file.lat_0 = proj_dict[\"lat_0\"]\n out_file.lat_1 = proj_dict[\"lat_1\"]\n out_file.lat_2 = proj_dict[\"lat_2\"]\n out_file.close()\n \n print(\"Writing to \" + filename)\n return", "def main(argv):\n\n inputfile = ''\n outputfile = ''\n try:\n opts, args = getopt.getopt(argv,\"hi:o:\",[\"ifile=\",\"ofile=\"])\n except getopt.GetoptError:\n print( 'usage: fix_icar_time.py -i <inputfile> -o <outputfile>' )\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print( 'use: fix_icar_time.py -i <inputfile> -o <outputfile>' )\n sys.exit()\n elif opt in (\"-i\", \"--ifile\"):\n inputfile = arg\n elif opt in (\"-o\", \"--ofile\"):\n outputfile = arg\n\n print( 'Input file is ', inputfile)\n print( 'Output file is ', outputfile)\n\n\n #### Option A: less elegant, but more robust: ####\n #_______ open the icar file: ______\n FIX = xr.open_dataset( inputfile )\n \n\n #_______ create the correct times: ______\n tstring = inputfile[inputfile.find('out_')+4:inputfile.rfind(\".nc\")]\n\n times2 = pd.date_range(tstring[:10], periods=len(FIX.time), freq='H')\n \n FIX['time'] = times2\n \n \n #_______ Write the fixed Dataset to nc file: _________\n \n if outputfile == '':\n out_path = inputfile\n else:\n out_path = outputfile \n \n FIX.to_netcdf( path=out_path, mode='w', encoding={'time': {'dtype': 'i4'}}) \n\n\n\n ########### Option B: only modify units, but this doesnt always work as sometimes it is only the first hour, sometimes more hours. 
########\n # #_______ open the icar file: ______\n # FIX = xr.open_dataset( inputfile , decode_times=False) \n \n # #_______ create the correct times: ______\n # units = FIX.time.units\n # tstring = inputfile[inputfile.find('out_')+4:inputfile.rfind(\".nc\")]\n # # create right time based on file name:\n # FIX['time'].attrs['units'] = units[:units.find('since')+6] + tstring[:10]", "def optimize_dcr(dg):\n # files to consider. fixme: right now only works with one file\n sto = lh5.Store()\n lh5_dir = os.path.expandvars(dg.config['lh5_dir'])\n raw_list = lh5_dir + dg.fileDB['raw_path'] + '/' + dg.fileDB['raw_file']\n f_raw = raw_list.values[0] \n \n tb_raw = 'ORSIS3302DecoderForEnergy/raw/'\n tb_data = sto.read_object(tb_raw, f_raw)\n \n cycle = dg.fileDB['cycle'].values[0]\n f_results = f'./temp_{cycle}.h5'\n \n write_output = True\n \n # adjust dsp config \n with open('opt_dcr.json') as f:\n dsp_config = json.load(f, object_pairs_hook=OrderedDict)\n # pprint(dsp_config)\n # exit()\n \n # set dcr parameters\n # rise, flat, dcr_tstart = 200, 1000, 'tp_0+1.5*us' # default\n # dcr_rise, dcr_flat, dcr_tstart = 100, 3000, 'tp_0+3*us' # best so far?\n dcr_rise, dcr_flat, dcr_tstart = 100, 2500, 'tp_0+1*us'\n dsp_config['processors']['dcr_raw']['args'][1] = dcr_rise\n dsp_config['processors']['dcr_raw']['args'][2] = dcr_flat\n dsp_config['processors']['dcr_raw']['args'][3] = dcr_tstart\n \n # set trap energy parameters\n # ene_rise, ene_flat = \"2*us\", \"1*us\" # best? from optimize_trap\n ene_rise, ene_flat = \"10*us\", \"5*us\"\n dsp_config['processors']['wf_trap']['args'][1] = ene_rise\n dsp_config['processors']['wf_trap']['args'][2] = ene_flat\n \n # adjust pole-zero constant\n dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '64.4*us'\n # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '50*us'\n # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '100*us'\n \n # run dsp\n print('Running DSP ...')\n t_start = time.time()\n pc, tb_out = build_processing_chain(tb_data, dsp_config, verbosity=1)\n pc.execute()\n t_elap = (time.time() - t_start)/60\n print(f'Done. Elapsed: {t_elap:.2f} min')\n \n df_out = tb_out.get_dataframe()\n \n if write_output:\n df_out.to_hdf(f_results, key='opt_dcr')\n print('Wrote output file:', f_results)", "def _initialize_output(self, time_len, id_len):\r\n\r\n log('Initializing new file %s' % self.cf_compliant_file, 'INFO')\r\n \r\n self.cf_nc = Dataset(self.cf_compliant_file, 'w', format='NETCDF3_CLASSIC')\r\n \r\n # Create global attributes\r\n log(' globals', 'DEBUG', self.print_debug)\r\n self.cf_nc.featureType = 'timeSeries'\r\n self.cf_nc.Metadata_Conventions = 'Unidata Dataset Discovery v1.0'\r\n self.cf_nc.Conventions = 'CF-1.6'\r\n self.cf_nc.cdm_data_type = 'Station'\r\n self.cf_nc.nodc_template_version = (\r\n 'NODC_NetCDF_TimeSeries_Orthogonal_Template_v1.1')\r\n self.cf_nc.standard_name_vocabulary = ('NetCDF Climate and Forecast (CF) ' +\r\n 'Metadata Convention Standard Name ' +\r\n 'Table v28')\r\n self.cf_nc.title = 'RAPID Result'\r\n self.cf_nc.summary = (\"Results of RAPID river routing simulation. 
Each river \" +\r\n \"reach (i.e., feature) is represented by a point \" +\r\n \"feature at its midpoint, and is identified by the \" +\r\n \"reach's unique NHDPlus COMID identifier.\")\r\n self.cf_nc.time_coverage_resolution = 'point'\r\n self.cf_nc.geospatial_lat_min = 0.0\r\n self.cf_nc.geospatial_lat_max = 0.0\r\n self.cf_nc.geospatial_lat_units = 'degrees_north'\r\n self.cf_nc.geospatial_lat_resolution = 'midpoint of stream feature'\r\n self.cf_nc.geospatial_lon_min = 0.0\r\n self.cf_nc.geospatial_lon_max = 0.0\r\n self.cf_nc.geospatial_lon_units = 'degrees_east'\r\n self.cf_nc.geospatial_lon_resolution = 'midpoint of stream feature'\r\n self.cf_nc.geospatial_vertical_min = 0.0\r\n self.cf_nc.geospatial_vertical_max = 0.0\r\n self.cf_nc.geospatial_vertical_units = 'm'\r\n self.cf_nc.geospatial_vertical_resolution = 'midpoint of stream feature'\r\n self.cf_nc.geospatial_vertical_positive = 'up'\r\n self.cf_nc.project = self.project_name\r\n self.cf_nc.processing_level = 'Raw simulation result'\r\n self.cf_nc.keywords_vocabulary = ('NASA/Global Change Master Directory ' +\r\n '(GCMD) Earth Science Keywords. Version ' +\r\n '8.0.0.0.0')\r\n self.cf_nc.keywords = 'DISCHARGE/FLOW'\r\n self.cf_nc.comment = 'Result time step(s) (seconds): ' + str(self.time_step_array)\r\n \r\n timestamp = datetime.utcnow().isoformat() + 'Z'\r\n self.cf_nc.date_created = timestamp\r\n self.cf_nc.history = (timestamp + '; added time, lat, lon, z, crs variables; ' +\r\n 'added metadata to conform to NODC_NetCDF_TimeSeries_' +\r\n 'Orthogonal_Template_v1.1')\r\n \r\n # Create dimensions\r\n log(' dimming', 'DEBUG', self.print_debug)\r\n self.cf_nc.createDimension('time', time_len)\r\n self.cf_nc.createDimension(self.output_id_dim_name, id_len)\r\n \r\n # Create variables\r\n log(' timeSeries_var', 'DEBUG', self.print_debug)\r\n timeSeries_var = self.cf_nc.createVariable(self.output_id_dim_name, 'i4', \r\n (self.output_id_dim_name,))\r\n timeSeries_var.long_name = (\r\n 'Unique NHDPlus COMID identifier for each river reach feature')\r\n timeSeries_var.cf_role = 'timeseries_id'\r\n \r\n log(' time_var', 'DEBUG', self.print_debug)\r\n time_var = self.cf_nc.createVariable('time', 'i4', ('time',))\r\n time_var.long_name = 'time'\r\n time_var.standard_name = 'time'\r\n time_var.units = 'seconds since 1970-01-01 00:00:00 0:00'\r\n time_var.axis = 'T'\r\n \r\n #only add if user adds\r\n if self.comid_lat_lon_z_file and os.path.exists(self.comid_lat_lon_z_file):\r\n log(' lat_var', 'DEBUG', self.print_debug)\r\n lat_var = self.cf_nc.createVariable('lat', 'f8', (self.output_id_dim_name,),\r\n fill_value=-9999.0)\r\n lat_var.long_name = 'latitude'\r\n lat_var.standard_name = 'latitude'\r\n lat_var.units = 'degrees_north'\r\n lat_var.axis = 'Y'\r\n \r\n log(' lon_var', 'DEBUG', self.print_debug)\r\n lon_var = self.cf_nc.createVariable('lon', 'f8', (self.output_id_dim_name,),\r\n fill_value=-9999.0)\r\n lon_var.long_name = 'longitude'\r\n lon_var.standard_name = 'longitude'\r\n lon_var.units = 'degrees_east'\r\n lon_var.axis = 'X'\r\n \r\n log(' z_var', 'DEBUG', self.print_debug)\r\n z_var = self.cf_nc.createVariable('z', 'f8', (self.output_id_dim_name,),\r\n fill_value=-9999.0)\r\n z_var.long_name = ('Elevation referenced to the North American ' +\r\n 'Vertical Datum of 1988 (NAVD88)')\r\n z_var.standard_name = 'surface_altitude'\r\n z_var.units = 'm'\r\n z_var.axis = 'Z'\r\n z_var.positive = 'up'\r\n \r\n log(' crs_var', 'DEBUG', self.print_debug)\r\n crs_var = self.cf_nc.createVariable('crs', 'i4')\r\n 
crs_var.grid_mapping_name = 'latitude_longitude'\r\n crs_var.epsg_code = 'EPSG:4326' # WGS 84\r\n crs_var.semi_major_axis = 6378137.0\r\n crs_var.inverse_flattening = 298.257223563", "def dataIdentify(self, in_nc):\r\n data_nc = NET.Dataset(in_nc)\r\n time = data_nc.variables['time'][:]\r\n diff = NUM.unique(NUM.diff(time))\r\n data_nc.close()\r\n #time_interval_highres = NUM.array([1.0,3.0,6.0],dtype=float)\r\n #time_interval_lowres_full = NUM.array([3.0, 6.0],dtype=float)\r\n #time_interval_lowres = NUM.array([6.0],dtype=float)\r\n #time_interval_lowres_3Hr = NUM.array([3.0],dtype=float)\r\n\t\t\r\n time_interval_HRES1 = NUM.array([1.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_HRES13 = NUM.array([1.0,3.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_HRES136 = NUM.array([1.0,3.0,6.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_ENS3 = NUM.array([3.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_ENS36 = NUM.array([3.0,6.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_ENS6 = NUM.array([6.0],dtype=float) # Line Added/Modified CJB 20190108\r\n\r\n\r\n #print \"SDR - diff:\", diff, time_interval_highres, time_interval_lowres_full, time_interval_lowres\r\n #if NUM.array_equal(diff, time_interval_highres):\r\n # return \"HighRes\"\r\n #elif NUM.array_equal(diff, time_interval_lowres_full):\r\n # return \"LowResFull\"\r\n #elif NUM.array_equal(diff, time_interval_lowres):\r\n # return \"LowRes\"\r\n #elif NUM.array_equal(diff, time_interval_lowres_3Hr):\r\n # return \"Low3HrRes\"\r\n #else:\r\n # return None\r\n\t\t\t\r\n if NUM.array_equal(diff, time_interval_HRES1): # Line Added/Modified CJB 20190108\r\n return \"HRES1\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_HRES13): # Line Added/Modified CJB 20190108\r\n return \"HRES13\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_HRES136): # Line Added/Modified CJB 20190108\r\n return \"HRES136\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_ENS3): # Line Added/Modified CJB 20190108\r\n return \"ENS3\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_ENS36): # Line Added/Modified CJB 20190108\r\n return \"ENS36\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_ENS6): # Line Added/Modified MJS, CJB 20190108\r\n return \"ENS6\" # Line Added/Modified CJB 20190108\r\n else: # Line Added/Modified CJB 20190108\r\n return None # Line Added/Modified CJB 20190108\r", "def ncwrt_retrieval_model(retr_setup, outname=None):\n\n #-- set name of file to be generated\n act_outname = outname if outname!=None else 'retrmodel.nc'\n msg = \"Start writing configuration file ***{}***...\".format(act_outname)\n FileLogger.info(msg)\n\n #-- compression settings\n zlev = retr_setup.zlev\n use_zlib = retr_setup.use_zlib\n\n #-- retrieval settings\n schedule_dct = retr_setup.schedule_dct\n timepts = schedule_dct['date_utc']\n npts = len(timepts)\n a_components = retr_setup.a_components\n b_components = retr_setup.b_components\n munc_components = retr_setup.munc_components\n\n #-- temporal settings, create time-values (time-since)\n time_start, time_end = datelst_get_month_aligned_bounds(timepts)\n time_coverage_start = time_start.strftime('%Y-%m-%dT%H:%M:%S')\n time_coverage_end = time_end.strftime('%Y-%m-%dT%H:%M:%S')\n ref_time = dt.datetime(timepts[0].year,1,1) #January1st in year of first point in 
time\n time_unit = 'seconds since {}'.format(ref_time.strftime('%Y-%m-%dT%H:%M:%S'))\n time_values = nc4.date2num(timepts, time_unit)\n\n #-- ensure directory exists\n mkdirp_smart(os.path.dirname(act_outname))\n\n #-- open file pointer\n ncfp = nc4.Dataset(act_outname, 'w')\n #-- add dimensions\n d1 = ncfp.createDimension('npoints',npts)\n\n #-- time-value\n ncvar = ncfp.createVariable( 'time', np.float64, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('standard_name','time')\n ncvar.setncattr('long_name','time')\n ncvar.setncattr('units', time_unit)\n ncvar[:] = time_values[:]\n\n unit_one = np.float64(1)\n #-- a-coefficient part\n # lai\n ncvar = ncfp.createVariable( 'a_lai', np.float64, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'a coefficient associated to LAI in dynamic state model')\n ncvar.setncattr('units',unit_one)\n ncvar[:] = a_components[0,:]\n # canht\n ncvar = ncfp.createVariable( 'a_canht', np.float64, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'a coefficient associated to canopy height in dynamic state model')\n ncvar.setncattr('units',unit_one)\n ncvar[:] = a_components[1,:]\n # soil moisture\n ncvar = ncfp.createVariable( 'a_sm', np.float64, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'a coefficient associated to soil moisture in dynamic state model')\n ncvar.setncattr('units',unit_one)\n ncvar[:] = a_components[2,:]\n\n #-- offset-coefficient part\n # lai\n ncvar = ncfp.createVariable( 'offset_lai', np.float64, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'offset associated to LAI in dynamic state model')\n ncvar.setncattr('units',unit_one)\n ncvar[:] = b_components[0,:]\n # canht\n ncvar = ncfp.createVariable( 'offset_canht', np.float64, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'offset associated to canopy height in dynamic state model')\n ncvar.setncattr('units',unit_one)\n ncvar[:] = b_components[1,:]\n # soil moisture\n ncvar = ncfp.createVariable( 'offset_sm', np.float64, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'offset associated to soil moisture in dynamic state model')\n ncvar.setncattr('units',unit_one)\n ncvar[:] = b_components[2,:]\n\n #-- model-uncertainty part\n # lai\n ncvar = ncfp.createVariable( 'munc_lai', np.float64, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'model uncertainty associated to LAI in dynamic state model')\n ncvar.setncattr('units',unit_one)\n ncvar[:] = munc_components[0,:]\n # canht\n ncvar = ncfp.createVariable( 'munc_canht', np.float64, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'model uncertainty associated to canopy height in dynamic state model')\n ncvar.setncattr('units',unit_one)\n ncvar[:] = munc_components[1,:]\n # soil moisture\n ncvar = ncfp.createVariable( 'munc_sm', np.float64, ('npoints',),\n zlib=use_zlib, complevel=zlev )\n ncvar.setncattr('long_name', 'model uncertainty associated to soil moisture in dynamic state model')\n ncvar.setncattr('units',unit_one)\n ncvar[:] = munc_components[2,:]\n\n\n #-- global attributes\n ncfp.setncattr('creator_name',\"The Inversion Lab, Hamburg, Germany\")\n ncfp.setncattr('creator_email', \"Michael.Vossbeck(at)Inversion-Lab.com\")\n ncfp.setncattr('netcdf_libversion',\"{}\".format(nc4.__netcdf4libversion__))\n 
ncfp.setncattr('date_created',\"{}\".format(dt.datetime.utcnow().isoformat()))\n ncfp.setncattr('time_coverage_start',time_coverage_start)\n ncfp.setncattr('time_coverage_end',time_coverage_end)\n\n #-- close file pointer\n ncfp.close()\n\n # logging\n msg = \"...writing ***{}*** DONE\".format(act_outname)\n FileLogger.info(msg)", "def Writedata(self, tstep):\n \n nc = Dataset(self.outfile, 'a')\n \n nc.variables['time'][tstep] = self.time\n nc.variables['salt'][tstep] = self.salt\n nc.variables['temp'][tstep] = self.temp\n nc.variables['uc'][tstep] = self.uc\n nc.variables['vc'][tstep] = self.vc\n nc.variables['nu_v'][tstep] = self.nu_v\n nc.variables['rho'][tstep] = self.rho\n nc.variables['tau_x'][tstep] = self.tau_x\n nc.variables['tau_y'][tstep] = self.tau_y\n nc.variables['eta'][tstep] = self.eta\n \n nc.close()", "def writeNetCDFData(out_nc, hrus, dr_time, hru_type, remapped_data, var_meta, var_attrs, var_encodings, remap_idx):\n\n dataset = xr.Dataset()\n\n for varname, meta in var_meta.items():\n foo = xr.DataArray(remapped_data[varname][:, remap_idx],\n dims=['time', 'basinID'],\n name=varname)\n\n foo.encoding = var_encodings[varname]\n foo.attrs = var_attrs[varname]\n\n dataset[varname] = foo\n\n # HRU ID variables\n dataset['basinID'] = xr.DataArray(hrus[remap_idx], dims=['basinID'])\n dataset['basinID'].encoding = {'dtype': hru_type, '_FillValue': None}\n dataset['basinID'].attrs = {'long_name': 'Basin ID'}\n\n dataset[TIME_DIM_NAME] = dr_time\n\n dataset.to_netcdf(out_nc, unlimited_dims='time')", "def __init__(self):\r\n self.label = \"Create Inflow File From ECMWF Runoff\"\r\n self.description = (\"Creates RAPID NetCDF input of water inflow \" +\r\n \"based on ECMWF runoff results and previously created weight table.\")\r\n self.canRunInBackground = False\r\n #CJB self.header_wt = ['StreamID', 'area_sqm', 'lon_index', 'lat_index', 'npoints']\r\n self.header_wt = ['rivid', 'area_sqm', 'lon_index', 'lat_index', 'npoints']\r\n #SDR added new structure to fit new ecmwf ##.runoff.nc file order\r\n #self.dims_oi = [['lon', 'lat', 'time'], ['longitude', 'latitude', 'time']]\r\n self.dims_oi = [['lon', 'lat', 'time'], ['longitude', 'latitude', 'time'], ['time','lon','lat']] # Line Added/Modified CJB 20190108\r\n #self.vars_oi = [[\"lon\", \"lat\", \"time\", \"RO\"], ['longitude', 'latitude', 'time', 'ro']]\r\n self.vars_oi = [[\"lon\", \"lat\", \"time\", \"RO\"], ['longitude', 'latitude', 'time', 'ro'], [\"time\", \"lon\", \"lat\", \"RO\"]] # Line Added/Modified CJB 20190108\r\n self.length_time = {\"LowRes\": 61, \"Low3HrRes\": 40, \"LowResFull\": 85,\"HighRes\": 125, \"High3HrRes\":3} # *** MJS What is High3HrRes for? 
Doesn't seem to be used.\r\n #self.length_time = {\"LowResFull\": 85,\"HighRes\": 125}\r\n self.length_time_opt = {\"LowRes-6hr\": 60, \"LowRes-3hr\": 40,\r\n \"LowResFull-3hr-Sub\": 48, \"LowResFull-6hr-Sub\": 36,\r\n \"HighRes-1hr\": 90, \"HighRes-3hr\": 48, \"HighRes-6hr\": 40, # *** MJS HighRes-3hr was changed to 40 before; why?\r\n \"HighRes-3hr-Sub\": 18, \"HighRes-6hr-Sub\": 16}\r\n self.errorMessages = [\"Missing Variable 'time'\",\r\n \"Incorrect dimensions in the input ECMWF runoff file.\",\r\n \"Incorrect variables in the input ECMWF runoff file.\",\r\n \"Incorrect time variable in the input ECMWF runoff file\",\r\n \"Incorrect number of columns in the weight table\",\r\n \"No or incorrect header in the weight table\",\r\n \"Incorrect sequence of rows in the weight table\"]", "def get_daily_LIS_output(input_fname,loc_lat_lis,loc_lon_lis):\n\n print(\"carry on read_cable_var\")\n\n for month in np.arange(0,12,1):\n print(month)\n cable = nc.Dataset(input_fname[month], 'r')\n\n if month == 0:\n rain = cable.variables['Rainf_f_tavg'][:,loc_lat_lis,loc_lon_lis].filled(-9999.)\n evap = cable.variables['Evap_tavg'][:,loc_lat_lis,loc_lon_lis].filled(-9999.)\n tveg = cable.variables['TVeg_tavg'][:,loc_lat_lis,loc_lon_lis].filled(-9999.)\n esoil = cable.variables['ESoil_tavg'][:,loc_lat_lis,loc_lon_lis].filled(-9999.)\n ecanop= cable.variables['ECanop_tavg'][:,loc_lat_lis,loc_lon_lis].filled(-9999.)\n qs = cable.variables['Qs_tavg'][:,loc_lat_lis,loc_lon_lis].filled(-9999.)\n qsb = cable.variables['Qsb_tavg'][:,loc_lat_lis,loc_lon_lis].filled(-9999.)\n sm1 = cable.variables['SoilMoist_tavg'][:,0,loc_lat_lis,loc_lon_lis].filled(-9999.)\n sm2 = cable.variables['SoilMoist_tavg'][:,1,loc_lat_lis,loc_lon_lis].filled(-9999.)\n sm3 = cable.variables['SoilMoist_tavg'][:,2,loc_lat_lis,loc_lon_lis].filled(-9999.)\n sm4 = cable.variables['SoilMoist_tavg'][:,3,loc_lat_lis,loc_lon_lis].filled(-9999.)\n sm5 = cable.variables['SoilMoist_tavg'][:,4,loc_lat_lis,loc_lon_lis].filled(-9999.)\n sm6 = cable.variables['SoilMoist_tavg'][:,5,loc_lat_lis,loc_lon_lis].filled(-9999.)\n gwwb = cable.variables['GWwb_tavg'][:,loc_lat_lis,loc_lon_lis].filled(-9999.)\n\n else:\n rain = np.concatenate((rain,cable.variables['Rainf_f_tavg'][:,loc_lat_lis,loc_lon_lis].filled(-9999.)))\n evap = np.concatenate((evap,cable.variables['Evap_tavg'][:,loc_lat_lis,loc_lon_lis].filled(-9999.)))\n tveg = np.concatenate((tveg,cable.variables['TVeg_tavg'][:,loc_lat_lis,loc_lon_lis].filled(-9999.)))\n esoil = np.concatenate((esoil,cable.variables['ESoil_tavg'][:,loc_lat_lis,loc_lon_lis].filled(-9999.)))\n ecanop= np.concatenate((ecanop,cable.variables['ECanop_tavg'][:,loc_lat_lis,loc_lon_lis].filled(-9999.)))\n qs = np.concatenate((qs,cable.variables['Qs_tavg'][:,loc_lat_lis,loc_lon_lis].filled(-9999.)))\n qsb = np.concatenate((qsb,cable.variables['Qsb_tavg'][:,loc_lat_lis,loc_lon_lis].filled(-9999.)))\n sm1 = np.concatenate((sm1,cable.variables['SoilMoist_tavg'][:,0,loc_lat_lis,loc_lon_lis].filled(-9999.)))\n sm2 = np.concatenate((sm2,cable.variables['SoilMoist_tavg'][:,1,loc_lat_lis,loc_lon_lis].filled(-9999.)))\n sm3 = np.concatenate((sm3,cable.variables['SoilMoist_tavg'][:,2,loc_lat_lis,loc_lon_lis].filled(-9999.)))\n sm4 = np.concatenate((sm4,cable.variables['SoilMoist_tavg'][:,3,loc_lat_lis,loc_lon_lis].filled(-9999.)))\n sm5 = np.concatenate((sm5,cable.variables['SoilMoist_tavg'][:,4,loc_lat_lis,loc_lon_lis].filled(-9999.)))\n sm6 = 
np.concatenate((sm6,cable.variables['SoilMoist_tavg'][:,5,loc_lat_lis,loc_lon_lis].filled(-9999.)))\n gwwb = np.concatenate((gwwb,cable.variables['GWwb_tavg'][:,loc_lat_lis,loc_lon_lis].filled(-9999.)))\n\n cable.close()\n\n Var = pd.DataFrame(rain*60.*60.*24., columns=['Rainf'])\n Var['Evap'] = evap*60.*60.*24.\n Var['TVeg'] = tveg*60.*60.*24.\n Var['ESoil'] = esoil*60.*60.*24.\n Var['ECanop']= ecanop*60.*60.*24.\n Var['Qs'] = qs*60.*60.*24.\n Var['Qsb'] = qsb*60.*60.*24.\n Var['SM1'] = sm1\n Var['SM2'] = sm2\n Var['SM3'] = sm3\n Var['SM4'] = sm4\n Var['SM5'] = sm5\n Var['SM6'] = sm6\n Var['GWMoist'] = gwwb\n\n return Var", "def nc_to_hdf5_mudis(dataset, config):\n np.warnings.filterwarnings('ignore')\n\n date = datetime.datetime.strptime(dataset.recorddate,\n '%d.%m.%Y') # read date from dateset\n date_name = datetime.datetime.strftime(date,\n '%Y%m%d') # convert date to YYYYMMDD format\n config['date'] = date_name\n\n # Create the directory to save the results\n path = config['str_dir'] + '/radiance/{}/data/'.format(config['date'])\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n # Read time of the file (correct time)\n time = datetime.datetime.strptime(dataset.recordtime, '%H:%M:%S.')\n time = datetime.datetime.time(time)\n\n # convert time to datetime format\n datetime_name = datetime.datetime.combine(date, time)\n new_name = datetime.datetime.strftime(datetime_name, '%Y%m%d_%H%M%S')\n\n # radiance = dataset.variables['data'][:].reshape(113, 1281)\n # wavelength_axis = dataset.variables['xAxis'][:]\n\n # Create a file in the disk\n with h5py.File(config['str_dir'] + '/radiance/{}/data/{}.h5'.format(\n config['date'], new_name), 'w') as datos:\n\n if not list(datos.items()):\n # Create two datasets(use only one time)\n datos.create_dataset('/data',\n data=dataset['data'][:].reshape(113, 1281),\n dtype='f4')\n # datos.create_dataset('/skymap', data=skymap, dtype='f4')\n else:\n del datos['data']\n # del datos['skymap']\n print('data deleted and corrected')\n datos.create_dataset('/data', data=data, dtype='f4')\n # datos.create_dataset('/skymap', data=skymap, dtype='f4')\n\n # Add attributes to datasets\n datos['data'].attrs['time'] = str(time)\n datos['data'].attrs['Exposure'] = dataset.exposuretime\n datos['data'].attrs['NumAver'] = dataset.AVERAGED\n datos['data'].attrs['CCDTemp'] = dataset.detectortemperature\n datos['data'].attrs['NumSingMes'] = dataset.noofaccumulations\n # datos['data'].attrs['ElectrTemp'] = dataset.\n datos['data'].attrs['Latitude'] = '52.39N'\n datos['data'].attrs['Longitude'] = '9.7E'\n datos['data'].attrs['Altitude'] = '65 AMSL'\n\n chn = np.arange(1, 114)\n datos.create_dataset('/channel', data=chn, dtype=np.float32)\n datos.create_dataset('/wavelength', data=dataset['xAxis'][:])\n\n datos['data'].dims.create_scale(datos['channel'], 'channel')\n datos['data'].dims[0].attach_scale(datos['channel'])\n datos['data'].dims[0].label = 'channel'\n datos['data'].dims[1].label = 'wavelength'\n\n # datos['skymap'].dims[0].label = 'channel'\n # datos['skymap'].dims[1].label = 'Azimuth, Zenith'\n\n datos.close()", "def read300yrh(period):\n directory300 = '/seley/ypeings/simu/PAMIP-1.1-QBO-300yr/monthly/'\n file300 = 'U10_1700-2000.nc'\n filename = directory300 + file300\n \n data = Dataset(filename)\n lat = data.variables['latitude'][:]\n lon = data.variables['longitude'][:]\n u10q = data.variables['U10'][:]\n data.close()\n \n ### Reshape in year/month\n u10n = np.reshape(u10q,(u10q.shape[0]//12,12,lat.shape[0],lon.shape[0]))\n \n ### Calculate over particular 
months\n u10 = UT.calcDecJanFeb(u10n,lat,lon,'surface',1)\n \n ### Slice U10 at 65N\n latq = np.where((lat >= 64.5) & (lat <= 65.5))[0]\n lat = lat[latq].squeeze()\n u10 = u10[:,latq,:].squeeze()\n \n ### Take zonal mean \n u10z = np.nanmean(u10,axis=1)\n \n ### Remove missing data\n mask = np.where(u10z > -1e5)[0]\n \n ### Detrend\n u10zdt = sss.detrend(u10z[mask],type='linear')\n \n return lat,lon,u10zdt", "def write_rsc_file(inps,in_file,out_file):\n # read file\n meta = readfile.read_roipac_rsc(in_file)\n # initiate dict\n rsc = dict()\n rsc['FILE_DIR'] = os.getenv('pwd')\n rsc['FILE_LENGTH'] = meta[\"FILE_LENGTH\"]\n rsc['WIDTH'] = meta[\"WIDTH\"]\n rsc['XMIN'] = 0\n rsc['XMAX'] = int(meta[\"WIDTH\"]) - 1\n rsc['YMIN'] = 0\n rsc['YMAX'] = int(meta[\"FILE_LENGTH\"]) - 1\n rsc['X_FIRST'] = float(meta[\"X_FIRST\"])\n rsc['Y_FIRST'] = float(meta[\"Y_FIRST\"])\n rsc['X_STEP'] = float(meta[\"X_STEP\"])\n rsc['Y_STEP'] = float(meta[\"Y_STEP\"])\n rsc['X_UNIT'] = 'degrees'\n rsc['Y_UNIT'] = 'degrees'\n rsc['RLOOKS'] = meta[\"RLOOKS\"]\n rsc['ALOOKS'] = meta[\"ALOOKS\"]\n rsc['Z_OFFSET'] = 0\n rsc['Z_SCALE'] = 1\n rsc['PROJECTION'] = 'LATLON'\n rsc['DATE12'] = '111111-222222'\n # write rsc file\n writefile.write_roipac_rsc(rsc, out_file, print_msg=True)\n return out_file", "def Writefile(self, outfile, verbose=True):\n \n self.outfile = outfile\n \n # Write SUNTANS grid to file\n nc = Dataset(outfile, 'w', format='NETCDF3_CLASSIC')\n nc.Description = 'SUNTANS subsetted history file'\n nc.Author = ''\n nc.Created = datetime.now().isoformat()\n nc.type = 'SUNTANS HIS file'\n #pdb.set_trace()\n nc.createDimension('Nc', self.Nc)\n nc.createDimension('Np', self.Np)\n nc.createDimension('Ne', self.Ne)\n nc.createDimension('Nk', self.Nk)\n nc.createDimension('numsides', self.numsides)\n \n nc.createDimension('time', None)\n \n def write_nc_var(var, name, dimensions, units=None):\n nc.createVariable(name, 'f8', dimensions)\n if units is not None:\n nc.variables[name].units = units\n nc.variables[name][:] = var\n if verbose:\n print ' ... wrote ', name\n \n def create_nc_var(name, dimensions, units=None):\n nc.createVariable(name, 'f8', dimensions)\n if units is not None:\n nc.variables[name].units = units\n if verbose:\n print ' ... 
wrote ', name\n \n # Grid variables\n write_nc_var(self.xv, 'xv', ('Nc'))\n write_nc_var(self.yv, 'yv', ('Nc'))\n write_nc_var(self.xp, 'xp', ('Np'))\n write_nc_var(self.yp, 'yp', ('Np'))\n write_nc_var(self.xe, 'xe', ('Ne'))\n write_nc_var(self.ye, 'ye', ('Ne'))\n write_nc_var(self.dz, 'dz', ('Nk'))\n write_nc_var(self.dv, 'dv', ('Nc'))\n write_nc_var(self.Ac, 'Ac', ('Nc'))\n write_nc_var(self.Nk, 'Nk', ('Nc'))\n write_nc_var(self.face, 'face', ('Nc','numsides'))\n write_nc_var(self.mark, 'mark', ('Ne'))\n write_nc_var(self.cells, 'cells', ('Nc','numsides'))\n \n \n # Create the data variables\n create_nc_var('time',('time'),'seconds since 1990-01-01 00:00:00')\n create_nc_var('salt',('time','Nk','Nc'),'psu')\n create_nc_var('temp',('time','Nk','Nc'),'degrees C')\n create_nc_var('uc',('time','Nk','Nc'),'meter second-1')\n create_nc_var('vc',('time','Nk','Nc'),'meter second-1')\n create_nc_var('nu_v',('time','Nk','Nc'),'m2 s-1')\n create_nc_var('rho',('time','Nk','Nc'),'kg m-3')\n create_nc_var('tau_x',('time','Nc'),'N m-2')\n create_nc_var('tau_y',('time','Nc'),'N m-2')\n create_nc_var('eta',('time','Nc'),'m')\n \n nc.close()", "def collect_data(ra,dec,unix,Nspectra,dt,fileName,fitName,noise=False):\n with open('{}'.format(fileName), 'w') as pointFile:\n pointFile.write('{}'.format('agilent'))\n \n alt, az = get_altaz(ra[0],dec[0],jd =uni_to_jul(unix), lat=37.9183, lon=-122.1067, alt =304)\n LeuschTelescope.point(alt,az)\n print(LeuschTelescope.get_pointing())\n\n if noise:\n ugradio.leusch.LeuschNoise()\n LeuschNoise.on()\n \n ugradio.agilent.SynthClient(host='127.0.0.1')\n pointFile.write('{}'.format(SynthClient.get_frequency()))\n \n #initialize spectrometer thing\n leuschner.Spectrometer('10.0.1.2')\n \n for r,d in zip(ra,dec):\n obsv_time = uni_to_jul(time.time())\n alt,az = get_altaz(ra[0],dec[0], jd=obsv_time, lat=37.9183, lon=-122.1067, alt = 304)\n LeuschTelescope.point(alt,az)\n currentAlt, currentAz = leusch.get_pointing()\n print('alt: {} , az: {}'.format(currentAlt, currentAz))\n Spectrometer.read_spec('{}_{}_r_d.fits'.format(unix,fitName), Nspec, (r,d), 'eq')", "def get_data(station,starttime,endtime,activity=False,\n rep='/GNOMEDrive/gnome/serverdata/',resample=None):\n setname = \"MagneticFields\"\n dstr = ['%Y','%m','%d','%H','%M']\n dsplit = '-'.join(dstr[:starttime.count('-')+1])\n start = datetime.strptime(starttime,dsplit)\n starttime = construct_utc_from_metadata(start.strftime(\"%Y/%m/%d\"),\n start.strftime(\"%H:%M:%S.%d\"))\n dsplit = '-'.join(dstr[:endtime.count('-')+1])\n end = datetime.strptime(endtime,dsplit)\n endtime = construct_utc_from_metadata(end.strftime(\"%Y/%m/%d\"),\n end.strftime(\"%H:%M:%S.%d\"))\n dataset = []\n for date in numpy.arange(start,end,timedelta(minutes=1)):\n date = date.astype(datetime)\n path1 = rep+station+'/'+date.strftime(\"%Y/%m/%d/\")\n path2 = station+'_'+date.strftime(\"%Y%m%d_%H%M*.hdf5\")\n fullpath = os.path.join(path1,path2)\n dataset += glob.glob(fullpath)\n if len(dataset)==0:\n print \"ERROR: No data files were found...\"\n quit()\n file_order,data_order = {},{}\n for fname in dataset:\n hfile = h5py.File(fname, \"r\")\n segfile = file_to_segment(hfile,setname)\n file_order[segfile] = fname\n data_order[segfile] = hfile\n # Extract sample rate from metadata of last read data file\n sample_rate = hfile[setname].attrs[\"SamplingRate(Hz)\"]\n # Estimate full segment activity list\n activity = create_activity_list(station,data_order)\n # Generate an ASCII representation of the GPS timestamped\n # segments of time covered by 
the input data\n seglist = segmentlist(data_order.keys())\n # Sort the segment list\n seglist.sort()\n # Create list of time series from every segment\n ts_list = generate_timeseries(file_order,setname)\n # Retrieve channel data for all the segments\n full_data = numpy.hstack([retrieve_channel_data(data_order[seg],setname)\n for seg in seglist])\n new_sample_rate = sample_rate if resample==None else resample\n new_data_length = len(full_data)*new_sample_rate/float(sample_rate)\n full_data = scipy.signal.resample(full_data,int(new_data_length))\n # Models a time series consisting of uniformly sampled scalar values\n ts_data = types.TimeSeries(full_data,delta_t=1./new_sample_rate,\n epoch=seglist[0][0])\n for v in data_order.values():\n v.close()\n return ts_data,ts_list,activity,int(starttime),int(endtime)", "def _rpn_hrs_to_nemo_hrs(\n netcdf_start_date,\n netcdf_end_date,\n forecast,\n rpn_dir,\n tmp_dir,\n keep_rpn_fcst_hr_files,\n bunzip2_rpn_fcst_hr_files,\n):\n days_range = arrow.Arrow.range(\n \"day\", netcdf_start_date.shift(days=-1), netcdf_end_date\n )\n for netcdf_date in days_range:\n bash_cmd = (\n f\"rpn-netcdf {forecast} {netcdf_date.format('YYYY-MM-DD')} {rpn_dir} {tmp_dir} \"\n f\"{keep_rpn_fcst_hr_files} {bunzip2_rpn_fcst_hr_files}\"\n )\n _exec_bash_func(bash_cmd)\n nemo_date = f\"y{netcdf_date.year}m{netcdf_date.month:02d}d{netcdf_date.day:02d}\"\n for hr in range(24 - int(forecast), 25):\n rpn_hr_ds_path = (\n tmp_dir\n / f\"{netcdf_date.shift(days=-1).format('YYYYMMDD')}{forecast}_{hr:03d}.nc\"\n )\n nemo_hr_ds_path = (\n tmp_dir / f\"gemlam_{nemo_date}_{(hr - (24-int(forecast))):03d}.nc\"\n )\n try:\n _write_nemo_hr_file(rpn_hr_ds_path, nemo_hr_ds_path)\n rpn_hr_ds_path.unlink()\n except FileNotFoundError:\n # Missing forecast hour; we'll fill it in later\n continue\n for hr in range(24 - int(forecast)):\n rpn_hr_ds_path = (\n tmp_dir\n / f\"{netcdf_date.format('YYYYMMDD')}{forecast}_{(hr + 1):03d}.nc\"\n )\n nemo_hr_ds_path = (\n tmp_dir / f\"gemlam_{nemo_date}_{(hr + 1 + int(forecast)):03d}.nc\"\n )\n try:\n _write_nemo_hr_file(rpn_hr_ds_path, nemo_hr_ds_path)\n rpn_hr_ds_path.unlink()\n except FileNotFoundError:\n # Missing forecast hour; we'll fill it in later\n continue", "def __output(self, t_signal: SpectralQty, t_background: SpectralQty, t_rms: SpectralQty,\n name: str, snr: SpectralQty = None, exp_time: SpectralQty = None, sensitivity: SpectralQty = None):\n # Concatenate the paths\n path = os.path.join(self.__common_conf.output.path, name)\n try:\n os.makedirs(path, exist_ok=True)\n except FileExistsError:\n logger.warning(\"Output directory '\" + path + \"' already exists.\")\n\n res = QTable([t_signal.wl, t_signal.qty, t_background.qty, t_rms.qty],\n names=('Wavelength [' + t_signal.wl.unit.to_string() + ']',\n 'Signal Temperature [' + t_signal.qty.unit.to_string() + ']',\n 'Background Temperature [' + t_background.qty.unit.to_string() + ']',\n 'RMS Noise Temperature [' + t_rms.qty.unit.to_string() + ']'),\n meta={'name': 'first table'})\n if snr is not None:\n res['SNR [-]'] = snr.qty\n if exp_time is not None:\n res['Exposure Time [' + exp_time.qty.unit.to_string() + ']'] = exp_time.qty\n if sensitivity is not None:\n res['Sensitivity [' + sensitivity.qty.unit.to_string() + ']'] = sensitivity.qty\n res.write(os.path.join(path, \"result.csv\"), format='ascii.csv', overwrite=True)", "def addtonc(ncfout,key,vd,ofield,ftype=\"timeseries\"):\n nc_out=nc.Dataset(ncfout,'r+')\n if ftype==\"timeseries\":\n diml=['time','height','south_north','west_east'] # Tuple 
of Dimensions\n if vd['dims']==4:\n dimtup=tuple(diml)\n elif vd['dims']==3:\n dimtup = tuple([c for c in diml if c != \"height\"])\n elif vd['dims']==2:\n dimtup = tuple([c for c in diml if c not in [\"height\",\"time\"]])\n elif ftype==\"roughness\":\n diml=['south_north','west_east']\n dimtup=tuple(diml)\n elif ftype==\"tabfile\":\n diml=['south_north','west_east','sector','wind','stab']\n if vd['dims']==3:\n dimtup=tuple(diml.remove('wind').remove('stab'))\n if vd['dims']==2:\n dimtup=tuple(diml.remove('wind').remove('stab').remove('sector'))\n if key in (\"TKE\", \"ABLAT_CYL\", \"ACCRE_CYL\"):\n outv=nc_out.createVariable(key, 'f4', dimtup, zlib=True,\n complevel=9, fill_value=-999.)\n else:\n outv=nc_out.createVariable(key,'f4',dimtup,zlib=True,complevel=9)\n outv.units=vd['units']\n outv.long_name=vd['name']\n if vd['std_name'] is not None:\n outv.standard_name=vd['std_name']\n if key==\"PRECIP\":\n outv.cell_methods=\"time: sum\"\n outv.grid_mapping=\"crs\"\n outv.coordinates=\"XLAT XLON\"\n outv[:]=ofield[:]\n nc_out.close()\n return(None)", "def execute(self, in_nc, in_weight_table, out_nc, grid_name, conversion_flag, in_time_interval=\"6hr\"): # modified this line CJB 20190218\r\n # MJS I might consider netCDF4.Dataset.variables['RO'].units\r\n # and upstream correction of the cdo grid conversion units attribute.\r\n\r\n # Validate the netcdf dataset\r\n vars_oi_index = self.dataValidation(in_nc)\r\n \r\n \"\"\"get conversion factor the flag is used to differentiate forecasts converted \r\n to netCDF from GRIB and the original netCDF. They both use the same weight tables\r\n but the original netCDF is in mm whereas the stock GRIB forecasts are in meters.\r\n Set the conversion_flag in the run.py configuration file.\r\n \"\"\"\r\n if conversion_flag: # Line Added CJB 20190218\r\n conversion_factor = 1.0 #Line Modified CJB 20190218\r\n elif grid_name == 'ecmwf_t1279' or grid_name == 'ecmwf_tco639': # Line Modified CJB 20190218\r\n #if grid_name == 'ecmwf_HRES_F' or grid_name == 'ecmwf_ENS_F': # Line Added/Modified CJB 20190108\r\n #new grids in mm instead of m\r\n conversion_factor = 0.001\r\n else: #set the conversion factor to 1 for everything else (data is in m but legacy installations do not have a flag) Line Added CJB 20190218\r\n conversion_factor = 1.0 # Line Added CJB 20190218\r\n # MJS I might consider netCDF4.Dataset.variables['RO'].units\r\n # and upstream correction of the cdo grid conversion units attribute.\r\n # identify if the input netcdf data is the High Resolution data with three different time intervals\r\n id_data = self.dataIdentify(in_nc)\r\n if id_data is None:\r\n raise Exception(self.errorMessages[3])\r\n\r\n ''' Read the netcdf dataset'''\r\n data_in_nc = NET.Dataset(in_nc)\r\n time = data_in_nc.variables['time'][:]\r\n\r\n # Check the size of time variable in the netcdf data\r\n if len(time) == 0: # *** MJS This change seems like it is too loose an error trap; should it account for instances when nc file time var is != in length with id_data lenght?\r\n raise Exception(self.errorMessages[3])\r\n #if len(time) != self.length_time[id_data]:\r\n # raise Exception(self.errorMessages[3])\r\n\r\n ''' Read the weight table '''\r\n print(\"Reading the weight table...\", in_weight_table)\r\n dict_list = {self.header_wt[0]:[], self.header_wt[1]:[], self.header_wt[2]:[],\r\n self.header_wt[3]:[], self.header_wt[4]:[]}\r\n\r\n with open(in_weight_table, \"r\") as csvfile:\r\n reader = csv.reader(csvfile)\r\n count = 0\r\n for row in reader:\r\n if count == 0:\r\n 
#check number of columns in the weight table\r\n if len(row) < len(self.header_wt):\r\n raise Exception(self.errorMessages[4])\r\n #check header\r\n if row[1:len(self.header_wt)] != self.header_wt[1:]:\r\n raise Exception(self.errorMessages[5])\r\n count += 1\r\n else:\r\n for i in range(len(self.header_wt)):\r\n dict_list[self.header_wt[i]].append(row[i])\r\n count += 1\r\n\r\n ''' Calculate water inflows\r\n as a reminder, the first 91 time steps are T=0 to T=90 and are 1-hourly for HRES\r\n\t\t the next 18 time steps for HRES are T=93 to T=144 at 3-hourly\r\n then the final 16 time steps are T=150 to T=240 at 6-hourly for a total of 125 records\r\n\t\t\tFor ENS, the first 49 time steps are T=0 to T=144 at 3-hourly\r\n\t\t\tthe final 35 time steps are T=150 to T=360 at 6-hourly for a total of 84 records\r\n '''\r\n\t\t\t\r\n print(\"Calculating water inflows...\")\r\n\t\t\r\n ''' \r\n added the next section CJB 20180122 \r\n '''\r\n\r\n\t\t# Get the overall number of time steps\r\n size_time = self.getTimeSize(in_nc) #CJB 20180122\r\n # Determine the size of time steps in each group (1-hourly, 3-hourly, and/or 6-hourly)\r\n if id_data == \"HRES1\": # T <= 90 \r\n time_size = (size_time - 1)\r\n elif id_data == \"HRES13\": # 93 <= T <= 144\r\n if in_time_interval == \"1hr\":\r\n time_size = self.length_time_opt[\"HighRes-1hr\"]\r\n else:\r\n time_size = (size_time - self.length_time_opt[\"HighRes-1hr\"] - 1)\r\n elif id_data == \"HRES136\": # 150 <= T <= 240\r\n if in_time_interval == \"1hr\":\r\n time_size = self.length_time_opt[\"HighRes-1hr\"]\r\n elif in_time_interval == \"3hr\": # MJS Doesn't seem to be a case used currently, but added just in case later need.\r\n time_size = self.length_time_opt[\"HighRes-3hr-sub\"] # MJS This is HRES136, i.e., if for some reason in ecmwf_rapid_multi a 3 hr is asked for for this case, it should still have the 3hr_sub number of times\r\n elif in_time_interval == \"3hr_subset\":\r\n time_size = self.length_time_opt[\"HighRes-3hr-Sub\"]\r\n else:\r\n time_size = (size_time - self.length_time_opt[\"HighRes-1hr\"] - self.length_time_opt[\"HighRes-3hr-Sub\"] - 1)\r\n elif id_data == \"ENS3\": # T <= 144\r\n time_size = (size_time - 1)\r\n elif id_data == \"ENS36\": # 150 <= T <= 360\r\n if in_time_interval == \"3hr_subset\":\r\n time_size = self.length_time_opt[\"LowResFull-3hr-Sub\"]\r\n else:\r\n time_size = (size_time - self.length_time_opt[\"LowResFull-3hr-Sub\"] - 1)\r\n else: # id_data == \"ENS6\": # T <= 360 but all 6-hourly\r\n time_size = (size_time - 1)\r\n #else: # something is wrong and need to throw an error message - likely a corrupt forecast file\r\n # raise Exception(self.errorMessages[3])\r\n #''' end of added section CJB 20180122 \r\n #'''\r\n\r\n #if id_data == \"LowRes\":\r\n # size_time = self.length_time_opt[\"LowRes-6hr\"]\r\n #elif id_data == \"Low3HrRes\":\r\n # size_time = self.length_time_opt[\"LowRes-3hr\"]\r\n #elif id_data == \"LowResFull\":\r\n # if in_time_interval == \"3hr_subset\":\r\n # size_time = self.length_time_opt[\"LowResFull-3hr-Sub\"]\r\n # elif in_time_interval == \"6hr_subset\":\r\n # size_time = self.length_time_opt[\"LowResFull-6hr-Sub\"]\r\n # else:\r\n # size_time = self.length_time_opt[\"LowRes-6hr\"]\r\n #else: #HighRes\r\n # if in_time_interval == \"1hr\":\r\n # size_time = self.length_time_opt[\"HighRes-1hr\"]\r\n # elif in_time_interval == \"3hr\":\r\n # size_time = self.length_time_opt[\"HighRes-3hr\"]\r\n # elif in_time_interval == \"3hr_subset\":\r\n # size_time = 
self.length_time_opt[\"HighRes-3hr-Sub\"]\r\n # elif in_time_interval == \"6hr_subset\":\r\n # size_time = self.length_time_opt[\"HighRes-6hr-Sub\"]\r\n # else:\r\n # size_time = self.length_time_opt[\"HighRes-6hr\"]\r\n\r\n size_streamID = len(set(dict_list[self.header_wt[0]]))\r\n\r\n # Create output inflow netcdf data\r\n # data_out_nc = NET.Dataset(out_nc, \"w\") # by default format = \"NETCDF4\"\r\n data_out_nc = NET.Dataset(out_nc, \"w\", format = \"NETCDF3_CLASSIC\")\r\n #dim_Time = data_out_nc.createDimension('Time', size_time)\r\n dim_Time = data_out_nc.createDimension('Time', time_size)\r\n dim_RiverID = data_out_nc.createDimension('rivid', size_streamID)\r\n var_m3_riv = data_out_nc.createVariable('m3_riv', 'f4', \r\n ('Time', 'rivid'),\r\n fill_value=0)\r\n \r\n #data_temp = NUM.empty(shape = [size_time, size_streamID])\r\n data_temp = NUM.empty(shape = [time_size, size_streamID])\r\n\r\n lon_ind_all = [int(i) for i in dict_list[self.header_wt[2]]]\r\n lat_ind_all = [int(j) for j in dict_list[self.header_wt[3]]]\r\n\r\n # Obtain a subset of runoff data based on the indices in the weight table\r\n min_lon_ind_all = min(lon_ind_all)\r\n max_lon_ind_all = max(lon_ind_all)\r\n min_lat_ind_all = min(lat_ind_all)\r\n max_lat_ind_all = max(lat_ind_all)\r\n\r\n # self.vars_oi[vars_oi_index][3] = RO; get that variable's 3D structure (time, lat_index, lon_index) ready to reshape into 2D (time, lat_index x lon_index)\r\n data_subset_all = data_in_nc.variables[self.vars_oi[vars_oi_index][3]][:, min_lat_ind_all:max_lat_ind_all+1, min_lon_ind_all:max_lon_ind_all+1]\r\n len_time_subset_all = data_subset_all.shape[0]\r\n len_lat_subset_all = data_subset_all.shape[1]\r\n len_lon_subset_all = data_subset_all.shape[2]\r\n data_subset_all = data_subset_all.reshape(len_time_subset_all, (len_lat_subset_all * len_lon_subset_all))\r\n\r\n # compute new indices based on the data_subset_all\r\n index_new = []\r\n for r in range(0,count-1):\r\n ind_lat_orig = lat_ind_all[r]\r\n ind_lon_orig = lon_ind_all[r]\r\n index_new.append((ind_lat_orig - min_lat_ind_all)*len_lon_subset_all + (ind_lon_orig - min_lon_ind_all))\r\n\r\n # obtain a new subset of data\r\n data_subset_new = data_subset_all[:,index_new]*conversion_factor\r\n\r\n # start compute inflow\r\n pointer = 0\r\n for s in range(0, size_streamID):\r\n npoints = int(dict_list[self.header_wt[4]][pointer])\r\n # Check if all npoints points correspond to the same streamID\r\n if len(set(dict_list[self.header_wt[0]][pointer : (pointer + npoints)])) != 1:\r\n print(\"ROW INDEX {0}\".format(pointer))\r\n print(\"RIVID {0}\".format(dict_list[self.header_wt[0]][pointer]))\r\n raise Exception(self.errorMessages[2])\r\n\r\n area_sqm_npoints = [float(k) for k in dict_list[self.header_wt[1]][pointer : (pointer + npoints)]]\r\n area_sqm_npoints = NUM.array(area_sqm_npoints)\r\n area_sqm_npoints = area_sqm_npoints.reshape(1, npoints)\r\n data_goal = data_subset_new[:, pointer:(pointer + npoints)]\r\n \r\n \r\n #remove noise from data\r\n data_goal[data_goal<=0.00001] = 0\r\n\r\n ''' IMPORTANT NOTE: runoff variable in ECMWF dataset is cumulative instead of incremental through time\r\n '''\r\n # For data with Low Resolution, there's only one time interval 6 hrs\r\n if id_data == \"ENS6\": # Line Added/Modified CJB 20190108\r\n #ro_stream = data_goal * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints\r\n elif id_data == \"ENS3\": # there's only one time interval 3 hrs # Line Added/Modified CJB 20190108\r\n #ro_stream = 
data_goal * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints # Line Added/Modified CJB 20190108\r\n elif id_data == \"HRES1\": # there's only one time interval 1 hrs # Line Added/Modified CJB 20190108\r\n #ro_stream = data_goal * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints # Line Added/Modified CJB 20190108\t\r\n #For data with the full version of Low Resolution, from Hour 0 to 144 (the first 49 time points) are of 3 hr time interval,\r\n # then from Hour 144 to 360 (36 time points) are of 6 hour time interval\r\n elif id_data == \"ENS36\": # Line Added/Modified CJB 20190108\r\n if in_time_interval == \"3hr_subset\":\r\n #use only the 3hr time interval\r\n ro_stream = NUM.subtract(data_goal[1:49,], data_goal[:48,]) * area_sqm_npoints\r\n elif in_time_interval == \"6hr_subset\":\r\n #use only the 6hr time interval\r\n ro_stream = NUM.subtract(data_goal[49:,], data_goal[48:-1,]) * area_sqm_npoints\r\n else: #\"LowRes-6hr\"\r\n ######################################################\r\n # MJS Always assume this case will have a full ECMWF 240\r\n # hour forecast to work with. It's actually never re-\r\n # quested by ecmwf_rapid_multiprocess anyhow.\r\n ######################################################\r\n #convert all to 6hr\r\n # calculate time series of 6 hr data from 3 hr data\r\n ro_6hr_a = NUM.subtract(data_goal[2:49:2,], data_goal[:48:2,])\r\n # get the time series of 6 hr data\r\n ro_6hr_b = NUM.subtract(data_goal[49:,], data_goal[48:-1,])\r\n # concatenate all time series\r\n ro_stream = NUM.concatenate([ro_6hr_a, ro_6hr_b]) * area_sqm_npoints\r\n #For data with High Resolution, from Hour 0 to 90 (the first 91 time points) are of 1 hr time interval,\r\n # then from Hour 90 to 144 (18 time points) are of 3 hour time interval, and from Hour 144 to 240 (16 time points)\r\n # are of 6 hour time interval\r\n ##########################################################\r\n # MJS The following should handle id_data = HRES13 and HRES136\r\n ##########################################################\r\n else:\r\n if in_time_interval == \"1hr\":\r\n #ro_stream = NUM.subtract(data_goal[1:91,],data_goal[:90,]) * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:1+time_size,],data_goal[:time_size,]) * area_sqm_npoints # Line Added/Modified CJB, MJS 20190108\r\n elif in_time_interval == \"3hr\": # MJS HRES 3hr not currently used\r\n # calculate time series of 3 hr data from 1 hr data\r\n ro_3hr_a = NUM.subtract(data_goal[3:91:3,],data_goal[:88:3,])\r\n # get the time series of 3 hr data\r\n #ro_3hr_b = NUM.subtract(data_goal[91:109,], data_goal[90:108,])\r\n ro_3hr_b = NUM.subtract(data_goal[91:91+time_size,], data_goal[90:90+time_size,]) # MJS modified again; seems no case for this, but just in case later... Line Added/Modified CJB 20190108\r\n # concatenate all time series\r\n ro_stream = NUM.concatenate([ro_3hr_a, ro_3hr_b]) * area_sqm_npoints\r\n elif in_time_interval == \"3hr_subset\":\r\n #use only the 3hr time interval\r\n #ro_stream = NUM.subtract(data_goal[91:109,], data_goal[90:108,]) * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[91:91+time_size,], data_goal[90:90+time_size,]) * area_sqm_npoints # MJS modified again; needs to handle HRES13 that might not have complete 3hr set... 
Line Added/Modified CJB 20190108\r\n elif in_time_interval == \"6hr_subset\":\r\n #use only the 6hr time interval\r\n ro_stream = NUM.subtract(data_goal[109:,], data_goal[108:-1,]) * area_sqm_npoints\r\n ######################################################\r\n # MJS Always assume this case will have a full ECMWF 240 \r\n # hour forecast to work with. It's actually never re-\r\n # quested by ecmwf_rapid_multiprocess anyhow.\r\n ######################################################\r\n else: # in_time_interval == \"6hr\"\r\n #arcpy.AddMessage(\"6hr\")\r\n # calculate time series of 6 hr data from 1 hr data\r\n ro_6hr_a = NUM.subtract(data_goal[6:91:6,], data_goal[:85:6,])\r\n # calculate time series of 6 hr data from 3 hr data\r\n ro_6hr_b = NUM.subtract(data_goal[92:109:2,], data_goal[90:107:2,])\r\n # get the time series of 6 hr data\r\n ro_6hr_c = NUM.subtract(data_goal[109:,], data_goal[108:-1,])\r\n # concatenate all time series\r\n ro_stream = NUM.concatenate([ro_6hr_a, ro_6hr_b, ro_6hr_c]) * area_sqm_npoints\r\n \r\n #remove negative values\r\n ro_stream[ro_stream<0] = 0\r\n data_temp[:,s] = ro_stream.sum(axis = 1)\r\n\r\n pointer += npoints\r\n\r\n\r\n '''Write inflow data'''\r\n print(\"Writing inflow data...\")\r\n var_m3_riv[:] = data_temp\r\n # close the input and output netcdf datasets\r\n data_in_nc.close()\r\n data_out_nc.close()" ]
[ "0.59763277", "0.58809626", "0.58501816", "0.58345604", "0.57742786", "0.57117915", "0.5656523", "0.5648311", "0.561094", "0.55820256", "0.5581089", "0.5577127", "0.5531285", "0.5526736", "0.5467026", "0.54665303", "0.54586935", "0.5431486", "0.5387424", "0.52717113", "0.52662605", "0.5262664", "0.525574", "0.5238192", "0.5234841", "0.5215541", "0.52139866", "0.52123547", "0.52047235", "0.52013916" ]
0.7855086
0
Check whether this DFA is equivalent to another DFA by traversing their cross product. If they are equivalent, returns None; otherwise, returns a counterexample (a word on which the two automata disagree).
def equivalence_with_counterexample(self, other):
    if self.is_word_in(tuple()) != other.is_word_in(tuple()):
        return tuple()

    cross_states = {(self.init_state, other.init_state): (tuple(), None)}
    to_check = [(self.init_state, other.init_state)]
    alphabet = self.alphabet

    while len(to_check) != 0:
        s1, s2 = to_check.pop(0)
        for l in alphabet:
            q1 = self.next_state_by_letter(s1, l)
            q2 = other.next_state_by_letter(s2, l)

            if (q1 in self.final_states) != (q2 in other.final_states):
                counter_example = tuple([l])
                q1, q2 = s1, s2

                while (q1 != self.init_state) | (q2 != other.init_state):
                    l, q1, q2 = cross_states.get((q1, q2))
                    counter_example = tuple([l]) + counter_example
                return counter_example

            if cross_states.get((q1, q2)) is None:
                to_check.append((q1, q2))
                cross_states.update({(q1, q2): (l, s1, s2)})
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iso_dfa(D1,D2):\n assert(is_consistent_dfa(D1)), \"Inconsist. DFA1 in iso_dfa\"\n assert(is_consistent_dfa(D2)), \"Inconsist. DFA2 in iso_dfa\"\n return (len(D1[\"Q\"]) == len(D2[\"Q\"]) and\n langeq_dfa(D1, D2))", "def langeq_dfa(D1, D2, gen_counterex=False):\n if D1[\"Sigma\"] != D2[\"Sigma\"]:\n print(\"The DFA cannot be compared, as their\", end=\"\")\n print(\" alphabets are different; namely:\")\n print(\"Sigma1 = \", D1[\"Sigma\"])\n print(\"Sigma2 = \", D2[\"Sigma\"])\n return False\n else:\n (eqStatus, lastAdd, cex_path) = h_langeq_dfa(D1[\"q0\"], D1,\n D2[\"q0\"], D2, \n Visited=dict({})) # was []\n if not eqStatus:\n if gen_counterex:\n print(\"The DFA are NOT language equivalent!\")\n print(\"Last added pair @ mismatch site is: \", lastAdd) # print msg changed\n print(\"All visited state pairs are\", cex_path)\n return eqStatus # True or False", "def is_language_not_subset_of(self, other):\n if self.is_word_in(tuple()) & (not other.is_word_in(tuple())):\n return tuple()\n\n cross_states = {(self.init_state, other.init_state): (tuple(), None)}\n to_check = [(self.init_state, other.init_state)]\n alphabet = self.alphabet\n\n while len(to_check) != 0:\n s1, s2 = to_check.pop(0)\n for l in alphabet:\n q1 = self.next_state_by_letter(s1, l)\n q2 = other.next_state_by_letter(s2, l)\n\n if (q1 in self.final_states) and (q2 not in other.final_states):\n counter_example = tuple([l])\n q1, q2 = s1, s2\n\n while (q1 != self.init_state) | (q2 != other.init_state):\n l, q1, q2 = cross_states.get((q1, q2))\n counter_example = tuple([l]) + counter_example\n return counter_example\n\n if cross_states.get((q1, q2)) is None:\n to_check.append((q1, q2))\n cross_states.update({(q1, q2): (l, s1, s2)})\n return None", "def is_equivalent(self, other):\n A = self.minimization().relabeled()\n [initial] = A.initial_states()\n address = {initial: ()}\n for v in A.digraph().breadth_first_search(initial.label()):\n state = A.state(v)\n state_address = address[state]\n for t in A.iter_transitions(state):\n if t.to_state not in address:\n address[t.to_state] = state_address + tuple(t.word_in)\n\n B = other.minimization().relabeled()\n labels = {B.process(path)[1].label(): state.label()\n for (state, path) in address.iteritems()}\n try:\n return A == B.relabeled(labels=labels)\n except KeyError:\n return False", "def eq_transitive(self, step):\n self.proof[step.seq_num] = ProofTerm(\"verit_and_rule\", step.concl)", "def __eq__(self, other):\n return self._dna == other._dna and \\\n self._exons == other._exons", "def IsEqualOrder(self,other):\n return self.InferPolynomialDegree() == other.InferPolynomialDegree()", "def test_CONTRADICTION():\n\tk, outputs = 2, [0,0,0,0]\n\t# Prime Implicants\n\ttrue_pi0s = set(['22'])\n\ttrue_pi1s = set([])\n\n\ttdt0, tdt1 = make_transition_density_tables(k=k, outputs=outputs)\n\tpi0s, pi1s = find_implicants_qm(tdt0) , find_implicants_qm(tdt1)\n\n\tassert (pi0s == true_pi0s) , ('Prime Implicants for 0 does not match. %s != %s' % (pi0s,true_pi0s))\n\tassert (pi1s == true_pi1s) , ('Prime Implicants for 1 does not match. %s != %s' % (pi1s,true_pi1s))\n\t# Two Symbols\n\ttrue_ts0s = [('22',[],[[0,1]])]\n\ttrue_ts1s = []\n\n\tts0s,ts1s = find_two_symbols_v2(k=k, prime_implicants=pi0s) , find_two_symbols_v2(k=k, prime_implicants=pi1s)\n\n\tassert (ts0s == true_ts0s) , ('Two Symbol for 0 does not match. %s != %s' % (ts0s,true_ts0s))\n\tassert (ts1s == true_ts1s) , ('Two Symbol for 1 does not match. 
%s != %s' % (ts1s,true_ts1s))", "def sameLinking(el_a, el_b, system_el2kbid, gold_el2kbid):\n\n sys_el_a_id = system_el2kbid[el_a]\n sys_el_b_id = system_el2kbid[el_b]\n gol_el_a_id = gold_el2kbid[el_a]\n gol_el_b_id = gold_el2kbid[el_b]\n\n if sys_el_a_id.startswith('NIL'): sys_el_a_id = 'NIL'\n if sys_el_b_id.startswith('NIL'): sys_el_b_id = 'NIL'\n if gol_el_a_id.startswith('NIL'): gol_el_a_id = 'NIL'\n if gol_el_b_id.startswith('NIL'): gol_el_b_id = 'NIL'\n\n #print system_el2kbid\n \n return sys_el_a_id == sys_el_b_id == gol_el_a_id == gol_el_b_id", "def isEquivalent(self, oth: 'StateNode') -> bool:\n a = [self.table[i][j] for i in self.state[0] for j in self.state[1]]\n b = [oth.table[i][j] for i in oth.state[0] for j in oth.state[1]]\n if len(a) != len(b):\n return False\n if len(a) < 1 or len(b) < 1 or len(a[0]) != len(b[0]):\n return False\n for i in range(len(a)):\n for j in range(len(a[0])):\n if a[i][j] != b[i][j]:\n return False\n return True", "def f_equal(*args):\n f = Equal(*args).factor()\n return f if f in B else f.factor()", "def __eq__(self, other) -> bool:\n # If the other is a self, and points and steps match (not necessarily in same order), then equal\n return isinstance(other, Construction) and self.steps_set == other.steps_set", "def min_dfa(D, state_name_mode='succinct', chatty=False): # Default state mode\n if (len(D[\"Q\"]) == 1): # Already minimal\n if(chatty):\n print(\"-> Your DFA is already minimal.\")\n return D\n else:\n # Build a dict of all state combinations of DFA.\n # Function state_combos also imparts a -1 for each state pair,\n # initializing the separation distance at -1. \n ht = dict(state_combos(list(D[\"Q\"])))\n \n # Mark final and non-final states to be 0-distinguishable.\n # This is achieved by putting a 0 against those state pairs.\n if (chatty):\n print(\"Separating final and non-final states (marking 0-distinguishable entries).\")\n \n sepFinNonFin(D, ht)\n \n if (chatty):\n print(\" The 0-distinguishable entries are:\")\n for k in ht.keys():\n if (ht[k]==0):\n print(\"States \", k[0],\" and \", k[1], \" are 0-distinguished.\")\n \n \n # Main fixpoint computation: Assigning distinguishability dist. \n #==============================================================\n ht = fixptDist(D, ht, chatty)\n \n if (chatty):\n print(\" \")\n print(\"Now, collecting equivalence-classes.\")\n \n # Pick out equivalent state-pairs, i.e. those that cannot be \n # distinguished. These are still with a \"-1\" in ht.\n ht_1 = [ stpair for (stpair, dist) in ht.items() if dist == -1 ]\n \n \n if (chatty):\n print(\" The equivalent pairs are:\")\n \n \n # Now form equivalence classes\n # what's returned is \n # [(rep_1, [all_eql_states_1]), (rep_2, [all_eql_states_2]),...]\n # which includes all equivalence classes of size 2 or more.\n rep_eqc = bash_eql_classes(ht_1)\n\n \n \n if (chatty):\n print(\" The merged equivalent classes and representative states are these:\")\n for eqc in rep_eqc:\n print(\"State \", eqc[0], \" represents the equivalent states \", eqc[1])\n \n \n \n # Now we have to deal with singleton equivalence classes. \n # These sit unmerged, OUTSIDE OF ALL (x,y) in ht_1\n # i.e. all the entries in ht_1 are PARTNERED STATE PAIRS. \n \n # If we now take D[\"Q\"] and subtract from it all those x and y\n # which are present in some pair in ht_1, we obtain completely\n # non-mergable states. These are states in their own eql. classes.\n \n # 1. 
Find all partnered states from ht_1\n Partnered_states = list({x for (x,y) in ht_1} |\n {y for (x,y) in ht_1})\n \n # 2. Now who is left un-partnered?\n List_of_self_only_eqlt_states = listminus(D[\"Q\"], Partnered_states) \n \n # 3. For these singletons, i.e. \"self-only equivalent states\", \n # they are self-representative. Form pairs that indicate this fact.\n rep_eqc_1 = [(x, [x]) for x in List_of_self_only_eqlt_states]\n \n # 4. OK now, we can combine the set of pairs where each pair is \n # (representative, [the list of equivalent states])\n # So finally we get the list of equivalence classes with \n # representatives which is of this form:\n # [(a0,[a0, a1, a2, a3, a4]), (b0,[b0, b1]), (c0,[c0]), ...] \n final_rep_eqc = rep_eqc + rep_eqc_1\n \n # We are now ready to build a DFA out of final_rep_eqc. \n # =====================================================\n \n # 1. First, form the set of minimized states, which are \n # state representatives.\n minQ = {x for (x,y) in final_rep_eqc}\n \n # 2. The Alpbahet remains the same.\n minSigma = D[\"Sigma\"]\n \n # 3. The starting state is the representative of D[\"q0\"]\n minq0 = q0_of(D[\"q0\"], final_rep_eqc)\n \n # 4. The final states are the representatives of the original\n # final states. This is computed by helper F_of.\n minF = F_of(D[\"F\"], final_rep_eqc)\n \n # 5. The transition relation of the minimized DFA is obtained\n # by the helper Delta_of\n minDelta = Delta_of(D[\"Delta\"], final_rep_eqc)\n \n # 6. We now need to rename the states if the user wants verbose \n # names (default is succinct). Verbose names are the name of \n # states in each equivalence class strung together sep by \"_\".\n if state_name_mode == 'verbose':\n # First build a state-renaming hash-table involving \n # mk_state_eqc_name\n state_rename_ht = { x : mk_state_eqc_name(y) \n for (x,y) in final_rep_eqc }\n \n minQ = { state_rename_ht[x] for x in minQ }\n minq0 = state_rename_ht[minq0]\n minF = { state_rename_ht[f] for f in minF }\n minDelta = { (state_rename_ht[x], y) : state_rename_ht[z] \n for ((x,y),z) in minDelta.items() }\n #\n # Return the finished (minimized) DFA!\n return mk_dfa(minQ, minSigma, minDelta, minq0, minF)", "def __eq__(left, right):\n if not is_FSMState(right):\n return False\n return left.label() == right.label()", "def test_cx_equivalence_1cx(self, seed=1):\n state = np.random.default_rng(seed)\n rnd = 2 * np.pi * state.random(size=12)\n\n qr = QuantumRegister(2, name=\"q\")\n qc = QuantumCircuit(qr)\n\n qc.u(rnd[0], rnd[1], rnd[2], qr[0])\n qc.u(rnd[3], rnd[4], rnd[5], qr[1])\n\n qc.cx(qr[1], qr[0])\n\n qc.u(rnd[6], rnd[7], rnd[8], qr[0])\n qc.u(rnd[9], rnd[10], rnd[11], qr[1])\n\n sim = UnitarySimulatorPy()\n unitary = execute(qc, sim).result().get_unitary()\n self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 1)\n self.assertTrue(Operator(two_qubit_cnot_decompose(unitary)).equiv(unitary))", "def judge(genA: typing.Iterator[int], genB: typing.Iterator[int], steps: int) -> int:\n res = 0\n for na, nb in it.islice(zip(genA, genB), steps):\n la, lb = lower16(na), lower16(nb)\n if la == lb:\n res += 1\n return res", "def test_XOR():\n\tk, outputs = 2, [0,1,1,0]\n\n\ttrue_pi0s = set(['00','11'])\n\ttrue_pi1s = set(['01','10'])\n\n\ttdt0, tdt1 = make_transition_density_tables(k=k, outputs=outputs)\n\tpi0s, pi1s = find_implicants_qm(tdt0) , find_implicants_qm(tdt1)\n\n\tassert (pi0s == true_pi0s) , ('Prime Implicants for 0 does not match. 
%s != %s' % (pi0s,true_pi0s))\n\tassert (pi1s == true_pi1s) , ('Prime Implicants for 1 does not match. %s != %s' % (pi1s,true_pi1s))\n\t# Two Symbols\n\ttrue_ts0s = [('11',[],[[0,1]]),('00',[],[[0,1]])]\n\ttrue_ts1s = [('10',[[0,1]],[])]\n\n\tts0s,ts1s = find_two_symbols_v2(k=k, prime_implicants=pi0s) , find_two_symbols_v2(k=k, prime_implicants=pi1s)\n\n\tassert (ts0s == true_ts0s) , ('Two Symbol for 0 does not match. %s != %s' % (ts0s,true_ts0s))\n\tassert (ts1s == true_ts1s) , ('Two Symbol for 1 does not match. %s != %s' % (ts1s,true_ts1s))", "def same(self, other, epsilon_=None):\n if epsilon_ is None:\n return self-other < epsilon\n else:\n return self-other < epsilon_", "def _eq(a, b):\n return (a - b) % 2 == 0", "def __eq__(self, other):\n if not isinstance(other, PantsMappingClass):\n # print(\"A\")\n return False\n # if other._pants_decomposition != self._pants_decomposition:\n # print(\"B\")\n # return False\n # print(\"C\")\n return (self * other.inverse()).is_identity()", "def is_equivalence(self) -> bool:", "def __eq__(self, other) -> bool:\r\n\t\treturn self.NextState is other", "def __eq__(self, other):\r\n return self.label == other.label and self.positive_state == other.positive_state", "def test_eq(self):\n assert self.app2 == self.app4\n assert self.app1 != self.app3", "def _are_assumed_equal(a, b, assumed_equivs):\n\n # Are we just assuming that the two are equal?\n equiv = (id(a), id(b))\n if equiv in assumed_equivs:\n return True\n\n # If we see these two again assume they're equal. If they're not then the\n # traversal will detect it.\n assumed_equivs.add(equiv)\n return False", "def jaccard_sim(set1, set2):\n set1, set2 = set(set1), set(set2)\n return len(set1 & set2) * 1.0 / len(set1 | set2)", "def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1", "def _test_associativity(self, **options):\n tester = self._tester(**options)\n S = tester.some_elements()\n from sage.misc.misc import some_tuples\n for x,y,z in some_tuples(S, 3, tester._max_runs):\n tester.assert_((x * y) * z == x * (y * z))", "def __eq__(self, other):\n\n if isinstance(other, (int, type(Zero()))):\n if other == 0:\n if self.args == []:\n return True\n else:\n return False\n\n frame = self.args[0][1]\n for v in frame:\n if expand((self - other) & v) != 0:\n return False\n return True", "def test_cx_equivalence_0cx(self, seed=0):\n state = np.random.default_rng(seed)\n rnd = 2 * np.pi * state.random(size=6)\n\n qr = QuantumRegister(2, name=\"q\")\n qc = QuantumCircuit(qr)\n\n qc.u(rnd[0], rnd[1], rnd[2], qr[0])\n qc.u(rnd[3], rnd[4], rnd[5], qr[1])\n\n sim = UnitarySimulatorPy()\n unitary = execute(qc, sim).result().get_unitary()\n self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 0)\n self.assertTrue(Operator(two_qubit_cnot_decompose(unitary)).equiv(unitary))" ]
[ "0.6204105", "0.5935996", "0.5769594", "0.5176918", "0.5175636", "0.5168933", "0.5157896", "0.5129171", "0.5082458", "0.5069077", "0.50641865", "0.5061806", "0.50514776", "0.503652", "0.50280046", "0.5011892", "0.5007164", "0.5002021", "0.4990112", "0.4943318", "0.49272498", "0.49239472", "0.4920213", "0.4918087", "0.4914208", "0.49057105", "0.48965767", "0.48951077", "0.48930502", "0.4886535" ]
0.6685296
0
Checks whether this DFA's language is a subset of the language of other (i.e., whether A \cap A'^c is empty) by traversing the cross product of self and the complement of other. If the language is a subset, returns None; otherwise, returns a counterexample (a word accepted by self but not by other).
def is_language_not_subset_of(self, other):
    if self.is_word_in(tuple()) & (not other.is_word_in(tuple())):
        return tuple()

    cross_states = {(self.init_state, other.init_state): (tuple(), None)}
    to_check = [(self.init_state, other.init_state)]
    alphabet = self.alphabet

    while len(to_check) != 0:
        s1, s2 = to_check.pop(0)
        for l in alphabet:
            q1 = self.next_state_by_letter(s1, l)
            q2 = other.next_state_by_letter(s2, l)

            if (q1 in self.final_states) and (q2 not in other.final_states):
                counter_example = tuple([l])
                q1, q2 = s1, s2

                while (q1 != self.init_state) | (q2 != other.init_state):
                    l, q1, q2 = cross_states.get((q1, q2))
                    counter_example = tuple([l]) + counter_example
                return counter_example

            if cross_states.get((q1, q2)) is None:
                to_check.append((q1, q2))
                cross_states.update({(q1, q2): (l, s1, s2)})
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def equivalence_with_counterexample(self, other):\n if self.is_word_in(tuple()) != other.is_word_in(tuple()):\n return tuple()\n\n cross_states = {(self.init_state, other.init_state): (tuple(), None)}\n to_check = [(self.init_state, other.init_state)]\n alphabet = self.alphabet\n\n while len(to_check) != 0:\n s1, s2 = to_check.pop(0)\n for l in alphabet:\n q1 = self.next_state_by_letter(s1, l)\n q2 = other.next_state_by_letter(s2, l)\n\n if (q1 in self.final_states) != (q2 in other.final_states):\n counter_example = tuple([l])\n q1, q2 = s1, s2\n\n while (q1 != self.init_state) | (q2 != other.init_state):\n l, q1, q2 = cross_states.get((q1, q2))\n counter_example = tuple([l]) + counter_example\n return counter_example\n\n if cross_states.get((q1, q2)) is None:\n to_check.append((q1, q2))\n cross_states.update({(q1, q2): (l, s1, s2)})\n return None", "def is_strict_subset(self, other):\n return self.is_subset(other) and self != other", "def __sub__(self, other: Seq) -> int:\n return sum(i != j for i, j in zip_longest(self.sequence, other.sequence))", "def cross(self, other):\n return self.scalar(other) == 0", "def __listsubseteq(self, c1, c2):\n s2 = {}\n for delta in c2:\n s2[delta] = 1\n\n for delta in c1:\n if not s2.has_key(delta):\n return 0\n\n return 1", "def iso_dfa(D1,D2):\n assert(is_consistent_dfa(D1)), \"Inconsist. DFA1 in iso_dfa\"\n assert(is_consistent_dfa(D2)), \"Inconsist. DFA2 in iso_dfa\"\n return (len(D1[\"Q\"]) == len(D2[\"Q\"]) and\n langeq_dfa(D1, D2))", "def matches(self, other):\n return ( all([i==j or i<0 or j<0 for i, j in zip(self._data, other._data)])\n and len(self._data) == len(other._data) )", "def lookahead_intersect(a, b):\n if a is None:\n return b\n elif b is None:\n return a\n elif a.positive:\n if b.positive:\n return LookaheadRule(a.set & b.set, True)\n else:\n return LookaheadRule(a.set - b.set, True)\n else:\n if b.positive:\n return LookaheadRule(b.set - a.set, True)\n else:\n return LookaheadRule(a.set | b.set, False)", "def subcontrary_with(self, other: 'Concept') -> bool:\n return (self._extent & other._extent\n and (self._extent | other._extent) == self.lattice.supremum._extent)", "def compare(self, other):\n return len(self & other) / max(len(self | other), 1)", "def contradicts(self, other, **kwargs):\n if isinstance(other, Not):\n return _coconut_tail_call(other.contradicts, self, **kwargs)\n else:\n return self == Not(other).simplify(**kwargs)", "def __le__(self, other):\n return self._is_subpolyhedron(other)", "def intersect(self, other): # type: (Term) -> Term\n if self.package != other.package:\n raise ValueError(\"{} should refer to {}\".format(other, self.package))\n\n if self.is_compatible_with(other):\n if self.is_positive() != other.is_positive():\n # foo ^1.0.0 ∩ not foo ^1.5.0 → foo >=1.0.0 <1.5.0\n positive = self if self.is_positive() else other\n negative = other if self.is_positive() else self\n\n to_return = self._non_empty_term(\n positive.constraint.difference(negative.constraint), True\n )\n elif self.is_positive():\n # foo ^1.0.0 ∩ foo >=1.5.0 <3.0.0 → foo ^1.5.0\n to_return = self._non_empty_term(\n self.constraint.intersect(other.constraint), True\n )\n else:\n # not foo ^1.0.0 ∩ not foo >=1.5.0 <3.0.0 → not foo >=1.0.0 <3.0.0\n to_return = self._non_empty_term(\n self.constraint.union(other.constraint), False\n )\n if to_return is not None:\n to_return._constraint._package = Package(\n str(\n parse_req(\n to_return.constraint.package.req.__str__(),\n extras=self.constraint.package.req.extras\n | 
other.constraint.package.req.extras,\n )\n )\n )\n to_return._package = self.constraint.package\n\n elif self.is_positive() != other.is_positive():\n to_return = self if self.is_positive() else other\n else:\n to_return = Term(Constraint(self.package, EmptyRange()), self.is_positive())\n\n return to_return", "def __eq__(self, other):\n\n if isinstance(other, (int, type(Zero()))):\n if other == 0:\n if self.args == []:\n return True\n else:\n return False\n\n frame = self.args[0][1]\n for v in frame:\n if expand((self - other) & v) != 0:\n return False\n return True", "def langeq_dfa(D1, D2, gen_counterex=False):\n if D1[\"Sigma\"] != D2[\"Sigma\"]:\n print(\"The DFA cannot be compared, as their\", end=\"\")\n print(\" alphabets are different; namely:\")\n print(\"Sigma1 = \", D1[\"Sigma\"])\n print(\"Sigma2 = \", D2[\"Sigma\"])\n return False\n else:\n (eqStatus, lastAdd, cex_path) = h_langeq_dfa(D1[\"q0\"], D1,\n D2[\"q0\"], D2, \n Visited=dict({})) # was []\n if not eqStatus:\n if gen_counterex:\n print(\"The DFA are NOT language equivalent!\")\n print(\"Last added pair @ mismatch site is: \", lastAdd) # print msg changed\n print(\"All visited state pairs are\", cex_path)\n return eqStatus # True or False", "def __ge__(self, other):\n return other._is_subpolyhedron(self)", "def LCA(self, value1, value2):\n contain1 = self.contains(value1)\n contain2 = self.contains(value2)\n\n## for node in self.graph:\n## print(node.value)\n \n if contain1 and contain2:\n #if both values are in the graph the LCA is checked\n return lowest(self.graph, value1, value2)\n\n elif contain1 is False and contain2:\n return str(value1) + \" is not in the graph\"\n\n elif contain2 is False and contain1:\n return str(value2) + \" is not in the graph\"\n else:\n return str(value1) + \" and \" + str(value2) + \" are not in the graph\"", "def is_subset(self, other):", "def includes(self, other):\n for char, qty in other.chars.items():\n if self.chars.get(char, 0) < qty:\n return False\n return True", "def is_proper_subset(self, other):\n if not isinstance(other, SetPy):\n raise TypeError(\"Can only be proper subset of another SetPy\")\n return self.is_subset(other) and not self == other", "def __gt__(self, other):\n return other._is_subpolyhedron(self) and not self._is_subpolyhedron(other)", "def __le__(self, other):\n try:\n return self.length2 <= other.length2\n except AttributeError:\n return assert_unorderable(self, other)", "def is_subset(self, other):\n \n for element in self:\n if element not in other:\n return False\n\n return True", "def is_proper_subset(self, other):\n if isinstance(other, Set):\n return self != other and self.is_subset(other)\n else:\n raise ValueError(\"Unknown argument '%s'\" % other)", "def compare(self, other):\n # First, compare sections\n if (self.section != \"\" or other.section != \"\") and self.section != other.section:\n if self.section == \"\" and other.section != \"\":\n return -1\n elif self.section != \"\" and other.section == \"\":\n return 1\n else:\n if self.section > other.section:\n return 1\n else:\n return -1\n\n # Next, compare topics\n if self.topic != other.topic:\n stopic = _split(self.topic)\n otopic = _split(other.topic)\n if stopic[0] != otopic[0]:\n if stopic[0] > otopic[0]:\n return 1\n else:\n return -1\n if float(stopic[1]) > float(otopic[1]):\n return 1\n else:\n return -1\n\n # Then sub-topics\n if self.sub_topic != other.sub_topic:\n result = _compare(self.sub_topic, other.sub_topic)\n if result != 0:\n return result\n\n # Then cutters\n if self.cutter != 
other.cutter:\n result = _compare(self.cutter, other.cutter)\n if result != 0:\n return result\n\n # Then normal after-effects in V-Y-O-C priority\n if self.version != other.version:\n if self.version > other.version:\n return 1\n return -1\n\n if self.year != other.year:\n if self.year > other.year:\n return 1\n return -1\n\n # We must take the work letter into account\n if self.work_letter != other.work_letter:\n if self.work_letter > other.work_letter:\n return 1\n return -1\n\n # If any unknown additions are present, try to guess at those.\n if self.other != other.other:\n # TODO: Try to guess numbers vs words and such\n if self.other > other.other:\n return 1\n return -1\n\n # Copy is always evaluated last\n if self.copy != other.copy:\n if self.copy > other.copy:\n return 1\n return -1\n\n return 0 # All else fails, we must be equal.", "def __eq__(self, other):\n s = len(self)\n r = len(other)\n\n if s != r:\n raise(VetorError, \"Vetor dimensions are not equal\")\n\n # Two vectors are numericaly the same if the difference\n # between both of them are smaller than given precisao\n for i in range(s):\n if not comozero(self[i] - other[i]):\n return False\n\n return True", "def __le__(self, other: Compound[Scalar]) -> bool:\n return (self._points_set <= other._points_set\n if isinstance(other, Multipoint)\n else NotImplemented)", "def commutes_with(self, other):\n a = self.array_form\n b = other.array_form\n if len(a) != len(b):\n raise ValueError(\"The number of elements in the permutations \\\ndon\\'t match.\")\n for i in range(len(a)-1):\n if a[b[i]] != b[a[i]]:\n return False\n return True", "def __isub__(self, other):\n self.components = [c1 - c2 for (c1, c2) in zip(self.components, other.components)]\n return self", "def __eq__(self, other):\n if isinstance(other, DenseUnit):\n return (Counter(self.dimension) == Counter(other.dimension) and Counter(self.points) == Counter(\n other.points))\n return False" ]
[ "0.6220115", "0.58154976", "0.5815043", "0.57204807", "0.54974604", "0.54746324", "0.5466859", "0.5449294", "0.5417679", "0.5412426", "0.54014486", "0.5397089", "0.5386101", "0.5358133", "0.53466886", "0.5341275", "0.52965057", "0.5286825", "0.52840143", "0.5269786", "0.5269438", "0.52555877", "0.52469254", "0.5234017", "0.52318174", "0.5220732", "0.5211769", "0.5205845", "0.5196153", "0.5192114" ]
0.7660876
0
Creates a customized Draft4ExtendedValidator.
def from_resolver(cls, spec_resolver):
    spec_validators = cls._get_spec_validators(spec_resolver)
    return validators.extend(Draft4Validator, spec_validators)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_validator(schema,etd):\n #Major version\n major_version=int(jsonschema.__version__.split('.')[0])\n if major_version < 4:\n #The easy way\n return ValidatorClass(schema,types=etd)\n else:\n #The hard way\n #Create the extra types functions dictionary\n etd_funcs={name:create_checker_func(typs) for name,typs in etd.items()}\n #Create the type checker\n type_checker = ValidatorClass.TYPE_CHECKER.redefine_many(etd_funcs)\n #Create the validator class\n CustomValidator = jsonschema.validators.extend(ValidatorClass, type_checker=type_checker)\n #Return the validator\n return CustomValidator(schema=schema)", "def xsd_validator(self):\n return XSDValidator(self.schema)", "def get_validator(self):\n return self.get_validator_class()(**self.get_validator_kwargs())", "def __init__(self,validator):\n self.validator = validator", "def definition_validator(request):\n return validator(request, DefinitionValidator())", "def generate_validator(self, t, **kwargs):\n def validator(val, field_name=''):\n if val is None and 'required' in kwargs and not kwargs['required']:\n return True\n elif val is None:\n raise ValidationError('%s: None is not allowed (field required)' % field_name)\n if not isinstance(val, t):\n raise ValidationError('%s: \"%s\" not an instance of %s but an instance of %s' %\n (field_name, val, t, type(val)))\n if isinstance(val, dict):\n check_keys(val) # check against . & $ in keys\n return True\n return validator", "def create_draft(self):\n return Draft(self)", "def createEditor(self, parent, options, midx):\n ledit = qt.QLineEdit(parent)\n rx = qtc.QRegExp()\n rx.setPattern(\"\\\\S{0,8}\");\n validator = qt.QRegExpValidator(rx, ledit)\n ledit.setValidator(validator)\n return ledit", "def fields_validator():\n\n return validator.BrewerySchema()", "def validator_instance(self):\n return CodingDNADeletion(*self.params)", "def validator_for(context_fn):\n\n def validator_for_decor(validator_fn):\n # Yes, this doesn't return a function! 
However, a Validator instance is\n # callable, so this is fine :)\n # See: https://stackoverflow.com/a/20791175 (and the other answers)\n return Validator(context_fn, validator_fn)\n return validator_for_decor", "def createEditor(self, parent, options, midx):\n ledit = qt.QLineEdit(parent)\n vmin, vmax = self._vrange\n dnb = self._decimals_nb\n ledit.setValidator(ValueValidator(vmin, vmax, dnb, ledit))\n return ledit", "def __custom_tpcp_validation__(cls):\n return", "def validator_instance(self):\n return CodingDNASubstitution(*self.params)", "def validator(self) -> DataValidator:\n if self._validator is None:\n self._validator = JsonSchemaDataValidator(self.schemaview.schema)\n return self._validator", "def clone(self):\n return _libsbml.SBMLExternalValidator_clone(self)", "def validates(version):\r\n\r\n def _validates(cls):\r\n validators[version] = cls\r\n if u\"id\" in cls.META_SCHEMA:\r\n meta_schemas[cls.META_SCHEMA[u\"id\"]] = cls\r\n return cls\r\n return _validates", "def get_validator_class(self):\n return self.validator_class", "def clone(self):\n return _libsbml.SBMLValidator_clone(self)", "def get_validator(cls):\n cls.validator.model = cls\n return cls.validator or SageValidator", "def test_Validator_subclassing(self):\n\n with self.assertWarns(DeprecationWarning) as w:\n class Subclass(validators.Draft202012Validator):\n pass\n\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\"Subclassing validator classes is \"),\n )\n\n with self.assertWarns(DeprecationWarning) as w:\n class AnotherSubclass(validators.create(meta_schema={})):\n pass", "def __validate():\n # TODO: implement", "def build_schema(schema):\n annotated_schema = {\"$schema\": \"http://json-schema.org/schema#\", **schema}\n jsonschema.Draft7Validator.check_schema(annotated_schema)\n return jsonschema.Draft7Validator(annotated_schema)", "def validator(self):\n return self._validator", "def __validate__(self):", "def validate(cls, **kwargs: Any) -> None: # pragma no cover", "def __init__(self, *args):\n this = _libsbml.new_SBMLExternalValidator(*args)\n try: self.this.append(this)\n except: self.this = this", "def _validate(self, instance, value):", "def creation_validation(**_):\n return Instance().creation_validation()", "def make_generic_v1_field_validator(validator: V1Validator) -> core_schema.FieldValidatorFunction:\n sig = signature(validator)\n\n needs_values_kw = False\n\n for param_num, (param_name, parameter) in enumerate(sig.parameters.items()):\n if can_be_keyword(parameter) and param_name in ('field', 'config'):\n raise PydanticUserError(\n 'The `field` and `config` parameters are not available in Pydantic V2, '\n 'please use the `info` parameter instead.',\n code='validator-field-config-info',\n )\n if parameter.kind is Parameter.VAR_KEYWORD:\n needs_values_kw = True\n elif can_be_keyword(parameter) and param_name == 'values':\n needs_values_kw = True\n elif can_be_positional(parameter) and param_num == 0:\n # value\n continue\n elif parameter.default is Parameter.empty: # ignore params with defaults e.g. 
bound by functools.partial\n raise PydanticUserError(\n f'Unsupported signature for V1 style validator {validator}: {sig} is not supported.',\n code='validator-v1-signature',\n )\n\n if needs_values_kw:\n # (v, **kwargs), (v, values, **kwargs), (v, *, values, **kwargs) or (v, *, values)\n val1 = cast(V1ValidatorWithValues, validator)\n\n def wrapper1(value: Any, info: core_schema.FieldValidationInfo) -> Any:\n return val1(value, values=info.data)\n\n return wrapper1\n else:\n val2 = cast(V1OnlyValueValidator, validator)\n\n def wrapper2(value: Any, _: core_schema.FieldValidationInfo) -> Any:\n return val2(value)\n\n return wrapper2" ]
[ "0.6791567", "0.58201903", "0.56023145", "0.5236828", "0.5219821", "0.5214271", "0.51001966", "0.50732434", "0.49859777", "0.49850696", "0.49794722", "0.4957833", "0.49058315", "0.48873714", "0.487872", "0.487541", "0.48698068", "0.48452082", "0.4801144", "0.47913477", "0.47865587", "0.4747822", "0.47339645", "0.46738508", "0.46727663", "0.46510988", "0.46233726", "0.4616095", "0.4611711", "0.45898423" ]
0.6396528
1
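For context on the record above: validators.extend(Draft4Validator, ...) is the standard extension hook of the jsonschema library. A minimal, self-contained sketch of that underlying API follows; the "x-custom" keyword and its check are invented placeholders, not part of the record's codebase, and the record's own wiring of resolver-aware validators may differ.

from jsonschema import Draft4Validator, ValidationError, validators

def x_custom(validator, value, instance, schema):
    # Invented placeholder keyword: when set, require the instance to be an object.
    if value and not isinstance(instance, dict):
        yield ValidationError("%r is not an object" % (instance,))

ExtendedValidator = validators.extend(Draft4Validator, {"x-custom": x_custom})
ExtendedValidator({"x-custom": True}).validate({"a": 1})   # passes
# ExtendedValidator({"x-custom": True}).validate(5)        # would raise ValidationError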
Creates a JSON document validator from the spec resolver.
def create(self, spec_resolver):
    validator_cls = self.spec_validator_factory.from_resolver(
        spec_resolver)
    return validator_cls(
        self.schema, resolver=self.schema_resolver)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_spec_resolver(cls, spec_resolver):\n deref = DerefValidatorDecorator(spec_resolver)\n for key, validator_callable in iteritems(cls.validators):\n yield key, deref(validator_callable)", "def from_resolver(cls, spec_resolver):\n spec_validators = cls._get_spec_validators(spec_resolver)\n return validators.extend(Draft4Validator, spec_validators)", "def test_uses_json_decoder(schema_builder, json_spec):\n builder = ExtractorBuilder(schema_builder)\n extractor = builder.build_param_extractor(json_spec)\n assert isinstance(extractor.decoder, JSONDecoder)\n extractor = builder.build_body_extractor({'content': json_spec['content']})\n assert isinstance(extractor.decoder, JSONDecoder)", "def init_validator(schema,etd):\n #Major version\n major_version=int(jsonschema.__version__.split('.')[0])\n if major_version < 4:\n #The easy way\n return ValidatorClass(schema,types=etd)\n else:\n #The hard way\n #Create the extra types functions dictionary\n etd_funcs={name:create_checker_func(typs) for name,typs in etd.items()}\n #Create the type checker\n type_checker = ValidatorClass.TYPE_CHECKER.redefine_many(etd_funcs)\n #Create the validator class\n CustomValidator = jsonschema.validators.extend(ValidatorClass, type_checker=type_checker)\n #Return the validator\n return CustomValidator(schema=schema)", "def test_get_json_spec(self):\n pass", "def validate(validator, document):\n try:\n validator.validate(document)\n except jsonschema.ValidationError as ex:\n raise wsgi_errors.HTTPBadRequestBody(\n '{0}: {1}'.format(ex.args, ex.message)\n )", "def test_json():\n schemas = {\n 'schema-languages': 'bible/languages.json',\n 'schema-book-metadata': 'bible/book-metadata.json',\n 'schema-bible': 'bible/bible-*.json'\n }\n for schema_name, data_path_glob in schemas.items():\n schema_path = 'schemas/{}.json'.format(schema_name)\n with open(schema_path) as schema_file:\n schema = json.load(schema_file)\n data_paths = glob.iglob(data_path_glob)\n for data_path in data_paths:\n with open(data_path) as data_file:\n data = json.load(data_file)\n yield jsonschema.validate, data, schema", "def test_metadata_schema_json_valid(mock_irods):\n\n schema_file_path = 'pytest/assets/mi_schema.json'\n with open(schema_file_path, 'r') as file_obj:\n json_schema = file_obj.read()\n assert len(json_schema) > 0\n form_data = {\"mp_program_type\": \"Test Model Program\", \"mi_json_schema\": json_schema}\n metadata_validation_form = ModelProgramMetadataValidationForm(data=form_data)\n assert metadata_validation_form.is_valid()", "def validate_against_schema(self, json_doc):\n if self.uri not in self.se.validation:\n raise RuntimeError(\"$validation is not defined for {} field; thus the json document could not be validated\".format(self.name))\n else:\n validate(json_doc, self.se.validation[self.uri])\n print('The JSON document is valid')", "def test_validators():", "def swagger_content_validator(spec_body):\n version = spec_body['swagger']\n if version.startswith('1'):\n return 'Deprecated Swagger version. 
Please visit http://swagger.io for information on upgrading to Swagger 2.0'\n\n with open(config.JSON_SCHEMA) as schema:\n swagger_schema = json.loads(schema.read())\n\n try:\n validate(spec_body, swagger_schema)\n except ValidationError as ex:\n return str(ex)\n\n return 'OK'", "def validate(validators, spec, schema=None):\n\n for validator in validators:\n global_context = {\"spec\": spec}\n if schema is not None:\n global_context.update({\"schema\": schema})\n\n # The partial no longer takes global_context\n validator(partial(validator.context_fn, global_context))", "def docutils_sucks(spec):\r\n\r\n base_url = VALIDATION_SPEC\r\n ref_url = \"http://json-schema.org/latest/json-schema-core.html#anchor25\"\r\n schema_url = \"http://json-schema.org/latest/json-schema-core.html#anchor22\"\r\n\r\n def validator(name, raw_text, text, lineno, inliner):\r\n \"\"\"\r\n Link to the JSON Schema documentation for a validator.\r\n\r\n :argument str name: the name of the role in the document\r\n :argument str raw_source: the raw text (role with argument)\r\n :argument str text: the argument given to the role\r\n :argument int lineno: the line number\r\n :argument docutils.parsers.rst.states.Inliner inliner: the inliner\r\n\r\n :returns: 2-tuple of nodes to insert into the document and an iterable\r\n of system messages, both possibly empty\r\n\r\n \"\"\"\r\n\r\n if text == \"$ref\":\r\n return [nodes.reference(raw_text, text, refuri=ref_url)], []\r\n elif text == \"$schema\":\r\n return [nodes.reference(raw_text, text, refuri=schema_url)], []\r\n\r\n xpath = \"//h3[re:match(text(), '(^|\\W)\\\"?{0}\\\"?($|\\W,)', 'i')]\"\r\n header = spec.xpath(\r\n xpath.format(text),\r\n namespaces={\"re\": \"http://exslt.org/regular-expressions\"},\r\n )\r\n\r\n if len(header) == 0:\r\n inliner.reporter.warning(\r\n \"Didn't find a target for {0}\".format(text),\r\n )\r\n uri = base_url\r\n else:\r\n if len(header) > 1:\r\n inliner.reporter.info(\r\n \"Found multiple targets for {0}\".format(text),\r\n )\r\n uri = base_url + \"#\" + header[0].getprevious().attrib[\"name\"]\r\n\r\n reference = nodes.reference(raw_text, text, refuri=uri)\r\n return [reference], []\r\n\r\n return validator", "def __init__(self, doc: Dict, validate: Optional[bool] = True):\n self.doc = doc\n if validate:\n jsonschema.validate(instance=doc, schema=DESCRIPTOR_SCHEMA)", "def test_schema_validation():\n resolver = RefResolver.from_schema(j, store={\"definitions\": j})\n schema_definitions = j[\"definitions\"]\n validation_models = root_dir / \"validation\" / \"models.yaml\"\n validation_tests = yaml.load(open(validation_models), Loader=yaml.SafeLoader)\n for cls, tests in validation_tests.items():\n for t in tests:\n validate(instance=t[\"in\"],\n schema=schema_definitions[cls],\n resolver=resolver)", "def fields_validator():\n\n return validator.BrewerySchema()", "def validator(self) -> DataValidator:\n if self._validator is None:\n self._validator = JsonSchemaDataValidator(self.schemaview.schema)\n return self._validator", "def test_json_syntax(self):\n\n base = abs_path('./specs/')\n ps = Parser(base+'script3-1.py', base)\n\n # empty - should parse\n spec = {}\n read_wrapper(spec, ps)\n\n # empty array - should parse\n spec = {'constraints': []}\n read_wrapper(spec, ps)\n\n # empty element - should fail\n spec = {'constraints': [{}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # no matching block - should fail\n spec = {'constraints': [{'block': 'a'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, 
ps)\n\n # no matching variable - should fail\n spec = {'constraints': [{'variable': 'c'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # loner option - should fail\n spec = {'constraints': [{'option': 'a1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # loner block - should parse\n spec = {'constraints': [{'block': 'A', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # block and option - should parse\n spec = {'constraints': [{'block': 'A', 'option': 'a1', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # variable and option - should parse\n spec = {'constraints': [{'variable': 'a', 'option': '2.5', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # weird option - should parse\n # fixme: {'option': '[1,2]'} will fail\n spec = {'constraints': [{'variable': 'c', 'option': '[1, 2]', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # variables in condition do not match - should fail\n spec = {'constraints': [{'block': 'A', 'condition': 'H==b1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # variables in condition do not match - should fail\n spec = {'constraints': [{'block': 'A', 'condition': 'H.index==1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)", "def resolver():\n if RESOLVER:\n return RESOLVER\n path = str(pathlib.Path(__file__).parents[1].joinpath(\"schema\", \"app.json\"))\n with open(path) as stream:\n schema = json.load(stream)\n globals()[\"RESOLVER\"] = RefResolver(\n \"https://schema.timeflux.io/app.json\", None\n ).from_schema(schema)\n return RESOLVER", "def validate(self, config_json):\n pass", "def fetch_validators(self):\n return self.fetch('/validators')", "def build_schema(schema):\n annotated_schema = {\"$schema\": \"http://json-schema.org/schema#\", **schema}\n jsonschema.Draft7Validator.check_schema(annotated_schema)\n return jsonschema.Draft7Validator(annotated_schema)", "def check(self, manager):\n for all_json in self.api_dir.rglob(ALL_JSON):\n stem = all_json.relative_to(self.api_dir)\n yield dict(\n name=f\"validate:translation:{stem}\",\n doc=f\"Validate {stem} with the JupyterLab Translation API\",\n file_dep=[all_json],\n actions=[(self.validate_one_json_file, [None, all_json])],\n )", "def read_simulator_specs(path_or_url: str, patch: dict = None, validate: bool = True):\n if os.path.isfile(path_or_url):\n with open(path_or_url, 'r') as file:\n try:\n specs: JSONType = json.load(file)\n except json.JSONDecodeError as error: # Change to simplejson call like below?\n raise ValueError(''.join([\n 'Simulator specifications from {} could not be parsed. '.format(path_or_url),\n 'Specifications must be encoded into JSON.\\n\\n {}'.format(str(error).replace('\\n', '\\n ')),\n ]))\n\n else:\n # download specifications\n response: requests.Response = requests.get(path_or_url)\n try:\n response.raise_for_status()\n except requests.RequestException as error:\n raise requests.RequestException('Simulator specifications could not be retrieved from {}.\\n\\n {}'.format(\n path_or_url, str(error).replace('\\n', '\\n ')))\n\n # check that specifications is valid JSON\n try:\n specs = response.json()\n with open(\"specifications.json\", \"a\") as specFile:\n print(complexjson.dumps(specs), file=specFile)\n\n except simplejson.errors.JSONDecodeError as error:\n raise ValueError(''.join([\n 'Simulator specifications from {} could not be parsed. 
'.format(path_or_url),\n 'Specifications must be encoded into JSON.\\n\\n {}'.format(str(error).replace('\\n', '\\n ')),\n ]))\n\n # apply patch\n if patch:\n patch_dict(specs, patch)\n\n # validate specifications\n if validate:\n api_endpoint: str = get_config().BIOSIMULATORS_API_ENDPOINT\n response: requests.Request = requests.post('{}simulators/validate'.format(api_endpoint), json=specs)\n intro_failure_msg = ''.join([\n \"The simulator specifications from `{}` are invalid. \".format(path_or_url),\n \"The specifications of simulation tools must adhere to BioSimulators' schema. \",\n \"BioSimulators' schema is available in both JSON Schema and Open API Specifications formats. \",\n \"Documentation is available at {}.\".format(api_endpoint)\n ])\n validate_biosimulations_api_response(response, intro_failure_msg, ValueError)\n\n # return validated specifications\n return specs", "def validator(data_json):\n fields = spec[\"fields\"]\n data = json.loads(data_json, object_pairs_hook=collections.OrderedDict)\n for k, v in fields.items():\n if v.get(\"required\"):\n found = False\n if k in data:\n found = True\n elif \".\" in k:\n # Dotted keys could be nested, like ecs.version\n subkeys = k.split(\".\")\n subval = data\n for subkey in subkeys:\n subval = subval.get(subkey, {})\n if subval:\n found = True\n if not found:\n raise ValidationError(\"Missing required key {}\".format(k))\n if k in data:\n if v[\"type\"] == \"string\" and not (\n isinstance(data[k], str) or isinstance(data[k], basestring)\n ):\n raise ValidationError(\n \"Value {0} for key {1} should be string, is {2}\".format(\n data[k], k, type(data[k])\n )\n )\n if v[\"type\"] == \"datetime\":\n try:\n datetime.datetime.strptime(data[k], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n except ValueError:\n raise ValidationError(\n \"Value {0} for key {1} doesn't parse as an ISO datetime\".format(\n data[k], k\n )\n )\n if v.get(\"index\") and list(data.keys())[v.get(\"index\")] != k:\n raise ValidationError(\"Key {0} is not at index {1}\".format(k, index))\n\n return data_json", "def create_from_json(cls, config_json: str) -> 'ResolverOp':\n return cls.create(**json_utils.loads(config_json))", "def validate_json(self):\n pass", "def __init__(self,validator):\n self.validator = validator", "def test_metadata_schema_json_templates(mock_irods):\n\n template_path = settings.MODEL_PROGRAM_META_SCHEMA_TEMPLATE_PATH\n template_path = os.path.join(template_path, \"*.json\")\n template_exists = False\n for schema_template in glob.glob(template_path):\n template_exists = True\n form_data = {\"mp_program_type\": \"Test Model Program\", \"mi_json_schema_template\": schema_template}\n metadata_validation_form = ModelProgramMetadataValidationForm(data=form_data)\n assert metadata_validation_form.is_valid()\n if not template_exists:\n pytest.fail(\"No metadata schema templates found\")", "def test_register_openapi_spec_invalid_yaml_json():\n app = App(__name__)\n spec_configs = [SpecConfig(path=PATH_SPECS_INVALID_JSON)]\n with pytest.raises(YAMLError):\n register_openapi(app=app, specs=spec_configs)" ]
[ "0.6038106", "0.59799147", "0.5902257", "0.57491654", "0.5429676", "0.54261833", "0.5403806", "0.5362911", "0.53564113", "0.53234047", "0.53168976", "0.5265585", "0.52480936", "0.52251613", "0.5189247", "0.5176584", "0.5172658", "0.5112875", "0.5091649", "0.5087997", "0.50809884", "0.5064016", "0.50595355", "0.5040394", "0.5030283", "0.5025399", "0.50019735", "0.5001774", "0.49670824", "0.49451354" ]
0.61859643
0
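
Aside (illustration only, not data from the row above): the query/document pair describes building a jsonschema validator whose $ref lookups go through a spec resolver. The record's spec_validator_factory and schema_resolver belong to its own codebase; the sketch below only shows the same general pattern with the public jsonschema API, using a toy spec and schema invented for the example.

from jsonschema import Draft4Validator, RefResolver

# Toy spec whose definitions are reached via $ref from a separate schema.
spec = {
    "definitions": {
        "name": {"type": "string", "minLength": 1}
    }
}

schema = {
    "type": "object",
    "properties": {"name": {"$ref": "#/definitions/name"}},
    "required": ["name"],
}

# Resolver that dereferences $ref against the spec document.
spec_resolver = RefResolver.from_schema(spec)

# Same shape as the record's create(): a validator class plus a resolver.
validator = Draft4Validator(schema, resolver=spec_resolver)

validator.validate({"name": "ok"})    # passes
# validator.validate({"name": ""})    # would raise jsonschema.ValidationError

(Newer jsonschema releases deprecate RefResolver in favor of referencing registries, but it still works for a sketch like this.)
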
get state of all known nodes of the cluster
def cluster_state(self):
    for ip in set([status.ip for status in self.cluster_status]):
        yield self.node_state(ip)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nodes(self):\n all_nodes = json.loads(self.sys_info.response).get('nodes_info')\n online_nodes = [node for node in all_nodes if node[\"infos\"][\"has_error\"] is False]\n return online_nodes", "def nodes(self) -> Sequence['outputs.GetClusterShardNodeResult']:\n return pulumi.get(self, \"nodes\")", "def get_nodes(self):\n self.get_status()\n old_api = self.version[0] <= '3'\n if old_api:\n certs_path = \"%s/certificate_statuses/*\" % (self.environment)\n nodeinfo_path_tpl = \"{env}/node/{node}\"\n else:\n certs_path = \"puppet-ca/v1/certificate_statuses/no_key?environment=%s\" % (self.environment)\n nodeinfo_path_tpl = \"puppet/v3/node/{node}?environment={env}\"\n\n csts = self._send('GET', certs_path)\n nodes_names = []\n for cst in csts:\n nodes_names.append(cst['name'])\n\n all_nodes = []\n for nname in nodes_names:\n path = nodeinfo_path_tpl.format(node=nname, env=self.environment)\n nodeinfo = self._send('GET', path)\n if old_api:\n nodeinfo = self._from_pson(nodeinfo['data'])\n else:\n nodeinfo = self._from_pson(nodeinfo)\n if 'parameters' in nodeinfo:\n node = nodeinfo['parameters']\n if self.onlynodes:\n if not (node.get('hostname') in self.onlynodes or\n node.get('ipaddress') in self.onlynodes or\n node.get('fqdn') in self.onlynodes or\n node.get('uuid') in self.onlynodes):\n continue\n all_nodes.append(node)\n\n return all_nodes", "def getClusterStatus(self):\n data = self.connect('get','cluster/status', None)\n return data", "def getNodes(self):\n return self.__allNodes", "def get_nodes(self):\n try:\n return list(self._adjacency_list.keys())\n except Exception as error:\n print(f'An error occurred: {error}')", "def _get_cluster_list(self):\n return self.__cluster_list", "def get_nodes():\n nodes_config_file = Settings.CONF_NODES_FILE\n current_nodes = load_node_names(nodes_config_file)\n\n return current_nodes", "def node_statuses(self) -> pulumi.Output[Sequence['outputs.NodeBalancerConfigNodeStatus']]:\n return pulumi.get(self, \"node_statuses\")", "def known_nodes(self) -> List[Client]:\n return list(self.in_memory_client_registry.values())", "def get_nodes(self):\n return requests.get(self.__url + 'nodes').json()", "def get_node_list(self):\n logger.debug('Updating node list')\n self.subscribe_mqtt('/nodes/+/responses/ping')\n self.node_ids = []\n\n def on_response(payload, data):\n if data and data.get('node', None):\n node_id = data['node']\n logger.debug('Found node with ID \"%s\"' % node_id)\n\n if node_id not in self.node_ids:\n self.node_ids.append(node_id)\n\n return False\n\n self.publish_mqtt('/ping', on_response=on_response)\n time.sleep(self.timeout / 1000)\n\n return self.node_ids", "def state_nodes(self) -> np.ndarray:\n return np.array([[nd[c] for c in [\"alive\", \"infected\", \"immune\", \"isolated\", \"masked\"]]\n for nv, nd in self.g_.nodes.data()])", "def get_all_nodes(self):\n # NOTE: return copy, so no one will screw\n # our list?\n return self.nodes", "def list_nodes(self):\n return self.ironic_client.node.list()", "def get_all_nodes(self):\n return self._get_all_nodes()", "def nodes(self) -> Optional[Sequence['outputs.ClusterShardNode']]:\n return pulumi.get(self, \"nodes\")", "def list_nodes(self):\n return self.datanodes.keys()", "def cluster_nodes(self) -> ResponseT:\n return self.execute_command(\"CLUSTER NODES\")", "def get_nodes():\n with session_for_read() as session:\n res = session.query(\n model.Node\n ).order_by(\n model.Node.started_at.desc()\n )\n return [model.Node(uuid=entry.uuid, version_id=entry.version_id,\n state=entry.state, 
started_at=entry.started_at,\n finished_at=entry.finished_at, error=entry.error,\n manage_boot=entry.manage_boot)\n for entry in res.all()]", "def test_get_node_state(self):\n pass", "def nodes(self):\n # TODO(sahid): I copied that part from legacy. That part may\n # be wrong, cassandra has a principle of auto-discovery. We\n # could pass only one server's address to server_list,\n # cassandra will still automatically discovers its peers.\n return len(self._server_list)", "def knownNodes(disco, service, environment=\"sandbox\"):\n return disco.knownNodes(service, _parseEnvironment(environment))", "def get_available_nodes(self, refresh=False):\n # TODO(preethipy): Refresh parameter should be handled to fetch\n # updated nodenames\n LOG.debug(\"get_available_nodes return node %(cpcsubset_name)s\" %\n {'cpcsubset_name': self._host.properties[\n \"hypervisor_hostname\"]})\n nodenames = [self._host.properties[\"hypervisor_hostname\"]]\n\n return nodenames", "def all_info_fetched(self, ctxt):\n # print \"ControllerManager: num of nodes' info is \", len(self._compute_node_info.get_node_info()) \n return self._compute_node_info.check_node_nums()", "def getNodes(self, deg, state=None):\n __node_ids = list()\n for __node_id, __node_obj in self.nodes.iteritems():\n if __node_obj.getDegree() == deg:\n if state is None:\n __node_ids.append(__node_id)\n elif __node_obj.state == state:\n __node_ids.append(__node_id)\n return __node_ids", "def list_nodes(self):\n\n return list(\n dict(\n self._from_json(self.manage.run(override=\"list-nodes\"))\n ).keys()\n )", "def get_visited_nodes(self):\n return self.visited_nodes", "def getNodes(self):\n return self.graph.keys()", "def _get_cluster_components(self):\n print(\"Connecting to cluster...\")\n self.cluster.connect_to_cluster()\n print(\"Connected!\")\n print(\"Collecting information from the cluster...\")\n return self.cluster.get_components()" ]
[ "0.67903185", "0.6746941", "0.667453", "0.66625124", "0.6581692", "0.65786994", "0.65495056", "0.65487826", "0.65190536", "0.65127057", "0.65086156", "0.65028393", "0.6470773", "0.6466806", "0.64126676", "0.637164", "0.6371118", "0.6356893", "0.63565904", "0.6340954", "0.6309951", "0.629403", "0.62860125", "0.62481934", "0.6202143", "0.6172923", "0.6172131", "0.61586016", "0.6145965", "0.61454946" ]
0.7818429
0
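
Aside (illustration only, not data from the row above): the document is a generator that deduplicates node IPs from a cluster status list and yields one state per node. The sketch below reproduces that dedupe-then-yield shape with stand-in classes; NodeStatus, Cluster, and the lookup inside node_state are hypothetical.

from collections import namedtuple

NodeStatus = namedtuple("NodeStatus", ["ip", "state"])

class Cluster:
    def __init__(self, cluster_status):
        self.cluster_status = cluster_status

    def node_state(self, ip):
        # The real code would query the node; here we just look the state up.
        return {"ip": ip,
                "state": next(s.state for s in self.cluster_status if s.ip == ip)}

    def cluster_state(self):
        # Same pattern as the record: dedupe IPs, then yield one state per node.
        for ip in set(status.ip for status in self.cluster_status):
            yield self.node_state(ip)

statuses = [NodeStatus("10.0.0.1", "up"), NodeStatus("10.0.0.2", "down"),
            NodeStatus("10.0.0.1", "up")]
print(list(Cluster(statuses).cluster_state()))   # two entries, one per unique IP
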
Converts surface normals from world coords to camera coords using a provided quaternion to apply transform
def world_to_camera_normals(inverted_camera_quaternation, world_normals):
    exr_x, exr_y, exr_z = world_normals[0], world_normals[1], world_normals[2]
    camera_normal = np.empty([exr_x.shape[0], exr_x.shape[1], 3], dtype=np.float32)
    for i in range(exr_x.shape[0]):
        for j in range(exr_x.shape[1]):
            pixel_camera_normal = _multiply_quaternion_vec3(inverted_camera_quaternation,
                                                            [exr_x[i][j], exr_y[i][j], exr_z[i][j]])
            camera_normal[i][j][0] = pixel_camera_normal[0]
            camera_normal[i][j][1] = pixel_camera_normal[1]
            camera_normal[i][j][2] = pixel_camera_normal[2]
    camera_normal = camera_normal.transpose(2, 0, 1)
    return camera_normal
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unit_normals(p,q,r): \n vx1 = p[0] - r[0] # x1 - x3. \n vy1 = p[1] - r[1] # y1 - y3. \n vz1 = p[2] - r[2] # z1 - z3. \n\n vx2 = q[0] - r[0] # x2 - x3. \n vy2 = q[1] - r[1] # y2 - y3. \n vz2 = q[2] - r[2] # z2 - z3. \n\n vnx = vy1*vz2 - vz1*vy2 \n vny = vz1*vx2 - vx1*vz2 \n vnz = vx1*vy2 - vy1*vx2 \n\n len_vn = math.sqrt(vnx*vnx + vny*vny + vnz*vnz) \n vnx = vnx/len_vn \n vny = vny/len_vn \n vnz = vnz/len_vn \n\n return vnx, vny, vnz", "def normalize(q):\n quaternion = q[0]\n norm = 1/np.sqrt((q[0,0]**2)+(q[0,1]**2)+(q[0,2]**2)+(q[0,3]**2))\n #q = norm*q\n #print('norm =', norm)\n '''q[0,0] = norm*q[0,0]\n q[0,1] = norm*q[0,1]\n q[0,2] = norm*q[0,2]\n q[0,3] = norm*q[0,3]'''\n normalizedq = norm*q\n return normalizedq", "def face_normals(xyz, triangles):\n\n\tabc_xyz = face_attr(xyz, triangles)\n\n\tbc_xyz = abc_xyz[:,:,1:3] - abc_xyz[:,:,0:1]\n\tfn = tf.linalg.cross(bc_xyz[:,:,0], bc_xyz[:,:,1])\n\tfn = tf.math.l2_normalize(fn, -1)\n\treturn fn", "def _update_surface_normals(self):\n\n # This is the case if there are too few points to\n # compute normals so there can be values to remove\n\n #can be important for parallel\n self.swarm.shadow_particles_fetch()\n\n if self.empty:\n self.director.data[...] = 0.0\n else:\n\n particle_coords = self.swarm.particleCoordinates.data\n\n Nx = np.empty(self.swarm.particleLocalCount)\n Ny = np.empty(self.swarm.particleLocalCount)\n Nz = np.empty(self.swarm.particleLocalCount)\n\n for i, xyz in enumerate(particle_coords):\n r, neighbours = self.kdtree.query(particle_coords[i], k=4)\n\n # this point is neighbour[0] and neighbour points are neighbours[(1,2,3)]\n XYZ1 = self.kdtree.data[neighbours[1]]\n XYZ2 = self.kdtree.data[neighbours[2]]\n XYZ3 = self.kdtree.data[neighbours[3]]\n\n dXYZ1 = XYZ2 - XYZ1\n dXYZ2 = XYZ3 - XYZ1\n\n # Cross product of those 2 vectors can be use as the local normal (perhaps)\n\n Nx[i], Ny[i], Nz[i] = np.cross(dXYZ1, dXYZ2)\n #if i == 0:\n # print(Nx, Ny, Nz)\n # print(xyz[0], xyz[1],xyz[2])\n # print((self.insidePt[0] - xyz[0]) * Nx[i] )\n\n if (self.insidePt):\n sign = np.sign( (self.insidePt[0] - xyz[0]) * Nx[i] +\n (self.insidePt[1] - xyz[1]) * Ny[i] +\n (self.insidePt[2] - xyz[2]) * Nz[i] )\n Nx[i] *= sign\n Ny[i] *= sign\n Nz[i] *= sign\n\n\n for i in range(0, self.swarm.particleLocalCount):\n scale = 1.0 / np.sqrt(Nx[i]**2 + Ny[i]**2 + Nz[i]**2)\n Nx[i] *= scale\n Ny[i] *= scale\n Nz[i] *= scale\n\n\n self.director.data[:,0] = Nx[:]\n self.director.data[:,1] = Ny[:]\n self.director.data[:,2] = Nz[:]\n\n print(\"Surf Norms\")\n\n return", "def QuatNormalize(wxyz):\n return wxyz/rigmech.QuatMag(wxyz)", "def get_quad_normal(q):\n P0, P1, P2, P3 = q\n p0 = Vector.fromPoint(P0) # fromPoint converts to ECEF\n p1 = Vector.fromPoint(P1)\n p3 = Vector.fromPoint(P3)\n v1 = p1 - p0\n v2 = p3 - p0\n vn = Vector.cross(v2, v1).norm()\n return vn", "def invert_quaternion(quaternion):\n norm = np.linalg.norm(quaternion)\n quaternion[1:] = -1.0 * quaternion[1:]\n return quaternion / norm", "def _normal_vector(o, p0_3d, p1_3d):\n # The vector between middle point of v1-v2 and object center location\n # is the normal vector I'm looking for\n vn = p0_3d.lerp(p1_3d, 0.5) - o.matrix_world.translation\n # normalize so I can to length computation on it\n vn.normalize()\n return vn", "def normalize(self):\n mag = np.linalg.norm(self.array)\n if mag > 0:\n arr_norm = self.array/mag\n self.__scalar = arr_norm[0]\n self.__vector = arr_norm[1:]\n else:\n raise ValueError(('Cannot normalize quaternion with non-positive '\n 'magnitude.'))", 
"def vert_normals(xyz, triangles):\n\n\tB, N, _ = _shape(xyz)\n\tM = _shape(triangles)[-2]\n\ttriangles = _i64(triangles)\n\t\n\tfn = face_normals(xyz, triangles)\n\tbfn = tf.reshape(tf.tile(fn, [1,1,3]), [B*M*3, 3])\n\tbt = tf.reshape(\n\t\ttriangles[tf.newaxis,:,:] + _i64(tf.range(B)[:,tf.newaxis,tf.newaxis] * N),\n\t\t[B*M*3])\n\tvn = tf.reshape(tf.math.unsorted_segment_sum(bfn, bt, B*N), [B,N,3])\n\tvn = tf.math.l2_normalize(vn, -1)\n\treturn vn", "def get_surface_normals_o3d(normals, points, scale=2):\n # total number of points:\n N = points.shape[0]\n\n points = np.vstack(\n (points.to_numpy(), points.to_numpy() + scale * normals)\n )\n lines = [[i, i+N] for i in range(N)]\n colors = np.zeros((N, 3)).tolist()\n\n # build pca line set:\n surface_normals_o3d = o3d.geometry.LineSet(\n points=o3d.utility.Vector3dVector(points),\n lines=o3d.utility.Vector2iVector(lines),\n )\n surface_normals_o3d.colors = o3d.utility.Vector3dVector(colors)\n\n return surface_normals_o3d", "def _compute_quat(self, cam_normal):\n return quat_from_two_vectors(habitat_sim.geo.FRONT, cam_normal)", "def get_normal_from_pose(pose):\n # p = Pose()\n # p.orientation = pose.orientation\n # z1 = (quaternion_matrix((p.orientation.x, p.orientation.y, p.orientation.z, p.orientation.w)))[0:3,2:3]\n z = tf_conversions.fromMsg(pose).M.UnitZ()\n normal = np.array([[z[0], z[1], z[2]]]).T\n \n return normal", "def _quatm(q1, q0):\n w0, x0, y0, z0 = q0\n w1, x1, y1, z1 = q1\n\n return torch.cuda.FloatTensor([\n -x1*x0 - y1*y0 - z1*z0 + w1*w0,\n x1*w0 + y1*z0 - z1*y0 + w1*x0,\n -x1*z0 + y1*w0 + z1*x0 + w1*y0,\n x1*y0 - y1*x0 + z1*w0 + w1*z0,\n ])", "def as_homogenous_transformation(self):\n r3 = self.orientation.normalize().unit_quaternion_as_r3_rotation_matrix()\n return matrix.sqr((r3[0],r3[1],r3[2],self.translation[0],\n r3[3],r3[4],r3[5],self.translation[1],\n r3[6],r3[7],r3[8],self.translation[2],\n 0,0,0,1))", "def gen_world2local(normal):\n last_dim_i = normal.ndim - 1\n\n z = np.array((0, 0, 1), dtype=float)\n\n # Tangents\n t = np.cross(normal, z)\n if (t == 0).all(axis=-1).any():\n raise ValueError((\n \"Found (0, 0, 0) tangents! 
Possible reasons: normal colinear with \"\n \"(0, 0, 1); normal is (0, 0, 0)\"))\n t = normalize_vec(t, axis=last_dim_i)\n\n # Binormals\n # No need to normalize because normals and tangents are orthonormal\n b = np.cross(normal, t)\n\n # Rotation matrices\n rot = np.stack((t, b, normal), axis=last_dim_i)\n # So that at each location, we have a 3x3 matrix whose ROWS, from top to\n # bottom, are world tangents, binormals, and normals\n\n return rot", "def normal_at(self, u, v, world=True):\n u = u * pi\n v = v * PI2\n x = cos(u) * sin(v)\n y = sin(u) * sin(v)\n z = cos(v)\n normal = Vector(x, y, z)\n if world:\n normal.transform(self.transformation)\n return normal", "def body_2_world(orientation, vector):\n\n vector = np.append(vector, 0)\n orientation_inv = quaternion_inverse(orientation)\n new_vector = quaternion_multiply(orientation, quaternion_multiply(vector, orientation_inv))\n return new_vector[:3]", "def unit_normals(self):\n return np.stack(self.centers_cartesian(), axis=-1)", "def make_inward_normal(tetrahedron):\n\n convert_to_np_array = lambda v: np.array([v.x, v.y, v.z])\n np_vertices = list(map(convert_to_np_array, [tetrahedron.get_vertex(i) for i in range(4)]))\n # This is the middle point\n # midpoint = np.mean(np_vertices, axis=0)\n\n midpoint = np_vertices[0]\n for i in range(1, 4):\n midpoint += np_vertices[i]\n midpoint = midpoint / 2.0\n\n for i in range(4):\n face = tetrahedron.get_face(i)\n d = distance(face, midpoint)\n if d < 0:\n face.nx *= -1.0\n face.ny *= -1.0\n face.nz *= -1.0\n face.d *= -1.0", "def quaternion_inv(quaternion):\r\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\r\n numpy.negative(q[1:], q[1:])\r\n return q / numpy.dot(q, q)", "def mesh_renderer(\n vertices,\n triangles,\n normals,\n diffuse_colors,\n camera_position,\n camera_lookat,\n camera_up,\n light_positions,\n light_intensities,\n image_width,\n image_height,\n specular_colors=None,\n shininess_coefficients=None,\n ambient_color=None,\n fov_y=40.0,\n near_clip=0.01,\n far_clip=10.0):\n if len(vertices.shape) != 3 or vertices.shape[-1] != 3:\n raise ValueError(\n \"Vertices must have shape [batch_size, vertex_count, 3].\")\n batch_size = vertices.shape[0]\n if len(normals.shape) != 3 or normals.shape[-1] != 3:\n raise ValueError(\n \"Normals must have shape [batch_size, vertex_count, 3].\")\n if len(light_positions.shape) != 3 or light_positions.shape[-1] != 3:\n raise ValueError(\n \"light_positions must have shape [batch_size, light_count, 3].\")\n if len(light_intensities.shape) != 3 or light_intensities.shape[-1] != 3:\n raise ValueError(\n \"light_intensities must have shape [batch_size, light_count, 3].\")\n if len(diffuse_colors.shape) != 3 or diffuse_colors.shape[-1] != 3:\n raise ValueError(\n \"diffuse_colors must have shape [batch_size, vertex_count, 3].\")\n if (ambient_color is not None and\n list(ambient_color.shape) != [batch_size, 3]):\n raise ValueError(\"ambient_color must have shape [batch_size, 3].\")\n if list(camera_position.shape) == [3]:\n camera_position = torch.unsqueeze(camera_position, 0).repeat(batch_size, 1)\n elif list(camera_position.shape) != [batch_size, 3]:\n raise ValueError(\n \"camera_position must have shape [batch_size, 3] or [3].\")\n if list(camera_lookat.shape) == [3]:\n camera_lookat = torch.unsqueeze(camera_lookat, 0).repeat(batch_size, 1)\n elif list(camera_lookat.shape) != [batch_size, 3]:\n raise ValueError(\n \"camera_lookat must have shape [batch_size, 3] or [3].\")\n if list(camera_up.shape) == [3]:\n camera_up = 
torch.unsqueeze(camera_up, 0).repeat(batch_size, 1)\n elif list(camera_up.shape) != [batch_size, 3]:\n raise ValueError(\"camera_up must have shape [batch_size, 3] or [3].\")\n if isinstance(fov_y, float):\n fov_y = torch.tensor(batch_size * [fov_y], dtype=torch.float32)\n elif len(fov_y.shape) == 0:\n fov_y = torch.unsqueeze(fov_y, 0).repeat(batch_size)\n elif list(fov_y.shape) != [batch_size]:\n raise ValueError(\"fov_y must be a float, a 0D tensor, or a 1D tensor \"\n \"with shape [batch_size].\")\n if isinstance(near_clip, float):\n near_clip = torch.tensor(batch_size * [near_clip], dtype=torch.float32)\n elif len(near_clip.shape) == 0:\n near_clip = torch.unsqueeze(near_clip, 0).repeat(batch_size)\n elif list(near_clip.shape) != [batch_size]:\n raise ValueError(\"near_clip must be a float, a 0D tensor, or a 1D \"\n \"tensor with shape [batch_size].\")\n if isinstance(far_clip, float):\n far_clip = torch.tensor(batch_size * [far_clip], dtype=torch.float32)\n elif len(far_clip.shape) == 0:\n far_clip = torch.unsqueeze(far_clip, 0).repeat(batch_size)\n elif list(far_clip.shape) != [batch_size]:\n raise ValueError(\"far_clip must be a float, a 0D tensor, or a 1D \"\n \"tensor with shape [batch_size].\")\n if specular_colors is not None and shininess_coefficients is None:\n raise ValueError(\n \"Specular colors were supplied without shininess coefficients.\")\n if shininess_coefficients is not None and specular_colors is None:\n raise ValueError(\n \"Shininess coefficients were supplied without specular colors.\")\n if specular_colors is not None:\n # Since a 0D float32 tensor is accepted, also accept a float.\n if isinstance(shininess_coefficients, float):\n shininess_coefficients = torch.tensor(\n shininess_coefficients, dtype=torch.float32)\n if len(specular_colors.shape) != 3:\n raise ValueError(\"The specular colors must have shape [batch_size, \"\n \"vertex_count, 3].\")\n if len(shininess_coefficients.shape) > 2:\n raise ValueError(\"The shininess coefficients must have shape at \"\n \"most [batch_size, vertex_count].\")\n # If we don't have per-vertex coefficients, we can just reshape the\n # input shininess to broadcast later, rather than interpolating an\n # additional vertex attribute:\n if len(shininess_coefficients.shape) < 2:\n vertex_attributes = torch.cat(\n [normals, vertices, diffuse_colors, specular_colors], 2)\n else:\n vertex_attributes = torch.cat(\n [\n normals, vertices, diffuse_colors, specular_colors,\n torch.unsqueeze(shininess_coefficients, 2)\n ], 2)\n else:\n vertex_attributes = torch.cat([normals, vertices, diffuse_colors], 2)\n\n camera_matrices = camera_utils.look_at(camera_position, camera_lookat,\n camera_up)\n\n perspective_transforms = camera_utils.perspective(\n image_width / image_height,\n fov_y,\n near_clip,\n far_clip)\n\n clip_space_transforms = torch.matmul(perspective_transforms, camera_matrices)\n\n pixel_attributes = rasterize(\n vertices, vertex_attributes, triangles,\n clip_space_transforms, image_width, image_height,\n [-1] * vertex_attributes.shape[2])\n\n # Extract the interpolated vertex attributes from the pixel buffer and\n # supply them to the shader:\n pixel_normals = torch.nn.functional.normalize(\n pixel_attributes[:, :, :, 0:3], p=2, dim=3)\n pixel_positions = pixel_attributes[:, :, :, 3:6]\n diffuse_colors = pixel_attributes[:, :, :, 6:9]\n if specular_colors is not None:\n specular_colors = pixel_attributes[:, :, :, 9:12]\n # Retrieve the interpolated shininess coefficients if necessary, or just\n # reshape our input for 
broadcasting:\n if len(shininess_coefficients.shape) == 2:\n shininess_coefficients = pixel_attributes[:, :, :, 12]\n else:\n shininess_coefficients = torch.reshape(\n shininess_coefficients, [-1, 1, 1])\n\n pixel_mask = (diffuse_colors >= 0.0).reduce(dim=3).type(torch.float32)\n\n renders = phong_shader(\n normals=pixel_normals,\n alphas=pixel_mask,\n pixel_positions=pixel_positions,\n light_positions=light_positions,\n light_intensities=light_intensities,\n diffuse_colors=diffuse_colors,\n camera_position=camera_position if specular_colors is not None else None,\n specular_colors=specular_colors,\n shininess_coefficients=shininess_coefficients,\n ambient_color=ambient_color)\n return renders", "def getNormalizedNormalVec(self):\n TriPos = self.position\n # calc normalized normal vecor for Tri\n # get vectors Vert1Vert2 & Vert2Vert3\n TriVectors = np.subtract(TriPos[1:],TriPos[:-1])\n # get crossproduct of Vert1Vert2 & Vert2Vert3 (= surface normal)\n TriNorm = np.cross(TriVectors[0],TriVectors[1])+0.0\n # get length of surface normal\n length = np.linalg.norm(TriNorm)\n # divide each component of surface normal by length (= normalized surface normal)\n NormalizedNormalVec = np.around(TriNorm / length, decimals=5) # rounded, otherwise different values, equals not found\n # create string of tuple for segment dict \n #SegmDict = str(tuple(NormalizedNormalVec))\n return NormalizedNormalVec.tolist()", "def quat2transform(q):\n x, y, z, w = q\n xx2 = 2 * x * x\n yy2 = 2 * y * y\n zz2 = 2 * z * z\n xy2 = 2 * x * y\n wz2 = 2 * w * z\n zx2 = 2 * z * x\n wy2 = 2 * w * y\n yz2 = 2 * y * z\n wx2 = 2 * w * x\n\n rmat = np.empty((3, 3), float)\n rmat[0,0] = 1. - yy2 - zz2\n rmat[0,1] = xy2 - wz2\n rmat[0,2] = zx2 + wy2\n rmat[1,0] = xy2 + wz2\n rmat[1,1] = 1. - xx2 - zz2\n rmat[1,2] = yz2 - wx2\n rmat[2,0] = zx2 - wy2\n rmat[2,1] = yz2 + wx2\n rmat[2,2] = 1. 
- xx2 - yy2\n\n return rmat", "def normal(axis_direction, axis_origin, point):\n # transform input into numpy arrays\n axis_direction = np.array(axis_direction, float)\n axis_origin = np.array(axis_origin, float)\n point = np.array(point, float)\n\n # vector from axis normal_origin to point\n vector = point - axis_origin\n\n # projection of vector on axis\n projection = np.dot(vector, axis_direction)*axis_direction\n\n # the normal vector from normal_origin to point\n normal_direction = vector - projection\n\n # normalized normal_direction\n normal_direction = normal_direction/np.linalg.norm(normal_direction)\n\n # opposite of the projection of vector on normal\n projection2 = - np.dot(normal_direction, vector)*normal_direction\n\n normal_origin = point + projection2\n\n return normal_direction, normal_origin", "def quaternion2rot3D(quaternion):\n theta, axis = quaternion2AngleAxis(quaternion)\n return angleAxis2rot3D(axis, theta)", "def compute_normalvect(self):\n normvect = np.zeros((len(self.tri_pnts),3,3))\n zvec = np.array([0, 0, 1])\n for itri, tri in enumerate(self.tri_pnts):\n #import pdb; pdb.set_trace()\n tri0, tri1, tri2 = tri\n x1,y1 = self.points[tri1]-self.points[tri0]\n v1 = np.array([x1,y1,0])\n x2,y2 = self.points[tri2]-self.points[tri1]\n v2 = np.array([x2,y2,0])\n x3,y3 = self.points[tri0]-self.points[tri2]\n v3 = np.array([x3,y3,0])\n v1 = v1/np.linalg.norm(v1)\n v2 = v2/np.linalg.norm(v2)\n v3 = v3/np.linalg.norm(v3)\n #import pdb; pdb.set_trace()\n normvect[itri,:,:] = np.cross(v1,zvec), np.cross(v2,zvec), np.cross(v3,zvec)\n #import pdb; pdb.set_trace()\n return normvect", "def compareNormals():\n computeNormals = False\n if computeNormals:\n r1,r2,r3 = read('r1'),read('r2'),read('r3')\n r = [r1,r2,r3]\n x2 = [like(r1),like(r1),like(r1)]\n x3 = [like(r1),like(r1),like(r1)]\n v = [like(r1),like(r1),like(r1)]\n FlattenerUtil.getFrame(r,None,x2,x3)\n FlattenerUtil.cross(x3,x2,v)\n FlattenerUtil.normalize(v,v)\n write('v1',v[0])\n write('v2',v[1])\n write('v3',v[2])\n v1,v2,v3 = read('v1'),read('v2'),read('v3')\n u1,u2,u3 = read('u1'),read('u2'),read('u3')\n display(sub(v1,u1),cmap=rwb,cmin=-0.2,cmax=0.2,name='v1-u1')\n display(sub(v2,u2),cmap=rwb,cmin=-0.2,cmax=0.2,name='v2-u2')\n display(sub(v3,u3),cmap=rwb,cmin=-0.2,cmax=0.2,name='v3-u3')", "def transformNormal(p, xform, axes=None):\n return transform(p, invert(xform[:3, :3]).T, axes, vector=True)", "def surface_norm(self, pt):\n\n return self.normal.normalize()" ]
[ "0.6527598", "0.59582293", "0.59039336", "0.58207244", "0.57282263", "0.5679537", "0.5604675", "0.55679154", "0.54979587", "0.5455145", "0.545264", "0.5418624", "0.54045796", "0.5378379", "0.5371932", "0.5358267", "0.5305178", "0.52894783", "0.52881867", "0.52851504", "0.5279571", "0.5272644", "0.52716583", "0.52672887", "0.5253356", "0.5253047", "0.5250882", "0.5240889", "0.5233729", "0.52161413" ]
0.7345408
0
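
Aside (illustration only, not data from the row above): the document rotates each pixel's world-space normal into camera space by quaternion-multiplying one pixel at a time. An equivalent and much faster route is to convert the quaternion to a 3x3 rotation matrix once and apply it to the whole (3, H, W) normal map in a single einsum; the (w, x, y, z) ordering and unit-length assumption below are mine, not something stated in the record.

import numpy as np

def quat_to_rotation_matrix(q):
    # q = (w, x, y, z), assumed unit-length.
    w, x, y, z = q
    return np.array([
        [1 - 2*(y*y + z*z), 2*(x*y - w*z),     2*(x*z + w*y)],
        [2*(x*y + w*z),     1 - 2*(x*x + z*z), 2*(y*z - w*x)],
        [2*(x*z - w*y),     2*(y*z + w*x),     1 - 2*(x*x + y*y)],
    ])

def world_to_camera_normals_vectorized(inv_cam_quat, world_normals):
    # world_normals: (3, H, W), same layout as the record's input and output.
    R = quat_to_rotation_matrix(inv_cam_quat)
    # Apply the 3x3 rotation to every pixel at once.
    return np.einsum("ij,jhw->ihw", R, world_normals)

# Smoke test: a 90-degree rotation about Z maps +X normals onto +Y.
q = np.array([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])
normals = np.zeros((3, 2, 2))
normals[0] = 1.0
print(np.round(world_to_camera_normals_vectorized(q, normals), 3))
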
Find the stack frame of the caller so that we can note the source file name, line number and function name.
def _findCaller(stack_info=False):
    f = logging.currentframe()
    #On some versions of IronPython, currentframe() returns None if
    #IronPython isn't run with -X:Frames.
    if f is not None:
        f = f.f_back
    rv = "(unknown file)", 0, "(unknown function)", None
    while hasattr(f, "f_code"):
        co = f.f_code
        filename = os.path.normcase(co.co_filename)
        if filename == logging._srcfile:
            f = f.f_back
            continue
        sinfo = None
        if stack_info:
            sio = io.StringIO()
            sio.write('Stack (most recent call last):\n')
            traceback.print_stack(f, file=sio)
            sinfo = sio.getvalue()
            if sinfo[-1] == '\n':
                sinfo = sinfo[:-1]
            sio.close()
        rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
        break
    return rv
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findCaller(cls):\n f = currentframe()\n # On some versions of IronPython, currentframe() returns None if\n # IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown file)\", 0, \"(unknown function)\"\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == _srcfile:\n f = f.f_back\n continue\n rv = (co.co_filename, f.f_lineno, co.co_name)\n break\n return rv", "def findCallerPatch():\n f = currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown file)\", 0, \"(unknown function)\"\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == _srcfile:\n f = f.f_back\n continue\n rv = (filename, f.f_lineno, co.co_name)\n break\n return rv", "def findCaller(self, stack_info=False, stacklevel=2):\n f = currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n orig_f = f\n while f and stacklevel > 1:\n f = f.f_back\n stacklevel -= 1\n if not f:\n f = orig_f\n rv = \"(unknown file)\", 0, \"(unknown function)\", None\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == _srcfile:\n f = f.f_back\n continue\n sinfo = None\n if stack_info:\n sio = io.StringIO()\n sio.write('Stack (most recent call last):\\n')\n traceback.print_stack(f, file=sio)\n sinfo = sio.getvalue()\n if sinfo[-1] == '\\n':\n sinfo = sinfo[:-1]\n sio.close()\n rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)\n break\n return rv", "def findCaller(self, stack_info=False):\n \n _frame_object = logging.currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X: Frames.\n if (_frame_object is not None):\n _frame_object = _frame_object.f_back\n \n rv = (\"(unknown file)\", 0, \"(unknown function)\", None)\n while hasattr(_frame_object, 'f_code'):\n _code_object = _frame_object.f_code\n filename = os.path.normcase(_code_object.co_filename)\n \n _next = _frame_object.f_back\n # noinspection PyProtectedMember,PyUnresolvedReferences\n if (filename == logging._srcfile):\n _frame_object = _next\n continue\n \n if (_next and hasattr(_next, 'f_code')):\n _parent_code = _next.f_code\n if (_parent_code.co_name == LOGGING_WRAPPER_NAME):\n _frame_object = _next.f_back\n continue\n \n _stack_info = None\n if (stack_info):\n _str_io = StringIO()\n _str_io.write('Stack (most recent call last):\\n')\n traceback.print_stack(_frame_object, file=_str_io)\n _stack_info = _str_io.getvalue()\n if (_stack_info[-1] == '\\n'):\n _stack_info = _stack_info[:-1]\n _str_io.close()\n \n rv = (_code_object.co_filename, _frame_object.f_lineno, _code_object.co_name, _stack_info)\n break\n return rv", "def get_caller_context(depth=None, **kwarg):\r\n if TIK_ERROR_MSG.api_source_info is not None:\r\n return TIK_ERROR_MSG.api_source_info\r\n if depth is None:\r\n raise RuntimeError(\"There are two reasons for the error:\\n\"\r\n \"If it is called by the user, please register source\"\r\n \" info before entering decorators;\\n\"\r\n \"If it is an internal call, please specify \"\r\n \"the stack depth;\")\r\n additional_stack = kwarg.get('stack_depth', 0)\r\n depth += additional_stack\r\n if ERROR_MSG_LEVEL.err_msg_level == 0:\r\n caller = stack(depth)\r\n else:\r\n caller = current_frame(depth)\r\n 
return caller", "def caller_info(self):\n\n frames = traceback.extract_stack()\n frames.reverse()\n try:\n (_, mod_name) = __name__.rsplit('.', 1)\n except ValueError:\n mod_name = __name__\n for (fpath, lnum, _, _) in frames:\n (fname, _) = os.path.basename(fpath).rsplit('.', 1)\n if fname != mod_name:\n break\n\n return (fname, lnum)", "def getframeinfo(frame, context=1):\r\n if istraceback(frame):\r\n lineno = frame.tb_lineno\r\n frame = frame.tb_frame\r\n else:\r\n lineno = frame.f_lineno\r\n if not isframe(frame):\r\n raise TypeError('{!r} is not a frame or traceback object'.format(frame))\r\n\r\n filename = getsourcefile(frame) or getfile(frame)\r\n if context > 0:\r\n start = lineno - 1 - context//2\r\n try:\r\n lines, lnum = findsource(frame)\r\n except IOError:\r\n lines = index = None\r\n else:\r\n start = max(start, 1)\r\n start = max(0, min(start, len(lines) - context))\r\n lines = lines[start:start+context]\r\n index = lineno - 1 - start\r\n else:\r\n lines = index = None\r\n\r\n return Traceback(filename, lineno, frame.f_code.co_name, lines, index)", "def get_caller_frame() -> FrameType:\n return cast(FrameType, cast(FrameType, inspect.currentframe()).f_back)", "def calling_stack_info(print_res=True, code_context=1):\n\n start_frame = inspect.currentframe().f_back\n\n fil = generate_frame_list_info(start_frame, code_context=code_context)\n\n if print_res:\n # noinspection PyUnresolvedReferences\n print(fil.tb_txt)\n return fil", "def find_actual_caller(self):\n\n # Gleaned from code in the logging module itself...\n try:\n f = sys._getframe(1)\n ##f = inspect.currentframe(1)\n except Exception:\n f = None\n # On some versions of IronPython, currentframe() returns None if\n # IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown module)\", \"(unknown file)\", 0, \"(unknown function)\"\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n mod = inspect.getmodule(f)\n\n if mod is None:\n modname = '__main__'\n else:\n modname = mod.__name__\n\n if modname == __name__:\n # Crawl back until the first frame outside of this module\n f = f.f_back\n continue\n\n rv = (modname, filename, f.f_lineno, co.co_name)\n break\n return rv", "def _find_the_caller(i=0):\n import inspect\n\n # the first 2 elements in the stack are the current line and the line\n # of caller of `_find_the_caller`\n i = i + 2\n caller = inspect.stack()[i]\n return caller[1], caller[2], caller[4][0].rstrip(\"\\n\").strip()", "def _get_caller_detail(n=2):\n if not _show_caller_details:\n return None\n s = inspect.stack()[:n + 1]\n try:\n frame = s[n]\n try:\n return frame[1]\n # WARNING(dhellmann): Using frame.lineno to include the\n # line number in the return value causes some sort of\n # memory or stack corruption that manifests in values not\n # being cleaned up in the cfgfilter tests.\n # return '%s:%s' % (frame[1], frame[2])\n finally:\n del frame\n finally:\n del s", "def _print_caller(self):\n import traceback\n print '\\n'.join(['%s:%d %s'%(f,l,c) for f,l,m,c in traceback.extract_stack()])", "def get_cur_info():\n try:\n raise Exception\n except:\n f = sys.exc_info()[2].tb_frame.f_back\n # return (f.f_code.co_name, f.f_lineno)\n return f.f_code.co_name", "def callersName():\r\n import sys\r\n return sys._getframe(2).f_code.co_name", "def GetCallerName(num_frame=1):\n frame = sys._getframe(num_frame + 1) # pylint: disable=protected-access\n return inspect.getframeinfo(frame, 1)[2]", "def getStackPosition(self):\r\n return 
self.callstack.getStack()", "def caller_name(skip=2):\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0] \n \n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if 'self' in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '<module>': # top level usually\n name.append( codename ) # function or a method\n del parentframe\n return \".\".join(name)", "def _sourceFrame(self):\n try:\n raise Exception('catch me') # forced exception to get stack traceback\n except:\n exc_traceback = sys.exc_info()[2]\n return exc_traceback.tb_frame.f_back.f_back.f_back.f_back\n #endTry", "def get_caller(delta=0):\n if delta < 0:\n raise RuntimeError(\"Delta must be positive!\")\n for i, frame in enumerate(inspect.stack()):\n if i == 2 + delta:\n return os.path.abspath(frame.filename)", "def __get_caller_name(caller_frame):\n\n caller_name = caller_frame.f_code.co_name\n if 'self' in caller_frame.f_locals:\n caller_name = \"%s.%s\" % (\n caller_frame.f_locals['self'].__class__.__name__, caller_name\n )\n module = inspect.getmodule(caller_frame)\n if module:\n caller_name = \"%s.%s\" % (module.__name__, caller_name)\n return caller_name", "def findCaller(self):\n frames = inspect.stack()\n thisfile = os.path.normcase(frames[0][1])\n for frame in frames:\n filename = os.path.normcase(frame[1])\n if filename != thisfile and filename != logging._srcfile:\n major, minor, micro, _, _ = sys.version_info\n if (major, minor, micro) >= (2, 4, 2):\n return filename, frame[2], frame[3]\n else:\n return filename, frame[2]", "def debug_caller_name(skip=2):\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0]\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n if module:\n name.append(module.__name__)\n # detect classname\n if 'self' in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '<module>': # top level usually\n name.append( codename ) # function or a method\n del parentframe\n return \".\".join(name)", "def caller_name(skip=2):\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0]\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if 'self' in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '<module>': # top level usually\n name.append( 
codename ) # function or a method\n del parentframe\n return \".\".join(name)", "def who_is_calling():\n return sys._getframe(2).f_code.co_name", "def getCallerParams(self,frameLevel=1):\n # frameLevel=0 is always getCallerParams. Caller should be level 1, but sometimes level 1 is still in Debug. This causes many dirty hacks.\n levelsToAdd=frameLevel-1\n #debugDir=dir(self)\n #debugDir.remove('__init__') # without removing __init__ was debug unusable in any __init__. Following line is temporary unslashed only\n debugDir=['allowed', 'allowedLevels', 'caller', 'callerLocals', 'callerName', 'dprint', 'getCallerName', 'getCallerParams', 'printHeader', 'restricted', 'settings']\n while sys._getframe(frameLevel).f_code.co_name in debugDir: # restrict returning functions from Debug instance -- dirty hack\n # but causes trouble for init which is in every class. property debugDir hacks this issue.\n if frameLevel>1: print '%i: %s'%(frameLevel,sys._getframe(frameLevel).f_code.co_name)\n frameLevel+=1\n frameLevel+=levelsToAdd # another hack to get another frame\n self.caller=sys._getframe(frameLevel)\n self.callerLocals=self.caller.f_locals\n try:\n if self.callerLocals.has_key('self'):\n #debug.dprint(print str(self.callerLocals['self'].__class__).split(' ')[1],4)\n self.callerName=(\n str(self.callerLocals['self']).split(' ')[0].replace('<__main__.','')+\n '.'+self.caller.f_code.co_name)\n # 026 #if self.callerLocals.has_key('self'): del self.callerLocals['self'] # 025 Fix - caused errors in multithreadng.\n else: self.callerName=self.caller.f_code.co_name\n except KeyError, errorInstance:\n #026 #self.headerLogger.error(\"Caught KeyError. Error: %s; Arguments: %s\"%(errorInstance,str(errorInstance.args)))\n self.headerLogger.exception(\"Caught KeyError. Error: %s; Arguments: %s\"%(errorInstance,str(errorInstance.args)))\n self.headerLogger.debug(\"callerLocals is %s\"%(str(self.callerLocals)))\n return (self.callerName,self.callerLocals)", "def print_callsite_location():\n fi = inspect.getouterframes( inspect.currentframe() )[2]\n print(\"{path}:{line} {fname}\".format(\n line=fi.lineno, path=fi.filename, fname=fi.function))", "def caller_name(self, skip=6):\r\n stack = inspect.stack()\r\n start = 0 + skip\r\n if len(stack) < start + 1:\r\n return ''\r\n parentframe = stack[start][0] \r\n\r\n name = []\r\n module = inspect.getmodule(parentframe)\r\n # `modname` can be None when frame is executed directly in console\r\n # TODO(techtonik): consider using __main__\r\n if module:\r\n name.append(module.__name__)\r\n # detect classname\r\n if 'self' in parentframe.f_locals:\r\n # I don't know any way to detect call from the object method\r\n # XXX: there seems to be no way to detect static method call - it will\r\n # be just a function call\r\n name.append(parentframe.f_locals['self'].__class__.__name__)\r\n codename = parentframe.f_code.co_name\r\n if codename != '<module>': # top level usually\r\n name.append( codename ) # function or a method\r\n\r\n ## Avoid circular refs and frame leaks\r\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\r\n del parentframe, stack\r\n\r\n return \".\".join(name)", "def GetFunctionName():\n return traceback.extract_stack(None, 2)[0][2]", "def _get_vispy_caller():\n records = inspect.stack()\n # first few records are vispy-based logging calls\n for record in records[5:]:\n module = record[0].f_globals['__name__']\n if module.startswith('vispy'):\n line = str(record[0].f_lineno)\n func = record[3]\n cls = record[0].f_locals.get('self', 
None)\n clsname = \"\" if cls is None else cls.__class__.__name__ + '.'\n caller = \"{0}:{1}{2}({3}): \".format(module, clsname, func, line)\n return caller\n return 'unknown'" ]
[ "0.7863397", "0.77925444", "0.77843714", "0.7719026", "0.76551193", "0.75870043", "0.7571479", "0.74977046", "0.7495805", "0.7414687", "0.7359957", "0.7356189", "0.7281788", "0.72635347", "0.72338563", "0.71918875", "0.71641344", "0.71571225", "0.7153518", "0.71489084", "0.7134191", "0.70939595", "0.70868355", "0.70721364", "0.70689595", "0.706157", "0.7059557", "0.7006868", "0.7006727", "0.6927039" ]
0.7931316
0
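
Aside (illustration only, not data from the row above): the document is a variant of logging's findCaller that walks frames back until it leaves the logging source file, optionally capturing the stack as text. The sketch below shows the core frame-inspection idea in miniature — file, line and function a fixed number of frames up — without the _srcfile filtering; the helper name and skip convention are invented, and sys._getframe is CPython-specific.

import os
import sys

def caller_info(skip=1):
    # skip=0: the function that called caller_info(); skip=1: that function's caller.
    frame = sys._getframe(skip + 1)
    code = frame.f_code
    return os.path.basename(code.co_filename), frame.f_lineno, code.co_name

def some_function():
    # Reports whoever called some_function().
    print(caller_info())

some_function()
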
Log with a color that varies by log level.
def log_with_color(level):
    def wrapper(text, exc_info=False):
        #modified by zhengchun 20180607
        # color = log_colors_config[level.upper()]
        # getattr(logging, level.lower())(coloring(text, color))
        # fn, lno, func, sinfo = _findCaller(stack_info=False)
        # out_text="[F:" + os.path.basename(fn) + "] [M:" + func + "] [L:" + str(lno) + "] - " + text
        sinfo = None
        f = sys._getframe().f_back
        out_text = "[F:" + os.path.basename(f.f_code.co_filename) + "] [M:" + f.f_code.co_name + "] [L:" + str(f.f_lineno) + "] - " + text
        if(exc_info):
            sio = io.StringIO()
            sio.write('Stack (most recent call last):\n')
            traceback.print_stack(f, file=sio)
            sinfo = sio.getvalue()
            if sinfo[-1] == '\n':
                sinfo = sinfo[:-1]
            sio.close()
            out_text += "\n" + str(sinfo)
        getattr(logging, level.lower())(out_text)
    return wrapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _log_format_onecolor(record):\n\n return LEVEL_COLORS.get(record.levelname)", "def get_color(self, record):\n if record.level >= logbook.ERROR:\n return 'red'\n elif record.level >= logbook.NOTICE:\n return 'yellow'\n elif record.level >= logbook.INFO:\n return 'green'\n elif record.level >= logbook.DEBUG:\n return 'darkblue'\n return 'lightgray'", "def __log(level, message):\n if level == 1:\n logging.info(\" \" + str(datetime.datetime.now()) + \" \" + message)\n if level == 2:\n logging.error(\" \" + str(datetime.datetime.now()) + \" \" + message)\n if level == 3:\n logging.critical(\" \" + str(datetime.datetime.now()) + \" \" + message)", "def _log_print(self, msg_GREEN: str = \"\", msg_WHITE: str = \"\",\n level: str = \"INFO\"):\n if msg_GREEN != \"\":\n print(GREEN + \"\\n\" + msg_GREEN)\n if msg_WHITE != \"\":\n print(msg_WHITE)\n\n if self.with_log:\n msg_GREEN = msg_GREEN + msg_WHITE\n\n if level == \"INFO\":\n log.info(msg_GREEN)\n if level == \"WARN\":\n log.warning(msg_GREEN)", "def setup_logging(log_level=logging.DEBUG):\n logging.basicConfig(level=log_level)\n fmt = \"%(asctime)s %(levelname)s (%(threadName)s) \" \"[%(name)s] %(message)s\"\n colorfmt = \"%(log_color)s{}%(reset)s\".format(fmt)\n datefmt = \"%Y-%m-%d %H:%M:%S\"\n\n try:\n from colorlog import ColoredFormatter\n\n logging.getLogger().handlers[0].setFormatter(\n ColoredFormatter(\n colorfmt,\n datefmt=datefmt,\n reset=True,\n log_colors={\n \"DEBUG\": \"cyan\",\n \"INFO\": \"green\",\n \"WARNING\": \"yellow\",\n \"ERROR\": \"red\",\n \"CRITICAL\": \"red\",\n },\n )\n )\n except ImportError:\n pass\n\n logger = logging.getLogger(\"\")\n logger.setLevel(log_level)", "def log(level, msg):\n weight = \"?\"\n if level>=LOG_LEVEL:\n if level == 0:\n weight = \"DEBUG\"\n elif level == 1:\n weight = \"INFO\"\n elif level == 2:\n weight = \"WARN\"\n elif level == 3:\n weight = \"ERROR\"\n else:\n log(3, \"Invalid log level: {0}\".format(level))\n print(\"{0}: {1}\".format(weight, msg))", "def log(self, level, msg, *args, **kwargs):\n pass", "def log_lvl(lvl):\n logs.set_level(logging.getLogger(\"plysp\"), lvl)", "def print_log(value_color=\"\", value_noncolor=\"\"):\n HEADER = '\\033[92m'\n ENDC = '\\033[0m'\n print(HEADER + value_color + ENDC + str(value_noncolor))", "def log( loglevel, message ):\n E.log( loglevel, message )", "def l(msg, loglvl=0xFFFFFF):\n global LOG_LEVEL\n if (loglvl & LOG_LEVEL) != 0x0:\n print time.ctime(), ': ' , str(msg)", "def log(message, level=\"INFO\"):\r\n print(__get_formatted(message, level))", "def add_logger(cls, name, color):\n assert name in _verbosity_table.keys(), \"{} does not have an associated verbosity level\".format(name)\n\n def log_func(cls, *text):\n if cls._verbosity >= _verbosity_table[name]:\n print(color + ' '.join(text) + Style.RESET_ALL)\n\n setattr(cls, name, classmethod(log_func))", "def _print(self, message, level, color):\n if (self.level >= level):\n sys.stdout.write(color)\n try: sys.stdout.write(\"%s\\n\" % message)\n except: sys.stdout.write(encode(\"%s\\n\" % message))\n sys.stdout.write(COLOR_RESET)\n sys.stdout.flush()\n return message", "def init_logger_color():\n if os.environ.get('COLOREDLOGS_LOG_LEVEL') is None:\n os.environ['COLOREDLOGS_LOG_LEVEL'] = 'INFO'\n if os.environ.get('COLOREDLOGS_LOG_FORMAT') is None:\n os.environ['COLOREDLOGS_LOG_FORMAT'] = '%(asctime)s '\n '[%(levelname)s] %(message)s'\n if os.environ.get('COLOREDLOGS_DATE_FORMAT') is None:\n os.environ['COLOREDLOGS_LOG_DATE_FORMAT'] = '%Y-%m-%d %H:%M:%S'\n coloredlogs.install()", 
"def log(prefix, msg):\n msg = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S [%%s] %%s\") % (prefix, msg)\n print(COLOR_MAP.get(prefix, DEFAULT) + msg + DEFAULT)", "def format(self, record):\n msg = super(ColoredFormatter, self).format(record)\n color = self._COLOR_MAPPING.get(record.levelname)\n if self._use_colors and color:\n msg = '%s%s%s' % (color, msg, self._RESET)\n return msg", "def write_log(self, level, message): \n \n level = level.lower()\n #print(level, message,str(self.logger))\n if level == 'debug':\n self.logger.debug('%s', message)\n elif level == 'error':\n self.logger.error('%s', message)\n elif level == 'critical':\n self.logger.critical('%s', message)\n elif level == 'warning':\n self.logger.warning('%s', message)\n else:\n self.logger.info('%s', message)", "def setup_logging():\n\n coloredlogs.install(\n level=DEBUG, fmt=\"%(asctime)s %(name)s[%(process)d] %(levelname)s %(message)s\"\n )", "def logging(cls, lvl, message):\n log = getattr(logging, lvl)\n message = '[{}] {}'.format(cls.__name__, message)\n log(message)", "def test_set_single_logger_level(self):\n pass", "def map_level(level):\n if level >= logging.ERROR:\n return 'error'\n elif level >= logging.WARNING:\n return 'warn'\n elif level >= logging.INFO:\n return 'info'\n return ''", "def tone_down_logger():\n for level in (logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG):\n level_name = logging.getLevelName(level)\n logging.addLevelName(level, level_name.capitalize())", "def tone_down_logger():\n for level in (logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG):\n level_name = logging.getLevelName(level)\n logging.addLevelName(level, level_name.capitalize())", "def format(self, record):\n\n level_colors = {\n 'DEBUG': strc('DEBUG', 'yellow', 'bold'),\n 'INFO': strc('INFO', 'blue', 'bold'),\n 'WARNING': strc('WARNING', 'yellow', 'bold'),\n 'ERROR': strc('ERROR', 'red', 'bold'),\n 'CRITICAL': strc('CRITICAL', 'red', 'bold')}\n\n if record.levelname in level_colors.keys():\n record.levelname = level_colors[record.levelname]\n record.name = strc(record.name, 'black', 'bold')\n\n return logging.Formatter.format(self, record)", "def setup_log():\n\n #logging.basicConfig(filename='log.txt',filemode='a',format='%(asctime)s %(threadName)s %(filename)s %(funcName) %(lineno) %(levelname)s %(message)s', datefmt='%Y/%m/%d %H:%M:%S')\n\n #get the root logger\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n #set up logging to console for INFO and worse\n sh = colorlog.StreamHandler()\n sh.setLevel(logging.INFO)\n #sh_formatter = colorlog.Formatter(fmt='%(log_color)s%(levelname):%(asctime)s\\n%(message)s', datefmt='%H:%M:%S')\n sh_formatter = colorlog.ColoredFormatter(\n \"%(log_color)s%(levelname)-8s - %(name)-25s - %(threadName)-15s - %(asctime)s - %(cyan)s \\n %(message)s\\n\",\n datefmt=None,\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'red,bg_white',\n },\n secondary_log_colors={},\n style='%'\n)\n sh.setFormatter(sh_formatter)\n\n #set up logging to file for ALL messages\n #fh = logging.FileHandler('log.txt')\n # fh = logging.handlers.TimedRotatingFileHandler('log.txt', when='midnight', interval=1, backupCount=7)\n # fh.setLevel(logging.DEBUG)\n # fh_formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d - %(threadName)s - %(filename)s.%(funcName)s.%(lineno)s - %(levelname)s\\n%(message)s\\n\\n', datefmt='%Y/%m/%d %H:%M:%S')\n # fh.setFormatter(fh_formatter)\n\n 
#put the handlers to use\n logger.addHandler(sh)\n # logger.addHandler(fh)", "def log_color(self, parts: List[Tuple[str, ANSICode or None]], *,\n new_line=True):\n\n coded = [self.ansi_code(text, color) for text, color in parts]\n self.log(\"\".join(coded), new_line=new_line)", "def get_color(self, record):\n if record.level == CRITICAL:\n return Fore.RED + Style.DIM\n elif record.level == ERROR:\n return Fore.RED + Style.BRIGHT\n elif record.level == WARNING:\n return Fore.YELLOW + Style.DIM\n elif record.level == NOTICE:\n return Fore.CYAN + Style.BRIGHT\n elif record.level == DEBUG:\n return Fore.GREEN + Style.BRIGHT\n return Fore.WHITE", "def TeeLog(self, msg=None, level=0):\n\n if msg != None:\n print(msg)\n logging.info(msg) if level == 0 else logging.warning(msg)", "def log(level, message, args=()):\n\tif level >= minimumLogLevel:\n\t\ttry:\n\t\t\tif args:\n\t\t\t\tmessage = message % args\n\t\t\tif level >= screenLogLevel:\n\t\t\t\tlogToScreen(message)\n\t\t\tif level >= fileLogLevel:\n\t\t\t\tLEVEL_PREFIXES = (\n\t\t\t\t\t\"DEBUG: \",\n\t\t\t\t\t\"INFO : \",\n\t\t\t\t\t\"WARN : \",\n\t\t\t\t\t\"ERROR: \",\n\t\t\t\t)\n\t\t\t\tlogToFile(LEVEL_PREFIXES[level] + message)\n\t\texcept UnicodeError:\n\t\t\tpass" ]
[ "0.72820413", "0.67462224", "0.66891", "0.6607708", "0.6521738", "0.6501938", "0.64461005", "0.6437896", "0.6431689", "0.64296585", "0.6406766", "0.640526", "0.640376", "0.63724786", "0.63439846", "0.6341986", "0.6324903", "0.61996776", "0.61714673", "0.6170006", "0.6167977", "0.6143365", "0.61288375", "0.61288375", "0.6104835", "0.6077699", "0.60753644", "0.6069092", "0.6060017", "0.60444903" ]
0.7540215
0
Finds column names for coordinates, annotations, and cell names
def determine_coordinates_and_cell_names(self): self.coordinates_and_cell_headers = [ annot[0] for annot in self.file.columns if annot[0].lower() in ("z", "y", "x", "name") ] # annotation column names self.annot_column_headers = [ annot for annot in self.file.columns if annot[0].lower() not in ("z", "y", "x", "name") ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_colnames(self):\n\n cd = self.conn.execute('select * from atom')\n print('Possible column names are:')\n names = list(map(lambda x: x[0], cd.description))\n print('\\trowID')\n for n in names:\n print('\\t'+n)", "def get_annot_colnames() -> List[str]:\n\n target_annot_cols = list()\n\n for col in JOINT_COLS:\n target_annot_cols.append('{}_{}'.format(col, 'cf'))\n\n target_annot_cols += ANNOT_COLS\n\n return target_annot_cols", "def get_loci_colnames(df):\n if 'node1_locus' in df.columns:\n return 'node1_locus', 'node2_locus'\n elif 'gene A' in df.columns:\n return 'gene A', 'gene B'", "def get_column_info(config):\n columns = config.view.columns\n colnames = dict(zip(columns, list(s.replace(\"_\", \" \") for s in columns)))\n colnames.update(config.view.colnames)\n column = config.view.column\n return columns, colnames, column", "def print_column_names(self):\n counter = 1\n try:\n for col_names in self.cursor.description:\n # print(self.cursor.description[col_names][0])\n print(\"\"\"Attribut{}: {:<5}, Typ: {:<5}, DisplaySize: {} InternalSize: {:<5}, Precision: {},\n \"Scale: {}, Null_Ok: {}\"\"\"\n .format(counter,\n col_names[0],\n col_names[1],\n col_names[2],\n col_names[3],\n col_names[4],\n col_names[5],\n col_names[6]))\n counter += 1\n except p.Error as exception:\n print(exception.pgerror)\n except Exception as general_exception:\n print(general_exception)", "def identify_columns(structure: dict):\n\tknown_columns = list()\n\n\t# collect columns\n\tfor day in structure['data']:\n\t\tfor key in day.keys():\n\t\t\tif key not in known_columns:\n\t\t\t\tknown_columns.append(key)\n\n\treturn known_columns", "def get_cols_dummy():", "def header(self, cols, parent_row):\n out = []\n for col in cols:\n if col == 'gau_id':\n out.append(self.name_for('Geographies', parent_row['geography_id']))\n elif col == 'oth_1_id':\n out.append(self.name_for('OtherIndexes', parent_row['other_index_1_id']))\n elif col == 'oth_2_id':\n out.append(self.name_for('OtherIndexes', parent_row['other_index_2_id']))\n else:\n out.append(col)\n return out", "def find_columns(stmt):\n columns = []\n tstmt = stmt.upper()\n parse = tstmt[tstmt.find(\"SELECT\") + 6:tstmt.find(\"FROM\")].strip().split(',')\n for p in parse:\n if ' AS ' in p:\n temp = p.split()\n temp.reverse()\n for i, val in enumerate(temp):\n if val == 'AS':\n columns.append(temp[i - 1].strip())\n break\n elif re.search(r\"\\D+\\.\\D+\", p) is not None:\n columns.append(p.split('.')[-1].strip())\n else:\n columns.append(p)\n return columns", "def get_report_column_names(self):\r\n # Compose the list of report_column names required for\r\n # summary_report.dsw.DictWriter()\r\n sr = self.summary_report\r\n dict_leader = sr.dict_leader\r\n dict_out = sr.dict_out\r\n column_names = self.column_names\r\n report_column_names = []\r\n #if dict_leader is not None and dict_out is not None:\r\n if dict_leader is not None and dict_out is not None:\r\n for key,value in dict_leader.iteritems():\r\n #print \"Adding report_column_name(from dict_leader)=\",key\r\n report_column_names.append(key)\r\n dict_out[key] = value\r\n # We have to initialize the DictWriter with the report_column_names\r\n # below. \r\n # Also need matched coord_val and var names for calling node_report()\r\n # below, so we do this duplication of storage of names. 
\r\n coord_var_names = []\r\n coord_val_names = []\r\n for idx, column_name in enumerate(column_names):\r\n var_name = \"Var_%s\" % str(idx+1)\r\n report_column_names.append(var_name)\r\n coord_var_names.append(var_name)\r\n val_name = \"Val_%s\" % str(idx+1)\r\n report_column_names.append(val_name)\r\n coord_val_names.append(val_name)\r\n # Add the entry report_column_names\r\n report_column_names += self.EntryClass.report_column_names\r\n return report_column_names", "def collect_columns():\n return ((x, y) for x in range(72) for y in range(x + 9, 81, 9))", "def find_columns(input_file, title):\n contents = find_table_command(input_file)\n for command in contents:\n if ' '+title+' ' in command:\n command = command.split('\\n')\n command.pop(0)\n command.pop(-1)\n column = []\n for line in command:\n column.append(line.split()[0].strip('\\\"'))\n return column, command", "def get_column_dict(self) -> HeaderToWells:\n return self._grid.columns", "def origin_columns(self):\n return self.intersection + self.origin_renames", "def get_annotation_names(viewer):\n\n layer_nodes_name = None\n layer_edges_name = None\n for layer in viewer.layers:\n if isinstance(layer, napari.layers.points.points.Points):\n layer_nodes_name = layer.name\n elif isinstance(layer, napari.layers.shapes.shapes.Shapes):\n layer_edges_name = layer.name\n if layer_nodes_name is not None and layer_edges_name is not None:\n break\n return layer_nodes_name, layer_edges_name", "def getCellTypes(self):\n sc_data = Utils.convertAnnDataToDf(self.sc_data)\n try:\n self.sc_annot, self.de_dict = Annotate.annotateTree(sc_data, self.refDataset, self.refAnnot)\n except:\n print(\"Columns of annotations should be cell type levels. Additionally, higher levels should contain lower levels bound with ':'. 
Example structure; level1 (including B-cells), level2 (including B-cells:Naive)\")", "def getColnames(self, dataset=\"X\"):\n if dataset in (\"X\",\"x\",):\n return [c[\"colname\"] for c in self._columns if c[\"dataset\"]==\"X\"]\n elif dataset in (\"y\",\"Y\",):\n return [c[\"colname\"] for c in self._columns if c[\"dataset\"]==\"y\"]\n else:\n raise Exception(\"Dataset unknown: {}\".format(dataset))", "def _get_data_labels(sheet, row, col):\n final_column = col\n header_row = _FIELDS['cell_value']['header']['row']\n # Abstract this sort of thing\n header = sheet.cell(row + header_row, final_column).value\n while any(header.startswith(label) for label\n in _FIELDS['isotherm tabular']['labels']):\n final_column += 1\n header = sheet.cell(row + header_row, final_column).value\n return [sheet.cell(row + header_row, i).value for i in\n range(col, final_column)]", "def get_re_analysis_detail_grid_column_names_by_order(self):\n self.column_name_list = self.get_grid_column_names_by_order(self.re_analysis_detail_grid_div_id)\n return self.column_name_list", "def get_cols_drop():", "def _get_columns(source):\n return _get_tuple(source)", "def get_columns_after_apply_mapping(self) -> List[str]:\n return self.get_dyf_and_apply_mapping().toDF().columns", "def __get_column_names(self, path_to_xml):\n span_table = xmlet.parse(path_to_xml / Path(\"SSTDataFormatTimeSpanTable.xml\")).getroot()\n filetype = MAP_RBD_TYPE_TO_FILE_TYPE[self.__rbd_type]\n\n for item in span_table:\n if item[0].text == filetype and item[1].text <= self.date <= item[2].text:\n data_description_file_name = item[3].text\n\n xml = xmlet.parse(path_to_xml / Path(data_description_file_name)).getroot()\n\n header = dict()\n for child in xml:\n var_name = child[0].text\n var_dim = int(child[1].text)\n var_type = child[2].text\n var_unit = child[3].text\n\n np_type = XML_TYPE_TO_NUMPY_TYPE[var_type]\n\n header.update({var_name: [var_dim, np_type, var_unit]})\n\n return header", "def get_column_names(self):\n # here, creating combined column/volue column names for uniqueness\n colname_temp = list()\n for column in self.col_value:\n colname_temp.append(self.question_column + \"-\" + str(column))\n return colname_temp", "def _columns(cls, schema: dsl.Source.Schema) -> typing.Sequence[str]:\n return tuple(f.name for f in schema)", "def generate_colnames(df, labelnum=0): # need to be adjusted for GC content\n colnames = []\n for field in range(len(df.columns) - labelnum):\n colnames.append(BEDCOLS[field])\n for label in range(labelnum):\n colnames.append(f\"label_{label+1}\")\n return colnames", "def find_pacgums(self):\n for row in range(len(self.structure)):\n for col in range(len(self.structure[row])):\n if self.structure[row][col] == 'n': \n self.pacgums.append((col, row))", "def get_headers(df):\n return df.columns.values", "def get_re_analysis_grid_column_names_by_order(self):\n self.column_name_list = self.get_grid_column_names_by_order(self.re_analysis_grid_div_id)\n return self.column_name_list", "def get_body_part_names(self):\n self.x_cols, self.y_cols, self.p_cols = [], [], []\n for bp in self.body_parts_lst:\n self.x_cols.append(f\"{bp}_x\")\n self.y_cols.append(f\"{bp}_y\")\n self.p_cols.append(f\"{bp}_p\")" ]
[ "0.632583", "0.63223386", "0.6270446", "0.59763235", "0.59620404", "0.5937639", "0.59089094", "0.58520865", "0.58381534", "0.58223736", "0.57881415", "0.5786808", "0.57778555", "0.57523495", "0.5699752", "0.56966233", "0.5673672", "0.5629715", "0.5628198", "0.55986613", "0.55775183", "0.5570049", "0.5560915", "0.5537659", "0.5528702", "0.55283064", "0.55180675", "0.55080914", "0.55014515", "0.55000156" ]
0.7893604
0
Sets data types for group annotations. This function assumes that annotation types passed into the function are valid.
def get_dtypes_for_group_annots(header: List, annot_types: List): group_dtypes = {} for annotation, annot_type in zip(header, annot_types): if annot_type != "numeric": group_dtypes[annotation] = np.str return group_dtypes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_type_annotations(self):\n valid = False\n invalid_types = []\n # skipping the TYPE keyword, iterate through the types\n # collecting invalid type annotations in list annots\n for t in self.annot_types[1:]:\n if t.lower() not in (\"group\", \"numeric\"):\n # if the value is a blank space, store a higher visibility\n # string for error reporting\n if \"Unnamed\" in t:\n invalid_types.append(\"<empty value>\")\n # Duplicated metadata header name causes type annotation issue.\n # Side effect of Pandas adding a suffix to uniquefy the header.\n # These invalid annotations should not be included in invalid\n # type annotation count. This exception may cause miscount of\n # type annot errors if user-supplied annotation has period.\n elif \".\" in t:\n pass\n else:\n invalid_types.append(t)\n if invalid_types:\n msg = 'TYPE row annotations should be \"group\" or \"numeric\"'\n self.store_validation_issue(\n \"error\",\n msg,\n \"format:cap:group-or-numeric\",\n associated_info=invalid_types,\n )\n else:\n valid = True\n return valid", "def set_data_type(data_type):\n data_type_type = DataTypeUtil.getDtypeFromContext(data_type)\n DataTypeUtil.setDTypeForContext(data_type_type)", "def add_annotation_type(self, doc, annotation_type):\n if len(doc.annotations) != 0:\n if not self.annotation_type_set:\n self.annotation_type_set = True\n if validations.validate_annotation_type(annotation_type):\n doc.annotations[-1].annotation_type = annotation_type\n return True\n else:\n raise SPDXValueError('Annotation::AnnotationType')\n else:\n raise CardinalityError('Annotation::AnnotationType')\n else:\n raise OrderError('Annotation::AnnotationType')", "def set_dtype(self, dtype):\n self.mean_.set_dtype(dtype)\n for filter_k in self.samples_:\n filter_k.set_dtype(dtype)\n self.dtype = self.mean_.dtype", "def _convert_annotations(self, ast):\n self.annotations = IDLAnnotations(ast)", "def copy_annotations(from_data, to_data, annot_type):\n\n for annot in from_data.annotations.select_type(annot_type):\n entity = anafora.AnaforaEntity()\n entity.id = annot.id\n entity.spans = annot.spans\n entity.type = annot.type\n to_data.annotations.append(entity)", "def setDataSetType(self, type):\n self.__data_set_type__ = type", "def _check_annotations(value):\n if isinstance(value, dict):\n for k, v in value.items():\n _check_annotations(v)\n elif isinstance(value, list):\n for element in value:\n _check_annotations(element)\n elif isinstance(value, numpy.ndarray):\n if value.dtype not in (numpy.integer, numpy.floating, numpy.complex) \\\n and value.dtype.type != numpy.string_:\n raise ValueError(\"Invalid annotation. NumPy arrays with dtype %s are not allowed\" % value.dtype)\n elif not isinstance(value, ALLOWED_ANNOTATION_TYPES):\n raise ValueError(\"Invalid annotation. 
Annotations of type %s are not allowed\" % type(value))", "def add_annotations(annot_tuples, ref_data, annot_type):\n\n for annot in ref_data.annotations.select_type(annot_type):\n annot_begin, annot_end = annot.spans[0]\n annot_tuples.append((annot_begin, annot_end, annot.id))", "def setTransformType(self, val): # real signature unknown; restored from __doc__\n pass", "def set_datatype(self, datatype):\n if(datatype == 0):\n self.datatype = \"eeg\"\n elif(datatype == 1):\n self.datatype = \"motion\"\n else:\n raise NotImplementedError(\"EEG and Motion-Data supported only\")", "def annotation(self, ann_type: str = None):\n if ann_type is None: ann_type = self.ann\n if ann_type != self.ann:\n warnings.warn('Please note that the annotation type is mismatch with the dataset setting!')\n\n if ann_type == 'label':\n xml_path = self.xml_path.format(id=self.id)\n ann = int(ET.parse(xml_path).find('defective').text)\n elif ann_type == 'bbox':\n xml_path = self.xml_path.format(id=self.id)\n objs = ET.parse(xml_path).findall('bbox')\n ann = []\n for ix, bbox in enumerate(objs):\n y1 = int(float(bbox.find('ymin').text))\n y2 = int(float(bbox.find('ymax').text))\n x1 = int(float(bbox.find('xmin').text))\n x2 = int(float(bbox.find('xmax').text))\n ann.append((y1, y2, x1, x2))\n elif ann_type == 'mask':\n mask_path = self.mask_path.format(id=self.id)\n if os.path.exists(mask_path):\n ann = Image.open(mask_path).convert('L')\n else:\n ann = Image.fromarray(np.zeros((512, 512), dtype=np.uint8)).convert('L')\n elif ann_type == 'none':\n ann = []\n else:\n raise NotImplementedError\n return ann", "def test_default_dtype(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (63,))\n assert dset.dtype == np.dtype('float32')", "def _empirical_set_dtype():\n MKL._set_int_type(_ctypes.c_longlong, np.int64)\n\n try:\n _validate_dtype()\n except ValueError as err:\n\n MKL._set_int_type(_ctypes.c_int, np.int32)\n\n try:\n _validate_dtype()\n except ValueError:\n raise ImportError(\"Unable to set MKL numeric type\")", "def types(self, types):\n\n self._types = types", "def get_group_types(self):\r\n pass", "def _value_of_annotation_type(self, val, node):\n return type_from_value(val, visitor=self, node=node)", "def assign_column_types(self):\n type_list = [\"category\" if u_input == 1 else float for u_input in self.user_column_label]\n self.df = self.df.astype(dict(zip(self.df.columns, type_list)))\n df_types = pd.DataFrame(self.df.dtypes).reset_index()\n df_types.columns = [\"column_name\", \"dtype\"]\n df_types.dtype = df_types.dtype.astype(str)\n self.column_dtypes = {list(df_types.column_name)[i]: list(df_types.dtype)[i] for i in range(len(df_types))}", "def annotations(self, annotations):\n self._annotations = annotations", "def initTypes(self):\n self.types = [ty.NoneType]*self.numcols()\n for k,row in enumerate(self.data):\n for i in range(self.numcols()):\n val = row[i]\n typ = self.types[i]\n if not val is None:\n if typ in [ty.NoneType,ty.IntType]:\n if val.isdigit():\n row[i] = int(val)\n if val.startswith('-') and val[1:].isdigit():\n row[i] = -int(val[1:])\n self.types[i] = ty.IntType\n continue\n if typ in [ty.NoneType,ty.IntType,ty.FloatType]:\n try:\n row[i] = float(val)\n if not typ == ty.FloatType:\n self.types[i] = ty.FloatType\n # Convert already existing values\n for j in range(k):\n elt = self.data[j][i]\n self.data[j][i] = None if elt is None else float(elt)\n continue\n except ValueError:\n pass\n if typ in 
[ty.NoneType,utils.Date]:\n try:\n row[i] = utils.Date(val)\n self.types[i] = utils.Date\n continue\n except ValueError:\n pass\n row[i] = unicode(val)\n if not typ == ty.UnicodeType:\n self.types[i] = ty.UnicodeType\n # Convert already existing values\n for j in range(k):\n elt = self.data[j][i]\n self.data[j][i] = None if elt is None else unicode(elt)", "def set_type(*args):\n return _ida_hexrays.set_type(*args)", "def __post_init__(self):\n for field in dataclasses.fields(self):\n value = getattr(self, field.name)\n if not isinstance(value, field.type) and value:\n try:\n setattr(self, field.name, field.type(value))\n except ValueError:\n raise ValueError(f\"Expected {field.name} \"\n f\"to be {field.type}, \"\n f\"got {repr(value)}\")", "def convert_type(self, value, schema_type, **kwargs):", "def test_type_confict(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n grp.create_group('foo')\n with pytest.raises(TypeError):\n grp.require_dataset('foo', (10, 3), 'f')", "def __init__(self, **named):\n for key, value in named.items():\n typ = getattr(self, '__annotations__', {}).get(key)\n if typ:\n setattr(self, key, type_coerce(value, typ))\n else:\n setattr(self, key, value)", "def create_data_types(self):\n for col in self.all_columns:\n try:\n if float(self.train[col].iloc[-3]):\n self.train[col] = self.train[col].astype(np.float32)\n except:\n pass\n self.d_types = self.train.dtypes", "def data_types(self):", "def annotate(self, **annotations):\n _check_annotations(annotations)\n self.annotations.update(annotations)", "def set_annotations(self, request, pk=None):\n # No need to check for permissions, since post requires edit by default.\n entity = self.get_object()\n # Read and validate the request data.\n serializer = AnnotationsSerializer(data=request.data, many=True)\n serializer.is_valid(raise_exception=True)\n annotations = [\n (entry[\"field\"], entry[\"value\"]) for entry in serializer.validated_data\n ]\n annotation_fields = [e[0] for e in annotations]\n # The following dict is a mapping from annotation field id to the annotation\n # value id.\n existing_annotations = dict(\n entity.annotations.filter(field__in=annotation_fields).values_list(\n \"field_id\", \"id\"\n )\n )\n\n validation_errors = []\n to_create = []\n to_update = []\n for field, value in annotations:\n annotation_id = existing_annotations.get(field.id)\n append_to = to_create if annotation_id is None else to_update\n annotation = AnnotationValue(\n entity_id=entity.id, field_id=field.id, value=value, id=annotation_id\n )\n try:\n annotation.validate()\n except DjangoValidationError as e:\n validation_errors += e\n append_to.append(annotation)\n\n if validation_errors:\n raise DjangoValidationError(validation_errors)\n\n with transaction.atomic():\n AnnotationValue.objects.bulk_create(to_create)\n AnnotationValue.objects.bulk_update(to_update, [\"_value\"])\n return Response()", "def test_get_group_class_types(self):\n pass" ]
[ "0.5981265", "0.54895484", "0.547235", "0.53883153", "0.53806806", "0.53698015", "0.5304921", "0.52900726", "0.52251786", "0.5142408", "0.5072298", "0.5048777", "0.5021037", "0.50178635", "0.5001251", "0.4979307", "0.4968987", "0.49605083", "0.49495628", "0.49360093", "0.4916752", "0.4881924", "0.4855782", "0.4852916", "0.4847873", "0.4843385", "0.48280275", "0.47858068", "0.4770459", "0.47672445" ]
0.5769833
1
Coerces numeric columns to floats and rounds annotations to 3 decimal places
def coerce_numeric_values(df, annot_types): if "numeric" in annot_types: numeric_columns = df.xs( "numeric", axis=1, level=1, drop_level=False ).columns.tolist() try: # Round numeric columns to 3 decimal places df[numeric_columns] = df[numeric_columns].round(3).astype(float) except ValueError as e: log_exception(Annotations.dev_logger, Annotations.user_logger, e) raise ValueError(e) return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_to_float(df, *cols):\n for col in cols:\n df[col] = df[col].str.replace(',', '')\n df[col] = df[col].str.replace(' ', '')\n df[col] = df[col].astype(float)\n return df", "def convert_dollars(df,col_name):\n df[col_name] = df[col_name].replace('[^.0-9]','',regex=True).astype(float)\n return df", "def convert_to_float(series):\n series = series.str[:-4].str.replace('.', '').str.replace(',', '.').astype(float)\n return series", "def conform_input_data(rowdict):\n # rowdict['Value'] = float(rowdict['Value'])\n rowdict['TimeStamp'] = TS_to_date(rowdict['TimeStamp'][:19])\n for floatcolumn in ['LowPx','OpenPx','ClosePx','QuoteCount','HighPx','TradeCount']:\n if floatcolumn in rowdict:\n rowdict[floatcolumn] = float(rowdict[floatcolumn])\n return rowdict", "def float_format(self):\n ...", "def to_numerical(table, column_name):\n def replace(entry):\n return float(entry)\n assert (isinstance(table, Table)), \"Input not a supported type.\"\n column = table.apply(replace, column_name)\n return table.append_column(column_name, column)", "def test_profiled_precision(self):\n df_1 = pd.Series([0.4, 0.3, 0.1, 0.1, 0.1]).apply(str)\n df_2 = pd.Series([0.11, 0.11, 0.12, 2.11]).apply(str)\n df_3 = pd.Series([4.114, 3.161, 2.512, 2.131]).apply(str)\n df_mix = pd.Series([4.1, '3.', 2.52, 2.13143]).apply(str)\n\n float_profiler = FloatColumn(\"Name\")\n float_profiler.update(df_3)\n self.assertEqual(4, float_profiler.precision)\n\n float_profiler.update(df_2)\n self.assertEqual(2, float_profiler.precision)\n\n float_profiler.update(df_1)\n self.assertEqual(1, float_profiler.precision)\n\n float_profiler = FloatColumn(\"Name\")\n float_profiler.update(df_mix)\n self.assertEqual(1, float_profiler.precision)\n\n # edge cases #\n # integer with 0s on right and left side\n df_ints = pd.Series(['0013245678', '123456700', '0012345600'])\n float_profiler = FloatColumn(\"Name\")\n float_profiler.update(df_ints)\n self.assertEqual(6, float_profiler.precision)\n\n # scientific\n df_scientific = pd.Series(['1.23e-3', '2.2344', '1.244e4'])\n float_profiler = FloatColumn(\"Name\")\n float_profiler.update(df_scientific)\n self.assertEqual(3, float_profiler.precision)\n\n # plus\n df_plus = pd.Series(['+1.3e-3', '+2.244', '+1.3324e4'])\n float_profiler = FloatColumn(\"Name\")\n float_profiler.update(df_plus)\n self.assertEqual(2, float_profiler.precision)\n\n # minus\n df_minus = pd.Series(['-1.3234e-3', '-0.244', '-1.3324e4'])\n float_profiler = FloatColumn(\"Name\")\n float_profiler.update(df_minus)\n self.assertEqual(3, float_profiler.precision)\n\n # spaces around values\n df_spaces = pd.Series([' -1.3234e-3 ', ' -0.244 '])\n float_profiler = FloatColumn(\"Name\")\n float_profiler.update(df_spaces)\n self.assertEqual(3, float_profiler.precision)\n\n # check to make sure all formats of precision are correctly predicted\n samples = [\n # value, expected precision\n ['10.01', 4],\n ['.01', 1],\n ['0.01', 1],\n ['-0.01', 1],\n ['+0.01', 1],\n [' +0.013', 2],\n [' -1.3234e-3 ', 5],\n [' 0012345600 ', 6],\n [' 0012345600. ', 8],\n [' -0012345600. 
', 8],\n ]\n for sample in samples:\n df_series = pd.Series([sample[0]])\n expected_precision = sample[1]\n precision = FloatColumn._get_float_precision(df_series)\n self.assertEqual(expected_precision, precision,\n msg='Errored for: {}'.format(sample[0]))", "def df_float2fillna(self, df):\n df = pd.to_numeric(df, errors=\"coerce\")\n df = df.fillna(999)\n # df = df.astype(np.int64)\n df = df.replace(999, \"\")\n df = df\n return df", "def _transform_col(col, val):\n if dict_values(col.types)[0] in ('int', 'real'):\n return col.asnumeric(), float(val)\n\n # for enums, character, etc...\n return col, val", "def to_numeric_and_downcast_data(df: pd.DataFrame):\n fcols = df.select_dtypes('float').columns\n \n icols = df.select_dtypes('integer').columns\n\n df[fcols] = df[fcols].apply(pd.to_numeric, downcast='float')\n \n df[icols] = df[icols].apply(pd.to_numeric, downcast='integer')\n\n return df", "def test_round_columns():\n case = unittest.TestCase()\n\n settings = {\n 'type': 'round',\n 'columns': ['col1'],\n 'decimals': 1,\n }\n\n df = run({'default': DF.copy()}, settings)['default']\n\n case.assertEqual(\n df.to_dict(orient='records'),\n [{\n 'col1': 1.7,\n 'col2': 3.14,\n }],\n )", "def __mdformat(self, dat):\n\t\tif type(dat) is types.FloatType:\n\t\t\treturn round(dat, 3)\n\t\telse:\n\t\t\treturn dat", "def test_float():\n floatify = fields.FloatField().adapt\n\n for input, expect in [\n (1.1, 1.1),\n (11, 11.0),\n (int(5.7), 5)\n ]:\n assert floatify(input) == expect", "def to_float(x, key):\n x = x.strip()\n if not x or x in ('NA', 'n/a'):\n return None\n if '.' in x:\n # There are '.'s, so commas are placeholders\n x = x.replace(',', '') \n if x.endswith('ft'):\n scale = 0.3048\n x = x[:-2].strip()\n else:\n scale = 1 \n try:\n return scale * float(x)\n except:\n logging.warn('Could not convert %s value %s to float', key, x)\n return None", "def condense_floating_points(css):\n log.debug(\"Condensing all floating point values.\")\n return re.sub(r\"(:|\\s)0+\\.(\\d+)\", r\"\\1.\\2\", css)", "def ijson_decimal_to_float(event):\n if event[1] == 'number' and isinstance(event[2], decimal.Decimal):\n return event[0], event[1], float(event[2])\n else:\n return event", "def set_precisions(df):\n\n # Create a copy so we're not modifying the original DF\n df = df.copy()\n for regex, str_format in TABLE_FLOAT_STRING_FORMAT.items():\n r = re.compile(regex, re.IGNORECASE)\n columns = list(filter(r.match, df.columns))\n for col in columns:\n df[col] = df[col].map(lambda x: str_format % x)\n return df", "def add_support_for_floats_to_dynamodb():\n\n # Ignore loss of precision rather than raising exception\n DYNAMODB_CONTEXT.clear_traps()\n\n # Keep a reference to the original serialization methods\n boto3_serialize_orig = TypeSerializer.serialize\n boto3_deserialize_orig = TypeDeserializer.deserialize\n\n # Wrap serialization methods to support floats\n def boto3_serialize(self, value):\n if isinstance(value, float):\n value = Decimal(value)\n return boto3_serialize_orig(self, value)\n\n def boto3_deserialize(self, value):\n value = boto3_deserialize_orig(self, value)\n if isinstance(value, Decimal):\n value = float(value)\n return value\n\n # Replace the serialization methods with wrapped versions\n TypeSerializer.serialize = boto3_serialize\n TypeDeserializer.deserialize = boto3_deserialize", "def get_column_type(cls, **kwargs: Any) -> Any:\n return sqlalchemy.Float()", "def get_float_data(dataframe):\n dataframe = dataframe[np.isfinite(dataframe.TIME_StartTime)]\n float_cols = 
[isfloatarray(col) for col in dataframe.values.T]\n return (dataframe.T[float_cols].T).astype(float)", "def format_float(data):\n try:\n return format(float(data), '.1f')\n except:\n return None", "def format_float(value):\n if isinstance(value, float):\n value = '{:.3f}'.format(value * 1e-3)\n return value", "def convertRate(row):\n if row == 'null':\n return 1.0\n elif ':' in row:\n rows = row.split(':')\n return 1.0 - float(rows[1])/float(rows[0])\n else:\n return float(row)", "def __float__(self) -> float:\n return self._translate_in_type(float, self.integer)", "def cast(elem, psql_type):\n if psql_type == 'real':\n return float(format(elem, '.6g'))\n elif psql_type == 'double precision':\n return float(format(elem, '.15g'))\n elif psql_type == 'timestamp':\n if isinstance(elem, pd.Timestamp):\n return elem.to_pydatetime()\n else:\n return elem\n elif psql_type == 'text':\n if type(elem) == float:\n return \"NaN\"\n return str(elem)\n else:\n return elem", "def force_float(element, surpress_error = False):\n \n if isinstance(element, float):\n # element is a float, return it\n return element\n else:\n try:\n # try if the element is a number\n return float(element)\n except (ValueError, TypeError):\n # replace all non-digit characters\n element = str(element)\n matches = convert_pattern.match(element)\n \n if matches != None:\n element = matches.group(0)\n \n try:\n return float(element)\n except (ValueError, TypeError):\n if surpress_error:\n return 0\n else:\n raise", "def format_col(values, num_decimal=3):\n new_vals = []\n for val in values:\n if np.isnan(val):\n new_val = NA_REP\n elif isinstance(val, numbers.Number):\n new_val = text_util.format_num(val, num_decimal=num_decimal)\n else:\n new_val = val\n new_vals.append(new_val)\n\n return new_vals", "def __convert_min(self):\n self.pandas_df[\"min\"] = self.pandas_df[\"min\"].str.replace(':','.').astype(float) # Converts column to float; column previously held strings\n self.pandas_df[\"min\"] = self.pandas_df[\"min\"].astype(int) + (((self.pandas_df.loc[:, \"min\"] - self.pandas_df.loc[:, \"min\"].astype(int)) * 100).round(0).astype(int) / 60).round(2)", "def convertRate(row):\n if pd.isnull(row):\n return 1.0\n elif ':' in str(row):\n rows = row.split(':')\n return 1.0 - float(rows[1]) / float(rows[0])\n else:\n return float(row)", "def clean_dollar(df, col):\n \n df[col] = df[col].apply(lambda s: s.strip('$')).astype(float)\n \n return df" ]
[ "0.64902544", "0.61874473", "0.608199", "0.60813975", "0.6055885", "0.60087264", "0.5969837", "0.5932989", "0.59281605", "0.59061414", "0.5905307", "0.5799542", "0.57412326", "0.5664925", "0.565744", "0.56301486", "0.5617101", "0.5613498", "0.5609374", "0.5608853", "0.56031525", "0.55840534", "0.5553394", "0.5519582", "0.5519046", "0.5504627", "0.5500091", "0.5495997", "0.5482218", "0.54738086" ]
0.7405629
0
Converts empty cells in numeric annotations to NaN
def coerce_empty_numeric_values(self): if "numeric" in self.annot_types: numeric_columns = self.file.xs( "numeric", axis=1, level=1, drop_level=False ).columns.tolist() self.file[numeric_columns].replace("", np.nan, inplace=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fill_nan_in_numeric(df):\n print(\" --- Filling NaN in Numerics.\")\n thresh = get_min_filled_threshold(df)\n columns = df.columns\n numerical = [x for x in columns if x.startswith('n_')]\n # fill NaN with mean or median, based on std dev\n for col in numerical:\n filled = get_non_missing_count(df[col])\n if filled < thresh:\n df[col] = df[col].fillna(-1)\n else:\n std = df[col].std()\n if std < 1:\n mean = df[col].mean()\n df[col] = df[col].fillna(mean)\n else:\n median = df[col].median()\n df[col] = df[col].fillna(mean)\n\n print(\" --- Finished filling NaN in Numerics.\")\n return df", "def df_float2fillna(self, df):\n df = pd.to_numeric(df, errors=\"coerce\")\n df = df.fillna(999)\n # df = df.astype(np.int64)\n df = df.replace(999, \"\")\n df = df\n return df", "def _remove_nan(parsed_dictionary):\n for key, value in parsed_dictionary.items():\n if isinstance(value, np.ndarray):\n non_nan_value = np.nan_to_num(value, nan=123456789, posinf=2e308, neginf=-2e308)\n parsed_dictionary.update({key: non_nan_value})\n\n return parsed_dictionary", "def NaN_cleaning(df):\n df = df.replace(np.nan, 'unknown')\n return df.reset_index(drop=True)", "def NA():\n return float('nan')", "def nullValueToNan(self) -> None:\n self.cpp.nullValueToNan()", "def fix_data(self, df):\n return df.dropna(axis='columns', how='all').fillna(0.0)", "def replace_nan(cls, prop_obj):\n for key, item in enumerate(prop_obj):\n for column, value in item.items():\n if str(value) == 'nan':\n prop_obj[key][column] = 0.0", "def test_empty_input():\n assert _currency_column_to_numeric(\"\") == \"ORIGINAL_NA\"", "def _strip_nan(val):\n if isinstance(val, float) and np.isnan(val):\n return '__NaN__'\n elif isinstance(val, dict):\n return {key: Database._strip_nan(item) for key, item in list(val.items())}\n elif isinstance(val, list) or isinstance(val, tuple):\n return [Database._strip_nan(item) for item in val]\n elif isinstance(val, set):\n raise NotImplementedError\n return val", "def impute_missing(df):\n\n for name in df.select_dtypes(\"number\"):\n df[name] = df[name].fillna(0)\n for name in df.select_dtypes(\"category\"):\n df[name] = df[name].fillna(\"None\")\n return df", "def must_be_numeric(cell):\n # Check if it's nan first\n if check_empty(cell):\n return True\n # If it's not nan, check it's a number\n return pd.isna(pd.to_numeric(str(cell), errors=\"coerce\"))", "def replace_nan(data):\r\n lst_ind = np.array(['valence_intensity', 'anger_intensity',\r\n 'fear_intensity', 'sadness_intensity', 'joy_intensity'])\r\n for i in lst_ind:\r\n native = data[:][i]\r\n avg = np.nanmean(native)\r\n data[:][i] = np.where(np.isnan(native), avg, native)\r\n return data", "def change_nan(dict):\n\n for k,v in dict.items():\n if np.isnan(v):\n dict[k] = 0.0\n else:\n dict[k] = v", "def set_nan_as_string(data, replace_str='0'):\n for i, x in enumerate(data):\n for key, value in x.items():\n if value == '':\n x[key] = replace_str\n data[i] = x", "def code_unknown_to_nan(data, attribute_values):\n attribute_values_unknown = attribute_values[attribute_values['Meaning'] == \"unknown\"]\n for i in range(len(attribute_values_unknown)):\n colname = attribute_values_unknown.iloc[i]['Attribute']\n unknown_values = eval('[' + str(attribute_values_unknown.iloc[i]['Value']) + ']')\n try:\n data[colname] = data[colname].replace(unknown_values, float('nan'))\n except:\n pass\n return data", "def set_nan(x):\n x[x == -999] = np.nan\n return x", "def isnan(data):\n return _make.isnan(data)", "def ISNA(value):\n return isinstance(value, float) and 
math.isnan(value)", "def dataCleaner(dataframe):\r\n dataframe = dataframe.dropna(how='all')\r\n for col in dataframe:\r\n dataframe[col] = dataframe[col].apply(lambda x : np.nan() if str(x).isspace() else x)\r\n dataframe[col] = dataframe[col].fillna(dataframe[col].mean())\r\n return dataframe", "def correct_nans(y):\n y = str(y)\n if y == \"nan\":\n return \"\"\n else:\n y = float(y)\n return int(y)", "def isnan(self):\n return self.isAny( (lambda x: np.isnan(x)) )", "def test_drop_numbers():\n cleaner = TextCleaner()\n assert cleaner.transform([[\"123,123.123\"]])[\"corpus\"][0] == \"\"\n assert not cleaner.drops[\"number\"].dropna().empty", "def replace_nan_num(filename, columns, value_dic):\n\th = pyfits.open(filename, mode='update')\n\tfor col in columns:\n\t\tif value_dic.has_key(col):\n\t\t\tval = value_dic[col]\n\t\telse:\n\t\t\tval = 0\n\t\tdata = h[1].data.field(col)\n\t\th[1].data.field(col)[:] = where(isnan(data), val, data)\n\th.flush()\n\th.close()", "def test_default_cleaning_style_with_remove(currency_df):\n result = currency_df.currency_column_to_numeric(\n \"d_col\",\n cast_non_numeric={\"non-existent-col\": 10},\n remove_non_numeric=True,\n )\n expected = pd.DataFrame(\n {\n \"a_col\": [\" 24.56\", \"(12.12)\", \"1,000,000\"],\n \"d_col\": [np.nan, 1.23, -1_000],\n },\n index=[0, 2, 3],\n )\n assert_frame_equal(result, expected)", "def pd_isnan(val):\n return val is None or val != val", "def clean_all(text):\n # anticipate Null values in columns that will be cleaned\n if text is not None and type(text) is not float:\n text = \"\".join(text)\n no_ucode = clean_unicode(text)\n no_space = \"\".join(clean_whitespaces(no_ucode.strip()))\n text = no_space.strip()\n\n return text", "def remove_nan(self, dataframe):\n return dataframe.dropna()", "def correct_for_missing_labels(df, annotation_values):\n columns = list(df.columns)\n missing_labels = [x for x in annotation_values if x not in columns]\n\n if not len(missing_labels) > 0:\n return(df)\n else:\n for msslbl in missing_labels:\n df[msslbl] = 0\n return(df)", "def check_empty(cell):\n return pd.isna(cell)" ]
[ "0.6934066", "0.66125906", "0.65770066", "0.65756345", "0.6518445", "0.6518293", "0.6511774", "0.6474103", "0.6398664", "0.6339298", "0.6284341", "0.6269383", "0.6237103", "0.62226254", "0.61696684", "0.61133254", "0.6088505", "0.60856736", "0.6034949", "0.59849316", "0.5974105", "0.5966936", "0.5947347", "0.594669", "0.59186965", "0.590926", "0.5904175", "0.5902704", "0.58825225", "0.5879707" ]
0.8349927
0
Create dataframe with proper dtypes for group annotations. Numeric annotations require special handling and are addressed in functions presented in preprocess() and preprocess_numeric_annot().
def create_data_frame(self): column_names = Annotations.create_columns(self.headers, self.annot_types) dtypes = Annotations.get_dtypes_for_group_annots(self.headers, self.annot_types) df = self.open_file( self.file_path, open_as="dataframe", # Coerce values in group annotations converters=dtypes, # Header/column names names=self.headers, # Prevent pandas from reading first 2 lines in file # since they're passed in with param 'names' skiprows=2, )[0] self.file = Annotations.convert_header_to_multi_index(df, column_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dtypes_for_group_annots(header: List, annot_types: List):\n group_dtypes = {}\n for annotation, annot_type in zip(header, annot_types):\n if annot_type != \"numeric\":\n group_dtypes[annotation] = np.str\n return group_dtypes", "def FE_add_groupby_features_aggregated_to_dataframe(train,\r\n agg_types,groupby_columns,ignore_variables, test=\"\"):\r\n train_copy = copy.deepcopy(train)\r\n test_copy = copy.deepcopy(test)\r\n if isinstance(groupby_columns, str):\r\n groupby_columns = [groupby_columns]\r\n \r\n for groupby_column in groupby_columns:\r\n train_copy_index = train_copy.index\r\n MGB = My_Groupby_Encoder(groupby_column, agg_types, ignore_variables)\r\n train1 = MGB.fit_transform(train)\r\n addl_cols = left_subtract(train1.columns,train.columns)\r\n train1.index = train_copy_index\r\n train_copy = pd.concat([train_copy,train1[addl_cols]], axis=1)\r\n if isinstance(test, str) or test is None:\r\n pass\r\n else:\r\n test_copy_index = test_copy.index\r\n test1 = MGB.transform(test)\r\n addl_cols = left_subtract(test1.columns,test.columns)\r\n test1.index = test_copy_index\r\n test_copy = pd.concat([test_copy,test1[addl_cols]],axis=1)\r\n ### return the dataframes ###########\r\n return train_copy, test_copy", "def create_annotation(raw):\n annotation_pandas = pd.DataFrame(columns=[\"onset\", \"duration\", \"description\"])\n for idx, event in enumerate(raw.annotations):\n annotation_pandas.loc[idx] = [\n event[\"onset\"],\n event[\"duration\"],\n event[\"description\"],\n ]\n return annotation_pandas", "def rep_dtypes(df):\n return \"(\" + re.sub(\", dtype.*\", \"\", re.sub(r\" +\", \": \", str(df.dtypes)).replace(\"\\n\", \", \")) + \")\"", "def _dataframe_preprocess(self):\n # 1. add baisc feature like date, time in day, ....\n if self.data_type != 'porto':\n self.df['TIMESTAMP'] = self.df.apply(lambda df: df['TIMESTAMPS'][0], axis=1)\n self.df['TIME'] = pd.to_datetime(self.df['TIMESTAMP'], unit='s', utc=True)\n \n self.df.TIME = self.df.TIME.dt.tz_convert(self.timezone)\n # 2. 
group df for specific driver analysis\n self.grouped_df = self.df.groupby('LABEL')\n if self.count_od_info:\n if 'SD' not in self.df.columns:\n self._add_OD_info()\n self.grouped_od = self.df.groupby('SD')", "def data_for_grouping(dtype):\n pa_dtype = dtype.pyarrow_dtype\n if pa.types.is_boolean(pa_dtype):\n A = False\n B = True\n C = True\n elif pa.types.is_floating(pa_dtype):\n A = -1.1\n B = 0.0\n C = 1.1\n elif pa.types.is_signed_integer(pa_dtype):\n A = -1\n B = 0\n C = 1\n elif pa.types.is_unsigned_integer(pa_dtype):\n A = 0\n B = 1\n C = 10\n elif pa.types.is_date(pa_dtype):\n A = date(1999, 12, 31)\n B = date(2010, 1, 1)\n C = date(2022, 1, 1)\n elif pa.types.is_timestamp(pa_dtype):\n A = datetime(1999, 1, 1, 1, 1, 1, 1)\n B = datetime(2020, 1, 1)\n C = datetime(2020, 1, 1, 1)\n elif pa.types.is_duration(pa_dtype):\n A = timedelta(-1)\n B = timedelta(0)\n C = timedelta(1, 4)\n elif pa.types.is_time(pa_dtype):\n A = time(0, 0)\n B = time(0, 12)\n C = time(12, 12)\n else:\n raise NotImplementedError\n return pd.array([B, B, None, None, A, A, B, C], dtype=dtype)", "def validate_type_annotations(self):\n valid = False\n invalid_types = []\n # skipping the TYPE keyword, iterate through the types\n # collecting invalid type annotations in list annots\n for t in self.annot_types[1:]:\n if t.lower() not in (\"group\", \"numeric\"):\n # if the value is a blank space, store a higher visibility\n # string for error reporting\n if \"Unnamed\" in t:\n invalid_types.append(\"<empty value>\")\n # Duplicated metadata header name causes type annotation issue.\n # Side effect of Pandas adding a suffix to uniquefy the header.\n # These invalid annotations should not be included in invalid\n # type annotation count. This exception may cause miscount of\n # type annot errors if user-supplied annotation has period.\n elif \".\" in t:\n pass\n else:\n invalid_types.append(t)\n if invalid_types:\n msg = 'TYPE row annotations should be \"group\" or \"numeric\"'\n self.store_validation_issue(\n \"error\",\n msg,\n \"format:cap:group-or-numeric\",\n associated_info=invalid_types,\n )\n else:\n valid = True\n return valid", "def pandas_typecast(self) -> dict:\n res = {}\n for feat in self.data_features:\n res[feat.key] = ApiForm.typecast(feat.dtype)\n return res", "def annotation_to_nodules(annotation_df):\n data_list = []\n for group in annotation_df.groupby(['AccessionNumber', 'DoctorID']):\n accession_number = group[0][0]\n doctor_id = group[0][1]\n\n nodules = group[1].iloc[:, 5:].values.reshape(-1, 5)\n for i in range(nodules.shape[0]):\n nodule_id = generate_index()\n nodule_dict = {\n 'AccessionNumber': accession_number,\n 'DoctorID': doctor_id,\n 'NoduleID': nodule_id,\n 'NoduleType': nodules[i, 4],\n 'coordX': nodules[i, 0] if nodules[i, 0] != '' else 'NaN',\n 'coordY': nodules[i, 1] if nodules[i, 1] != '' else 'NaN',\n 'coordZ': nodules[i, 2] if nodules[i, 2] != '' else 'NaN',\n 'diameter_mm': nodules[i, 3] if nodules[i, 3] != '' else 'NaN',\n }\n data_list.append(nodule_dict)\n result_df = pd.DataFrame(data_list)\n result_df.coordX = result_df.coordX.astype(np.float)\n result_df.coordY = result_df.coordY.astype(np.float)\n result_df.coordZ = result_df.coordZ.astype(np.float)\n result_df.diameter_mm = result_df.diameter_mm.astype(np.float)\n result_df = result_df.dropna()\n result_df = result_df.assign(DoctorID=lambda df: df.loc[:, 'DoctorID'].str.replace(\"'\", \"\"))\n return normalize_nodule_type(result_df)", "def data_for_grouping(allow_in_pandas):\n a, b, c = (1,), (2,), (3,)\n return 
PandasArray(np.array(\n [b, b, np.nan, np.nan, a, a, b, c]\n ))", "def set_dtypes(df):\n # drop rows where a column names appear (happened while appending to csv)\n df = df.loc[df[df.columns[0]] != df.columns[0]]\n # convert numerics\n df = df.apply(pd.to_numeric, errors='ignore')\n # parse query_timestamp\n df.query_timestamp = df.query_timestamp.apply(pd.to_datetime)\n\n df.reset_index(inplace=True, drop=True)\n\n return df", "def test_default_dtype(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (63,))\n assert dset.dtype == np.dtype('float32')", "def encode_dtypes(df):\n\n global catn, cato\n\n # Nominal categories\n for name in catn:\n df[name] = df[name].astype(\"category\")\n # Add a None category for missing values\n if \"None\" not in df[name].cat.categories:\n df[name].cat.add_categories(\"None\", inplace=True)\n # Ordinal categories\n for name, levels in cato.items():\n df[name] = df[name].astype(CategoricalDtype(levels,\n ordered=True))\n return df", "def _parse_dtypes(data, table_meta):\n for name, field in table_meta['fields'].items():\n field_type = field['type']\n if field_type == 'datetime':\n datetime_format = field.get('format')\n data[name] = pd.to_datetime(data[name], format=datetime_format, exact=False)\n elif field_type == 'numerical' and field.get('subtype') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n elif field_type == 'id' and field.get('subtype', 'integer') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n\n return data", "def binarize_annotations(df: DataFrame) -> DataFrame:\n\n new_df = pd.DataFrame(index=df.index)\n for col in df.columns:\n if len(df[col].dropna().value_counts().keys()) == 2:\n new_df[col] = df[col]\n elif len(df[col].dropna().value_counts().keys()) > 2:\n for val in df[col].dropna().value_counts().keys():\n val = val.replace(\"_\", \"-\")\n new_df.loc[(df[col] != val), binarized_col_name % (col, val)] = (\n outgroup_val % val\n )\n new_df.loc[(df[col] == val), binarized_col_name % (col, val)] = val\n new_df.loc[df[col].isnull(), binarized_col_name % (col, val)] = np.nan\n return new_df", "def _read_group_format_result_custom(self, data, annotated_groupbys, groupby, domain):\n\n sections = []\n for gb in annotated_groupbys:\n ftype = gb['type']\n value = data[gb['groupby']]\n\n # full domain for this groupby spec\n d = None\n if value:\n if ftype == 'many2one':\n value = value[0]\n elif ftype in ('date', 'datetime'):\n locale = self._context.get('lang') or 'en_US'\n if locale == \"ar_SY\":\n locale = \"ar\"\n fmt = DEFAULT_SERVER_DATETIME_FORMAT if ftype == 'datetime' else DEFAULT_SERVER_DATE_FORMAT\n tzinfo = None\n range_start = value\n range_end = value + gb['interval']\n # value from postgres is in local tz (so range is\n # considered in local tz e.g. 
\"day\" is [00:00, 00:00[\n # local rather than UTC which could be [11:00, 11:00]\n # local) but domain and raw value should be in UTC\n if gb['tz_convert']:\n tzinfo = range_start.tzinfo\n range_start = range_start.astimezone(pytz.utc)\n range_end = range_end.astimezone(pytz.utc)\n\n range_start = range_start.strftime(fmt)\n range_end = range_end.strftime(fmt)\n if ftype == 'datetime':\n label = babel.dates.format_datetime(\n value, format=gb['display_format'],\n tzinfo=tzinfo, locale=locale\n )\n else:\n label = babel.dates.format_date(\n value, format=gb['display_format'],\n locale=locale\n )\n data[gb['groupby']] = ('%s/%s' % (range_start, range_end), label)\n d = [\n '&',\n (gb['field'], '>=', range_start),\n (gb['field'], '<', range_end),\n ]\n\n if d is None:\n d = [(gb['field'], '=', value)]\n sections.append(d)\n sections.append(domain)\n\n data['__domain'] = expression.AND(sections)\n if len(groupby) - len(annotated_groupbys) >= 1:\n data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}\n del data['id']\n return data", "def test_dtype(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (5,), '|S10')\n assert dset.dtype == np.dtype('|S10')", "def _coerce_and_store_data_types(tag_loop_dict):\n\n regex_format = re.compile(r\"\"\"\\d*\\.(?P<decimal>\\d+)(?:[Ee]?[+-]?(?P<exponent>\\d?))\"\"\")\n\n # Attempt to convert data columns from strings to integers or floats whenever possible\n # Skip any table with 'data_header' in its name because these contain mixed data\n for key in tag_loop_dict.keys():\n if u'data_header' not in key:\n tmp = tag_loop_dict[key].copy()\n tag_loop_dict[key] = tag_loop_dict[key].apply(lambda x: pd.to_numeric(x, errors=u'ignore'))\n \n # Preserve the formatting for all columns that were converted to floats\n float_cols = [x for x in tag_loop_dict[key].columns if tag_loop_dict[key][x].dtype == np.float]\n\n decimal_format = dict([(col, tmp[col].apply(lambda x: \n len(re.search(regex_format, x).group('decimal'))).max())\n for col in float_cols])\n\n exponent_format = dict([(col, tmp[col].apply(lambda x: \n len(re.search(regex_format, x).group('exponent'))).max())\n for col in float_cols])\n\n number_format = dict([(col,'f') if exponent_format[col] == 0 else (col,'E')\n for col in float_cols])\n\n formatter = dict([(col, '{:.' 
+ str(decimal_format[col]) + number_format[col] + '}') \n for col in float_cols])\n \n # Save format instructions to dataframe\n tag_loop_dict[key]._print_format = formatter\n\n return tag_loop_dict", "def dataframe_with_arrays(include_index=False):\n dtypes = [('i1', pa.int8()), ('i2', pa.int16()),\n ('i4', pa.int32()), ('i8', pa.int64()),\n ('u1', pa.uint8()), ('u2', pa.uint16()),\n ('u4', pa.uint32()), ('u8', pa.uint64()),\n ('f4', pa.float32()), ('f8', pa.float64())]\n\n arrays = OrderedDict()\n fields = []\n for dtype, arrow_dtype in dtypes:\n fields.append(pa.field(dtype, pa.list_(arrow_dtype)))\n arrays[dtype] = [\n np.arange(10, dtype=dtype),\n np.arange(5, dtype=dtype),\n None,\n np.arange(1, dtype=dtype)\n ]\n\n fields.append(pa.field('str', pa.list_(pa.string())))\n arrays['str'] = [\n np.array([\"1\", \"ä\"], dtype=\"object\"),\n None,\n np.array([\"1\"], dtype=\"object\"),\n np.array([\"1\", \"2\", \"3\"], dtype=\"object\")\n ]\n\n fields.append(pa.field('datetime64', pa.list_(pa.timestamp('ms'))))\n arrays['datetime64'] = [\n np.array(['2007-07-13T01:23:34.123456789',\n None,\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ms]'),\n None,\n None,\n np.array(['2007-07-13T02',\n None,\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ms]'),\n ]\n\n if include_index:\n fields.append(pa.field('__index_level_0__', pa.int64()))\n df = pd.DataFrame(arrays)\n schema = pa.schema(fields)\n\n return df, schema", "def create_data_types(self):\n for col in self.all_columns:\n try:\n if float(self.train[col].iloc[-3]):\n self.train[col] = self.train[col].astype(np.float32)\n except:\n pass\n self.d_types = self.train.dtypes", "def finalized_canonical_averages_dtype(spanning_cluster=True):\n fields = list()\n fields.extend([\n ('number_of_runs', 'uint32'),\n ('p', 'float64'),\n ('alpha', 'float64'),\n ])\n if spanning_cluster:\n fields.extend([\n ('percolation_probability_mean', 'float64'),\n ('percolation_probability_std', 'float64'),\n ('percolation_probability_ci', '(2,)float64'),\n ])\n fields.extend([\n ('percolation_strength_mean', 'float64'),\n ('percolation_strength_std', 'float64'),\n ('percolation_strength_ci', '(2,)float64'),\n ('moments_mean', '(5,)float64'),\n ('moments_std', '(5,)float64'),\n ('moments_ci', '(5,2)float64'),\n ])\n return _ndarray_dtype(fields)", "def field_feature_dtypes(self):\n\n dtypes_grp = self.h5['{}/{}'.format(SETTINGS, FIELD_FEATURE_DTYPES_STR)]\n\n field_paths = _iter_field_paths(dtypes_grp)\n\n dtypes = {}\n for field_path in field_paths:\n dtype_str = dtypes_grp[field_path][()]\n # if there is 'None' flag for the dtype then return None\n if dtype_str == NONE_STR:\n dtypes[field_path] = None\n else:\n dtype_obj = json.loads(dtype_str)\n dtype_obj = [tuple(d) for d in dtype_obj]\n dtype = np.dtype(dtype_obj)\n dtypes[field_path] = dtype\n\n return dtypes", "def grouping_cols(df, cat_percentage = 0.05, checking_itr = 10):", "def preprocess(df):\n \n # drop the following columns - irrelevant now\n DROP_COLUMNS = ['id', 'original_title', 'release_date'\n , 'tmdbId', 'popularity', 'year']\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n \n # drop all of the language columns\n DROP_COLUMNS = [col for col in df.columns if col[:3]==\"lan\"]\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n\n # loop through the columns we want to aggregate\n for col_type in [\n \"original_language_\"\n , \"prod_comp_cntry_\"\n , \"prod_comp_names_\"\n , \"writers_\"\n , \"actors_\"\n , \"genres_\"\n , \"director_\"\n ]:\n # create a dictionary of each unique value 
and its frequency\n val_freq = {}\n for col in df.columns:\n if col.startswith(col_type):\n val_freq[col] = df[col].sum()\n\n # create a dataframe from this dictionary; sort by count\n counts = pd.DataFrame.from_dict(\n val_freq\n , orient='index'\n , columns=['count']\n ).sort_values('count', ascending=False)\n counts['frac'] = counts['count'].apply(lambda x: 100*x / df.shape[0])\n\n # handle special case of production company country\n if col_type == \"prod_comp_cntry_\":\n DROP_COLUMNS = [col for col in counts.index][3:]\n\n # handle special case of directors\n elif col_type == \"director_\":\n DIRECTOR_COLS = [col for col in df.columns\n if col.startswith(\"director_\")\n and col!=\"director_pop\"]\n df['established_director'] = df[DIRECTOR_COLS].max(axis=1)\n DROP_COLUMNS = DIRECTOR_COLS\n\n # handle special case of actors\n elif col_type == \"actors_\":\n ACTORS_COLS = [col for col in df.columns if \"actors\" in col]\n df['num_top_100_actors'] = df[ACTORS_COLS].sum(axis=1)\n DROP_COLUMNS = ACTORS_COLS\n\n # handle all the other cases\n else:\n DROP_COLUMNS = [col for col in counts.query('frac < 2').index]\n\n\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n \n ##########################################################################\n # adjust the data for inflation\n CPI_tf = df['CPIAUCSL'].max()\n df['budget'] = df[['budget', 'CPIAUCSL']].apply(\n cpi_adjust\n , args=(CPI_tf ,)\n , axis=1\n )\n df['revenue'] = df[['revenue', 'CPIAUCSL']].apply(\n cpi_adjust\n , args=(CPI_tf ,)\n , axis=1\n )\n # no longer need CPI data\n df.drop('CPIAUCSL', axis=1, inplace=True)\n \n ########################################################################## \n # add in useful features about the cast and crew \n df['cast_crew_sum_pop'] = (\n df['director_pop']\n + df['avg_actor_pop']\n + df['avg_writer_pop']\n )\n df['cast_crew_product_pop'] = (\n df['director_pop']\n * df['avg_actor_pop']\n * df['avg_writer_pop']\n )\n df['runtime'].replace(to_replace=0, value=df['runtime'].median(), inplace=True)\n df = df.query('10000 <= revenue').copy()\n df = df.query('100000 <= budget').copy()\n df.drop('sum_actor_pop', axis=1, inplace=True)\n df.drop('min_writer_pop', axis=1, inplace=True)\n\n # code to transform columns\n for col in [\n \"budget\", \"director_pop\", \"avg_writer_pop\"\n , \"max_writer_pop\", \"avg_actor_pop\", \"max_actor_pop\"\n , \"min_actor_pop\", 'cast_crew_sum_pop'\n , 'cast_crew_product_pop'\n ]:\n df['log10_'+col] = df[col].apply(lambda x: math.log10(x))\n df.drop(col, axis=1, inplace=True)\n \n return df", "def canonical_averages_dtype(spanning_cluster=True):\n fields = list()\n fields.extend([\n ('number_of_runs', 'uint32'),\n ])\n if spanning_cluster:\n fields.extend([\n ('percolation_probability_mean', 'float64'),\n ('percolation_probability_m2', 'float64'),\n ])\n fields.extend([\n ('max_cluster_size_mean', 'float64'),\n ('max_cluster_size_m2', 'float64'),\n ('moments_mean', '(5,)float64'),\n ('moments_m2', '(5,)float64'),\n ])\n return _ndarray_dtype(fields)", "def make_annot_df(ibs):\n aid_list = ibs.get_valid_aids() # 80us\n kpts_list = ibs.get_annot_kpts(aid_list) # 40ms\n vecs_list = ibs.get_annot_desc(aid_list) # 50ms\n aid_series = pdh.IntSeries(np.array(aid_list, dtype=INTEGER_TYPE), name='aid')\n kpts_df = pdh.pandasify_list2d(kpts_list, aid_series, KPT_COLUMNS, 'fx', 'kpts') # 6.7ms\n vecs_df = pdh.pandasify_list2d(vecs_list, aid_series, VEC_COLUMNS, 'fx', 'vecs') # 7.1ms\n # Pandas Annotation Dataframe\n annots_df = pd.concat([kpts_df, vecs_df], axis=1) # 845 us\n 
return annots_df", "def clean_dtypes(df):\n df['AgentLat'] = df['AgentLat'].astype(float)\n df['AgentLong'] = df['AgentLong'].astype(float)\n df['ContaMediaAccount'] = df['ContaMediaAccount'].astype(int)\n df['DistVIPHamming'] = df['DistVIPHamming'].astype(float)\n df['Distance'] = df['Distance'].astype(float)\n df['Final'] = df['Final'].astype(float)\n df['LeadID'] = df['LeadID'].astype(int)\n df['LeadLat'] = df['LeadLat'].astype(float)\n df['LeadLong'] = df['LeadLong'].astype(float)\n df['MLDecision'] = df['MLDecision'].astype(float)\n df['SemDistCorrel'] = df['SemDistCorrel'].astype(float)\n df['SemDistCosine'] = df['SemDistCosine'].astype(float)\n df['SemDistHamming'] = df['SemDistHamming'].astype(float)\n df['StarRating'] = df['StarRating'].astype(float)\n df['StoryAgent'] = df['StoryAgent'].tolist()\n df['StoryLead'] = df['StoryLead'].tolist()\n df['VIPAgentStory'] = df['VIPAgentStory'].tolist()\n df['VIPLeadStory'] = df['VIPLeadStory'].tolist()\n df['VIPAgentStory'] = df['VIPAgentStory'].astype(str)\n df['VIPLeadStory'] = df['VIPLeadStory'].astype(str)\n df['WeightSem'] = df['WeightSem'].astype(float)\n\n return df", "def test_datatype(self):\n dates = pd.date_range(start=\"2007-01-01\", end=\"2007-02-01\")\n\n ts = pd.DataFrame(\n {\n \"var1\": np.arange(len(dates), dtype=np.int8),\n \"var2\": np.arange(len(dates), dtype=np.int16),\n \"var3\": np.arange(len(dates), dtype=np.int32),\n \"var4\": np.arange(len(dates), dtype=np.int64)\n },\n index=dates)\n\n dataset_w = GriddedNcContiguousRaggedTs(self.testdatapath,\n self.grid,\n mode=\"w\")\n\n for gpi in self.gpis:\n dataset_w.write(gpi, ts)\n\n dataset_r = GriddedNcContiguousRaggedTs(self.testdatapath,\n self.grid,\n mode=\"r\")\n\n for gpi in self.gpis:\n arr = dataset_r.read(gpi)\n assert (arr[\"var1\"].dtype == np.int8)\n assert (arr[\"var2\"].dtype == np.int16)\n assert (arr[\"var3\"].dtype == np.int32)\n assert (arr[\"var4\"].dtype == np.int64)", "def get_annotation_dataframe_compact(self): \n temp_df = pd.DataFrame(self.annotation_line_list)\n # make a list with the annotations for each bbox (each row of the fata frame)\n temp_df['annon'] = list(zip(list(zip(temp_df['xmin'], temp_df['ymin'], temp_df['xmax'], temp_df['ymax'])), temp_df['class_name']))\n # group the df based on im_full_path\n grouped = temp_df.groupby(['img_full_path'])\n # create tuples of the grouped rows columns\n df_serie = grouped['annon'].aggregate(lambda x: tuple(x))\n return df_serie.to_frame()", "def create_ext_df(row, dtype, dummy_y=False, order=False):\n\n temp_df = pd.DataFrame(\n {\n 'Time': clean_ext_entry(row['packet_times'], dtype),\n 'pkt_size': clean_ext_entry(row['packet_sizes'], dtype),\n 'pkt_src': clean_ext_entry(row['packet_dirs'], str)\n }\n )\n\n if dummy_y:\n temp_df['dummy_y'] = np.zeros(len(temp_df))\n\n if order:\n temp_df['order'] = np.arange(len(temp_df))\n\n\n return temp_df" ]
[ "0.6742742", "0.57367635", "0.5632649", "0.55163085", "0.5503319", "0.5457204", "0.5316533", "0.53103656", "0.5284454", "0.5279293", "0.52708393", "0.5217111", "0.5212483", "0.52033097", "0.51998", "0.5162279", "0.51412517", "0.5125167", "0.5123744", "0.5106807", "0.510412", "0.5087214", "0.50783443", "0.5073579", "0.50724536", "0.5028708", "0.50251603", "0.49914452", "0.49826062", "0.49712452" ]
0.59625477
1
Check header row starts with NAME (case-insensitive).
def validate_header_keyword(self):
    valid = False
    if self.headers[0].upper() == "NAME":
        valid = True
        if self.headers[0] != "NAME":
            msg = f'File keyword "NAME" provided as {self.headers[0]}'
            self.store_validation_issue("warn", msg, "format:cap:name")
    else:
        msg = "Malformed file header row, missing NAME keyword. (Case Sensitive)"
        self.store_validation_issue("error", msg, "format:cap:name")
    return valid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_csv_headers(csvfile, headers):\n\n with open(csvfile, 'rb') as f:\n csv_header = f.readline()\n\n # Check the lower ones\n if headers[1][0] not in csv_header.lower():\n return False\n\n return True", "def normalizeHeaderName(name):\n # type: (AnyStr) -> AnyStr\n return name.lower()", "def check_valid_csv_header(self, row):\n obj = re.match(re.compile('^Year\\,Month\\,.'),\n ','.join(row))\n if not obj:\n raise Exception(\"Invalid Headers must be `Year` `Month` Check Sample file\")", "def get_headerRow(self, sheetname, row):\n\n sheet = self.wb.sheet_by_name(sheetname)\n headers = []\n\n for i in range(0, sheet.ncols):\n value = str(sheet.cell(row, i).value)\n\n if value:\n headers.append(value.lower())\n\n return headers", "def check_header(self, name, value):\r\n if value in self.headers.get(name, ''):\r\n return True\r\n return False", "def check_header(self, name, value):\r\n if value in self.headers.get(name, ''):\r\n return True\r\n return False", "def check_headers(df, filename):\n print(\"Checking headers for: \" + filename)\n read_message = \"\"\n\n original_colnames = df.columns.tolist()\n # good_colnames = [\"Marker\",\"Chr\",\"Position\",\"Effect_allele\",\"Other_allele\",\"Beta\",\"SE\",\"Pval\",\"EAF\",\"N\",\"Imputed\",\"Info\",\"Information_type\"]\n\n # Before actually checking the contents header, are there even headers?\n passed = False\n for col in original_colnames:\n if col.lower().strip() in [\"name\", \"snp\", \"snpid\", \"id\", \"markername\", \"marker\", \"rsid\"]:\n passed = True\n\n # Fail the check if the name column is not found, this is to stop the checks if there is a file without a header\n if not passed:\n # First check whether this is one of the files of Malik, where the columns were missing\n if filename.split('/')[-1].startswith('INTERSTROKE'):\n # Add column names and moveverything down\n first_data_row = df.columns.tolist()\n df.loc[-1] = first_data_row # adding a row\n df.index = df.index + 1 # shifting index\n df = df.sort_index() # sorting by index\n df.columns = [\"SNPID\", \"chr\", \"position\", \"coded_all\", \"noncoded_all\", \"strand_genome\", \"beta\", \"SE\", \"pval\", \"AF_coded_all\", \"n_cases\", \"n_controls\", \"imputed\", \"oevar_imp\"]\n original_colnames = df.columns.tolist()\n read_message = read_message + \"NAMECOLCHECK;CUSTOMCOLS\" \n\n elif filename.split('/')[-1].startswith('ASGC'):\n # Add column names and moveverything down\n first_data_row = df.columns.tolist()\n df.loc[-1] = first_data_row # adding a row\n df.index = df.index + 1 # shifting index\n df = df.sort_index() # sorting by index\n df.columns = [\"SNPID\", \"chr\", \"position\", \"n_cases\", \"n_controls\", \"coded_all\", \"noncoded_all\", \"AF_coded_all\", \"beta\", \"SE\", \"pval\", \"imputed\", \"info\"]\n original_colnames = df.columns.tolist()\n read_message = read_message + \"NAMECOLCHECK;CUSTOMCOLS\" \n\n else:\n # print(\"Something went wrong for \" + filename)\n # print(\"Please make sure there are headers in the file and that there is a name/id/marker column\")\n return df, \"NAMECOLCHECK;FAILED\"\n \n # Variable to hold all unknown columns\n unknown_cols = []\n\n # Loop over al colnames and rename it\n for index,col in enumerate(original_colnames):\n if col.lower().strip() in [\"name\", \"snp\", \"snpid\", \"id\", \"markername\", \"marker\", \"rsid\"]:\n original_colnames[index] = \"Marker\"\n\n elif col.lower().strip() in [\"chromosome\", \"chr\", \"chrom\"]:\n original_colnames[index] = \"Chr\"\n\n elif col.lower().strip() in [\"pos\", 
\"position\", \"bp\"]:\n original_colnames[index] = \"Position\"\n\n elif col.lower().strip() in [\"effallele\", \"eff_allele\", \"effectallele\", \"effect_allele\", \"coded_all\", \"codedall\", \"allele1\"]:\n original_colnames[index] = \"Effect_allele\"\n\n elif col.lower().strip() in [\"noneffallele\", \"noneff_allele\", \"noneffectallele\", \"noneffect_allele\", \"non_coded_all\", \"noncoded_all\", \"noncodedall\", \"other_allele\", \"otherallele\", \"allele2\"]:\n original_colnames[index] = \"Other_allele\"\n\n elif col.lower().strip() in [\"beta\"]:\n original_colnames[index] = \"Beta\"\n\n elif col.lower().strip() in [\"se\", \"sebeta\", \"stderr\"]:\n original_colnames[index] = \"SE\"\n\n elif col.lower().strip() in [\"p\", \"pval\", \"p-value\"]:\n original_colnames[index] = \"Pval\"\n\n elif col.lower().strip() in [\"eaf\", \"freq1\", \"af_coded_all\", \"effallelefreq\"]:\n original_colnames[index] = \"EAF\"\n\n elif col.lower().strip() in [\"n\", \"ntot\", \"n_total\"]:\n original_colnames[index] = \"N\"\n\n elif col.lower().strip() in [\"ncase\", \"ncases\", \"n_case\", \"n_cases\"]:\n original_colnames[index] = \"N_cases\"\n\n elif col.lower().strip() in [\"ncontrol\", \"ncontrols\", \"n_control\", \"n_controls\"]:\n original_colnames[index] = \"N_controls\"\n\n elif col.lower().strip() in [\"imputed\", \"imp\"]:\n original_colnames[index] = \"Imputed\"\n\n elif col.lower().strip() in [\"inf\", \"info\", \"info_rsq\", \"rsqr\"]:\n original_colnames[index] = \"Info\"\n\n elif col.lower().strip() in [\"inf_type\", \"information_type\"]:\n original_colnames[index] = \"Information_type\"\n\n # Not neccesary for the toolkit, but reduce the error messages\n elif col.lower().strip() in [\"strand\", \"strand_genome\"]:\n original_colnames[index] = \"Strand\"\n\n elif col.lower().strip() in [\"oevar_imp\"]:\n original_colnames[index] = \"oevar_imp\"\n\n elif col.lower().strip() in [\"pval.t\"]:\n original_colnames[index] = \"pval.t\"\n\n elif col.lower().strip() in [\"df.t\"]:\n original_colnames[index] = \"df.t\"\n\n elif col.lower().strip() in [\"approxdf\"]:\n original_colnames[index] = \"approxdf\"\n\n elif col.lower().strip() in [\"or\"]:\n original_colnames[index] = \"OR\"\n\n else:\n # print(\"Could not match the string: \" + col)\n # print(\"Please make sure this column is handled correctly in the toolkit\")\n unknown_cols.append(col)\n\n # Change column names\n df.columns = original_colnames\n\n # Write the unknown columns into the fail_reason variable\n if len(unknown_cols) > 0:\n read_message = read_message + \"NAMECOLCHECK;PASSED\" + \" UNRECOGNIZED;\" + ' '.join([str(elem) for elem in unknown_cols])\n else:\n read_message = read_message + \"NAMECOLCHECK;PASSED\"\n\n return df, read_message", "def is_header(line):\n return line[0] == '>'", "def has_header():\n header_content = (\"\\n\".join(CURRENT_BUFFER[:7])).lower()\n return sum(1 for keyword in KEYWORDS if header_content.find(keyword.lower()) != -1) >= 2", "def has_header_row(self, strdata):\n debug = False\n comma_dec_sep_ok = True\n if debug: print(strdata)\n if len(strdata) < 2: ## a header row needs a following row to be a header\n return False\n row1_types = [lib.get_val_type(val, comma_dec_sep_ok) \n for val in strdata[0]]\n row2_types = [lib.get_val_type(val, comma_dec_sep_ok) \n for val in strdata[1]]\n str_type = mg.VAL_STRING\n empty_type = mg.VAL_EMPTY_STRING\n non_str_types = [mg.VAL_DATE, mg.VAL_NUMERIC]\n return importer.has_header_row(\n row1_types, row2_types, str_type, empty_type, non_str_types)", "def 
test_is_fasta_header(self):\r\n\r\n is_fasta_header = False\r\n\r\n with open(full_file_name, \"r\") as in_file:\r\n for line in in_file:\r\n is_fasta_header = mfau.is_header_line(line)\r\n\r\n # only testing the first line\r\n break\r\n\r\n self.assertEqual(is_fasta_header, True)", "def test_clean_row_lowercase(self):\n\t\tobj_ut = sentiment.clean_row(\n\t\t\t'100\\tAn APPLE so GOODforme')\n\t\tself.assertEqual(obj_ut[1], \"an apple so goodforme\")", "def _fnmatch_lower(name: str | None, pattern: str) -> bool:\n if name is None:\n return False\n return fnmatch.fnmatch(name.lower(), pattern)", "def check_headerRow(self, expected, found):\n\n # spreadsheets must have either a barcode field or a object ID field, but both are not required\n header1 = 'barcode'\n header2 = ('object identifier\\n(edit heading to specify type' +\n ' - e.g. barcode)')\n expected = self.remove_annoying(header1, header2, expected, found)\n\n missing = []\n\n for header in expected:\n if header not in found:\n missing.append(header)\n\n if missing:\n self.raise_excelerror(\"Missing required value- {0}.\"\n .format(missing))\n\n return True", "def check_header(line):\n if line[:7] == '###### ':\n line = '<h6>' + line[7:] + '</h6>'\n elif line[:6] == '##### ':\n line = '<h5>' + line[6:] + '</h5>'\n elif line[:5] == '#### ':\n line = '<h4>' + line[5:] + '</h4>'\n elif line[:4] == '### ':\n line = '<h3>' + line[4:] + '</h3>'\n elif line[:3] == '## ':\n line = '<h2>' + line[3:] + '</h2>'\n elif line[:2] == '# ':\n line = '<h1>' + line[2:] + '</h1>'\n else:\n return False, ''\n\n return True, line", "def is_valid_string_name(src):\n\n return src[0].isupper()", "def match_name(pattern, rows):\n matching = []\n for row in rows:\n # Use regex matching to check whether first name or last name contains the pattern\n if re.search(r'%s' % pattern.lower(), row[0].lower()) != None or re.search(r'%s' % pattern.lower(), row[1].lower()) != None:\n matching.append(row)\n\n # print the matched records\n print_records(matching)", "def has_headers(self):\n for column in self.columns:\n if column.header:\n return True\n return False", "def is_sorted_queryname(header):\n\tif(\"HD\" in header):\n\t\tif(\"SO\" in header[\"HD\"]):\n\t\t\tif(header[\"HD\"][\"SO\"] == \"queryname\"):\n\t\t\t\treturn True\n\treturn False", "def name_startswith(self, name):\n matches = [\n entry\n for entry in self\n if entry is not None and entry.name.startswith(name)\n ]\n return matches", "def _check_headers(cursor, headers):\n all_columns = set(chain.from_iterable(_columns(cursor, table) for table in DATA_TABLES))\n for header in headers:\n if header not in all_columns:\n raise ValueError('column {} not recognized'.format(header))", "def is_name(self, cell):\n tokens = self._cell_tokenizer.tokenize(cell.get_text())\n cell_type = self._get_token_type(self._cell_parser.parse(tokens))\n if cell_type == 'NAME':\n return True\n return False", "def _validate_header(self, header_row):\n\n self.logger.info(\"Validating header row.\")\n \n # assume value.\n is_valid = True\n\n # check if @header_row is perfect.\n required_keys = tuple(self.required_headers.keys())\n if sorted(header_row) == sorted(required_keys):\n self.logger.info(\"Header is valid.\")\n return is_valid\n else:\n self.logger.error(\"Header is invalid.\")\n is_valid = False\n\n # report on any missing header fields.\n missing_headers = [header for header in self.required_headers if header not in\n header_row]\n if len(missing_headers) != 0:\n self.logger.warning(\"Missing required fields: 
{}\".format(missing_headers))\n \n # report on any duplicate fields.\n duplicate_headers = [header for header in header_row if header_row.count(header) != 1]\n if len(duplicate_headers) != 0:\n self.logger.warning(\"Found duplicate fields: {}\".format(set(duplicate_headers)))\n\n # report on any extra fields.\n extra_headers = [header for header in header_row if header not in \n self.required_headers]\n if len(extra_headers) != 0:\n self.logger.warning(\"Found extra fields: {}\".format(extra_headers))\n \n return is_valid", "def _parse_name(self, row):\n return row[Row.NAME]", "def _is_header(line):\n line = line.strip()\n if line.startswith('#') or line.startswith('track') or line.startswith(\n 'browser'): # BED header\n return True\n else:\n return False", "def is_valid_header(headers: Dict[str, Any]) -> bool:\n for name, value in headers.items():\n if not utils.is_latin_1_encodable(value):\n return False\n if utils.has_invalid_characters(name, value):\n return False\n return True", "def is_header_part(cell: str) -> bool:\n pattern = '|'.join([\n rf'(?:(?:three|3|six|6|nine|9|twelve|12)\\s+months?(?:\\s+periods?)?|quarters?|year|ytd)(?!ly)',\n rf'\\b(?:{MONTH})\\b',\n rf'^(?:end(?:ed|ing))?(?:20)\\s*[0-2]\\s*[0-9]{FOOTNOTE}$',\n rf'^\\d{1, 2}/\\d{1, 2}/\\d{2, 4}{FOOTNOTE}$',\n rf'^q[1-4](?:\\s*\\(\\w+\\))?{FOOTNOTE}$',\n rf'^[1-4]q(?:tr)?(?:\\d{2, 4})?',\n rf'as\\s+(?:reported|adjusted)',\n rf'year-?\\s*to-?\\s*date',\n rf'^year-$',\n rf'^to-date$',\n rf'full\\s+year',\n rf'^(?:28|29|30|31){FOOTNOTE}$',\n rf'^(?:month|quarter|year)s?{FOOTNOTE}$',\n rf'^(?:three|six|nine|twelve){FOOTNOTE}$',\n rf'^(?:operating|reported|baseline|percent|%|end(?:ed|ing)){FOOTNOTE}$',\n ORDINAL,\n rf'^(?:(?:20)\\s*[0-2]\\s*[0-9]\\*\\s*)?{UNAUDITED_EXACT}$'\n ])\n prepped = str(cell).lower().strip()\n match = re.search(allow_space_between_letters(pattern), prepped)\n return match is not None or parse_fiscal_period(cell) is not None", "def test_header_row(self):\n header_row = self.view_class().header_row\n if not header_row:\n return\n\n response = self.view_class().get()\n # Some formatting needs to be done so that the header row\n # is compliant with the CSV dialect - all fields need\n # to be quoted.\n quoted_header_row = '\"{}\"'.format('\",\"'.join(header_row))\n self.assertContains(response, quoted_header_row)", "def is_header_content(response, key, value):\n try:\n if response.headers[key].lower() == value:\n return True\n else:\n return False\n except:\n return False", "def hasname(self, tag: str) -> bool:\n for key in self.formal_names:\n if key in tag.lower():\n return True\n\n # Exit case if key -> value not in mapping \n return False" ]
[ "0.68957376", "0.6201394", "0.60827667", "0.60588807", "0.601952", "0.601952", "0.6007669", "0.59995687", "0.59966505", "0.5987942", "0.5969808", "0.59591544", "0.5956927", "0.59442854", "0.59016865", "0.5888689", "0.5867301", "0.58425367", "0.5842121", "0.58320737", "0.58273846", "0.58247524", "0.5820106", "0.58147365", "0.5781012", "0.5771142", "0.57600635", "0.575569", "0.57533836", "0.5701448" ]
0.7514782
0
Check all header names are unique and not empty.
def validate_unique_header(self):
    valid = False
    unique_headers = set(self.headers)
    if len(unique_headers) == len(self.headers):
        valid = True
    else:
        seen_headers = set()
        duplicate_headers = set()
        for x in self.headers:
            if x in seen_headers or seen_headers.add(x):
                duplicate_headers.add(x)
        msg = f"Duplicated header names are not allowed: {duplicate_headers}"
        log_exception(Annotations.dev_logger, Annotations.user_logger, msg)
        self.store_validation_issue("error", msg, "format:cap:unique")
        valid = False
    if any("Unnamed" in s for s in list(unique_headers)):
        msg = "Headers cannot contain empty values"
        log_exception(Annotations.dev_logger, Annotations.user_logger, msg)
        self.store_validation_issue("error", msg, "format:cap:no-empty")
        valid = False
    return valid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_header_dups(header,\r\n errors):\r\n\r\n for curr_elem in range(len(header)):\r\n if header.count(header[curr_elem]) != 1:\r\n errors.append('%s found in header %d times. ' %\r\n (header[curr_elem], header.count(header[curr_elem])) +\r\n 'Header fields must be unique.\\t%d,%d' % (0, curr_elem))\r\n\r\n return errors", "def test_check_header_dups(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = [\r\n 'SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n errors = []\r\n\r\n errors = check_header_dups(header, errors)\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should give errors with dups\r\n header = [\r\n 'SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix', 'run_prefix',\r\n 'Description']\r\n errors = []\r\n\r\n errors = check_header_dups(header, errors)\r\n\r\n expected_errors = [\r\n 'run_prefix found in header 2 times. Header fields must be unique.\\t0,3',\r\n 'run_prefix found in header 2 times. Header fields must be unique.\\t0,4']\r\n\r\n self.assertEqual(errors, expected_errors)", "def _check_duplicate_headers(self, docstring: PetscDocStringImpl) -> None:\n for heading, where in self.seen_headers.items():\n if len(where) <= 1:\n continue\n\n lasti = len(where) - 1\n src_list = []\n nbefore = 2\n nafter = 0\n prev_line_begin = 0\n for i, loc in enumerate(where):\n startline = loc.start.line\n if i:\n nbefore = startline - prev_line_begin - 1\n if i == lasti:\n nafter = 2\n src_list.append(loc.formatted(num_before_context=nbefore, num_after_context=nafter, trim=False))\n prev_line_begin = startline\n mess = \"Multiple '{}' subheadings. Much like Highlanders, there can only be one:\\n{}\".format(\n self.transform(self.name), '\\n'.join(src_list)\n )\n docstring.add_diagnostic(\n Diagnostic(Diagnostic.Kind.ERROR, self.diags.section_header_unique, mess, self.extent.start)\n )\n return", "def test_get_cleaned_headers(self):\r\n # Some duplicates.\r\n exp = ['foo', 'foo_2', 'foo_3', 'foo_4', 'fooo', 'foo_5', 'foo_6',\r\n 'foo_7', 'foo_8', 'foo_9', 'f2oo456', 'foo_10']\r\n obs = _get_cleaned_headers(\r\n ['foo', 'Foo', 'FOO', 'F_oO', 'F:Oo_o', '123foo', '#Foo',\r\n '123foo', ' 123Foo', 'f O\\tO#', ' f2\\too456', '456 foo'])\r\n self.assertEqual(obs, exp)\r\n\r\n # All unique.\r\n exp = ['foo', 'bar']\r\n obs = _get_cleaned_headers(['Fo#o', 'bar'])\r\n self.assertEqual(obs, exp)\r\n\r\n # Header consisting of only special characters and header that is\r\n # blank.\r\n self.assertRaises(GoogleSpreadsheetError, _get_cleaned_headers,\r\n ['Foo', '___', 'BAR'])\r\n self.assertRaises(GoogleSpreadsheetError, _get_cleaned_headers,\r\n ['Foo', '', 'BAR'])", "def validate_header_keyword(self):\n\n valid = False\n if self.headers[0].upper() == \"NAME\":\n valid = True\n if self.headers[0] != \"NAME\":\n msg = f'File keyword \"NAME\" provided as {self.headers[0]}'\n self.store_validation_issue(\"warn\", msg, \"format:cap:name\")\n else:\n msg = \"Malformed file header row, missing NAME keyword. 
(Case Sensitive)\"\n self.store_validation_issue(\"error\", msg, \"format:cap:name\")\n return valid", "def validate_against_header_count(self):\n valid = False\n len_headers = len(\n [header for header in self.headers if \"Unnamed\" not in header]\n )\n len_annot_type = len(\n [\n annot_type\n for annot_type in self.annot_types\n if \"Unnamed\" not in annot_type\n ]\n )\n if not len_headers == len_annot_type:\n msg = (\n f\"Header mismatch: {len_annot_type} TYPE declarations \"\n f\"for {len_headers} column headers\"\n )\n self.store_validation_issue(\"error\", msg, \"format:cap:count\")\n else:\n valid = True\n return valid", "def verify_unique_names(items):\n unique_names = set([item['name'] for item in items])\n if len(unique_names) != len(items):\n raise ClientException(\"Error: Duplicate sequence names found.\", ErrorType.INVALID_SEQUENCE_DATA)", "def check_headerEntries(self, expected, found):\n\n # spreadsheets must have either a barcode field or a object ID field, but both are not required\n header1 = ('original master', 'object', 'barcode')\n header2 = ('original master', 'object',\n 'object identifier\\n(edit heading to specify type ' +\n '- e.g. barcode)')\n expected = self.remove_annoying(header1, header2, expected, found)\n\n bad_entries = []\n\n for header in expected:\n if header not in found:\n bad_entries.append(header)\n\n if bad_entries:\n self.raise_excelerror(\"Incorrect header entry for {0}.\"\n .format(bad_entries))\n return True", "def check_header(self, name, value):\r\n if value in self.headers.get(name, ''):\r\n return True\r\n return False", "def check_header(self, name, value):\r\n if value in self.headers.get(name, ''):\r\n return True\r\n return False", "def _check_headers(cursor, headers):\n all_columns = set(chain.from_iterable(_columns(cursor, table) for table in DATA_TABLES))\n for header in headers:\n if header not in all_columns:\n raise ValueError('column {} not recognized'.format(header))", "def handle_missing_duplicate_headers(self, expected_fields, bucket_name, error_filename):\n missing_headers = [cell for cell, count in expected_fields.items() if count == 0]\n duplicated_headers = [cell for cell, count in expected_fields.items() if count > 1]\n\n if missing_headers or duplicated_headers:\n self.write_missing_duplicated_headers(\n missing_headers, duplicated_headers, bucket_name,\n error_filename\n )\n raise_missing_duplicated_exception(missing_headers, duplicated_headers)", "def validate_header(self, reply):\n # check message is from my agg to me\n check_equal(reply.header.sender, self.aggregator_uuid, self.logger)\n check_equal(reply.header.recipient, self.common_name, self.logger)\n\n # check that the federation id matches\n check_equal(reply.header.federation_id, self.federation_uuid, self.logger)\n\n # check that we agree on single_col_cert_common_name\n check_equal(reply.header.single_col_cert_common_name, self.single_col_cert_common_name, self.logger)", "def verifyHeader(self, headers):\n for head in headers:\n if(hex(head[0]) == VID and hex(head[1]) == PID):\n return True\n return False", "def test_check_header_missing_fields(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = ['AAA', 'XXX', 'YYY',\r\n 'ZZZ']\r\n errors = []\r\n warnings = []\r\n\r\n errors, warnings = check_header(header,\r\n errors,\r\n warnings,\r\n sample_id_ix=0,\r\n desc_ix=3,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field=None)\r\n\r\n expected_errors = [\r\n 'Found header field AAA, expected field 
SampleID\\t0,0',\r\n 'Found header field XXX, expected field BarcodeSequence\\t0,1',\r\n 'Found header field YYY, expected field LinkerPrimerSequence\\t0,2',\r\n 'Found header field ZZZ, last field should be Description\\t0,3']\r\n expected_warnings = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n self.assertEqual(warnings, expected_warnings)", "def test_check_header(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'Description']\r\n errors = []\r\n warnings = []\r\n\r\n errors, warnings = check_header(header,\r\n errors,\r\n warnings,\r\n sample_id_ix=0,\r\n desc_ix=3,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field=None)\r\n\r\n expected_errors = []\r\n expected_warnings = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n self.assertEqual(warnings, expected_warnings)", "def check_headers(self, headers):\n h = headers.values()[0]\n\n if 'DT' in PAR:\n if h.dt != PAR.DT:\n h.dt = PAR.DT\n\n if 'NT' in PAR:\n if h.nt != PAR.NT:\n print 'Warning: h.nt != PAR.NT'\n\n if 'NREC' in PAR:\n if h.nr != PAR.NREC:\n print 'Warning: h.nr != PAR.NREC'\n\n return h", "def has_headers(self):\n for column in self.columns:\n if column.header:\n return True\n return False", "def test_fasta_get_headers(self):\r\n\r\n header_records = mfau.get_record_headers(full_file_name)\r\n\r\n if debug:\r\n for header_record in header_records:\r\n print header_record.strip()\r\n\r\n self.assertGreaterEqual(len(header_records), 0)", "def is_valid_header(headers: Dict[str, Any]) -> bool:\n for name, value in headers.items():\n if not utils.is_latin_1_encodable(value):\n return False\n if utils.has_invalid_characters(name, value):\n return False\n return True", "def validate_unique_mof_names():\n names = list(FRAMEWORKS_DF['name'].str.lower()) + list(FRAMEWORKS_DF['alternative names'].dropna().str.lower())\n names = [ n for l in names for n in l.split(',') if l ]\n names = [ n.lower().replace('-', ' ') for n in names ]\n\n duplicates = [item for item, count in collections.Counter(list(names)).items() if count > 1]\n\n if duplicates:\n print('Warning: Duplicate CURATED-MOF names detected: {}'.format(duplicates))\n sys.exit(1)\n\n print('No duplicate CURATED-MOF names found.')", "def _repair_names_check_unique(names: Iterable[str]) -> Iterable[str]:\n for name in names:\n if names.count(name) > 1:\n raise NameNonUniqueError(f\"Names must be unique: {name}\")\n if name == \"\" or name is numpy.nan:\n raise NameNonUniqueError(f\"Names can't be empty: {name}\")\n if re.search(r\"(?:(?<!_)_{2}\\d+|(?<!_)__)+$\", str(name)):\n raise NameNonUniqueError(\n f\"Names can't be of the form `__` or `_j`: {name}\"\n )\n return names", "def test_check_header_required_fields(self):\r\n\r\n # Default header, should not generate any errors/warnings\r\n header = [\r\n 'SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n errors = []\r\n\r\n errors = check_header_required_fields(header,\r\n errors,\r\n sample_id_ix=0,\r\n desc_ix=4,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should find all as errors if not named correctly\r\n header = ['AAA', 'BBB', 'CCC', 'DDD',\r\n 'EEE']\r\n errors = []\r\n\r\n errors = check_header_required_fields(header,\r\n errors,\r\n sample_id_ix=0,\r\n desc_ix=4,\r\n bc_ix=1,\r\n linker_primer_ix=2,\r\n 
added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = [\r\n 'Found header field AAA, expected field SampleID\\t0,0',\r\n 'Found header field BBB, expected field BarcodeSequence\\t0,1',\r\n 'Found header field CCC, expected field LinkerPrimerSequence\\t0,2',\r\n 'Found header field EEE, last field should be Description\\t0,4',\r\n 'Missing added demultiplex field run_prefix\\t-1,-1']\r\n\r\n self.assertEqual(errors, expected_errors)", "def check_csv_headers(csvfile, headers):\n\n with open(csvfile, 'rb') as f:\n csv_header = f.readline()\n\n # Check the lower ones\n if headers[1][0] not in csv_header.lower():\n return False\n\n return True", "def check_names(sections):\n return _check_nentries(sections, \"NAMES\", \"NAMES\")", "def check_headerRow(self, expected, found):\n\n # spreadsheets must have either a barcode field or a object ID field, but both are not required\n header1 = 'barcode'\n header2 = ('object identifier\\n(edit heading to specify type' +\n ' - e.g. barcode)')\n expected = self.remove_annoying(header1, header2, expected, found)\n\n missing = []\n\n for header in expected:\n if header not in found:\n missing.append(header)\n\n if missing:\n self.raise_excelerror(\"Missing required value- {0}.\"\n .format(missing))\n\n return True", "def check_unique(self):\n pass", "def _validate_header(self, header_row):\n\n self.logger.info(\"Validating header row.\")\n \n # assume value.\n is_valid = True\n\n # check if @header_row is perfect.\n required_keys = tuple(self.required_headers.keys())\n if sorted(header_row) == sorted(required_keys):\n self.logger.info(\"Header is valid.\")\n return is_valid\n else:\n self.logger.error(\"Header is invalid.\")\n is_valid = False\n\n # report on any missing header fields.\n missing_headers = [header for header in self.required_headers if header not in\n header_row]\n if len(missing_headers) != 0:\n self.logger.warning(\"Missing required fields: {}\".format(missing_headers))\n \n # report on any duplicate fields.\n duplicate_headers = [header for header in header_row if header_row.count(header) != 1]\n if len(duplicate_headers) != 0:\n self.logger.warning(\"Found duplicate fields: {}\".format(set(duplicate_headers)))\n\n # report on any extra fields.\n extra_headers = [header for header in header_row if header not in \n self.required_headers]\n if len(extra_headers) != 0:\n self.logger.warning(\"Found extra fields: {}\".format(extra_headers))\n \n return is_valid", "def write_missing_duplicated_headers(self, missing_headers, duplicated_headers, bucket_name, error_filename):\n with self.get_writer(bucket_name, error_filename, self.header_report_headers, self.is_local) as writer:\n for header in duplicated_headers:\n writer.write([\"Duplicated header\", header])\n for header in missing_headers:\n writer.write([\"Missing header\", header])\n writer.finish_batch()", "def check_headers(df, filename):\n print(\"Checking headers for: \" + filename)\n read_message = \"\"\n\n original_colnames = df.columns.tolist()\n # good_colnames = [\"Marker\",\"Chr\",\"Position\",\"Effect_allele\",\"Other_allele\",\"Beta\",\"SE\",\"Pval\",\"EAF\",\"N\",\"Imputed\",\"Info\",\"Information_type\"]\n\n # Before actually checking the contents header, are there even headers?\n passed = False\n for col in original_colnames:\n if col.lower().strip() in [\"name\", \"snp\", \"snpid\", \"id\", \"markername\", \"marker\", \"rsid\"]:\n passed = True\n\n # Fail the check if the name column is not found, this is to stop the checks if there is a file without a header\n if 
not passed:\n # First check whether this is one of the files of Malik, where the columns were missing\n if filename.split('/')[-1].startswith('INTERSTROKE'):\n # Add column names and moveverything down\n first_data_row = df.columns.tolist()\n df.loc[-1] = first_data_row # adding a row\n df.index = df.index + 1 # shifting index\n df = df.sort_index() # sorting by index\n df.columns = [\"SNPID\", \"chr\", \"position\", \"coded_all\", \"noncoded_all\", \"strand_genome\", \"beta\", \"SE\", \"pval\", \"AF_coded_all\", \"n_cases\", \"n_controls\", \"imputed\", \"oevar_imp\"]\n original_colnames = df.columns.tolist()\n read_message = read_message + \"NAMECOLCHECK;CUSTOMCOLS\" \n\n elif filename.split('/')[-1].startswith('ASGC'):\n # Add column names and moveverything down\n first_data_row = df.columns.tolist()\n df.loc[-1] = first_data_row # adding a row\n df.index = df.index + 1 # shifting index\n df = df.sort_index() # sorting by index\n df.columns = [\"SNPID\", \"chr\", \"position\", \"n_cases\", \"n_controls\", \"coded_all\", \"noncoded_all\", \"AF_coded_all\", \"beta\", \"SE\", \"pval\", \"imputed\", \"info\"]\n original_colnames = df.columns.tolist()\n read_message = read_message + \"NAMECOLCHECK;CUSTOMCOLS\" \n\n else:\n # print(\"Something went wrong for \" + filename)\n # print(\"Please make sure there are headers in the file and that there is a name/id/marker column\")\n return df, \"NAMECOLCHECK;FAILED\"\n \n # Variable to hold all unknown columns\n unknown_cols = []\n\n # Loop over al colnames and rename it\n for index,col in enumerate(original_colnames):\n if col.lower().strip() in [\"name\", \"snp\", \"snpid\", \"id\", \"markername\", \"marker\", \"rsid\"]:\n original_colnames[index] = \"Marker\"\n\n elif col.lower().strip() in [\"chromosome\", \"chr\", \"chrom\"]:\n original_colnames[index] = \"Chr\"\n\n elif col.lower().strip() in [\"pos\", \"position\", \"bp\"]:\n original_colnames[index] = \"Position\"\n\n elif col.lower().strip() in [\"effallele\", \"eff_allele\", \"effectallele\", \"effect_allele\", \"coded_all\", \"codedall\", \"allele1\"]:\n original_colnames[index] = \"Effect_allele\"\n\n elif col.lower().strip() in [\"noneffallele\", \"noneff_allele\", \"noneffectallele\", \"noneffect_allele\", \"non_coded_all\", \"noncoded_all\", \"noncodedall\", \"other_allele\", \"otherallele\", \"allele2\"]:\n original_colnames[index] = \"Other_allele\"\n\n elif col.lower().strip() in [\"beta\"]:\n original_colnames[index] = \"Beta\"\n\n elif col.lower().strip() in [\"se\", \"sebeta\", \"stderr\"]:\n original_colnames[index] = \"SE\"\n\n elif col.lower().strip() in [\"p\", \"pval\", \"p-value\"]:\n original_colnames[index] = \"Pval\"\n\n elif col.lower().strip() in [\"eaf\", \"freq1\", \"af_coded_all\", \"effallelefreq\"]:\n original_colnames[index] = \"EAF\"\n\n elif col.lower().strip() in [\"n\", \"ntot\", \"n_total\"]:\n original_colnames[index] = \"N\"\n\n elif col.lower().strip() in [\"ncase\", \"ncases\", \"n_case\", \"n_cases\"]:\n original_colnames[index] = \"N_cases\"\n\n elif col.lower().strip() in [\"ncontrol\", \"ncontrols\", \"n_control\", \"n_controls\"]:\n original_colnames[index] = \"N_controls\"\n\n elif col.lower().strip() in [\"imputed\", \"imp\"]:\n original_colnames[index] = \"Imputed\"\n\n elif col.lower().strip() in [\"inf\", \"info\", \"info_rsq\", \"rsqr\"]:\n original_colnames[index] = \"Info\"\n\n elif col.lower().strip() in [\"inf_type\", \"information_type\"]:\n original_colnames[index] = \"Information_type\"\n\n # Not neccesary for the toolkit, but reduce 
the error messages\n elif col.lower().strip() in [\"strand\", \"strand_genome\"]:\n original_colnames[index] = \"Strand\"\n\n elif col.lower().strip() in [\"oevar_imp\"]:\n original_colnames[index] = \"oevar_imp\"\n\n elif col.lower().strip() in [\"pval.t\"]:\n original_colnames[index] = \"pval.t\"\n\n elif col.lower().strip() in [\"df.t\"]:\n original_colnames[index] = \"df.t\"\n\n elif col.lower().strip() in [\"approxdf\"]:\n original_colnames[index] = \"approxdf\"\n\n elif col.lower().strip() in [\"or\"]:\n original_colnames[index] = \"OR\"\n\n else:\n # print(\"Could not match the string: \" + col)\n # print(\"Please make sure this column is handled correctly in the toolkit\")\n unknown_cols.append(col)\n\n # Change column names\n df.columns = original_colnames\n\n # Write the unknown columns into the fail_reason variable\n if len(unknown_cols) > 0:\n read_message = read_message + \"NAMECOLCHECK;PASSED\" + \" UNRECOGNIZED;\" + ' '.join([str(elem) for elem in unknown_cols])\n else:\n read_message = read_message + \"NAMECOLCHECK;PASSED\"\n\n return df, read_message" ]
[ "0.76189494", "0.7537004", "0.73286694", "0.7190239", "0.6937977", "0.6811385", "0.6641252", "0.65809643", "0.6544887", "0.6544887", "0.6498212", "0.6487584", "0.6473634", "0.6463443", "0.6449808", "0.64381677", "0.6397096", "0.6381177", "0.63589174", "0.63446665", "0.63047713", "0.62352693", "0.62304175", "0.61918986", "0.61436707", "0.610863", "0.608389", "0.60799885", "0.6075485", "0.6073025" ]
0.84468025
0