query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, 4–10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Creates a new session or returns existing one if path exists | def create_session(
path: str,
type: str,
name: Optional[str] = None,
kernel_name: Optional[str] = None,
kernel_id: Optional[str] = None,
) -> str:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_new_session():\n session = Session.objects.create(uuid=str(uuid4()), container_id=None)\n return session.id",
"def _insert_new_session():\n request = self._make_request()\n session_existing = self._set_up_session_in_Redis_and_makeOne( # noqa: F841\n request, session_id, session_dict={\"visited\": True}, **session_args\n )\n return request",
"def get_or_create_session(db):",
"def create_new_session(self) -> None:\n try:\n session = self.client.create_session()\n logger.info(\"created session: %s\", session.id)\n self.join_session(session.id)\n location_config = self.app.guiconfig.location\n self.session.location = SessionLocation(\n x=location_config.x,\n y=location_config.y,\n z=location_config.z,\n lat=location_config.lat,\n lon=location_config.lon,\n alt=location_config.alt,\n scale=location_config.scale,\n )\n except grpc.RpcError as e:\n self.app.show_grpc_exception(\"New Session Error\", e)",
"def create_session(\n self,\n environ: str,\n session_request_to_use: typing.Optional[SessionRequest] = None,\n ) -> Session:\n self.poll_sessions() # make sure there is an up to date picture of Sessions before proceeding\n self.check_session_can_start(session_request_to_use)\n return self.perform_session_create(\n environ, self.project.session_parameters.serialize()\n )",
"def perform_session_create(self, environ: str, session_parameters: dict) -> Session:\n session_parameters[\"mounts\"] = []\n attach_context = self.client.start_session(environ, session_parameters)\n\n # TODO should we record some of the request\n # headers e.g. `REMOTE_ADDR`, `HTTP_USER_AGENT`, `HTTP_REFERER` for analytics?\n\n return Session.objects.create(\n project=self.project,\n url=attach_context.url,\n execution_id=attach_context.execution_id,\n client_class_id=self.client.class_id,\n )",
"def session(self):\n if not self._session: #Create new session if none exists\n return self._new_session()\n return self._session",
"def test_create_session(self):\n study_id = self.storage.create_study(sample_study_spec())\n\n session = sample_session(study_id=study_id)\n self.storage.create_session(session)\n\n self.assertEqual(self.storage.get_session(study_id, session.id), session)",
"async def create_session(session: SessionModel, mongo: MongoDB = mongodb) -> SessionOutModel:\n if not await mongo.session_coll.find_one({\"id\": session.id}):\n await mongo.session_coll.insert_one(session.dict())\n else:\n await mongo.session_coll.update_one({\"id\": session.id}, {'$set': {'status': session.status}})\n return SessionOutModel(**session.dict())",
"def create_new_session(self, username):\n return self.session_mgr.create_new_session(username)",
"def get_session():\n if not hasattr(get_session, \"session\"):\n get_session.session = requests_cache.CachedSession(\n cache_name=CACHE_PATH.rstrip(\".sqlite\"),\n expire_after=518400, # 6 days\n )\n adapter = HTTPAdapter(max_retries=3)\n get_session.session.mount(\"http://\", adapter)\n get_session.session.mount(\"https://\", adapter)\n return get_session.session",
"def _get_session():\n api_version = \"1.0\"\n originator = \"salt_cloud_{}_driver\".format(__virtualname__)\n url = config.get_cloud_config_value(\n \"url\", get_configured_provider(), __opts__, search_global=False\n )\n user = config.get_cloud_config_value(\n \"user\", get_configured_provider(), __opts__, search_global=False\n )\n password = config.get_cloud_config_value(\n \"password\", get_configured_provider(), __opts__, search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n \"ignore_ssl\",\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False,\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n \"url: %s user: %s password: %s, originator: %s\",\n url,\n user,\n \"XXX-pw-redacted-XXX\",\n originator,\n )\n session.xenapi.login_with_password(user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = str(ex.__dict__[\"details\"][1])\n slash_parts = url.split(\"/\")\n new_url = \"/\".join(slash_parts[:2]) + \"/\" + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n \"session is -> url: %s user: %s password: %s, originator:%s\",\n new_url,\n user,\n \"XXX-pw-redacted-XXX\",\n originator,\n )\n session.xenapi.login_with_password(user, password, api_version, originator)\n return session",
"def new_session(self):\n return self._SessionLocal()",
"def session(get_session):\n return get_session()",
"def create_session(self):\n self._session = self.create_scoped_session()\n self.session = self._session()",
"def create_session(self, **params):\n raise NotImplementedError('Should be implemented by a sub-class.')",
"def _session(self):\n if self.session is None:\n self.session = create_session(self.config, self.auth)\n return self.session",
"def get_session(self, renew: Optional[bool] = False) -> neo4j.work.simple.Session:\n if self.session is None or renew:\n sess = self.driver.session()\n self.session = sess\n return self.session",
"def session():\n def session():\n return BaseUrlSession()\n return session",
"def new_session(self):\n return self.Session()",
"async def create(self, session, *, dc=None):\n response = await self._api.put(\n \"/v1/session/create\",\n data=session,\n params={\"dc\": dc})\n return response.body",
"def init_session(\n session_path=None\n , session_path_header=None\n , session_domain=None\n , session_secure=False\n , session_httponly=True\n , session_persistent=True\n , **kwargs\n ):\n \n # Guard against running twice\n if hasattr(cherrypy.serving, \"session\"):\n return\n \n request = cherrypy.serving.request\n session_cookie = kwargs.get('session_cookie', Session.session_cookie)\n cookie_timeout = kwargs.get('session_timeout', Session.timeout)\n \n # Check if request came with a session ID\n id = None\n if session_cookie in request.cookie:\n id = request.cookie[session_cookie].value\n log('ID obtained from request.cookie: %r' % id)\n else:\n log('New session (no cookie)')\n \n # Create and attach a new Session instance to cherrypy.serving.\n # It will possess a reference to (and lock, and lazily load)\n # the requested session data.\n cherrypy.serving.session = sess = Session(id, **kwargs)\n # Save a copy of our session in case we get overwritten by a user slate.\n cherrypy.serving.sessionActual = sess",
"def test_create_session(self):\n finder = FinderInsidePro(self.test_key)\n session_id = finder.create_session(2811)\n assert isinstance(session_id, str)\n assert session_id == finder.session_id\n assert len(session_id)",
"def get_or_create_sessions(self):\n\t\tpath = f'{self.BIKE_ENDPOINT}user/current/session?{self.secret_key}'\n\t\tresponse = requests.get(path).json()\n\t\tself.check_api_key(response)\n\n\t\treturn response",
"def create_session(self, session_id=None):\n\n # create random id when necessary, seems to be 1 case wanted, based on legacy code\n # creating a value so high, typical client side generation schemes hopefully wont collide\n if not session_id:\n session_id = next(\n session_id for session_id in xrange(60000, 65000)\n if session_id not in self.sessions\n )\n\n # create and add session to local manager\n session = Session(session_id, config=self.config)\n self.add_session(session)\n\n # add shutdown handler to remove session from manager\n session.shutdown_handlers.append(self.session_shutdown)\n\n return session",
"def insert_item(self, token_object,\n new_session, session_time=timedelta(0)):\n if self.file_type == settings.APACHE_COMMON:\n url_obj = get_or_create(\n self.session, Uurl, url=token_object.resource_requested)\n elif self.file_type == settings.APACHE_COMBINED:\n url_obj = get_or_create(\n self.session, Uurl, url=token_object.resource_requested)\n elif self.file_type == settings.SQUID:\n url_obj = get_or_create(\n self.session, Uurl, url=token_object.url)\n\n # If this is a new session\n if new_session:\n # Create session object\n session_obj = Session(\n ip=token_object.ip_address, session_time=session_time)\n # Set start and end time\n session_obj.start_time = token_object.date_time\n session_obj.end_time = token_object.date_time\n # If new_session is False, new session may or may not be created\n # (depending upon the session_time)\n else:\n # Try to get session object\n session_obj = get_or_create(\n self.session, Session, ip=token_object.ip_address)\n # If the object is a new session\n if session_obj.session_time is timedelta(0):\n session_obj.start_time = token_object.date_time\n\n session_obj.session_time = session_time\n session_obj.end_time = token_object.date_time\n\n # Add url to session\n session_obj.session_urls.append(url_obj)\n self.session.add(session_obj)",
"def test_new_session(self):\r\n cookie = Cookie()\r\n req = Mock(incookie=Cookie(), outcookie=cookie, authname='anonymous',\r\n base_path='/')\r\n session = Session(self.env, req)\r\n self.assertEqual(session.sid, cookie['trac_session'].value)\r\n cursor = self.db.cursor()\r\n cursor.execute(\"SELECT COUNT(*) FROM session\")\r\n self.assertEqual(0, cursor.fetchone()[0])",
"def _create_session_data(self, abs_path, sess_root):\n sess_path = os.path.join(abs_path, sess_root)\n if not os.path.exists(sess_path):\n os.makedirs(sess_path)\n sess_id = len(os.listdir(sess_path))\n sess_path = os.path.join(sess_path, str(sess_id))\n print(\"SESSION PATH:\", sess_path)\n print(\"SESSION ID:\", sess_id) \n return sess_id, sess_path",
"def create(self):\n\t\tif self._session:\n\t\t\tself.close()\n\n\t\tif not self._session:\n\t\t\tself._session = requests.Session()\n\t\t\tself._session.mount('http://', ra.HTTPAdapter(max_retries=self._max_retries))\n\t\t\tself._session.mount('https://', ra.HTTPAdapter(max_retries=self._max_retries))\n\n\t\t\tmsg = u'Created internal requests Session instance {0:#0x}'\n\t\t\tlog_with_debug_info(logging.DEBUG, msg.format(id(self._session)))",
"def get_write_session() -> Session:\n return _write_session()"
] | [
"0.7212364",
"0.6900169",
"0.6895351",
"0.6759798",
"0.6726369",
"0.6653804",
"0.6640182",
"0.65958303",
"0.65842575",
"0.65202713",
"0.6479733",
"0.64455444",
"0.6434389",
"0.64199865",
"0.63682336",
"0.6362771",
"0.63557774",
"0.63264036",
"0.6294936",
"0.6289158",
"0.62681067",
"0.6267423",
"0.62555367",
"0.624714",
"0.6244107",
"0.6200586",
"0.61884403",
"0.6152993",
"0.61482996",
"0.61311203"
] | 0.70272946 | 1 |
Updates an existing session. | def update_session(
id: str,
path: Optional[str] = None,
name: Optional[str] = None,
type: Optional[str] = None,
kernel_name: Optional[str] = None,
kernel_id: Optional[str] = None,
) -> None:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upsert_session(session_data):\n g_db['sessions'].update(\n get_session_id(session_data),\n {\n \"$set\": session_data,\n },\n upsert=True\n )",
"def test_update_session(self):\r\n now = time.time()\r\n\r\n # Make sure the session has data so that it doesn't get dropped\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('123456', 0, 1)\")\r\n cursor.execute(\"INSERT INTO session_attribute VALUES \"\r\n \"('123456', 0, 'foo', 'bar')\")\r\n\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='anonymous', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n session.save() # updating should not require modifications\r\n\r\n self.assertEqual(PURGE_AGE, outcookie['trac_session']['expires'])\r\n\r\n cursor.execute(\"SELECT last_visit FROM session WHERE sid='123456' AND \"\r\n \"authenticated=0\")\r\n self.assertAlmostEqual(now, int(cursor.fetchone()[0]), -1)",
"def update(self):\n sess = u.get_default_session()\n # sess.run(self.update_op)\n u.run(self.update_op)",
"def update_session(self, session):\n self.session = session\n print(self.session.active)\n self.curvePlot.session = session\n self.saveAs.session = session\n self.actionRun.setEnabled(True)\n self.actionPass.setEnabled(True)\n try:\n if self.port and self.brate:\n self.menuConnect.setEnabled(True)\n self.actionStop.setEnabled(False)\n if self.depthCal and self.tensionCal:\n self.actionPlot.setEnabled(True)\n self.actionSpeed.setEnabled(True)\n self.actionDepth.setEnabled(True)\n self.actionSaveAs.setEnabled(True)\n except:\n pass\n # Status Bar message\n msg = \"Well: {} Run: {} Pass: {}\".format(\n session.active['well'],\n str(session.active['run']),\n session.active['pass'][5:])\n self.dbStatus.showMessage(msg)",
"def test_update_session(self):\n study_id = self.storage.create_study(sample_study_spec())\n\n session = sample_session(study_id=study_id)\n self.storage.create_session(session)\n self.assertEqual(session.state, study_pb2.Session.STATE_VALID)\n\n session.state = study_pb2.Session.STATE_INVALID\n self.storage.update_session(session)\n\n self.assertEqual(self.storage.get_session(study_id, session.id), session)\n self.assertEqual(session.state, study_pb2.Session.STATE_INVALID)",
"def update_session(id):\n session = Session.query.get(id)\n\n # calculate all the final scores of the contributors\n latency_score = cssi.latency.generate_final_score(scores=session.latency_scores)\n sentiment_score = cssi.sentiment.generate_final_score(all_emotions=session.sentiment_scores, expected_emotions=session.expected_emotions)\n questionnaire_score = cssi.questionnaire.generate_final_score(pre=session.questionnaire.pre, post=session.questionnaire.post)\n\n # calculate the final scores of the plugins\n plugin_scores = cssi.generate_plugin_final_scores(scores=session.plugin_scores)\n\n # calculate the final CSSI Score\n cssi_score = cssi.generate_cssi_score(tl=latency_score, ts=sentiment_score, tq=questionnaire_score, ps=plugin_scores)\n\n # set the scores in the session\n session.total_latency_score = latency_score\n session.total_sentiment_score = sentiment_score\n session.total_questionnaire_score = questionnaire_score\n session.total_plugin_scores = plugin_scores\n session.cssi_score = cssi_score\n\n # get a breakdown of the questionnaire scores and set it in the session\n [pre_n, pre_o, pre_d, pre_ts], [post_n, post_o, post_d, post_ts] = cssi.questionnaire.generate_score_breakdown(pre=session.questionnaire.pre, post=session.questionnaire.post)\n q_score_breakdown = {\n \"pre\": {\n \"N\": pre_n,\n \"O\": pre_o,\n \"D\": pre_d,\n \"TS\": pre_ts\n },\n \"post\": {\n \"N\": post_n,\n \"O\": post_o,\n \"D\": post_d,\n \"TS\": post_ts\n }\n }\n session.questionnaire_scores = q_score_breakdown\n\n session.status = \"completed\"\n db.session.commit()\n\n result = session_schema.dump(session).data\n\n return jsonify({'status': 'success', 'message': 'Successfully updated the session data', 'data': result}), 200",
"def save_session(self, session):\n db = self.open()\n db[session.id] = session",
"def update_session(request):\n if request.method == \"POST\":\n req_data = request.POST.get(\"session_data\", None)\n if req_data:\n if req_data == \"sidebar\":\n if \"sidebar\" in request.session.keys():\n request.session[\"sidebar\"][\"sticky\"] ^= True\n else:\n request.session[\"sidebar\"] = {}\n request.session[\"sidebar\"][\"sticky\"] = True\n request.session.save()\n data = {\n \"result\": \"success\",\n \"message\": \"Session updated\",\n }\n return JsonResponse(data)\n\n return HttpResponseNotAllowed([\"POST\"])",
"def set(self, session):\n raise InvalidSessionException('Need to be implemented')",
"def put(self, session: Session = None) -> Response:\n token = generate_token(username=current_user.name, session=session)\n return jsonify({'token': token})",
"def update_self(self, existing_session=None):\n if (not existing_session):\n session = get_database_session()\n else:\n session = existing_session\n\n session.add(self)\n session.commit()\n\n if (not existing_session):\n session.expunge(self)",
"def set_login_session(self, session_id=None):\r\n meta = self.get_meta()\r\n old_login = meta.get('session_id', None)\r\n if old_login:\r\n SessionStore(session_key=old_login).delete()\r\n meta['session_id'] = session_id\r\n self.set_meta(meta)\r\n self.save()",
"def session(self, value: ClientSession):\r\n self._session = value",
"def _update_token(token):\n session.token = token",
"def update_from_naucse(self, report_progress=print, session=None):\n if self.naucse_slug == None:\n raise ValueError(f'No naucse slug for course {self.course_name}')\n if session is None:\n session = requests.Session()\n url = NAUCSE_API_URL_TEMPLATE.format(self.naucse_slug)\n response = session.get(url)\n if response.status_code != 200:\n raise ValueError(f'Could not update course: {url} returned {response.status_code}')\n response.raise_for_status()\n course_info = response.json()['course']\n if 'subtitle' in course_info:\n self.course_name = f\"{course_info['title']} – {course_info['subtitle']}\"\n else:\n self.course_name = course_info['title']\n\n report_progress(f'Updating {self!r}')\n\n self.save()\n\n for session_info in course_info['sessions']:\n if 'time' not in session_info:\n report_progress(\n f'Skipping session without time: {session_info[\"title\"]}')\n else:\n session, created = Session.objects.get_or_create(\n course=self,\n slug=session_info['slug'],\n )\n if 'serial' in session_info:\n session.title = f'Lekce {session_info[\"serial\"]}'\n else:\n session.title = None\n session.text = session_info['title']\n published_date = parse_datetime(session_info['time']['start'])\n session.published_date = published_date\n\n if created:\n report_progress(f'Added {session!r}')\n else:\n report_progress(f'Updating {session!r}')\n\n session.save()",
"def upsert(database: Database, user: User, session_id: SessionId, session_expiration_datetime: datetime) -> None:\n database.sessions.replace_one(\n {\"user\": user.username},\n {\n \"user\": user.username,\n \"email\": user.email,\n \"common_name\": user.common_name,\n \"session_id\": session_id,\n \"session_expiration_datetime\": session_expiration_datetime,\n },\n upsert=True,\n )",
"async def renew(self, session, *, dc=None):\n session_id = extract_attr(session, keys=[\"ID\"])\n response = await self._api.put(\"/v1/session/renew\", session_id,\n params={\"dc\": dc})\n try:\n result = response.body[0]\n except IndexError:\n meta = extract_meta(response.headers)\n raise NotFound(\"No session for %r\" % session_id, meta=meta)\n return consul(result, meta=extract_meta(response.headers))",
"def set_session(session):\n\n global session_\n session_ = session\n import observatory.api.server.api as api\n\n api.session_ = session",
"def set_session(context, key, value):\n session_manager = getToolByName(context, 'session_data_manager')\n session = session_manager.getSessionData()\n session[key] = value",
"def use_session(cls, session):\r\n cls._session = session",
"def fusion_api_set_active_session(self, sessionId):\n return self.loginsession.set_active_session(sessionId)",
"def refresh_session():\n\n hruntime.response.headers['Cache-Control'] = 'must-revalidate, no-cache, no-store'\n\n hruntime.user = hruntime.dbroot.users[hruntime.session.name]\n hruntime.i18n = hruntime.dbroot.localization.languages['cz']",
"def update(self):\n db.session.commit()",
"def update(self):\n db.session.commit()",
"def set_session_property(self, key, value):\n\n self.session[key] = value",
"def add2session(key, value):\n cherrypy.session.acquire_lock()\n cherrypy.session[key] = value\n cherrypy.session.release_lock()",
"def update(self):\n with managed_session() as session:\n session.merge(self)",
"def addsession(cls, session, username, passwd):\n sessionkey = cls.sessionkey(session)\n tmpdict = dict({'username': username, 'password': passwd})\n sessionmgr.update(dict({sessionkey: tmpdict}))",
"def save(self):\n self.session.modified = True",
"async def save_session(request, response):\n try:\n await request.app.session_interface.save(request, response)\n except Exception as e:\n if isinstance(e, Exception):\n pass"
] | [
"0.7159977",
"0.686218",
"0.68574136",
"0.66107583",
"0.6506551",
"0.6489274",
"0.6447803",
"0.64465",
"0.63691455",
"0.63294715",
"0.62878484",
"0.6127597",
"0.6104474",
"0.6089136",
"0.6085308",
"0.6046978",
"0.6046949",
"0.60441613",
"0.5959553",
"0.59453976",
"0.5927981",
"0.59049016",
"0.5862541",
"0.5862541",
"0.58554435",
"0.58338207",
"0.58177024",
"0.5811466",
"0.5779311",
"0.57515186"
] | 0.726062 | 0 |
Takes a two element tuple. The second element must be a Beliefs object which system1 will use to update the belief module. Once updated, the action queue will be emptied and the rules will be checked for satisfied conditions. The action queue will be refilled with new active actions from the rule list. | def process_belief(self, args):
goal, belief = args
if isinstance(belief, Beliefs):
self.belief_module.process_belief(belief)
self.initialize_action_queue()
return [{}] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_beliefs(self, result, action):\n if self.traceUpdate:\n print(\"Updating beliefs based on action\", action, \"with result\", result)\n\n if result == 'TryAgain':\n return None\n\n elif not result and not self.isTransient(action):\n if self.traceUpdate:\n print(\"Adding known false\", action)\n self.knownFalseTuple(action)\n\n if isinstance(result, list):\n for bindings in result:\n concrete_result = substitute(action, bindings)\n if not self.isTransient(concrete_result):\n if self.traceUpdate:\n print(\"Adding known true and performed\", concrete_result)\n self.knownTuple(concrete_result)\n self.knownTuple(('performed', concrete_result))\n self.update_variable_binding(concrete_result)",
"def _update_beliefs(self, features,\n beliefs):\n raise NotImplementedError",
"def update1(self, state, action, nextState, reward):\n #print \"update1 in ApproximateQAgent\"\n \"*** YOUR CODE HERE ***\"\n ##################################################################################################################################Eric Did Stuff\n actionList = nextState.getLegalActions(self.index)\n\n\n #print \"Action List\", actionList\n\n\n\n\n weights = self.getWeights()\n\n features = self.featExtractor.getFeatures(state, action, self)\n #self.myFeats = features\n if self.index == 0:\n print \"FEATURES: \",features\n value = self.computeValueFromQValues(nextState)\n qValue = self.getQValue(state,action)\n #print \"value\", value, \"qValue\", qValue\n for feature in features:\n if len(actionList) != 0:\n weights[feature] = weights[feature] + self.alpha * (reward + self.discount * value - qValue) * features[feature]\n else:\n weights[feature] = weights[feature] + self.alpha * (reward - qValue) * features[feature]\n #print \"feature\", feature, \"weights\", weights[feature]\n #print \"weights\", weights\n\n #util.raiseNotDefined()",
"def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority",
"def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority",
"def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority",
"def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority",
"def update(self):\n startstate = self.state\n goalstates =self.env.getGoalStates()\n inputs = self.env.sense(self)\n self.action_sequence = self.drive(goalstates,inputs)\n action = self.choose_action() # Choose an action\n self.state = self.env.act(self,action) \n return",
"def _action(self, wloops: Any, beta: Any) -> Any:\n pass",
"def HELPER_update_belief(self, old_belief, observation, gamma):\n observation = int(observation)\n #print \"old_belief:\", old_belief, type(old_belief)\n #print \"observation:\", observation, type(observation)\n #print \"gamma:\", gamma, type(gamma)\n\n diffs = [0.1*i for i in range(self.num_difficulty_bins)]\n new_belief = util.updateBelief(old_belief, None, observation, diffs, gamma)\n #print \"new_belief\", new_belief, type(new_belief)\n return new_belief",
"def update_beliefs(self, corpus_id):\n logger.info('Updating beliefs for corpus \"%s\"' % corpus_id)\n # TODO check which options are appropriate for get_corpus\n corpus = self.get_corpus(corpus_id)\n be = BeliefEngine(self.scorer)\n stmts = list(corpus.statements.values())\n be.set_prior_probs(stmts)\n # Here we set beliefs based on actual curation\n for uuid, correct in corpus.curations.items():\n stmt = corpus.statements.get(uuid)\n if stmt is None:\n logger.warning('%s is not in the corpus.' % uuid)\n continue\n stmt.belief = correct\n belief_dict = {st.uuid: st.belief for st in stmts}\n return belief_dict",
"def update_q(self,action,reward):\n #print('')\n #print('Action index is: ' + str(action))\n #print('Provided reward is: ' + str(reward))\n \n # Read from disk before updating\n try:\n pickle_in = open(\"static/data/values.pickle\",\"rb\")\n values = pickle.load(pickle_in)\n #print(values)\n self.values = values\n pickle_in = open(\"static/data/counts.pickle\",\"rb\")\n self.counts = pickle.load(pickle_in)\n pickle_in = open(\"static/data/actions_taken.pickle\",\"rb\")\n actions_taken = pickle.load(pickle_in)\n pickle_in = open(\"static/data/reward_list.pickle\",\"rb\")\n reward_list = pickle.load(pickle_in)\n except:\n actions_taken = []\n reward_list = []\n pass\n \n self.counts[action] += 1\n n = self.counts[action]\n value = self.values[action]\n actions_taken.append(action)\n reward_list.append(reward)\n \n # Running product\n new_value = value + (1/n) * (reward - value)\n self.values[action] = new_value\n \n \n # Save to disk before exiting\n pickle_out = open('static/data/values.pickle','wb')\n pickle.dump(self.values, pickle_out)\n pickle_out.close()\n pickle_out = open('static/data/counts.pickle','wb')\n pickle.dump(self.counts, pickle_out)\n pickle_out.close()\n pickle_out = open('static/data/actions_taken.pickle','wb')\n pickle.dump(actions_taken, pickle_out)\n pickle_out.close()\n pickle_out = open('static/data/reward_list.pickle','wb')\n pickle.dump(reward_list, pickle_out)\n pickle_out.close()",
"def update1(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n ############################################################################################################ Eric Changed nextState to other stuff\n \n actionList = nextState.getLegalActions(self.index)\n\n if (not (nextState == None)) and len(actionList) > 0 :\n expectedRewardList = []\n #print \"state \",nextState,\" has legal actions \", state.getLegalActions(nextState)\n for a in actionList:\n #print \"next state: \",nextState,\" action: \",a, \"Value: \", self.Q[(nextState, a)]\n expectedRewardList.append(self.Q[(nextState, a)])\n #print \"expected reward list: \", expectedRewardList\n self.Q[(state, action)] = self.Q[(state, action)] + self.alpha * (reward + self.discount * max(expectedRewardList) - self.Q[(state, action)])\n #print self.Q\n return\n else:\n self.Q[(state, action)] = self.Q[(state, action)] + self.alpha * (reward - self.Q[(state, action)])\n return\n\n #print \"I should never be here\"\n #util.raiseNotDefined()",
"def update_belief(self, state, action, reward):\n self.add_to_state_history(state)\n state = self.get_modified_state()\n self.belief.update(state, action, reward, self.alpha)\n self.alpha *= self.a_rate",
"def _update_beliefs(self, features,\n beliefs):\n if (len(features) != len(beliefs) or features.ndim != 1):\n raise core.BadFeatureFnError()\n\n assert len(features) == len(beliefs)\n decay = self.rng.binomial(beliefs, self.params.decay_prob)\n updated_beliefs = [\n beliefs[i] + features[i] - decay[i] for i in range(len(beliefs))\n ]\n return updated_beliefs",
"def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n features = self.featExtractor.getFeatures(state,action)\n\n learning_rate = self.alpha #gives us the learning rate\n\n temporary_QValue = self.getQValue(state,action) #to get the Q value of the state,action pair\n\n nextState_QValue = self.getValue(nextState) #to get the Q value of the landing state when taken action a and state s\n\n discount_factor = self.discount #to get the gamma/ discount factor\n\n weight = self.weights\n\n Q_Value = 0\n\n difference = (reward + discount_factor * nextState_QValue ) - (temporary_QValue) #refer to README_Reinforcement.txt for the formula\n\n for each_feature in features:\n\n #refer to README_Reinforcement.txt for the formula at line 20\n weight[each_feature] = weight[each_feature] + learning_rate * difference * features[each_feature]\n\n #util.raiseNotDefined()",
"def update(self, arm, reward, context):",
"def update(self, action, reward):\n # like puseudo count\n a, b = self.ActionValue[action]\n #print(f\"UPDATE {action}: ({a}, {b})\")\n a = a + self.huber(reward) # The larger the reward, the easier it is to select\n b = b + 1 - self.huber(reward) # It becomes easy to be selected as the reward becomes larger, and it becomes difficult to be selected as the reward becomes smaller\n a = 0.001 if a <= 0 else a\n b = 0.001 if b <= 0 else b\n \n self.ActionValue[action] = (a, b)\n\n #print(f\"=> ({a}, {b})\")\n\n # Update nearby action candidates\n around_update_rate = 0.3 # Parameter to adjust the degree of change according to the distance; [0, 1]\n radius = np.sqrt(self.action_resolution**2 + self.action_resolution**2 + 1e-9) # 1e-9 is for safety to caluculate the small number \n for action_around in self.actions:\n if action_around == action:\n continue\n x = action_around[0] - action[0]\n y = action_around[1] - action[1]\n distance = np.sqrt(x**2 + y**2)\n if distance <= radius:\n a, b = self.ActionValue[action_around]\n #print(f\"UPDATE {action_around}: ({a}, {b})\")\n a = a + self.huber(reward) * around_update_rate * (1 - distance)\n b = b + (1 - self.huber(reward)) * around_update_rate * (1 - distance) # To adjust the update, weight 1-r. If normal update is 1, it will be the update of around_update_rate * (1-distance) for adjacent actions.\n a = 0.001 if a <= 0 else a\n b = 0.001 if b <= 0 else b\n\n #print(f\"=> ({a}, {b})\")\n\n self.ActionValue[action_around] = (a, b)",
"def update_based_on_topology(self, *args, **kwargs):\n for bfr in Configuration.get(\"switches\"):\n switch = bfr[\"name\"]\n\n self.update_bier_decap_rule(switch=switch)",
"def __init__(self, sn, beliefs):\n assert all([type(x) is Belief for x in beliefs])\n self._sn = sn\n self._beliefs = beliefs",
"def kb_retract(self, fact_or_rule):\n printv(\"Retracting {!r}\", 0, verbose, [fact_or_rule])\n ####################################################\n # Student code goes here\n\n if isinstance(fact_or_rule, Fact):\n if fact_or_rule not in self.facts:\n #print(\"fact not in bk!\")\n return\n else:\n #find the corresponding fact in kb\n index = self.facts.index(fact_or_rule)\n fact_or_rule = self.facts[index]\n #if the fact is not supported, remove it\n if len(fact_or_rule.supported_by) == 0:\n self.facts.remove(fact_or_rule)\n else:\n #print(\"can't retract!\")\n return\n elif isinstance(fact_or_rule, Rule):\n if fact_or_rule not in self.rules:\n #print(\"rule not in bk!\")\n return\n else:\n #find the corresponding rule in kb\n index = self.rules.index(fact_or_rule)\n fact_or_rule = self.rules[index]\n #if rule is not supported and not asserted, then remove it\n if len(fact_or_rule.supported_by) == 0 and fact_or_rule.asserted != True:\n self.rules.remove(fact_or_rule)\n else:\n #print(\"can't retract!\")\n return\n #remove the supported pairs of the facts that it supports\n for facts in fact_or_rule.supports_facts:\n for i in facts.supported_by:\n if fact_or_rule in i:\n facts.supported_by.remove(i)\n if facts.asserted != True:\n self.kb_retract(facts)\n #remove the supported pairs of the rules that it supports\n for rules in fact_or_rule.supports_rules:\n for i in rules.supported_by:\n if fact_or_rule in i:\n rules.supported_by.remove(i)\n if rules.asserted != True:\n self.kb_retract(rules)",
"def updateFCFS_queue(self, junc):\n for tl_combination in junc.tl_combinations:\n for lane in tl_combination.corresponding_lanes:\n for vehicle in traci.lane.getLastStepVehicleIDs(lane.ID):\n junc.FCFS_queue[vehicle] = tl_combination.ryg_state",
"def updateWeapons(self):\n self.readyWeapons = []\n self.setWeaponStatus()\n\n for myWeapon in self.activeWeapons:\n if myWeapon.preFireCheck() == 1:\n self.readyWeapons.append(myWeapon)\n self.alternateTargets = []\n\n if self.amsTargets != []:\n for myWeapon in self.amsWeapons:\n if myWeapon.preFireCheck() == 1:\n self.readyWeapons.append(myWeapon)\n self.amsTargets = []",
"def update_belief_once(self, current_observation, last_observation, avg_vel, dt, current_belief):\n # type: (np.ndarray, np.ndarray, float, float, list) -> (list, list)\n\n\n\n new_belief = []\n likelihoods = []\n estimated_positions = []\n normalization_factor = 0.0\n\n # Compute the likelihoods\n for goal_idx in range(self._num_goals):\n obs_likelihood, calculated_position = self.compute_observation_likelihood(current_observation,\n last_observation,\n self._goals[goal_idx],\n avg_vel, dt)\n estimated_positions.append(calculated_position)\n obs_likelihood += 1\n likelihoods.append(obs_likelihood)\n normalization_factor += obs_likelihood * current_belief[goal_idx]\n\n\n\n\n #for i in range(self.importance_of_prior_in_belief_update):\n #normalization_factor = 0.0\n #tmp_belief = []\n # Compute new belief\n for goal_idx in range(self._num_goals):\n prob = (likelihoods[goal_idx] * current_belief[goal_idx])/normalization_factor\n\n new_belief.append(prob)\n\n #tmp_belief = np.array(tmp_belief) / normalization_factor\n\n\n #new_belief = tmp_belief\n return [new_belief, estimated_positions]",
"def get_action(self, state):\n\n \"\"\"\n XXX: DO NOT MODIFY THAT FUNCTION !!!\n Doing so will result in a 0 grade.\n \"\"\"\n\n # XXX : You shouldn't care on what is going on below.\n # Variables are specified in constructor.\n if self.beliefGhostStates is None:\n self.beliefGhostStates = state.getGhostBeliefStates()\n if self.walls is None:\n self.walls = state.getWalls()\n return self.updateAndGetBeliefStates(\n self._computeNoisyPositions(state))",
"def beam_update(self, beams, extra):\n return extra",
"def update(self, *args, **kwargs):\n\n print(\"\\nIn MOCK ALGO OBSERVER....\")\n\n if 'remaining_tasks' in kwargs:\n\n remaining_tasks = len(kwargs['remaining_tasks'])\n\n print(\"\\tThere are {} remaining tasks\".format(remaining_tasks))\n print(\"\\tIs {} less than {}? {}\".format(remaining_tasks, min_tasks, (remaining_tasks < min_tasks)))\n\n # If we don't have the minimum number of hits out...\n if remaining_tasks < min_tasks:\n print(\"\\tRefilling queue with {} new task(s)\".format(min_tasks - remaining_tasks))\n # Fill up the tasks again\n for t in range(min_tasks - remaining_tasks):\n new_task = make_rand_task()\n tasks.append(new_task)\n\n actAMT.init_tasks(tasks, hit_type_init_file)\n del tasks[:]\n\n if 'completed_task' in kwargs:\n add_to_db(kwargs['completed_task'])",
"def update_action(self):\n self.action = self.automata > self.states\n self.inv_action = self.inv_automata > self.states",
"def belief_conflict(self, args):\n goal, belief = args\n if isinstance(belief, Beliefs):\n if self.belief_module.is_conflicting_belief(belief):\n return [{}]\n\n return []",
"def _update_goals(self):\n print\"updating goals\"\n response = self.goal_tracker_call() # type: GoalsResponse\n self._goals = []\n for goal in response.goals: # type: Point\n self._goals.append([goal.x, goal.y, goal.z])\n self._num_goals = len(self._goals)\n\n self._current_belief = self._init_belief()"
] | [
"0.62839353",
"0.5601004",
"0.54203224",
"0.523504",
"0.523504",
"0.523504",
"0.523504",
"0.52241856",
"0.51511395",
"0.5150723",
"0.5111748",
"0.50751257",
"0.5067793",
"0.50459915",
"0.5037544",
"0.50247914",
"0.4989329",
"0.4953486",
"0.49498907",
"0.49385783",
"0.49159616",
"0.48950887",
"0.48764834",
"0.48609462",
"0.48405632",
"0.48395693",
"0.48154777",
"0.48101878",
"0.48097143",
"0.48087364"
] | 0.60782725 | 1 |
Calls the belief module's emit_belief method to get and return a Beliefs object with the agents chosen belief for emission. | def emit_belief(self, args):
goal, belief = args
return [{belief: self.belief_module.emit_belief()}] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_belief(self, args):\n goal, belief = args\n\n if isinstance(belief, Beliefs):\n self.belief_module.process_belief(belief)\n self.initialize_action_queue()\n\n return [{}]",
"def calculateBeliefs(self):\n\n belief = {}\n\n for question in self.getQuestions():\n q = str(question.id)\n belief[q] = self.HELPER_init_belief()\n\n #print belief[q]\n for answer in self.getQuestionCompletedAnswers(question):\n #print q\n #print str(answer.question.id)\n assert str(answer.question.id) == q\n w_skill = answer.worker.inference_results['EM']['skill']\n # answer.value must be \"0\" or \"1\"\n assert answer.value == \"0\" or answer.value == \"1\"\n #print answer.value, w_skill\n belief[q] = self.HELPER_update_belief(belief[q], answer.value, w_skill)\n #print belief[q]\n\n #print \"Question beliefs:\", belief\n #print \"##################\"\n return belief",
"def getBeliefDistribution(self):\n pass",
"def update_beliefs(self, corpus_id):\n logger.info('Updating beliefs for corpus \"%s\"' % corpus_id)\n # TODO check which options are appropriate for get_corpus\n corpus = self.get_corpus(corpus_id)\n be = BeliefEngine(self.scorer)\n stmts = list(corpus.statements.values())\n be.set_prior_probs(stmts)\n # Here we set beliefs based on actual curation\n for uuid, correct in corpus.curations.items():\n stmt = corpus.statements.get(uuid)\n if stmt is None:\n logger.warning('%s is not in the corpus.' % uuid)\n continue\n stmt.belief = correct\n belief_dict = {st.uuid: st.belief for st in stmts}\n return belief_dict",
"def belief_revision(self):\n\n # Store the coherence of the belief_network before the belief revision has taken place\n network_history = self.belief_network.copy()\n self.coherence_history = self.coherence(network_history)\n\n # Add the newly communicated nodes to the belief_network\n if self.communicated_nodes is not None:\n for node in self.communicated_nodes:\n self.belief_network.nodes[node[0]]['truth_value'] = node[1]\n self.belief_network.nodes[node[0]]['type'] = 'com'\n\n # Get the inferred nodes and its combinations of truth values in order to explore different coherence values\n inferred_nodes = [x for x, y in self.belief_network.nodes(data=True) if y['type'] == 'inf']\n combinations = list(itertools.product([True, False], repeat=len(inferred_nodes)))\n\n # Calculate the coherence for all possible combinations\n\n # Initialise a list to store the different coherence values in\n coherence_values = []\n\n for n in range(len(combinations)):\n # Initialise a count for the number of inferred nodes\n i = 0\n for inferred_node in inferred_nodes:\n self.belief_network.nodes[inferred_node]['truth_value'] = combinations[n][i]\n i += 1\n coherence_values.append(self.coherence(self.belief_network))\n\n # Store all the indices of the maximum coherence values in a list and pick one randomly\n max_coherence = max(coherence_values)\n max_indices = [i for i in range(len(coherence_values)) if coherence_values[i] == max_coherence]\n nodes_truth_values_index = random.choice(max_indices)\n\n # Set the truth values of the inferred nodes to (one of) the maximum coherence option(s)\n i = 0\n for inferred_node in inferred_nodes:\n self.belief_network.nodes[inferred_node]['truth_value'] = combinations[nodes_truth_values_index][i]\n i += 1\n\n # If at least one node is flipped, belief revision has taken place and the coherence should be compared\n # with the previous belief_network before belief revision (trouble_identification)\n # print(\"Network after belief revision:\\n\", self.belief_network.nodes(data=True))\n # print(\"Network before belief revision:\\n\", network_history.nodes(data=True))\n if not nx.is_isomorphic(self.belief_network, network_history, node_match=lambda x, y: x['truth_value'] ==\n y['truth_value']):\n # print(\"Trouble identification\")\n repair_initiation = self.trouble_identification()\n else:\n # print(\"No trouble identification\")\n repair_initiation = False\n\n return repair_initiation, self.belief_network",
"def getAction(self, observation):\n \n beliefs = []\n noisyRangeMeasurements, prevAction, gameState = observation\n if self.observeEnable:\n self.inferenceModule.observe(prevAction, noisyRangeMeasurements)\n beliefs.append(self.inferenceModule.getWallBeliefDistribution())\n beliefs.append(self.inferenceModule.getPositionBeliefDistribution())\n self.display.updateDistributions(beliefs)\n return self.chooseAction(gameState)",
"def belief(self, element):\n return self.bel(element)",
"def _init_belief(self):\n belief = []\n for i in range(self._num_goals):\n belief.append(1.0 / self._num_goals)\n self._last_belief_over_history = np.copy(belief)\n return belief",
"def _compute_belief(self):\n # Compute current dt\n current_time = time.time()\n\n\n\n\n\n # Get the current human position\n try:\n (current_human_pos, rotation) = self._tf_listener.lookupTransform(self._darias_frame, self._human_frame,\n rospy.Time(0))\n current_human_pos = np.asarray(current_human_pos)\n\n except (tf.ExtrapolationException, tf.ConnectivityException, tf.LookupException):\n return\n\n self._compute_belief_from_pose_and_time(current_human_pos, current_time)",
"def get_belief_scores(self):\n return self._belief_scores.copy()",
"def belief_conflict(self, args):\n goal, belief = args\n if isinstance(belief, Beliefs):\n if self.belief_module.is_conflicting_belief(belief):\n return [{}]\n\n return []",
"def updateAndGetBeliefStates(self, evidences):\n # XXX: Your code here\n\n # if self.iter < 0:\n # np.save('Entropy{}_{}'.format(self.w, self.p), self.entropy)\n # sys.exit()\n #\n # self.iter = self.iter - 1\n\n if (self.m or self.n) is None:\n self.m = self.walls.height\n self.n = self.walls.width\n\n if not self.board:\n for x in np.arange(self.n):\n for y in np.arange(self.m):\n self.board.append((x, y))\n\n if self.transitionMatrix is None:\n self.transitionMatrix = self.createTransitionMatrix()\n\n if self.sensorMatrix is None:\n self.sensorMatrix = self.createSensorModel()\n\n beliefStates = self.beliefGhostStates\n\n # self.entropy.append(self.entropyF(beliefStates))\n\n for i, e in enumerate(evidences):\n \"\"\"\n To manage multiple ghosts.\n \"\"\"\n col_beliefStates = np.reshape(beliefStates[i, :, :], (-1, 1))\n\n index = self.board.index(e)\n O_col = self.sensorMatrix[:, index]\n\n O = np.diag(O_col)\n \"\"\"\n O = Observation matrix.\n \"\"\"\n\n col_bel = np.dot(O, self.transitionMatrix)\n col_beliefStates = np.dot(col_bel, col_beliefStates)\n\n alpha = 1/(np.sum(col_beliefStates))\n col_beliefStates = alpha*col_beliefStates\n\n beliefState = col_beliefStates.reshape((self.n, self.m))\n beliefStates[i, :, :] = beliefState\n\n # XXX: End of your code\n self.beliefGhostStates = beliefStates\n return beliefStates",
"def __init__(self, sn, beliefs):\n assert all([type(x) is Belief for x in beliefs])\n self._sn = sn\n self._beliefs = beliefs",
"def generate_new_state(self):\n # If simple Beam, return itself.\n # Variable beams should return simple one.\n n_samples = 100000\n samples = np.random.normal(self.photon_energy, self.sigma, self.n_spikes*n_samples)\n\n gkde = stats.gaussian_kde(samples)\n\n gkde.set_bandwidth(bw_method=0.25)\n\n xs = np.linspace(self.photon_energy-self.sigma*5, self.photon_energy+self.sigma*5, self.n_spikes+1)\n\n density, bins, patches = plt.hist(samples, bins=xs, histtype=u'step', density=True)\n\n ind = np.where(density == np.amax(density))\n density[ind[0][0]] *= 1.5\n density_renorm = density / density.sum()\n\n photon_energy = np.linspace(self.photon_energy-self.sigma*5, self.photon_energy+self.sigma*5, self.n_spikes+1).tolist()\n fluences = (self.get_photons_per_pulse()*density_renorm/density_renorm.sum())\n\n return [\n Beam(\n photon_energy=photon_energy[i],\n focus_x=self._focus_xFWHM,\n focus_y=self._focus_yFWHM,\n focus_shape=self._focus_shape,\n fluence=fluences[i])\n for i in range(self.n_spikes)\n ]",
"def updateAndGetBeliefStates(self, evidences):\n\n beliefStates = self.beliefGhostStates\n # XXX: Your code here\n width = self.walls.width\n height = self.walls.height\n w = self.w\n p = self.p\n pastBeliefStates = self.beliefGhostStates\n\n\n beliefStates = list()\n for i in range(len(evidences)):\n prob = np.zeros((width, height))\n pastProb = pastBeliefStates[i]\n evidence = evidences[i]\n for x in range(evidence[0] - w, evidence[0] + w + 1):\n for y in range(evidence[1] - w, evidence[1] + w + 1):\n if x in range(width) and y in range(height):\n prob[x][y] = 1\n\n for x in range(width):\n for y in range(height):\n if prob[x][y] != 0:\n prob[x][y] *= self.forwarding(x, y, p, pastProb)\n\n alpha = 1/np.sum(prob)\n # Normalization of the probability of the evidence\n for x in range(width):\n for y in range(height):\n if prob[x][y] != 0:\n prob[x][y] *= alpha\n beliefStates.append(prob)\n\n # XXX: End of your code\n self.beliefGhostStates = beliefStates\n return beliefStates",
"def beam(self) -> Beam:\n\n return self._beam",
"def HELPER_init_belief(self):\n return util.initBelief(self.num_answer_choices, self.num_difficulty_bins)",
"def test_edge_features(self):\n k = [4, 4, 4, 4, 4]\n mn = self.create_chain_model(k)\n\n d = 3\n\n for i in range(5):\n mn.set_edge_features((i, i+1), np.random.randn(d))\n\n mn.create_matrices()\n mn.set_unary_weight_matrix(np.random.randn(4, 4))\n mn.set_edge_weight_matrix(np.random.randn(d, 16))\n\n bp = MatrixBeliefPropagator(mn)\n\n bp.infer()\n bp.load_beliefs()\n\n unconditional_marginals = bp.var_beliefs[4]\n\n bp.condition(0, 2)\n bp.infer()\n bp.load_beliefs()\n\n conditional_marginals = bp.var_beliefs[4]\n\n assert not np.allclose(unconditional_marginals, conditional_marginals), \\\n \"Conditioning on variable 0 did not change marginal of variable 4\"\n\n mn.set_edge_features((2, 3), np.zeros(d))\n mn.create_matrices()\n mn.set_unary_weight_matrix(np.random.randn(4, 4))\n mn.set_edge_weight_matrix(np.random.randn(d, 16))\n\n bp.infer()\n bp.load_beliefs()\n\n unconditional_marginals = bp.var_beliefs[4]\n\n bp.condition(0, 2)\n bp.infer()\n bp.load_beliefs()\n\n conditional_marginals = bp.var_beliefs[4]\n\n assert np.allclose(unconditional_marginals, conditional_marginals), \\\n \"Conditioning on var 0 changed marginal of var 4, when the features should have made them independent\"",
"def _update_beliefs(self, features,\n beliefs):\n raise NotImplementedError",
"def _interaction(self, entity):\n\n # Get parameters\n att_range = np.array([agent.a_range for agent in entity], dtype=float)[:,None]\n att_strength = np.array([agent.get_advantage for agent in entity])[:,None]\n team_index = np.array([agent.team for agent in entity])\n alliance_matrix = team_index[:,None]==team_index[None,:]\n att_strength[team_index==TEAM1_BACKGROUND,] += self.BLUE_ADV_BIAS\n att_strength[team_index==TEAM2_BACKGROUND,] += self.RED_ADV_BIAS\n\n # Get distance between all agents\n x, y = np.array([agent.get_loc() for agent in entity]).T\n dx = np.subtract(*np.meshgrid(x,x))\n dy = np.subtract(*np.meshgrid(y,y))\n distance = np.hypot(dx, dy)\n\n # Get influence matrix\n infl_matrix = np.less(distance, att_range)\n infl_matrix = infl_matrix * att_strength\n friend_count = (infl_matrix*alliance_matrix).sum(axis=0)-1 # -1 to not count self\n enemy_count = (infl_matrix*~alliance_matrix).sum(axis=0)\n mask = enemy_count == 0\n\n # Add background advantage bias\n loc_background = [self._static_map[agent.get_loc()] for agent in entity]\n friend_count[loc_background==team_index] += self.STOCH_ATTACK_BIAS\n enemy_count[~(loc_background==team_index)] += self.STOCH_ATTACK_BIAS\n\n # Interaction\n if self.STOCH_ATTACK:\n result = self.np_random.rand(*friend_count.shape) < friend_count / (friend_count + enemy_count)\n else:\n result = friend_count > enemy_count\n result[mask] = True\n\n return result",
"def retrieve_solver_belief(self, t_plan=0, t=0):\n\n # get raw info stored from the solver\n # b_target[(v, t)] = beta, 0 <= beta <= 1\n b_target = self.belief[t_plan]\n\n # make it pretty: b = [b_c, b_v1, .... b_vn]\n belief = self.get_belief_vector(b_target, t)\n\n return belief",
"def choose(self):\n # pick agent A\n keys = list(self._agents.keys())\n keyA = random.choice(keys)\n agentA = self.model.schedule.agents[keyA]\n\n # pick pick agent B\n keyB = random.choice(agentA.neighbors)\n agentB = self.model.schedule.agents[keyB]\n\n return agentA, agentB",
"def HELPER_update_belief(self, old_belief, observation, gamma):\n observation = int(observation)\n #print \"old_belief:\", old_belief, type(old_belief)\n #print \"observation:\", observation, type(observation)\n #print \"gamma:\", gamma, type(gamma)\n\n diffs = [0.1*i for i in range(self.num_difficulty_bins)]\n new_belief = util.updateBelief(old_belief, None, observation, diffs, gamma)\n #print \"new_belief\", new_belief, type(new_belief)\n return new_belief",
"def act(self):\n channel_act = copy.deepcopy(self.observation)\n\n for user_act in channel_act['user_acts']:\n # Dialogue Act\n da_conf = self.generate_confidence()\n da_value = user_act[\"dialogue_act\"][\"value\"]\n\n if np.random.random() > da_conf:\n if da_value == UserAct.AFFIRM:\n da_value = UserAct.NEGATE\n elif da_value == UserAct.NEGATE:\n da_value == UserAct.AFFIRM\n else:\n pass\n\n user_act[\"dialogue_act\"][\"value\"] = da_value\n user_act[\"dialogue_act\"][\"conf\"] = self.generate_confidence()\n\n # Intent\n if \"intent\" in user_act:\n intent_value = user_act[\"intent\"][\"value\"]\n if self.intents[intent_value].get(\"speech\", False):\n intent_conf = 1.\n else:\n intent_conf = self.generate_confidence()\n intent_possible_values = self.slots[\"intent\"][\n \"possible_values\"].copy()\n\n if np.random.random() > intent_conf:\n intent_possible_values.remove(intent_value)\n intent_value = np.random.choice(intent_possible_values)\n\n user_act['intent']['value'] = intent_value\n user_act['intent']['conf'] = intent_conf\n\n # Slot Values\n for slot_dict in user_act.get('slots', list()):\n slot_name = slot_dict[\"slot\"]\n slot_value = slot_dict[\"value\"]\n\n if self.slots[slot_name][\"node\"] != \"BeliefNode\":\n slot_conf = 1.0\n else:\n slot_conf = self.generate_confidence()\n\n slot_possible_values = self.slots[slot_name].get(\n \"possible_values\")\n\n if slot_possible_values is None:\n slot_possible_values = list()\n\n slot_possible_values = slot_possible_values.copy()\n if len(slot_possible_values) and np.random.random() > slot_conf:\n slot_possible_values.remove(slot_value)\n slot_value = np.random.choice(slot_possible_values)\n\n slot_dict['conf'] = slot_conf\n\n channel_act[\"channel_utterance\"] = self.template_nlg(\n channel_act['user_acts'])\n return channel_act",
"def _update_beliefs(self, features,\n beliefs):\n if (len(features) != len(beliefs) or features.ndim != 1):\n raise core.BadFeatureFnError()\n\n assert len(features) == len(beliefs)\n decay = self.rng.binomial(beliefs, self.params.decay_prob)\n updated_beliefs = [\n beliefs[i] + features[i] - decay[i] for i in range(len(beliefs))\n ]\n return updated_beliefs",
"def talk(self):\n out = (self.blurbs[self.state][\"talk\"])\n self.next_state(\"talk\")\n return out",
"def _update_beliefs(self, features, beliefs):\n self.n_steps += 1\n if self.last_allocation is None:\n return beliefs\n for i_bin in range(self._n_bins):\n self.data[i_bin].append((features[i_bin], self.last_allocation[i_bin]))\n if self.params.burn_steps <= self.n_steps and self.n_steps % self.params.interval == 0:\n ll_model = _CensoredPoisson(\n np.array(self.data[i_bin][-self.params.window:]))\n results = ll_model.fit(disp=0)\n beliefs[i_bin] = results.params[0]\n return beliefs",
"def obtain_batch_bandit_feedback(\n self,\n random_state: Optional[int] = None,\n ) -> BanditFeedback:\n random_ = check_random_state(random_state)\n # train a base ML classifier\n base_clf_b = clone(self.base_classifier_b)\n base_clf_b.fit(X=self.X_tr, y=self.y_tr)\n preds = base_clf_b.predict(self.X_ev).astype(int)\n # construct a behavior policy\n pi_b = np.zeros((self.n_rounds_ev, self.n_actions))\n pi_b[:, :] = (1.0 - self.alpha_b) / self.n_actions\n pi_b[np.arange(self.n_rounds_ev), preds] = (\n self.alpha_b + (1.0 - self.alpha_b) / self.n_actions\n )\n # sample action and factual reward based on the behavior policy\n action = np.zeros(self.n_rounds_ev, dtype=int)\n for i, p in enumerate(pi_b):\n action[i] = random_.choice(\n np.arange(self.n_actions, dtype=int), p=p, replace=False\n )\n reward = self.y_full_ev[np.arange(self.n_rounds_ev), action]\n\n return dict(\n n_actions=self.n_actions,\n n_rounds=self.n_rounds_ev,\n context=self.X_ev,\n action=action,\n reward=reward,\n position=None, # position effect is not considered in classification data\n pscore=pi_b[np.arange(self.n_rounds_ev), action],\n )",
"def update_belief_once(self, current_observation, last_observation, avg_vel, dt, current_belief):\n # type: (np.ndarray, np.ndarray, float, float, list) -> (list, list)\n\n\n\n new_belief = []\n likelihoods = []\n estimated_positions = []\n normalization_factor = 0.0\n\n # Compute the likelihoods\n for goal_idx in range(self._num_goals):\n obs_likelihood, calculated_position = self.compute_observation_likelihood(current_observation,\n last_observation,\n self._goals[goal_idx],\n avg_vel, dt)\n estimated_positions.append(calculated_position)\n obs_likelihood += 1\n likelihoods.append(obs_likelihood)\n normalization_factor += obs_likelihood * current_belief[goal_idx]\n\n\n\n\n #for i in range(self.importance_of_prior_in_belief_update):\n #normalization_factor = 0.0\n #tmp_belief = []\n # Compute new belief\n for goal_idx in range(self._num_goals):\n prob = (likelihoods[goal_idx] * current_belief[goal_idx])/normalization_factor\n\n new_belief.append(prob)\n\n #tmp_belief = np.array(tmp_belief) / normalization_factor\n\n\n #new_belief = tmp_belief\n return [new_belief, estimated_positions]",
"def happiness(self):\n return ( self.girl.happiness())\n # self.boy.happiness(self.girl) +"
] | [
"0.6572719",
"0.60047936",
"0.58963394",
"0.5791217",
"0.5663534",
"0.5584099",
"0.5567305",
"0.550817",
"0.54517233",
"0.53226274",
"0.53167677",
"0.5304004",
"0.51742756",
"0.51660204",
"0.5156423",
"0.5151522",
"0.5149159",
"0.51191115",
"0.5110184",
"0.50023305",
"0.49014044",
"0.48974186",
"0.48856696",
"0.48717213",
"0.48576158",
"0.48263758",
"0.48238376",
"0.4794923",
"0.47802514",
"0.4777317"
] | 0.74441326 | 0 |
Checks if an incoming belief is in conflict with internal beliefs. A conflict occurs when the belief is of opposite valence to a current belief. This method does not update own or perceived beliefs. | def belief_conflict(self, args):
goal, belief = args
if isinstance(belief, Beliefs):
if self.belief_module.is_conflicting_belief(belief):
return [{}]
return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_conflict(self):\n for diffstat in self.diffstat():\n if diffstat.has_conflict:\n return True\n return False",
"def checkConflicts(self):\n\t\treturn",
"def refine_conflict(self):\n self._raise_not_supported()",
"def check_conflicts(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcfg = self.cfg\n\t\t# Now consider conflicts\n\t\tself.log('PHASE: conflicts', level=logging.DEBUG)\n\t\terrs = []\n\t\tself.pause_point('\\nNow checking for conflicts between modules', print_input=False, level=3)\n\t\tfor module_id in self.module_ids():\n\t\t\tif not cfg[module_id]['shutit.core.module.build']:\n\t\t\t\tcontinue\n\t\t\tconflicter = self.shutit_map[module_id]\n\t\t\tfor conflictee in conflicter.conflicts_with:\n\t\t\t\t# If the module id isn't there, there's no problem.\n\t\t\t\tconflictee_obj = self.shutit_map.get(conflictee)\n\t\t\t\tif conflictee_obj is None:\n\t\t\t\t\tcontinue\n\t\t\t\tif ((cfg[conflicter.module_id]['shutit.core.module.build'] or\n\t\t\t\t self.is_to_be_built_or_is_installed(conflicter)) and\n\t\t\t\t (cfg[conflictee_obj.module_id]['shutit.core.module.build'] or\n\t\t\t\t self.is_to_be_built_or_is_installed(conflictee_obj))):\n\t\t\t\t\terrs.append(('conflicter module id: ' + conflicter.module_id + ' is configured to be built or is already built but conflicts with module_id: ' + conflictee_obj.module_id,))\n\t\treturn errs",
"def check_influence_sanity(self):\n for influence in crest.get_all_influences(self.model):\n assert influence._name is not None, f\"There is an Influence in {influence._parent._name} ({influence._parent.__class__.__name__}) whose name is 'None'\"\n assert influence._name != \"\", f\"There is an Update in {influence._parent._name} ({influence._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(influence.source, crest.Port), f\"Influence {influence._name}'s source is not a crest.Port\"\n assert influence.source in api.get_sources(influence._parent), f\"Influence's source {influence.source._name} ({influence.source}) is not in the sources of entity {influence._parent._name} ({influence._parent})\"\n\n assert isinstance(influence.target, crest.Port), f\"Influence {influence._name}'s target is not a crest.Port\"\n assert influence.target in api.get_targets(influence._parent), f\"Influence's target {influence.target._name} ({influence.target}) is not in the targets of entity {influence._parent._name} ({influence._parent})\"\n\n assert isinstance(influence.function, (crestml.LearnedFunction, types.FunctionType)), f\"Influence {influence._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert len(inspect.signature(influence.function).parameters) == 1, f\"An influence should not have arguments (except the input value)\"",
"def checkConflicts(self):\n\t\tapDisplay.printError(\"you did not create a 'checkConflicts' function in your script\")\n\t\traise NotImplementedError()",
"def violated(self) -> bool:\n ...",
"def checkSpikeBonding (self):\r\n stable = True # If any bonds break this will be set to false\r\n stabilityChecker = True # Checks the result of each function call, if set to false then stable will be set to false\r\n # Go through each atom\r\n for i in range(len(self.mol)):\r\n # Go through each spike\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.mol[i].spikeArray[j].bonded == True:\r\n stabilityChecker = self.stabilitySpike(self.mol[i].spikeArray[j])\r\n if stabilityChecker == False:\r\n stable = False\r\n #print (stable)\r\n if stable == True:\r\n print(\"No Bonds have broken \\n\")\r\n else:\r\n print (\"Bonds have broken \\n\")\r\n return stable",
"def _resolve_ball_collisions(self) -> bool:\n\n bln_naughty = True\n lng_naughty_loop_count = 0\n lng_naughty_loop_limit = 10\n while bln_naughty:\n lng_naughty_loop_count += 1\n if lng_naughty_loop_count > lng_naughty_loop_limit:\n return False\n bln_naughty = False\n\n \"\"\" Ball vs Ball \"\"\"\n for sprBall1, sprBall2 in TrashyPhysics.collision_pairs_self(\n self.grpBalls, fncCollided=TrashyPhysics.balls_collided):\n bln_naughty = True\n TrashyPhysics.bounce_balls(sprBall1, sprBall2)\n\n \"\"\" Ball vs Bot \"\"\"\n for sprBall, sprRobot in TrashyPhysics.collision_pairs(\n self.grpBalls, self.grpRobots,\n fncCollided=TrashyPhysics.ball_robot_collided):\n bln_naughty = True\n TrashyPhysics.bounce_ball_off_bot(sprRobot, sprBall)\n\n \"\"\" Ball vs Wall \"\"\"\n for ball in filter(lambda x: TrashyPhysics.collided_wall(x), self.lstBalls):\n bln_naughty = True\n TrashyPhysics.bounce_ball_off_wall(ball)\n\n \"\"\" Ball vs Bumper \"\"\"\n # todo\n\n return True",
"def has_bond_crossing(self):\n return self.count_bond_collisions() > 0",
"def decide_infect(self, other):\n if (self._is_infected and not other._is_infected):\n if random.random() < self._transmission_prob and random.random() < other._infection_prob:\n other._is_infected = True\n\n if other._is_infected and not self._is_infected:\n if random.random() < other._transmission_prob and random.random() < self._infection_prob:\n self._is_infected = True",
"def refine(self): # pylint: disable=R0201\n return True",
"def isInternal(self):\n if self.data.depend_er_job == self.data.depend_on_job:\n return True\n return False",
"def fusable(self) -> bool:\n if not self._pre_check() or not self.has_crossing_len2_ob():\n return False\n new_tiling = self._tiling.add_obstructions(self.obstructions_to_add())\n\n return (\n self._tiling == new_tiling\n and self._check_isolation_level()\n and all(\n self._can_component_fuse_assumption(assumption)\n for assumption in self._tiling.assumptions\n )\n )",
"def updateInconsistency(self, x : pd.Series):\n problemname = x.get(Key.ProblemName)\n pb = x.get(Key.PrimalBound)\n db = x.get(Key.DualBound)\n\n obs = self.getObjSense(problemname, x)\n\n if pd.isnull(obs):\n obs = ObjectiveSenseCode.MINIMIZE\n\n if not problemname:\n return\n\n\n #\n # for inconsistency checks, we only consider problems that are consistent\n # with the reference information.\n #\n if self.isReferenceConsistent(x) != ProblemStatusCodes.Ok:\n return\n\n # do not trust versions/settings/solvers that returned an infeasible solution\n if self.isSolInfeasible(x) or (not pd.isnull(pb) and not self.isSolFeasible(x)):\n return\n\n pb = self.getPbValue(pb, obs)\n db = self.getDbValue(db, obs)\n bestpb = self.bestpb.get(problemname, np.inf if obs == ObjectiveSenseCode.MINIMIZE else -np.inf)\n bestpb = min(bestpb, pb) if obs == ObjectiveSenseCode.MINIMIZE else max(bestpb, pb)\n\n bestdb = self.bestdb.get(problemname, -np.inf if obs == ObjectiveSenseCode.MINIMIZE else np.inf)\n if x.get(Key.SolverStatus) == SolverStatusCodes.Infeasible:\n db = infty() if obs == ObjectiveSenseCode.MINIMIZE else -infty()\n\n bestdb = max(bestdb, db) if obs == ObjectiveSenseCode.MINIMIZE else min(bestdb, db)\n\n if (obs == ObjectiveSenseCode.MINIMIZE and not self.isLE(bestdb, bestpb)) or (obs == ObjectiveSenseCode.MAXIMIZE and not self.isGE(bestdb, bestpb)):\n self.inconsistentset.add(problemname)\n else:\n self.bestdb[problemname] = bestdb\n self.bestpb[problemname] = bestpb",
"def refine_conflict(self):\n # Start refine conflict\n self._check_status(STATUS_IDLE)\n self._set_status(STATUS_REFINING_CONFLICT)\n self._notify_listeners_start_operation(listener.OPERATION_REFINE_CONFLICT)\n\n # Ensure cpo model is generated with all constraints named\n namecstrs = self.context.model.name_all_constraints\n if not namecstrs:\n self.context.model.name_all_constraints = True\n self.cpostr = None\n self.agent.solver.model_sent = False\n\n # Refine conflict\n msol = self.agent.refine_conflict()\n\n # Restore previous name constraints indicator\n self.context.model.name_all_constraints = namecstrs\n\n # Call listeners with conflict result\n for lstnr in self.listeners:\n lstnr.new_result(self, msol)\n\n # End refine conflict\n self._set_status(STATUS_IDLE)\n self._notify_listeners_end_operation()\n\n return msol",
"def inferrable(self) -> bool:\n return self._strategy.inferrable",
"def partial_change(self):\n return self.attempted_change() and not all(self._get_field_data())",
"def b3_correctness(el_a, el_b, system_el2kbid, gold_el2kbid):\n correct = False\n\n if(inSameSet(el_a, el_b, system_el2kbid) and \n inSameSet(el_a, el_b, gold_el2kbid) and\n sameLinking(el_a, el_b, system_el2kbid, gold_el2kbid) #THIS CONDITION DEPARTS FROM THE ORIGINAL BCUBED (extesion for the Entity Linking problem)\n ):\n correct = True\n\n return correct",
"def attempted_change(self):\n return any(self._get_field_data())",
"def checkForSideChangeRequest(self):\n inThirdRound = self.wonRounds[\"Team1\"] == 1 and self.wonRounds[\"Team2\"] == 1\n oneTeamAt11AndOtherTeamUnder11 = (self.counter[\"Team1\"] == 11 and self.counter[\"Team2\"] < 11) or\\\n (self.counter[\"Team2\"] == 11 and self.counter[\"Team1\"] < 11)\n if inThirdRound and oneTeamAt11AndOtherTeamUnder11:\n self.__notifySideChangeRequest()",
"def fix_has_no_advisory(self):\n fixed_in = self.fixed_artifact()\n return fixed_in and fixed_in.vendor_no_advisory",
"def fail(self):\n rows, cols, _ = self.bird.img.shape\n # find the top-left coordinates of bird image\n x_b, y_b = self.bird.x + self.env.pad - cols//2, max(self.bird.y + self.env.pad - rows//2, 0)\n \n # check if the bird square intersects with some environment obstacles\n isCollision = (self.env.occ[y_b:y_b + rows, x_b:x_b + cols]).any()\n \n return isCollision",
"def check_stability(self):\n\n blocking_pairs = []\n for resident in self.residents:\n for hospital in self.hospitals:\n if (\n _check_mutual_preference(resident, hospital)\n and _check_resident_unhappy(resident, hospital)\n and _check_hospital_unhappy(resident, hospital)\n ):\n blocking_pairs.append((resident, hospital))\n\n self.blocking_pairs = blocking_pairs\n return not any(blocking_pairs)",
"def check_bollinger(self):\n upper, lower = self.bollinger_bands()\n if self.daily['Adj Close'][-1] > upper[-1]:\n self.debug += '\\nAbove upper bollinger: sells + 1'\n self.sells += 1\n elif self.daily['Adj Close'][-1] < lower[-1]:\n self.debug += '\\nBelow lower bollinger: buys + 1'\n self.buys += 1",
"def need_attention(self):\n msg = [\"not staged\", \"behind\", \"ahead\", \"Untracked\"]\n status_msg = self.status()\n if any([each in status_msg for each in msg]):\n return True\n return False",
"def high_business_impact(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"high_business_impact\")",
"def high_business_impact(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"high_business_impact\")",
"def _check_collisions(self):\n\t\tif pygame.sprite.spritecollide(\n\t\t\tself.bolan, \n\t\t\tself.obstacles.obstacles,\n\t\t\tFalse, \n\t\t\tpygame.sprite.collide_mask):\n\t\t\t\tself.is_play = False\n\t\t\t\tself.is_gameover = True\n\t\t\t\tself.bolan.image = self.settings.bolan_dead_image",
"def collide_with_flower(self, flower):\n pass"
] | [
"0.60755587",
"0.5997811",
"0.5935477",
"0.5858629",
"0.57589734",
"0.56303644",
"0.5552522",
"0.55062824",
"0.54827005",
"0.54471344",
"0.5424391",
"0.54060894",
"0.53555745",
"0.53466696",
"0.53354824",
"0.5331523",
"0.5313019",
"0.530491",
"0.5267892",
"0.52595043",
"0.5228276",
"0.5223606",
"0.5222479",
"0.5220137",
"0.5218231",
"0.51882666",
"0.51552117",
"0.51552117",
"0.514843",
"0.51482075"
] | 0.66045004 | 0 |
quad(f, a, b) -> \int_a^b f(x) dx. Uses some quadrature rule to evaluate the integral. | def quad(f, a, b):
S, D = (b+a)/2.0, (b-a)/2.0
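    # S is the midpoint and D the half-width of [a, b]; x -> x*D + S maps the
    # quadrature rule's reference interval onto [a, b], and the factor D in
    # rescaled_f is the Jacobian of that change of variables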
def rescaled_f(x):
return f(x*D + S)*D
return sum(w * rescaled_f(p) for w, p in zip(quad_weights, quad_points)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def quad(func, a, b, args=()):\n\tx_units = a.units\n\tf_units = func(.5*(a+b)).units\n\n\tI, abserr = sciquad(\n\t\tlambda x : func(x*x_units).to(f_units).magnitude,\n\t\ta.magnitude, b.to(x_units).magnitude,\n\t\targs)\n\n\treturn I*x_units*f_units, abserr*x_units*f_units",
"def add_quad(a, b):\n s = np.sqrt(np.square(a) + np.square(b))\n return s",
"def integrate_f_from0(b):\n integral, err = scipy.integrate.quad(f, 0, b)\n return integral",
"def complex_quadrature(func, a, b, **kwargs):\n\n def real_func(x):\n return scipy.real(func(x))\n\n def imag_func(x):\n return scipy.imag(func(x))\n\n real_integral = quad(real_func, a, b, **kwargs)\n imag_integral = quad(imag_func, a, b, **kwargs)\n return (real_integral[0] + 1j * imag_integral[0], real_integral[1:], imag_integral[1:])",
"def quad(self, b):\n return b.T @ self.solve(b)",
"def integrate(f, inf_lim, sup_lim):\n function = get_function_from_text(f)\n return sp_integrate.quad(function, inf_lim, sup_lim)[0]",
"def integrate_fun(fun: Callable, low_b: float, upp_b: float) -> float:\n return integrate.quad(fun, low_b, upp_b)[0]",
"def complex_integral(self,func,a,b):\r\n \r\n import scipy\r\n from scipy import array\r\n \r\n def quad_routine(func, a, b, x_list, w_list):\r\n c_1 = (b-a)/2.0\r\n c_2 = (b+a)/2.0\r\n eval_points = map(lambda x: c_1*x+c_2, x_list)\r\n func_evals = list(map(func, eval_points)) # Python 3: make a list here\r\n return c_1 * sum(array(func_evals) * array(w_list))\r\n \r\n def quad_gauss_7(func, a, b):\r\n x_gauss = [-0.949107912342759, -0.741531185599394, -0.405845151377397, 0, 0.405845151377397, 0.741531185599394, 0.949107912342759]\r\n w_gauss = array([0.129484966168870, 0.279705391489277, 0.381830050505119, 0.417959183673469, 0.381830050505119, 0.279705391489277,0.129484966168870])\r\n return quad_routine(func,a,b,x_gauss, w_gauss)\r\n \r\n def quad_kronrod_15(func, a, b):\r\n x_kr = [-0.991455371120813,-0.949107912342759, -0.864864423359769, -0.741531185599394, -0.586087235467691,-0.405845151377397, -0.207784955007898, 0.0, 0.207784955007898,0.405845151377397, 0.586087235467691, 0.741531185599394, 0.864864423359769, 0.949107912342759, 0.991455371120813]\r\n w_kr = [0.022935322010529, 0.063092092629979, 0.104790010322250, 0.140653259715525, 0.169004726639267, 0.190350578064785, 0.204432940075298, 0.209482141084728, 0.204432940075298, 0.190350578064785, 0.169004726639267, 0.140653259715525, 0.104790010322250, 0.063092092629979, 0.022935322010529]\r\n return quad_routine(func,a,b,x_kr, w_kr)\r\n \r\n class Memorize: # Python 3: no need to inherit from object\r\n def __init__(self, func):\r\n self.func = func\r\n self.eval_points = {}\r\n def __call__(self, *args):\r\n if args not in self.eval_points:\r\n self.eval_points[args] = self.func(*args)\r\n return self.eval_points[args]\r\n \r\n def quad(func,a,b):\r\n ''' Output is the 15 point estimate; and the estimated error '''\r\n func = Memorize(func) # Memorize function to skip repeated function calls.\r\n g7 = quad_gauss_7(func,a,b)\r\n k15 = quad_kronrod_15(func,a,b)\r\n # I don't have much faith in this error estimate taken from wikipedia\r\n # without incorporating how it should scale with changing limits\r\n return [k15, (200*scipy.absolute(g7-k15))**1.5]\r\n \r\n return quad(func,a,b)",
"def complex_integral(self,func,a,b):\r\n \r\n import scipy\r\n from scipy import array\r\n \r\n def quad_routine(func, a, b, x_list, w_list):\r\n c_1 = (b-a)/2.0\r\n c_2 = (b+a)/2.0\r\n eval_points = map(lambda x: c_1*x+c_2, x_list)\r\n func_evals = list(map(func, eval_points)) # Python 3: make a list here\r\n return c_1 * sum(array(func_evals) * array(w_list))\r\n \r\n def quad_gauss_7(func, a, b):\r\n x_gauss = [-0.949107912342759, -0.741531185599394, -0.405845151377397, 0, 0.405845151377397, 0.741531185599394, 0.949107912342759]\r\n w_gauss = array([0.129484966168870, 0.279705391489277, 0.381830050505119, 0.417959183673469, 0.381830050505119, 0.279705391489277,0.129484966168870])\r\n return quad_routine(func,a,b,x_gauss, w_gauss)\r\n \r\n def quad_kronrod_15(func, a, b):\r\n x_kr = [-0.991455371120813,-0.949107912342759, -0.864864423359769, -0.741531185599394, -0.586087235467691,-0.405845151377397, -0.207784955007898, 0.0, 0.207784955007898,0.405845151377397, 0.586087235467691, 0.741531185599394, 0.864864423359769, 0.949107912342759, 0.991455371120813]\r\n w_kr = [0.022935322010529, 0.063092092629979, 0.104790010322250, 0.140653259715525, 0.169004726639267, 0.190350578064785, 0.204432940075298, 0.209482141084728, 0.204432940075298, 0.190350578064785, 0.169004726639267, 0.140653259715525, 0.104790010322250, 0.063092092629979, 0.022935322010529]\r\n return quad_routine(func,a,b,x_kr, w_kr)\r\n \r\n class Memorize: # Python 3: no need to inherit from object\r\n def __init__(self, func):\r\n self.func = func\r\n self.eval_points = {}\r\n def __call__(self, *args):\r\n if args not in self.eval_points:\r\n self.eval_points[args] = self.func(*args)\r\n return self.eval_points[args]\r\n \r\n def quad(func,a,b):\r\n ''' Output is the 15 point estimate; and the estimated error '''\r\n func = Memorize(func) # Memorize function to skip repeated function calls.\r\n g7 = quad_gauss_7(func,a,b)\r\n k15 = quad_kronrod_15(func,a,b)\r\n # I don't have much faith in this error estimate taken from wikipedia\r\n # without incorporating how it should scale with changing limits\r\n return [k15, (200*scipy.absolute(g7-k15))**1.5]\r\n \r\n return quad(func,a,b)",
"def evalQuad(a,b,c,x):\n return a * x**2 + b*x + c",
"def quad(*args, **kwargs):\n return (42, 0.001)",
"def sp_integrate_1D ( func , xmin , xmax , *args , **kwargs ) : \n from scipy import integrate\n ##\n result = integrate.quad ( func , xmin , xmax , *args , **kwargs )\n return result[0]",
"def integralFunction(xa, ya, xb, yb):\n return psi(xb, yb) - psi(xa, ya)",
"def piecewise_integrate(x, y, a, b):\n assert x[0] == a\n assert x[-1] <= b\n output = 0.\n num_x = len(x)\n if x[-1] == b:\n for idx in range(num_x - 1):\n output += y[idx] * (x[idx+1] - x[idx])\n else:\n for idx in range(num_x):\n if idx < num_x - 1:\n output += y[idx] * (x[idx+1] - x[idx])\n else:\n output += y[idx] * (b - x[idx])\n return output",
"def test_quad1():\n xi = np.array([-1., 0., 2.])\n yi = np.array([ 1., -1., 7.])\n c = quad_interp(xi,yi)\n c_true = np.array([-1., 0., 2.])\n print(\"c = \", c)\n print(\"c_true = \", c_true)\n # test that all elements have small error:\n assert np.allclose(c, c_true), \\\n \"Incorrect result, c = %s, Expected: c = %s\" % (c,c_true)",
"def f1(x, a, b):\n #return x**43 - b*x**42 + x**7 - x**6 * a + 84*x - 42 * b - 42 * a\n return (x**42 + 42)/(x-a) + (x**6 + 42)/(x-b)",
"def quad_interp(x,y,xi) :\n f = interp1d(x,y,kind='quadratic')\n yi = f(xi)\n \n return yi",
"def calc_quad(self,mw,A0,A1,A2): \n return (A0 + A1 * mw + A2 * mw**2)",
"def integrate_gausskronrod(f, a, b, args=()):\n\n assert b > a\n mid = 0.5*(b+a)\n dx = 0.5*(b-a)\n zi = mid+gausskronrod_nodes*dx\n integrand = f(zi)\n integral_G7 = np.sum(integrand[:7]*gauss_weights)\n integral_K15 = np.sum(integrand*kronrod_weights)\n\n error = (200*abs(integral_G7-integral_K15))**1.5\n\n return integral_K15*dx, dx*error",
"def sp_integrate_2D ( func ,\n xmin , xmax ,\n ymin , ymax , *args , **kwargs ) :\n from scipy import integrate\n ##\n result = integrate.dblquad ( func ,\n ymin ,\n ymax ,\n lambda x : xmin ,\n lambda x : xmax , \n *args , **kwargs )\n return result[0]",
"def integrate(x, y, xmin, xmax):\n indexes = get_interval(x, xmin, xmax)\n integral = np.trapz(y[indexes], x[indexes])\n\n return integral",
"def _quad_function(order, *args, **kws):\n params = parameters_spec.copy()\n params.update(kws)\n return quad_function(order, *args, **params)",
"def integrate_using_univariate_gauss_legendre_quadrature_unbounded(\n integrand, lb, ub, nquad_samples, atol=1e-8, rtol=1e-8,\n interval_size=2, max_steps=1000, verbose=0, adaptive=True,\n soft_error=False, tabulated_quad_rules=None):\n if interval_size <= 0:\n raise ValueError(\"Interval size must be positive\")\n\n if np.isfinite(lb) and np.isfinite(ub):\n partial_lb, partial_ub = lb, ub\n elif np.isfinite(lb) and not np.isfinite(ub):\n partial_lb, partial_ub = lb, lb+interval_size\n elif not np.isfinite(lb) and np.isfinite(ub):\n partial_lb, partial_ub = ub-interval_size, ub\n else:\n partial_lb, partial_ub = -interval_size/2, interval_size/2\n\n result = __integrate_using_univariate_gauss_legendre_quadrature_bounded(\n integrand, partial_lb, partial_ub, nquad_samples, rtol,\n atol, verbose-1, adaptive, tabulated_quad_rules)\n\n step = 0\n partial_result = np.inf\n plb, pub = partial_lb-interval_size, partial_lb\n while (np.any(np.absolute(partial_result) >= rtol*np.absolute(result)+atol)\n and (plb >= lb) and step < max_steps):\n partial_result = \\\n __integrate_using_univariate_gauss_legendre_quadrature_bounded(\n integrand, plb, pub, nquad_samples, rtol, atol,\n verbose-1, adaptive, tabulated_quad_rules)\n result += partial_result\n pub = plb\n plb -= interval_size\n step += 1\n if verbose > 1:\n print('Left', step, result, partial_result, plb, pub,\n interval_size)\n if verbose > 0:\n if step >= max_steps:\n msg = \"Early termination when computing left integral\"\n msg += f\"max_steps {max_steps} reached\"\n if soft_error is True:\n warn(msg, UserWarning)\n else:\n raise RuntimeError(msg)\n if np.all(np.abs(partial_result) < rtol*np.absolute(result)+atol):\n msg = f'Tolerance {atol} {rtol} for left integral reached in '\n msg += f'{step} iterations'\n print(msg)\n\n step = 0\n partial_result = np.inf\n plb, pub = partial_ub, partial_ub+interval_size\n while (np.any(np.absolute(partial_result) >= rtol*np.absolute(result)+atol)\n and (pub <= ub) and step < max_steps):\n partial_result = \\\n __integrate_using_univariate_gauss_legendre_quadrature_bounded(\n integrand, plb, pub, nquad_samples, rtol, atol,\n verbose-1, adaptive, tabulated_quad_rules)\n result += partial_result\n plb = pub\n pub += interval_size\n step += 1\n if verbose > 1:\n print('Right', step, result, partial_result, plb, pub,\n interval_size)\n if verbose > 0:\n if step >= max_steps:\n msg = \"Early termination when computing right integral. \"\n msg += f\"max_steps {max_steps} reached\"\n if soft_error is True:\n warn(msg, UserWarning)\n else:\n raise RuntimeError(msg)\n if np.all(np.abs(partial_result) < rtol*np.absolute(result)+atol):\n msg = f'Tolerance {atol} {rtol} for right integral reached in '\n msg += f'{step} iterations'\n print(msg)\n # print(partial_result, plb, pub)\n\n return result",
"def quadratures(f, a=-1, b=1, n=30):\n nodes, weights = gauss_legender_points(n)\n w = to.tensor(weights.reshape(1, 1, -1))\n nodes = to.tensor(nodes.reshape(1, 1, -1))\n\n scale = (b - a) / 2.\n\n x = scale * nodes + (b + a) / 2.\n y = w * f(x)\n y = to.sum(scale * y, dim=-1)\n return y.type(dtype=to.float)",
"def isqrt( a, b ):\n return a*a - b",
"def trapezoid_integral(f, xrange, intervals):\n \n a, b = min(xrange), max(xrange)\n delta_x = (b-a)/intervals\n x = np.arange(1, intervals)\n \n int_out = f(a)\n int_out += f(b)\n int_out += sum(2*f(a+x*delta_x))\n \n return delta_x/2*int_out",
"def integrate(func, a, b, tol=1e-8):\n left_pts = []\n result = integ(func, a, b, tol, 0, left_pts)\n\n return result, left_pts",
"def integrate(f, a, b, args=(), minintervals=1, limit=200, tol=1e-10):\n fv = np.vectorize(f)\n\n intervals = []\n\n limits = np.linspace(a, b, minintervals+1)\n for left, right in zip(limits[:-1], limits[1:]):\n I, err = integrate_gausskronrod(fv, left, right, args)\n bisect.insort(intervals, (err, left, right, I))\n\n while True:\n Itotal = sum([x[3] for x in intervals])\n err2 = sum([x[0]**2 for x in intervals])\n err = sqrt(err2)\n\n if abs(err/Itotal) < tol:\n return Itotal, err\n\n # no convergence\n if len(intervals) >= limit:\n return False # better to raise an exception\n\n err, left, right, I = intervals.pop()\n\n # split integral\n mid = left+(right-left)/2\n\n # calculate integrals and errors, replace one item in the list and\n # append the other item to the end of the list\n I, err = integrate_gausskronrod(fv, left, mid, args)\n bisect.insort(intervals, (err, left, mid, I))\n I, err = integrate_gausskronrod(fv, mid, right, args)\n bisect.insort(intervals, (err, mid, right, I))",
"def quad_interp(xi,yi):\n\n # check inputs and print error message if not valid:\n\n error_message = \"xi and yi should have type numpy.ndarray\"\n assert (type(xi) is np.ndarray) and (type(yi) is np.ndarray), error_message\n\n error_message = \"xi and yi should have length 3\"\n assert len(xi)==3 and len(yi)==3, error_message\n\n error_message = \"it is not possible to have more than one point in the with the same xi\"\n assert (len(np.unique(xi)) == len(xi)), error_message\n\n # Set up linear system to interpolate through data points:\n\n A = np.array([[1, 1, 1], xi, xi**2]).T\n b = yi\n\n c = solve(A,b)\n \n return c",
"def integral(self, f_bin, gamma, malm_pars=np.array([1.]), Pobs=0.00711310498183):\n s = 0.0\n for alpha, beta in zip(self.alpha_vals, self.beta_vals):\n #arg_list = [gamma, alpha, beta, self.low_q, self.high_q, len(malm_pars)]\n arg_list = [gamma, f_bin, Pobs, alpha, beta, 0.0, 1.0, len(malm_pars)]\n arg_list.extend(malm_pars)\n s += quad(self.c_integrand, self.low_q, self.high_q, args=tuple(arg_list))[0]\n #return s*f_bin\n return s\n #return f_bin * np.sum([quad(self.c_integrand, 0, 1, args=arg_list)[0] for alpha, beta in\n # zip(self.alpha_vals, self.beta_vals)])"
] | [
"0.7321034",
"0.66224813",
"0.6437059",
"0.64194804",
"0.62829626",
"0.62496614",
"0.6247555",
"0.6206842",
"0.6206842",
"0.6199654",
"0.61690414",
"0.6163885",
"0.61347663",
"0.6107581",
"0.60302866",
"0.60202134",
"0.6018099",
"0.6005248",
"0.60019904",
"0.5973936",
"0.59257406",
"0.58599657",
"0.58041286",
"0.58012384",
"0.5800743",
"0.5697578",
"0.5657789",
"0.5613508",
"0.5597451",
"0.5479812"
] | 0.7673257 | 0 |
List or create friend requests. Create an unconfirmed friendship between two users, or return all friendships which are not yet confirmed for the current user. | def create_friend_request():
if request.method == "GET":
friend_requests = [f.to_dict() for f in g.user.get_friend_requests()]
return jsonify({'success': True, 'friend_requests': friend_requests})
if request.method == "POST":
# Get recieving user id from request
json = request.get_json()
if json is None:
            raise CustomError(400, message="No JSON included or Content-Type "
"is not application/json")
if 'recieving_user_id' not in json:
raise CustomError(400, message="Must include recieving_user_id")
recieving_user_id = json['recieving_user_id']
# Get the user object
recieving_user = User.query.get(recieving_user_id)
if recieving_user is None:
raise CustomError(
404,
message='User with id: {} was not found.'.format(
recieving_user_id)
)
# Check friendship does not already exist
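        # (the symmetric filter matches the pair no matter which user initiated the request)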
friendship_exists = Friendship.query.filter(
(Friendship.actioning_user_id == g.user.id) |
(Friendship.recieving_user_id == g.user.id),
(Friendship.actioning_user_id == recieving_user_id) |
(Friendship.recieving_user_id == recieving_user_id)
).first()
if friendship_exists:
raise CustomError(
409,
                message="There is either a pending friend request between the "
"two users or the two users are already friends."
)
# Insert friend request
friend_request = Friendship(g.user, recieving_user)
db.session.add(friend_request)
db.session.commit()
return jsonify({'success': True}), 201 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_pending_friendships(self):\n url = 'friendships/pending/'\n return self.send_request(url)",
"def get_friend_requests(self, user):\n return self.filter(addresser_user=user, status=Friendship.STATUS_PENDING, active=True)",
"def friend_request():\n if 'username' not in session:\n return redirect('/login?type=0')\n user1 = session['username']\n user2 = request.form['username']\n now_time = Time.time()\n if not re.search(ID_REG, user2) and user2 != 'admin':\n return jsonify(res=-4)\n # check friend\n with sqlite3.connect('data.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT * FROM friend WHERE user1 = ? AND user2 = ?', [user1, user2])\n if cur.fetchall() or user1 == user2:\n return jsonify(res=-1)\n cur.execute('SELECT * FROM friend_request WHERE user1 = ? AND user2 = ?', [user1, user2])\n if cur.fetchall():\n return jsonify(res=-2)\n cur.execute('SELECT * FROM friend_request WHERE user1 = ? AND user2 = ?', [user2, user1])\n if cur.fetchall():\n return jsonify(res=-3)\n cur.execute('SELECT * FROM user_login WHERE username = ?', [user2])\n if not cur.fetchall():\n return jsonify(res=-4)\n cur.execute('INSERT INTO friend_request VALUES (?, ?, ?)', [user1, user2, now_time])\n conn.commit()\n return jsonify(res=0)",
"def list(self, request):\n\n user_profile = get_object_or_404(UserProfile, user=request.user)\n # Get all sent accepted invitations\n sent = user_profile.creator_friendships.filter(status=1)\n # Get all received accepted invitations\n received = user_profile.invited_friendships.filter(status=1)\n # Combine results to get all friends:\n friends = []\n for friendship in sent:\n friends.append(UserProfileSerializer(friendship.user_2).data)\n for friendship in received:\n friends.append(UserProfileSerializer(friendship.user_1).data)\n return Response(friends, status=rest_status.HTTP_200_OK)",
"def create(self, request):\n\n invited_email = request.data.get(\"email\")\n status = request.data.get(\"status\", False)\n if not invited_email:\n return Response(status=rest_status.HTTP_404_NOT_FOUND)\n try:\n invited_user = UserProfile.objects.get(user__email=invited_email)\n except UserProfile.DoesNotExist:\n return Response(status=rest_status.HTTP_404_NOT_FOUND)\n\n user_sending = get_object_or_404(UserProfile, user=request.user)\n\n if user_sending == invited_user:\n return Response(status=rest_status.HTTP_404_NOT_FOUND)\n\n error = \"\"\n try:\n friendship, _created = FriendShip.objects.get_or_create(\n user_1=user_sending, user_2=invited_user, status=status\n )\n if not _created:\n if friendship.status:\n error = _(\"You already are friend with this user\")\n else:\n error = _(\"A pending invitation is already created\")\n except Exception:\n error = _(\n f\"An error occured when user {user_sending.user.email} invited {invited_user.user.email}\"\n )\n\n data = {}\n status = rest_status.HTTP_200_OK\n if error:\n status = rest_status.HTTP_400_BAD_REQUEST\n data[\"message\"] = error\n else:\n serializer = FriendShipSerializer(friendship)\n data[\"message\"] = \"OK\"\n data[\"content\"] = serializer.data\n return Response(data, status=status)",
"def friendship_request_list(request, template_name='/friend/requests_list.html'):\n # friendship_requests = Friend.objects.requests(request.user)\n friendship_requests = FriendshipRequest.objects.filter(rejected__isnull=True)\n\n return render(request, template_name, {'requests': friendship_requests})",
"def dispatch(self, request, *args, **kwargs):\n user_to = User.objects.get(pk=kwargs['pk'])\n user_from = self.request.user\n ###\n if user_to not in wanna_be_friends(user_from):\n friendship = FriendshipInvitation.objects.create(\n from_user=user_from, to_user=user_to, status=\"0\")\n\n notif = Notification.objects.create(sender=user_from,\n receiver=user_to,\n notif_type='friend_request')\n # Aca se ha enviado la solicitud\n else:\n return HttpResponseRedirect(\"/fr_req_fail/\")\n return HttpResponseRedirect(\"/\")",
"def accept_request(request, id):\n f_request = FriendRequest.objects.get(id=id)\n if f_request.to_user == request.user:\n f_request.to_user.profile.friends.add(f_request.from_user)\n f_request.from_user.profile.friends.add(f_request.to_user)\n f_request.delete()\n messages.success(\n request,\n f'Your friend request was successfully accepted'\n )\n return redirect('profiles:my_friends')",
"def request_to_be_friends(self, user_id, target_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n if target_id is None or len(target_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.create_pending_friend_request(user_id, target_id)",
"def user_send_friend_request(self):\n try:\n assert request.is_json\n except AssertionError:\n self.logger.debug(messages.REQUEST_IS_NOT_JSON)\n return messages.ERROR_JSON % messages.REQUEST_IS_NOT_JSON, 400\n content = request.get_json()\n if not FRIEND_REQUEST_MANDATORY_FIELDS.issubset(content.keys()):\n self.logger.debug(messages.MISSING_FIELDS_ERROR % (FRIEND_REQUEST_MANDATORY_FIELDS - set(content.keys())))\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % (\n FRIEND_REQUEST_MANDATORY_FIELDS - set(content.keys())), 400\n email_token = auth.current_user()[0]\n try:\n self.friend_database.create_friend_request(email_token, content[\"other_user_email\"])\n except UnexistentTargetUserError:\n self.logger.debug(messages.USER_NOT_FOUND_MESSAGE % content[\"other_user_email\"])\n return messages.ERROR_JSON % (messages.USER_NOT_FOUND_MESSAGE % content[\"other_user_email\"]), 404\n except UsersAlreadyFriendsError:\n self.logger.debug(messages.USERS_ALREADY_FRIEND_ERROR)\n return messages.ERROR_JSON % messages.USERS_ALREADY_FRIEND_ERROR, 400\n except UnexistentRequestorUserError:\n self.logger.debug(messages.INTERNAL_ERROR_CONTACT_ADMINISTRATION)\n return messages.ERROR_JSON % messages.INTERNAL_ERROR_CONTACT_ADMINISTRATION, 500\n self.notification_database.notify(content[\"other_user_email\"],\n \"New friendship request\", \"From %s\" % email_token,\n {\"kind\": \"friendship_request\",\n \"from\": email_token})\n return messages.SUCCESS_JSON, 200",
"def friends(self):\n #Guillaume\n friends_list = []\n received = Friendships.objects.filter(request_for=self, status='A')\n for friend in received:\n friends_list.append(friend.request_from)\n sent = Friendships.objects.filter(request_from=self, status='A')\n for friend in sent:\n friends_list.append(friend.request_for)\n return friends_list",
"def get_pending_friends(cu_id):\n users = db.session.execute(\n \"\"\"select fr.user_1_id, u.username, u.firstname, u.lastname\n from friend_request as fr inner join userm as u on fr.user_1_id = u.id \n where fr.user_2_id = :cu_id\n and fr.approved is NULL\"\"\",\n {\"cu_id\": cu_id}\n )\n return users",
"def create(self, request):\n friend_obj = Friend.objects.add_friend(\n request.user, # The sender\n get_object_or_404(User, pk=request.data['user_id']), # The recipient\n message=request.data.get('message', '')\n )\n\n return Response(\n FriendshipRequestSerializer(friend_obj).data,\n status.HTTP_201_CREATED\n )",
"def pending_invitations(self, request):\n\n user_profile = get_object_or_404(UserProfile, user=request.user)\n # Get all sent pending invitation\n sent = user_profile.creator_friendships.filter(status=0)\n # Get all received pending invitation\n received = user_profile.invited_friendships.filter(status=0)\n # Serialize all and create a dict from it\n data = {\"sent\": [], \"received\": []}\n for friendship in sent:\n data[\"sent\"].append(FriendShipSerializer(friendship).data)\n for friendship in received:\n data[\"received\"].append(FriendShipSerializer(friendship).data)\n # Return response with these 2 informations\n return Response(data, status=rest_status.HTTP_200_OK)",
"def get_accepted_friend_requests(self, user):\n \n return self.filter(addresser_user=user, status=Friendship.STATUS_ACCEPTED, active=True)",
"def confirm_request_to_be_friends(self, user_id, target_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n if target_id is None or len(target_id) == 0:\n raise Exception(\"Bad parameter.\")\n\n if self.database.delete_pending_friend_request(user_id, target_id):\n return self.database.create_friend(user_id, target_id)\n return False",
"async def send_friend_request(self):\n\n logging.debug(\"Sending friend request to \" + self.username)\n\n if self.is_friend:\n raise ObjectErrors.AlreadyFriends(\n \"You are already friends with \" + self.display_name)\n\n await self.client.request.post(\n \"/user/%s/friendRequest\" % self.id)",
"def post(self):\n\t\tdb = getattr(g, 'db', None)\n\t\tobj = request.get_json()\n\n\t\tif ('username' not in obj) or ('session' not in obj):\n\t\t\treturn {'status':'MISSING_PARAMS'}\n\t\telif not authenticate(obj['username'],obj['session']):\n\t\t\treturn {'status':'AUTH_FAIL'}\n\t\telif ('action' not in obj):\n\t\t\treturn {'status':'MISSING_PARAMS'}\n\t\telse:\n\t\t\taction = obj['action']\n\t\t\tif action == 'ADD' and 'friend' in obj:\n\t\t\t\tqry = \"INSERT INTO friends VALUES ((SELECT id FROM profiles WHERE username = %s),\\\n\t\t\t\t\t(SELECT id FROM profiles WHERE username = %s));\"\n\t\t\t\twith db as cur:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tlines = cur.execute(qry, (obj['username'],obj['friend']))\n\n\t\t\t\t\t\tif lines > 0:\n\t\t\t\t\t\t\treturn {'status':'FRIEND_ADDED'}\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treturn {'status':'QUERY_FAILED'}\n\t\t\t\t\texcept sql.IntegrityError:\n\t\t\t\t\t\treturn {'status':'DUPLICATE_USER'}\n\t\t\t\t\texcept sql.OperationalError:\n\t\t\t\t\t\treturn {'status':'NO_SUCH_USER'}\n\n\t\t\telif action == 'GET':\n\t\t\t\t\"\"\" Retrieve all friends belonging to user. \"\"\"\n\t\t\t\tfriends = [] #accepted, both ends\n\t\t\t\tpending = [] #pending answer from friend\n\n\t\t\t\t# retrieve canonical friends\n\t\t\t\tuserqry = \"SELECT id FROM profiles WHERE username = %s\"\n\t\t\t\tfriendsqry = \"SELECT friend FROM friends WHERE target = ANY(\"+userqry+\")\"\n\t\t\t\tqry = \"SELECT username FROM profiles WHERE id = ANY(\"+friendsqry+\");\"\n\t\t\t\twith db as cur:\n\t\t\t\t\tlines = cur.execute(qry, (obj['username'],))\n\t\t\t\t\tfor friend in cur.fetchall():\n\t\t\t\t\t\tfriends += friend\n\n\t\t\t\t# retrieve pending requests\n\t\t\t\tuserqry = \"SELECT id FROM profiles WHERE username = %s\"\n\t\t\t\tfriendsqry = \"SELECT target FROM friends WHERE friend = ANY(\"+userqry+\")\"\n\t\t\t\tqry = \"SELECT username FROM profiles WHERE id = ANY(\"+friendsqry+\");\"\n\t\t\t\twith db as cur:\n\t\t\t\t\tlines = cur.execute(qry, (obj['username'],))\n\t\t\t\t\tprint \"friends:\"+str(friends)\n\t\t\t\t\tfor req in cur.fetchall():\n\t\t\t\t\t\tif not req[0] in friends:\n\t\t\t\t\t\t\tpending += req\n\n\t\t\t\tif not (len(friends)<=0 and len(pending)<=0):\n\t\t\t\t\treturn {'status':'QUERY_OK', 'friends':friends, 'pending':pending}\n\t\t\t\telse:\n\t\t\t\t\treturn {'status':'NO_FRIENDS'}\n\n\t\t\telif action == 'DELETE' and 'friend' in obj:\n\t\t\t\tqry = \"DELETE FROM friends WHERE target = (SELECT id FROM profiles WHERE username = %s)\\\n\t\t\t\t\tand friend = (SELECT id FROM profiles WHERE username = %s);\"\n\t\t\t\twith db as cur:\n\t\t\t\t\tlines = cur.execute(qry, (obj['username'], obj['friend']))\n\t\t\t\t\tif lines>0:\n\t\t\t\t\t\treturn {'status':'FRIEND_DELETED'}\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn {'status':'QUERY_FAILED'}\n\n\t\t\telse:\n\t\t\t\treturn {'status':'INVALID_ACTION'}",
"def friends():\n friends = [u.to_dict() for u in g.user.get_friends()]\n return jsonify({'success': True, 'friends': friends})",
"def user_list_friend_requests(self):\n email_token = auth.current_user()[0]\n friend_emails = self.friend_database.get_friend_requests(email_token)\n friends = [self.auth_server.profile_query(email) for email in friend_emails]\n return json.dumps(friends), 200",
"def add_friend(request):\n required_fields = ['source_user_id', 'dest_user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['source_user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Send friend request\n if not mock_db.add_friend(data['source_user_id'], data['dest_user_id']):\n return Response({'error': str('Error when adding friend!')},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})",
"def test_requested_friends_asymmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n \n f.requested_friends.add(u)\n self.assertIs(u in f.requested_friends.all(), True)\n self.assertIs(f in u.requested_friends.all(), False)",
"def find_friends(request):\n find_list = []\n sent_requests = set()\n rec_requests = set()\n sent_f_requests = FriendRequest.objects.filter(\n from_user=request.user\n )\n rec_f_requests = FriendRequest.objects.filter(\n to_user=request.user\n )\n\n me = request.user\n my_friends = me.profile.friends.all()\n my_family = me.relations.all()\n profiles = Profile.objects.exclude(\n user=request.user\n )\n for user in profiles:\n user_friends = user.friends.all()\n for friend in user_friends:\n if friend not in find_list and friend != me:\n if friend not in my_friends and friend not in my_family:\n find_list.append(friend)\n\n template = 'profiles/find_friends.html'\n context = {\n 'find_list': find_list,\n }\n return render(request, template, context)",
"def get_friend_request_with_id(id):\n # Get friend request\n friendship = Friendship.query.get(id)\n if friendship is None:\n raise CustomError(\n 404,\n message=\"Friendship with id: {} not found.\".format(id)\n )\n can_view = friendship.actioning_user_id == g.user.id or \\\n friendship.recieving_user_id == g.user.id\n # Check user is has permission to view that request\n if not can_view:\n raise CustomError(\n 401,\n message=\"You are not authorised to view this resource.\"\n )\n\n if request.method == \"GET\":\n return jsonify({'success': True, 'friendship': friendship.to_dict()})\n\n if request.method == \"PATCH\":\n if friendship.recieving_user_id != g.user.id:\n raise CustomError(\n 401,\n message=\"You are not authorised to update this object.\"\n )\n\n json = request.get_json()\n if json is None:\n raise CustomError(400, message=\"No JSON included or Content-Type\"\n \"is not application/json\")\n if 'confirmed' in json:\n friendship.confirmed = json['confirmed']\n\n db.session.commit()\n return jsonify({'success': True, 'friendship': friendship.to_dict()})\n\n if request.method == \"DELETE\":\n db.session.delete(friendship)\n db.session.commit()\n return jsonify({'success': True})",
"def accept(self):\n receiver_friend_list = FriendList.objects.filter(user_id=self.receiver_id)\n sender_friend_list = FriendList.objects.filter(user_id=self.sender_id)\n if(receiver_friend_list.exists()):\n receiver_friend_list = receiver_friend_list[0]\n else:\n receiver_friend_list = FriendList.objects.create(user_id=self.receiver_id)\n\n if(sender_friend_list.exists()):\n sender_friend_list = sender_friend_list[0]\n else:\n sender_friend_list = FriendList.objects.create(user_id=self.sender_id)\n\n if receiver_friend_list:\n receiver_friend_list.add_friend(self.sender_id)\n if sender_friend_list:\n sender_friend_list.add_friend(self.receiver_id)\n self.is_active = False\n self.save()",
"def accept(request, pk=None):\n # check request is valid or not\n friend_request = get_or_none(FriendRequest, pk=pk)\n if friend_request is None:\n return Response({'status': '400', 'code': 'E_REQUEST_NOT_FOUND',\n 'detail': code['E_REQUEST_NOT_FOUND']}, status=400)\n # Create friend for login user -> request user\n new_friend1 = FriendConnectSerializer(\n data={'user': friend_request.from_user.id, 'friend': friend_request.to_user.id})\n if not new_friend1.is_valid():\n return Response({'status': '400', 'code': 'E_INVALID_PARAMETER_VALUES',\n 'detail': new_friend1.errors}, status=400)\n # Create friend for request user -> login user\n new_friend2 = FriendConnectSerializer(\n data={'friend': friend_request.from_user.id, 'user': friend_request.to_user.id})\n if not new_friend2.is_valid():\n return Response({'status': '400', 'code': 'E_INVALID_PARAMETER_VALUES',\n 'detail': new_friend2.errors}, status=400)\n # Save record 1\n new_friend1.save()\n # Check save or fail\n is_save1 = get_or_none(FriendConnect, user=friend_request.from_user, friend=friend_request.to_user)\n if is_save1 is not None:\n return Response({'status': '500', 'code': 'E_NOT_SAVE',\n 'detail': code['E_NOT_SAVE']}, status=500)\n # Save record 2\n new_friend2.save()\n # Check save or fail\n is_save2 = get_or_none(FriendConnect, user=friend_request.to_user, friend=friend_request.from_user)\n # if fail delete record 1\n if is_save2 is not None:\n is_save1.delete()\n return Response({'status': '500', 'code': 'E_NOT_SAVE',\n 'detail': code['E_NOT_SAVE']}, status=500)\n # if every things ok delete request\n friend_request.delete()\n return Response({'status': '200', 'code': 'OK_SEND_FRIEND_REQUEST',\n 'detail': code['OK_ACCEPT_FRIEND_REQUEST']}, status=201)",
"def add_friends(self, user1_index, user2_index):\n if user1_index >= self.num_users or user2_index >= self.num_users:\n raise ValueError(\n f\"Number of users is {self.num_users}, but indices \"\n f\"{user1_index} and {user2_index} were requested.\"\n )\n if self.users_hat[user1_index, user2_index] == 0:\n self.users_hat[user1_index, user2_index] = 1\n elif self.is_verbose():\n self.log(f\"User {user2_index} was already following user {user1_index}\")\n if self.users_hat[user2_index, user1_index] == 0:\n self.users_hat[user2_index, user1_index] = 1\n elif self.is_verbose():\n self.log(f\"User {user1_index} was already following user {user2_index}\")",
"def send_request(request, id):\n user = get_object_or_404(User, id=id)\n f_request, created = FriendRequest.objects.get_or_create(\n from_user=request.user,\n to_user=user\n )\n if created:\n messages.success(\n request,\n f'Your friend request to {user} has been sent.'\n )\n\n return redirect('/profiles/%s/' % user.profile.slug)\n messages.info(\n request,\n f'You have already sent a friend request to {user}'\n )\n return redirect('/profiles/%s/' % user.profile.slug)",
"def get_friends(user, data):\n setA = list(\n data.loc[data.user == user].user_friend_list.values)\n setB = list(\n data.loc[data.user_friend_list == user].user\n .values)\n friends = list(set(set(setA).union(setB)))\n return friends",
"def accept_decline_friend(request):\n required_fields = ['source_user_id', 'dest_user_id', 'accept', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['source_user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # if friend request is being accepted\n if data['accept'] == \"yes\":\n if not mock_db.accept_friend(data['source_user_id'], data['dest_user_id']):\n return Response({'error': str('Error when accepting friend request!')},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n # if friend request is not accepted\n elif data['accept'] == \"no\":\n if not mock_db.cancel_friend_request(data['source_user_id'], data['dest_user_id']):\n return Response({'error': str('Error when declining friend request!')},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n # bad request\n else:\n return Response({'error': str('Invalid request. Use yes/no in accept field.')},\n status=status.HTTP_400_BAD_REQUEST)\n\n return Response({'status': 'success'})"
] | [
"0.66678417",
"0.65838015",
"0.64492136",
"0.6419166",
"0.6355479",
"0.6347656",
"0.63212603",
"0.6301099",
"0.6292718",
"0.62728006",
"0.6239098",
"0.62126476",
"0.61244184",
"0.60886",
"0.6074877",
"0.6053998",
"0.6042511",
"0.60373974",
"0.59732705",
"0.5967323",
"0.59309",
"0.5902279",
"0.5878904",
"0.58686084",
"0.5834719",
"0.57897013",
"0.5774579",
"0.572391",
"0.5668115",
"0.5655726"
] | 0.7035371 | 0 |
Get, update or delete the friendship with the specified id. | def get_friend_request_with_id(id):
# Get friend request
friendship = Friendship.query.get(id)
if friendship is None:
raise CustomError(
404,
message="Friendship with id: {} not found.".format(id)
)
can_view = friendship.actioning_user_id == g.user.id or \
friendship.recieving_user_id == g.user.id
    # Check user has permission to view that request
if not can_view:
raise CustomError(
401,
message="You are not authorised to view this resource."
)
if request.method == "GET":
return jsonify({'success': True, 'friendship': friendship.to_dict()})
if request.method == "PATCH":
if friendship.recieving_user_id != g.user.id:
raise CustomError(
401,
message="You are not authorised to update this object."
)
json = request.get_json()
if json is None:
            raise CustomError(400, message="No JSON included or Content-Type "
"is not application/json")
if 'confirmed' in json:
friendship.confirmed = json['confirmed']
db.session.commit()
return jsonify({'success': True, 'friendship': friendship.to_dict()})
if request.method == "DELETE":
db.session.delete(friendship)
db.session.commit()
return jsonify({'success': True}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_friend(request, id):\n user = request.user\n friend = get_object_or_404(User, id=id)\n user.profile.friends.remove(friend)\n friend.profile.friends.remove(user)\n messages.success(\n request,\n 'User deleted from your friends list'\n )\n return redirect('profiles:profile')",
"async def delete(\n self, user_id: Optional[int] = None, **kwargs\n ) -> friends.DeleteResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.delete\", params)\n model = friends.DeleteResponse\n return model(**response).response",
"def update_ship(id):\n data = request.get_json()\n print(data)\n for ship in db['ships']:\n if ship['id'] == id:\n if data['name']:\n ship['name'] == data['name']\n if data['age']:\n ship['age'] == data['age']\n return ship, status.HTTP_202_ACCEPTED\n return {}, status.HTTP_404_NOT_FOUND",
"def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)",
"def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)",
"def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)",
"def getFriends(id):\n u = models.User.query.get(id)\n if not u:\n return jsonify({'error': 'No account found'}), 200\n\n if not u.isFb:\n if int(u.fbid) is not 0:\n u = models.User.query.get(u.fbid)\n if not u.isFb and int(u.fbid) is not 0:\n u = models.User.query.get(u.fbid)\n else:\n return jsonify({'error': 'No account found'}), 200\n\n session['oauth_token'] = (u.token, '')\n resp = facebook.get('/' + u.fbid + '/friends')\n friends = []\n for f in resp.data['data']:\n friends.append(f['id'])\n\n friends_json = []\n for f in friends:\n u = models.User.query.filter_by(fbid=f).first()\n user = {\n 'id': u.id,\n 'name': u.name,\n 'email': u.email,\n 'regID': u.regid,\n 'photo': u.photo\n }\n friends_json.append(user)\n return jsonify({'friends': friends_json}), 200",
"def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n # print(\"WARNING: You cannot be friends with yourself\")\n return False\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n # print(\"WARNING: Friendship already exists\")\n return False\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)\n\n return True",
"def remove_relation(request, id):\n user = request.user\n relation = get_object_or_404(User, id=id)\n user.profile.relations.remove(relation)\n user.profile.friends.add(relation)\n messages.success(\n request,\n 'Family member removed to your friends list'\n )\n return redirect('profiles:my_friends')",
"def delete_request(request, id):\n f_request = FriendRequest.objects.get(id=id)\n f_request.delete()\n messages.success(\n request,\n f'Your friend request has been removed.'\n )\n return redirect('profiles:my_requests')",
"def add_relation(request, id):\n user = request.user\n friend = get_object_or_404(User, id=id)\n user.profile.relations.add(friend)\n user.profile.friends.remove(friend)\n messages.success(\n request,\n 'Friend added to your family list'\n )\n return redirect('profiles:my_family')",
"def api_profile_friends_get(profile_id: int):\n\n if is_access_denied(profile_id):\n return jsonify({'error': {'message': 'forbidden'}}), 403\n\n friends = Friend.find_by_profile_id(profile_id)\n \n if friends is None:\n return jsonify({'error': {'message': 'not found'}}), 404\n\n out = [ f.get_fields(with_id=True) for f in friends ]\n\n return jsonify(out), 200",
"def make_friend(user_id, friend_id):\n # Find out if the user exists\n user_a = user_grab(user_id)\n if user_a is None:\n return \"user not found\", 404\n\n # Find the other user\n user_b = user_grab(friend_id)\n if user_b is None:\n return \"user not found\", 404\n\n # Get their friend list\n friends_current = user_a.get(\"friends\")\n friends_updated = []\n if friends_current is not None:\n for friend in friends_current:\n if friend == friend_id:\n return user_b\n friends_updated = friends_current\n friends_updated.append(str(user_b['_id']))\n api_vars.users.update({'_id': ObjectId(user_id)},\n {'$set': {'friends': friends_updated}})\n return json.dumps(user_b)",
"def delete_favorite(self, id):\n path = self._get_path('alter_favorite').format(id=id)\n \n return self._clean_return(self._DELETE(path))",
"def scrap_ship(self, ship_id):\n r = requests.delete(self.base_url + f'/users/{self.username}/ships/{ship_id}', headers=self.auth_header)\n return r.text",
"def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)",
"def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)",
"def removeFollower(self,id):\n # DELETE /followers/$id\n pass",
"def accept_request(request, id):\n f_request = FriendRequest.objects.get(id=id)\n if f_request.to_user == request.user:\n f_request.to_user.profile.friends.add(f_request.from_user)\n f_request.from_user.profile.friends.add(f_request.to_user)\n f_request.delete()\n messages.success(\n request,\n f'Your friend request was successfully accepted'\n )\n return redirect('profiles:my_friends')",
"def send_request(request, id):\n user = get_object_or_404(User, id=id)\n f_request, created = FriendRequest.objects.get_or_create(\n from_user=request.user,\n to_user=user\n )\n if created:\n messages.success(\n request,\n f'Your friend request to {user} has been sent.'\n )\n\n return redirect('/profiles/%s/' % user.profile.slug)\n messages.info(\n request,\n f'You have already sent a friend request to {user}'\n )\n return redirect('/profiles/%s/' % user.profile.slug)",
"def friends(user_id):\n user = user_grab(user_id)\n if user is None:\n return \"user not found\", 404\n friends = user.get(\"friends\")\n if friends is None:\n friends = []\n data_json = json.dumps({'friends': [str(friend) for friend in friends]})\n return data_json",
"def delete(self,id):\r\n return delete(id=id)",
"def delete(self, id):\n response = remove_location(id)\n return response",
"def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n return False\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n return False\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)\n return True",
"def board_by_id(request, id):\n if request.method == 'GET':\n try:\n board = Board.objects.get(id=id)\n return Response(BoardSerializer(board).data)\n except ObjectDoesNotExist:\n return Response({\n \"id\": -1,\n \"error\": \"invalid id\"\n })\n if request.method == 'DELETE':\n try:\n Board.objects.get(id=id).delete()\n return Response({\n \"success\": True\n })\n except ObjectDoesNotExist:\n return Response({\n \"success\": False\n })",
"def getFollowings(self,id=None,**kwargs):\n # GET /followings [/$id]\n debugMain('getEntitiesIFollow')\n if id is None:\n return self._genericGet('/followings',**kwargs)\n else:\n return self._genericGet('/followings/%s'%id,**kwargs)",
"def addFriendship(self, userID, friendID):\n # adding a edge between two vertices\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)",
"def add_favorite(self, id):\n path = self._get_path('alter_favorite').format(id=id)\n \n return self._clean_return(self._PUT(path))",
"def show_friends():\n\n\n user_id = session['user_id']\n user = User.query.get(user_id)\n friendship = Friendship.query.get(user_id)\n\n return render_template('friends.html', user=user, friendship=friendship)",
"def delete(self, id):\n try:\n deleted_id = self.borrow_repo.remove_one_by_id(id)\n if deleted_id:\n self.write({'id': deleted_id})\n else:\n self.write_not_found(\n 'A request with id {} was not found'.format(id)\n )\n except BumerangError as e:\n self.set_status(500)\n self.finish({'error': str(e)})"
] | [
"0.6267513",
"0.60474205",
"0.5893467",
"0.57769",
"0.57769",
"0.57769",
"0.55159366",
"0.5514679",
"0.5414053",
"0.53229433",
"0.5308454",
"0.52603304",
"0.52275467",
"0.5149211",
"0.51255256",
"0.51151985",
"0.51151985",
"0.5048067",
"0.49942237",
"0.49900073",
"0.49625716",
"0.492907",
"0.49201536",
"0.49102953",
"0.48963603",
"0.48825768",
"0.48816967",
"0.4872865",
"0.48643848",
"0.48641998"
] | 0.74585414 | 0 |
outLookSender is not utilized in this module, but the function was written in case we want to send from an Outlook account in the future | def outLookSender(receiverAddress, receiverName, retainedCompany, companyName, senderName, senderTitle, senderCompany, senderEmail, senderCompanyHomePage, senderPhone, returnHTML=False):
subj = f'Engineers from {retainedCompany} Search'
if returnHTML:
[text, html] = emailTextHTML(receiverName, retainedCompany, companyName, senderName, senderTitle, senderCompany, senderEmail, senderCompanyHomePage, senderPhone, returnHTML=returnHTML)
else:
[text] = emailTextHTML(receiverName, retainedCompany, companyName, senderName, senderTitle, senderCompany, senderEmail, senderCompanyHomePage, senderPhone, returnHTML=returnHTML)
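    # NOTE: "app" and "k" below are assumed to come from the appscript package
    # (e.g. "from appscript import app, k"), which scripts Outlook via AppleScript on macOS.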
outlook = app('Microsoft Outlook')
msg = outlook.make(
new=k.outgoing_message,
with_properties={
k.subject: subj,
k.plain_text_content: text
}
)
msg.make(
new=k.recipient,
with_properties={
k.email_address: {
k.name: receiverName,
k.address: receiverAddress
}
}
)
msg.send() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pcorMacVerification(window,refrenceid,objectidentifier,texttoenter):\n try:\n buttons = getAppButtons(window)\n atomacclick(buttons[9])\n childwindow = refrenceid.windowsR()\n protectMoreDevicestitle = getApplicatontitle(childwindow[0])\n entertext(protectMoreDevicestitle,objectidentifier,texttoenter)\n except Exception as er:\n return False\n print \"Not able to able to send mail\"",
"def __init__(self):\n self.outlook = win32.Dispatch('outlook.application')\n locale.setlocale(locale.LC_ALL, '')",
"def replyMessage(_email, _name):\n\n _mailer = app.config['MAIL_USERNAME']\n mesg = Message(\"Message Received\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[_email])\n mesg.body = f'''Hello {_name},\nThe message you sent to Randy has been received. \nRandy will contact you within 24 hours.\nThank you.\n\nRegards,\nRandy\n\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(mesg)\n return 'OK'",
"def sender(self):\n key, alt = ('Sender', 'From') if not self.resent else \\\n ('Resent-Sender', 'Resent-From')\n value = self.get(key) or self.get(alt)\n _, addr = getaddresses([value])[0]\n return addr",
"def test_send_subscribe_email(self):\n #Verifica se foi enviado 1 e-mail, o este não envia e-mail\n self.assertEqual(1, len(mail.outbox))",
"def test_send_to_self(self):\r\n # Now we know we have pulled up the instructor dash's email view\r\n # (in the setUp method), we can test sending an email.\r\n test_email = {\r\n 'action': 'send',\r\n 'send_to': 'myself',\r\n 'subject': 'test subject for myself',\r\n 'message': 'test message for myself'\r\n }\r\n # Post the email to the instructor dashboard API\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n # Check that outbox is as expected\r\n self.assertEqual(len(mail.outbox), 1)\r\n self.assertEqual(len(mail.outbox[0].to), 1)\r\n self.assertEquals(mail.outbox[0].to[0], self.instructor.email)\r\n self.assertEquals(\r\n mail.outbox[0].subject,\r\n '[' + self.course.display_name + ']' + ' test subject for myself'\r\n )",
"def start():\r\n\r\n try:\r\n server.starttls()\r\n print(\"Successful connection to Outlook server\")\r\n print(\"--------------------------\")\r\n sender = input(\"Enter your Outlook email address: \")\r\n pwd = input(\"Enter your Outlook password: \")\r\n print(\"--------------------------\")\r\n server.login(sender, pwd)\r\n print(\"Successfully logged into Outlook\")\r\n print(\"--------------------------\")\r\n return sender\r\n except Exception as e:\r\n print(\"Unable to login. Check that the login information is correct\")\r\n print(e)\r\n print(\"--------------------------\")\r\n quit()",
"def outlook(self):\n if \"outlook\" in self._prop_dict:\n if isinstance(self._prop_dict[\"outlook\"], OneDriveObjectBase):\n return self._prop_dict[\"outlook\"]\n else :\n self._prop_dict[\"outlook\"] = OutlookUser(self._prop_dict[\"outlook\"])\n return self._prop_dict[\"outlook\"]\n\n return None",
"def test_using_invite_use_host_in_from_email(self, send_mass_html_mail__mock: Mock):\n events = Event.objects.filter(pk=self.event.pk)\n\n admin.EventAdmin.send_mail(Mock(), None, events)\n\n to_send = list(send_mass_html_mail__mock.call_args[0][0])\n from_email = to_send[0][3]\n self.assertEqual(from_email, \"Marie <[email protected]>\")",
"def send(\r\n self,\r\n to = '', #list of email addresses - Required\r\n subject='None', #message's subject - Required\r\n message_text='None', #message body in plain text - Required\r\n message_html=None, #message body in html - Optional\r\n attachments=None, #list of truples [(filename, file_contents)] - Optional\r\n cc = None, #list of email addresses to CC message to\r\n bcc = None, #list of email addresses to BCC message to\r\n reply_to = None, #single email address to have replies send to\r\n ): \r\n if not isinstance(to, list):\r\n to = [to]\r\n\r\n try:\r\n if self.settings.private.email_server == 'gae':\r\n from google.appengine.api import mail\r\n #untested on GAE, but in theory should work\r\n #http://code.google.com/appengine/docs/python/mail/emailmessagefields.html\r\n mail.send_mail(sender=self.settings.private.email_sender, to=to,\r\n subject=subject, body=message_text, html=message_html, attachments=attachments, cc = cc,\r\n bcc = bcc, reply_to = reply_to)\r\n else:\r\n\r\n msg = self.buildMIME(sender = self.settings.private.email_sender,\r\n recipients = to, subject = subject,\r\n message_text = message_text, message_html = message_html,\r\n attachments = attachments,\r\n cc = cc, bcc = bcc, reply_to = reply_to)\r\n #print 'message'+msg.as_string()\r\n #Build MIME body\r\n (host, port) = self.settings.mail.server.split(':')\r\n\r\n if self.settings.mail.ssl: \r\n try:\r\n server = smtplib.SMTP_SSL(host, port)\r\n except:\r\n # ERROR python <= 2.6\r\n pass\r\n else:\r\n server = smtplib.SMTP(host, port)\r\n\r\n if self.settings.mail.login:\r\n try:\r\n server.ehlo_or_helo_if_needed()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in HELO\")\r\n\r\n if self.settings.mail.use_tls:\r\n try:\r\n server.starttls()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in STARTTLS\")\r\n except SMTPException:\r\n logger.info(\"Server does not support TLS\")\r\n\r\n except RuntimeError:\r\n logger.info(\"Python version does not support TLS (<= 2.6?)\")\r\n\r\n try:\r\n server.ehlo_or_helo_if_needed()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in HELO\")\r\n\r\n (username, password) = self.settings.mail.login.split(':')\r\n try:\r\n server.login(username, password)\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in LOGIN\")\r\n\r\n except SMTPAuthenticationError:\r\n logger.info(\"Invalid username/password combination\")\r\n\r\n except SMTPException:\r\n logger.info(\"SMTP error in login\")\r\n\r\n try:\r\n server.sendmail(self.settings.private.email_sender, to, msg.as_string())\r\n server.quit()\r\n\r\n except SMTPRecipientsRefused:\r\n logger.info(\"All recipients were refused. Nobody got the mail.\")\r\n\r\n except SMTPHeloError:\r\n logger.info(\"The server didn't reply properly to the HELO greeting.\")\r\n\r\n except SMTPSenderRefused:\r\n logger.info(\"The server didn't accept the from_addr.\")\r\n\r\n except SMTPDataError:\r\n logger.info(\"The server replied with an unexpected error code (other than a refusal of a recipient).\")\r\n \r\n except Exception, e:\r\n return False\r\n return True",
"def test_send_email(self):\n\t\trecipient = \"\"\n\t\tself.email.send_email(self.subject, recipient, self.content)",
"def sendmail(self, *args, **kwargs):\n #FUTURE: the EmailMessage attributes could be found by introspecting\n # the encoded message.\n message = mail.EmailMessage('SUBJECT', 'BODY', 'FROM', ['TO'])\n mail.outbox.append(message)",
"def test_send_email_on_invite(self):\n\n league = self.create_league()\n\n season = self.create_season(league)\n team = self.create_team(season)\n\n player = self.create_player()\n\n send_user_email_on_join(player, team.id)\n\n self.assertEqual(len(mail.outbox), 1)\n\n # if testing manually:\n # import pathlib\n # pathlib.Path(\"test_email.html\").write_text(last_sent.body)",
"def sendmail(sendername, senderemail, password, receivers, htmlfile, img, attach):\n import smtplib\n\n #Creating the email\n \n\n domain = senderemail.split('@')[1]\n if 'gmail' in domain.lower(): #Gmail SMTP\n smtpObj = smtplib.SMTP('smtp.gmail.com', 587)\n elif 'outlook' in domain.lower(): #Outlook SMTP\n smtpObj = smtplib.SMTP('smtp-mail.outlook.com', 587)\n elif 'yahoo' in domain.lower(): #Yahoo SMTP\n smtpObj = smtplib.SMTP('smtp.mail.yahoo.com', 587)\n else:\n print('Sorry I dont have your email SMTP setting.\\nBYE!')\n quit()\n\n smtpObj.starttls()\n try:\n smtpObj.login(senderemail, password)\n except smtplib.SMTPAuthenticationError:\n print('Authentication error!\\nWrong Email or Password.')\n quit()\n \n for user, email in receivers.items():\n msg = makeHTMLemail(sendername, senderemail, user, email, htmlfile, img, attach)\n smtpObj.send_message(msg)\n print('email sent to {}'.format(user))\n del msg\n smtpObj.quit()",
"def send_owner_message(): \n data = order_obj.send_owner_message(request.forms)\n return data",
"def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))",
"def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))",
"def email_body_appointment_confirmation_for_seller(meeting, buyer_profile, sellr_profile, msg_user_link='https://INSPRITE.co/message/USER'):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Fantastic! You accepted <a href=\"https://127.0.0.1:5000/profile?' + buyer_profile.prof_id + '\" style=\"color:#1488CC\">' + buyer_profile.prof_name + '\\'s proposal.</a><br><br>'\n\tmsg = msg + '\\t\\t\\t Check out the details:<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\t\\t\\t Need to edit, manage or update the appointment? 
<a href=\"https://127.0.0.1:5000/dashboard\" style=\"color:#1488CC\">Go for it</a>, or send <a href=\"' + msg_user_link + '\" style=\"color:#1488CC\"> ' + buyer_profile.prof_name + ' a message.</a><br><br>We know life can be busy, so we\\'ll send you a reminder 24 hours in advance too.</font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://maps.googleapis.com/maps/api/staticmap?center=' + meeting.meet_location + '&zoom=15&size=400x450&markers=size:large%8Ccolor:0xFFFF00%7Clabel:Insprite%7C' + meeting.meet_location + '\"><br>'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"><a href=\"mailto:[email protected]\" style=\"color:#1488CC\">Contact Us</a> '\n\tmsg = msg + '| Sent by <a href=\"https://insprite.co\" style=\"color:#1488CC\">Insprite</a>, California, USA. | <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font><br>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr> <td style=\"border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\">'\n\tmsg = msg + '<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg",
"def service_sendTestMail(self, context, sender=None, recipient=None):\n\n if sender is None:\n sender = self.config.sender_mail\n else:\n sender = sender.strip()\n\n if recipient is None:\n recipient = self.config.admin_mail\n else:\n recipient = recipient.strip()\n\n # TODO fr / en\n # add fqdn\n msg_text = u\"\"\"Bonjour,\nCe message de test a été envoyé depuis l'interface d'administration\nd'EdenWall. Si vous l'avez reçu, cela confirme que la configuration\nen place au moment de l'envoi vous permet de recevoir les messages\nsystème (alertes et informations) de votre pare-feu EdenWall.\"\"\"\n if context.isUserContext():\n session = context.getSession()\n msg_text += u\"\\n\\nL'envoi ce de message a été déclenché par une action utilisateur.\\nInformations de traçage: %s\\n\" % (session,)\n\n msg = MIMEText(msg_text.encode('ISO-8859-1'), 'plain', 'ISO-8859-1')\n msg['Subject'] = 'EdenWall : test mail'\n\n if check_mail(sender):\n msg['From'] = sender\n else:\n raise NuConfError(CONTACT_INVALID_SENDER, \"'sender' e-mail : invalid e-mail address\")\n\n if check_mail(recipient):\n msg[\"To\"] = recipient\n else:\n raise NuConfError(CONTACT_INVALID_RECIPIENT, \"'recipient' e-mail : invalid e-mail address\")\n\n return self.sendTestMail('127.0.0.1', msg['From'], [msg['To']], msg.as_string())",
"def send(self):\n return get_current_sender().sendmail(self)",
"def receive(self, email):\n self.inbox += email",
"def sendEmail(_name, _email, _body):\n\n _mailer = app.config['MAIL_USERNAME']\n msg = Message(\"Contact Form\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[f'{_mailer}'])\n msg.body = f'''{_body}\n\n\nSender's Name: {_name}\nSender's Email: {_email}\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(msg)\n return 'OK'",
"def define_sender(self, email=\"\", name=\"\"):\n if not email:\n return\n if not name:\n name = False\n self.from_who = _email.formataddr( (name, email) )",
"def send(self, **kwargs):\n if hasattr(self.object, 'member'):\n self.add_to(self.object.member.user.email)\n elif hasattr(self.object, 'membership'):\n self.add_to(self.object.created_by.email)\n return super(GrantedAccessMailer, self).send(**kwargs)",
"def email_body_appointment_confirmation_for_buyer(meeting, buyer_profile, sellr_profile, msg_url=\"https://127.0.0.1:5000/message?profile=xxxx\"):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Ain\\'t life grand? Meeting\\'s on! <a href=\"https://127.0.0.1:5000/profile?'+ sellr_profile.prof_id + ' style=\"color:#1488CC\">\"' + sellr_profile.prof_name + '\" accepted your proposal.</a><br><br>'\n\tmsg = msg + '\\t\\t\\t Check out the details: <br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\t\\t\\t Need to edit, manage or update the appointment? 
<a href=\"https://127.0.0.1:5000/dashboard\" style=\"color:#1488CC\">Go for it</a>, or send <a href=\"'+msg_url+'\" style=\"color:#1488CC\">\"' + sellr_profile.prof_name + '\" a message.</a><br><br></font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://maps.googleapis.com/maps/api/staticmap?center=' + meeting.meet_location + '&zoom=15&size=400x450&markers=size:large%8Ccolor:0xFFFF00%7Clabel:Insprite%7C' + meeting.meet_location + '\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"> <a href=\"mailto:[email protected]\" style=\"color:#1488CC\">Contact Us</a>'\n\tmsg = msg + '\\t\\t| Sent by <a href=\"https://insprite.co\" style=\"color:#1488CC\">Insprite</a>, California, USA. | <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr> <td style=\"border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\">'\n\tmsg = msg + '\\t\\t<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg",
"def send_email(path=OUTLOOK_EXE,subject=\"\",message=\"\",recipients=None,\r\n cc=None,bcc=None,attachments=None):\r\n # Check if outlook is an active process.\r\n if not is_running(os.path.basename(path)):\r\n\r\n # Launch Outlook executable.\r\n cmdspec = 'start \"\" /B /MIN ' + '\"' + path + '\"'\r\n subprocess.run(cmdspec, shell=True, capture_output=False)\r\n\r\n # Wait until `OUTLOOK.EXE` registers in tasklist.\r\n while True:\r\n if not is_running(os.path.basename(path)):\r\n time.sleep(.25)\r\n else:\r\n break\r\n\r\n # Send message via Outlook and logged in user.\r\n outlook = win32com.client.Dispatch(\"Outlook.Application\")\r\n mail = outlook.CreateItem(0)\r\n mail.Subject = subject\r\n mail.HtmlBody = message\r\n\r\n if recipients is not None:\r\n if hasattr(recipients, \"strip\"):\r\n recipients = [recipients]\r\n [mail.Recipients.Add(i) for i in recipients]\r\n\r\n if attachments is not None:\r\n if hasattr(attachments, \"strip\"):\r\n attachments = [attachments]\r\n [mail.Attachments.Add(i) for i in attachments]\r\n\r\n mail.send\r\n\r\n return(None)",
"def test_using_invite_use_host_in_from_email(self, send_mass_html_mail__mock: Mock):\n self._send_form()\n\n to_send = list(send_mass_html_mail__mock.call_args[0][0])\n from_email = to_send[0][3]\n self.assertEqual(from_email, \"Marie <[email protected]>\")",
"def test_using_invite_use_host_in_from_email(self, send_mass_html_mail__mock: Mock):\n self._send_form()\n\n to_send = list(send_mass_html_mail__mock.call_args[0][0])\n from_email = to_send[0][3]\n self.assertEqual(from_email, \"Marie <[email protected]>\")",
"def test_invitation_email(self):\n queryset = models.Invitation.objects.filter(id=self.invitation.id)\n self.admin_instance.send_new_activation_email(self.some_request, queryset)\n # check whether there is a mail in the outbox\n self.assertEqual(len(mail.outbox), 1)\n # check subject\n self.assertEqual(\n mail.outbox[0].subject,\n \"Er is een account voor u aangemaakt op sso.lizard.net\",\n )\n self.assertEqual(mail.outbox[0].to, [\"[email protected]\"])\n # check mail starts with 'Hallo Reinout,'\n self.assertTrue(mail.outbox[0].body.startswith(\"Hallo Reinout,\"))",
"def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n # run email job\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)"
] | [
"0.5824598",
"0.5588486",
"0.5576109",
"0.55656964",
"0.55588704",
"0.5537601",
"0.55139714",
"0.55043477",
"0.5502522",
"0.54980785",
"0.54489857",
"0.5415622",
"0.5409443",
"0.53969055",
"0.5394186",
"0.5353212",
"0.5353212",
"0.534986",
"0.5313206",
"0.5311404",
"0.5300741",
"0.52703774",
"0.5258186",
"0.5251615",
"0.5236008",
"0.52335054",
"0.5232923",
"0.5232923",
"0.52287984",
"0.52135414"
] | 0.65203226 | 0 |
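Illustrative only - a minimal sketch of how the outLookSender function above might be called; every argument value here is a hypothetical placeholder rather than something taken from the original module.
outLookSender(
    receiverAddress="careers@example.com",      # hypothetical recipient address
    receiverName="guys",                        # the module addresses companies generically
    retainedCompany="Acme Search Partners",     # placeholder values below
    companyName="Example Robotics",
    senderName="Jane Doe",
    senderTitle="Technical Recruiter",
    senderCompany="Acme Search Partners",
    senderEmail="jane.doe@example.com",
    senderCompanyHomePage="https://example.com",
    senderPhone="555-0100",
    returnHTML=True,
)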
emailJobs is a function that is used to email the jobs/careers email addresses of the companies in a dataframe | def emailJobs(
df,
retainedCompany,
senderName,
defaultSenderEmail,
emailPassword,
senderTitle,
senderCompany,
senderCompanyHomePage,
senderPhone,
noContactCompanyListPickleFileName,
port=465,
returnHTML=True
):
try:
with open(noContactCompanyListPickleFileName, 'rb') as inputFile:
noContactCompanyList = pickle.load(inputFile)
    except Exception:
        # No saved no-contact list yet (or it could not be read) - start with an empty one.
        noContactCompanyList = []
for i in range(len(df)):
companyName = df['Organization Name'][i]
        if companyName.lower() in noContactCompanyList:
            continue  # skip companies already on the no-contact list
try:
domainName = df['Domain'][i]
jobsEmails = [prefix + '@' + domainName for prefix in ['jobs', 'careers']]
            # Email all the jobs/careers addresses for that company.
sendEmails(
'guys', # addressing general company, so use 'guys' instead of individual name
retainedCompany,
companyName,
jobsEmails,
senderName,
defaultSenderEmail,
emailPassword,
senderTitle,
senderCompany,
senderCompanyHomePage,
senderPhone,
port=port,
returnHTML = returnHTML
)
        except Exception:
            # Ignore failures for one company so the rest of the batch still goes out.
            pass
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_email(jobs):\n jobs = jobs\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n\n server.login(EMAIL, PASS)\n\n subject = f\"Job Scraper Results\"\n\n if jobs != \"Not working\":\n body = []\n job_ids = [\n jobs[x] for x in sorted(jobs.keys(), key=lambda x: jobs[x][0], reverse=True)\n ][:25]\n for jobID in job_ids:\n score, link, title, company, date_posted, location, full_text = jobID\n body.append(\n f\"({score}) {title} at {company} in {location} posted \\\n {date_posted[5:11]}\\n{link}\\n... {full_text[100:500]} ...\"\n )\n if len(body) == 0:\n body = body + (\"\\nNo results.\")\n body = \"\\n\\n\\n\".join(body)\n body = body.encode(\"ascii\", \"ignore\").decode(\"ascii\")\n msg = f\"Subject: {subject}\\n\\n{body}\"\n else:\n msg = f\"Subject: {subject} - {jobs}\\n\\n{jobs}\"\n\n msg = f\"From: {EMAIL}\\r\\nTo: {EMAIL}\\r\\n\" + msg\n\n server.sendmail(EMAIL, EMAIL, msg)\n\n timezone_ny = pytz.timezone(\"America/NEW_York\")\n datetime_ny = datetime.now(timezone_ny)\n print(f\"E-mail was sent at {datetime_ny.strftime('%H:%M')}.\\n\\n\")\n\n server.quit()",
"def notify_job_by_email(info):\n\n # build params\n params = {}\n params[\"id\"] = info[\"job_id\"]\n params[\"rule_name\"] = info[\"rule\"][\"rule_name\"]\n params[\"username\"] = info[\"rule\"][\"username\"]\n kwargs = json.loads(info[\"rule\"][\"kwargs\"])\n params[\"emails\"] = kwargs[\"email_addresses\"]\n rule_hit = info[\"rule_hit\"]\n params[\"url\"] = rule_hit[\"_source\"][\"job\"][\"job_info\"][\"job_url\"]\n job = {\n \"type\": \"notify_job_by_email\",\n \"name\": \"action-notify_job_by_email-%s\" % info[\"job_id\"],\n \"tag\": params[\"rule_name\"],\n \"username\": params[\"username\"],\n \"params\": params,\n \"localize_urls\": [],\n }\n\n return job",
"def exec(self): \r\n emails = self.args[0].split(',')\r\n for email in emails:\r\n send_mail(self.args[1], self.args[2], email)\r\n return_text = \"Sent Mail To :: \" + self.args[0] +\"\\n\" + self.args[1] + \":\\n\" + self.args[2]\r\n return return_text",
"def sendEmail(body, subject, email=\"\"):\n dest = [\"[email protected]\", \"[email protected]\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n message = Mail(\n from_email=\"[email protected]\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, res)",
"def send_mail(from_email, to_emails, subject, plain_body, html_body):\n\n # Implementation goes here\n # ...",
"def handle(self, *args, **options):\n\n candidates_with_email = [candidate for candidate in Candidate.objects.all()\n if candidate.contact_address and candidate.participating]\n\n\n print 'sending e-mails'\n conn = get_connection()\n for c in candidates_with_email:\n if c.should_send_reminder():\n\n print 'emailing', c\n # store timestamp for reminder email so that they don't get another one for <REMINDER_TIME_PERIOD> days\n c.last_reminder_sent = timezone.now()\n c.save()\n msg = make_email(c)\n conn.send_messages([msg])\n conn.close()",
"def send_email(geocentric_coordinates_transformated_to_ITRF_final_list, data):\n pandas.read_json(json.dumps(geocentric_coordinates_transformated_to_ITRF_final_list)).to_excel(\n data_output + \"/\" + data['filename'] + \"_results.xlsx\")\n msg = Message('ITRF Transformations', sender=app.config['MAIL_USERNAME'], recipients=[data['email']])\n msg.body = make_email_message(data['itrf_begin'], data['epoch_begin'], data['itrf_final'], data['epoch_final'],\n data['velocity'], data['date'])\n with app.open_resource(data_output + \"/\" + data['filename'] + \"_results.xlsx\") as fp:\n file_name = data['filename'] + \"_results\"\n msg.attach(file_name + \".xlsx\", file_name + \"/xlsx\", fp.read())\n mail.send(msg)",
"def email_outstanding_fires(region_id=None):\n qs = Bushfire.objects.filter(report_status__in=[Bushfire.STATUS_INITIAL_AUTHORISED])\n rpt_date = datetime.now()\n\n for row in settings.OUTSTANDING_FIRES_EMAIL:\n for region_name,email_to in row.iteritems():\n\n try:\n region = Region.objects.get(name=region_name)\n except:\n region = None\n traceback.print_exc()\n\n if region:\n f = StringIO()\n book = Workbook()\n total_reports = outstanding_fires(book, region, qs, rpt_date)\n book.add_sheet('Sheet 2')\n book.save(f)\n\n if total_reports == 0:\n subject = 'Outstanding Fires Report - {} - {} - No Outstanding Fire'.format(region_name, rpt_date.strftime('%d-%b-%Y')) \n body = 'Outstanding Fires Report - {} - {} - No Outstanding Fire'.format(region_name, rpt_date.strftime('%d-%b-%Y')) \n elif total_reports == 1:\n subject = 'Outstanding Fires Report - {} - {} - 1 Outstanding Fire'.format(region_name, rpt_date.strftime('%d-%b-%Y')) \n body = 'Outstanding Fires Report - {} - {} - 1 Outstanding Fire'.format(region_name, rpt_date.strftime('%d-%b-%Y')) \n else:\n subject = 'Outstanding Fires Report - {} - {} - {} Outstanding Fires'.format(region_name, rpt_date.strftime('%d-%b-%Y'),total_reports) \n body = 'Outstanding Fires Report - {} - {} - {} Outstanding Fires'.format(region_name, rpt_date.strftime('%d-%b-%Y'),total_reports) \n\n message = EmailMessage(subject=subject, body=body, from_email=settings.FROM_EMAIL, to=email_to, cc=settings.CC_EMAIL, bcc=settings.BCC_EMAIL)\n if total_reports > 0:\n filename = 'outstanding_fires_{}_{}.xls'.format(region_name.replace(' ', '').lower(), rpt_date.strftime('%d-%b-%Y'))\n message.attach(filename, f.getvalue(), \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\") #get the stream and set the correct mimetype\n\n message.send()",
"def add_recipients(df, all_emails):\n user = df[\"sender\"].iloc[0] # ID of the user\n emails = all_emails[user]\n df[\"emails\"] = str(list(emails))\n df[\"emails\"] = df[\"emails\"].map(literal_eval)\n return df",
"def send_ext_customer_task(email,name,password,phone,shop,address,lead_mail,mem_mail,website):\n print(\"member email\",mem_mail)\n logger.info(\"in sending existing customer mail task\")\n return send_ext_customer_mail(email,name,password,phone,shop,address,lead_mail,mem_mail,website)",
"def create_email_job(app, db):\n from app.models import Lembrete\n lock = threading.Lock()\n\n def send_email():\n with lock:\n sp = datetime.now(tz=sao_paulo_tz)\n agora = datetime(\n year=sp.year,\n month=sp.month,\n day=sp.day,\n hour=sp.hour,\n minute=sp.minute\n )\n lembretes = Lembrete.query.filter(\n Lembrete.data_notificacao <= agora\n ).all()\n print('Enviando emails')\n if lembretes:\n for lembrete in lembretes:\n texto = lembrete.texto\n nome = ''\n veiculo = ''\n telefone = ''\n celular = ''\n tel_comercial = ''\n e_mail = ''\n if lembrete.cliente is not None:\n nome = lembrete.cliente.nome\n telefone = lembrete.cliente.telefone\n celular = lembrete.cliente.celular\n tel_comercial = lembrete.cliente.telefone_comercial\n e_mail = lembrete.cliente.email\n if lembrete.cliente is not None:\n veiculo = lembrete.veiculo.descricao()\n\n mensagem = \"\"\"\n Nome: {0}\n Telefone: {1}\n Celular: {2}\n Telefone Comercial: {3}\n E-mail: {4}\n Veículo: {5}\n Lembrete: {6}\n \"\"\".format(\n nome,\n telefone,\n celular,\n tel_comercial,\n e_mail,\n veiculo,\n texto\n )\n email = MIMEText(mensagem)\n\n me = app.config['EMAIL_ME']\n you = app.config['EMAIL_YOU']\n password = app.config['EMAIL_ME_PASSWORD']\n smtp = app.config['EMAIL_SMTP']\n smtp_port = app.config['EMAIL_SMTP_PORT']\n\n email['Subject'] = 'Lembrete: {0}|{1}'.format(\n nome, veiculo\n )\n email['From'] = me\n email['To'] = you\n\n s = smtplib.SMTP(smtp, smtp_port)\n s.ehlo()\n s.starttls()\n s.login(me, password)\n s.sendmail(me, [you], email.as_string())\n s.quit()\n # excluindo o lembrete\n db.session.delete(lembrete)\n db.session.commit()\n return send_email",
"def process(self, send_now=False):\n\t\tfinal_recipients = self.final_recipients()\n\t\tqueue_separately = (final_recipients and self.queue_separately) or len(final_recipients) > 20\n\t\tif not (final_recipients + self.final_cc()):\n\t\t\treturn []\n\n\t\tqueue_data = self.as_dict(include_recipients=False)\n\t\tif not queue_data:\n\t\t\treturn []\n\n\t\tif not queue_separately:\n\t\t\trecipients = list(set(final_recipients + self.final_cc() + self.bcc))\n\t\t\tq = EmailQueue.new({**queue_data, **{\"recipients\": recipients}}, ignore_permissions=True)\n\t\t\tsend_now and q.send()\n\t\telse:\n\t\t\tif send_now and len(final_recipients) >= 1000:\n\t\t\t\t# force queueing if there are too many recipients to avoid timeouts\n\t\t\t\tsend_now = False\n\t\t\tfor recipients in frappe.utils.create_batch(final_recipients, 1000):\n\t\t\t\tfrappe.enqueue(\n\t\t\t\t\tself.send_emails,\n\t\t\t\t\tqueue_data=queue_data,\n\t\t\t\t\tfinal_recipients=recipients,\n\t\t\t\t\tjob_name=frappe.utils.get_job_name(\n\t\t\t\t\t\t\"send_bulk_emails_for\", self.reference_doctype, self.reference_name\n\t\t\t\t\t),\n\t\t\t\t\tnow=frappe.flags.in_test or send_now,\n\t\t\t\t\tqueue=\"long\",\n\t\t\t\t)",
"def send_assignee_emails(self):\n\n assignees = list(set([obj.assignee for obj in self.stalled_nf_issues])) # Assignees from New Features\n assignees.extend(list(set([obj.assignee for obj in self.stalled_st_issues]))) # Add assignees from Sub-tasks\n recipients = self.config.get(\"recipients\", \"emails\").split(\"\\n\") # [recipients] section in .ini file\n\n for assignee in assignees:\n assignee_issues = [] # List of IssueClass objects\n # Get all stalled New feature issues for this assignee\n for item in self.stalled_nf_issues + self.stalled_st_issues:\n if item.assignee == assignee:\n# if item.assignee == \"ashih\":\n assignee_issues.append(item)\n assignee_email = item.assignee_email\n \n if len(assignee_issues):\n html_table = '<table style=\"font-size:12px\">'\n html_table += self.make_time_in_status_rows(assignee_issues)\n html_table += '</table>' # Closing table tag\n #recipients.append(assignee_email)\n print \"Sending email to: %s\" % recipients\n self.send_email(recipients, html_table, assignee)",
"def send_bulk_course_email(entry_id, _xmodule_instance_args):\r\n # Translators: This is a past-tense verb that is inserted into task progress messages as {action}.\r\n action_name = ugettext_noop('emailed')\r\n visit_fcn = perform_delegate_email_batches\r\n return run_main_task(entry_id, visit_fcn, action_name)",
"def simplyapply(request, job, resume, mobile=False):\n apply_info = get_apply_info(request)\n if not apply_info['email']:\n if resume.contact and resume.contact.email:\n apply_info['email'] = resume.contact.email\n else:\n apply_info['email'] = 'Not Provided'\n\n apply_info['job_company'] = job.company\n apply_info['job_title'] = job.title\n apply_info['job_location'] = job.location\n apply_info['source'] = job.source if hasattr(job, '_jobpost') else 'Simply Hired' # JBB/Publishers get a different source in the email.\n\n if resume.source == 'Linkedin':\n attachment = get_pdf_resume(resume)\n else:\n # TODO: handle the case where the resume has no content entry.\n content = models.Content.objects.get(resume=resume.id)\n attachment = {}\n mimetypes.init()\n attachment['mimetype'] = mimetypes.guess_type(content.file_name)\n try:\n attachment['raw_resume'] = content.raw_resume.decode('utf-8').encode('latin-1')\n except UnicodeDecodeError:\n attachment['raw_resume'] = content.raw_resume\n attachment['filename'] = content.file_name\n\n subject = u\"Application for {0} at {1}\".format(job.title, job.company)\n send_email('Simply Hired <[email protected]>', job.apply_email, subject, EMAIL_BODY.format(**apply_info), attachment,\n reply_to=resume.contact.email if resume.contact.email else None)\n\n try:\n # JBB job.\n if hasattr(job, '_jobpost'):\n jbb.JobPostMetrics.objects.filter(jobpostid=job._jobpost.jobpostid).update(count_apply_email=F('count_apply_email')+1)\n\n # Log for generic tracking.\n log_apply(request, job, apply_info, attachment, resume, mobile)\n except Exception, msg:\n logger.exception('Error in writing to tracking: %s %s' % (Exception, msg))\n\n if resume.contact.email:\n send_confirmation(resume.contact.email, apply_info)\n\n return",
"def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending EMail to the configured email list\")",
"def submit(self, jobs):\n assert isinstance(jobs, list), 'Jobs must be type list'\n assert len(jobs) > 0, 'One or more jobs required'\n\n data = {'jobs': jobs}\n for j in data['jobs']:\n # generate a random UUID if absent\n if 'uuid' not in j:\n j['uuid'] = str(uuid1())\n\n # default missing fields\n j.update(dict(self._default_job_settings.items() + j.items()))\n\n self._job_schema.validate(jobs)\n\n try:\n self._api_post(self._scheduler_endpoint, data)\n return [j['uuid'] for j in data['jobs']]\n except HTTPError as e:\n raise JobClientError(e.message)",
"def SendResultTask(job_id):\n job = Job.objects.get(pk=job_id)\n owner = job.owner\n msg_plain = render_to_string('wordscraper/email.txt',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n msg_html = render_to_string('wordscraper/email.html',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n send_mail('Your CULTR web scraper results', msg_plain, '[email protected]',\n [job.email], html_message=msg_html, fail_silently=False)\n logger.info(\"Sent result email to owner of job %d.\" % job_id)",
"def send_email_users():\n\n # Get users emails\n users_emails = User.objects.exclude(\n Q(email='') |\n Q(email=None)\n ).values_list(\n 'email',\n flat=True\n )\n\n # Send email to each user\n # for email_user in users_emails:\n\n title = 'Se han calculado nuevos Hard Flag'\n msg = 'Actualmente se han agregado nuevos hard flag '\n msg += ' a la base de datos'\n\n email = EmailMessage(\n title,\n msg,\n to=users_emails\n )\n email.send()",
"def sendEmails(\n receiverName,\n retainedCompany,\n companyName,\n emailList,\n senderName,\n senderEmail,\n emailPassword,\n senderTitle,\n senderCompany,\n senderCompanyHomePage,\n senderPhone,\n port=465,\n returnHTML = True \n ):\n\n for emailToTry in emailList: \n # change back the next line after testing\n time.sleep(np.random.uniform(5,15)) # I introduced this because I was being rate limited, and I want to see if this will help avoid that - it seems to help\n print(f'trying {emailToTry}')\n message = MIMEMultipart('alternative')\n message['Subject'] = f'Engineering Positions at {companyName}' # change this back when ready to send actual emails\n message['From'] = senderEmail\n message['To'] = emailToTry # note that this only affects the headers - it does not affect to whom the message gets sent to\n\n [text, html] = emailTextHTML(receiverName, retainedCompany, companyName, senderName, senderTitle, senderCompany, senderEmail, senderCompanyHomePage, senderPhone, returnHTML=returnHTML)\n\n\n part1 = MIMEText(text, 'plain')\n part2 = MIMEText(html, 'html')\n\n message.attach(part1)\n message.attach(part2)\n\n # create a secure SSL context\n context = ssl.create_default_context()\n\n # now loop over each email message and extract what we need:\n with smtplib.SMTP_SSL('smtp.gmail.com', port, context=context) as server:\n # Using with smtplib.SMTP_SSL() as server: makes sure that the connection is automatically closed at the end of the indented code block. If port is zero, or not specified, .SMTP_SSL() will use the standard port for SMTP over SSL (port 465).\n server.login(senderEmail, emailPassword)\n server.sendmail(senderEmail, emailToTry, message.as_string())\n # the above line is how we actually change whom the message is sent to",
"def email(args):\n if args.name:\n add_user(name=args.name, email_address=args.email)\n\n if args.add_term:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=args.add_term.upper())\n if args.terms_from_file:\n with open(args.terms_from_file) as file:\n for line in file:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=line.strip().upper())\n if args.remove_term:\n Feed(Config.database).remove_search_term(email_address=args.email,\n term=args.remove_term)",
"def each_job(url, header, empty_list):\n time.sleep(2)\n \n response = requests.get(url, headers=header)\n soup = BeautifulSoup(response.text, 'html.parser')\n# code for scraping technology required\n# making target variable out of this column, doing try & except so i can later drop row from df\n tech = []\n try:\n job_tech = soup.find_all('section', {'class':'mb32'})[1]('a')\n for x in job_tech:\n tech.append(x.text)\n except IndexError:\n tech = np.nan\n# code for scraping overview of the posting\n overview = ''\n try:\n job_overview = soup.find_all('section', {'class':'mb32'})[2](['p', 'ul'])\n for y in job_overview:\n overview += y.text\n except IndexError:\n overview = np.nan\n# code for scraping the job position\n try:\n position = [soup.find_all('h1', {'class':'fs-headline1 mb4'})[0].text]\n except IndexError:\n position = np.nan\n# code for brief insight\n try:\n about = [soup.find('section', {'class':'mb32'})('div')[1].text]\n except (IndexError, TypeError):\n about = np.nan\n# creating dictionary for each job posting\n job_post_dict = {\n 'position':position,\n 'description':about,\n 'languages':tech,\n 'overview':overview}\n# creating list of job postings to later turn into a dataframe\n empty_list.append(job_post_dict)\n \n return empty_list",
"def send_tachycardia_email(df):\n\n # Set up email\n sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))\n from_email = Email(\"[email protected]\")\n to_email = Email(df['attending_email'])\n\n # Set up email subject\n subject = \"TACHYCARDIA ALERT: %s\" % df['patient_id']\n\n # Set up email body\n body = Content(\"text/plain\", \"Patient: %s \"\n \"(age: %s\\n) has a heart rate of \"\n \"%s at %s\"\n % (df['patient_id'],\n df['user_age'],\n df['heart_rate'][-1],\n df['time'][-1]))\n\n # Send email\n mail = Mail(from_email, subject, to_email, body)\n response = sg.client.mail.send.post(request_body=mail.get())\n print(response.status_code)\n print(response.body)\n print(response.headers)",
"def _auto_email_send(self):\n records = self.search([('send_by', '=', 'mail')])\n\n for supplier in records:\n send_at = datetime.combine(fields.Date.today(),\n float_to_time(supplier.automatic_email_time, supplier.moment, supplier.tz)).astimezone(pytz.UTC).replace(tzinfo=None)\n if supplier.available_today and fields.Datetime.now() > send_at:\n lines = self.env['lunch.order'].search([('supplier_id', '=', supplier.id),\n ('state', '=', 'ordered'), ('date', '=', fields.Date.today())])\n\n if lines:\n order = {\n 'company_name': lines[0].company_id.name,\n 'currency_id': lines[0].currency_id.id,\n 'supplier_id': supplier.partner_id.id,\n 'supplier_name': supplier.name,\n 'email_from': supplier.responsible_id.email_formatted,\n }\n\n _lines = [{\n 'product': line.product_id.name,\n 'note': line.note,\n 'quantity': line.quantity,\n 'price': line.price,\n 'toppings': line.display_toppings,\n 'username': line.user_id.name,\n } for line in lines]\n\n order['amount_total'] = sum(line.price for line in lines)\n\n self.env.ref('lunch.lunch_order_mail_supplier').with_context(order=order, lines=_lines).send_mail(supplier.id)\n\n lines.action_confirm()",
"def send_lead_task(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website):\n\n logger.info(\"in send lead mail task\")\n return send_lead_email(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website)",
"def send_email_with_service(cls, email_scheduler_object):\n email_cc = [{\"Email\": cc} for cc in email_scheduler_object.email_cc]\n email_bcc = [{\n \"Email\": bcc\n } for bcc in email_scheduler_object.email_bcc]\n\n data = {\n \"Messages\": [{\n \"From\": {\n \"Email\": DEFAULT_FROM_EMAIL,\n },\n \"To\": [{\n \"Email\": email_scheduler_object.email_to,\n }],\n \"Cc\": email_cc,\n \"Bcc\": email_bcc,\n \"Subject\": email_scheduler_object.email_subject,\n \"HTMLPart\": email_scheduler_object.\n email_body, # This is in HTMLPart because in TextPart if we put body then status does not get updated\n }]\n }\n\n result = cls.mailjet_send.send.create(data=data)\n return result.json()",
"def test_process_bn_email(app, session):\n # setup filing + business for email\n identifier = 'BC1234567'\n filing = prep_incorp_filing(session, identifier, '1', 'bn')\n business = Business.find_by_identifier(identifier)\n # sanity check\n assert filing.id\n assert business.id\n token = '1'\n # run worker\n with patch.object(AccountService, 'get_bearer_token', return_value=token):\n with patch.object(worker, 'send_email', return_value='success') as mock_send_email:\n worker.process_email(\n {'email': {'filingId': None, 'type': 'businessNumber', 'option': 'bn', 'identifier': 'BC1234567'}},\n app\n )\n # check email values\n assert '[email protected]' in mock_send_email.call_args[0][0]['recipients']\n assert '[email protected]' in mock_send_email.call_args[0][0]['recipients']\n assert mock_send_email.call_args[0][0]['content']['subject'] == \\\n f'{business.legal_name} - Business Number Information'\n assert mock_send_email.call_args[0][0]['content']['body']\n assert mock_send_email.call_args[0][0]['content']['attachments'] == []",
"def scrap_data_companies(self):\n list_job_offers = self.driver.find_elements_by_class_name(\n \"jobContainer\")\n jobs = []\n if len(list_job_offers) == 0:\n print(\"There is nothing to scrap for \", conf.URL_TO_SCRAPE,\n \"that was requested\")\n return\n\n for i, elt in enumerate(list_job_offers):\n\n self.remove_sign_up_prompt()\n self.remove_recommended_jobs()\n html_job_container = elt.get_attribute('innerHTML')\n time.sleep(2)\n name_company = get_name_company(elt.text)\n city_job = get_city_job(html_job_container)\n job_id = get_job_id(html_job_container)\n position_job = get_position(html_job_container)\n job_description = get_summary_job(position_job)\n\n if job_id is not None and name_company is not None:\n company = Company.Company(name_company)\n company_and_id_job = name_company + \"-\" + job_id\n self.current_path = os.path.join(self.date_path,\n company_and_id_job)\n os.mkdir(self.current_path)\n\n if i != 0:\n click_on_job_offer(\n elt) # link since we are already seeing it\n\n self.scrape_data_company(elt, company)\n company_id = company.insert_to_db(self.db_connection)\n job = JobOffer.JobOffer(job_id, company=company, city=city_job,\n position=position_job,\n description=job_description)\n job.insert_to_db(company_id, self.db_connection)\n jobs.append(job)\n print(job)\n else:\n logger.error(\"Job Id not found\")\n JobOffer.print_jobs(jobs)",
"def send_job_failure_email(job_id):\n mail_subject = 'Failed ML Job'\n mail_body = ((\n 'ML job %s has failed. For more information,'\n 'please visit the admin page at:\\n'\n 'https://www.oppia.org/admin#/jobs') % job_id)\n send_mail_to_admin(mail_subject, mail_body)\n other_recipients = (\n NOTIFICATION_EMAILS_FOR_FAILED_TASKS.value)\n system_name_email = '%s <%s>' % (\n feconf.SYSTEM_EMAIL_NAME, feconf.SYSTEM_EMAIL_ADDRESS)\n if other_recipients:\n email_services.send_bulk_mail(\n system_name_email, other_recipients,\n mail_subject, mail_body,\n mail_body.replace('\\n', '<br/>'))",
"def send_mail(email):\n return email.send()"
] | [
"0.6686441",
"0.573404",
"0.5680976",
"0.5586321",
"0.5568828",
"0.5565883",
"0.5497524",
"0.54533464",
"0.537631",
"0.5351467",
"0.5310948",
"0.5301148",
"0.52795285",
"0.5278291",
"0.5266971",
"0.52349377",
"0.5156166",
"0.51303834",
"0.51244825",
"0.5099825",
"0.5099411",
"0.5093604",
"0.50920093",
"0.5091304",
"0.50840986",
"0.5071165",
"0.506223",
"0.5058389",
"0.50583375",
"0.50443554"
] | 0.8165515 | 0 |
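For illustration only - a hedged sketch of calling the emailJobs function above, assuming the module's other helpers (sendEmails, pickle, numpy) are importable; the dataframe contents, credentials and file name are placeholders.
import pandas as pd

df = pd.DataFrame({
    "Organization Name": ["Example Robotics"],
    "Domain": ["example.com"],
})
emailJobs(
    df,
    retainedCompany="Acme Search Partners",
    senderName="Jane Doe",
    defaultSenderEmail="jane.doe@example.com",
    emailPassword="app-password",                       # placeholder credential
    senderTitle="Technical Recruiter",
    senderCompany="Acme Search Partners",
    senderCompanyHomePage="https://example.com",
    senderPhone="555-0100",
    noContactCompanyListPickleFileName="no_contact.pkl",
)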
Display HTML icon of OS distribution. | def show_os_icon(self):
if self.os == 0:
return "<i class='devicon-debian-plain'></i>"
elif self.os == 1:
return "<i class='devicon-redhat-plain'></i>"
else:
return "?" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def downloadicon_name(self):\n return 'platform_%s.gif' % \\\n re.sub(r'\\W', '_', self.context.getPlatform()).lower()",
"def icon(self) -> str:\n return ICON_SERVER",
"def icon(self):\n return \"mdi:hubspot\"",
"def icon(self):\n return ICON_BUS",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def icon(self):\n return ICON",
"def api_get_icon():\n pkg_name = request.args.get('pkg')\n if pkg_name:\n pkg_files = Database().db.get_pkg_files(pkg_name)\n for src in pkg_files:\n if src.startswith(\"/usr/share/icons/hicolor/32x32/apps/\"):\n return send_file(src, as_attachment=False)\n return send_file(\"static/images/null.gif\")\n else:\n src = request.args.get('i')\n if not os.path.isfile(src):\n #abort(404)\n return send_file(\"static/images/null.gif\")\n return send_file(src, as_attachment=False)",
"def icon(self):",
"def get_icon_title(self): # real signature unknown; restored from __doc__\n return \"\"",
"def icon(self):\n return None",
"def icon(self):\n return None",
"def get_icon_name(self):\n return 'gramps-notes'",
"def get_icon(self):\n raise NotImplementedError",
"def get_icon(self):\r\n raise NotImplementedError",
"async def icon(self, ctx: lifesaver.Context):\n if not ctx.guild.icon:\n await ctx.send(\"This server doesn't have a custom icon.\")\n return\n\n await ctx.send(ctx.guild.icon.replace(format=\"png\"))",
"def icon(self):\n return self.ICON",
"def icon(self):\n return self.ICON",
"def icon(self):\r\n try:\r\n return self.data['icon_url_base']+self.data['icon_url_name']\r\n except KeyError:\r\n return ''",
"def icon(self):\n if self.device_class:\n return None\n\n return ICONS.get(self.tesla_device.type)",
"def icon(self):\n if self._sensor_type == DEVICE_TYPE_DOORBELL:\n if self._camera_data[\"event_ring_on\"]:\n return \"mdi:bell-ring-outline\"\n return \"mdi:doorbell-video\"",
"def icon(self):\n return self._metadata[2]",
"def icon(self) -> str:\n return self._icon"
] | [
"0.6832323",
"0.68212587",
"0.6478877",
"0.6392684",
"0.63691825",
"0.63691825",
"0.63691825",
"0.63691825",
"0.63691825",
"0.63691825",
"0.63691825",
"0.63691825",
"0.63691825",
"0.63691825",
"0.63597417",
"0.63435024",
"0.63178104",
"0.6314043",
"0.6314043",
"0.6243124",
"0.6194222",
"0.61826044",
"0.6180889",
"0.61762774",
"0.61762774",
"0.61458737",
"0.6119743",
"0.6058779",
"0.6055613",
"0.60552055"
] | 0.83448386 | 0 |
Sets "_total_posts" as amount of posts in the VK domain. | async def _set_total_posts_in_domain(self) -> None:
logger.info('Getting total posts in "vk.com/%s"...', self.vk_domain)
params = {
"v": settings.VKAPI_VERSION,
"access_token": settings.VKAPI_TOKEN,
"count": 1, # Enough just to get total post in domain.
"domain": self.vk_domain,
}
# Data fetching.
response = await vk_asynchronous_request(
self._url_wall_get,
params,
domain=self.vk_domain,
)
self._total_posts_in_domain = response["response"]["count"]
logger.info("Total posts in VK domain: %s", self._total_posts_in_domain) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def posts_count(self):\n return Post.objects.filter(user__username = self.user.username).count()",
"def num_posts(self):\n\n return FlicketTicket.query.filter_by(started_id=self.id).count() + FlicketPost.query.filter_by(\n user_id=self.id).count()",
"def _update_count(self):\n self._count = len(self._items)",
"def _set_all_page_num(self):\n res = get(self.url, headers=self.headers)\n post_num = re.findall(r'微博\\[(\\d+)\\]', res.text)[0]\n page_num = re.findall(r'\\/(\\d+)页', res.text)[0]\n self._current_page -= 1\n self._all_page_num = int(page_num)\n self._all_post_num = int(post_num)",
"def page_count(self):\r\n postcount = self.post_set.count()\r\n max_pages = (postcount / get_paginate_by())\r\n if postcount % get_paginate_by() != 0:\r\n max_pages += 1\r\n return max_pages",
"async def set_post_number(self, ctx: commands.Context, post_num: int = 0):\n await ctx.cfg_channel.current_post_num.set(post_num)\n await ctx.send(\"Current auto-post number has been set to {}\".format(post_num))\n await ctx.cfg_channel.last_post_time.set(0)",
"def total_hits(self, total_hits):\n\n self._total_hits = total_hits",
"def get_number_of_posts(self):\n return self._performed_actions[WRITE_POST]",
"def __len__(self):\n return len(self._blogposts)",
"def set_article_count(cls, count):\n return cls.db.set(\"article_count\", count)",
"def page(self):\n data = super(RunningCountPaginator, self).page()\n try:\n obj_count = len(data[self.collection_name])\n if obj_count:\n obj_count += self.get_offset()\n else:\n obj_count = -1\n data['meta']['running_count'] = obj_count\n del data['meta']['total_count']\n except KeyError:\n pass\n return data",
"def set_n_comments_observed_task2(posts):\n for p in posts:\n p['n_comments_observed'] = get_hostile_indices(p)[0] + 1",
"def new_posts(self, number_posts=5) -> Type[QuerySet]:\n return self.published_posts()[:number_posts]",
"def add_main_post_into_ds(post):\n global ds_size\n if post is not None and not post.is_in_ds:\n duplicates = find_all_duplicates_for_post(post)\n if (len(duplicates)) > 0:\n add_post_into_ds(post, ds_size, DS_MAIN_POST)\n add_duplicates_into_ds(duplicates, ds_size)\n ds_size += 1",
"def get_num_postings(\n res: List[Dict[str, Any]],\n account_id: str = MAIN_ACCOUNT,\n balance_dimensions: BalanceDimensions = None,\n) -> int:\n balance_dimensions = balance_dimensions or BalanceDimensions()\n return len(get_postings(res, account_id, balance_dimensions))",
"def postings(self, postings):\n if postings:\n self._postings = postings",
"def setCount(self, num):\n self.count=num",
"def set_total(self):\n\n self.total = 0\n for item in self.items.all():\n self.total += item.price\n self.save()",
"def get_vote_count(self, post):\n return post.vote_set.count()",
"def count(self, value):\n \n self._count = int(value)",
"def update_count(self):\n pass",
"def add(self, posts):\n for post in posts:\n self._feed.add(FeedEntry(\n summary=post.summary,\n title=post.title,\n title_type='html',\n url=post.url,\n updated=post.date,\n ))",
"def num_links(self, num_links):\n self._num_links = num_links",
"def count(self, count: int) -> None:\n self._count = count",
"def correct_counts():\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n monitors = mongo.db[app.config['MONITORS_COLLECTION']]\n unique = articles.distinct('feed_source', dict())\n for link in unique:\n count = articles.count({'feed_source': link})\n monitors.update({'metadata.rss_link': link}, {'$set': {'hits': count}})",
"def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n post = get_object_or_404(BlogPost, url=kwargs['slug'])\n\n if not self.request.user.is_authenticated:\n BlogPost.objects.filter(pk=post.pk).update(\n views_count=F('views_count') + 1,\n real_views_count=F('real_views_count') + 1\n )\n\n data['post'] = post\n return data",
"def count(self, count: int):\n\n self._count = count",
"def _count(self):\n if self._count_valid:\n return self._total_results\n\n url = self._build_url(\"/_search\")\n request = self._build_request(0, -1)\n resp = self._cb.post_object(url, body=request)\n result = resp.json()\n\n self._total_results = result[\"num_found\"]\n self._count_valid = True\n\n return self._total_results",
"def _count(self):\n if self._count_valid:\n return self._total_results\n\n url = self._build_url(\"/_search\")\n request = self._build_request(0, -1)\n resp = self._cb.post_object(url, body=request)\n result = resp.json()\n\n self._total_results = result[\"num_found\"]\n self._count_valid = True\n\n return self._total_results",
"def posts(self, limit=100, all=False):\n source, edge = self.id, \"feed\"\n return lazygen(Post, source, edge,\n limit=limit, get_all=all)"
] | [
"0.63555706",
"0.6204205",
"0.56337523",
"0.55793494",
"0.5573961",
"0.5543833",
"0.5520672",
"0.55201143",
"0.5465146",
"0.5398032",
"0.53932655",
"0.53263414",
"0.52809906",
"0.52755684",
"0.5259448",
"0.5227877",
"0.51914954",
"0.5175302",
"0.51708394",
"0.5170826",
"0.51669246",
"0.5157397",
"0.5154564",
"0.5126761",
"0.5123188",
"0.5097314",
"0.50963193",
"0.5094919",
"0.5094919",
"0.5079578"
] | 0.84554565 | 0 |
Fetches posts from the VK domain asynchronously and puts them into the "posts" attribute. | async def fetch_posts(self) -> None:
async def fetch_posts_for_offset(offset) -> list:
logger.info(
"(offset %i) Start fetching posts from vk.com/%s...",
offset,
self.vk_domain,
)
# VK Script code for /execute method.
vks_code = get_wall_post_template.substitute(
{
"domain": self.vk_domain,
"offset": offset,
"posts_per_portion": self._posts_per_portion,
"execution_times": self._execution_times,
}
)
params = {
"v": settings.VKAPI_VERSION,
"access_token": settings.VKAPI_TOKEN,
"code": vks_code,
}
url = self._url_execute
# Posts fetching.
resp_json = await vk_asynchronous_request(
url,
params,
domain=self.vk_domain,
offset=offset,
)
logger.info(
"(offset %i) End fetching posts from vk.com/%s...",
offset,
self.vk_domain,
)
# Gathered posts handling.
posts_from_vk = resp_json["response"]["items"]
posts = posts_as_schemas(posts_from_vk)
del posts_from_vk
return posts
# Checks and preparations.
await self._set_total_posts_in_domain()
if not self._total_posts_in_domain:
return
# Creating tasks for fetching.
tasks = []
posts_per_task = self._posts_per_portion * self._execution_times
offsets = list(range(0, self._total_posts_in_domain, posts_per_task))
for offset in offsets:
tasks.append(asyncio.create_task(fetch_posts_for_offset(offset)))
# Running tasks.
logger.info("Start fetching posts from vk.com/%s...", self.vk_domain)
results = await asyncio.gather(*tasks)
logger.info("End fetching posts from vk.com/%s...", self.vk_domain)
# Flatting results from many tasks into one list.
self._posts = [post for result in results for post in result]
# Final actions.
if self.sort_by_likes:
self._posts = list(sorted(self.posts, key=lambda p: p.likes, reverse=True))
if self.amount_to_fetch:
self._posts = self._posts[: self.amount_to_fetch] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_posts():\n get_chain_address = F\"{CONNECTED_NODE_ADDRESS}/chain\"\n response = requests.get(get_chain_address)\n if response.status_code == 200:\n content = []\n chain = json.loads(response.content)\n for block in chain[\"chain\"]:\n for tx in block[\"transactions\"]:\n tx[\"index\"] = block[\"index\"]\n tx[\"hash\"] = block[\"previous_hash\"]\n content.append(tx)\n \n global posts \n posts = sorted(content,\n key=lambda k: k['timestamp'],\n reverse=True)",
"def fetch_posts():\n get_chain_address = \"{}/chain\".format(CONNECTED_NODE_ADDRESS)\n response = requests.get(get_chain_address)\n if response.status_code == 200:\n content = []\n chain = json.loads(response.content)\n for pos, block in enumerate(chain[\"chain\"]):\n if pos ==0:\n pass\n else:\n for tx in list(block[\"transactions\"].values()):\n tx[\"index\"] = block[\"index\"]\n tx[\"hash\"] = block[\"previous_hash\"]\n content.append(tx)\n\n global posts\n posts = sorted(content, key=lambda k: k['timestamp'],\n reverse=True)",
"def get_posts():\n url = app.config['POSTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_posts(response.json())\n raise RuntimeError('Error in retrieving posts.')",
"async def _set_total_posts_in_domain(self) -> None:\n\n logger.info('Getting total posts in \"vk.com/%s\"...', self.vk_domain)\n\n params = {\n \"v\": settings.VKAPI_VERSION,\n \"access_token\": settings.VKAPI_TOKEN,\n \"count\": 1, # Enough just to get total post in domain.\n \"domain\": self.vk_domain,\n }\n\n # Data fetching.\n response = await vk_asynchronous_request(\n self._url_wall_get,\n params,\n domain=self.vk_domain,\n )\n\n self._total_posts_in_domain = response[\"response\"][\"count\"]\n logger.info(\"Total posts in VK domain: %s\", self._total_posts_in_domain)",
"def get_posts(self): #return list of posts that are associated with this blog_id\n return Post.find_posts_for_blog_id(self.blog_id) #this will return a list of posts objects",
"def remotePostList(host, posts, public):\n post_list = list()\n posts = posts.get('posts')\n for post in posts:\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('source')\n comments = remoteCommentList(post)\n count = post.get('count')\n next = \"{}/api/posts/{}/comments\".format(DOMAIN, id)\n if host.endswith(\"/\"):\n host = host[:-1]\n source = \"{}/posts/{}\".format(host, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin,\n 'source': source, 'count': count, 'next': next}\n post_list.append(post_dict)\n return post_list",
"def _urlfetch_async(**kwargs):\n return ndb.get_context().urlfetch(**kwargs)",
"def get_posts(wp):\n from wordpress_xmlrpc.methods.posts import GetPosts\n\n all_posts = []\n\n offset = 0\n increment = 20\n while True:\n posts = wp.call(GetPosts({'number': increment, 'offset': offset, 'post_type': 'post'}))\n if len(posts) == 0:\n break # no more posts returned\n for post in posts:\n all_posts.append(post)\n\n offset = offset + increment\n\n return all_posts",
"def get(self):\n return get_all_posts()",
"def get_posts(self):\n return self.blog_posts.all()",
"def posts(self, limit=100, all=False):\n source, edge = self.id, \"feed\"\n return lazygen(Post, source, edge,\n limit=limit, get_all=all)",
"def get_posts():\n db = psycopg2.connect(\"dbname=forum\")\n c = db.cursor()\n query = \"SELECT content, time FROM posts ORDER BY time DESC\"\n c.execute(query)\n rows = c.fetchall()\n POSTS = rows\n db.close()\n return POSTS",
"def posts_get():\n \n\n # Get and filter the posts from the database\n songs = session.query(models.Song).all()\n \n # Convert the posts to JSON and return a response\n data = json.dumps([song.as_dictionary() for song in songs])\n return Response(data, 200, mimetype=\"application/json\")",
"def fetch_post(page_num):\n req = POST_API.format(page_num=page_num)\n try:\n response = requests.get(req)\n response.raise_for_status()\n posts = response.json()\n objects = list()\n for json_post in posts:\n fetch_author.delay(json_post.get('author'), json_post.get(\"_links\", dict()).get('authors', []))\n title = BeautifulSoup(json_post.get('title', dict()).get('rendered', \"\"), \"lxml\").text\n content = BeautifulSoup(json_post.get('content', dict()).get('rendered', \"\"), \"lxml\").text\n post = Article(id=json_post.get('id'),\n date=json_post.get('date_gmt', datetime.now()),\n modified=json_post.get('modified_gmt', datetime.now()),\n title=title,\n content=content,\n author_id=json_post.get('author')\n )\n objects.append(post)\n s = Session()\n s.bulk_save_objects(objects)\n s.commit()\n\n except requests.exceptions.HTTPError as error:\n raise Reject(error)\n except Exception as ex:\n raise Reject(ex)",
"def get_posts(url):\r\n feed = feedparser.parse(url)\r\n return feed.entries",
"def get_posts(self):\r\n postList = []\r\n for tag in self.setting.imgurTags:\r\n try:\r\n req = requests.get('%s%s' % (self.setting.tagLink, tag), headers=self.setting.imgurHeaders)\r\n for post in req.json()['data']['items']:\r\n p = self.json_to_post(post, tag)\r\n if p is not None:\r\n postList.append(p)\r\n except Exception as e:\r\n self.logger.log(logger.LogLevel.CRITICAL, 'imgur.get_posts exception(%s): %s' % (tag, e))\r\n break\r\n return postList",
"def getPosts(self):\n # TODO do we really need threading here or it can just do fine without\n allPosts = []\n threads = []\n feedTime = self.startTime\n for oneUrl in self.newsFeeds:\n thread = FeedparserThread(oneUrl, self.startTime, allPosts)\n threads.append(thread)\n thread.start()\n\n # Joining all threads into one\n for thread in threads:\n thread.join()\n\n return allPosts",
"def getMyPosts():\n \n cur, user_id = initialise(3)\n cur.execute(\"SELECT username FROM users WHERE id = ?\", [user_id])\n name = cur.fetchall()[0][0]\n cur.execute(\"SELECT * FROM posts WHERE name = ?\", [name])\n posts = cur.fetchall()\n return posts",
"def task_fetch_posts(\n author_id,\n count=28,\n posts_out='data/posts_data.xlsx'):\n\n # Create query instances for posts\n post_query = Query(PostParser)\n\n # Query posts data\n post_data = post_query.query_all(POSTS_QUERY_HASH_PARAM, {\n \"id\": author_id,\n \"first\": 50,\n }, count)\n logger.info(\"Count of posts data: %d\" % len(post_data))\n\n # Save the posts data\n post_data_df = pd.DataFrame(post_data)\n post_data_df.to_excel(posts_out, encoding='utf-8', index=False)\n logger.info(\"Save the posts data to %s.\" % posts_out)",
"def get_posts(account, pages=10, timeout=5, sleep=0):\n global _session, _timeout\n\n url = f'{_base_url}/{account}/posts/'\n\n _session = HTMLSession()\n _session.headers.update(_headers)\n\n _timeout = timeout\n response = _session.get(url, timeout=_timeout)\n html = response.html\n cursor_blob = html.html\n\n while True:\n for article in html.find('article'):\n yield _extract_post(article)\n\n pages -= 1\n if pages == 0:\n return\n\n cursor = _find_cursor(cursor_blob)\n next_url = f'{_base_url}{cursor}'\n\n if sleep:\n time.sleep(sleep)\n\n try:\n response = _session.get(next_url, timeout=timeout)\n response.raise_for_status()\n data = json.loads(response.text.replace('for (;;);', '', 1))\n except (RequestException, ValueError):\n return\n\n for action in data['payload']['actions']:\n if action['cmd'] == 'replace':\n html = HTML(html=action['html'], url=_base_url)\n elif action['cmd'] == 'script':\n cursor_blob = action['code']",
"def api_get_thread_posts(request, opening_post_id):\n\n opening_post = get_object_or_404(Post, id=opening_post_id)\n thread = opening_post.get_thread()\n posts = thread.get_replies()\n\n json_data = {\n 'posts': [],\n 'last_update': None,\n }\n json_post_list = []\n\n for post in posts:\n json_post_list.append(_get_post_data(post.id))\n json_data['last_update'] = datetime_to_epoch(thread.last_edit_time)\n json_data['posts'] = json_post_list\n\n return HttpResponse(content=json.dumps(json_data))",
"def recent_posts(self):\n\n try:\n jsondoc = json.load(urllib.urlopen(\"http://reddit.com/user/%s.json\" % self.username))\n except:\n raise self.DoesNotExist\n \n posts = []\n for item in jsondoc['data']['children']:\n if item['kind'] == 't1':\n posts.append(Comment(item['data']))\n elif item['kind'] == 't3':\n posts.append(item['data'])\n\n return posts",
"def get_public_posts(server_posts):\n public_list = server_posts\n servers = Server.objects.all()\n\n for server in servers:\n if server.username and server.password:\n host = server.hostname\n if not host.endswith(\"/\"):\n host = host + \"/\"\n server_api = \"{}posts\".format(host)\n try:\n s = requests.Session()\n # https://stackoverflow.com/questions/15431044/can-i-set-max-retries-for-requests-request\n retries = Retry(total=5,\n backoff_factor=0.1,\n status_forcelist=[500, 502, 503, 504])\n\n s.mount('http://', HTTPAdapter(max_retries=retries))\n s.mount('https://', HTTPAdapter(max_retries=retries))\n\n r = s.get(server_api, auth=(server.username, server.password))\n\n if r.status_code == 200:\n posts = remotePostList(server.hostname, r.json(), public_list)\n public_list.extend(posts)\n public_list = sorted(public_list, key=lambda k: k['published'], reverse=True)\n public_list = [next(v) for k, v in groupby(public_list, lambda d: d[\"id\"])]\n\n except:\n print('error')\n return public_list",
"def generatePosts(self,**kwargs):\n oldestTimeSoFar = None\n while True:\n if oldestTimeSoFar is None:\n items = self.getPosts(**kwargs)\n else:\n items = self.getPosts(before_time=oldestTimeSoFar,**kwargs)\n if not items:\n return\n for item in items:\n yield item\n oldestTimeSoFar = item['published_at']\n time.sleep(0.5)",
"def get_posts(account, pages=10, timeout=5, sleep=0):\n\n url = f'{_base_url}/{account}/posts/'\n\n session = HTMLSession()\n session.headers.update({'Accept-Language': 'en-US,en;q=0.5'})\n\n response = session.get(url, timeout=timeout)\n html = response.html\n cursor_blob = html.html\n\n while True:\n for article in html.find('article'):\n yield _extract_post(article)\n\n pages -= 1\n if pages == 0:\n return\n\n cursor = _find_cursor(cursor_blob)\n next_url = f'{_base_url}{cursor}'\n\n if sleep:\n time.sleep(sleep)\n\n try:\n response = session.get(next_url, timeout=timeout)\n response.raise_for_status()\n data = json.loads(response.text.replace('for (;;);', '', 1))\n except (RequestException, ValueError):\n return\n\n for action in data['payload']['actions']:\n if action['cmd'] == 'replace':\n html = HTML(html=action['html'], url=_base_url)\n elif action['cmd'] == 'script':\n cursor_blob = action['code']",
"def get_post(self):\n\n if self.gotten: return\n self.get_text()\n self.get_keywords()\n self.get_poll()\n self.get_schedule()\n self.get_expiration()\n self.get_files()\n self.set_text()\n if Settings.get_performer_category() or self.hasPerformers:\n self.get_performers()\n else:\n self.performers = \"unset\"\n self.gotten = True",
"async def scrape_and_post(self):\n # Scrape latest challenge posts\n challenges = self.scraper.scrape()\n await self._update_rooms(challenges)",
"def get(self):\n\n self.render_posts()",
"async def getPostData(self, PostID):\n url = self.urlGen(id=str(PostID))\n XML =None\n with async_timeout.timeout(10):\n async with self.session.get(url=url) as XML:\n XML = await XML.read()\n XML = self.ParseXML(ET.XML(XML))\n data = XML['posts']['post']\n return data\n return None",
"def run(self) -> None:\n self.urls_list = self._create_api_ulr_list()\n self.results = self._sort_results(\n AsyncGetAPI(\n self.urls_list, self.threads, max_requests=self.max_requests\n ).results\n )"
] | [
"0.655602",
"0.65269387",
"0.6413515",
"0.6018604",
"0.5916808",
"0.59025943",
"0.5787382",
"0.57700604",
"0.5734009",
"0.57019544",
"0.5652772",
"0.56395626",
"0.5616751",
"0.5613428",
"0.56133306",
"0.56015396",
"0.5592875",
"0.5583471",
"0.5548579",
"0.551852",
"0.55078864",
"0.549857",
"0.5476955",
"0.54635954",
"0.5457611",
"0.5433103",
"0.5420551",
"0.5409774",
"0.54038525",
"0.5379112"
] | 0.7330048 | 0 |
Creates posts as Pydantic schemas from the post data returned by the VK API. | def posts_as_schemas(posts_from_vk: list[dict]) -> list[Post]:
posts = []
for post_from_vk in posts_from_vk:
try:
post = Post(
date=post_from_vk["date"],
likes=post_from_vk["likes"]["count"],
text=post_from_vk["text"],
path=f"wall{post_from_vk['owner_id']}_" f"{post_from_vk['id']}",
photos=[],
videos=[],
)
except KeyError as exc:
logger.error("No key %s for post: %s", exc, post_from_vk)
continue
# Collect attachments (photos, videos etc.).
if "attachments" in post_from_vk:
attachments = post_from_vk["attachments"]
for attachment in attachments:
if attachment["type"] == "photo":
try:
photo = PostPhoto(url="")
photo.url = attachment["photo"]["sizes"][-1]["url"]
post.photos.append(photo)
except KeyError as exc:
logger.error("No key %s for photo: %s", exc, post_from_vk)
elif attachment["type"] == "video":
video = PostVideo(first_frame_url="")
video_from_vk = attachment["video"]
if "first_frame" in video_from_vk:
video.first_frame_url = video_from_vk["first_frame"][-1]["url"]
elif "image" in video_from_vk:
video.first_frame_url = video_from_vk["image"][-1]["url"]
else:
logger.error("No video image found: %s", post)
continue
post.videos.append(video)
posts.append(post)
return posts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def posts_post():\n data = request.json\n\n try:\n validate(data, post_schema)\n except ValidationError as error:\n data = {\"message\": error.message}\n return Response(json.dumps(data), 422, mimetype=\"application/json\")\n\n post = Post(title=data[\"title\"], body=data[\"body\"])\n session.add(post)\n session.commit()\n\n data = json.dumps(post.as_dictionary())\n headers = {\"Location\": url_for(\"post_get\", id=post.id)}\n\n return Response(data, 201, headers=headers, mimetype=\"application/json\")",
"def remotePostCreate(host, post):\n post = post.get('posts')[0]\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('origin')\n count = post.get('count')\n comments = remoteCommentList(post)\n source = \"{}/api/posts/{}\".format(DOMAIN, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin, 'count': count,\n 'source': source}\n return post_dict",
"def postCreate(post):\n post_list = list()\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n # visible_to = list(post.visibleTo)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list",
"def marshal_posts(shard, post_list):\n out = []\n for post in post_list:\n post_dict = dict(\n shardId=shard,\n archiveType=models.Post.ARCHIVE_REVERSE_MAPPING[post.archive_type],\n nickname=post.nickname,\n title=post.title,\n body=post.body,\n postTimeMs=models.datetime_to_stamp_ms(post.post_time),\n sequenceId=getattr(post, 'sequence', None),\n newTopicId=post.new_topic,\n postId=post.post_id)\n out.append(post_dict)\n return out",
"def get_posts():\n url = app.config['POSTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_posts(response.json())\n raise RuntimeError('Error in retrieving posts.')",
"def send_postings_to_api(raw_postings):\n # Start by extracting the posting information we need from the raw\n # postings.\n\n def _copy(src_dict, src_key, dst_dict, dst_key):\n if src_dict.get(src_key) != None:\n dst_dict[dst_key] = src_dict[src_key]\n\n def _dateToSecs(date_str):\n \"\"\" Convert given timestamp string to number of seconds in unix time.\n \"\"\"\n if date_str not in [\"\", None]:\n timestamp = datetime.datetime.strptime(date_str,\n \"%Y-%m-%dT%H:%M:%SZ\")\n delta = timestamp - datetime.datetime(1970, 1, 1)\n return (delta.days*24*3600) + delta.seconds\n else:\n return None\n\n postings = []\n for id,raw_posting in raw_postings:\n posting = {}\n _copy(raw_posting, 'source', posting, 'source')\n _copy(raw_posting, 'category', posting, 'category')\n\n location = {}\n if \"location\" in raw_posting:\n raw_location = raw_posting['location']\n _copy(raw_location, 'latitude', location, 'lat')\n _copy(raw_location, 'longitude', location, 'long')\n _copy(raw_location, 'accuracy', location, 'accuracy')\n _copy(raw_location, 'countryCode', location, 'country')\n _copy(raw_location, 'stateCode', location, 'state')\n _copy(raw_location, 'metroCode', location, 'metro')\n _copy(raw_location, 'regionCode', location, 'region')\n _copy(raw_location, 'countyCode', location, 'county')\n _copy(raw_location, 'cityCode', location, 'city')\n _copy(raw_location, 'localityCode', location, 'locality')\n _copy(raw_location, 'zipCode', location, 'zipcode')\n posting['location'] = location\n\n _copy(raw_posting, 'sourceId', posting, 'external_id')\n _copy(raw_posting, 'sourceUrl', posting, 'external_url')\n _copy(raw_posting, 'heading', posting, 'heading')\n _copy(raw_posting, 'body', posting, 'body')\n _copy(raw_posting, 'html', posting, 'html')\n\n if \"postingTimestamp\" in raw_posting:\n posting['timestamp'] = _dateToSecs(raw_posting['postingTimestamp'])\n if \"expirationTimestamp\" in raw_posting:\n posting['expires'] = _dateToSecs(raw_posting['expirationTimestamp'])\n\n _copy(raw_posting, 'language', posting, 'language')\n _copy(raw_posting, 'price', posting, 'price')\n _copy(raw_posting, 'currency', posting, 'currency')\n\n images = []\n if \"images\" in raw_posting:\n for raw_image in raw_posting['images']:\n image = {}\n _copy(raw_image, 'thumbnail', image, 'thumbnail')\n _copy(raw_image, 'full', image, 'full')\n if len(image) > 0:\n images.append(image)\n posting['images'] = images\n\n annotations = {}\n if \"annotations\" in raw_posting:\n for key,value in raw_posting['annotations'].items():\n annotations[key] = value\n posting['annotations'] = annotations\n\n status = {}\n if \"flags\" in raw_posting:\n flags = raw_posting['flags']\n\n if flags & 1 == 1:\n status['offered'] = True\n elif flags & 2 == 2:\n status['lost'] = True\n elif flags % 4 == 4:\n status['stolen'] = True\n elif flags % 8 == 8:\n status['found'] = True\n posting['status'] = status\n\n _copy(raw_posting, 'immortal', posting, 'immortal')\n\n postings.append(posting)\n\n # Send the postings off to the Posting API.\n\n request = {'postings' : postings}\n\n print \"Sending...\"\n\n response = requests.post(POSTING_URL,\n data=json.dumps(request),\n headers={'content-type' : \"application/json\"})\n\n print \"got response\"\n\n if response.status_code != 200:\n print \"Unexpected response:\" + str(response.status_code)\n print\n print response.text\n return None\n\n if response.headers['content-type'] != \"application/json\":\n print \"Server didn't return JSON data!\"\n print\n print response.text\n return None\n\n 
response = response.json()\n\n # Check the response to see which postings failed (if any).\n\n num_sent = 0 # initially.\n if response != None:\n if \"responses\" in response:\n posting_errors = response['posting_errors']\n for i in range(len(posting_errors)):\n if posting_errors[i] != None:\n for key in postings[i].keys():\n print \" %s : %s\" % (key, repr(postings[i][key]))\n print \"--> failed, reason = \" + posting_errors[i]\n print\n else:\n num_sent = num_sent + 1\n\n return response.get(\"wait_for\")",
"def posts(self, limit=100, all=False):\n source, edge = self.id, \"feed\"\n return lazygen(Post, source, edge,\n limit=limit, get_all=all)",
"def post(self):\n data = request.json\n return create_new_blog(data=data)",
"def serialize_posts_data_v2(influencer, posts, length_limit=30, highlighted_ids=[], **kw):\n from debra import serializers\n from debra import feeds_helpers\n from debra import constants\n\n request = kw.get('request')\n brand = request.visitor[\"base_brand\"] if request else None\n\n posts_data = []\n urls = set()\n posts = list(posts)\n\n for post in posts:\n if post.url in urls:\n continue\n urls.add(post.url)\n\n feed_json = feeds_helpers.get_feed_handler_for_platform(\n get_post_platform(post))\n\n post_data = feed_json(None,\n for_single_post=post,\n length_limit=length_limit\n )\n\n if post_data is None:\n continue\n\n post_data[\"blog_name\"] = serializers.unescape(influencer.blogname if influencer else post.influencer.blogname)\n post_data[\"title\"] = post.title\n post_data[\"platform\"] = get_post_platform(post)\n\n if brand and brand.flag_show_dummy_data:\n post_data['url'] = constants.FAKE_POST_DATA['url']\n post_data['title'] = constants.FAKE_POST_DATA['title']\n\n if post.id in highlighted_ids:\n post_data[\"highlight\"] = True\n if post.create_date:\n post_data[\"create_date\"] = post.create_date.strftime(\"%b. %e, %Y\")\n if influencer:\n post_data['user'] = influencer.feed_stamp\n else:\n post_data['user'] = post.influencer.feed_stamp\n posts_data.append(post_data)\n return posts_data",
"def generate_post(self):\n post = {'title': self.generate_title(), 'draft': False}\n for k in ('blog', 'id', 'labels', 'categories', 'draft'):\n if k not in self.header:\n continue\n if k == 'blog':\n post[k] = {'id': self.header[k]}\n else:\n post[k] = self.header[k]\n return post",
"def remotePostList(host, posts, public):\n post_list = list()\n posts = posts.get('posts')\n for post in posts:\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('source')\n comments = remoteCommentList(post)\n count = post.get('count')\n next = \"{}/api/posts/{}/comments\".format(DOMAIN, id)\n if host.endswith(\"/\"):\n host = host[:-1]\n source = \"{}/posts/{}\".format(host, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin,\n 'source': source, 'count': count, 'next': next}\n post_list.append(post_dict)\n return post_list",
"def save_posts(self, posts):\n return self.collection.insert_many(map(lambda post: post.serialize(), posts))",
"def PostData(title: str, body: str) -> dict:\n post = Posts(title=title, body=body)\n db.session.add(post)\n db.session.commit()\n return {\"status\": 200, \"message\": \"Data Posted successfully\"}",
"def post(self, post_id=None):\n\n if post_id:\n abort(400)\n else:\n args = parsers.post_post_parser.parse_args(strict=True)\n\n new_post = Post(args['title'])\n new_post.text = args['text']\n # new_post.user = user\n\n if args['tags']:\n for item in args['tags']:\n tag = Tag.query.filter_by(name=item).first()\n # If the tag already exist, append.\n if tag:\n new_post.tags.append(tag)\n # If the tag not exist, create the new one.\n # Will be write into DB with session do.\n else:\n new_tag = Tag(item)\n new_post.tags.append(new_tag)\n db.session.add(new_post)\n db.session.commit()\n return (new_post.id, 201)",
"def postList(posts):\n post_list = list()\n for post in posts:\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list",
"def post_to_object(self, post, remove_id_prefix=False):\n id = post.get('id')\n if not id:\n return {}\n\n post_type = post.get('type')\n status_type = post.get('status_type')\n url = self.post_url(post)\n picture = post.get('picture')\n display_name = None\n message = (post.get('message') or post.get('story') or\n post.get('description') or post.get('name'))\n\n data = post.get('data', {})\n for field in ('object', 'song'):\n obj = data.get(field)\n if obj:\n id = obj.get('id')\n post_type = obj.get('type')\n url = obj.get('url')\n display_name = obj.get('title')\n\n object_type = OBJECT_TYPES.get(post_type)\n author = self.user_to_actor(post.get('from'))\n link = post.get('link', '')\n\n if link.startswith('/gifts/'):\n object_type = 'product'\n if not object_type:\n if picture and not message:\n object_type = 'image'\n else:\n object_type = 'note'\n\n obj = {\n 'id': self.tag_uri(str(id)),\n 'objectType': object_type,\n 'published': util.maybe_iso8601_to_rfc3339(post.get('created_time')),\n 'updated': util.maybe_iso8601_to_rfc3339(post.get('updated_time')),\n 'author': author,\n 'content': message,\n # FB post ids are of the form USERID_POSTID\n 'url': url,\n 'image': {'url': picture},\n 'displayName': display_name,\n 'fb_object_id': post.get('object_id'),\n }\n\n privacy = post.get('privacy', {})\n if isinstance(privacy, dict):\n privacy = privacy.get('value')\n if privacy is not None:\n # privacy value '' means it doesn't have an explicit audience set, so i\n # *think* it inherits from its parent. TODO: use that value as opposed to\n # defaulting to public.\n public = privacy.lower() in ('', 'everyone', 'open')\n obj['to'] = [{'objectType': 'group',\n 'alias': '@public' if public else '@private'}]\n\n # tags and likes\n tags = itertools.chain(post.get('to', {}).get('data', []),\n post.get('with_tags', {}).get('data', []),\n *post.get('message_tags', {}).values())\n obj['tags'] = [self.postprocess_object({\n 'objectType': OBJECT_TYPES.get(t.get('type'), 'person'),\n 'id': self.tag_uri(t.get('id')),\n 'url': self.object_url(t.get('id')),\n 'displayName': t.get('name'),\n 'startIndex': t.get('offset'),\n 'length': t.get('length'),\n }) for t in tags]\n\n obj['tags'] += [self.postprocess_object({\n 'id': self.tag_uri('%s_liked_by_%s' % (id, like.get('id'))),\n 'url': url,\n 'objectType': 'activity',\n 'verb': 'like',\n 'object': {'url': url},\n 'author': self.user_to_actor(like),\n 'content': 'likes this.',\n }) for like in post.get('likes', {}).get('data', [])]\n\n # \"See Original\" links\n post_actions = post.get('actions',[])\n see_orig_actions = (act for act in post_actions\n if act.get('name', '').lower() in SEE_ORIGINAL_ACTIONS)\n obj['tags'] += [self.postprocess_object({\n 'objectType': 'article',\n 'url': act.get('link'),\n 'displayName': act.get('name')\n }) for act in see_orig_actions]\n\n # is there an attachment? prefer to represent it as a picture (ie image\n # object), but if not, fall back to a link.\n att = {\n 'url': link if link else url,\n 'image': {'url': picture},\n 'displayName': post.get('name'),\n 'summary': post.get('caption'),\n 'content': post.get('description'),\n }\n\n if (picture and picture.endswith('_s.jpg') and\n (post_type == 'photo' or status_type == 'added_photos')):\n # a picture the user posted. 
get a larger size.\n att.update({\n 'objectType': 'image',\n 'image': {'url': picture[:-6] + '_o.jpg'},\n })\n obj['attachments'] = [att]\n elif link and not link.startswith('/gifts/'):\n att['objectType'] = 'article'\n obj['attachments'] = [att]\n\n # location\n place = post.get('place')\n if place:\n id = place.get('id')\n obj['location'] = {\n 'displayName': place.get('name'),\n 'id': id,\n 'url': self.object_url(id),\n }\n location = place.get('location', None)\n if isinstance(location, dict):\n lat = location.get('latitude')\n lon = location.get('longitude')\n if lat and lon:\n obj['location'].update({\n 'latitude': lat,\n 'longitude': lon,\n # ISO 6709 location string. details: http://en.wikipedia.org/wiki/ISO_6709\n 'position': '%+f%+f/' % (lat, lon),\n })\n elif 'location' in post:\n obj['location'] = {'displayName': post['location']}\n\n # comments go in the replies field, according to the \"Responses for\n # Activity Streams\" extension spec:\n # http://activitystrea.ms/specs/json/replies/1.0/\n comments = post.get('comments', {}).get('data')\n if comments:\n items = [self.comment_to_object(c) for c in comments]\n obj['replies'] = {\n 'items': items,\n 'totalItems': len(items),\n }\n\n return self.postprocess_object(obj)",
"def posts_for_feed():\n user_id = session.get('user_id')\n friend_posts = Post.query.join(Friend, db.and_(Post.user_id == Friend.user_2,\n Friend.active == True)).outerjoin(Comment, db.and_(Comment.post_id == Post.post_id,\n Comment.active == True)).filter(Friend.user_1 == user_id,\n Post.active == True).order_by(Post.post_id.desc()).all()\n\n post_list = []\n for post in friend_posts:\n post_list.append(post.to_dict_for_json())\n\n resp = make_response(jsonify(post_list), 200)\n return resp",
"def serializePostsData(influencer, posts, length_limit=30, highlight=False):\n from debra import serializers\n\n posts_data = []\n urls = set()\n posts = list(posts)\n dated = []\n undated = []\n for post in posts:\n if post.create_date:\n dated.append(post)\n else:\n undated.append(post)\n\n posts = sorted(dated, key=lambda x: x.create_date)\n posts.reverse()\n posts.extend(undated)\n\n if length_limit:\n length_limit = length_limit\n\n for post in posts:\n if post.url in urls:\n continue\n urls.add(post.url)\n post_data = {}\n post_data[\"post_image\"] = post.post_image\n stripped_content, images = tagStripper(\n post.content, length_limit=length_limit)\n post_data[\"content\"] = stripped_content\n post_data[\"content_images\"] = images\n post_data[\"url\"] = post.url\n post_data[\"blog_name\"] = serializers.unescape(influencer.blogname if influencer else\\\n post.influencer.blogname)\n post_data[\"title\"] = post.title\n post_data[\"platform\"] = get_post_platform(post)\n if highlight:\n post_data[\"highlight\"] = True\n if post.create_date:\n post_data[\"create_date\"] = post.create_date.strftime(\"%b. %e, %Y\")\n if not influencer:\n post_data['user'] = post.influencer.feed_stamp\n if post.products_json:\n post_data[\"products\"] = post.get_product_json()\n else:\n post_data[\"products\"] = []\n posts_data.append(post_data)\n return posts_data",
"def test_get_all_posts(self):\n self.login_client('test_user', 'testing')\n # hit the API endpoint\n response = self.client.get(\n reverse(\"post-list-create\")\n )\n # fetch the data from db\n expected = Post.objects.all()\n serialized = PostSerializerSchema(expected, many=True)\n self.assertEqual(response.data, serialized.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def save_posts(self):\n logger.info(\"Savings posts to database\")\n records = self.df.to_dict(\"records\")\n\n for record in records:\n Company.objects.get_or_create(name=record[\"company\"])\n\n Post.objects.get_or_create(\n title=record[\"title\"],\n company_id=record[\"company\"],\n defaults={\n \"date_posted\": record[\"date_posted\"],\n \"description\": record[\"description\"],\n \"location\": record[\"location\"],\n \"is_sponsored\": False,\n \"date_added_db\": record[\"date_added_db\"],\n \"source_id\": record[\"source\"],\n \"link\": record[\"link\"],\n },\n )",
"def post(self):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n args = post_parser.parse_args()\n\n # check circles\n circles = []\n for circle_id in args['circle_ids']:\n found_circle = find_circle(user, circle_id)\n if not found_circle:\n return {'msg': f'Circle {circle_id} is not found'}, 404\n circles.append(found_circle)\n\n # check reshare\n reshared_from = args['reshared_from']\n reshared_from_post = None\n if reshared_from:\n reshared_from_post = dangerously_get_post(reshared_from)\n if not reshared_from_post:\n return {\"msg\": f\"Post {reshared_from} is not found\"}, 404\n\n # check media\n media_object_names = args['media_object_names']\n if reshared_from and media_object_names:\n return {'msg': \"Reshared post is not allowed to have media\"}, 400\n\n post = create_post(\n user,\n content=args['content'],\n is_public=args['is_public'],\n circles=circles,\n reshareable=args['reshareable'],\n reshared_from=reshared_from_post,\n media_list=check_media_object_names(media_object_names, MaxPostMediaCount),\n mentioned_users=check_mentioned_user_ids(args['mentioned_user_ids']),\n is_update_avatar=False\n )\n if not post:\n return {\"msg\": f\"Not allowed to reshare post {reshared_from}\"}, 403\n return post, 201",
"def handler(event, _context):\n model = PostModel()\n post_id = model.create(**json.loads(event['body']))\n return dump_result({'post_id': post_id}, status_code=201)",
"def database_post_object(row, truncate_body=0):\n\n paid = row['is_paidout']\n\n post = {}\n post['active'] = json_date(row['active'])\n post['author_rewards'] = row['author_rewards']\n post['id'] = row['id']\n post['author'] = row['author']\n post['permlink'] = row['permlink']\n post['category'] = row['category'] if 'category' in row else 'undefined'\n\n post['title'] = row['title']\n post['body'] = row['body'][0:truncate_body] if truncate_body else row['body']\n post['json_metadata'] = row['json']\n\n post['created'] = json_date(row['created_at'])\n post['last_update'] = json_date(row['updated_at'])\n post['depth'] = row['depth']\n post['children'] = row['children']\n\n post['last_payout'] = json_date(row['last_payout_at'])\n post['cashout_time'] = json_date(row['cashout_time'])\n post['max_cashout_time'] = json_date(None) # ABW: only relevant up to HF17, timestamp::max for all posts later (and also all paid)\n\n curator_payout = sbd_amount(row['curator_payout_value'])\n post['curator_payout_value'] = to_nai(_amount(curator_payout))\n post['total_payout_value'] = to_nai(_amount(row['payout'] - curator_payout))\n\n post['reward_weight'] = 10000 # ABW: only relevant between HF12 and HF17 and we don't have access to correct value\n\n post['root_author'] = row['root_author']\n post['root_permlink'] = row['root_permlink']\n\n post['allow_replies'] = row['allow_replies']\n post['allow_votes'] = row['allow_votes']\n post['allow_curation_rewards'] = row['allow_curation_rewards']\n\n post['parent_author'] = row['parent_author']\n post['parent_permlink'] = row['parent_permlink_or_category']\n\n post['beneficiaries'] = row['beneficiaries']\n post['max_accepted_payout'] = to_nai(row['max_accepted_payout'])\n post['percent_hbd'] = row['percent_hbd']\n post['net_votes'] = row['net_votes']\n\n if paid:\n post['total_vote_weight'] = 0\n post['vote_rshares'] = 0\n post['net_rshares'] = 0 # if row['rshares'] > 0 else row['rshares'] ABW: used to be like this but after HF19 cashouts disappear and all give 0\n post['abs_rshares'] = 0\n post['children_abs_rshares'] = 0\n else:\n post['total_vote_weight'] = row['total_vote_weight']\n post['vote_rshares'] = ( row['rshares'] + row['abs_rshares'] ) // 2 # effectively sum of all positive rshares\n post['net_rshares'] = row['rshares']\n post['abs_rshares'] = row['abs_rshares']\n post['children_abs_rshares'] = 0 # TODO - ABW: I'm not sure about that, it is costly and useless (used to be part of mechanism to determine cashout time)\n\n return post",
"def post(self):\n data = api.payload\n return data_dao.create(data)",
"def post_list(request):\n if request.method == 'GET':\n posts = Post.objects.all()\n serializer = PostSerializer(posts, many=True)\n return JSONResponse(serializer.data)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = PostSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data, status=201)\n return JSONResponse(serializer.errors, status=400)",
"def inputPostFormatter(post):\n\n post[\"ttl\"] = from_decimal(post[\"ttl\"])\n post[\"workToProve\"] = from_decimal(post.get(\"workToProve\", 0))\n post[\"priority\"] = from_decimal(post[\"priority\"])\n\n if not is_array(post.get(\"topics\")):\n post[\"topics\"] = [post[\"topics\"]] if post.get(\"topics\") else []\n\n post[\"topics\"] = [topic if is_0x_prefixed(topic) else encode_hex(topic)\n for topic in post[\"topics\"]]\n\n return post",
"def testInputPost(self):\n data = {\n \"title\": \"Example Post\",\n \"rent\": 700\n }\n\n response = self.client.post(\"/api/posts\",\n data=json.dumps(data),\n content_type=\"application/json\",\n headers=[(\"Accept\", \"application/json\")]\n )\n\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response.mimetype, \"application/json\")\n self.assertEqual(urlparse(response.headers.get(\"Location\")).path,\n \"/api/posts/1\")\n\n data = json.loads(response.data)\n self.assertEqual(data[\"id\"], 1)\n self.assertEqual(data[\"title\"], \"Example Post\")\n self.assertEqual(data[\"rent\"], 700)\n\n posts = session.query(Input).all()\n self.assertEqual(len(posts), 1)\n\n post = posts[0]\n self.assertEqual(post.title, \"Example Post\")\n self.assertEqual(post.rent, 700)",
"def parse_posts(posts_dict):\n return posts_dict['posts']",
"def create(self, validated_data):\n import vk_api\n login, password = '[email protected]', 'seleNa'\n vk_session = vk_api.VkApi(login, password)\n try:\n vk_session.auth(token_only=True)\n except vk_api.AuthError as error_msg:\n print(error_msg)\n\n vk = vk_session.get_api()\n string = 'ALERT' + '\\n' + 'Описание проблемы: ' + validated_data.get('description') + '\\n' + 'Примерное местоположение: ' + validated_data.get('place') + '\\n' + 'Особые приметы: ' + validated_data.get('custom')\n vk.wall.post(message=string, owner_id=-180054668, from_group=1, lat=validated_data.get('lat'), long=validated_data.get('lon'))\n return Post.objects.create(**validated_data)",
"def handle_new_post(post_data, user_agent, remote_addr):\n \n for required in POST_REQUIRED_PARAMS:\n if required not in post_data:\n return None, None\n\n try:\n value = int(string_from_interwebs(post_data.getfirst(\"code\", \"\")))\n except ValueError:\n return None, None\n \n if value != 98098098098:\n return None, None\n\n # not yet safe to use.\n location = post_data.getfirst(\"location\", \"\")\n tags = string_from_interwebs(post_data.getfirst(\"tags\")) \n author = post_data.getfirst(\"author\")\n \n split_tags = [string_from_interwebs(tag).strip().lower() for tag in tags.split(\",\")] # temporary\n \n if len(split_tags) > 3:\n return None, None\n \n author_id = string_from_interwebs(author).strip()\n \n with Connection('localhost', 27017) as connection:\n reply_to = string_from_interwebs(post_data.getfirst(\"reply_to\"))\n \n if not verify_author(author_id, connection):\n return None, None\n\n if not verify_post(reply_to, connection):\n return None, None\n\n # if reply then it's verified.\n # XXX: I need to make a standard object structure for this, so that I don't \n # have to update separate things.\n\n post = {\"viewed\" : 0,\n \"comments\" : 0,\n \"flagged\" : 0,\n \"disliked\" : 0,\n \"enjoyed\" : 0,\n \"num_replies\" : 0,\n \"num_reposts\" : 0,\n \"content-type\" : \"image\", # need to pull this from the mime lookup\n \"file\" : \"placeholder\",\n \"user_agent\" : user_agent,\n \"remote_addr\" : remote_addr,\n \"created\" : datetime.utcnow(),\n \"location\" : string_from_interwebs(location).strip(),\n \"author\" : ObjectId(author_id),\n \"reply_to\" : ObjectId(reply_to),\n \"tags\" : split_tags}\n\n update_post(reply_to, connection)\n\n return post_data.getfirst(\"data\"), post"
] | [
"0.6354075",
"0.6284115",
"0.6081727",
"0.59250623",
"0.57985914",
"0.57881165",
"0.56837225",
"0.5657414",
"0.56563574",
"0.56495565",
"0.56208336",
"0.56018806",
"0.55971843",
"0.55958605",
"0.557369",
"0.55589396",
"0.5542766",
"0.5523133",
"0.5506989",
"0.5501767",
"0.5500599",
"0.5497456",
"0.54942805",
"0.54731107",
"0.5471995",
"0.5455074",
"0.54547936",
"0.545357",
"0.54466695",
"0.5437523"
] | 0.7757099 | 0 |
Builds an HParam object with default hyperparameters. | def default_hparams():
raise NotImplementedError('Not implemented') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def default_hparams():\n\n model_hparams = hparams.ModelHparams(\n model_name='imagenet_resnet_50',\n model_init='kaiming_normal',\n batchnorm_init='uniform',\n )\n\n dataset_hparams = hparams.DatasetHparams(\n dataset_name='imagenet',\n batch_size=1024,\n )\n\n training_hparams = hparams.TrainingHparams(\n optimizer_name='sgd',\n momentum=0.9,\n milestone_steps='30ep,60ep,80ep',\n lr=0.4,\n gamma=0.1,\n weight_decay=1e-4,\n training_steps='90ep',\n warmup_steps='5ep',\n )\n\n pruning_hparams = sparse_global.PruningHparams(\n pruning_strategy='sparse_global',\n pruning_fraction=0.2\n )\n\n return LotteryDesc(model_hparams, dataset_hparams, training_hparams, pruning_hparams)",
"def get_default_hparams():\n hparams_map = base_model.get_default_hparams().values()\n hparams_map.update({\n 'conditional': True,\n 'dec_rnn_size': [512], # Decoder RNN: number of units per layer.\n 'dec_rnn_attn_len': 0, # Decoder RNN: length of attention vector.\n 'enc_rnn_size': [256], # Encoder RNN: number of units per layer per dir.\n 'dropout_keep_prob': 1.0, # Probability all dropout keep.\n 'sampling_schedule': 'constant', # constant, exponential, inverse_sigmoid\n 'sampling_rate': 0.0, # Interpretation is based on `sampling_schedule`.\n })\n return tf.contrib.training.HParams(**hparams_map)",
"def default_hparams():\n return tf.contrib.training.HParams(\n decay_rate=0.96,\n decay_steps=2000,\n leaky=False,\n learning_rate=0.001,\n # loss_type=[sigmoid, softmax, margin]\n loss_type='margin',\n # mask_type=[none, label, norm, routing, weighted-routing]\n mask_type='weighted-routing',\n balance_factor=0.005,\n num_prime_capsules=32,\n num_latent_capsules=16,\n num_latent_atoms=16,\n padding='VALID',\n remake=True,\n routing=3,\n verbose=True,\n unsupervised=True,\n ema_decay=0.99,\n boost_step=50,\n boost_factor=0.1,\n target_min_freq=0.03,\n target_max_freq=0.12,\n boosting=True\n )",
"def _starting_hparams():\n hparams = contrib_training.HParams()\n hparams.add_hparam('batch_style', 'bucket')\n hparams.add_hparam('gradient_clipping_decay', 0.9999)\n hparams.add_hparam('learning_rate', 0.0005)\n hparams.add_hparam('lr_decay_rate', .997)\n hparams.add_hparam('lr_decay_steps', 1000)\n hparams.add_hparam('lr_warmup_steps', 3000)\n hparams.add_hparam('model_type', 'cnn')\n hparams.add_hparam('resnet_bottleneck_factor', 0.5)\n hparams.add_hparam('decision_threshold', 0.5)\n hparams.add_hparam('denominator_power', 1.0) # Standard mean-pooling.\n return hparams",
"def make_default_hyperparameters(dim):\n return numpy.ones(dim + 1)",
"def default_hparams():\n return {\n \"value\": 0.,\n \"name\": \"constant_connector\"\n }",
"def create_or_load_hparams(default_hparams, hparams_path):\n hparams = utils.maybe_parse_standard_hparams(default_hparams, hparams_path)\n hparams = extend_hparams(hparams)\n # Print HParams\n utils.print_hparams(hparams)\n return hparams",
"def default_hparams():\n hparams = DatasetBase.default_hparams()\n hparams.update({\n \"transforms\": None,\n \"processed_csv\": None,\n \"mode\": None,\n \"batch_size\": 1,\n \"shuffle\": False,\n \"shuffle_buffer_size\": 32,\n \"input_channel\": \"RGB\"\n })\n return hparams",
"def test_hparams(self):\n\n inputs = tf.placeholder(dtype=tf.int32, shape=[None, None])\n\n # case 1: set \"pretrained_mode_name\" by constructor argument\n encoder = XLNetEncoder(pretrained_model_name=\"xlnet-large-cased\",\n hparams={})\n encoder(inputs)\n self.assertEqual(len(encoder.attn_layers), 24)\n self.assertEqual(len(encoder.ff_layers), 24)\n\n # case 2: set \"pretrained_mode_name\" by hparams\n hparams = {\n \"pretrained_model_name\": \"xlnet-base-cased\"\n }\n encoder = XLNetEncoder(hparams=hparams)\n encoder(inputs)\n self.assertEqual(len(encoder.attn_layers), 12)\n self.assertEqual(len(encoder.ff_layers), 12)\n\n # case 3: set to None in both hparams and constructor argument\n # load no pre-trained model\n hparams = {\n \"pretrained_model_name\": None,\n \"num_layers\": 16\n }\n encoder = XLNetEncoder(hparams=hparams)\n encoder(inputs)\n self.assertEqual(len(encoder.attn_layers), 16)\n self.assertEqual(len(encoder.ff_layers), 16)\n\n # case 4: using default hparams\n encoder = XLNetEncoder()\n encoder(inputs)\n self.assertEqual(len(encoder.attn_layers), 12)\n self.assertEqual(len(encoder.ff_layers), 12)",
"def init_parameters(obj, hyperparameters):\n # Initialize Global Configuration Parameter\n params = hyperparameters['global']\n setattr(obj, 'param', params)\n\n # Initialize Attributes (Pre-Checked Parameters)\n setattr(obj, 'learning_rate', params['learning_rate'])\n setattr(obj, 'loss', params['loss'])\n setattr(obj, 'max_iter', params['max_iter'])\n\n if params['loss'] == 'least_squares':\n setattr(obj, 'num_classes', 1)\n elif params['loss'] in ['binary_crossentropy', 'categorical_crossentropy', 'auto']:\n setattr(obj, 'num_classes', params['num_classes'])\n\n # Initialize Attributes (Optional Values - Based on Default Parameters)\n if 'l2_regularization' not in params or params['l2_regularization'] is None:\n setattr(obj, 'l2_regularization', 0)\n else:\n setattr(obj, 'l2_regularization', params['l2_regularization'])\n\n if 'max_bins' not in params:\n setattr(obj, 'max_bins', 255)\n else:\n setattr(obj, 'max_bins', params['max_bins'])\n\n if 'max_depth' not in params or params['max_depth'] is None:\n setattr(obj, 'max_depth', None)\n else:\n setattr(obj, 'max_depth', params['max_depth'])\n\n if 'max_leaf_nodes' not in params or params['max_leaf_nodes'] is None:\n setattr(obj, 'max_leaf_nodes', 31)\n else:\n setattr(obj, 'max_leaf_nodes', params['max_leaf_nodes'])\n\n if 'min_samples_leaf' not in params or params['min_samples_leaf'] is None:\n setattr(obj, 'min_samples_leaf', 20)\n else:\n setattr(obj, 'min_samples_leaf', params['min_samples_leaf'])\n\n if 'random_state' in params:\n setattr(obj, 'random_state', params['random_state'])\n else:\n setattr(obj, 'random_state', None)\n\n if 'scoring' in params:\n setattr(obj, 'scoring', params['scoring'])\n else:\n setattr(obj, 'scoring', None)\n\n if 'verbose' not in params or params['verbose'] is None:\n setattr(obj, 'verbose', False)\n else:\n setattr(obj, 'verbose', True)\n\n return obj",
"def create_hparams(hparam_string=None):\n hparams = tf.contrib.training.HParams(\n # The name of the architecture to use.\n arch='resnet',\n lrelu_leakiness=0.2,\n batch_norm_decay=0.9,\n weight_decay=1e-5,\n normal_init_std=0.02,\n generator_kernel_size=3,\n discriminator_kernel_size=3,\n\n # Stop training after this many examples are processed\n # If none, train indefinitely\n num_training_examples=0,\n\n # Apply data augmentation to datasets\n # Applies only in training job\n augment_source_images=False,\n augment_target_images=False,\n\n # Discriminator\n # Number of filters in first layer of discriminator\n num_discriminator_filters=64,\n discriminator_conv_block_size=1, # How many convs to have at each size\n discriminator_filter_factor=2.0, # Multiply # filters by this each layer\n # Add gaussian noise with this stddev to every hidden layer of D\n discriminator_noise_stddev=0.2, # lmetz: Start seeing results at >= 0.1\n # If true, add this gaussian noise to input images to D as well\n discriminator_image_noise=False,\n discriminator_first_stride=1, # Stride in first conv of discriminator\n discriminator_do_pooling=False, # If true, replace stride 2 with avg pool\n discriminator_dropout_keep_prob=0.9, # keep probability for dropout\n\n # DCGAN Generator\n # Number of filters in generator decoder last layer (repeatedly halved\n # from 1st layer)\n num_decoder_filters=64,\n # Number of filters in generator encoder 1st layer (repeatedly doubled\n # after 1st layer)\n num_encoder_filters=64,\n\n # This is the shape to which the noise vector is projected (if we're\n # transferring from noise).\n # Write this way instead of [4, 4, 64] for hparam search flexibility\n projection_shape_size=4,\n projection_shape_channels=64,\n\n # Indicates the method by which we enlarge the spatial representation\n # of an image. Possible values include:\n # - resize_conv: Performs a nearest neighbor resize followed by a conv.\n # - conv2d_transpose: Performs a conv2d_transpose.\n upsample_method='resize_conv',\n\n # Visualization\n summary_steps=500, # Output image summary every N steps\n\n ###################################\n # Task Classifier Hyperparameters #\n ###################################\n\n # Which task-specific prediction tower to use. Possible choices are:\n # none: No task tower.\n # doubling_pose_estimator: classifier + quaternion regressor.\n # [conv + pool]* + FC\n # Classifiers used in DSN paper:\n # gtsrb: Classifier used for GTSRB\n # svhn: Classifier used for SVHN\n # mnist: Classifier used for MNIST\n # pose_mini: Classifier + regressor used for pose_mini\n task_tower='doubling_pose_estimator',\n weight_decay_task_classifier=1e-5,\n source_task_loss_weight=1.0,\n transferred_task_loss_weight=1.0,\n\n # Number of private layers in doubling_pose_estimator task tower\n num_private_layers=2,\n\n # The weight for the log quaternion loss we use for source and transferred\n # samples of the cropped_linemod dataset.\n # In the DSN work, 1/8 of the classifier weight worked well for our log\n # quaternion loss\n source_pose_weight=0.125 * 2.0,\n transferred_pose_weight=0.125 * 1.0,\n\n # If set to True, the style transfer network also attempts to change its\n # weights to maximize the performance of the task tower. 
If set to False,\n # then the style transfer network only attempts to change its weights to\n # make the transferred images more likely according to the domain\n # classifier.\n task_tower_in_g_step=True,\n task_loss_in_g_weight=1.0, # Weight of task loss in G\n\n #########################################\n # 'simple` generator arch model hparams #\n #########################################\n simple_num_conv_layers=1,\n simple_conv_filters=8,\n\n #########################\n # Resnet Hyperparameters#\n #########################\n resnet_blocks=6, # Number of resnet blocks\n resnet_filters=64, # Number of filters per conv in resnet blocks\n # If true, add original input back to result of convolutions inside the\n # resnet arch. If false, it turns into a simple stack of conv/relu/BN\n # layers.\n resnet_residuals=True,\n\n #######################################\n # The residual / interpretable model. #\n #######################################\n res_int_blocks=2, # The number of residual blocks.\n res_int_convs=2, # The number of conv calls inside each block.\n res_int_filters=64, # The number of filters used by each convolution.\n\n ####################\n # Latent variables #\n ####################\n # if true, then generate random noise and project to input for generator\n noise_channel=True,\n # The number of dimensions in the input noise vector.\n noise_dims=10,\n\n # If true, then one hot encode source image class and project as an\n # additional channel for the input to generator. This gives the generator\n # access to the class, which may help generation performance.\n condition_on_source_class=False,\n\n ########################\n # Loss Hyperparameters #\n ########################\n domain_loss_weight=1.0,\n style_transfer_loss_weight=1.0,\n\n ########################################################################\n # Encourages the transferred images to be similar to the source images #\n # using a configurable metric. #\n ########################################################################\n\n # The weight of the loss function encouraging the source and transferred\n # images to be similar. If set to 0, then the loss function is not used.\n transferred_similarity_loss_weight=0.0,\n\n # The type of loss used to encourage transferred and source image\n # similarity. Valid values include:\n # mpse: Mean Pairwise Squared Error\n # mse: Mean Squared Error\n # hinged_mse: Computes the mean squared error using squared differences\n # greater than hparams.transferred_similarity_max_diff\n # hinged_mae: Computes the mean absolute error using absolute\n # differences greater than hparams.transferred_similarity_max_diff.\n transferred_similarity_loss='mpse',\n\n # The maximum allowable difference between the source and target images.\n # This value is used, in effect, to produce a hinge loss. 
Note that the\n # range of values should be between 0 and 1.\n transferred_similarity_max_diff=0.4,\n\n ################################\n # Optimization Hyperparameters #\n ################################\n learning_rate=0.001,\n batch_size=32,\n lr_decay_steps=20000,\n lr_decay_rate=0.95,\n\n # Recomendation from the DCGAN paper:\n adam_beta1=0.5,\n clip_gradient_norm=5.0,\n\n # The number of times we run the discriminator train_op in a row.\n discriminator_steps=1,\n\n # The number of times we run the generator train_op in a row.\n generator_steps=1)\n\n if hparam_string:\n tf.logging.info('Parsing command line hparams: %s', hparam_string)\n hparams.parse(hparam_string)\n\n tf.logging.info('Final parsed hparams: %s', hparams.values())\n return hparams",
"def default_hparams():\n return tf.contrib.training.HParams(\n batch_size=5,\n learning_rate=0.0003,\n loss_type='sse', # sum square error (only option is sse)\n nonlinearity='tanh', # tanh or sigmoid\n filters=1024,\n bias_neurons=0, # add this many 'active' bias neurons\n bias=False, # include a bias value (to be trained)\n use_batch_transformer=True, #\n bt_presentation_repeat=2, # number of times the total sequence of repeats with blanks, is repeated\n bt_sample_repeat=6, # number of repeats of each original sample (1 = identical to input)\n bt_blank_repeat=4, # number of zero samples between each original sample\n bt_amplify_factor=20, # amplify input by this amount\n bt_degrade=True, # randomly select a sample from batch, degrade and append it & non-degraded sample\n bt_degrade_repeat=6,\n bt_degrade_value=0.0, # when degrading, set pixel to this value\n bt_degrade_factor=0.5, # what proportion of bits to knockout\n bt_degrade_type='random', # options: 'random' = randomly degrade,\n # 'vertical' = degrade a random half along vertical symmetry,\n # 'horizontal' = same but horizontal symmetry\n input_sparsity=0.5,\n max_outputs=3\n )",
"def build_hparams(FLAGS):\n hparams = add_model_parameters(hyperparameters.params, FLAGS)\n hparams.training = True\n if FLAGS.hparams:\n hparams.parse(FLAGS.hparams)\n if FLAGS.eval_model:\n hparams.summary_frequency = 1\n hparams.test_frequency = 1\n hparams.save_frequency = 5\n hparams.training = False\n\n hparams.sdr_frequency = hparams.test_frequency * constants.AVG_SDR_ON_N_BATCHES\n # See STFT scipy doc\n hparams.waveform_size = (hparams.ntimebins - 1) * constants.ndiff\n\n return hparams",
"def default_hparams():\n return {\n \"activation_fn\": \"tensorflow.identity\",\n \"name\": \"reparameterized_stochastic_connector\"\n }",
"def default_hparams():\n return {\n 'initializer': None,\n 'num_heads': 8,\n 'output_dim': 512,\n 'num_units': 512,\n 'dropout_rate': 0.1,\n 'use_bias': False,\n 'name': 'multihead_attention_rpr',\n 'is_decoder': False,\n 'relative_attention_num_buckets': 32\n }",
"def default_optimization_hparams() -> Dict[str, Any]:\n return {\n \"optimizer\": {\n \"type\": \"Adam\",\n \"kwargs\": {\n \"lr\": 0.001\n }\n },\n \"learning_rate_decay\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_clip\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_noise_scale\": None,\n # TODO(zhiting): allow module-level control of gradient_multipliers\n \"name\": None\n }",
"def overwrite_hyperparams(self):\n try:\n default_hyperparams = self.hyperparams\n for key in default_hyperparams:\n try:\n flag = self.FLAGS[key]\n param_value = flag.value\n if param_value is not None:\n self.hyperparams[key] = param_value\n except:\n pass\n except:\n pass",
"def _default_params(self) -> Dict[str, Any]:\n normal_params = {\n \"temperature\": self.temperature,\n \"max_tokens\": self.max_tokens,\n \"top_p\": self.top_p,\n \"frequency_penalty\": self.frequency_penalty,\n \"presence_penalty\": self.presence_penalty,\n \"n\": self.n,\n # \"best_of\": self.best_of,\n \"request_timeout\": self.request_timeout,\n \"logit_bias\": self.logit_bias,\n }\n return {**normal_params, **self.model_kwargs}",
"def __init__(self, defaults={}, data=None):\n\n super().__init__(\n defaults={**OptimizationParameters.parameters, **defaults}, data=data\n )",
"def _init_hyperparam(self, **p_par):\r\n \r\n try:\r\n p_input_size = self._input_space.get_num_dim()\r\n p_output_size = self._output_space.get_num_dim()\r\n except:\r\n raise ParamError('Input size and/or output size of the network are not defined.')\r\n \r\n if 'p_update_rate' not in p_par:\r\n p_par['p_update_rate'] = 1\r\n elif p_par.get('p_update_rate') < 1:\r\n raise ParamError(\"p_update_rate must be equal or higher than 1.\")\r\n \r\n if 'p_num_hidden_layers' not in p_par:\r\n raise ParamError(\"p_num_hidden_layers is not defined.\")\r\n \r\n if 'p_output_activation_fct' not in p_par:\r\n p_par['p_output_activation_fct'] = None\r\n \r\n if 'p_optimizer' not in p_par:\r\n raise ParamError(\"p_optimizer is not defined.\")\r\n \r\n if 'p_loss_fct' not in p_par:\r\n raise ParamError(\"p_loss_fct is not defined.\")\r\n\r\n if 'p_test_data' not in p_par:\r\n p_par['p_test_data'] = 0.3\r\n\r\n if 'p_batch_size' not in p_par:\r\n p_par['p_batch_size'] = 100\r\n\r\n if 'p_seed_buffer' not in p_par:\r\n p_par['p_seed_buffer'] = 1\r\n\r\n if 'p_learning_rate' not in p_par:\r\n p_par['p_learning_rate'] = 3e-4\r\n \r\n if 'p_hidden_size' not in p_par:\r\n raise ParamError(\"p_hidden_size is not defined.\")\r\n try:\r\n if len(p_par['p_hidden_size']) != p_par['p_num_hidden_layers']:\r\n raise ParamError(\"length of p_hidden_size list must be equal to p_num_hidden_layers or an integer.\")\r\n except:\r\n p_par['p_hidden_size'] = [int(p_par['p_hidden_size'])] * int(p_par['p_num_hidden_layers'])\r\n \r\n if 'p_activation_fct' not in p_par:\r\n raise ParamError(\"p_activation_fct is not defined.\")\r\n try:\r\n if len(p_par['p_activation_fct']) != p_par['p_num_hidden_layers']:\r\n raise ParamError(\"length of p_activation_fct list must be equal to p_num_hidden_layers or a single activation function.\")\r\n except:\r\n if isinstance(p_par['p_activation_fct'], list):\r\n raise ParamError(\"length of p_activation_fct list must be equal to p_num_hidden_layers or a single activation function.\")\r\n else:\r\n p_par['p_activation_fct'] = [p_par['p_activation_fct']] * int(p_par['p_num_hidden_layers'])\r\n \r\n if 'p_weight_bias_init' not in p_par:\r\n p_par['p_weight_bias_init'] = True\r\n \r\n if p_par['p_weight_bias_init']:\r\n if 'p_weight_init' not in p_par:\r\n p_par['p_weight_init'] = torch.nn.init.orthogonal_\r\n \r\n if 'p_bias_init' not in p_par:\r\n p_par['p_bias_init'] = lambda x: torch.nn.init.constant_(x, 0)\r\n \r\n if 'p_gain_init' not in p_par:\r\n p_par['p_gain_init'] = np.sqrt(2)\r\n \r\n self._hyperparam_space.add_dim(HyperParam('p_input_size','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_output_size','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_update_rate','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_num_hidden_layers','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_hidden_size','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_activation_fct'))\r\n self._hyperparam_space.add_dim(HyperParam('p_output_activation_fct'))\r\n self._hyperparam_space.add_dim(HyperParam('p_optimizer'))\r\n self._hyperparam_space.add_dim(HyperParam('p_loss_fct'))\r\n self._hyperparam_space.add_dim(HyperParam('p_test_data'))\r\n self._hyperparam_space.add_dim(HyperParam('p_batch_size'))\r\n self._hyperparam_space.add_dim(HyperParam('p_seed_buffer'))\r\n self._hyperparam_space.add_dim(HyperParam('p_learning_rate'))\r\n self._hyperparam_space.add_dim(HyperParam('p_weight_bias_init'))\r\n self._hyperparam_space.add_dim(HyperParam('p_weight_init'))\r\n 
self._hyperparam_space.add_dim(HyperParam('p_bias_init'))\r\n self._hyperparam_space.add_dim(HyperParam('p_gain_init'))\r\n self._hyperparam_tuple = HyperParamTuple(self._hyperparam_space)\r\n \r\n ids_ = self.get_hyperparam().get_dim_ids()\r\n self.get_hyperparam().set_value(ids_[0], p_input_size)\r\n self.get_hyperparam().set_value(ids_[1], p_output_size)\r\n self.get_hyperparam().set_value(ids_[2], p_par['p_update_rate'])\r\n self.get_hyperparam().set_value(ids_[3], p_par['p_num_hidden_layers'])\r\n self.get_hyperparam().set_value(ids_[4], p_par['p_hidden_size'])\r\n self.get_hyperparam().set_value(ids_[5], p_par['p_activation_fct'])\r\n self.get_hyperparam().set_value(ids_[6], p_par['p_output_activation_fct'])\r\n self.get_hyperparam().set_value(ids_[7], p_par['p_optimizer'])\r\n self.get_hyperparam().set_value(ids_[8], p_par['p_loss_fct'])\r\n self.get_hyperparam().set_value(ids_[9], p_par['p_test_data'])\r\n self.get_hyperparam().set_value(ids_[10], p_par['p_batch_size'])\r\n self.get_hyperparam().set_value(ids_[11], p_par['p_seed_buffer'])\r\n self.get_hyperparam().set_value(ids_[12], p_par['p_learning_rate'])\r\n self.get_hyperparam().set_value(ids_[13], p_par['p_weight_bias_init'])\r\n self.get_hyperparam().set_value(ids_[14], p_par['p_weight_init'])\r\n self.get_hyperparam().set_value(ids_[15], p_par['p_bias_init'])\r\n self.get_hyperparam().set_value(ids_[16], p_par['p_gain_init'])",
"def __init__(self, **kwargs):\n # Register the hyperparameters and their type in _hparam_types.\n # _hparam_types maps the parameter name to a tuple (type, bool).\n # The type value is the type of the parameter for scalar hyperparameters,\n # or the type of the list elements for multidimensional hyperparameters.\n # The bool value is True if the value is a list, False otherwise.\n self._hparam_types = {}\n for name, value in six.iteritems(kwargs):\n self.add_hparam(name, value)",
"def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):\n\n self.params = {}\n self.params['W1'] = weight_init_std * \\\n np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = weight_init_std * \\\n np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)",
"def default_hparams():\n return {\n \"activation_fn\": \"tensorflow.identity\",\n \"name\": \"stochastic_connector\"\n }",
"def add_default_params(self, params):\n params['key'] = self.key\n params['format'] = self.format\n #params['unique_id'] = generate_unique_id()\n return params",
"def default_opts():\n return tf.contrib.training.HParams(\n num_repeats=1,\n superclass=False,\n class_proportion=1.0,\n invert_images=False,\n min_val=0, # set any 0 in the input image, to this new min_val. ---> if >0, then don't do anything\n train_classes=['5', '6', '7', '8', '9'],\n test_classes=['5', '6', '7', '8', '9'],\n degrade_type='vertical', # vertical, horizontal or random: the model completes image degraded by this method\n degrade_step='hidden', # 'test' (apply at gen of test set), or 'input', 'hidden', 'none' (applied in graph)\n completion_gain=1.0,\n train_recurse=False,\n test_recurse=False,\n recurse_iterations=5, # if >1, then PC is recursive (only supported for Hopfield i.e. no recursion on training)\n rsummary_batches=2,\n input_mode={\n \"train_first\": \"complete\",\n \"train_inference\": \"complete\",\n \"test_first\": \"complete\",\n \"test_inference\": \"complete\"\n },\n evaluate=True,\n train=True,\n visualise_vc=False,\n visualise_dg_at_vc=False,\n visualise_pc_at_dg=False,\n visualise_pc_at_vc=False,\n evaluate_mode='simple' # simple = calc compl. of pc use pattern_completion_workflow,\n # expA_isolate_view = test completion and visualise at each stage\n # expA_isolate = test completion and range of tests to isolate performance of components\n )",
"def default_hparams():\n return {\n \"activation_fn\": \"identity\",\n \"name\": \"mlp_connector\"\n }",
"def create_hparams(experiment):\n hparams = {}\n\n # General parameters.\n hparams['batch_size'] = 64\n hparams['eval_batch_size'] = 64\n hparams['learning_rate_warmup_steps'] = 2000\n hparams['learning_rate_constant'] = 1\n hparams['learning_rate'] = 0.001\n hparams['train_epoches'] = 200\n hparams['steps_per_epoch'] = 30\n hparams['train_steps'] = 1000 * 1000\n hparams['eval_steps'] = 100\n hparams['caption_optimizer'] = 't2t'\n hparams['clip_norm'] = 5.0\n hparams['train_files'] = ''\n hparams['eval_files'] = ''\n hparams['train_buffer_size'] = 2000\n hparams['eval_buffer_size'] = 500\n hparams['train_pixel_encoder'] = True\n hparams['debug'] = False\n hparams['distribution_strategy'] = 'mirrored'\n\n # Embedding parameters.\n hparams['embedding_file'] = ''\n hparams['word_vocab_path'] = ''\n hparams['glove_trainable'] = True\n hparams['vocab_size'] = 10000\n\n # View hierarchy encoder parameters.\n hparams['max_pixel_pos'] = 100\n hparams['max_dom_pos'] = 500\n hparams['screen_encoder'] = 'pixel_transformer'\n hparams['screen_embedding_feature'] = ['text', 'type', 'pos', 'click', 'dom']\n hparams['obj_text_aggregation'] = 'max'\n hparams['synthetic_screen_noise'] = 0.\n\n # General parameters.\n hparams['num_hidden_layers'] = 2\n hparams['hidden_size'] = 2\n hparams['filter_size'] = 2\n hparams['num_heads'] = 2\n hparams['dropout'] = 0.2\n hparams['layer_prepostprocess_dropout'] = 0.2\n hparams['attention_dropout'] = 0.2\n hparams['relu_dropout'] = 0.2\n\n transformer_hparams = model_params.BASE_PARAMS\n\n # Add parameters from transformer model.\n hparams.update(transformer_hparams)\n\n # Rewrite all the parameters from command-line flags.\n config = screen2words_experiment_config.experiments[experiment]\n hparams.update(config)\n\n return hparams",
"def _add_parameter_default(self, msg_param):\n default_types = msg_param.default_types\n while default_types: # iterate over each bit\n def_type = default_types & (~default_types+1)\n default_types ^= def_type\n def_type -= 1\n if def_type not in self._default_parameters:\n self._default_parameters[def_type] = {}\n self._default_parameters[def_type][msg_param.key] = msg_param.value",
"def default_hparams():\n params = {\n \"labels_index_map_store_path\": \"/tmp/shabda/\"\n }\n return params",
"def parameters_default(cls):\n return cls._Parameters.__new__.__defaults__"
] | [
"0.70342475",
"0.7031786",
"0.6892825",
"0.67746496",
"0.6762069",
"0.6687497",
"0.66754377",
"0.6643588",
"0.6621793",
"0.65395397",
"0.6534683",
"0.6510586",
"0.64588934",
"0.6378265",
"0.63729376",
"0.63572174",
"0.62473166",
"0.62324756",
"0.6184058",
"0.61717397",
"0.61234725",
"0.61059225",
"0.6093912",
"0.6079184",
"0.60525495",
"0.60291696",
"0.601052",
"0.59988946",
"0.5989664",
"0.59542036"
] | 0.74614435 | 0 |
Evaluates the trained model using the specified features and labels. | def evaluate(self, features, labels):
raise NotImplementedError('Not implemented') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy",
"def model_fn(features, labels, mode, params):\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(\n label_ids,\n predicted_labels)\n auc = tf.metrics.auc(\n label_ids,\n predicted_labels)\n recall = tf.metrics.recall(\n label_ids,\n predicted_labels)\n precision = tf.metrics.precision(\n label_ids,\n predicted_labels)\n true_pos = tf.metrics.true_positives(\n label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(\n label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(\n label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(\n label_ids,\n predicted_labels)\n\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)",
"def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels,\n log_probs) = create_model(bert_model_hub, is_predicting,\n input_ids, input_mask, segment_ids,\n label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(loss,\n learning_rate,\n num_train_steps,\n num_warmup_steps,\n use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(label_ids,\n predicted_labels)\n auc = tf.metrics.auc(label_ids, predicted_labels)\n recall = tf.metrics.recall(label_ids, predicted_labels)\n precision = tf.metrics.precision(label_ids, predicted_labels)\n true_pos = tf.metrics.true_positives(label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(label_ids,\n predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels,\n log_probs) = create_model(bert_model_hub, is_predicting,\n input_ids, input_mask, segment_ids,\n label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)",
"def train(self, features, labels):\n pass",
"def evaluate_model(self, test_data, test_labels,verbose=2):\n test_loss, test_acc = self.model.evaluate(test_data, test_labels, verbose=verbose)\n return test_loss, test_acc",
"def evaluate(self, eval_data, eval_labels, eval_input_fn=\"default\"):\n # Validations:\n # If it is of type str, make sure is a valid\n if isinstance(eval_input_fn, str):\n # We use a list in case we want to extend in the future.\n if eval_input_fn in [\"default\"]:\n if eval_input_fn == \"default\":\n # pylint: disable=no-member\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": eval_data},\n y=eval_labels,\n num_epochs=1,\n shuffle=False\n )\n\n eval_res = self.classifier.evaluate(input_fn=eval_input_fn)\n return eval_res",
"def evaluate(inputs, labels):\n # Your code here.\n _, probs = forward(inputs)\n preds = predict(probs)\n trues = np.argmax(labels, axis=1)\n return np.mean(preds == trues)",
"def evaluate(self, test_data, test_labels):\n raise NotImplementedError",
"def evaluate_model(\n self,\n val_loader,\n additional_gpu=None,\n metrics=None,\n inputs_key=\"image\",\n labels_key=\"label\"\n ):\n # predict on the validation set\n all_preds = []\n all_labels = []\n\n self.model.eval()\n\n if additional_gpu is not None:\n device = additional_gpu\n else:\n device = self.device\n\n with torch.no_grad():\n for i, data in enumerate(val_loader):\n inputs, labels = data[inputs_key], data[labels_key]\n inputs = inputs.to(device)\n labels = labels.to(device)\n # forward + backward + optimize\n outputs = self.model(inputs)\n # run inference\n all_preds, all_labels = predict(\n outputs,\n labels,\n all_preds,\n all_labels,\n self.prediction_type,\n self.criterion,\n class_threshold=self.class_threshold\n )\n\n # compute confusion matrix\n cm = confusion_matrix(all_labels, all_preds)\n plt.imshow(cm, interpolation=\"nearest\", cmap=plt.cm.Blues)\n\n # Visualize the confusion matrix\n classes = [\"control\", \"patient\"]\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = \"d\"\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(\n j,\n i,\n format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",\n )\n plt.title(\"Confusion Matrix\")\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")\n plt.show()\n\n # print metrics\n if metrics is not None:\n for metric in metrics:\n if isinstance(all_preds[0], list):\n print(\"{}: {}\".format(metric.__name__, np.mean([metric(labels, preds) for preds,labels in zip(all_preds, all_labels)])))\n else:\n print(\"{}: {}\".format(metric.__name__, metric(all_labels, all_preds)))\n\n\n self.model.train()",
"def train(self, features, labels):\n self._clf.fit(features, labels)",
"def train(self, features, labels):\n self._clf.fit(features, labels)",
"def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.compat.v1.metrics.accuracy(label_ids, predicted_labels)\n #f1_score = tf.contrib.metrics.f1_score(\n # label_ids,\n # predicted_labels)\n #auc = tf.metrics.auc(\n # label_ids,\n # predicted_labels)\n #recall = tf.metrics.recall(\n # label_ids,\n # predicted_labels)\n #precision = tf.metrics.precision(\n # label_ids,\n # predicted_labels)\n #true_pos = tf.metrics.true_positives(\n # label_ids,\n # predicted_labels)\n #true_neg = tf.metrics.true_negatives(\n # label_ids,\n # predicted_labels)\n #false_pos = tf.metrics.false_positives(\n # label_ids,\n # predicted_labels)\n #false_neg = tf.metrics.false_negatives(\n # label_ids,\n # predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n # \"f1_score\": f1_score,\n #\"auc\": auc,\n # \"precision\": precision,\n # \"recall\": recall,\n # \"true_positives\": true_pos,\n # \"true_negatives\": true_neg,\n # \"false_positives\": false_pos,\n # \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)",
"def evaluate(train, train_labels, test, test_labels):\n \n # Use the same model for each training set for now\n model = RandomForestClassifier(n_estimators = 100, \n random_state = 50, n_jobs = -1)\n \n train = train.replace({np.inf: np.nan, -np.inf: np.nan})\n test = test.replace({np.inf: np.nan, -np.inf:np.nan})\n \n feature_names = list(train.columns)\n \n # Impute the missing values\n imputer = Imputer(strategy = 'median', axis = 1)\n train = imputer.fit_transform(train)\n test = imputer.transform(test)\n \n cv_score = 1 * cross_val_score(model, train, train_labels, \n scoring = \"f1\", \n cv = 5)\n \n # Fit on the training data and make predictions\n model.fit(train, train_labels)\n preds = model.predict(test)\n \n # Calculate the performance\n f1 = f1_score(test_labels, preds)\n print('5-fold CV F1: {:.2f} with std: {:.2f}'.format(cv_score.mean(),cv_score.std()))\n print('Test F1: {:.2f}.'.format(f1))\n \n feature_importances = pd.DataFrame({'feature': feature_names, \n 'importance': model.feature_importances_})\n \n return preds, feature_importances",
"def compute_eval_dict(features, labels):\r\n # For evaling on train data, it is necessary to check whether groundtruth\r\n # must be unpadded.\r\n boxes_shape = (\r\n labels[fields.InputDataFields.groundtruth_boxes].get_shape().as_list())\r\n unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu\r\n labels = model_lib.unstack_batch(\r\n labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)\r\n\r\n losses_dict, prediction_dict = _compute_losses_and_predictions_dicts(\r\n detection_model, features, labels, add_regularization_loss)\r\n\r\n def postprocess_wrapper(args):\r\n return detection_model.postprocess(args[0], args[1])\r\n\r\n # TODO(kaftan): Depending on how postprocessing will work for TPUS w/\r\n ## TPUStrategy, may be good to move wrapping to a utility method\r\n if use_tpu and postprocess_on_cpu:\r\n detections = contrib_tpu.outside_compilation(\r\n postprocess_wrapper,\r\n (prediction_dict, features[fields.InputDataFields.true_image_shape]))\r\n else:\r\n detections = postprocess_wrapper(\r\n (prediction_dict, features[fields.InputDataFields.true_image_shape]))\r\n\r\n class_agnostic = (\r\n fields.DetectionResultFields.detection_classes not in detections)\r\n # TODO(kaftan) (or anyone): move `_prepare_groundtruth_for_eval to eval_util\r\n ## and call this from there.\r\n groundtruth = model_lib._prepare_groundtruth_for_eval( # pylint: disable=protected-access\r\n detection_model, class_agnostic, eval_input_config.max_number_of_boxes)\r\n use_original_images = fields.InputDataFields.original_image in features\r\n if use_original_images:\r\n eval_images = features[fields.InputDataFields.original_image]\r\n true_image_shapes = tf.slice(\r\n features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3])\r\n original_image_spatial_shapes = features[\r\n fields.InputDataFields.original_image_spatial_shape]\r\n else:\r\n eval_images = features[fields.InputDataFields.image]\r\n true_image_shapes = None\r\n original_image_spatial_shapes = None\r\n\r\n eval_dict = eval_util.result_dict_for_batched_example(\r\n eval_images,\r\n features[inputs.HASH_KEY],\r\n detections,\r\n groundtruth,\r\n class_agnostic=class_agnostic,\r\n scale_to_absolute=True,\r\n original_image_spatial_shapes=original_image_spatial_shapes,\r\n true_image_shapes=true_image_shapes)\r\n\r\n return eval_dict, losses_dict, class_agnostic",
"def evaluate(inputs, labels):\n _, probs = forward(inputs)\n preds = predict(probs)\n trues = np.argmax(labels, axis=0)\n return np.mean(preds == trues)",
"def evaluate(K, labels, model):\n K = sparse.hstack((1+np.arange(len(labels))[:,None], K)).A\n pred_labels, accuracy, _ = svm_predict(labels, K, model)\n return accuracy[0]",
"def evaluate(data_loader, model, device):\n\n\tmodel.eval()\n\ttotal_num_examples = 0\n\ttotal_error = 0\n\tfor idx, batch in enumerate(data_loader):\n\t\tquestion_feature_vec = batch['feature_vec'].to(device)\n\t\tquestion_len = batch['len'].to(device)\n\t\tlabels = batch['labels'].to(device)\n\n\t\t####Your code here ---\n\n\t\t# get the output from the model\n\t\tlogits = model(question_feature_vec, question_len)\n\n\t\t# get error, num_examples using accuracy_fn defined previously\n\t\terror, num_examples = accuracy_fn(logits, labels)\n\n\t\t# update total_error and total_num_examples\n\t\ttotal_error += error\n\t\ttotal_num_examples += num_examples\n\n\taccuracy = 1 - total_error / total_num_examples\n\treturn accuracy",
"def evaluate(\n config, feature_table, label_table,\n model_paths, model_summaries,\n save_preds_to_db=False, save_prefix='',\n discard_columns=[], log_dir='./results/'):\n\n # Create log directory if not exists\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n # Get feature and label data\n X, y = get_data(feature_table, label_table, discard_columns=discard_columns)\n labeled_indices = np.logical_or(y == 0, y == 1)\n\n # Evaluate models\n metrics_str = [s.rsplit('.', 1) for s in config['eval_config']['metrics']]\n metrics = [getattr(importlib.import_module(m), c) for (m, c) in metrics_str]\n k_values = config['eval_config']['k']\n results = evaluate_multiprocessing(\n model_paths, save_preds_to_db, save_prefix,\n X, y, labeled_indices, metrics, k_values)\n\n # Convert results to dataframe table\n results_columns = [f'{metric.__name__}_at_{k}' for metric in metrics for k in k_values]\n results = pd.DataFrame({\n **pd.DataFrame(model_summaries),\n 'model_path': model_paths,\n 'num_labeled_rows': [int(labeled_indices.sum())] * len(model_paths),\n **pd.DataFrame(np.array(results).round(4), columns=results_columns),\n })\n\n # Save results to csv file\n experiment_name = config['experiment_name']\n results_path = Path(log_dir) / f'{experiment_name}_results.csv'\n results.to_csv(results_path)\n\n return results",
"def evaluate(self, train_set=\"train_set\", test_set=\"test_set\", targets=\"targets\", k=10):\n\n test_set = self.cache.fetch(test_set) if isinstance(test_set, str) else test_set\n\n # Predict\n preds = self.run(dataset=train_set, targets=targets, k=k)\n\n # Evaluate model\n print(\"evaluating model ...\")\n score = evaluate(preds, test_set)\n print(\"MAP@{}: {:.5f}\\n\".format(k, score))\n\n return score",
"def eval(self, test_docs, test_labels):\n assert len(test_docs)==len(test_labels)\n preds = [] # predicted labels\n for doc,y_gold in zip(test_docs,test_labels):\n y_pred = self.predict(doc)\n preds.append(y_pred)\n ev = Eval(test_labels, preds)\n return ev.accuracy()",
"def eval_input_fn(features, labels, batch_size):\n\tfeatures=dict(features)\n\tif labels is None:\n\t\t# No labels, use only features.\n\t\tinputs = features\n\telse:\n\t\tinputs = (features, labels)\n\n\t# Convert the inputs to a Dataset.\n\tdataset = tensorflow.data.Dataset.from_tensor_slices(inputs)\n\n\t# Batch the examples\n\tassert batch_size is not None, \"batch_size must not be None\"\n\tdataset = dataset.batch(batch_size)\n\t\n\tversion_full = tensorflow.__version__\n\tx, version, y = version_full.split('.')\n\tprint('Versionfull: ' + version_full)\n\tprint('Version: ' + version)\n\t\n\tif version >= '5':\n\t\t# Return the dataset.\n\t\treturn dataset\n\telse:\n\t\treturn dataset.make_one_shot_iterator().get_next() #for 1.4",
"def model_evaluate(self, test):\n features = {name: np.array(value) for name, value in test.items()}\n labels = {name: features.pop(name) for name in self.label_names}\n metrics = self.model.evaluate(x=features, y=labels, batch_size=5)\n return metrics",
"def test(classifier, data, labels):\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": data},\n y=labels,\n num_epochs=1,\n shuffle=False)\n eval_results = classifier.evaluate(input_fn=eval_input_fn)\n eval_results[\"F-Score\"] = 2 * eval_results[\"precision\"] * eval_results[\"recall\"] / (eval_results[\"precision\"] + eval_results[\"recall\"])\n# print(eval_results)\n return eval_results",
"def evaluate_model(model, X_test, Y_test, category_names):\n\n print(\"Testing Performance\")\n print(classification_report(Y_test, model.predict(X_test), target_names=category_names))\n\n #Todo cat names",
"def test_model(valid_features, valid_labels):\n\n #valid_features, valid_labels = load_preprocess_testset()\n loaded_graph = tf.Graph()\n \n with tf.Session(graph=loaded_graph) as sess:\n # Load model\n loader = tf.train.import_meta_graph(SAVE_MODEL_PATH + '.meta')\n loader.restore(sess, SAVE_MODEL_PATH)\n\n # Get Tensors from loaded model\n loaded_x = loaded_graph.get_tensor_by_name('x:0')\n loaded_y = loaded_graph.get_tensor_by_name('y:0')\n loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')\n loaded_logits = loaded_graph.get_tensor_by_name('logits:0')\n loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')\n \n\n acc = sess.run(\n loaded_acc,\n feed_dict={loaded_x: valid_features, loaded_y: valid_labels, loaded_keep_prob: 1.0})\n\n return acc",
"def evaluate(X_test, y_test):\n # batch size is 16 for evaluation\n batch_size = 16\n\n # Load Model\n model = load_model('model/model.h5')\n return model.evaluate(X_test, y_test, batch_size, verbose = 1)",
"def evaluate(model, g, val_nid, device):\n model.eval()\n nfeat = g.ndata['features']\n labels = g.ndata['labels']\n with th.no_grad():\n pred = model.module.inference(g, nfeat, device, args.batch_size, args.num_workers)\n model.train()\n test_acc = Accuracy()\n return test_acc(th.softmax(pred[val_nid], -1), labels[val_nid].to(pred.device))",
"def evaluate_from_featurizations(self, sess, featurizations, y):\n feed_dict = {self.featurizations: featurizations, self.y: y}\n loss, acc = sess.run([self.loss, self.accuracy], feed_dict = feed_dict)\n self.logger.info(\"Model was evaluated from featurizations\")\n return loss, acc",
"def eval_input_fn(features, labels, batch_size):\n features = dict(features)\n if labels is None:\n # No labels, use only features. (in prediction)\n inputs = features\n else:\n inputs = (features, labels)\n print(inputs)\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset",
"def eval_input_fn(features, labels, batch_size):\n #features=dict(features)\n features = dataframetodict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset"
] | [
"0.71969485",
"0.6895252",
"0.68876994",
"0.6868299",
"0.6828266",
"0.68280625",
"0.6817962",
"0.6815478",
"0.6739548",
"0.66729695",
"0.66729695",
"0.6669808",
"0.6641863",
"0.65919566",
"0.6583201",
"0.6532106",
"0.6510232",
"0.6506622",
"0.65015924",
"0.64879614",
"0.64562",
"0.6448343",
"0.64396787",
"0.6437344",
"0.64362913",
"0.64341515",
"0.64323837",
"0.6425257",
"0.64235103",
"0.6390746"
] | 0.8275948 | 0 |
Simple wrapper around sklearn's learning curve module | def learning_curve(self, features, labels):
return learning_curve(self._model, features, labels) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train(self,features,y):\r\n \r\n if self.learn_type == \"nn\":\r\n #generate supervised dataset\r\n return(self.learner.train_on_batch(features,y))\r\n elif self.learn_type == \"linear\":\r\n grad = 0\r\n n = len(features)\r\n for i in range(n):\r\n #sum over the instances to get an estimate of the gradient\r\n print((y[i] - self.learner.activate(features[i])))\r\n grad -= (y[i] - self.learner.activate(features[i])) * \\\r\n self.learner.grad(features[i])\r\n grad /= n\r\n #update paramter\r\n param = np.copy(self.learner.param)\r\n self.learner.param = param - self.alpha * grad\r\n #print(self.learner.param)\r",
"def learn(self, Xtrain, ytrain):",
"def linear_regression(features, values):\n clf = SGDRegressor(n_iter=100)\n clf.fit(features,values)\n print(clf.score(features,values))\n intercept = clf.intercept_ \n params = clf.coef_\n \n return intercept, params",
"def __init__(self, data, target, target_names, alpha=.0001, n_iter=100, penalty='l2', preprocess=False):\n super().__init__(data, target, target_names, sklearn.linear_model.SGDClassifier(alpha=alpha, n_iter=n_iter,\n penalty=penalty), preprocess=preprocess)",
"def learnHyperLinear(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):\n #Check if test is empty, if it is, don't refit and predict data\n testAvailable = np.size(xTest,0)!=0\n\n # Parameters selection\n #====================\n cRange = np.logspace(-5,1,3)\n parameters = {'C': cRange}\n\n if penalty=='l1':\n dual=False\n else:\n dual=True\n\n #Creating Model and begin classification\n #=======================================\n classif = svm.LinearSVC(penalty=penalty, class_weight=CLASS_WEIGHT, dual=dual)\n clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=5, n_jobs=jobs, verbose=3, refit=testAvailable)\n print(\"Begin\\n...\")\n clf.fit(X,y)\n\n \n #Get results, print and write them into a file\n #============================================\n print(clf.best_params_, clf.best_score_)\n\n if testAvailable:\n scores = testModel(clf.best_estimator_,X,y,xTest,yTest,penalty)\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Linear',\\\n penalty,scoring, transformedData, scores=scores)\n else:\n print(\"No test, don't predict data\")\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Linear',\\\n penalty,scoring, transformedData, scores=None)",
"def get_learning_curve(estimator, X, y, ylim=None, cv=None, n_jobs=4, train_sizes=np.linspace(.125, 1.0, 8)):\n if ylim is not None:\n plt.ylim(*ylim)\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n\n return train_sizes, train_scores_mean, test_scores_mean, train_scores_std, test_scores_std",
"def sklearn(experiment, method, prediction_threshold=0.5, **kwargs):\n experiment['method'] = method\n experiment['prediction_threshold'] = prediction_threshold\n X_train = experiment['X_train']\n X_test = experiment['X_test']\n y_train = experiment['y_train']\n\n\n classifier = None\n if method == 0:\n # k-Nearest Neighbors\n classifier = KNeighborsClassifier(**kwargs)\n elif method == 1:\n # Logistic Regression\n classifier = LogisticRegression(**kwargs)\n elif method == 2:\n # Random Forest\n classifier = RandomForestClassifier(**kwargs)\n elif method == 3:\n # Support Vector Classifier\n classifier = SVC(kernel = 'rbf') # kernel = linear, poly, rbf, sigmoid\n elif method == 4:\n # Gaussian Naive Bayes\n classifier = GaussianNB(**kwargs)\n elif method == 5:\n # Decision Trees\n classifier = DecisionTreeClassifier(**kwargs)\n elif method == 6:\n # AdaBoost Classifier\n classifier = AdaBoostClassifier(**kwargs)\n elif method == 7:\n # Gradient Boosting Classifier\n classifier = GradientBoostingClassifier(**kwargs)\n elif method == 8:\n # Neural Network Classifier\n classifier = MLPClassifier(**kwargs)\n # classifier = MLPClassifier(hidden_layer_sizes=(10, 5))\n else:\n print('Invalid method!')\n\n classifier.fit(X_train, np.ravel(y_train))\n\n # output probability of prediction, use threshold to pick class\n y_train_probabilities = classifier.predict_proba(X_train)\n y_test_probabilities = classifier.predict_proba(X_test)\n\n\n y_test = experiment['y_test']\n\n FPR, TPR, prediction_threshold = roc_curve(y_test, y_test_probabilities[:, 1], pos_label=1)\n\n N_roc = np.shape(FPR)[0]\n best_d = 10\n best_i = 0\n d = np.ones((N_roc, 1))\n for i in range(N_roc):\n d[i] = np.sqrt((1 - TPR[i]) ** 2 + FPR[i] ** 2)\n if best_d > d[i]:\n best_d = d[i]\n best_i = i\n\n threshold = prediction_threshold[best_i]\n # auc2 = roc_auc_score(y_test, y_test_probabilities[:, 1])\n y_train_prediction = (y_train_probabilities[:, 1] >= threshold) * 1\n y_test_prediction = (y_test_probabilities[:, 1] >= threshold) * 1\n\n experiment['FPR'] = FPR\n experiment['TPR'] = TPR\n experiment['y_test_probabilities'] = y_test_probabilities\n experiment['y_train_probabilities'] = y_train_probabilities\n experiment['y_test_prediction'] = y_test_prediction\n experiment['y_train_prediction'] = y_train_prediction\n\n return experiment",
"def test_learning_curves():\n\n p = pipeline.Pipeline(\n FX_TRAIN,\n FX_TEST,\n FX_LOOKUP,\n RESULTS_DIR\n )\n\n data = p.learning_curves()",
"def __init__(self):\n super().__init__()\n import sklearn\n import sklearn.linear_model\n self.model = sklearn.linear_model.LogisticRegression",
"def train(self, X, y):",
"def sklearn_train() -> None:\n cross_validate(args=SklearnTrainArgs().parse_args(), train_func=run_sklearn)",
"def __init__(self, estimator = LogisticRegression()): \n\t self.estimator = estimator",
"def learningCurve(X, y, Xval, yval, Lambda):\n\n # Number of training examples\n m, _ = X.shape\n\n # You need to return these values correctly\n error_train = np.zeros(m)\n error_val = np.zeros(m)\n\n for i in range(m):\n theta = trainLinearReg(X[:i + 1], y[:i + 1], Lambda)\n error_train[i], _ = linearRegCostFunction(X[:i + 1], y[:i + 1], theta, 0)\n error_val[i], _ = linearRegCostFunction(Xval, yval, theta, 0)\n \n return error_train, error_val",
"def __init__(self, reg_penalty='l2', reg_inv=1.0, k_fold=5, random_state=0):\n print(\"Initialize model Logistic Regression\")\n self.reg_penalty = reg_penalty\n self.reg_inv = reg_inv\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.LogisticRegression(penalty=self.reg_penalty,\n C=self.reg_inv,\n max_iter=1000, \n random_state=self.random_state)",
"def __call__(self, y, pred, sample_weight=None):",
"def plot_learning_curve(model, X_train, X_test, y_train, y_test):\n\n m, train_scores, valid_scores = learning_curve(estimator = model, \n X = X_train, y = y_train.ravel(), train_sizes = np.linspace(0.1,1.0, 80))\n\n train_cv_err = np.mean(train_scores, axis=1)\n test_cv_err = np.mean(valid_scores, axis=1)\n tr, = plt.plot(m, train_cv_err)\n ts, = plt.plot(m, test_cv_err)\n plt.legend((tr, ts), ('training error', 'test error'), loc = 'best')\n plt.title('Learning Curve')\n plt.xlabel('Data Points')\n plt.ylabel('Accuracy')",
"def plot_learning_curve(ax, estimator, X, y, ylim=None, cv=None,\n n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5)):\n ax.set_xlabel(\"Training examples\")\n ax.set_ylabel(\"F1 score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes, scoring='f1_macro')\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n ax.grid()\n\n ax.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n ax.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n ax.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n ax.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n ax.legend(loc=\"best\")\n\n if ylim is not None:\n ax.set_ylim(*ylim)\n return ax",
"def train_logisticRegression(data: np.array, labels: np.array)->None:\n\n n_examples = np.size(data, 0)\n n_features = np.size(data, 1)\n n_categories = np.size(labels, 1)\n\n data = np.hstack((np.ones((n_examples, 1)), data))\n\n print(data[0:5, :])\n\n X_train, X_test, y_train, y_test, idx_test = split_data(data, labels, 0.7)\n\n convergence_goal = 1e-3\n learning_rate = 0.01\n\n theta = np.random.uniform(size=((n_features+1, n_categories)))\n\n for i in range(n_categories):\n\n cost_var = 1\n\n previous_cost = 1e6\n iterations = 0\n cost_to_plot = []\n\n while cost_var > convergence_goal:\n iterations += 1\n cost, grad = costFunction(X_train, y_train[:, i], theta[:, i])\n theta[:, i] = update_theta(theta[:, i], grad, learning_rate)\n cost_var = previous_cost - cost\n previous_cost = cost\n if iterations == 1: cost_var = 1\n cost_to_plot.append(cost)\n # print(cost)\n\n plt.plot(range(iterations), cost_to_plot, 'g-', label = 'cost')\n plt.xlabel('iterations')\n plt.ylabel('cost')\n # plt.show()\n\n predictions = lrPredict(theta, X_test)\n\n print(predictions[0:5, :])\n print(y_test[0:5, :])\n\n accuracy = np.mean([p == l for p, l in zip(predictions, y_test)])\n print(\"Accuracy = {}\".format(accuracy))\n\n pass",
"def mlr(df, exp_vars, resp_var, \n method='ols', \n fit_intercept=True,\n kcv=3,\n normalize=False):\n from sklearn import cross_validation\n from sklearn.linear_model import LinearRegression, RidgeCV\n from sklearn.linear_model import LassoCV, ElasticNetCV\n from sklearn.metrics import r2_score\n from sklearn.utils import resample\n import matplotlib.pyplot as plt\n import seaborn as sn\n import pandas as pd\n import numpy as np\n \n # Separate data\n X = df[exp_vars]\n y = df[resp_var]\n \n # Setup model\n if method == 'ols':\n model = LinearRegression(fit_intercept=fit_intercept, \n normalize=normalize)\n elif method == 'lasso':\n model = LassoCV(fit_intercept=fit_intercept, \n normalize=normalize, \n max_iter=10000,\n cv=kcv)\n elif method == 'ridge':\n model = RidgeCV(fit_intercept=fit_intercept, \n normalize=normalize, \n alphas=np.logspace(-10, 10, 21))\n elif method == 'el-net':\n model = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1],\n fit_intercept=fit_intercept, \n normalize=normalize,\n cv=kcv)\n else:\n raise ValueError('\"method\" parameter must be in [\"ols\", \"lasso\", \"ridge\", \"el-net\"]')\n \n # k-fold cross validation\n #cv_scores = cross_validation.cross_val_score(model, X, y, cv=kcv, scoring='r2')\n #print 'Mean r2 from %s-fold CV: %.3f\\n' % (kcv, cv_scores.mean())\n \n # Train model on full dataset\n model.fit(X, y)\n \n # Get y-hat\n y_pred = model.predict(X)\n \n # r2 based on calibration data\n r2 = r2_score(y, y_pred)\n print 'r2:', r2\n print ''\n \n # Summary of model\n print model\n print ''\n \n if method == 'lasso':\n print 'Lasso alpha:', model.alpha_\n print ''\n elif method == 'ridge':\n print 'Ridge alpha:', model.alpha_\n print ''\n elif method == 'el-net':\n print 'Elastic net alpha:', model.alpha_ \n print 'Elastic net L1 ratio:', model.l1_ratio_ \n print ''\n else: # OLS\n pass\n \n # Plot\n fig = plt.figure(figsize=(15,15))\n \n # Paired points for each site\n ax1 = plt.subplot2grid((2,2), (0,0), colspan=2)\n ax1.plot(range(0, len(X.index)), y, 'ro', label='Observed')\n ax1.plot(range(0, len(X.index)), y_pred, 'b^', label='Modelled')\n \n ax1.set_xticks(range(0, len(X.index)))\n ax1.set_xticklabels(X.index, rotation=90, fontsize=12)\n ax1.set_xlim(0, len(X.index)-1)\n \n ax1.set_xlabel('Site code', fontsize=16)\n ax1.set_ylabel(resp_var)\n ax1.set_title('Points paired for each location', fontsize=20)\n ax1.legend(loc='best', fontsize=16)\n \n # Modelled versus observed\n ax2 = plt.subplot2grid((2,2), (1,0), colspan=1)\n ax2.plot(y, y_pred, 'ro')\n ax2.set_xlabel('Observed', fontsize=16)\n ax2.set_ylabel('Modelled', fontsize=16)\n ax2.set_title('Modelled versus observed', fontsize=20)\n \n # Hist of residuals\n ax3 = plt.subplot2grid((2,2), (1,1), colspan=1)\n sn.distplot(y - y_pred, kde=True, ax=ax3)\n ax3.set_title('Histogram of residuals', fontsize=20)\n \n plt.tight_layout()\n \n # Get param estimates\n params = pd.Series(model.coef_, index=X.columns)\n\n # Estimate confidence using bootstrap\n # i.e. what is the std. dev. of the estimates for each parameter\n # based on 1000 resamplings\n err = np.std([model.fit(*resample(X, y)).coef_ for i in range(1000)], \n axis=0)\n\n # Build df\n res = pd.DataFrame({'effect':params,\n 'error':2*err})\n\n # Rough indicator of significance: are the estimated values more than\n # 2 std. devs. from 0 (~95% CI?). NB: this assumnes the \"marginal posterior\" \n # is normal, which I haven't tested for and which quite possibly isn't true\n # - use with care! 
\n res['signif'] = np.abs(res['effect']) > res['error']\n \n return res",
"def plot_learning_curve(X, y, maxdepth, estimator, plt):\n # create cv training and test scores for various training set sizes\n train_sizes, train_scores, test_scores = learning_curve(estimator,\n X, # feature matrix\n y, # target vector\n cv=10, # number of folds in cross-validation\n scoring='neg_mean_squared_error', # metric\n n_jobs=-1, # use all computer cores,\n train_sizes=np.linspace(0.01, 1.0, 30) # 30 different sizes of the training set\n )\n # create means and standart deviations of training set scores\n train_mean = np.mean(train_scores, axis=1)\n train_std = np.std(train_scores, axis=1)\n\n # create means and standart deviations of test set scores\n test_mean = np.mean(test_scores, axis=1)\n test_std = np.std(test_scores, axis=1)\n\n # draw lines\n plt.plot(train_sizes, train_mean, '--', color='#111111', label=\"Training score\")\n plt.plot(train_sizes, test_mean, color='#111111', label=\"Cross-validation score\")\n\n # draw bands\n plt.fill_between(train_sizes, train_mean - train_std, train_mean + train_std, color=\"#DDDDDD\")\n plt.fill_between(train_sizes, test_mean - test_std, test_mean + test_std, color=\"#f4d0d7\")\n \n # create plot \n plt.title(\"Learning curve\")\n plt.xlabel(\"Training set size\", fontsize=18)\n plt.ylabel(\"mse\", fontsize=18)\n plt.legend(loc=\"best\")\n plt.tight_layout()",
"def linear_regression_sklearn(data):\n# Split the data into training/testing sets\n dataset = np.array(data)\n\n X_train = dataset[:,0].reshape(-1,1)\n y_train = dataset[:,1]\n\n# Create linear regression object\n regr = linear_model.LinearRegression()\n\n# Train the model using the training sets\n regr.fit(X_train, y_train)\n\n return (regr.coef_[0], regr.intercept_)",
"def do_scikit_learn_regression(data, verbose = False):\n \n \n regr = linear_model.LinearRegression()\n\n x = data['c'].values.reshape(100,1)\n y = data['f'].values.reshape(100,1)\n \n regr.fit(x, y)\n \n if verbose:\n\n string = '\\n'.join((\n f'Coefficient of {regr.coef_[0][0]} compared to actual {9/5}',\n f'Intercept of {regr.intercept_[0]} compared to actual {32}'\n ))\n\n print (string)\n\n return regr.coef_[0][0], regr.intercept_[0]",
"def fit(self, X):",
"def stability_lasso(x, y, **kwargs):\n rl = RandomizedLasso()\n if 'param' in kwargs:\n rl.set_params(**kwargs['param'])\n rl.fit(x, y)\n return rl.get_support()",
"def nnRegression(data):",
"def ex_2_b(x_train, y_train, x_test, y_test):\n ###########\n ## TODO:\n ## Train SVMs with polynomial kernels for different values of the degree\n ## (Remember to set the 'coef0' parameter to 1)\n ## and plot the variation of the training and test scores with polynomial degree using 'plot_score_vs_degree' func.\n ## Plot the decision boundary and support vectors for the best value of degree\n ## using 'plot_svm_decision_boundary' function\n ###########\n degrees = range(1, 21)\n\n test_scores = np.array([])\n train_scores = np.array([])\n best_svm = None\n best_test_score = 0\n\n for deg in degrees:\n clf = svm.SVC(kernel='poly', degree=deg, coef0=1)\n clf.fit(x_train, y_train)\n\n test_score = clf.score(x_test, y_test)\n\n if test_score > best_test_score:\n best_test_score = test_score\n best_svm = clf\n\n test_scores = np.append(test_scores, test_score)\n train_scores = np.append(train_scores, clf.score(x_train, y_train))\n\n plot_score_vs_degree(train_scores, test_scores, degrees)\n\n plot_svm_decision_boundary(clf, x_train, y_train, x_test, y_test)",
"def mylinearsvm(lambdat, eta_init, maxiter, X, y):\n d = np.size(X, 1)\n beta_init = np.zeros(d)\n theta_init = np.zeros(d)\n betas, objs = fast_grad(beta_init, theta_init, lambdat, eta_init, maxiter,X=X,y=y)\n return betas, objs",
"def train(features, targets, weights, bias):\n # see gradient_descent for explanation\n epochs = 100\n learning_rate = 0.1\n\n picture_nb = 2\n\n # Print current accuracy. How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n for epoch in range(epochs):\n if epoch % 10 == 0:\n # get normalized scores\n predictions = activation(pre_activation(features, weights, bias))\n # compare with targets to see how bad our algorithm is\n print(\"Cost = %s\" % cost(predictions, targets))\n # Replot graph. Check in create_dataset for explanation of parameters\n if picture_nb == 2:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='red')\n elif picture_nb == 11:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='green')\n else:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='orange')\n picture_nb+=1\n\n # Initialize gradients\n # weights_gradients is 2D array with 2 values\n weights_gradients = np.zeros(weights.shape)\n bias_gradient = 0\n # Go through each row\n for feature, target in zip(features, targets):\n # Compute prediction\n z = pre_activation(feature, weights, bias)\n # Get normalized score\n y = activation(z)\n # Update gradients based on formulas established before. Look at gradient_descent to understand what we\n # are doing. Also, the formulas are below, just before the call of the function train.\n weights_gradients += (y - target) * derivative_activation(z) * feature\n # no multiplication of feature because it does not depend on some coordinates.\n bias_gradient += (y - target) * derivative_activation(z)\n\n # Update variables. These are the lines that result the cost to get reduced.\n weights = weights - learning_rate * weights_gradients\n bias = bias - learning_rate * bias_gradient\n\n # Print final accuracy. How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n plt.scatter(features[:, 0], features[:, 1], s=40, c=targets, cmap=plt.cm.Spectral)\n plt.savefig(\"DataPointsLineEvolution.png\")\n # legend for understanding\n plt.legend(['Original division', 'New division', 'New division', 'New division', 'New division', 'New division',\n 'New division', 'New division', 'New division', 'Final division'], loc='upper left')\n # save picture of data points drawn.\n plt.savefig(\"DataPointsLineEvolutionLegend.png\")",
"def train_model(x_tra, y_tra):\n\n clf1 = AdaBoostClassifier(n_estimators=300, random_state=1)\n clf1.fit(x_tra, y_tra)\n return clf1",
"def __init__(self, reg_penalty='l2', reg=0.001, k_fold=5, random_state=0):\n print(\"Initialize model Perceptron\")\n self.reg_penalty = reg_penalty\n self.reg = reg\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.Perceptron(penalty=reg_penalty,\n alpha=self.reg,\n max_iter=1000,\n random_state=self.random_state)"
] | [
"0.64221656",
"0.63034314",
"0.6246641",
"0.6199103",
"0.61902195",
"0.6133346",
"0.6075857",
"0.6074158",
"0.6016552",
"0.5998704",
"0.59975034",
"0.59883845",
"0.59740204",
"0.59331024",
"0.5884026",
"0.58812094",
"0.5867041",
"0.58130425",
"0.5809215",
"0.5798471",
"0.5792855",
"0.5756564",
"0.57458466",
"0.57367617",
"0.57365996",
"0.5735482",
"0.5725889",
"0.5722918",
"0.5716548",
"0.5710387"
] | 0.70836437 | 0 |
Create an agent membership | def create_agent_membership(self, context, agent_membership):
am = agent_membership['agent_membership']
with context.session.begin(subtransactions=True):
am_db = AgentMembership(id=am['id'],
ip_address=am['ip_address'])
context.session.add(am_db)
return self._make_agent_membership_dict(am_db) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_member(self, body=None):\r\n return self.post(self.members_path, body=body)",
"def add_member():\n client = RequestManager()\n client.set_method(\"POST\")\n client.set_endpoint(\"/accounts/{0}/memberships\".format(CONFIG_DATA['account_id']))\n body = {\"person_id\": CONFIG_DATA['member_id']}\n client.set_body(json.dumps(body))\n client.execute_request()",
"def create_member(org_id, group_id, target_group_ids, sex, first_name, last_name, title_name, email):\n user = get_user_by_email(email)\n # --- falls e-mail schon existiert wird nichts unternommen\n if user != None:\n if org_id > 0: # nur bei Schulen wird die Schulnummer vorangestellt\n prefix = '%i_' % org_id\n else:\n prefix = ''\n user = User()\n username = get_username(prefix, first_name, last_name)\n user.username = username\n user.sex = sex\n user.first_name = first_name\n user.last_name = last_name\n user.email = email\n user.title = title_name\n user.is_staff = False\n user.is_active = True\n user.is_superuser = False\n user.date_joined = datetime.datetime.now()\n password = generate_passwd()\n user.set_password(password)\n user.save()\n set_user_org(org_id, user)\n send_password(email, username, password)\n set_user_group(user, get_group_by_id(group_id))\n for group in target_group_ids:\n set_user_group(user, get_group_by_id(group))\n transaction.commit()",
"def create_member(actioncluster, user):\n membership, is_new = (ActionClusterMembership.objects\n .get_or_create(actioncluster=actioncluster, user=user))\n return membership if is_new else None",
"def test_create_member(self):\r\n resource = 'member'\r\n cmd = member.CreateMember(test_cli20.MyApp(sys.stdout), None)\r\n address = '10.0.0.1'\r\n port = '8080'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n pool_id = 'pool-id'\r\n args = ['--address', address, '--protocol-port', port,\r\n '--tenant-id', tenant_id, pool_id]\r\n position_names = ['address', 'protocol_port', 'tenant_id', 'pool_id',\r\n 'admin_state_up']\r\n position_values = [address, port, tenant_id, pool_id, True]\r\n self._test_create_resource(resource, cmd, None, my_id, args,\r\n position_names, position_values,\r\n admin_state_up=None)",
"def create_memberships_project():\n client = RequestManager()\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects/{0}/memberships\".format(STORED_ID['project_id']))\n body = {\"person_id\": CONFIG_DATA['member_id'], \"role\": 'member'}\n client.set_body(json.dumps(body))\n client.execute_request()",
"async def command_create(self, context):\n # await self._create_new_role(context, name, target=GROUP_CATEGORY_NAME)\n print('main create')",
"def create_member(self, context, member):\n LOG.info(\"Received request 'Create Member' for Pool:%(pool_id)s \",\n {'pool_id': member['pool_id']})\n arg_dict = {'context': context,\n lb_const.MEMBER: member,\n }\n self._send_event(lb_const.EVENT_CREATE_MEMBER_V2, arg_dict,\n serialize=True,\n binding_key=member[lb_const.POOL]['loadbalancer_id'],\n key=member['id'])",
"def create_creator_member(sender, **kwargs):\n if kwargs.get('created'):\n league = kwargs['instance']\n league.members.create(user=league.creator,\n fb_uid=league.creator.fb_uid,\n status='creator')",
"def _create_member(self, **kwargs):\n category_name = kwargs.pop('category_name', Category.ACTIVE)\n params = {\n 'category': Category.objects.get(name=category_name),\n 'first_payment_month': 8,\n 'first_payment_year': 2015,\n 'has_student_certificate': False,\n 'has_subscription_letter': True,\n 'has_collaborator_acceptance': False,\n }\n params = {k: kwargs.pop(k, v) for k, v in params.items()}\n member = Member.objects.create(**params)\n\n # create the related person\n params = {\n 'membership': member,\n 'nickname': 'test-nick',\n 'picture': 'fake-pic',\n }\n params = {k: kwargs.pop(k, v) for k, v in params.items()}\n Person.objects.create(**params)\n\n assert not kwargs, kwargs # would indicate a misuse of the parameters\n return member",
"def create(self, identity, data=None, record=None, **kwargs):\n if system_process in identity.provides:\n return\n\n member = {\n \"type\": \"user\",\n \"id\": str(identity.id),\n }\n self.service.members.add(\n # the user is not yet owner of the community (is being added)\n # therefore we cannot use `identity`\n system_identity,\n record.id,\n {\"members\": [member], \"role\": current_roles.owner_role.name},\n uow=self.uow,\n )\n\n # Invalidate the membership cache\n on_user_membership_change(identity=identity)",
"def create(self, name, login, password, email, address=\"\", vat=\"\", jobguid=\"\", executionparams=None):",
"def createMentor(self, org):\n self.createProfile()\n self.profile.mentor_for = [org.key()]\n self.profile.put()",
"def createAgent(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_alert_create_for_site_members(self):\n pass",
"def make_agent(agent_id, **kwargs):\n return agent_register[agent_id](**kwargs)",
"def test_agent_creation():\n agent = AgentFactory()\n agent.name = 'agent test name'\n agent.save()\n assert agent.name == 'agent test name'",
"def create_member(net_id):\n #TODO put this exception handling in to the presentation layer\n #if ' ' in net_id or '@' in net_id:\n # raise Exception('Only enter the first portion of the net id => [email protected] - jmrolf')\n student_html = Info_IaState_Scraper.get_raw_html(net_id)\n student_data = Info_IaState_Scraper.parse_student_data(student_html)\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"INSERT INTO Member VALUES('\"+net_id+\"', '\"+net_id+\"@iastate.edu', \" \\\n \"'\"+student_data['classification']+\"', '\" + student_data['major']+\"', \" \\\n \"'\"+student_data['name']+\"', 0)\"\n cursor.execute(sql_string)\n connection.commit()",
"def create(self, data):\n curso = self.context['curso']\n invitation = self.context['invitation']\n user = data['user']\n\n now = timezone.now()\n\n # studen creation\n member = Rol.objects.create(\n user=user,\n profile=user.profile,\n curso=curso,\n invited_by=invitation.issued_by\n )\n\n # Update Invitation\n invitation.used_by = user\n invitation.used = True\n invitation.used_at = now\n invitation.save()\n\n # Update issuer data\n issuer = Rol.objects.get(user=invitation.issued_by, curso=curso)\n issuer.remaining_invitations -= 1\n issuer.save()\n\n return member",
"def main_role_create(\n client: CitusCloudMgmt,\n **opts: tp.Any\n) -> None:\n\n role_id = client.create_role(opts[\"formation\"], opts[\"name\"])\n logger.info(f\"Created new role \\\"name\\\" with id=\\\"{role_id}\\\"\")\n click.echo(role_id)",
"def test_ipam_roles_create(self):\n pass",
"async def create(self, ctx):\n\n # get the model data for the role assigner object\n data = await self.get_objects(\n model=RoleAssigner, filter={\"bot__name\": str(self.bot_name)}\n )\n\n # role assigner object\n data = data[0]\n\n message = await ctx.send(\"_ _\", embed=self.create_message_embed(data))\n\n data.message.uid = message.id\n data.message.cuid = message.channel.id\n\n self.message_id = data.message.uid\n\n await self.update_reactions(message, data)\n\n await self.update_objects(model_instance=data)",
"def create_individual(self):\n pass",
"def create_members(self, accounts_info):\n detector_id = self.list_detector()\n if detector_id:\n try:\n response = self.client.create_members(\n AccountDetails=accounts_info,\n DetectorId=detector_id\n )\n for result in response['UnprocessedAccounts']:\n print(result)\n return True\n except ClientError as e:\n print(e.response['Error']['Code'])\n return False",
"async def post(self):\r\n data = await self.request.json()\r\n register_date = data[\"register_date\"]\r\n ip_address = data[\"ip_address\"]\r\n try:\r\n Agent.create(register_date=register_date, ip_address=ip_address)\r\n response_obj = {\"status\": \"success\"}\r\n return web.Response(text=str(response_obj), status=201)\r\n except Exception as exception:\r\n response_obj = {\"status\": \"failed\", \"reason\": exception}\r\n error_message = str(exception)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)",
"def create(self, name, username, emailaddress, maxMemoryCapacity=-1.0, maxVDiskCapacity=-1, maxCPUCapacity=-1, maxNetworkPeerTransfer=-1, maxNumPublicIP=-1, sendAccessEmails=True, **kwargs):\n #put your code here to implement this method\n raise NotImplementedError (\"not implemented method create\")",
"def _register_agent(self, agent, agent_avatar: AgentBody):\n\n # Random seed for agent between 1 and 10000000, might need to be adjusted still\n agent_seed = self.__rnd_gen.randint(1, 1000000)\n\n # check if the agent can be succesfully placed at that location\n self.__validate_obj_placement(agent_avatar)\n\n # Add agent to registered agents\n self.__registered_agents[agent_avatar.obj_id] = agent_avatar\n\n if self.__verbose:\n print(f\"@{os.path.basename(__file__)}: Created agent with id {agent_avatar.obj_id}.\")\n\n # Get all properties from the agent avatar\n avatar_props = agent_avatar.properties\n\n if agent_avatar.is_human_agent is False:\n agent._factory_initialise(agent_name=agent_avatar.obj_name,\n agent_id=agent_avatar.obj_id,\n action_set=agent_avatar.action_set,\n sense_capability=agent_avatar.sense_capability,\n agent_properties=avatar_props,\n customizable_properties=agent_avatar.customizable_properties,\n callback_is_action_possible=self.__check_action_is_possible,\n rnd_seed=agent_seed)\n else: # if the agent is a human agent, we also assign its user input action map\n agent._factory_initialise(agent_name=agent_avatar.obj_name,\n agent_id=agent_avatar.obj_id,\n action_set=agent_avatar.action_set,\n sense_capability=agent_avatar.sense_capability,\n agent_properties=avatar_props,\n customizable_properties=agent_avatar.customizable_properties,\n callback_is_action_possible=self.__check_action_is_possible,\n rnd_seed=agent_seed,\n key_action_map=agent_avatar.properties[\"key_action_map\"])\n\n return agent_avatar.obj_id",
"async def addFreeAgentRole(self, ctx, tier, role : discord.Role):\n server_dict = self.get_server_dict(ctx)\n free_agent_dict = server_dict.setdefault(\"Free agent roles\", {})\n \n try:\n free_agent_dict[tier] = role.id\n self.save_data()\n await self.bot.say(\"Franchise role for {0} = {1}\".format(tier, role.mention))\n except IndexError:\n await self.bot.say(\":x: Error adding info to the free agent role dictionary\")",
"def do_user_create():\n target = User(\n request.form['gender'],\n request.form['first_name'],\n request.form['name'],\n request.form['mail'],\n request.form['meter_id'],\n request.form['group_id'],\n secrets.token_hex(33))\n target.set_role(request.form['role'])\n target.nick = request.form['nick']\n db.session.add(target)\n db.session.commit()\n return user_list(\"Created user \" + target.name)",
"def test_create_owner(self):\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n\n url = reverse(\n 'projectroles:api_invite_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'email': INVITE_USER_EMAIL,\n 'role': PROJECT_ROLE_OWNER,\n 'message': INVITE_MESSAGE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n self.assertEqual(len(mail.outbox), 0)"
] | [
"0.6627206",
"0.63848805",
"0.6290456",
"0.6268151",
"0.62504977",
"0.624233",
"0.6192578",
"0.59588885",
"0.5937359",
"0.59215134",
"0.589374",
"0.5893678",
"0.58593506",
"0.58082986",
"0.58055055",
"0.5803748",
"0.580091",
"0.5796165",
"0.5793468",
"0.57622015",
"0.57265323",
"0.5709941",
"0.56969666",
"0.56872076",
"0.5662437",
"0.56577617",
"0.5644747",
"0.563892",
"0.5636193",
"0.562371"
] | 0.7037421 | 0 |
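The create_agent_membership record above writes an AgentMembership row inside a subtransaction and returns it through _make_agent_membership_dict; neither the model nor the helper is shown. A minimal sketch of what they might look like, assuming a plain SQLAlchemy declarative model (the column types and the helper body are assumptions, not the original plugin code):

import sqlalchemy as sa
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class AgentMembership(Base):
    # hypothetical table definition; the real model is not part of the record
    __tablename__ = 'agent_memberships'
    id = sa.Column(sa.String(36), primary_key=True)
    ip_address = sa.Column(sa.String(64), nullable=False)

def _make_agent_membership_dict(am_db, fields=None):
    # flatten the DB row into the dict shape returned by the API layer
    res = {'id': am_db.id, 'ip_address': am_db.ip_address}
    if fields:
        res = {key: value for key, value in res.items() if key in fields}
    return res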
Test that load_class works correctly and raises the right exceptions. | def test_load_class():
full_classname = 'collections.namedtuple'
cls_ = load_class(full_classname)
assert cls_ is collections.namedtuple
with pytest.raises(ValueError):
full_classname = 'collections.Foobar'
load_class(full_classname)
with pytest.raises(ImportError):
full_classname = 'barfoo.Foobar'
load_class(full_classname) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_load_testcase(self):\n tests = self.loader.load(\"tests.sampletest.hellotest.HelloTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest.hellotest import HelloTest\n\n self.assertEqual(type(tests[0]), HelloTest)",
"def test_load_testcase_in_module(self):\n tests = self.loader.load(\"tests.sampletest.InitTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest import InitTest\n\n self.assertEqual(type(tests[0]), InitTest)",
"def test_load_model_method_with_wrong_class_path(self):\n # arrange\n # instantiating the model manager class\n model_manager = ModelManager()\n\n # act\n # adding the model\n exception_raised = False\n exception_message = None\n # accessing the MLModelMock model object\n try:\n model_manager.load_model(\"sdf.sdf.sdf\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"No module named 'sdf'\")",
"def test_class_errored(self, cls, exception):",
"def _loadClass(self, loader):\r\n raise NotImplementedError(\"The method 'loadClass' has to \"\r\n 'be implemented.')",
"def test_class_started(self, cls):",
"def test_loader(cls):\r\n return _test_loader_factory(cls)",
"def test_initialization(self):\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n self.assertEqual(test_node.name, f'{self.TEST_PKG}.{self.TEST_CLS}')\n self.assertEqual(test_node.package, self.TEST_PKG)\n self.assertEqual(test_node.class_name, self.TEST_CLS)",
"def _load(self):\n raise NotImplementedError()",
"def test_import_string_missing_class_or_attribute(self):\n valid_module = 'ttgn.pokedex'\n invalid_class = 'NonexistentClass'\n with pytest.raises(ImportError) as error:\n utils.import_string('{}.{}'.format(valid_module, invalid_class))\n assert 'Module {} has no class or attribute {}'.format(\n valid_module, invalid_class) == str(error.value)",
"def test_custom_class_fail_import(self):\n conf = Configuration(Path(self.conf_dir, \"custom_class_doesnt_exists.yaml\"))\n self.test_survey = Survey.objects.get(name=\"Test survëy\")\n fail_import = str(Survey2Tex(self.test_survey, conf))\n should_contain = [\n \"could not render\",\n \"not a standard type\",\n \"importable valid Question2Tex child class\",\n \"'raw'\",\n \"'sankey'\",\n \"'pie'\",\n \"'cloud'\",\n \"'square'\",\n \"'polar'\",\n ]\n for text in should_contain:\n self.assertIn(text, fail_import)",
"def test_load_model_method(self):\n # arrange\n # instantiating the model manager class\n model_manager = ModelManager()\n\n # adding the model\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model_object = None\n # accessing the MLModelMock model object\n try:\n model_object = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n print_tb(e)\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(model_object is not None)",
"def test_lint(self):\n l = self.l\n l.loadTestsFromTestCase\n l.loadTestsFromModule\n l.loadTestsFromName\n l.loadTestsFromNames",
"def test_load(self):\n command = constituencies.Command()\n command.handle('load', silent=True)",
"def test_load_fail():\n parameters = {'path': 'foo.bar'}\n\n images.load(parameters)",
"def setUpClass(cls):\n # check for python3\n cls.assertGreaterEqual(cls, sys.version_info[0], 3)\n # This will import the module to be tested\n cls.module = importlib.import_module(PKG_NAME)",
"def load(self):\n\n raise NotImplementedError",
"def test_load(self):\n (spec, check) = bundylogging.load()\n # It returns the checking function\n self.assertEqual(check, bundylogging.check)\n # The plugin stores it's spec\n self.assertEqual(spec, bundylogging.spec)",
"def test_constructor(self):\n pass",
"def test_instantiates_badgr_lite_class(self):\n badgr = self.get_badgr_setup()\n self.assertIsInstance(badgr, BadgrLite)",
"def setUpClass(self):\n self.repo = Repository(\"https://github.com/qcoumes/gitload_test.git\")\n self.repo.get_repo()\n self.loader = self.repo.load_pltp(\"/PLTP/test.pltp\")",
"def setUpClass(cls):\n cls.assertGreaterEqual(cls, sys.version_info[0], 3)\n cls.module = importlib.import_module(PKG_NAME)",
"def test_bad_class(self):\n\n mock_entry_badclass = mock.create_autospec(EntryPoint)\n mock_entry_badclass.name = \"BadClass\"\n mock_entry_badclass.load = self.returnbadclass\n\n with pytest.warns(AstropyUserWarning, match=r\".*BadClass.*\"):\n populate_entry_points([mock_entry_badclass])",
"def __init__(self):\n if DynamicImporter._instance is not None:\n raise Exception(\"DynamicImporter instance already exists!\")\n DynamicImporter._instance = self\n\n current_path = Path(__file__).parent\n test_path = current_path / \"testdata\"\n files = test_path.rglob(\"*.py\")\n\n for file in files:\n\n if file.name in [\"__init__.py\", \"test_module.py\", \"test_registry.py\", \"connections.py\"]:\n continue\n\n name = file.stem\n module = import_module(f\"testdata.{name}\")\n class_title = f\"{name.title()}Test\"\n\n try:\n _class = getattr(module, class_title) # get the class\n self.class_list[class_title] = _class # add the class to the class list\n except AttributeError: # don't throw exceptions for files that don't have a test\n continue",
"def test_loads(self, game=\"SuperMarioKart-Snes\"):\n with self.assertRaises(NameError):\n retro.make(game=game)",
"def load(self):\n pass",
"def load(self):\n pass",
"def load(self):\n pass",
"def load(self):\n pass",
"def test_can_instantiate(self):\n\n exc_thrown = False\n\n try:\n self.klass(*self.instantiate_args)\n except Exception:\n exc_thrown = True\n\n self.assertFalse(exc_thrown)"
] | [
"0.7552986",
"0.7182643",
"0.70636034",
"0.686629",
"0.6754328",
"0.67336494",
"0.6622316",
"0.6577228",
"0.65132713",
"0.6475406",
"0.6446178",
"0.6431292",
"0.6425918",
"0.63992125",
"0.6388809",
"0.6313314",
"0.6301513",
"0.62873274",
"0.6244719",
"0.62361884",
"0.6233843",
"0.62248665",
"0.62203985",
"0.6209335",
"0.62069184",
"0.61509365",
"0.61509365",
"0.61509365",
"0.61509365",
"0.61487836"
] | 0.72938436 | 1 |
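The test above drives a load_class helper that resolves dotted 'module.ClassName' paths; the helper itself is outside the record. A minimal sketch of an implementation that would satisfy the three assertions, using importlib (this is an illustration of the expected contract, not the tested code):

import importlib

def load_class(full_classname):
    # split 'collections.namedtuple' into a module path and an attribute name
    module_name, _, class_name = full_classname.rpartition('.')
    module = importlib.import_module(module_name)  # unknown modules raise ImportError
    try:
        return getattr(module, class_name)
    except AttributeError:
        # a known module without the requested attribute raises ValueError,
        # matching the second assertion in the test
        raise ValueError('%s has no class named %s' % (module_name, class_name))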
irq_handler contains the code you want to execute when the interrupt occurs. Define your own callback function here by rewriting the code. We make an LED flash in this example. | def irq_handler():
# open an LED session
with LEDs() as LED:
# specify the LED which you want to control
led = Led.LED1
# specify the LED status
led_on_off = True
# writes values 10 times, which makes LED1 flash for 3 seconds
for x in range(0, 10):
            # turn LED1 on or off
LED.write(led, led_on_off)
# add a short delay
time.sleep(0.3)
# if the LED is on, set the parameter to off
# if the LED is off, set the parameter to on
led_on_off = not led_on_off | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def irq(self, handler: Callable, trigger: int, hard: bool = False) -> Callable:",
"def enable_irq(state:int):",
"def extirq_cbf(task):\n try:\n if not execute_LM_function_Core(task.split(' ')):\n console_write(\"[IRQ] EXTIRQ execute_LM_function_Core error: {}\".format(task))\n except Exception as e:\n console_write(\"[IRQ] EVENTIRQ callback: {} error: {}\".format(task, e))",
"def disable_irq() -> int:",
"def enable_irq(state: bool = True, /) -> None:",
"def enableInterrupt():\n console_write(\"[IRQ] TIMIRQ SETUP: {} SEQ: {}\".format(cfgget(\"timirq\"), cfgget(\"timirqseq\")))\n console_write(\"|- [IRQ] TIMIRQ CBF:{}\".format(cfgget('timirqcbf')))\n if cfgget(\"timirq\") and cfgget('timirqcbf').lower() != 'n/a':\n from machine import Timer\n # INIT TIMER IRQ with callback function wrapper\n timer = Timer(0)\n timer.init(period=int(cfgget(\"timirqseq\")), mode=Timer.PERIODIC,\n callback=lambda timer: timirq_cbfs(cfgget('timirqcbf')))",
"def set_on_interrupt_callback(self, callback):\n self.__interrupt_callback = callback",
"def interrupt_kernel(self, kernel_id):",
"def interrupt_kernel(self):",
"def test_interrupt(self):\n with patch('RPi.GPIO.setmode') as mock_setmode:\n gpio = GPIODevice()\n with patch('RPi.GPIO.setup') as mock_setup:\n with patch('RPi.GPIO.add_event_detection') as mock_detection:\n with patch('RPi.GPIO.add_event_callback') as mock_callback:\n gpio.interrupt(self._callback, 0)\n with patch('RPi.GPIO.cleanup') as mock_cleanup:\n gpio.close()\n mock_detection.called_once_with(0, GPIO.BOTH)\n mock_callback.called_once_with(0, self._callback)",
"def stopCallback (self):\n GPIO.remove_event_detect (self.IRQ_PIN)\n self.hasCallback = False",
"def handle(req):\n\n gpio.output(26, gpio.HIGH)\n time.sleep(0.2)\n gpio.output(26, gpio.LOW)\n\n return req",
"def imu_fth_isr(gpio, level, tick):\n isr_time = time.time()\n\n # Sometimes INT1 can trigger again as the FIFO is being read and filled\n # back up at the same time. If the time since the last tick is less than\n # 0.1s then exit the ISR.\n global last_tick\n MIN_TICK_DIFF_US = 10**5 \n tick_diff = pigpio.tickDiff(last_tick, tick)\n print(f\"Time since last tick {tick_diff / 10**6} seconds\")\n if tick_diff < MIN_TICK_DIFF_US:\n return\n\n global fifo_start\n print(f\"Interrupt at {isr_time}\")\n print(f\"FIFO fill time: {isr_time - fifo_start:4.03f} seconds\")\n fifo_start = isr_time\n\n # Read FIFO status\n status1 = imu._fifo_status1\n status2 = imu._fifo_status2\n status3 = imu._fifo_status3\n status4 = imu._fifo_status4\n\n # Number of unread words (16 bits) \n unread_words = ((status2 & 0x0F) << 8) + status1\n print(f\"Words in FIFO: {unread_words}\")\n\n # Pattern index\n # In our case, the accelerometer and gyroscope data rates are equal, so the\n # pattern is in [0:5] where\n # 0 -> Gx\n # 1 -> Gy\n # 2 -> Gz\n # 3 -> Ax\n # 4 -> Ay\n # 5 -> Az\n pattern_index = (status4 << 8) + status3\n print(f\"Index of next reading: {pattern_index}\")\n\n # Read in multiples of 6, the number of readings from Gx to Az\n BYTES_PER_WORD = 2\n WORDS_PER_PATTERN = 6\n words_to_read = unread_words // WORDS_PER_PATTERN * WORDS_PER_PATTERN\n buffer_size = words_to_read * BYTES_PER_WORD\n buffer = bytearray(buffer_size)\n FIFO_DATA_OUT_L = bytearray(b'\\x3E')\n\n # Read FIFO data into buffer\n start_time = time.time()\n imu.i2c_device.write_then_readinto(FIFO_DATA_OUT_L, buffer)\n end_time = time.time()\n total_read_time = end_time - start_time\n print(f\"{buffer_size} bytes read in {total_read_time:.6f} seconds. {buffer_size/total_read_time:.0f} bytes/s\")\n\n # Read FIFO status\n status1 = imu._fifo_status1\n status2 = imu._fifo_status2\n status3 = imu._fifo_status3\n status4 = imu._fifo_status4\n unread_words = ((status2 & 0x0F) << 8) + status1\n print(f\"Words in FIFO: {unread_words}\")\n pattern_index = (status4 << 8) + status3\n print(f\"Index of next reading: {pattern_index}\")\n\n last_tick = tick\n\n # Print data\n PREVIEW_BYTES = 12\n print(f\"buffer = {buffer[:PREVIEW_BYTES].hex()} ... {buffer[-PREVIEW_BYTES:].hex()} | Len: {len(buffer)}\")\n data = [parse_fifo_data(buffer[i:i+2]) for i in range(0, len(buffer), 2)]\n print(f\"data = [{', '.join(map(str, data[:PREVIEW_BYTES]))}, ..., {', '.join(map(str, data[-PREVIEW_BYTES:]))}] | Len: {len(data)}\")\n\n print()",
"def signal_handler(sig, frame):\r\n print('You pressed Control+C')\r\n led.off()\r\n sys.exit(0)",
"def add_button_callback(self, button, function, event=BUTTON_DOWN, threaded=True):\n\t\tif event == LCD.BUTTON_DOWN:\n\t\t\tedge = 'falling'\n\t\telif event == LCD.BUTTON_UP:\n\t\t\tedge = 'rising'\n\t\telif event == LCD.BUTTON_EITHER:\n\t\t\tedge = 'both'\n\t\tRPIO.add_interrupt_callback(button, function, edge, RPIO.PUD_UP, threaded, 20)",
"def startCallback (self):\n if self.hasCallback:\n return\n # set up IRQ interrupt function. GPIO.setmode should alreay have been called\n GPIO.setup(self.IRQ_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.add_event_detect (self.IRQ_PIN, GPIO.FALLING)\n GPIO.add_event_callback (self.IRQ_PIN, AHF_LickDetectorCallback) \n self.hasCallack = True\n # state of touches from one invocation to next, used in callback to separate touches from untouches\n self.prevTouches = self.mpr121.touched()",
"def dummy_callback_handler(self, ret):\n pass",
"def __init__(self, pin_num, button_func):\n self.pin = Pin(pin_num, Pin.IN)\n print(pin_num, button_func)\n self.pin.irq(trigger=Pin.IRQ_RISING, handler=button_func)",
"def interrupt(func):\n def do_stuff(*args, **kwargs):\n App.get_running_app().controller.interrupt(restart=True)\n return func(*args, **kwargs)\n return do_stuff",
"def interrupt_handler(self, signo, frame):\n log.debug(\"interrupting run\")\n self._keep_running = False",
"def keyboard_interrupt_handler(interrupt_signal, frame):\n print(\"Scanning finished\")\n print(\"KeyboardInterrupt ID: {} {} has been caught.\".format(interrupt_signal, frame))\n exit(1)",
"def keyboard_interrupt_handler(interrupt_signal, frame):\n print(\"Scanning finished\")\n print(\"KeyboardInterrupt ID: {} {} has been caught.\".format(interrupt_signal, frame))\n exit(1)",
"def switch(ind, status):\n print(\"Switching :\", ind, \">>\", status == 'on')\n GPIO.output(ind, status == 'on')",
"def led(path, tags, args, source):\n\toscProg = args[0]\n\t#pinValue = args[1]\t\n\t#action = args[2]\n\tprint oscProg\n\t#print pinValue\n\t#print action\n\t\n\t#check if first argument is a pin value\n\tif oscProg in gpioPins.keys():\n\t\tpinValue = args[1]\n\t\taction = args[2]\n\t\t#search gpioPins dict for pin value. Exit when found\n\t\tfor dictColor,gpioPin in gpioPins.iteritems():\n\t\t\tif oscProg == dictColor:\n\t\t\t\tbreak\n\t\t#set the pin color\n\t\tif action == 'solid':\n\t\t\tcLED.setPinValue(gpioPin,pinValue)\n\t\telif action == 'flashFade':\n\t\t\tt = threading.Thread(target=ef.ledFlashFade,args=(gpioPin,pinValue,0.01))\n\t\t\tt.start()\n\t\t\tt.join\n\t\telif action ==\"flash\":\n\t\t\tef.flash(gpioPin,0.1)\n\t\telif action ==\"contFlash\":\n\t\t\tef.flash(gpioPin,0.1)\n\t\telse:\n\t\t\t#not a valid option\n\t\t\tpass\t\t\t\n\t\t\n\t#Turn all LEDs on\n\telif oscProg == 'allOn':\n\t\tcLED.setColor(1,[1,1,1])\n\t\tcLED.setColor(2,[1,1,1])\n\t#Turn all LEDs off\n\telif oscProg == 'allOff':\n\t\tcLED.allOff()\n\telse:\n\t\tpass",
"def callback(self):\n try:\n function()\n finally:\n main_loop.remove_handler(handler[0])",
"def _signal_handler(*_: typing.Any) -> None:\n shutdown_event.set()",
"def shutdown_callback():\n pass",
"def on_switch(self, callback):\n self._switch_callback = callback",
"def gpio_edge_listener(port):\n self.schedule_update_ha_state(True)",
"def event11512210():\n header(11512210, 0)\n knight, is_active = define_args('ii')\n if_event_flag_on(1, is_active)\n if_entity_health_less_than_or_equal(1, knight, 0.1)\n if_condition_true(0, 1)\n chr.disable_gravity(knight)\n chr.disable_collision(knight)\n chr.disable_ai(knight)\n chr.replan_ai(knight)\n wait(2.5)\n # Skipping the fade-out, they just get obliterated usually.\n anim.force_animation(knight, 1201, do_not_wait_for_transition=True, wait_for_completion=True)\n chr.enable_ai(knight)\n chr.disable(knight)\n chr.enable_gravity(knight)\n chr.enable_collision(knight)\n chr.set_special_effect(knight, 3231)\n flag.disable(is_active)\n restart()"
] | [
"0.6793415",
"0.59784335",
"0.5772505",
"0.5751147",
"0.569456",
"0.56923246",
"0.55878675",
"0.5526748",
"0.53978044",
"0.50646555",
"0.4927286",
"0.4895538",
"0.47954524",
"0.4737531",
"0.4658877",
"0.4636048",
"0.46294534",
"0.46239135",
"0.45867148",
"0.45833647",
"0.45762804",
"0.45762804",
"0.4533775",
"0.45126545",
"0.44978258",
"0.4471549",
"0.44616014",
"0.44111654",
"0.44101328",
"0.4403133"
] | 0.72954494 | 0 |
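The irq_handler record above only defines the callback body; how it gets attached to an interrupt depends on the platform (the LEDs()/Led.LED1 session API suggests an NI ELVIS-style library, whose registration call is not shown). Purely as an illustration of the wiring step, and assuming a MicroPython target instead, a zero-argument handler like this could be hooked to a pin edge as follows:

from machine import Pin

def register_handler(pin_number, handler):
    # configure the pin as an input with a pull-up and fire on the falling edge;
    # MicroPython passes the Pin object to the callback, so adapt the
    # zero-argument handler with a small lambda
    button = Pin(pin_number, Pin.IN, Pin.PULL_UP)
    button.irq(trigger=Pin.IRQ_FALLING, handler=lambda pin: handler())
    return button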
This property returns the training data, loading it first if it has not been loaded yet. Note that it returns the training data and labels in the form ([MPS input size, batch, other dimensions], [batch, classifications]), matching how they are used in the MPS and MPSOptimizer classes. If the data is needed in the form ([batch, MPS input size, other dimensions], [batch, classifications]), use the variable _training_data instead. | def training_data(self):
if self._training_data is None:
self._load_training_data()
if self._swapped_training_data is None:
self._swapped_training_data = {}
for key, value in self._training_data.items():
self._swapped_training_data[key] = value
return self._swapped_training_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_training_data(self) -> Tuple[List[np.ndarray], np.ndarray]:\n return self._load_set(config.TRAIN_DIR, True)",
"def getTrainingData(self):\n raise NotImplementedError",
"def get_training_data() -> GraphDataset:\n _load_data_if_needed()\n return training_data",
"def get_data_train(self):\n return self.get_data(self.file_train, self.batch)",
"def _load_training_data(self):\n self._save_training_data()",
"def train_data(self):\n\n return self.__train_data, self.__train_labels",
"def load_data(self, training_data):\n \"\"\"training data format [(instance, label),(instance, label),...]\"\"\"\n self.training_data = training_data",
"def build_training_data_loader(self) -> DataLoader:\n pass",
"def train_data(self):\n return self._train_data",
"def load_data_and_labels(self):\n gen = image.ImageDataGenerator()\n target_size = (224,224)\n if self.preprocess:\n print('Preprocessing data...')\n if not os.path.isdir(self.pproc_dir()):\n os.mkdir(self.pproc_dir())\n \n batch_arr = []\n for ld,segment in [(self.train_dir(), 'train'),\n (self.valid_dir(), 'valid')]:\n # TODO(ness): segment = os.basename(ld)\n flowgen = gen.flow_from_directory(\n ld,\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1)\n # Save the batches using method defined in utils.py\n data = np.concatenate([flowgen.next() for i in range(flowgen.n)])\n batches_dir = self.pproc_dir() + segment + '-bc'\n save_array(batches_dir, data)\n \n # Save the classes.\n cls_dir = self.pproc_dir() + segment + '-cl'\n save_array(cls_dir, flowgen.classes)\n \n batch_arr.append((data, flowgen.classes, flowgen.class_indices))\n \n # Set the data.\n self.training_data = batch_arr[0][0]\n self.validation_data = batch_arr[1][0]\n \n # Classes are zero-indexed and represent a category in\n # numerical form. So if the classes are 'dog' and 'cat',\n # the possible class values will be 0 and 1.\n self.trn_classes = batch_arr[0][1]\n self.val_classes = batch_arr[1][1]\n \n # Labels are the one-hot encoded (i.e. categorical)\n # version of the classes. In other words, if there are\n # 5 classes and an element belongs to class 2,\n # its label will be [0,0,1,0,0] (index 1).\n self.training_labels = to_categorical(batch_arr[0][1])\n self.validation_labels = to_categorical(batch_arr[1][1])\n \n # Class indices are dictionaries of the form\n # {'category_name': 0, 'category_name_2: 1}. They\n # make the mapping between numerical class indices and\n # a human-readable category name. They are (should be...)\n # the same for validation and training, so only load them\n # once, after sanity checking.\n self.cindices = batch_arr[0][2]\n print('Done preprocessing.')\n else:\n print('Loading data...')\n # Load the pre-saved data using methods defined in utils.py. See\n # preprocessing branch for the meaning of the data.\n self.training_data = load_array(self.pproc_dir() + 'train-bc')\n self.validation_data = load_array(self.pproc_dir() + 'valid-bc')\n self.trn_classes = load_array(self.pproc_dir() + 'train-cl')\n self.val_classes = load_array(self.pproc_dir() + 'valid-cl')\n self.training_labels = to_categorical(self.trn_classes)\n self.validation_labels = to_categorical(self.val_classes)\n \n # To get the class indices, we create the generator. It's cheap to\n # run since it doesn't actually load all the data.\n flowgen = gen.flow_from_directory(\n self.train_dir(),\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1) \n self.cindices = flowgen.class_indices\n print('Done loading.')",
"def get_training_data(self):\n train_data = None\n \n if self.left_data is not None:\n train_data = self.left_data\n \n if self.right_data is not None:\n if train_data is not None:\n train_data = train_data.join(self.right_data)\n else:\n train_data = self.right_data\n \n return train_data",
"def load_training_data_generator(self) -> Generator[Tuple[List[np.ndarray], np.ndarray], None, None]:\n return self._load_generator(config.TRAIN_DIR, True)",
"def get_training_data(self):\n labels = self.get_labels()\n\n print 'Loading training data from ', self.train_folder , '...'\n train_index = []\n #train_ans = []\n train_text = []\n cnt = 0\n\n for f in listdir(self.train_folder):\n file_path = join(self.train_folder, f)\n if isfile(file_path):\n cnt += 1\n if cnt % 10000 == 0:\n print 'finished:', cnt # line counter\n #train_index.append(f[:-4])\n self.train_ans.append(labels[f[:-4]])\n with open(file_path, 'rb') as f:\n train_text.append( f.read() )\n\n return train_text",
"def train(self, training_data):\n pass",
"def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels = self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, test_length_input",
"def get_data(self):\n if self.config['model'] == 'vggnet':\n if self.is_training:\n return self.data.shuffle(self.shuffle).batch(self.batch_size)\n elif self.is_testing:\n return self.data.batch(self.batch_size)\n elif not self.is_testing and not self.is_training:\n return self.data.batch(self.batch_size)\n else:\n raise NotImplementedError('In dataset.py: default input not specified for this model!')",
"def get_train_data(self) -> Tuple[np.array, np.array, np.array]:\n train_data = []\n for season in self.__train_seasons:\n train_data.extend(self.__get_season_data(season, sys.maxsize, True))\n train_input = np.array([ExamDropEncoder.extract_features(sample, sys.maxsize) for sample in train_data])\n train_output = np.array([1.0 if get_is_mol(sample.selected_player) else 0.0 for sample in train_data])\n\n num_bins = self.get_num_bins(train_input, self.__max_splits)\n self.__discretizer = KBinsDiscretizer(n_bins = num_bins, encode = \"onehot-dense\",\n strategy = ExamDropExtractor.BIN_STRATEGY)\n train_input = self.__discretizer.fit_transform(train_input)\n train_input = self.__add_answered_on_feature(train_data, train_input)\n self.__anova_f_filter = SelectFpr(f_classif, alpha = self.__anova_f_significance)\n train_input = self.__anova_f_filter.fit_transform(train_input, train_output)\n self.__pca = PCA(n_components = self.__pca_explain)\n train_input = self.__pca.fit_transform(train_input)\n return train_input, train_output, self.__get_train_weights(train_data)",
"def get_train(self, data_file):\r\n return self.read_data(data_file)",
"def load_training_data(\n self,\n train_data_file=\"datasets/train_data.json\",\n test_data_file=\"datasets/test_data.json\",\n ):\n train_data = pd.read_json(train_data_file)\n test_data = pd.read_json(test_data_file)\n return train_data, test_data",
"def _load_train_data(self):\n\n self.train_loader = data.Train_loader(self.N_max, self.n_per_conn,\n self.data_path, self.device)\n self.train_loader.load_data()\n\n # load mean and std\n scc_mean_std = np.loadtxt(\n os.path.join(self.data_path, 'scc_mean_std.csv'), delimiter=',')\n self.mean = torch.Tensor(scc_mean_std[0])\n self.std = torch.Tensor(scc_mean_std[1])",
"def train(self, training_data, training_labels, validation_data, validation_labels):\n abstract",
"def load(self):\n\n X_train, y_train, X_test, y_test, variable_types, name = _load_data(\n self.task_id)\n\n self.X_train = X_train\n self.y_train = y_train\n self.X_test = X_test\n self.y_test = y_test\n self.variable_types = variable_types\n self.name = name\n\n return self.X_train, self.y_train, self.X_test, self.y_test",
"def _get_training_data(self) -> tuple:\n\n training_data = self._data.loc[self._data.target == 'train'].drop('target', axis=1)\n y = training_data.y_label.to_numpy()\n X = training_data.drop('y_label', axis=1).to_numpy()\n\n return X, y",
"def train(self, trainingData, trainingLabels, validationData, validationLabels):\n self.trainingData = trainingData\n self.trainingLabels = trainingLabels",
"def get_train(self, data_file):\n return self.read_data(data_file)",
"def training_set(self):\n return self._training_set",
"def train(self, batch_training=False):\n raise NotImplementedError",
"def load_training_data(config):\n # Load data\n LOGGER.info(\"Loading training data.\")\n train_x = load_data(config['data_source'], config['train_x_filename'])\n train_y = load_data(config['data_source'], config['train_y_filename'])\n val_x = load_data(config['data_source'], config['val_x_filename'])\n val_y = load_data(config['data_source'], config['val_y_filename'])\n LOGGER.info(\"Training data size: %d\", len(train_x))\n LOGGER.info(\"Validation data size: %d\", len(val_x))\n\n # Build datasets and create iterators\n LOGGER.info(\"Building dataset.\")\n train_dataset = get_dataset(\n train_x, train_y, config['batch_size'], config['data_shape'],\n config['n_classes'], True)\n val_dataset = get_dataset(\n val_x, val_y, config['batch_size'], config['data_shape'],\n config['n_classes'])\n\n return train_dataset, val_dataset, len(val_x)",
"def _load_processed_data(self):\n with open(os.path.join(self._data_root_path, self._processed_train_data_file_name),\n 'r') as f:\n train_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._processed_dev_data_file_name), 'r') as f:\n dev_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._word_vocab_file_name), 'r') as f:\n word_vocab = Vocab.from_json(json.load(f))\n\n with open(os.path.join(self._data_root_path, self._char_vocab_file_name), 'r') as f:\n char_vocab = Vocab.from_json(json.load(f))\n\n return train_examples, dev_examples, word_vocab, char_vocab",
"def init_train(self):\n data = self.loader.load_labelled_data(self.conf.split, 'training')\n\n # Initialise unlabelled data iterator\n num_ul = 0\n if self.conf.ul_mix > 0:\n ul_data = self.loader.load_unlabelled_data(self.conf.split, 'all')\n\n # calculate number of unlabelled images as a proportion of the labelled images\n num_ul = int(data.size() * self.conf.ul_mix)\n num_ul = num_ul if num_ul <= ul_data.size() else ul_data.size()\n log.info('Sampling %d unlabelled images out of total %d.' % (num_ul, ul_data.size()))\n ul_data.sample(num_ul)\n self.gen_X_U = data_utils.generator(self.conf.batch_size, 'overflow', ul_data.images)\n\n # Initialise labelled data iterator\n assert self.conf.l_mix >= 0\n\n # calculate number of labelled images\n num_l = int(data.size() * self.conf.l_mix)\n num_l = num_l if num_l <= data.size() else data.size()\n log.info('Using %d labelled images out of total %d.' % (num_l, data.size()))\n train_images = data.images[:num_l]\n train_masks = data.masks[:num_l]\n\n self.conf.unlabelled_image_num = num_ul\n self.conf.labelled_image_num = num_l\n self.conf.data_len = num_ul if num_ul > num_l else num_l\n self.conf.batches = int(np.ceil(self.conf.data_len / self.conf.batch_size))\n self.conf.save()\n\n self.gen_X_L = data_utils.generator(self.conf.batch_size, 'overflow', train_images, train_masks)\n\n # Initialise real masks iterator for discriminator training, using the real masks from the data CV split.\n self.other_masks = data_utils.generator(self.conf.batch_size, 'overflow', data.masks + 0)"
] | [
"0.7380025",
"0.71378094",
"0.6946885",
"0.69251007",
"0.6852432",
"0.6813262",
"0.6799918",
"0.6751542",
"0.6742031",
"0.6650401",
"0.6590069",
"0.65826356",
"0.64915186",
"0.6483886",
"0.6470438",
"0.6446683",
"0.6443755",
"0.62918407",
"0.6268519",
"0.6244086",
"0.6243447",
"0.6232298",
"0.6221952",
"0.62203336",
"0.6217214",
"0.62146485",
"0.6194592",
"0.61521375",
"0.6149793",
"0.61317915"
] | 0.72757715 | 1 |
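The training_data record above documents a shape change from [batch, MPS input size, ...] to [MPS input size, batch, ...], but the loop it shows only copies each value into _swapped_training_data. A sketch of the axis swap that the docstring describes, assuming each key maps to a (data, labels) pair of numpy arrays (that layout is my assumption):

import numpy as np

def swap_training_data(training_data):
    # move the batch axis behind the MPS-input axis for every key:
    # data[batch, MPS input size, ...] -> data[MPS input size, batch, ...]
    swapped = {}
    for key, (data, labels) in training_data.items():
        swapped[key] = (np.swapaxes(data, 0, 1), labels)
    return swapped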
Property giving the number of training samples for each key | def num_train_samples(self):
if self._num_training_samples is None:
for key, value in self._training_data.items():
self._num_training_samples[key] = len(value[0])
return self._num_training_samples | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def num_training_examples(self):",
"def get_num_train_samples(self):\n raise NotImplementedError",
"def __len__(self):\n if self.TRAIN_BOOL is True:\n count = len(self.dict_batch_1[b'data'])\n count += len(self.dict_batch_2[b'data'])\n count += len(self.dict_batch_3[b'data'])\n count += len(self.dict_batch_4[b'data'])\n count += len(self.dict_batch_5[b'data'])\n else:\n count = len(self.dict_batch_test[b'data'])\n return count",
"def get_sample_size(self, key=None):\n if key is None:\n return len(self.Y)\n else:\n return len(self.get_partitions(self.persistence)[key])",
"def sample_count(self):",
"def num_test_samples(self):\n if self._num_test_samples is None:\n for key, value in self._test_data.items():\n self._num_test_samples[key] = len(value[0])\n return self._num_test_samples",
"def _number_of_samples(self):\n return len(self._raw_data.samples)",
"def __len__(self):\r\n return len(self.train_data)",
"def num_train_instances(self):\n raise NotImplementedError()",
"def n_train(self):\n return self.factors[0].shape[0]",
"def get_number_of_features(key):\n sum = 0\n for name, module in common.QOL_PARAMS[key].items():\n sum += module.LENGTH\n\n return sum",
"def __len__(self):\n return len(self.train) + len(self.val) + len(self.test)",
"def get_number_of_training(self):\n return self.n_train",
"def __len__(self):\n return len(self.dataset) * self.samples_per_pair",
"def __len__(self):\n return self.n_samples",
"def getNumberOfKeys(self) -> int:\n ...",
"def num_samples(self):\n raise NotImplementedError()",
"def test_train_data_length(self):\n total_count = 0\n for batch in self._dataset.get_train():\n total_count += len(batch['label'])\n\n self.assertEqual(total_count, self._dataset.get_train_len())",
"def __len__(self):\n return self._num_samples",
"def __len__(self):\n return self.data.num_samples",
"def __len__(self):\n return self.__n_samples",
"def __len__(self):\n if self.settype == \"train\":\n return 64000\n else:\n return len(self.list_ids)",
"def num_examples(self):\r\n raise NotImplementedError",
"def __len__(self):\n return len(self.samples)",
"def __len__(self) -> int:\n return len(self.samples)",
"def __len__(self):\n return len(self.samples)",
"def __len__(self):\n return len(self.samples)",
"def training_set_count(self) -> int:\n return pulumi.get(self, \"training_set_count\")",
"def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)",
"def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)"
] | [
"0.76472265",
"0.72392845",
"0.7154465",
"0.71007866",
"0.6989497",
"0.6887514",
"0.68653405",
"0.6852151",
"0.6844715",
"0.680676",
"0.6782863",
"0.67534184",
"0.6745939",
"0.6720424",
"0.670524",
"0.66739583",
"0.66528374",
"0.66478294",
"0.66011435",
"0.659892",
"0.6594394",
"0.65546584",
"0.65115505",
"0.6476304",
"0.64750504",
"0.6449615",
"0.6449615",
"0.6442748",
"0.6419965",
"0.6419965"
] | 0.7513668 | 1 |
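The num_train_samples record above caches one count per key, but as written it assigns into self._num_training_samples while that attribute is still None; a working version needs the cache dict created before it is filled. A self-contained sketch of that lazy counting pattern (the class shell is invented for illustration; only the attribute names follow the record):

class SampleCounter:
    # minimal stand-in class showing the lazy per-key count cache
    def __init__(self, training_data):
        self._training_data = training_data
        self._num_training_samples = None

    @property
    def num_train_samples(self):
        if self._num_training_samples is None:
            # create the cache dict first, then fill it per key;
            # value[0] holds the input samples for that key
            self._num_training_samples = {
                key: len(value[0]) for key, value in self._training_data.items()
            }
        return self._num_training_samples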
Property giving the number of test samples for each key | def num_test_samples(self):
if self._num_test_samples is None:
for key, value in self._test_data.items():
self._num_test_samples[key] = len(value[0])
return self._num_test_samples | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _number_of_samples(self):\n return len(self._raw_data.samples)",
"def setTestSampleSize(self, Ntest):\n self.Ntest = Ntest",
"def getNrSamples(self): \r\n return self.numSamples",
"def num_training_examples(self):",
"def sample_count(self):",
"def get_number_of_testing(self):\n return self.n_test",
"def n_test(self):\n return self.factors[1].shape[0]",
"def num_samples(self):\n raise NotImplementedError()",
"def __len__(self):\n return self.n_samples",
"def num_examples(self):\r\n raise NotImplementedError",
"def num_trials(self):",
"def get_num_samples(self):\n return self._num_samples",
"def __len__(self):\n return self.__n_samples",
"def test_test_data_length(self):\n total_count = 0\n for batch in self._dataset.get_test():\n total_count += len(batch['label'])\n\n self.assertEqual(total_count, self._dataset.get_test_len())",
"def __len__(self):\n return self._num_samples",
"def get_num_train_samples(self):\n raise NotImplementedError",
"def test_getSampleCount(self):\r\n self.assertEqual(self.estimator1.getSampleCount(), 1)",
"def __len__(self):\n return self.data.num_samples",
"def test_len_testset(self):\n self.assertEqual(self.__dataset.get_test_len, 1000)",
"def get_num_samples(self) -> int:\n # must be implemented in subclass\n raise NotImplementedError",
"def __len__(self) -> int:\n return len(self.samples)",
"def num_train_samples(self):\n if self._num_training_samples is None:\n for key, value in self._training_data.items():\n self._num_training_samples[key] = len(value[0])\n return self._num_training_samples",
"def test_len_trainset(self):\n self.assertEqual(self.__dataset.get_train_len, 10000)",
"def samples(self) -> int:\n return self._samples",
"def __len__(self):\n return len(self.samples)",
"def __len__(self):\n return len(self.samples)",
"def __len__(self):\n return len(self.samples)",
"def test_size(self) -> int:\n return int(self.data_size * self.__test_fraction)",
"def __len__(self):\n return len(self.list_sample)",
"def n_samples(self) -> int: # pragma: no cover\n return self.samples.shape[0]"
] | [
"0.76248336",
"0.7584526",
"0.7412485",
"0.74109805",
"0.7401342",
"0.7390789",
"0.72752166",
"0.726213",
"0.7233075",
"0.7224675",
"0.71610683",
"0.71362716",
"0.71249604",
"0.70941573",
"0.7070653",
"0.704998",
"0.70279443",
"0.7010169",
"0.6996375",
"0.6985382",
"0.6942792",
"0.68967336",
"0.6891787",
"0.6871163",
"0.6865328",
"0.6865328",
"0.6835634",
"0.6789015",
"0.6767874",
"0.6764788"
] | 0.8134777 | 0 |
Highlights currentSelection on stdscr. | def highlightSelection(stdscr, selection):
s = tuple(list(selection.addStrArgs)+[curses.A_REVERSE])
stdscr.addstr(*s) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def switchSelection(stdscr, lastSelection, currentSelection):\n stdscr.addstr(*lastSelection.addStrArgs)\n highlightSelection(stdscr, currentSelection)",
"def draw_selected(self):\n if self.get_selected() is not None and not self.check_if_locked(self.get_selected()):\n self.color_cell(pos=self.get_selected(\n ), color=SELECTED_INVALID if self.get_selected() in self.invalid else SELECTED)",
"def __highlight_selection(self, x: int, y: int) -> None:\n round_rect(screen, (x-2, y-2, SELECTOR_WIDTH + 4, SELECTOR_HEIGHT + 4), HIGHLIGHT_COLOUR, 6)",
"def highlight_color(self):\n return curses.color_pair(4) if self.cycling else curses.color_pair(2)",
"def interactive_select(space, current):\n print \"Type an element name, an element index, or an unambiguous prefix to add to your selection.\"\n print \"Type '\" + color_code(MAGENTA) + \"list\" + CLEAR_COLOR +\"' to see the list of valid selections/indices.\"\n print \"Type '\" + color_code(MAGENTA) + \"clear\" + CLEAR_COLOR +\"' to clear selection.\"\n print \"Enter an empty line when done.\\n\"\n \n done = False\n while not done:\n print color_code(BLACK, bold=True), \"\\nCurrent selection\" + CLEAR_COLOR + \":\", (current if current else \"None\")\n tentative = raw_input(color_code(YELLOW) + \"Selection or Command\" + CLEAR_COLOR + \": \")\n matches = [el for el in space if el.startswith(tentative)]\n try: index = int(tentative)\n except ValueError: index = None\n if tentative == 'list':\n for i,el in enumerate(space):\n print \"\\t\", color_code(BLUE, bold=True), i, CLEAR_COLOR, el\n print \"\\n\"\n elif tentative == 'clear':\n current = []\n elif tentative == '':\n if current:\n print color_code(GREEN), \"\\nFinal selection\" + CLEAR_COLOR + \":\", current, \"\\n\\n\"\n done = True\n else:\n print_error(\"Must select at least one\")\n elif len(matches) > 1:\n print_error(\"Multiple matches found for `{}' ({})\".format(tentative, matches))\n elif len(matches):\n if matches[0] in current:\n print_warning(\"{} was already selected\".format(matches[0]))\n else:\n current.append(matches[0])\n elif index is not None:\n if index < 0 or index >= len(space):\n print_error(\"Invalid index {}\".format(index))\n elif space[index] in current:\n print_warning(\"{} was already selected\".format(space[index]))\n else:\n current.append(space[index])\n else:\n print_error(\"Unknown token: {}\".format(tentative))\n \n return current",
"def flush(self, header, caret, select_start_pos, select_end_pos, scr_topleft, scr_bottomright):\n self.update_screen_size()\n self.stdscr.erase()\n # header\n for text, color in header:\n self.stdscr.addstr(text, color_pair(color))\n text_selected = select_start_pos is not None\n # display lines\n displayed_lines = self.lines[scr_topleft.y : min(len(self.lines), scr_bottomright.y)]\n for index, line in enumerate(displayed_lines):\n self.stdscr.addstr(PADCHAR)\n if len(line) >= scr_topleft.x:\n # inclusive, position of line start and line end of displayed line\n ln_start = Position(scr_topleft.y + index, scr_topleft.x)\n ln_end = Position(scr_topleft.y + index, scr_topleft.x + self.screen_width())\n displayed_line = line[ln_start.x : min(len(line), scr_bottomright.x - 1)]\n if text_selected:\n # whether start position and end position of line are between selection\n start_between = ln_start.is_between(select_start_pos, select_end_pos)\n end_between = ln_end.is_between(select_start_pos, select_end_pos)\n # whether selection is between start and end position\n select_start_between = select_start_pos.is_between(ln_start, ln_end)\n select_end_between = select_end_pos.is_between(ln_start, ln_end)\n if start_between and end_between:\n # completely enclosed\n self.stdscr.addstr(displayed_line, color_pair(7))\n elif start_between:\n # only start between selection\n # end is on same line\n # only starting portion is highlighted\n self.stdscr.addstr(displayed_line[ : select_end_pos.x - ln_start.x + 1], color_pair(7))\n self.stdscr.addstr(displayed_line[select_end_pos.x - ln_start.x + 1 : ])\n elif end_between:\n # only end between selection\n # start is on same\n # only ending portion is highlighted\n self.stdscr.addstr(displayed_line[ : select_start_pos.x - ln_start.x])\n self.stdscr.addstr(displayed_line[select_start_pos.x - ln_start.x : ], color_pair(7))\n elif select_start_between and select_end_between:\n # selection is all on this line\n # start and end not highlighted\n self.stdscr.addstr(displayed_line[ : select_start_pos.x - ln_start.x])\n self.stdscr.addstr(\n displayed_line[select_start_pos.x - ln_start.x : select_end_pos.x - ln_start.x + 1],\n color_pair(7)\n )\n self.stdscr.addstr(displayed_line[select_end_pos.x + 1 - ln_start.x : ])\n else:\n # not enclosed by selection at all\n self.stdscr.addstr(displayed_line)\n else:\n self.stdscr.addstr(displayed_line)\n if index != len(displayed_lines) - 1:\n self.stdscr.addstr('\\n')\n self.stdscr.move(caret.y - scr_topleft.y + HEADER_LEN, caret.x - scr_topleft.x + PAD_LEN)",
"def highlight(self):\n\n if self.selected_text_file is None:\n return\n if self.selected_text_file[FULLTEXT] is None:\n return\n format_ = QtGui.QTextCharFormat()\n cursor = self.ui.textBrowser.textCursor()\n for item in self.case_text:\n try:\n cursor.setPosition(int(item['pos0']), QtGui.QTextCursor.MoveMode.MoveAnchor)\n cursor.setPosition(int(item['pos1']), QtGui.QTextCursor.MoveMode.KeepAnchor)\n format_.setFontUnderline(True)\n format_.setUnderlineColor(QtCore.Qt.GlobalColor.red)\n cursor.setCharFormat(format_)\n except Exception as err:\n msg = \"highlight, text length \" + str(len(self.ui.textBrowser.toPlainText()))\n msg += \"\\npos0:\" + str(item['pos0']) + \", pos1:\" + str(item['pos1'])\n msg += \"\\n\" + str(err)\n logger.debug(msg)",
"def on_selected(self):\n self.colour = self.selected_colour\n self.is_selected = True\n self.redraw()",
"def active_selection():\r\n\r\n om.MGlobal.getActiveSelectionList()",
"def paint(self):\n if self.config['colorize']:\n self.highlight()\n else:\n self.clear_highlight()",
"def _render_highlighted(\n document_text: str,\n begin: int,\n end: int,\n context_size: int = 0,\n highlight_color: str = \"On_Green\",\n) -> str:\n black_color = _get_text_color_from_list(\"Color_off\")\n return (\n document_text[begin - context_size : begin]\n + _get_text_color_from_list(highlight_color)\n + document_text[begin:end]\n + black_color\n + document_text[end : end + context_size]\n )",
"def unhighlight(self, current=False):\n if current:\n if self.currentEditor is not None:\n self.currentEditor.highlight()\n else:\n for editor in self.editors:\n editor.highlight()",
"def highlightCode(self, _event=None):\n count = 0\n if self.text.tag_ranges('sel'):\n self.text.tag_add('color' + str(count), tk.SEL_FIRST, tk.SEL_LAST)\n self.text.tag_configure('color' + str(count), foreground='black', background='yellow')\n count += 1\n else:\n # Do this if you want to overwrite all selection colors when you change color without selection\n # for tag in text.tag_names():\n # text.tag_delete(tag)\n self.text.config(foreground='yellow')\n\n fileContainingText = open(newTextFile, \"a\")\n\n hText = self.text.get(tk.SEL_FIRST, tk.SEL_LAST)\n fileContainingText.write(hText)",
"def __update_selection(self):\n if self.selected_offset != self.old_selected_offset:\n if self.old_selected_offset > -1:\n old_offset = (self.old_selected_offset - self.top_offset) * 8\n\n self.display.text(\">\", 0, old_offset, 0)\n\n new_offset = (self.selected_offset - self.top_offset) * 8\n self.display.text(\">\", 0, new_offset, 1)\n self.display.show()\n self.old_selected_offset = self.selected_offset",
"def set_mouse_selection(self, item, mpos):\r\n if item.is_mouse_selection(mpos):\r\n item.set_font_color(RED)\r\n item.set_italic(True)\r\n else:\r\n item.set_font_color(WHITE)\r\n item.set_italic(False)",
"def set_mouse_selection(self, item, mpos):\r\n\t\tif item.is_mouse_selection(mpos):\r\n\t\t\titem.set_font_color(YELLOW)\r\n\t\t\titem.set_italic(True)\r\n\t\telse:\r\n\t\t\titem.set_font_color(WHITE)\r\n\t\t\titem.set_italic(False)",
"def set_highlight(self, highlighted):\n self.highlighted = highlighted",
"def BaseSetSelection(self, start, end):\n super(EditraBaseStc, self).SetSelection(start, end)",
"def set_current_tool_to_selection_tool(self):\n\n self.variables.current_shape_id = self.variables.select_rect_id\n self.variables.active_tool = TOOLS.SELECT_TOOL\n self.variables.current_tool = TOOLS.SELECT_TOOL",
"def highlight(self, *args):\n cw = self.cur_win()\n cw.highlight()\n if self.cur == Win.right:\n cw.down()",
"def _conf_highlight(self):\n textbuffer = self.ref_object.get_buffer()\n tag_table = textbuffer.get_tag_table()\n c_tag = tag_table.lookup(\"colored\")\n if not c_tag:\n c_tag = textbuffer.create_tag(\"colored\", foreground=\"#000000\", background=\"#FFFF00\")\n text = textbuffer.get_text(textbuffer.get_bounds()[0], textbuffer.get_bounds()[1])\n textbuffer.delete(textbuffer.get_bounds()[0], textbuffer.get_bounds()[1])\n for line in re.split(r'\\r\\n|\\r|\\n', text):\n for e in re.compile(\"(\" + self.entry.get_text().lower() + \")\", re.I).split(line):\n if re.search(self.entry.get_text().lower(), e, re.I):\n textbuffer.insert_with_tags(textbuffer.get_end_iter(), e, c_tag)\n else:\n textbuffer.insert_with_tags(textbuffer.get_end_iter(), e)\n textbuffer.insert_with_tags(textbuffer.get_end_iter(), '\\n')",
"def SetSelection(self, s):\r\n\r\n self.selection = s\r\n self._commandInt = s",
"def select(self, selected = True):\n \n if selected != self._selected:\n if selected:\n self._border.set_border_width(globals.HIGHLIGHT_BORDER_WIDTH)\n Member.focus.append(self)\n else:\n self._border.set_border_width(self._border_width)\n Member.focus.remove(self)\n \n self._selected = selected",
"def __enter__(self):\n self.stdscr = curses.initscr()\n curses.noecho() # Don't display pressed keys\n curses.cbreak() # React to keys without Enter press\n self.stdscr.keypad(True) # Use keypad & navigation keys\n self.stdscr.nodelay(True) # Non-blocking input reading\n curses.start_color() # Enable coloured outputs\n curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK) # Color as (FG, BG)\n curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_WHITE)\n curses.init_pair(3, curses.COLOR_CYAN, curses.COLOR_BLACK)\n curses.init_pair(4, curses.COLOR_BLACK, curses.COLOR_CYAN)\n return self.stdscr",
"def update_extra_selections(self):\n\n if len(self.cursors) > 1:\n # get highlight colors\n highlight_color = self.txt_edit.palette().highlight()\n highlight_txt_color = self.txt_edit.palette().highlightedText()\n\n extra_selections = []\n\n for cursor in self.cursors:\n extra_sel = self.txt_edit.ExtraSelection()\n extra_sel.cursor = cursor\n extra_sel.format.setBackground(highlight_color)\n extra_sel.format.setForeground(highlight_txt_color)\n extra_selections.append(extra_sel)\n\n self.txt_edit.setExtraSelections(extra_selections)\n\n else:\n # clear extra selections\n self.txt_edit.setExtraSelections([])",
"def mark_selected():\n (buffer, start, end) = get_selection_or_word()\n selection = buffer.get_chars(start, end)\n\n if selection != \"\":\n for m in buffer.file().search(selection, regexp=False):\n GPS.Locations.add(\"Local occurrences\",\n m.file(), m.line(), m.column(),\n selection,\n highlight=\"dynamic occurrences\",\n length=len(selection))",
"def watchSelection(self, sel):\n sel.observers.append(self.selectionLabel.set_text)",
"def setSelectionColorScheme(self, focused=None, unfocused=None):\n if focused is None:\n focused = self.selectionColor\n if unfocused is None:\n unfocused = self.unfocusedRegionColor\n self.selection.setColorScheme(focused, unfocused)\n beg = self.selection.getBeginSeconds()\n dur = self.selection.getWidthSeconds()\n wform = self.selection.getSelectedWaveform()\n self.selection.select(beg, dur, wform)",
"def highlight(self, **highlight):\n self._evaluated = False\n self._highlight = highlight\n return self",
"def SetOldSelection(self, s):\r\n \r\n self.old_selection = s"
] | [
"0.77326286",
"0.6310758",
"0.62849665",
"0.6241305",
"0.6179359",
"0.6145725",
"0.6066579",
"0.5976721",
"0.59747756",
"0.5841172",
"0.5825609",
"0.5814059",
"0.57745796",
"0.5750539",
"0.5745168",
"0.5714649",
"0.56804645",
"0.5667968",
"0.5662644",
"0.56395173",
"0.5605114",
"0.5541363",
"0.5528447",
"0.55247223",
"0.54810756",
"0.54672265",
"0.5445241",
"0.543167",
"0.5425711",
"0.5397137"
] | 0.75370693 | 1 |
This initializes the DotStars object by setting up a buffer and creating an SPI object. The start and end frames for the SPI communication are created, and the LEDs are cleared. | def __init__(self, leds):
self.ledcount = leds
# create a buffer
self.buffersize = self.ledcount * 4
self.buffer = bytearray(self.ledcount * 4)
self.emptybuffer = bytearray(self.ledcount * 4)
for i in range(0, self.buffersize, 4):
self.emptybuffer[i] = 0xff
self.emptybuffer[i + 1] = 0x0
self.emptybuffer[i + 2] = 0x0
self.emptybuffer[i + 3] = 0x0
# Start frame and endframe for the SPI communication (end frame is not
# needed)
self.startframe = bytes([0x00, 0x00, 0x00, 0x00])
self.endframe = bytes([0xff, 0xff, 0xff, 0xff])
# initialize SPI (needs to be at 45 MHz in order to maximize the speed.
# This is the limiting factor for the system's speed)
self.spi = SPI(1, SPI.MASTER, baudrate=45000000,
polarity=0, phase=0, bits=8, firstbit=SPI.MSB)
self.clearleds() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, spi, dc, rst, led):\n self._spi = spi\n self._spi.open()\n self._spi.set_mode(0)\n self._spi.set_clock_frequency(4000000)\n\n self._dc = dc\n self._rst = rst\n self._led = led\n self._enabled = False",
"def SPIsetup(self):\n self.writecmd(0x01,0x10,0,self.data); #SPI/SETUP",
"def __init__ ( self, width, height, spi=None, spiMosi= None, spiDC=None, spiCS=None, spiReset=None, spiClk=None ):\n self._init_config(width, height, spi, spiMosi, spiDC, spiCS, spiReset, spiClk)",
"def init(\n baudrate=1000000, bits=8, mode=0, sclk=\"pin13\", mosi=\"pin15\", miso=\"pin14\"\n ):\n utils.print_for_unimplemented_functions(SPI.init.__qualname__)\n telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_SPI)",
"def __init__(self, spi, width, height, rst, dc, cs, backlight=None,\n xstart=-1, ystart=-1):\n self.width = width\n self.height = height\n self.spi = spi\n\n self.rst = rst\n self.dc = dc\n self.cs = cs\n self.backlight = backlight\n\n self.cs.init(self.cs.OUT, value=1)\n self.dc.init(self.dc.OUT, value=0)\n if self.rst is not None:\n self.rst.init(self.rst.OUT, value=0)\n\n self._buf = bytearray(_BUFFER_SIZE * 2)\n # default white foregraound, black background\n self._colormap = bytearray(b'\\x00\\x00\\xFF\\xFF')\n\n if xstart >= 0 and ystart >= 0:\n self.xstart = xstart\n self.ystart = ystart\n elif (self.width, self.height) == (240, 240):\n self.xstart = 0\n self.ystart = 0\n elif (self.width, self.height) == (135, 240):\n self.xstart = 52\n self.ystart = 40\n else:\n raise ValueError(\n \"Unsupported display. Only 240x240 and 135x240 are supported \"\n \"without xstart and ystart provided\"\n )\n\n self.init_pins()\n if self.rst is not None:\n self.reset()\n else:\n self.soft_reset()\n self.init()",
"def __init__(self, config):\n spi = SPI(-1, baudrate=config.baudrate,\n sck=config.sck, mosi=config.mosi, miso=config.miso)\n self._epd = epaper2in9.EPD(spi, config.cs, config.dc,\n config.rst1, config.busy)\n self._epd.init()\n self._buffer = Buffer(epaper2in9.EPD_WIDTH, epaper2in9.EPD_HEIGHT)",
"def _init_config(self, width, height, spi=None, spiMosi= None, spiDC=None, spiCS=None, spiReset=None, spiClk=None):\n self._spi = spi\n self._spi_mosi = spiMosi\n self._spi_dc = spiDC\n self._spi_cs = spiCS\n self._spi_reset = spiReset\n self._spi_clk = spiClk\n\n self.width = width\n self.height = height",
"def init_buffer(self):\n \n self.shape.buf = [pi3d.Buffer(self.shape, self.verts, self.texcoords, self.inds, self.norms)]\n self.shape.set_draw_details(self.shader, [self.spritesheet.img])",
"def init_point_buffer(self, configurations={}):\n\n # initialize buffer configuration to indicate that the\n # buffers are not setup in case this function got a problem\n self.buffer_configuration = None\n\n # create new buffer configuration\n buffer_configurations = {}\n for name, configuration in configurations.items():\n buffer_configurations[name] = self._init_plot_buffer(configuration)\n\n self.buffer_configuration = buffer_configurations",
"def __init__(self, commands: dict):\n self.__commands = commands\n\n # Wait times (s).\n self.WT_PIN_TOGGLE = 0.2\n self.WT_STATE_LOOKUP = 0.1\n\n # GPIO pins.\n self.RST_PIN = 17\n self.DC_PIN = 25\n self.CS_PIN = 8\n self.BUSY_PIN = 24\n\n # Set GPIO pins.\n RPi.GPIO.setmode(RPi.GPIO.BCM)\n RPi.GPIO.setwarnings(False)\n RPi.GPIO.setup(self.RST_PIN, RPi.GPIO.OUT)\n RPi.GPIO.setup(self.DC_PIN, RPi.GPIO.OUT)\n RPi.GPIO.setup(self.CS_PIN, RPi.GPIO.OUT)\n RPi.GPIO.setup(self.BUSY_PIN, RPi.GPIO.IN)\n\n # SPI device.\n self.__spi = spidev.SpiDev(0, 0)\n\n # Set SPI device.\n self.__spi.max_speed_hz = 2000000\n self.__spi.mode = 0b00",
"def initiate():\n\n log = \"Initiate the SPI communication of the OPC-N3\"\n logger.debug(log)\n\n time.sleep(1)\n log = \"Sending bytes to the sensor...\"\n logger.debug(log)\n spi.writebytes([0x5A, 0x01])\n reading = spi.readbytes(3)\n log = \"Data read after sending bytes are: \" + str(reading)\n logger.debug(log)\n time.sleep(wait_between_bytes)\n\n log = \"Sending bytes to the sensor...\"\n logger.debug(log)\n spi.writebytes([0x5A, 0x03])\n reading = spi.readbytes(9)\n log = \"Bytes read after sending bytes are: \" + str(reading)\n logger.debug(log)\n time.sleep(wait_between_bytes)\n\n # SPI conncetion\n log = \"Sending bytes to the sensor...\"\n logger.debug(log)\n spi.writebytes([0x5A, 0x02, 0x92, 0x07])\n reading = spi.readbytes(2)\n log = \"Bytes read after sending bytes are: \" + str(reading)\n logger.debug(log)\n time.sleep(wait_between_bytes)\n\n return",
"def __init__(self, stencil_coefs, loffset, roffset):\n self.stencil_coefs = stencil_coefs\n self.loffset = loffset\n self.roffset = roffset",
"def spi_controller(\n # ---[ Module Ports]---\n glbl, # global interface, clock, reset, etc.\n spibus, # external SPI bus\n # optional ports\n fifobus=None, # streaming interface, FIFO bus\n mmbus=None, # memory-mapped bus, contro status access\n cso=None, # control-status object\n \n # ---[ Module Parameters ]---\n include_fifo=True, # include aan 8 byte deep FIFO\n):\n clock, reset = glbl.clock, glbl.reset\n if cso is None:\n cso = spi_controller.cso()\n\n # -- local signals --\n ena = Signal(False)\n clkcnt = Signal(modbv(0, min=0, max=2**12))\n bcnt = Signal(intbv(0, min=0, max=8))\n\n # separate tx and rx shift-registers (could be one in the same)\n treg = Signal(intbv(0)[8:]) # tx shift register\n rreg = Signal(intbv(0)[8:]) # rx shift register\n\n x_sck, x_ss, x_mosi, x_miso = Signals(bool(0), 4)\n\n # internal FIFO bus interfaces\n # external FIFO side (FIFO to external SPI bus)\n itx = FIFOBus(size=fifobus.size, width=fifobus.width)\n # internal FIFO side (FIFO to internal bus)\n irx = FIFOBus(size=fifobus.size, width=fifobus.width)\n \n states = enum('idle', 'wait_hclk', 'data_in', 'data_change',\n 'write_fifo', 'end')\n state = Signal(states.idle)\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # memory- mapped registers\n # add the peripheral's regfile to the bus (informational only)\n # @todo: the automatic building of the register files is incomplete\n if mmbus is not None:\n # the register-file (rf) will drive all the cso signals\n rf = cso.get_register_file()\n mmbus.add(rf, 'spi')\n\n # FIFO for the wishbone data transfer\n if include_fifo:\n fifo_fast.debug = spi_controller.debug\n fifo_tx_inst = fifo_fast(reset, clock, itx)\n fifo_rx_inst = fifo_fast(reset, clock, irx)\n\n @always_comb\n def rtl_assign():\n cso.tx_fifo_count.next = itx.count\n cso.rx_fifo_count.next = irx.count\n\n if clkcnt > 0:\n ena.next = False\n else:\n ena.next = True\n\n clock_counts = tuple([(2**ii)-1 for ii in range(13)])\n\n @always(clock.posedge)\n def rtl_clk_div():\n if cso.enable and clkcnt != 0 and state != states.idle:\n clkcnt.next = (clkcnt - 1)\n else:\n clkcnt.next = clock_counts[cso.clock_divisor]\n\n @always_seq(clock.posedge, reset=reset)\n def rtl_state_and_more():\n \"\"\"\n Designed to the following timing diagram\n\n SCK CPOL=0 ______/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\ \n CPOL=1 ------\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/ \n SS ---\\_______________________________________________________________________ \n CPHA=0 MOSI ...|.0....|.1.....|.2.....|.3.....|.4.....|.5.....|.6.....|.7.....|.0.....| \n MISO ...|.0....|.1.....|.2.....|.3.....|.4.....|.5.....|.6.....|.7.....|.0.....| \n CPHA=1 MOSI ...|....0.....|.1.....|.2.....|.3.....|.4.....|.5.....|.6.....|.7.....|.0...\n MISO ......|.0.....|.1.....|.2.....|.3.....|.4.....|.5.....|.6.....|.7.....|.0...\n \"\"\"\n if not cso.enable:\n state.next = states.idle\n bcnt.next = 0\n treg.next = 0\n \n itx.read.next = False\n irx.write.next = False\n\n x_sck.next = False\n x_ss.next = False\n else:\n if not cso.freeze:\n # ~~~~ Idle state ~~~~\n if state == states.idle:\n bcnt.next = 7\n treg.next = itx.read_data\n x_sck.next = cso.clock_polarity\n irx.write.next = False\n \n if not itx.empty and not irx.full:\n itx.read.next = True\n x_ss.next = False\n if cso.clock_phase: # Clock in on second phase\n state.next = states.wait_hclk\n else: # Clock in on first phase\n state.next = states.data_in\n else:\n itx.read.next = 
False\n x_ss.next = True\n\n # ~~~~ Wait half clock period for cpha=1 ~~~~\n elif state == states.wait_hclk:\n itx.read.next = False\n irx.write.next = False\n if ena:\n x_sck.next = not x_sck\n state.next = states.data_in\n\n # ~~~~ Clock data in (and out) ~~~~\n elif state == states.data_in:\n itx.read.next = False\n irx.write.next = False\n if ena: # clk div\n x_sck.next = not x_sck\n rreg.next = concat(rreg[7:0], x_miso)\n \n if cso.clock_phase and bcnt == 0:\n irx.write.next = True\n if itx.empty or irx.full:\n state.next = states.end\n else:\n state.next = states.data_change\n else:\n state.next = states.data_change\n\n # ~~~~ Get ready for next byte out/in ~~~~\n elif state == states.data_change:\n itx.read.next = False\n irx.write.next = False\n if ena:\n x_sck.next = not x_sck\n if bcnt == 0: \n if not cso.clock_phase:\n irx.write.next = True\n \n if itx.empty or irx.full:\n state.next = states.end\n else: # more data to transfer\n bcnt.next = 7\n state.next = states.data_in\n itx.read.next = True\n treg.next = itx.read_data\n else:\n treg.next = concat(treg[7:0], intbv(0)[1:])\n bcnt.next = bcnt - 1 \n state.next = states.data_in\n\n # ~~~~ End state ~~~~\n elif state == states.end:\n itx.read.next = False\n irx.write.next = False\n if ena: # Wait half clock cycle go idle\n state.next = states.idle\n\n # Shouldn't happen, error in logic\n else:\n state.next = states.idle\n assert False, \"SPI Invalid State\"\n\n @always_comb\n def rtl_fifo_sel():\n \"\"\"\n The `itx` and `irx` FIFO interfaces are driven by different\n logic depending on the configuration. This modules accesses\n the `itx` read side and drives the `irx` write side. The\n `itx` write side is driven by the `cso` or the `fifobus` port.\n The `irx` read side is accessed by the `cso` or the `fifobus`\n port.\n \"\"\"\n if cso.bypass_fifo:\n # data comes from the register file\n cso.tx_empty.next = itx.empty\n cso.tx_full.next = itx.full\n itx.write_data.next = cso.tx_byte\n\n cso.rx_empty.next = irx.empty\n cso.rx_full.next = irx.full\n cso.rx_byte.next = irx.read_data\n cso.rx_byte_valid.next = irx.read_valid\n\n # @todo: if cso.tx_byte write signal (written by bus) drive the\n # @todo: FIFO write signals, same if the cso.rx_byte is accessed\n itx.write.next = cso.tx_write\n irx.read.next = cso.rx_read\n\n else:\n # data comes from external FIFO bus interface\n fifobus.full.next = itx.full\n itx.write_data.next = fifobus.write_data\n itx.write.next = fifobus.write\n\n fifobus.empty.next = irx.empty\n fifobus.read_data.next = irx.read_data\n fifobus.read_valid.next = irx.read_valid\n irx.read.next = fifobus.read\n\n # same for all modes\n irx.write_data.next = rreg\n\n @always_comb\n def rtl_x_mosi():\n # @todo lsb control signal\n x_mosi.next = treg[7]\n\n @always_comb\n def rtl_gate_mosi():\n if cso.loopback:\n spibus.mosi.next = False\n else:\n spibus.mosi.next = x_mosi\n\n @always_comb #(clock.posedge)\n def rtl_spi_sigs():\n spibus.sck.next = x_sck\n if cso.loopback:\n x_miso.next = x_mosi\n else:\n x_miso.next = spibus.miso\n\n @always_comb\n def rtl_slave_select():\n if cso.manual_slave_select:\n spibus.ss.next = ~cso.slave_select\n elif x_ss:\n spibus.ss.next = 0xFF\n else:\n spibus.ss.next = ~cso.slave_select\n\n # myhdl generators in the __debug__ conditionals are not converted.\n if spi_controller.debug:\n @instance\n def mon_state():\n print(\" :{:<8d}: initial state {}\".format(\n now(), str(state)))\n \n while True:\n yield state\n print(\" :{:<8d}: state transition --> {}\".format(\n now(), 
str(state)))\n \n fbidle = intbv('0000')[4:]\n\n @instance\n def mon_trace():\n while True:\n yield clock.posedge\n ccfb = concat(itx.write, itx.read, irx.write, irx.read)\n if ccfb != fbidle:\n fstr = \" :{:<8d}: tx: w{} r{}, f{} e{}, rx: w{} r{} f{} e{}\"\n print(fstr.format(now(),\n int(itx.write), int(itx.read), int(itx.full), int(itx.empty),\n int(irx.write), int(irx.read), int(irx.full), int(irx.empty),)\n )\n \n @always(clock.posedge)\n def mon_tx_fifo_write():\n if itx.write:\n print(\" WRITE tx fifo {:02X}\".format(int(itx.write_data)))\n if itx.read:\n print(\" READ tx fifo {:02X}\".format(int(itx.read_data)))\n \n @always(clock.posedge)\n def mon_rx_fifo_write():\n if irx.write:\n print(\" WRITE rx fifo {:02X}\".format(int(irx.write_data)))\n \n if irx.read:\n print(\" READ rx fifo {:02X}\".format(int(irx.read_data)))\n\n # return the myhdl generators\n gens = myhdl.instances()\n return gens",
"def __init__(self, font=None):\n self._xPosition = 0\n self._yPosition = 0\n self._positionDegree = 0\n self._velocityMag = 0\n self._velocityDegree = 0\n self._accelerationMag = 0\n self._accelerationDegree = 0\n self._thrusters = None\n self._SASmodules = None\n\n if font is None:\n self._font = pg.font.SysFont(\"Futura\", 20)\n else:\n self._font = font",
"def __init__(self):\r\n # Check device ID.\r\n chip_id = self._read_byte(_BME280_REGISTER_CHIPID)\r\n if _BME280_CHIPID != chip_id:\r\n raise RuntimeError('Failed to find BME280! Chip ID 0x%x' % chip_id)\r\n self._write_register_byte(_BME280_REGISTER_SOFTRESET, 0xB6)\r\n time.sleep(0.5)\r\n self._read_coefficients()\r\n self.sea_level_pressure = 1013.25\r\n \"\"\"Pressure in hectoPascals at sea level. Used to calibrate `altitude`.\"\"\"\r\n # turn on humidity oversample 16x\r\n self._write_register_byte(_BME280_REGISTER_CTRL_HUM, 0x03)\r\n self._t_fine = None",
"def __init__(self, spi_rack, module, max_current=50e-3, reset_currents=True):\n self.spi_rack = spi_rack\n self.module = module\n self.span = [np.NaN]*4\n self.currents = [np.NaN]*4\n self.max_current = max_current\n\n for i in range(4):\n self.get_settings(i)\n\n if reset_currents:\n for i in range(4):\n self.change_span(i, S4g_module.range_max_bi)\n self.set_current(i, 0.0)",
"def __init__(self, verbose=False):\n self._verbose = verbose\n self._nSrv = 2\n toLog(\"Initializing ...\", True)\n\n # Create servo manager and servos ...\n self.SM = ServoManager(self._nSrv, verbose=verbose)\n self._Servos = []\n self._SPos = array.array('i', [0] *self._nSrv)\n self._SIDs = array.array('b', [SRV_PAN, SRV_TLT])\n self._Servos.append(Servo(board.SERVO_PAN, verbose=verbose))\n self._Servos[SRV_PAN].change_range(board.PAN_RANGE_US, board.PAN_RANGE_DEG)\n self.SM.add_servo(SRV_PAN, self._Servos[SRV_PAN])\n self._Servos.append(Servo(board.SERVO_TLT, verbose=verbose))\n self._Servos[SRV_TLT].change_range(board.TLT_RANGE_US, board.TLT_RANGE_DEG)\n self.SM.add_servo(SRV_TLT, self._Servos[SRV_TLT])\n toLog(\"Servo manager ready\", True)\n\n # Create spectrometer instance\n self.SP = C12880MA(trg=board.TRG, st=board.STA, clk=board.CLK, video=board.VID)\n self.SP.begin()\n self.SP.setIntegrationTime_s(0.01)\n time.sleep_ms(200)\n toLog(\"Spectrometer ready\", True)",
"def setup(self):\n if not self._gpio_setup:\n if self._gpio is None:\n try:\n import RPi.GPIO as GPIO\n self._gpio = GPIO\n except ImportError:\n raise ImportError('This library requires the RPi.GPIO module\\nInstall with: sudo apt install python-rpi.gpio')\n self._gpio.setmode(self._gpio.BCM)\n self._gpio.setwarnings(False)\n self._gpio.setup(self.cs_pin, self._gpio.OUT)\n self._gpio.setup(self.dc_pin, self._gpio.OUT, initial=self._gpio.LOW, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.reset_pin, self._gpio.OUT, initial=self._gpio.HIGH, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.busy_pin, self._gpio.IN, pull_up_down=self._gpio.PUD_OFF)\n\n if self._spi_bus is None:\n import spidev\n self._spi_bus = spidev.SpiDev()\n\n self._spi_bus.open(0, self.cs_channel)\n self._spi_bus.no_cs = True\n self._spi_bus.max_speed_hz = 5000000\n\n self._gpio_setup = True\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n time.sleep(0.1)\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n\n self._busy_wait(1.0)\n\n # Sending init commands to display\n self._send_command(AC073TC1_CMDH, [0x49, 0x55, 0x20, 0x08, 0x09, 0x18])\n\n self._send_command(AC073TC1_PWR, [0x3F, 0x00, 0x32, 0x2A, 0x0E, 0x2A])\n\n self._send_command(AC073TC1_PSR, [0x5F, 0x69])\n\n self._send_command(AC073TC1_POFS, [0x00, 0x54, 0x00, 0x44])\n\n self._send_command(AC073TC1_BTST1, [0x40, 0x1F, 0x1F, 0x2C])\n\n self._send_command(AC073TC1_BTST2, [0x6F, 0x1F, 0x16, 0x25])\n\n self._send_command(AC073TC1_BTST3, [0x6F, 0x1F, 0x1F, 0x22])\n\n self._send_command(AC073TC1_IPC, [0x00, 0x04])\n\n self._send_command(AC073TC1_PLL, [0x02])\n\n self._send_command(AC073TC1_TSE, [0x00])\n\n self._send_command(AC073TC1_CDI, [0x3F])\n\n self._send_command(AC073TC1_TCON, [0x02, 0x00])\n\n self._send_command(AC073TC1_TRES, [0x03, 0x20, 0x01, 0xE0])\n\n self._send_command(AC073TC1_VDCS, [0x1E])\n\n self._send_command(AC073TC1_T_VDCS, [0x00])\n\n self._send_command(AC073TC1_AGID, [0x00])\n\n self._send_command(AC073TC1_PWS, [0x2F])\n\n self._send_command(AC073TC1_CCSET, [0x00])\n\n self._send_command(AC073TC1_TSSET, [0x00])",
"def __init__(self, esp_mgr):\n self.esp_mgr = esp_mgr\n adafruit_esp32spi_socket.set_interface(self.esp_mgr.esp)\n self.inbuffer = ''\n self.cmds = []\n self.next_fn = self.state_text\n self.telnet_cmd = []\n self.client_socket = None\n self.server_socket = None\n self.termious = None # termious hack\n self.current_state = ''",
"def init_serial():\n\tglobal D\n\t# start serial connection\n\tbaud = 9600\n\ttry:\n\t\tD.gps_serial = serial.Serial(\"/dev/ttyAMA0\",baud,timeout=1)\n\t\tD.gps_serial.open()\n\t\tD.gps_serial.write(\"$PMTK220,200*2C\")\n\t\tD.gps_serial.write(\"$PMTK300,200,0,0,0,0*2F\")\n\texcept:\n\t\tprint \"Failed to open serial\"\n\t\trospy.shutdown(\"Failed to open gps serial\")",
"def __init__(self, pitch=30, pitch_type='duo', Z=4, Alt = 100):\n \n self.pitch_type = pitch_type\n self.pitch = pitch\n self.Z = Z\n self.Alt = Alt\n \n \n # set the Ce value (exposure coeff NA 2.16)\n self.Ce = 1\n \n # set the Ct value (thermal coeff NA 2.17)\n self.Ct = 1\n \n # snow load shjape coefficients\n if self.pitch_type == 'mono':\n if self.pitch <= 30:\n self.mu = 0.80\n elif 30 < self.pitch <= 60:\n self.mu = 0.80 * (60 - self.pitch) / 30\n else:\n self.mu = 0.0\n elif self.pitch_type == 'duo':\n if self.pitch <= 15:\n self.mu = 0.80\n elif 15 < self.pitch <= 30:\n self.mu = 0.80 + 0.40*(self.pitch - 15) / 15\n elif 30 < self.pitch <= 60:\n self.mu = 1.2*(60 - self.pitch) / 30\n else:\n self.mu = 0.0\n else:\n self.mu = 0.80 # end conservative number\n \n # calculate the value of the snow load on the ground \n self.sk = (0.15 + (0.1 * self.Z + 0.05) + ((self.Alt - 100) / 525))\n \n # calculate the roof snow load\n self.s = self.mu * self.Ce * self.Ct * self.sk",
"def __init__(self):\n self._read_calibration_data()\n self.configure_sensor(\n TemperatureOversamplings.x08,\n PressureOversamplings.x16,\n HumidityOversamplings.x08,\n IIRFilterCoefficients.FC_003,\n 250,\n 250)",
"def __init__(self, device = '/dev/spidev0.0', delay = 40, speed = 200000, bits = 8,Port=None,Server=None):\n self.Port = Port\n self.Server=Server\n if self.Server != None:\n self.Transaction=self._NetTransaction\n else:\n if self.Port != None: # Init Server Thread\n self.ServerThread = threading.Thread(target=self.ListenerTread)\n self.ServerThread.start()\n self.Bits = c_uint8(bits)\n self.Speed = self.WriteSpeed\n self.Delay = c_uint16(delay)\n self.Device = device\n self.File = posix.open(self.Device, posix.O_RDWR)\n self.SetBits()\n self.SetSpeed()",
"def use_spi():\n _LIB.oled_click_use_spi()",
"def qspi_init(self, retain_ram=False, init_params=None):\n class _CtypesQSPIInitParams(ctypes.Structure):\n _fields_ = [(\"read_mode\", ctypes.c_int), (\"write_mode\", ctypes.c_int), (\"address_mode\", ctypes.c_int), (\"frequency\", ctypes.c_int), (\"spi_mode\", ctypes.c_int), (\"sck_delay\", ctypes.c_uint32), (\"custom_instruction_io2_level\", ctypes.c_int), (\"custom_instruction_io3_level\", ctypes.c_int), (\"CSN_pin\", ctypes.c_uint32), (\"CSN_port\", ctypes.c_uint32), (\"SCK_pin\", ctypes.c_uint32), (\"SCK_port\", ctypes.c_uint32), (\"DIO0_pin\", ctypes.c_uint32), (\"DIO0_port\", ctypes.c_uint32), (\"DIO1_pin\", ctypes.c_uint32), (\"DIO1_port\", ctypes.c_uint32), (\"DIO2_pin\", ctypes.c_uint32), (\"DIO2_port\", ctypes.c_uint32), (\"DIO3_pin\", ctypes.c_uint32), (\"DIO3_port\", ctypes.c_uint32), (\"WIP_index\", ctypes.c_uint32), (\"pp_size\", ctypes.c_int)]\n \n if not self._is_bool(retain_ram):\n raise ValueError('The retain_ram parameter must be a boolean value.')\n \n if not self._is_right_class(init_params, QSPIInitParams) and init_params is not None:\n raise ValueError('The init_params parameter must be an instance of class QSPIInitParams.')\n \n if init_params is None:\n init_params = QSPIInitParams()\n\n retain_ram = ctypes.c_bool(retain_ram)\n qspi_init_params = _CtypesQSPIInitParams(init_params.read_mode, init_params.write_mode, init_params.address_mode, init_params.frequency, init_params.spi_mode, init_params.sck_delay, init_params.custom_instruction_io2_level, init_params.custom_instruction_io3_level, init_params.CSN_pin, init_params.CSN_port, init_params.SCK_pin, init_params.SCK_port, init_params.DIO0_pin, init_params.DIO0_port, init_params.DIO1_pin, init_params.DIO1_port, init_params.DIO2_pin, init_params.DIO2_port, init_params.DIO3_pin, init_params.DIO3_port, init_params.WIP_index, init_params.pp_size)\n \n result = self._lib.NRFJPROG_qspi_init(retain_ram, ctypes.byref(qspi_init_params))\n if result != NrfjprogdllErr.SUCCESS:\n raise APIError(result)",
"def __init__(self):\r\n super().__init__()\r\n self._name = \"PICOSCOPE2408b\"\r\n self._lib = None\r\n self._handle = None\r\n self._run_lock = Lock()\r\n self._driver_lock = Lock()\r\n\r\n self._sampling_time = 4E-9\r\n self._sampling_duration = 50E-6\r\n self._pulse_time = 100E-9\r\n self._samples = int(self._sampling_duration / self._sampling_time)\r\n self._idx = 0\r\n\r\n w_len = self._samples\r\n location = 0.1\r\n idx1 = int(w_len*(location - self._pulse_time/(2*self._sampling_duration)))\r\n idx2 = int(w_len*(location + self._pulse_time/(2*self._sampling_duration))) - 1\r\n self._waveform = np.array([-1*MAX_EXT if (i < idx1 or i >= idx2) else MAX_EXT for i in range(w_len)],dtype=c_int16)\r\n\r\n self._A_data = np.ones(self._samples)*2\r\n self._B_data = np.ones(self._samples)*-2\r\n self._C_data = np.ones(self._samples)*0\r\n self._window_est = np.ones(self._samples)*0\r\n self._t = np.linspace(0,self._sampling_duration,self._samples)\r\n self._range_A = None\r\n self._range_B = None\r\n self._depol_ratio = None\r\n\r\n self._process_queue = Queue()\r\n self._save_queue = Queue()",
"def __init__(self):\n self.ram = bytearray(256)\n self.register = [0] * 8\n self.pc = 0\n self.sp = 7",
"def __init__(sp, line) :\n ## frameNumber, eventName, photonEnergyEv, wavelengthA, GMD, peak_index, peak_x_raw, peak_y_raw, peak_r_assembled, peak_q, peak_resA, nPixels, totalIntensity, maxIntensity, sigmaBG, SNR\n #5, LCLS_2015_Feb22_r0169_022047_197ee, 6004.910515, 2.064714, 4.262349, 29997, 508.884796, 19.449471, 441.314606, 1.741234, 5.743053, 5, 361.105774, 112.819145, 19.236982, 18.771435\n\n sp.line = line[:-1] #.rstrip('\\n') # .replace(',',' ')\n sp.fields = sp.line.split()\n\n s_frameNumber, s_eventName, s_photonEnergyEv, s_wavelengthA, s_GMD, s_peak_index, s_peak_x_raw, s_peak_y_raw,\\\n s_peak_r_assembled, s_peak_q, s_peak_resA, s_nPixels, s_totalIntensity, s_maxIntensity, s_sigmaBG, s_SNR =\\\n sp.fields[0:16]\n\n sp.frameNumber, sp.photonEnergyEv, sp.wavelengthA = int(s_frameNumber), float(s_photonEnergyEv), float(s_wavelengthA)\n sp.GMD, sp.peak_index, sp.peak_x_raw, sp.peak_y_raw = float(s_GMD), int(s_peak_index), float(s_peak_x_raw), float(s_peak_y_raw)\n sp.peak_r_assembled, sp.peak_q, sp.peak_resA, sp.nPixels = float(s_peak_r_assembled), float(s_peak_q), float(s_peak_resA), int(s_nPixels)\n sp.totalIntensity, sp.maxIntensity, sp.sigmaBG, sp.SNR = float(s_totalIntensity), float(s_maxIntensity), float(s_sigmaBG), float(s_SNR)\n\n sp.runnum, sp.tstamp, sp.tsec, sp.s_fid = convertCheetahEventName(s_eventName)\n sp.fid = int(sp.s_fid, 16)\n\n #sp.seg, sp.row, sp.col = src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)\n\n sp.line = line\n sp.empty = sp.empty_line()",
"def teleopInit(self):\n self.Drive.resetEncoder()\n\n self.Drive.disableAutoForward()\n self.Drive.disableAutoTurn()\n self.Drive.disableVision()\n\n self.DS.setWhichVariable(True)\n self.Drive.updateSetpoint(\"teleop\")\n self.DS.setFirstTimeVariable(True)\n self.timer.reset()\n\n self.matchTime.startMode(isAuto=False)",
"def __init__(self, gameCanvas, specs):\n\t\n\t\t# Initialization of the Dot\n\t\tsuper(Dot, self).__init__(gameCanvas, specs, specs['points'])\n\t\t\n\t\t# Draw the Dot\n\t\tself.draw()"
] | [
"0.6566145",
"0.6351516",
"0.63373953",
"0.6181888",
"0.61446565",
"0.6111663",
"0.59349567",
"0.57059205",
"0.55544007",
"0.554452",
"0.5522998",
"0.5509588",
"0.5503774",
"0.5380477",
"0.53760433",
"0.5336554",
"0.5321746",
"0.5299691",
"0.52520025",
"0.5248618",
"0.5240898",
"0.5219589",
"0.5202608",
"0.5198033",
"0.5195962",
"0.5170883",
"0.5133692",
"0.5128285",
"0.51246643",
"0.5120659"
] | 0.74753237 | 0 |
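A minimal usage sketch for the DotStars initializer in the record above. It shows how the 4-byte-per-LED buffer is typically addressed; the set_led helper and the APA102-style byte order (brightness header, then blue, green, red) are assumptions for illustration — the record itself only ever writes zeros into the colour bytes.

    # Sketch: addressing one LED inside a DotStar-style buffer (4 bytes per LED).
    # Assumed frame layout: 0xE0 | 5-bit brightness, then blue, green, red.
    def set_led(buffer, index, red, green, blue, brightness=31):
        offset = index * 4
        buffer[offset] = 0xE0 | (brightness & 0x1F)  # header + global brightness
        buffer[offset + 1] = blue & 0xFF
        buffer[offset + 2] = green & 0xFF
        buffer[offset + 3] = red & 0xFF

    # A 10-LED buffer using the same "off" pattern as emptybuffer, with LED 3 set to red.
    buf = bytearray(b"\xff\x00\x00\x00" * 10)
    set_led(buf, 3, red=255, green=0, blue=0)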
This method clears all the LEDs in the DotStar object | def clearleds(self):
self.buffer = self.emptybuffer[:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].reset()",
"def clear(tft, oled):\n oled.fill(tft.BLACK)",
"def clear(self):\n self._delayvalue = _CFG[\"delay\"]\n self._colormode = _CFG[\"colormode\"]\n self._delete(\"all\")\n self._bgpic = self._createimage(\"\")\n self._bgpicname = \"nopic\"\n self._tracing = 1\n self._updatecounter = 0\n self._turtles = []\n self.bgcolor(\"white\")\n for btn in 1, 2, 3:\n self.onclick(None, btn)\n self.onkeypress(None)\n for key in self._keys[:]:\n self.onkey(None, key)\n self.onkeypress(None, key)\n Myturtle._pen = None",
"def off(self):\n for light in self.all:\n GPIO.output(light, 0)",
"def all_off():\n Leds.red_left.brightness = 0\n Leds.red_right.brightness = 0\n Leds.green_left.brightness = 0\n Leds.green_right.brightness = 0\n Leds.blue_left.brightness = 0\n Leds.blue_right.brightness = 0",
"def turn_all_off(self):\n for led_type in LED:\n self.led_off(led_type)\n logging.info('LED: ALL - Status: 0')",
"def clear_all(self):\n self._set_all(0x00, 0x00, 0x00)",
"def clear(self):\n self.np.fill(OFF)\n self.np.show()\n return True",
"def clear(self):\n black = neo.Color(0,0,0)\n self.set_all(black)\n self.draw()",
"def reset(self):\n for i in range(self.shapeRow):\n for j in range(self.shapeColumn):\n self.buttons[i][j].setText(\" \")",
"def clear_strip(self):\r\n wlogger.log_info(\"Clear Strip\")\r\n for led in range(self.num_led):\r\n self.set_pixel(led, 0, 0, 0)\r\n self.show()",
"def clear(self):\n self._frame.clear()\n self._turtles = []\n self._gpens = []",
"def clear_all(cls):\n del cls.buttons[:]",
"def clear(self):\n self.initialize()\n self.device_disconnect()",
"def clear(self):\n for i in range(len(self.canvas)):\n self.canvas[i] = 0",
"def all_off(self):\n self.fill_off()\n self.update()\n self.fill_off()\n self.update()",
"def remove_all_lights(self):\n self.RemoveAllLights()\n self._lights.clear()",
"def stop_all():\r\n motors.stop_all_motors()\r\n led.set_colour_solid(0)\r\n display.clear()",
"def reset(self):\n self.obstacles = []\n self._tick = 0",
"def remove_all(self):\n self.initial = None\n self.contour = None\n self.control_points = []",
"def clear(self):\n self.state = [[None, None, None],\n [None, None, None],\n [None, None, None]]",
"def reset(self):\n for gate in self.gates:\n gate.reset()",
"def off_all(self):\n self._set_status(\"off\", \"11111111\")",
"def clear(self):\n if self.flag == 0:\n for coord in INDICES:\n self.kill(coord)\n self.chart[coord] = DEAD",
"def deleteAll(self):\n for tlight in self.trafficLights:\n del tlight\n del self.trafficLights",
"def reset(self):\n for lane in self.lanes.values():\n lane.puck_area.clear_widgets()\n lane.patrons = list()\n lane.disabled = False\n lane.beers = list()\n\n self.message_holder.remove_widget(self.you_lose_label)\n self.message_holder.remove_widget(self.you_win_label)",
"def clear(self):\n self.raster_path_line.clear()\n self.labels_path.clear()\n self.shapefile_path.clear()\n self.costumelabels.clear()\n self.layer_name.clear()\n self.class_name.clear()\n self.idfield.clear()",
"def __clear(self):\n for i in range(len(self.buttons_list)):\n self.labels_strvar[i].set(\"\")\n if self.buttons_list[i][\"state\"] == DISABLED:\n self.buttons_list[i][\"state\"] = NORMAL\n self.entered_list = []\n return",
"def clearHotspots( self ):\n self._hotspots = []",
"def eraseAll(self): # remove all robots\n\t\tself.__robotList = []"
] | [
"0.7280109",
"0.71445733",
"0.70629704",
"0.7045428",
"0.7045238",
"0.6974367",
"0.6902242",
"0.6812371",
"0.679938",
"0.6789614",
"0.6783468",
"0.6701193",
"0.6695715",
"0.66421294",
"0.6637276",
"0.6621712",
"0.6612257",
"0.6599522",
"0.6587036",
"0.6560823",
"0.65587413",
"0.6557263",
"0.6530591",
"0.6514564",
"0.6512429",
"0.6502498",
"0.64983493",
"0.6476238",
"0.6462477",
"0.64595515"
] | 0.7821158 | 0 |
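clearleds in the record above only resets the in-memory buffer; a write over SPI is still needed before the physical strip goes dark. Below is a hedged sketch of that write, framed by the start and end frames built in __init__ — the showleds name and the stand-in SPI object are assumptions, not part of the record.

    # Sketch: flushing a DotStar buffer to the strip.
    def showleds(spi, startframe, buffer, endframe):
        spi.write(startframe)  # 4 zero bytes open the transfer
        spi.write(buffer)      # one 4-byte frame per LED
        spi.write(endframe)    # trailing 0xFF bytes provide extra clock pulses

    class _FakeSPI:
        """Stand-in so the sketch runs without hardware."""
        def __init__(self):
            self.sent = bytearray()
        def write(self, data):
            self.sent.extend(data)

    spi = _FakeSPI()
    showleds(spi, bytes(4), bytearray(4 * 8), bytes([0xFF] * 4))
    assert len(spi.sent) == 4 + 32 + 4  # start frame + 8 LEDs + end frame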
View all the images in the dataset on a grid three columns wide, at the given figure size. | def view_images(dataset, size):
images, labels = dataset
assert images.shape[0] == labels.shape[0]
num_images = images.shape[0]
num_cols = 3
num_rows = np.ceil(num_images / num_cols).astype("int")
plt.figure(figsize=size)
for i in range(num_images):
image = images[i]
label = labels[i]
ax = plt.subplot(num_rows, num_cols, i + 1)
plt.imshow(np.array(image, dtype="float"))
plt.title("Number: " + str(label))
plt.axis("off") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_imgs(dataset, n_imgs, plot_size=(15, 15), cmap=None):\n n_cols = int(np.sqrt(n_imgs))\n n_rows = int(np.ceil(np.sqrt(n_imgs)))\n class_idx = dataset.class_to_idx\n idx_class = idx_to_class(class_idx)\n\n fig, axes = plt.subplots(n_rows, n_cols, figsize=plot_size)\n for i, ax in enumerate(axes.flatten()):\n ax.axis('off')\n title = f'Class : {idx_class[dataset.targets[i]]}'\n ax.imshow(dataset.data[i], cmap=cmap)\n ax.set_title(title)\n fig.tight_layout()",
"def display_sample_images(self):\n if self.train_dataset is None:\n self.init_datasets()\n\n images, labels = next(self.train_dataset)\n plt.figure(figsize=(5,5))\n for n in range(min(25, images.shape[0])):\n ax = plt.subplot(5,5,n+1)\n plt.imshow(images[n])\n if len(labels.shape) == 1:\n plt.title(self.class_names[int(labels[n])].title())\n else:\n m = np.argmax(labels[n])\n plt.title(self.class_names[int(labels[n, m])].title())\n plt.axis('off')\n\n plt.tight_layout()\n plt.show()",
"def PlotImages(x):\r\n # 5.1 Create figure-window and axes\r\n _, ax = plt.subplots(nrows = 2, ncols= 3)\r\n # 5.2\r\n ax[0,0].imshow(x[0, :].reshape(75,75))\r\n ax[0,1].imshow(x[1, :].reshape(75,75))\r\n ax[0,2].imshow(x[2, :].reshape(75,75))\r\n ax[1,0].imshow(x[3, :].reshape(75,75))\r\n ax[1,1].imshow(x[4, :].reshape(75,75))\r\n ax[1,2].imshow(x[5, :].reshape(75,75))\r\n plt.show()",
"def show_imagegrid_dataset(dataset,\n num=10,\n shuffle=True,\n classes='auto',\n figsize=None,\n fontsize=20,\n image_attr={'cmap': plt.cm.Greys_r}):\n sample = dataset[0]\n if isinstance(sample, tuple) and len(sample) == 2:\n images_per_class = get_labeled_imagegrid(dataset,\n num=num,\n shuffle=shuffle,\n classes=classes)\n num = min(num, max(map(len, images_per_class.values())))\n classes = list(images_per_class.keys())\n\n if figsize is None:\n figsize = (2 * num, 2 * len(classes))\n fig, axs = plt.subplots(figsize=figsize, nrows=len(classes), ncols=num)\n if len(classes) == 1:\n axs = np.expand_dims(axs, 0)\n if num == 1:\n axs = np.expand_dims(axs, -1)\n for i, (class_name, class_images) in enumerate(images_per_class.items()):\n for j, img in enumerate(class_images):\n show_image(img, axs[i][j], image_attr)\n axs[i][0].set_ylabel(str(class_name), fontsize=fontsize)\n elif isinstance(sample, (Image, torch.Tensor, np.ndarray)):\n image_list = get_imagegrid(dataset,\n num=num,\n shuffle=shuffle)\n num = min(len(image_list), num)\n nrows = math.ceil(math.sqrt(num))\n ncols = math.ceil(num / nrows)\n if figsize is None:\n figsize = (2 * nrows, 2 * ncols)\n fig, axs = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols)\n axs = axs.flatten()\n for i, img in enumerate(image_list):\n show_image(img, axs[i], image_attr)",
"def show_images(images, save=None, size=None):\n assert len(images) > 0, \"images should contain at least 1 element\"\n assert len(images[0].shape) == 3, \"each image should contain 3 elements (c, w,h)\"\n \n fig, ax = plt.subplots(nrows=images[0].shape[0], ncols=len(images))\n \n for i in range(len(images)): \n for j in range(images[0].shape[0]):\n ax[i,j].imshow(images[i][j,:,:], cmap='gray')\n \n plt.show()",
"def show_images(imgs, nrows, ncols, figsize=None):\n figsize = (ncols, nrows)\n _, figs = plt.subplots(nrows, ncols, figsize=figsize)\n for i in range(nrows):\n for j in range(ncols):\n figs[i][j].imshow(imgs[i*ncols+j].asnumpy())\n figs[i][j].axes.get_xaxis().set_visible(False)\n figs[i][j].axes.get_yaxis().set_visible(False)\n plt.show()",
"def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)",
"def display_images_in_grid(imgs, row, col):\n if len(imgs) != (row * col):\n raise ValueError(f\"Invalid imgs len:{len(imgs)} col:{row} row:{col}\")\n\n for i, img in enumerate(imgs):\n plot_num = i + 1\n plt.subplot(row, col, plot_num)\n plt.tick_params(labelbottom=False) # remove x axis\n plt.tick_params(labelleft=False) # remove y axis\n plt.imshow(img)\n plt.show()",
"def grid(images, cols = 2, save = False, filename = \"\", show = False):\n \n rows = ceil(len(images) / cols)\n \n fig, ax = plt.subplots(rows, 1)\n\n index = 0\n element = []\n for row in range(rows):\n for col in range(cols): \n if index < len(images):\n element.append(images[index])\n index += 1\n \n stack = np.hstack(tuple(element))\n ax[row].axis('off')\n ax[row].imshow(stack)\n element = []\n \n plt.tight_layout()\n \n if save:\n fig.savefig(filename)\n\n if show:\n plt.show(fig)\n \n return 0",
"def display_images(digits_im):\n i = 0\n\n for img in digits_im:\n if i < N_NEIGHBOURS:\n # Visualize your data\n im_max = np.max(img)\n img = PIXELS * (np.abs(im_max - img) / im_max)\n res = cv2.resize(img, (DIM, DIM), interpolation=cv2.INTER_CUBIC)\n cv2.imwrite('digit ' + str(i) + '.png', res)\n i += 1\n else:\n break",
"def visulize_5(X):\n fig, axes1 = plt.subplots(5,5,figsize=(3,3))\n for j in range(5):\n for k in range(5):\n i = np.random.choice(range(len(X)))\n axes1[j][k].set_axis_off()\n axes1[j][k].imshow(X[:,i].reshape(32, 32, 3))\n plt.show()",
"def img_viewer_examples(images, labels, prediction = None, size=0, greyscale=False):\n batchSize = min(size, images.shape[0])\n \n if size == 0:\n batchSize = images.shape[0]\n\n # I CAN TAKE THE BATCH_SIZE from the images size/shape according the sent data type\n no_of_columns = round(math.sqrt(batchSize))\n no_of_rows = math.ceil(batchSize / no_of_columns)\n print(\"batch size {}, no_of_rows {}, no_of_columns {}\".format(batchSize, no_of_rows, no_of_columns))\n fig = plt.figure(figsize=(no_of_columns*1.25, no_of_rows*1.5))\n # (width, height)\n for idx in np.arange(batchSize):\n ax = fig.add_subplot(no_of_rows, no_of_columns,\n idx+1, xticks=[], yticks=[])\n if greyscale:\n ax.imshow(np.squeeze(images[idx]), cmap='gray')\n else:\n ax.imshow(np.squeeze(images[idx]))\n # print out the correct label for each image\n # .item() gets the value contained in a Tensor\n # WAIT FOR TASNEEM TO SEE THE RETURNED DATA TYPE\n if not prediction is None:\n ax.set_title(\"{} ({})\".format(str(prediction[idx]), str(labels[idx])),\n color=(\"green\" if prediction[idx] == labels[idx] else \"red\"))\n else:\n ax.set_title(str(labels[idx]))",
"def show_images(imgs, num_rows, num_cols, scale=2):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)\n for i in range(num_rows):\n for j in range(num_cols):\n axes[i][j].imshow(imgs[i * num_cols + j].asnumpy())\n axes[i][j].axes.get_xaxis().set_visible(False)\n axes[i][j].axes.get_yaxis().set_visible(False)\n return axes",
"def imshow_grid(images, shape=[2, 8]):\n fig = plt.figure(1)\n grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)\n\n size = shape[0] * shape[1]\n for i in range(size):\n grid[i].axis('off')\n grid[i].imshow(images[i]) # The AxesGrid object work as a list of axes.\n\n plt.show()",
"def imshow_grid(images, shape=[2, 8]):\n fig = plt.figure(1)\n grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)\n\n size = shape[0] * shape[1]\n for i in range(size):\n grid[i].axis('off')\n grid[i].imshow(images[i]) # The AxesGrid object work as a list of axes.\n\n plt.show()",
"def imshow_grid(images, shape=[2, 2], name='default', save=False):\n fig = plt.figure()\n grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)\n\n size = shape[0] * shape[1]\n for i in range(size):\n grid[i].axis('off')\n img = images[i]\n if img.shape[0]==3:\n img = img.transpose(1, 2, 0)\n img = (img - img.min())/(img.max() - img.min())\n grid[i].imshow(img, vmin=-132, vmax = 164) # The AxesGrid object work as a list of axes.\n\n plt.show()",
"def show_imgs(imgs, row, col):\n if len(imgs) != (row * col):\n raise ValueError(\n \"Invalid imgs len:{} col:{} row:{}\".format(len(imgs), row, col))\n\n for i, img in enumerate(imgs):\n plot_num = i+1\n plt.subplot(row, col, plot_num)\n plt.tick_params(labelbottom=False) # x軸の削除\n plt.tick_params(labelleft=False) # y軸の削除\n plt.imshow(img)\n plt.show()",
"def show_images(imgs, num_rows, num_cols, scale=2):\n figsize = (num_cols*scale, num_rows*scale)\n _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)\n for i in range(num_rows):\n for j in range(num_cols):\n # show the target image\n axes[i][j].imshow(imgs[i*num_cols+j])\n # set the sub-axis to be invisible\n axes[i][j].axes.get_xaxis().set_visible(False)\n axes[i][j].axes.get_yaxis().set_visible(False)\n # remember to show figure at last\n plt.show()\n return axes",
"def show_train_images(train_data, train_labels):\n plt.figure(1, figsize=(8, 8))\n n = 0\n\n for i in range(16):\n n += 1\n # each time random images are loaded\n # r = np.random.randint(0, train_data.shape[0], 1)\n plt.subplot(4, 4, n)\n plt.subplots_adjust(hspace=0.5, wspace=0.5)\n plt.imshow(train_data[i] / 255.)\n plt.title('{}'.format(train_labels[i]))\n plt.xticks([]), plt.yticks([])\n plt.show()",
"def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): #@save\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(d2l.numpy(img))\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes",
"def show_images(images, db):\n images = [int(image) for image in images]\n files = get_img_files(images, db)\n show_files(files)",
"def plot_100_images(X, indices=None):\n width, height = IMAGE_WIDTH, IMAGE_HEIGHT\n nrows, ncols = 10, 10\n if indices is None:\n indices = range(X.shape[0])\n indices_to_display = np.random.choice(indices, nrows * ncols)\n\n big_picture = np.zeros((height * nrows, width * ncols))\n\n irow, icol = 0, 0\n for idx in indices_to_display:\n if icol == ncols:\n irow += 1\n icol = 0\n iimg = X[idx].reshape(width, height).T # transpose the data set\n big_picture[irow * height:irow * height + iimg.shape[0],\n icol * width:icol * width + iimg.shape[1]] = iimg\n icol += 1\n plt.imshow(big_picture, cmap=matplotlib.cm.Greys_r)\n\n plt.show()",
"def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)",
"def show_image_grid(imgs):\n grd = make_grid(imgs)\n npimg = grd.numpy()\n plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest')\n plt.ion()\n plt.show()",
"def plot_n_image(X, n):\n pic_size = int(np.sqrt(X.shape[1]))\n grid_size = int(np.sqrt(n))\n\n first_n_images = X[:n, :]\n\n fig, ax_array = plt.subplots(nrows=grid_size, ncols=grid_size,sharey=True, sharex=True, figsize=(8, 8))\n\n for r in range(grid_size):\n for c in range(grid_size):\n ax_array[r, c].imshow(first_n_images[grid_size * r + c].reshape((pic_size, pic_size)))\n plt.xticks(np.array([]))\n plt.yticks(np.array([]))",
"def show_torch_imgs(imgs, nrow=8, figsize=(8, 5), axis_off=True , **opt):\n import torchvision\n import torch\n if not torch.is_tensor(imgs):\n # Not a torch tensor. Assume that it is torch.autograd.Variable\n # Try to get the tensor inside the Variable.\n try:\n imgs = imgs.data\n except:\n raise ValueError('Expect input imgs to be a torch Tensor or torch.autograd.Variable.')\n # https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91\n img = torchvision.utils.make_grid(imgs, nrow=nrow, **opt)\n npimg = img.cpu().numpy()\n # make it height x width x channels\n npimg = np.transpose(npimg, (1, 2, 0))\n\n plt.figure(figsize=figsize)\n plt.imshow(npimg, interpolation='nearest')\n if axis_off:\n plt.axis('off')",
"def display_imgs(img_dir,img_list):\n for img in img_list:\n display_img(img_dir, img)",
"def plot_many_images(images, titles, rows=1, columns=2):\n for i, image in enumerate(images):\n plt.subplot(rows, columns, i + 1)\n plt.imshow(image, \"gray\")\n plt.title(titles[i])\n plt.xticks([]), plt.yticks([]) # Hide tick marks\n plt.show()",
"def show_images(images):\n for name, img in images:\n cv2.imshow(name, img)\n\n cv2.waitKey(0)",
"def three_sample_images():\n samples = samples_path()\n _truck = np.array(Image.open(os.path.join(samples, \"truck.png\")))\n _deer = np.array(Image.open(os.path.join(samples, \"deer.png\")))\n _frog = np.array(Image.open(os.path.join(samples, \"frog.png\")))\n truck = transforms.ToTensor()(_truck)\n deer = transforms.ToTensor()(_deer)\n frog = transforms.ToTensor()(_frog)\n return torch.stack([truck, deer, frog])"
] | [
"0.6872789",
"0.6706518",
"0.66048354",
"0.65576744",
"0.6477456",
"0.6438162",
"0.64273864",
"0.6386546",
"0.6381494",
"0.63184965",
"0.6311449",
"0.62808603",
"0.6278003",
"0.6259482",
"0.6259482",
"0.62534124",
"0.6243781",
"0.6227257",
"0.6224085",
"0.6216505",
"0.6158989",
"0.61508155",
"0.61446035",
"0.61427486",
"0.61373985",
"0.6089885",
"0.6060406",
"0.6059162",
"0.6047537",
"0.60443485"
] | 0.760682 | 0 |
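A usage sketch for the view_images record above, assuming that function is in scope; the small synthetic (images, labels) pair stands in for real data, and its 28x28 shape is an assumption based on the MNIST-style images used elsewhere in these records.

    import numpy as np
    import matplotlib.pyplot as plt

    # Seven fake grayscale images with labels 0..6.
    images = np.random.rand(7, 28, 28)
    labels = np.arange(7)

    # Three columns, so ceil(7 / 3) = 3 rows of subplots.
    view_images((images, labels), size=(9, 9))
    plt.show()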
Normalises and reshapes the images in the dataset. | def normalise(dataset):
# Scale images to the [0, 1] range
dataset = dataset.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
return np.expand_dims(dataset, -1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def normalize_dataset(self):",
"def normalize_images(data, blend_cat, Args):\n im = data['X_train']['blend_image']\n std = np.std(im)\n mean = np.mean(im)\n data['X_train']['blend_image'] = (im - mean) / std\n data['X_val']['blend_image'] = (data['X_val']['blend_image'] - mean) / std\n data['X_train'] = normalize_other_inputs(data['X_train'], Args)\n data['X_val'] = normalize_other_inputs(data['X_val'], Args)\n for key in data['Y_train'].keys():\n data['Y_train'][key] = (data['Y_train'][key] - mean) / std\n data['Y_val'][key] = (data['Y_val'][key] - mean) / std\n blend_cat['std'] = std\n blend_cat['mean'] = mean\n return data",
"def flatten_image(data):\n\t# print(img.shape[0])\n\t# print(img.shape[1])\n\t# cv2.imshow('image',img)\n\t# gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\t# plt.imshow(gray)\n\t# plt.show()\n\t# X_normalized = preprocessing.normalize(img, norm='l2')\n\t\n\t# s = img.shape[0] * img.shape[1]\n\t# img_wide = img.reshape((1, s,-1))\t\n\t# img_wide = np.rollaxis(X_normalized, axis=1, start=0)\n\t# plt.imshow(img_wide[0])\n\t# plt.show()\n\t# print(X_normalized)\n\tnsamples, nx, ny = data.shape\n\td2_train_dataset = data.reshape((nsamples,nx*ny))\n\treturn d2_train_dataset",
"def normalize_data(img):\n nor = np.linalg.norm(img, axis = 1)\n nor = np.reshape(nor, (len(img), 1))\n img = np.divide(img, nor)\n return img",
"def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)",
"def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)",
"def normalise(image):",
"def normalize_data(data):\n if data.element_spec[0].shape[2] == 1:\n data = data.map(lambda x, y: (tf.image.grayscale_to_rgb(\n tf.image.resize(x, [32, 32])), y))\n else:\n data = data.map(lambda x, y: (tf.image.resize(x, [32, 32]), y))\n normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1. / 255)\n normalized_ds = data.map(lambda x, y: (normalization_layer(x), y))\n return normalized_ds",
"def preprocess(imgs):\n imgs_p = np.ndarray((len(imgs), img_rows, img_cols), dtype=np.float32)\n for i in range(len(imgs)):\n imgs_p[i] = imgs[i].reshape((img_rows, img_cols))/255.\n\n imgs_p = imgs_p[..., np.newaxis]\n\n # Perform data normalization\n mean = imgs_p.mean()\n std = imgs_p.std()\n imgs_p -= mean\n imgs_p /= std\n\n return imgs_p",
"def normalize_image(self):\n # The image normalization is identical to Cloud TPU ResNet.\n self._image = tf.image.convert_image_dtype(self._image, dtype=tf.float32)\n offset = tf.constant(DATASET_MEAN)\n offset = tf.expand_dims(offset, axis=0)\n offset = tf.expand_dims(offset, axis=0)\n self._image -= offset\n\n scale = tf.constant(DATASET_VAR)\n scale = tf.expand_dims(scale, axis=0)\n scale = tf.expand_dims(scale, axis=0)\n self._image /= scale",
"def unnormalize(images, mean, std):\n \n unnorm_images = images * std + mean\n \n \n return unnorm_images",
"def set_normalization(self, dataloader):\n mean = 0\n square = 0\n for (data_in, _) in dataloader:\n mean += data_in.mean()\n square += data_in.pow(2).mean()\n\n mean /= len(dataloader)\n square /= len(dataloader)\n std = np.sqrt(square - mean ** 2)\n\n # The input data should be roughly normally distributed after\n # passing through net_fixed.\n self.scale_in.bias.data.fill_(- mean / std)\n self.scale_in.weight.data.fill_(1 / std)",
"def normalize(self):\n d = learning_utils.convert_data_to_2d(self._data)\n d = learning_utils.normalize_2d(d)\n self._data = learning_utils.convert_data_to_1d(d)",
"def normalization(imgs):\n\n imgs = np.asarray(imgs).astype(np.float32)\n imgs = np.expand_dims(imgs / 255, axis=-1)\n return imgs",
"def denormalize(img, dataset=\"imagenet\"):\r\n if dataset == \"cifar10\":\r\n c_std = [0.247, 0.243, 0.261]\r\n c_mean = [0.4914, 0.4822, 0.4466]\r\n elif dataset == \"imagenet\":\r\n c_std = [0.229, 0.224, 0.225]\r\n c_mean = [0.485, 0.456, 0.406]\r\n for i in [0, 1, 2]:\r\n img[i] = img[i] * c_std[i] + c_mean[i]\r\n return img",
"def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data",
"def _normalize_images(self, images: th.Tensor) -> th.Tensor:\n output = ((images+2)/4 - self._norm_mean)/self._norm_std\n return output",
"def denormalize(img, means, stds, resize_to_original=False):\n\n img = np.moveaxis(img, 0, 2)\n img = img*stds + means\n img = np.clip(img, 0, 255).astype('uint8')\n\n if resize_to_original:\n # revert def preprocess_image()\n img = img[:,(img_w//4): (img_w - img_w//4),:]\n img = cv2.copyMakeBorder( img, img.shape[0], 0,0,0, cv2.BORDER_CONSTANT) #, borderType)\n img = cv2.resize(img, (img_orig_w, img_orig_h))\n \n return img",
"def normalize_ds(dataset):\n dataset = copy.copy(dataset)\n\n dim_dataset = dataset.shape\n\n for n_row in range(dim_dataset[0]):\n k = dataset[n_row,:]\n k_norm =(k - np.min(k))/(np.max(k) - np.min(k))\n dataset[n_row,:] = k_norm\n\n return dataset",
"def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1",
"def reshape_normalise(img):\n\t# The image shape is expected to match the input of VGG19\n\timg = np.resize(img, (1, CONFIG.IMAGE_HEIGHT, CONFIG.IMAGE_WIDTH, CONFIG.COLOR_CHANNELS)).astype('float32')\n\timg -= CONFIG.MEAN_PIXEL\n\treturn img",
"def _normalize(\n ds: xr.Dataset,\n *,\n dim: Sequence[str],\n kind: str = ADDITIVE,\n) -> xr.Dataset:\n if \"norm\" in ds:\n norm = ds.norm\n else:\n norm = ds.data.mean(dim=dim)\n norm.attrs[\"_group_apply_reshape\"] = True\n\n return xr.Dataset(\n dict(data=apply_correction(ds.data, invert(norm, kind), kind), norm=norm)\n )",
"def reshape_dataset(self, dataset, params):\n assert hasattr(params, \"vectorize_data\"), (\n \"Model params must set vectorize_data.\")\n for key in dataset.keys():\n if dataset[key] is None:\n continue\n dataset[key].images = dp.reshape_data(dataset[key].images, params.vectorize_data)[0]\n dataset[key].shape = dataset[key].images.shape\n return dataset",
"def normalize_images(x_images, mean_value):\n x_flat = np.zeros((x_images.shape[0], 784))\n for k in range(0, x_images.shape[0]):\n img = x_images[k, ...] - mean_value\n img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX).astype(np.float32)\n x_flat[k, ...] = np.reshape(img, [-1])\n\n return x_flat",
"def preprocess_image(self, batched_inputs):\n images = [x[\"image\"].float().to(self.device) for x in batched_inputs]\n images = [self.normalizer(img) for img in images]\n images = ImageList.from_tensors(images, self.backbone.size_divisibility)\n return images",
"def test_normalize(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n assert np.abs(np.mean(_image)-0) < 1e-8\n assert np.abs(np.std(_image)-1) < 1e-8",
"def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data",
"def img_normalize(image, label):\n mean, std = ds_stats\n image -= tf.constant(mean, shape=[1, 1, num_channels], dtype=image.dtype)\n image /= tf.constant(std, shape=[1, 1, num_channels], dtype=image.dtype)\n return image, label",
"def normalize(self, image, transpose=False, data_type=None):\n return normalize(image, self.mean, self.std, transpose)",
"def pre_process_data(input_path: list, cuts: int, shape: int = 32, normalize: bool = True) -> list:\n images = []\n images_uncut = []\n for files_path in input_path:\n\n files = os.listdir(files_path) # TODO paths\n for f in files:\n file_path = f'{files_path}/{f}'\n im_uncut = cv2.imread(file_path)\n im_uncut = cv2.cvtColor(im_uncut, cv2.COLOR_RGB2GRAY)\n images_uncut.append(cv2.resize(im_uncut, (shape * cuts, shape * cuts)))\n x = np.array(images_uncut)\n\n if normalize:\n x_mean = np.mean(x, axis=(0, 1, 2))\n x_std = np.std(x, axis=(0, 1, 2))\n x = (x - x_mean) / (x_std + 1e-9)\n\n for im in x:\n height = im.shape[0]\n width = im.shape[1]\n frac_h = height // cuts\n frac_w = width // cuts\n i = 0\n image = []\n for h in range(cuts):\n for w in range(cuts):\n crop = im[h * frac_h:(h + 1) * frac_h, w * frac_w:(w + 1) * frac_w]\n crop_rehaped = cv2.resize(crop, (shape, shape))\n image.append([crop_rehaped, i, number_to_angle(i, cuts), neighbours(i, cuts)])\n i = i + 1\n images.append(image)\n # return np.array(images) # todo back to array\n return images"
] | [
"0.69572496",
"0.67912114",
"0.67600954",
"0.6746723",
"0.6741553",
"0.6741553",
"0.6712757",
"0.667788",
"0.66753983",
"0.66424495",
"0.6628572",
"0.6604109",
"0.657797",
"0.65456814",
"0.6539488",
"0.64729387",
"0.6469001",
"0.64613545",
"0.6418981",
"0.63990253",
"0.6396894",
"0.63954324",
"0.63640827",
"0.6331304",
"0.6283619",
"0.6265867",
"0.6249871",
"0.624411",
"0.62260234",
"0.6211367"
] | 0.7404679 | 0 |
Checks the date and time, and then decides if a shift from master to slave (or vice versa) is needed. If necessary, makes the shift. | def main():
    date = time.gmtime().tm_mday
    if date == 1 or date == 2: # in case it missed once
        # shift from slave to master, checking to ensure it hasn't already happened
        status = check_status()
        if status == 'slave':
            slave_to_master()
        elif status == 'master':
            print("Shift has probably already happened")
        else:
            print("In a forbidden state:", status)
    elif date == 22 or date == 23: # in case it missed once
        # shift from master to slave, checking to ensure it hasn't already happened
        status = check_status()
        if status == 'master':
            master_to_slave()
        elif status == 'slave':
            print("Shift has probably already happened")
        else:
            print("In a forbidden state:", status)
    else:
        pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __is_valid_move(self, scheduling_unit, turnus, date, person, overtime, depth=0, check_turnuses=[]):\n \n \n if not schedule_utils.is_valid_move (scheduling_unit, turnus, date, person, overtime):\n return False\n \n # if the workplace has the special rule: work in the afternoon, if the next\n # day is a work free day and you will work the next day, and you won't work\n # the next day, work in the morning or not at all\n if scheduling_unit.has_holiday_rule ( ):\n if holiday.is_workfree(date):\n prev_date = date - datetime.timedelta(days=1)\n prev_turnus = person.get_turnus(prev_date) \n if prev_turnus:\n # all afternoon codes start with P\n # all double shift codes start with C\n # TODO: document this\n if prev_turnus.code[0] != 'P' or prev_turnus.code[0] != 'C':\n return False\n else:\n return False\n else:\n next_date = date + datetime.timedelta(days=1)\n if holiday.is_workfree(next_date):\n # this bottom condition is enough, because the dates are added ascending\n if not person.is_free_day(next_date):\n return False\n \n # if the person schedules night turnuses in packages: \n # (Monday + Tuesday)\n # (Tuesday + Wednesday)\n # (Wednesday + Thursday)\n # (Friday + Saturday + Sunday)\n if person.packet_night_turnuses and turnus.code[0] == 'N':\n if depth == 0 and (date.weekday() == 0 or date.weekday() == 2 or date.weekday() == 4):\n return self.__is_valid_move(scheduling_unit, turnus, date + datetime.timedelta(days=1), person, overtime, depth + 1, check_turnuses + [turnus])\n #if this is the second day in the packet continue validation only if it is a Saturday\n elif depth == 1 and date.weekday() == 5:\n # TODO: allow only one holiday turnus per shift type (document this)\n sunday_night_turnus = None\n for alternative_turnus in self.mapper.get_turnuses (scheduling_unit, person):\n if alternative_turnus.holiday and alternative_turnus.code[0] == 'N':\n sunday_night_turnus = alternative_turnus\n break\n else:\n return False\n \n return self.__is_valid_move(scheduling_unit, sunday_night_turnus, date + datetime.timedelta(days=1), person, overtime, depth + 1, check_turnuses + [turnus])\n #Thursday to Friday combination does not exist\n elif depth == 1 and date.weekday() == 4:\n return False\n elif depth == 1:\n return True\n elif depth == 2:\n return True\n \n else:\n return False\n \n \n return True",
"def register_for_shift(self, shift_id, staff_id):\n try:\n result = False\n conflict = False\n\n date = datetime.now()\n mysql_date = f'{date.year}-{date.month}-{date.day} {date.hour}:{date.minute}:00'\n\n shifts_registered_on = self.db_handler.get_staff_registered_shifts_by_id(staff_id)\n shift_pretending = self.db_handler.get_shift_extended_info_by_id(shift_id)\n\n if shifts_registered_on.__len__() > 0:\n for shift in shifts_registered_on:\n diff = shift[1] - shift_pretending[8]\n\n if diff > timedelta(minutes=0): # shift is later\n interval = (diff - (shift_pretending[9] - shift_pretending[8])).seconds / 3600\n if interval >= int(self.get_config_value('HOURS_BETWEEN_SHIFTS')):\n conflict = False\n else:\n conflict = True\n break\n else: # pretending is later\n diff = shift_pretending[8] - shift[1]\n interval = (diff.days * 24) + (diff - (shift[2] - shift[1])).seconds / 3600\n if interval >= int(self.get_config_value('HOURS_BETWEEN_SHIFTS')):\n conflict = False\n else:\n conflict = True\n break\n\n if conflict:\n result = False\n else:\n if self.db_handler.get_shift_registration_by_staff_id_and_shift_id(staff_id, shift_id) is not None:\n self.db_handler.reregister_staff_to_shift(shift_id, staff_id, mysql_date)\n else:\n self.db_handler.register_staff_to_shift(shift_id, staff_id, mysql_date)\n self.logger.write_to_log('staff registered to shift', 'model')\n result = True\n\n return result\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def check_manual_circuit(*args):\n if args[0] == 0:\n setTimeRegime(1)\n elif args[0] == 1:\n setTimeRegime(0)",
"def check_in_to_shift(self, shift_reg_id, staff_id):\n try:\n self.logger.write_to_log(f'requested check in to shift {shift_reg_id}', 'model')\n date = datetime.now()\n mysql_date = f'{date.year}-{date.month}-{date.day} {date.hour}:{date.minute}:00'\n dates = self.db_handler.get_event_date_by_shift_registration_id_and_staff_id(shift_reg_id, staff_id)\n status = 0\n\n diff = dates[2] - date\n\n if diff.days == 0:\n if (diff.seconds / 60) < int(self.get_config_value('CHECK_IN_ALLOWED_BEFORE_SHIFT_MIN')):\n self.db_handler.check_in_to_shift(mysql_date, shift_reg_id)\n self.logger.write_to_log(f'check in entered for shift {shift_reg_id}', 'model')\n status = 1\n else:\n status = 0\n self.logger.write_to_log(f'check in is not set to shift {shift_reg_id}', 'model')\n else:\n status = 0\n self.logger.write_to_log(f'check in is not set to shift {shift_reg_id}', 'model')\n\n return status\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def shifter(self):\n #self.BA_shift = self.timeshift_latitude(self.latB, self.latA)\n #self.BC_shift = self.timeshift_latitude(self.latB, self.latC)\n\n\n self.shifted = True #changing boolean to True when function is called.\n\n secondsA = self.secondsA\n secondsB = self.secondsB\n secondsC = self.secondsC\n\n NeA = self.holefill(self.NeA, secondsA)\n NeB = self.holefill(self.NeB, secondsB)\n NeC = self.holefill(self.NeC, secondsC)\n\n start = 0\n stop = len(NeA) - np.max(np.array([self.BA_shift, self.BC_shift]))\n\n startA = start + self.BA_shift\n stopA = stop + self.BA_shift\n\n startC = start + self.BC_shift\n stopC = stop + self.BC_shift\n\n NeA = NeA[startA:stopA]\n NeB = NeB[start:stop]\n NeC = NeC[startC:stopC]\n\n longA = self.holefill(self.longA, secondsA)\n longB = self.holefill(self.longB, secondsB)\n longC = self.holefill(self.longC, secondsC)\n longA = longA[startA:stopA]\n longB = longB[start:stop]\n longC = longC[startC:stopC]\n\n latA = self.holefill(self.latA, secondsA)\n latB = self.holefill(self.latB, secondsB)\n latC = self.holefill(self.latC, secondsC)\n latA = latA[startA:stopA]\n latB = latB[start:stop]\n latC = latC[startC:stopC]\n\n radA = self.holefill(self.radA, secondsA)\n radB = self.holefill(self.radB, secondsB)\n radC = self.holefill(self.radC, secondsC)\n radA = radA[startA:stopA]\n radB = radB[start:stop]\n radC = radC[startC:stopC]\n\n velA = self.holefill(self.velA, secondsA)\n velB = self.holefill(self.velB, secondsB)\n velC = self.holefill(self.velC, secondsC)\n velA = velA[startA:stopA]\n velB = velB[start:stop]\n velC = velC[start:stop]\n\n altA = self.holefill(self.altA, secondsA)\n altB = self.holefill(self.altB, secondsB)\n altC = self.holefill(self.altC, secondsC)\n altA = altA[startA:stopA]\n altB = altB[start:stop]\n altC = altC[startC:stopC]\n\n\n mlatA = self.holefill(self.mlatA, secondsA)\n mlatB = self.holefill(self.mlatB, secondsB)\n mlatC = self.holefill(self.mlatC, secondsC)\n mlatA = mlatA[startA:stopA]\n mlatB = mlatB[start:stop]\n mlatC = mlatC[startC:stopC]\n\n mlongA = self.holefill(self.mlongA, secondsA)\n mlongB = self.holefill(self.mlongB, secondsB)\n mlongC = self.holefill(self.mlongC, secondsC)\n mlongA = mlongA[startA:stopA]\n mlongB = mlongB[start:stop]\n mlongC = mlongC[startC:stopC]\n\n mltA = self.holefill(self.mltA, secondsA)\n mltB = self.holefill(self.mltB, secondsB)\n mltC = self.holefill(self.mltC, secondsC)\n mltA = mltA[startA:stopA]\n mltB = mltB[start:stop]\n mltC = mltC[startC:stopC]\n\n secondsA = self.holefill(secondsA, secondsA)\n secondsB = self.holefill(secondsB, secondsB)\n secondsC = self.holefill(secondsC, secondsC)\n secondsA = secondsA[startA:stopA]\n secondsB = secondsB[start:stop]\n secondsC = secondsC[startC:stopC]\n\n indsA = np.nonzero(secondsA)[0]\n indsB = np.nonzero(secondsB)[0]\n indsC = np.nonzero(secondsC)[0]\n\n inds = np.intersect1d(indsA, indsB)\n inds = np.intersect1d(inds, indsC)\n\n self.NeA = NeA[inds]\n self.NeB = NeB[inds]\n self.NeC = NeC[inds]\n\n self.longA = longA[inds]\n self.longB = longB[inds]\n self.longC = longC[inds]\n\n self.latA = latA[inds]\n self.latB = latB[inds]\n self.latC = latC[inds]\n\n self.radA = radA[inds]\n self.radB = radB[inds]\n self.radC = radC[inds]\n\n self.velA = velA[inds]\n self.velB = velB[inds]\n self.velC = velC[inds]\n\n self.altA = altA[inds]\n self.altB = altB[inds]\n self.altC = altC[inds]\n\n self.mlatA = mlatA[inds]\n self.mlatB = mlatB[inds]\n self.mlatC = mlatC[inds]\n\n self.mlongA = mlongA[inds]\n self.mlongB = mlongB[inds]\n 
self.mlongC = mlongC[inds]\n\n self.mltA = mltA[inds]\n self.mltB = mltB[inds]\n self.mltC = mltC[inds]\n\n self.secondsA = secondsA[inds]\n self.secondsB = secondsB[inds]\n self.secondsC = secondsC[inds]",
"def check_time_server(self):\n ack = self.check_server_activity()\n if self.am_leader:\n t = Timer(5, self.set_offset_for_processes)\n t.daemon = True\n t.start()\n else:\n t = Timer(10, self.check_time_server)\n t.daemon = True\n t.start()\n return ack",
"def checkUpstreamScheduler():",
"def test_schedule_across_dst(self):\n self.mockTicketAddMessage()\n # start five hours from now\n params = self._getNowAsDict(add_hours=266) # 11 days 2 hours from now\n target = self._getNowAsDict(add_hours=271) # this implies a maintenance scheduled at CST(-6 UTC) into CDT (-5 CDT) \n target_maintcal_datetime = MaintcalDatetime(\n int(target['start_year']),\n int(target['start_month']),\n int(target['start_day']),\n int(target['start_hour']), \n int(params['start_minute']),0) \n params['tzname'] = 'America%2FChicago'\n params['is_dst'] = '1'\n response = self.app.post(url_for(controller='maintenances', action='schedule', id=3),\n params=params)\n self.assert_(response.body)\n this_maint = db_sess.query(ScheduledMaintenance).get(3)\n self.assertEqual(this_maint.services[0].start_time,target_maintcal_datetime)",
"def check(self):\n\t\tfails = 0\n\t\tworktime_month = timedelta(hours=0)\n\t\tworktime_homeoffice = timedelta(hours=0)\n\t\tfor num in self.workdays:\n\t\t\tday = self.workdays[num]\n\t\t\tif day.daytype == DayType.work:\t\n\t\t\t\tfails += day.check(num)\n\t\t\t\tworktime = day.getWorkingTime()\n\t\t\t\tworktime_month += worktime\n\t\t\t\thotime = day.getHomeofficeTime()\n\t\t\t\tworktime_homeoffice += hotime\t\t\t\t\n\t\tif (worktime_homeoffice > timedelta(days=10)):\n\t\t\tprRed('! {:02d}. max. mtl. Heimarbeit überschritten ({} <= 10days)'.format(num, worktime))\n\t\t\tfails += 1\n\t\tprint('----------------')\n\t\tif fails == 0:\n\t\t\tprGreen('Keine Verstöße erkannt')\n\t\telse:\n\t\t\tprRed('{0} Verstöße erkannt'.format(fails))",
"def check_availability(car):\n plate_num = int(car.plate[-1]) # Get the last number of the plate\n date = car.date # Get the date \n weekday = (date.weekday() + 1)*2 # Get the number of the week day\n time = date.time() # Get the time \n restricted = [(weekday-1) , weekday % 10] # Create an interval of restrictions\n check_time = (time <= morning_end.time() and time >= morning_in.time()) or \\\n (time <= afternoon_end.time() and time >= afternoon_in.time())\n # Boolean that verify the time \n if check_time and plate_num in restricted:\n car.availability = False\n else:\n car.availability = True",
"def schedule_monitor(schedule):\n if schedule[\"state\"] == EC2State.STOPPED:\n if (date.today() - schedule[\"lastStateChange\"]).days >= 7 - schedule[\n \"schedule\"\n ]:\n schedule[\"state\"] = EC2State.STARTED\n elif schedule[\"state\"] == EC2State.STARTED:\n if (date.today() - schedule[\"lastStateChange\"]).days >= schedule:\n schedule[\"state\"] = EC2State.STOPPED\n else:\n return schedule, False\n\n return schedule, True",
"def close_shift(self, shift_id, supervisor_id):\n try:\n res = False\n event = self.db_handler.get_shift_extended_info_by_id(shift_id)\n staff_on_shift = self.db_handler.get_staff_on_shift_for_closing(shift_id)\n staff_all_set_up = 0\n sh_reg = self.db_handler.get_shift_registration_by_staff_id_and_shift_id(supervisor_id, shift_id)\n\n diff = datetime.now() - event[9]\n\n if diff.days >= 0 and diff.seconds >= 0:\n for staff in staff_on_shift:\n if str(staff[0]) != str(supervisor_id):\n if staff[1] is not None and staff[2] is not None:\n staff_all_set_up += 1\n\n if staff_all_set_up == len(staff_on_shift)-1: # supervisor anyway is on shift\n res = True\n else:\n res = False # too early\n\n if res:\n self.check_out_off_shift(supervisor_id, sh_reg[0])\n self.db_handler.update_shift_status(shift_id)\n\n return res\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def self_check(world_state, ros_util):\n if (\n ros_util.auto_function_command == 32\n or ros_util.auto_function_command == 0\n ):\n rospy.loginfo(\"Cancelling auto-function command...\")\n ros_util.publish_actions(\"stop\", 0, 0, 0, 0)\n ros_util.control_pub.publish(False)\n return -1\n\n if world_state.battery < 10:\n rospy.loginfo(\"Low battery! Rover must charge ASAP or it will halt!\")\n world_state.target_location = [0, 0]\n return 3\n\n # Future status checks for physical hardware\n \"\"\"\n if world_state.on_side == True:\n rospy.loginfo(\"On side! Attempting auto self-right...\")\n return 2\n if world_state.hardware_status == False:\n rospy.loginfo(\"Hardware failure! Shutting down...\")\n ros_util.publish_actions('stop', 1, 0, 0, 0)\n ros_util.control_pub.publish(False)\n return -1\n \"\"\"\n\n return 1",
"def _compute_current_timesheet(self, date, time_form, time_to, address):\n timesheet_ids = self.with_context(\n tz=self.env.user.tz, lang=self.env.user.lang).timesheet_ids\n weekday = date.weekday()\n current_timesheet = False\n current_date_from = datetime.datetime.min\n valid_timesheets = list(\n filter(lambda time: time.dayofweek == str(weekday)\n and not time.date_from or (\n time.date_from and time.date_from >= date)\n and not time.date_to or (time.date_to and time.date_to <= date)\n and datetime.time(hour=int(time.hour_from), minute=int(\n modf(time.hour_from)[0] * 60)) >= time_form\n and datetime.time(hour=int(time.hour_to), minute=int(\n modf(time.hour_to)[0] * 60)) <= time_to\n and time.adress_id == address.id,\n timesheet_ids\n ))\n if not valid_timesheets:\n return False\n # No we will check if there is an exception for the date\n for valid_time in valid_timesheets:\n if valid_time.date_from:\n if valid_time.date_from > current_date_from:\n current_date_from = valid_time.date_from\n current_timesheet = valid_time\n\n return current_timesheet or valid_timesheets[0]",
"def check(ht, mt, st, pid):\n\n ns_ticks = 0\n shift = 0\n\n diff = (mt - ht + TOTAL_TICKS) % TOTAL_TICKS\n for rep in range(12):\n tmp = diff + rep * TOTAL_TICKS\n if tmp % 11 == 0:\n ns_ticks = tmp / 11\n shift = (ht - ns_ticks + TOTAL_TICKS) % TOTAL_TICKS\n\n if (ns_ticks + shift) % TOTAL_TICKS != ht:\n continue\n\n if (12*ns_ticks + shift) % TOTAL_TICKS != mt:\n continue\n\n if (720*ns_ticks + shift) % TOTAL_TICKS != st:\n continue\n\n # calc_st = (720*ns_ticks + shift) % TOTAL_TICKS\n # if calc_st == st:\n ns = ns_ticks % 1e9\n ns_ticks /= 1e9\n\n secs = ns_ticks % 60\n ns_ticks /= 60\n\n mins = ns_ticks % 60\n ns_ticks /= 60\n\n hrs = ns_ticks\n\n if hrs < 12:\n print(f\"Case #{pid}: {int(hrs)} {int(mins)} {int(secs)} {int(ns)}\")\n return True\n\n return False",
"def check_diff(self,game,wanted_diff,wanted_starting_time=''):\n return True",
"def do_sync(self):\n # Synch up the station's clock if it's been more than clock_check\n # seconds since the last check:\n now_ts = time.time()\n if now_ts - self.last_synch_ts >= self.clock_check:\n self.last_synch_ts = now_ts\n try:\n console_time = self.engine.console.getTime()\n if console_time is None: return\n # getTime can take a long time to run, so we use the current\n # system time\n diff = console_time - time.time()\n syslog.syslog(syslog.LOG_INFO, \n \"engine: Clock error is %.2f seconds (positive is fast)\" % diff)\n if abs(diff) > self.max_drift:\n try:\n self.engine.console.setTime()\n except NotImplementedError:\n syslog.syslog(syslog.LOG_DEBUG, \"engine: Station does not support setting the time\")\n except NotImplementedError:\n syslog.syslog(syslog.LOG_DEBUG, \"engine: Station does not support reading the time\")",
"def check(self):\n validity_year = int(self.date[0:4])\n validity_month = int(self.date[5:7])\n validity_day = int(self.date[8:10])\n if datetime.today().year > validity_year:\n self.flag = False\n elif datetime.today().year == validity_year:\n if datetime.today().month > validity_month:\n self.flag = False\n elif datetime.today().month == validity_month:\n if datetime.today().day > validity_day:\n self.flag = False\n else:\n self.flag = True\n else:\n self.flag = True\n else:\n self.flag = True",
"def acceptable(self):\n now = datetime.datetime.now()\n origin = datetime.datetime.combine(self.date, datetime.time.min)\n start = origin + datetime.timedelta(hours=6)\n end = origin + datetime.timedelta(days=1)\n morning = end + datetime.timedelta(hours=6)\n if now < origin or now > morning:\n return 0\n if now >= end or now <= start:\n return 1\n return 3",
"def check_out_off_shift(self, staff_id, shift_reg_id):\n try:\n res = False\n\n date = datetime.now()\n mysql_date = f'{date.year}-{date.month}-{date.day} {date.hour}:{date.minute}:00'\n dates = self.db_handler.get_event_date_by_shift_registration_id_and_staff_id(shift_reg_id, staff_id)\n\n diff = dates[3] - date\n shift_reg = self.db_handler.get_shift_registration_by_shift_reg_id(shift_reg_id)\n\n if diff.days < 0 and diff.seconds >= 0:\n res = True\n\n if shift_reg[6] is not None:\n mysql_date = f'{shift_reg[6].year}-{shift_reg[6].month}-{shift_reg[6].day} {shift_reg[6].hour}:{shift_reg[6].minute}:00'\n\n self.db_handler.check_out_off_shift(shift_reg_id, mysql_date)\n return res\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def validSchedule(self,schedule):\n\t\tdef validRow(content,start,row):\n \"\"\"\n part of valid Schedule, only check whether a given\n row is valid\n @param start: the start position\n @param row: given waiting area\n @return: a boolean value\n \"\"\"\n\t\t\tcur_id = content[1].id\n\t\t\ttry:\n\t\t\t\tnext_c = row[start+content[1].length]\n\t\t\texcept IndexError:\n\t\t\t\treturn True\n\t\t\tif next_c != None:\n\t\t\t\tif cur_id != next_c[1].id:\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\t#print \"row not valid\"\n\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn True\n\n\t\tdef validCol(content,start,schedule):\n \"\"\"\n Similar to validRow,but only check whether the given\n Column is valid\n @param start: the start position\n @param schedule: given schedule\n @return: a boolean value\n \"\"\"\n\t\t\tcur_id = content[1].id\n\t\t\t#print \"cur_id,length,start\",cur_id,content[1].length,start\n\t\t\tflag = 0\n\t\t\tfor i in range(content[1].length):\n\t\t\t\tfor j in range(len(schedule.w)):\n\t\t\t\t\t#print start,i,content[1]\n\t\t\t\t\tif schedule.w[j][start+i]!=None and \\\n\t\t\t\t\t\tschedule.w[j][start+i][1].id == cur_id:\n\t\t\t\t\t\tflag += 1\n\t\t\tif flag != content[1].length:\n\t\t\t\t#print \"col not valid\",flag,content[1].length,cur_id\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn True\n\t\tdef validRowCol(content,start,row,schedule):\n \"\"\"\n Simply combine validRow and validCol\n \"\"\"\n\t\t\tif validRow(content,start,row) and \\\n\t\t\t\tvalidCol(content,start,schedule):\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\n\t\ti = 0\n\t\twhile i < len(schedule.w):\n\t\t\tj = 0\n\t\t\twhile j < len(schedule.w[i]):\n\t\t\t\tc = schedule.w[i][j]\n\t\t\t\tif c != None:\n\t\t\t\t\tif not validRowCol(c,j,schedule.w[i],schedule):\n\t\t\t\t\t\treturn False,(c,i)\n\t\t\t\t\telse:\n\t\t\t\t\t\tj += c[1].length\n\t\t\t\telse:\n\t\t\t\t\tj += 1\n\t\t\ti += 1\n\t\treturn True,None",
"def ts_bootstrap_check(self):\n if not need_default_ts_bootstrap(self._old_table, self._new_table):\n return\n if self.allow_unsafe_ts_bootstrap:\n log.warning(\n \"Bootstraping timestamp column using current time is required. \"\n \"Bypassing the safety check as requested\"\n )\n return\n raise OSCError(\"UNSAFE_TS_BOOTSTRAP\")",
"def must_run(self):\r\n self.current_time = datetime.now()\r\n return all([self._minute(), self._hour(), self._day_of_month(), self._month(), self._day_of_week()])",
"def test_shift():\n\n test_hours = 113\n\n shift_test = shift.schedule(weekly_op_hours=test_hours)\n\n week_sched, schedule_type, daily_hours = shift_test.calc_weekly_schedule()\n\n hours_sum = daily_hours.sum()[0]\n\n hrs_per_operating = pd.concat([daily_hours,week_sched.where(\n week_sched.operating==True\n ).groupby('dayofweek').operating.sum()], axis=1\n )\n\n hrs_per_operating.daily_hours.update(\n hrs_per_operating.daily_hours.divide(\n hrs_per_operating.operating\n )\n )\n\n # hrs_per_operating = daily_hours.apply(\n # lambda x: x.divide(np.floor(x.divide(8))*8)\n # )\n\n weekly_hours_sum = week_sched.set_index('dayofweek').join(\n hrs_per_operating['daily_hours']\n )\n\n weekly_hours_sum = weekly_hours_sum.daily_hours.multiply(\n weekly_hours_sum.operating\n ).sum(level=0)\n\n assert test_hours == hours_sum\n\n # This tests that the correct number of hours are designated as operating\n assert all(weekly_hours_sum == daily_hours.daily_hours)",
"def test_spring_forward(self):\n # Exact crossover time:\n # datetime.datetime(2011, 3, 13, 2, 0, 0, tzinfo=pytz.utc)\n # This test will use times on either side of it.\n\n # From the PST vantage point, the run time is 21.1 hours away:\n s1a, s1b = self.hours_to_job_at_datetime(\n 'tz_test_job', 2011, 3, 13, 1, 55, 0)\n\n # From the PDT vantage point, the run time is 20.9 hours away:\n s2a, s2b = self.hours_to_job_at_datetime(\n 'tz_test_job', 2011, 3, 13, 3, 05, 0)\n\n self._assert_range(s1b - s1a, 23.99, 24.01)\n self._assert_range(s2b - s2a, 23.99, 24.01)\n\n # So we lose an hour here. The 2 AM block does not exist.\n # If this were not a DST crossover, this difference would be\n # 1.2, not 0.2.\n self._assert_range(s1a - s2a, 0.19, 0.21)",
"def make_move(self, move, check_valid=True):\r\n self.board[move.sr][move.sc] = \"--\"\r\n self.board[move.er][move.ec] = move.pieceMoved\r\n self.moveLog.append(move)\r\n self.turn_white = not self.turn_white\r\n if move.pieceMoved == 'wk':\r\n self.wKingPos = (move.er, move.ec)\r\n elif move.pieceMoved == 'bk':\r\n self.bKingPos = (move.er, move.ec)\r\n\r\n if move.isEnpassantMove:\r\n self.board[move.sr][move.ec] = \"--\"\r\n\r\n if move.pieceMoved[1] == 'p' and abs(move.sr - move.er) == 2:\r\n self.enpas_pos = ((move.er + move.sr) // 2, move.ec)\r\n else:\r\n self.enpas_pos = ()\r\n\r\n if move.isPawnPromotion and not check_valid:\r\n promoted_piece = \"a\"\r\n while promoted_piece not in ('q', 'r', 'b', 'n'):\r\n promoted_piece = input(\"Promote to q, r, b, or n: \")\r\n self.board[move.er][move.ec] = move.pieceMoved[0] + promoted_piece\r\n\r\n # castle\r\n if move.castle:\r\n if move.ec - move.sc == 2:\r\n self.board[move.er][move.ec - 1] = self.board[move.er][move.ec + 1]\r\n self.board[move.er][move.ec + 1] = '--'\r\n else:\r\n self.board[move.er][move.ec + 1] = self.board[move.er][move.ec - 2]\r\n self.board[move.er][move.ec - 2] = '--'\r\n\r\n # castle rights on rook, king move\r\n self.update_castle_rights(move)\r\n self.castleRightsLog.append(CastleRights(self.cr_castle_r.wks, self.cr_castle_r.bks,\r\n self.cr_castle_r.wqs, self.cr_castle_r.bqs))",
"def master_prep (fits_master, data_shape, create_master, pick_alt=True):\n\n if get_par(set_zogy.timing,tel):\n t = time.time()\n\n\n # infer path, imgtype, evening date and filter from input [fits_master]\n path, filename = os.path.split(fits_master)\n\n\n # previously, master frames were named [imgtype]_[date_eve]..\n # instead of [tel]_[imgtype]_[date_eve]..\n #imgtype, date_eve = filename.split('.fits')[0].split('_')[0:2]\n # the following selection handles both cases\n imgtype, date_eve = (filename.split('.fits')[0].split('{}_'.format(tel))[-1]\n .split('_')[0:2])\n\n\n # for flat, also extract filter\n if imgtype == 'flat':\n filt = filename.split('.fits')[0].split('_')[-1]\n else:\n filt = None\n\n\n # check if already present (if fpacked, fits_master below will\n # point to fpacked file)\n master_present, fits_master = already_exists (fits_master, get_filename=True)\n\n\n if master_present:\n log.info ('master {} {} exists'.format(imgtype, fits_master))\n\n\n # check if master bias/flat does not contain any red flags:\n master_ok = True\n if master_present:\n if qc_flagged (fits_master):\n master_ok = False\n log.warning ('existing master {} {} contains a red flag'\n .format(imgtype, fits_master))\n\n\n if not (master_present and master_ok):\n\n # in night mode only, sleep for 60s to make sure individual\n # biases and/or flats have been reduced and written to disk;\n # this is only used in the old \"chopper\" night mode, where the\n # master files were created on-the-fly triggered by the first\n # science frame of the night. At ilifu/Slurm or in the google\n # cloud, the master frames are prepared in advance before the\n # night starts\n google_cloud = (fits_master[0:5] == 'gs://')\n if proc_mode == 'night' and not google_cloud:\n log.warning ('waiting for 60s for all individual calibration frames '\n 'to have been reduced before continuing with '\n '[master_prep]')\n time.sleep(60)\n\n\n # prepare master image from files in [path] +/- the specified\n # time window\n nwindow = int(get_par(set_bb.cal_window,tel)[imgtype])\n\n\n # for both ilifu/Slurm and google cloud, the individual\n # calibration files are in red_dir\n red_dir = get_par(set_bb.red_dir,tel)\n\n\n # collect individual calibration files in [file_list]\n file_list = []\n for n_day in range(-nwindow, nwindow+1):\n # determine mjd at noon (local or UTC, does not matter) of\n # date_eve +- n_day\n mjd_noon = date2mjd('{}'.format(date_eve), time_str='12:00') + n_day\n # corresponding path\n date_tmp = (Time(mjd_noon, format='mjd').isot.split('T')[0]\n .replace('-','/'))\n path_tmp = '{}/{}/{}/{}_'.format(red_dir, date_tmp, imgtype, tel)\n\n # additional search string, which will select particular\n # filter for flats\n if imgtype=='flat':\n search_str = '{}.fits'.format(filt)\n else:\n search_str = '.fits'\n\n # collect files\n file_list.append(list_files(path_tmp, search_str=search_str))\n\n\n # clean up lists in [file_list] and sort\n file_list = sorted([f for sublist in file_list for f in sublist])\n nfiles = len(file_list)\n\n\n if create_master:\n\n # do not consider image with header QC-FLAG set to red,\n # and also avoid using MeerLICHT evening flats due to dome\n # vignetting for period from July 2019 until February\n # 2020; moreover, mjd_obs is read from header to be able\n # to sort the calibration files in time futher below\n\n # execute this block only if [create_master] is switched\n # on, otherwise the line with read_hdulist below leads to\n # an exception when running both bias and flatfield\n # 
reductions on Slurm with multiple tasks/processes:\n # Header missing END card. [blackbox_reduce, line 1323]\n mjd_obs = np.zeros(nfiles)\n mask_keep = np.ones(nfiles, dtype=bool)\n for i_file, filename in enumerate(file_list):\n\n log.info ('reading header of {}'.format(filename))\n # check!!! - the following line leads to an exception when\n # running both bias and flatfield reductions on Slurm with\n # multiple tasks/processes:\n # Header missing END card. [blackbox_reduce, line 1323]\n header_tmp = read_hdulist (filename, get_data=False,\n get_header=True)\n if 'QC-FLAG' in header_tmp and header_tmp['QC-FLAG'] == 'red':\n mask_keep[i_file] = False\n\n # record MJD-OBS in array\n if 'MJD-OBS' in header_tmp:\n mjd_obs[i_file] = header_tmp['MJD-OBS']\n\n # for period from July 2019 until February 2020, avoid\n # using MeerLICHT evening flats due to dome vignetting\n mjd_avoid = Time(['2019-07-01T12:00:00', '2020-03-01T12:00:00'],\n format='isot').mjd\n if (tel=='ML1' and mjd_obs[i_file] % 1 > 0.5 and\n mjd_obs[i_file] > mjd_avoid[0] and\n mjd_obs[i_file] < mjd_avoid[1]):\n\n mask_keep[i_file] = False\n\n\n file_list = np.array(file_list)[mask_keep]\n mjd_obs = mjd_obs[mask_keep]\n nfiles = len(file_list)\n\n\n\n # look for a nearby master instead if the master bias/flat\n # present contains a red flag, or if there are too few\n # individual frames to make a master, or the input\n # [create_master] is switched off\n if nfiles < 5 or not master_ok or not create_master:\n\n if imgtype=='flat':\n msg = 'flat in filter {}'.format(filt)\n else:\n msg = imgtype\n\n # if input [pick_alt] is True, look for a nearby master\n # flat, otherwise just return None\n if pick_alt or not create_master:\n fits_master_near = get_nearest_master(date_eve, imgtype,\n fits_master, filt=filt)\n else:\n if master_ok:\n log.warning ('too few good frames available to produce '\n 'master {} for evening date {} +/- window '\n 'of {} days'.format(msg, date_eve, nwindow))\n return None\n\n\n if fits_master_near is not None:\n\n # if master bias subtraction switch is off, the master\n # bias is still prepared; only show message below in\n # case switch is on, otherwise it is confusing\n if ((imgtype=='bias' and get_par(set_bb.subtract_mbias,tel))\n or imgtype=='flat'):\n\n log.warning ('using {} as master for evening date {}'\n .format(fits_master_near, date_eve))\n\n # previously we created a symbolic link so future\n # files would automatically use this as the master\n # file, but as this symbolic link is confusing, let's\n # not do that; searching for nearby master frame takes\n # a negligible amount of time\n # os.symlink(fits_master_near, fits_master)\n fits_master = fits_master_near\n\n else:\n if ((imgtype=='bias' and get_par(set_bb.subtract_mbias,tel))\n or imgtype=='flat'):\n\n log.error('no alternative master {} found'.format(msg))\n\n return None\n\n else:\n\n\n # should number of biases/darks/flats exceeds ncal_max,\n # select the ones closest in time to midnight of the\n # evening date\n nmax = int(get_par(set_bb.ncal_max,tel)[imgtype])\n\n # difference between observed MJD and mignight of the\n # evening date\n mjd_midnight = date2mjd('{}'.format(date_eve), time_str='23:59')\n mjd_obs_delta = mjd_obs - mjd_midnight\n # sort the observed delta MJDs of the files\n index_sort = np.argsort (np.abs(mjd_obs_delta))\n # select nmax\n file_list = file_list[index_sort][0:nmax]\n # update mjd_obs_delta for use further below\n mjd_obs_delta = mjd_obs_delta[index_sort][0:nmax]\n nfiles_orig = nfiles\n nfiles = 
len(file_list)\n\n\n # if nearest flat taken in the past is not within 12 hours\n # of midnight of the evening date, do not bother to make a\n # new master as it would be similar to (or worse than)\n # yesterday's master; make an exception if flats from\n # future nights are included\n all_past = np.all(mjd_obs_delta < 0)\n if np.amin(np.abs(mjd_obs_delta)) > 0.5 and all_past:\n log.warning ('no past calibration files within 12 hours of '\n 'midnight of {}; not making master {}'\n .format(date_eve, fits_master))\n return None\n\n\n # create the master frame\n if imgtype=='flat':\n msg = 'flat in filter {}'.format(filt)\n else:\n msg = imgtype\n\n log.info ('making {} master {} for night {} from the following '\n 'files:\\n{}'.format(tel, msg, date_eve, file_list))\n\n if nfiles_orig > nmax:\n log.warning ('number of available {} frames ({}) exceeds the '\n 'maximum specified ({}); using the frames closest '\n 'in time to midnight of the evening date ({})'\n .format(imgtype, len(index_sort), nmax, date_eve))\n\n if imgtype=='bias' and not get_par(set_bb.subtract_mbias,tel):\n log.warning ('this master bias will not be applied to the input '\n 'image as [subtract_mbias] is set to False)')\n\n\n # assuming that individual flats/biases have the same\n # shape as the input data\n ysize, xsize = data_shape\n master_cube = np.zeros((nfiles, ysize, xsize), dtype='float32')\n\n\n # initialize master header\n header_master = fits.Header()\n\n\n # fill the cube; ra_flats and dec_flats are used to check\n # offsets between flats\n ra_flats = []\n dec_flats = []\n for i_file, filename in enumerate(file_list):\n\n master_cube[i_file], header_tmp = read_hdulist(filename,\n get_header=True)\n\n if imgtype=='flat':\n # divide by median over the region [set_bb.flat_norm_sec]\n if 'MEDSEC' in header_tmp:\n median = header_tmp['MEDSEC']\n else:\n index_flat_norm = get_par(set_bb.flat_norm_sec,tel)\n median = np.median(master_cube[i_file][index_flat_norm])\n\n log.info ('flat name: {}, median: {:.1f} e-'\n .format(filename, median))\n\n if median != 0:\n master_cube[i_file] /= median\n\n # collect RA and DEC to check for dithering\n if 'RA' in header_tmp and 'DEC' in header_tmp:\n ra_flats.append(header_tmp['RA'])\n dec_flats.append(header_tmp['DEC'])\n\n\n # copy some header keyword values from first file\n if i_file==0:\n for key in ['IMAGETYP', 'DATE-OBS', 'FILTER', 'RA', 'DEC',\n 'XBINNING', 'YBINNING', 'MJD-OBS', 'AIRMASS',\n 'ORIGIN', 'TELESCOP', 'PYTHON-V', 'BB-V']:\n if key in header_tmp:\n header_master[key] = (header_tmp[key],\n header_tmp.comments[key])\n\n\n if imgtype=='flat':\n comment = 'name reduced flat'\n else:\n comment = 'name gain/os-corrected {} frame'.format(imgtype)\n\n # add name reduced calibration file to master header\n header_master['{}{}'.format(imgtype.upper(), i_file+1)] = (\n filename.split('/')[-1].split('.fits')[0],\n '{} {}'.format(comment, i_file+1))\n\n # add original name of calibration file to master header\n if 'ORIGFILE' in header_tmp.keys():\n header_master['{}OR{}'.format(imgtype.upper(), i_file+1)] = (\n header_tmp['ORIGFILE'], 'name original {} {}'\n .format(imgtype, i_file+1))\n\n # also copy a few header keyword values from the last file\n if i_file==nfiles-1:\n for key in ['DATE-END', 'MJD-END']:\n if key in header_tmp:\n header_master[key] = (header_tmp[key],\n header_tmp.comments[key])\n\n\n # determine the median\n master_median = np.median(master_cube, axis=0)\n\n\n # add number of files combined\n header_master['N{}'.format(imgtype.upper())] = (\n nfiles, 
'number of {} frames combined'.format(imgtype.lower()))\n\n\n # add time window used\n header_master['{}-WIN'.format(imgtype.upper())] = (\n nwindow, '[days] input time window to include {} frames'\n .format(imgtype.lower()))\n\n\n # add some more header keywords to the master flat\n if imgtype=='flat':\n\n sec_tmp = get_par(set_bb.flat_norm_sec,tel)\n value_tmp = '[{}:{},{}:{}]'.format(\n sec_tmp[0].start+1, sec_tmp[0].stop+1,\n sec_tmp[1].start+1, sec_tmp[1].stop+1)\n header_master['STATSEC'] = (\n value_tmp, 'pre-defined statistics section [y1:y2,x1:x2]')\n\n\n header_master['MFMEDSEC'] = (\n np.median(master_median[sec_tmp]),\n 'median master flat over STATSEC')\n\n\n header_master['MFSTDSEC'] = (\n np.std(master_median[sec_tmp]),\n 'sigma (STD) master flat over STATSEC')\n\n\n # \"full\" image statistics\n index_stat = get_rand_indices(master_median.shape)\n __, median_master, std_master = sigma_clipped_stats(\n master_median[index_stat], mask_value=0)\n header_master['MFMED'] = (median_master, 'median master flat')\n header_master['MFSTD'] = (std_master, 'sigma (STD) master flat')\n\n\n # check if flats were dithered; calculate offset in\n # arcsec of each flat with respect to the previous one\n ra_flats = np.array(ra_flats)\n dec_flats = np.array(dec_flats)\n noffset = 0\n offset_mean = 0\n if len(ra_flats) > 0 and len(dec_flats) > 0:\n offset = 3600. * haversine (ra_flats, dec_flats,\n np.roll(ra_flats,1),\n np.roll(dec_flats,1))\n # count how many were offset by at least 5\"\n mask_off = (offset >= 5)\n noffset = np.sum(mask_off)\n if noffset > 0:\n offset_mean = np.mean(offset[mask_off])\n\n\n header_master['N-OFFSET'] = (noffset, 'number of flats with '\n 'offsets > 5 arcsec')\n header_master['OFF-MEAN'] = (offset_mean,\n '[arcsec] mean dithering offset')\n\n if float(noffset)/nfiles >= 0.66:\n flat_dithered = True\n else:\n flat_dithered = False\n\n header_master['FLATDITH'] = (flat_dithered,\n 'majority of flats were dithered')\n\n\n # set edge and non-positive pixels to 1; edge pixels\n # are identified by reading in bad pixel mask as\n # master preparation is not necessariliy linked to the\n # mask of an object image, e.g. 
in function\n # [masters_left]\n fits_bpm = (get_par(set_bb.bad_pixel_mask,tel)\n .replace('bpm', 'bpm_{}'.format(filt)))\n bpm_present, fits_bpm = already_exists (fits_bpm,\n get_filename=True)\n\n if bpm_present:\n # if mask exists, read it\n data_mask = read_hdulist(fits_bpm)\n mask_replace = ((data_mask==get_par(\n set_zogy.mask_value['edge'],tel)) | (master_median<=0))\n master_median[mask_replace] = 1\n\n\n # now that master flat is produced, calculate - but do\n # not apply - the different channels' normalization\n # factors such that the resulting image would appear\n # smooth without any jumps in levels between the\n # different channels\n __, __, __, __, data_sec_red = define_sections(data_shape,\n tel=tel)\n nchans = np.shape(data_sec_red)[0]\n med_chan_cntr = np.zeros(nchans)\n std_chan_cntr = np.zeros(nchans)\n\n\n # copy of master_median\n master_median_corr = np.copy(master_median)\n\n\n # first match the channels vertically, by using the\n # statistics of the regions at the top of the bottom\n # channels and bottom of the top channels\n nrows = 200\n for i_chan in range(nchans):\n data_chan = master_median_corr[data_sec_red[i_chan]]\n if i_chan < 8:\n med_chan_cntr[i_chan] = np.median(data_chan[-nrows:,:])\n else:\n med_chan_cntr[i_chan] = np.median(data_chan[0:nrows,:])\n\n # correct master image channel\n master_median_corr[data_sec_red[i_chan]] /= med_chan_cntr[i_chan]\n\n\n # channel correction factor applied so far\n factor_chan = 1./med_chan_cntr\n\n\n # now match channels horizontally\n ysize, xsize = data_shape\n ny = get_par(set_bb.ny,tel)\n nx = get_par(set_bb.nx,tel)\n dy = ysize // ny\n dx = xsize // nx\n\n\n nrows = 2000\n ncols = 200\n for i in range(1,nx):\n # index of lower left pixel of upper right channel\n # of the 4 being considered\n y_index = dy\n x_index = i*dx\n\n # statistics of right side of previous channel pair\n data_stat1 = master_median_corr[y_index-nrows:y_index+nrows,\n x_index-ncols:x_index]\n\n # statistics of left side of current channel pair\n data_stat2 = master_median_corr[y_index-nrows:y_index+nrows,\n x_index:x_index+ncols]\n ratio = np.median(data_stat1)/np.median(data_stat2)\n\n # correct relevant channels\n master_median_corr[data_sec_red[i]] *= ratio\n master_median_corr[data_sec_red[i+nx]] *= ratio\n\n # update correction factor\n factor_chan[i] *= ratio\n factor_chan[i+nx] *= ratio\n\n\n if False:\n # normalize corrected master to [flat_norm_sec] section\n sec_tmp = get_par(set_bb.flat_norm_sec,tel)\n ratio_norm = np.median(master_median_corr[sec_tmp])\n master_median_corr /= ratio_norm\n factor_chan /= ratio_norm\n\n\n # normalize correction factors to an average of unity\n factor_chan /= np.mean(factor_chan)\n\n\n # add factor_chan values to header\n for i_chan in range(nchans):\n header_master['GAINCF{}'.format(i_chan+1)] = (\n factor_chan[i_chan], 'channel {} gain correction factor'\n .format(i_chan+1))\n\n\n elif imgtype=='bias':\n\n # add some header keywords to the master bias\n index_stat = get_rand_indices(master_median.shape)\n mean_master, __, std_master = sigma_clipped_stats(\n master_median[index_stat], mask_value=0)\n header_master['MBMEAN'] = (mean_master, '[e-] mean master bias')\n header_master['MBRDN'] = (std_master, '[e-] sigma (STD) master '\n 'bias')\n\n # including the means and standard deviations of the master\n # bias in the separate channels\n __, __, __, __, data_sec_red = define_sections(data_shape,\n tel=tel)\n nchans = np.shape(data_sec_red)[0]\n mean_chan = np.zeros(nchans)\n std_chan = 
np.zeros(nchans)\n\n for i_chan in range(nchans):\n data_chan = master_median[data_sec_red[i_chan]]\n index_stat = get_rand_indices(data_chan.shape)\n mean_chan[i_chan], __, std_chan[i_chan] = sigma_clipped_stats(\n data_chan[index_stat], mask_value=0)\n\n for i_chan in range(nchans):\n header_master['MBIASM{}'.format(i_chan+1)] = (\n mean_chan[i_chan], '[e-] channel {} mean master bias'\n .format(i_chan+1))\n\n for i_chan in range(nchans):\n header_master['MBRDN{}'.format(i_chan+1)] = (\n std_chan[i_chan], '[e-] channel {} sigma (STD) master '\n 'bias'.format(i_chan+1))\n\n\n elif imgtype=='dark':\n\n # add some header keywords to the master dark\n index_stat = get_rand_indices(master_median.shape)\n mean_master, __, std_master = sigma_clipped_stats(\n master_median[index_stat], mask_value=0)\n header_master['MDMEAN'] = (mean_master, '[e-] mean master dark')\n header_master['MDRDN'] = (std_master, '[e-] sigma (STD) master '\n 'dark')\n\n # including the means and standard deviations of the master\n # dark in the separate channels\n __, __, __, __, data_sec_red = define_sections(data_shape,\n tel=tel)\n nchans = np.shape(data_sec_red)[0]\n mean_chan = np.zeros(nchans)\n std_chan = np.zeros(nchans)\n\n for i_chan in range(nchans):\n data_chan = master_median[data_sec_red[i_chan]]\n index_stat = get_rand_indices(data_chan.shape)\n mean_chan[i_chan], __, std_chan[i_chan] = sigma_clipped_stats(\n data_chan[index_stat], mask_value=0)\n\n for i_chan in range(nchans):\n header_master['MDARKM{}'.format(i_chan+1)] = (\n mean_chan[i_chan], '[e-] channel {} mean master dark'\n .format(i_chan+1))\n\n for i_chan in range(nchans):\n header_master['MDRDN{}'.format(i_chan+1)] = (\n std_chan[i_chan], '[e-] channel {} sigma (STD) master '\n 'dark'.format(i_chan+1))\n\n\n # call [run_qc_check] to update master header with any QC flags\n run_qc_check (header_master, tel)\n\n # write fits\n fits_master = write_fits (\n fits_master, master_median.astype('float32'), header_master,\n master=True)\n\n\n if get_par(set_zogy.timing,tel):\n log_timing_memory (t0=t, label='master_prep')\n\n\n return fits_master",
"def test_check_date_tour(self):\n date_start = timezone.now()\n date_end = timezone.now() - timedelta(days=5)\n new_tour = Tournament(date_start=date_start, date_end=date_end)\n\n self.assertEqual(new_tour.check_date(), False)",
"def check_room_co2(num):\n if num == 1:\n if room1_co2(): # Low Humidity needs a mist\n if len(initial_time('co2_room1_timestamp.txt')) == 0:\n time_write_to_file('co2_room1_timestamp.txt', 'w') \n add_co2_room1(True) # Turn ON co2\n time.sleep(5) # Hold ON for 5 secs\n add_co2_room1(False) # Turn OFF co2\n else:\n if mins_since_event('co2_room1_timestamp.txt') > 5:\n add_co2_room1(True) # Turn ON co2\n time.sleep(5) # Hold ON for 5 secs\n add_co2_room1(False) # Turn OFF co2\n time_write_to_file('co2_room1_timestamp.txt', 'w')\n else:\n remove_timestamp('co2_room1_timestamp.txt') \n elif num == 2:\n if room2_co2(): # Low Humidity needs a mist\n if len(initial_time('co2_room2_timestamp.txt')) == 0:\n time_write_to_file('co2_room2_timestamp.txt', 'w') \n add_co2_room2(True) # Turn ON co2\n time.sleep(5) # Hold ON for 5 secs\n add_co2_room2(False) # Turn OFF co2\n else:\n if mins_since_event('co2_room2_timestamp.txt') > 5:\n add_co2_room2(True) # Turn ON co2\n time.sleep(5) # Hold ON for 5 secs\n add_co2_room2(False) # Turn OFF co2\n time_write_to_file('co2_room2_timestamp.txt', 'w')\n else:\n remove_timestamp('co2_room2_timestamp.txt')",
"def check_conditions(self,wanted_starting_time,wanted_diff=0):\n print(time.ctime())\n if self.count_bad_attempts >= utils.MAX_ATTEMPTS_ALLOWED:\n raise MyException(FAILURE)\n soup = self.get_main_soup()\n game = soup.find(attrs={'data-id':self.game_id})\n if ('live' not in game['class'] and game.find(class_=[re.compile(\"live\")]) is None)\\\n and self.check_once: #game is over and we tried at least once to test\n raise MyException(FAILURE)\n if game is None:\n self.count_bad_attempts += 1\n self.check_conditions(wanted_starting_time,wanted_diff)\n elif self.check_time(game,wanted_starting_time) and\\\n self.check_diff(game,wanted_diff,wanted_starting_time):\n raise MyException(SUCCESS)\n else:\n self.count_bad_attempts = 0\n self.scheduler.enter(DELAY,1,action=self.check_conditions,\n argument=(wanted_starting_time,wanted_diff))\n self.scheduler.run()"
] | [
"0.5707775",
"0.56430596",
"0.5583629",
"0.5324878",
"0.5245917",
"0.5093938",
"0.50450516",
"0.49869803",
"0.4892492",
"0.488806",
"0.4854944",
"0.485275",
"0.48295307",
"0.48010412",
"0.47898087",
"0.4774762",
"0.47731286",
"0.4770391",
"0.47576922",
"0.47565082",
"0.4715005",
"0.4692135",
"0.46913084",
"0.4682072",
"0.46710536",
"0.46644533",
"0.46550554",
"0.46521887",
"0.46382046",
"0.46283993"
] | 0.74977547 | 0 |
Check the status of the application, i.e., whether it is running on the master or slave. Also check for problem states, such as the web dyno running on the slave or both workers running. | def check_status():
    # assume no web dynos on master - there should never be a web dyno on master
    r = req.get(f"{MASTER_API_URL}/formation/worker", headers=MASTER_API_HEADERS)
    if r.status_code != req.codes.ok:
        print("Couldn't get master worker formation")
        print(r.status_code, ":", r.text)
        return 'unknown:1'
    master_worker = r.json()['quantity'] # this is guaranteed to work, I think
    r = req.get(f"{SLAVE_API_URL}/formation/worker", headers=SLAVE_API_HEADERS)
    if r.status_code != req.codes.ok:
        print("Couldn't get slave worker formation")
        print(r.status_code, ":", r.text)
        return 'unknown:2'
    slave_worker = r.json()['quantity']
    r = req.get(f"{SLAVE_API_URL}/formation/web", headers=SLAVE_API_HEADERS)
    if r.status_code != req.codes.ok:
        print("Couldn't get slave web formation")
        print(r.status_code, ":", r.text)
        return 'unknown:3'
    slave_web = r.json()['quantity']
    # all done
    if slave_web != 0:
        return 'forbidden-web'
    elif master_worker != 0 and slave_worker != 0:
        return 'both'
    elif master_worker != 0:
        return 'master'
    elif slave_worker != 0:
        return 'slave'
    else:
        return 'none' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_configuration_server(self) -> bool:\n return (\n self.container is not None\n and self.container.exec_run(\n \"bash -c 'curl -s --head http://localhost:19071/ApplicationStatus'\"\n )\n .output.decode(\"utf-8\")\n .split(\"\\r\\n\")[0]\n == \"HTTP/1.1 200 OK\"\n )",
"def is_running(self):\n status = self.get_status_response()\n return ((status[1] & 2) == 2)\n #end is_running()",
"def check_status():\n status = \"\"\n if os.path.exists(current_app.config[\"ACI_STARTED_FILE\"]):\n logger.debug(\"application started flag is set\")\n # check mongo connection \n try:\n from . utils import get_db\n assert len(get_db().collection_names()) >= 0\n except Exception as e:\n logger.debug(\"failed to connect to mongo db: %s\", e)\n return (False, \"failed to connect to mongo database\")\n # check redis connection\n try:\n from . utils import get_redis\n assert get_redis().dbsize() >= 0\n except Exception as e:\n logger.debug(\"failed to connect to redis db: %s\", e)\n return (False, \"failed to connect to redis database\")\n # started flag and successfully connected to mongo and redis\n return (True, \"started\")\n\n logger.debug(\"application started flag not found, checking for status\")\n if os.path.exists(current_app.config[\"ACI_STATUS_FILE\"]):\n try:\n with open(current_app.config[\"ACI_STATUS_FILE\"], \"r\") as f:\n status = f.read()\n logger.debug(\"application status: %s\" % status)\n except Exception as e:\n logger.debug(\"failed to open status file: %s\" % e)\n else:\n logger.debug(\"application status flag not found\")\n status = \"not-ready\"\n return (False, status)",
"def service_check(self, env):\n import params\n\n self.active_master_host = params.hawqmaster_host\n self.active_master_port = params.hawq_master_address_port\n self.checks_failed = 0\n self.total_checks = 2\n\n # Checks HAWQ cluster state\n self.check_state()\n\n # Runs check for writing and reading tables on HAWQ\n self.check_hawq()\n\n # Runs check for writing and reading external tables on HDFS using PXF, if PXF is installed\n if params.is_pxf_installed:\n self.total_checks += 1\n self.check_hawq_pxf_hdfs()\n else:\n Logger.info(\"PXF not installed. Skipping HAWQ-PXF checks...\")\n\n if self.checks_failed != 0:\n Logger.error(\"** FAILURE **: Service check failed {0} of {1} checks\".format(self.checks_failed, self.total_checks))\n sys.exit(1)\n\n Logger.info(\"Service check completed successfully\")",
"def test_server_is_up_and_running(self):\n r = requests.get(self.get_server_url())\n self.assertEqual(r.status_code, 200)",
"def check_yarn_service(master, ec2_opts, num_nodes):\n output = spark_ec2.ssh_read(master, ec2_opts, \"/root/ephemeral-hdfs/bin/yarn node -list -all |grep RUNNING |wc -l\")\n # Ok if one slave is down\n return int(output) >= int(num_nodes) - 1",
"def check_status(self):",
"def slave_status():\n run_mysql_command(\"SHOW SLAVE STATUS\\G;\")",
"def is_sm_running() -> bool:\n initd = '/etc/init.d'\n print(\"Checking SUSE Manager running...\")\n\n # Get tomcat\n tomcat = \"\"\n for cmd in os.listdir(initd):\n if cmd.startswith('tomcat'):\n tomcat = initd + \"/\" + cmd\n break\n\n return os.popen(tomcat + \" status 2>&1\").read().strip().find('dead') == -1",
"def ready(self):\n\n if not self.running:\n return False\n\n try:\n response = requests.get(\n 'http://{}:{}/v1/kv/health'.format(\n self.running_host,\n self.running_port\n )\n )\n except requests.ConnectionError:\n return False\n\n if response.status_code == 404:\n return True\n elif response.status_code == 500:\n return False\n else:\n return False",
"def _ServerIsRunning( self ):\n return utils.ProcessIsRunning( self._gocode_handle )",
"def issuer_liveness_check():\n global app_config\n\n if app_config[\"running\"]:\n # return True until we get a shutdown request\n return True\n\n # return True until the work queue is cleared\n return tob_connection_active()",
"def health_ok(self):\n for client in self.clients():\n if client.run_cmd('ls'):\n log.info('Vmware cluster is up.')\n return True\n else:\n return False",
"def check_running(self, fail_on_error=True):\n status = True\n state = self.check_mount_state(self.running_hosts)\n if state[\"unmounted\"] or state[\"nodirectory\"]:\n self.log.error(\n \"Error: dfuse not running on %s\",\n str(state[\"unmounted\"].union(state[\"nodirectory\"])))\n status = False\n if fail_on_error:\n raise CommandFailure(\"dfuse not running\")\n return status",
"def checkBuildStatus(self):\n pass",
"def pre_upgrade_checks(self):\n\n #HostOverview\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST OVERVIEW\")\n Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Ambari version\\t\\t:{0}\".format(self.ambari_version))\n\n #Check OS\n os = platform.dist()\n if os[1] != None:\n Logger.info(\"Operating System\\t\\t:{0} {1} - {2}\".format(os[0],os[1],os[2]))\n else:\n Logger.error(\"Unable to fetch OS details.\")\n self.terminate()\n return\n\n self.check_java_version()\n self.check_exactly_one_current_version()\n\n\n #Check if rack awareness is enabled ?\n rack_awareness = \"SELECT DISTINCT rack_info FROM hosts WHERE rack_info!='/default-rack';\"\n self.cursor.execute(rack_awareness)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.info(\"Rack Awareness ?\\t\\tNo\\n\")\n else:\n Logger.info(\"Rack Awareness ?\\t\\tYes\\n\")\n\n #Security Overview\n self.check_security()\n\n #Check High Availability configuration\n self.check_high_availability()\n\n #Check Metastores\n self.check_metastore()",
"def check_connection_to_db(self):\n try:\n self._client.admin.command('ismaster')\n return True\n except Exception:\n return False",
"def check_microservice(params) -> None:\n cmd = \"docker container inspect -f '{{.State.Running}}' bg_changer >/dev/null 2>&1\"\n if os.system(cmd) == 0:\n print(\"Microservice is running\")\n else:\n print(\"Microservice is NOT running\")",
"def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)",
"def check_that_instance_is_alive(self):\n if not self.instance.is_running():\n raise Exception(f\"Starter instance is not running. Base directory: {str(self.basedir)}\")\n if self.instance.status() == psutil.STATUS_ZOMBIE:\n raise Exception(f\"Starter instance is a zombie. Base directory: {str(self.basedir)}\")",
"def status(self):\n if self.app_id:\n return self.yarn_api.apps_info(self.app_id)\n else:\n raise KnitException(\"Cannot get status, app not started\")",
"def isRunning(self):\n if not self.hasBeenStarted():\n return False\n \n if not self._slave_dhcp_client_proc.poll(): # Poll our direct child (sudo)\n return False\n \n for pid in self._all_processes_pid:\n if not self._checkPid(pid):\n return False\n \n return True",
"def __call__(self):\n status = self.os.popen('circusctl status validator').read().strip()\n\n if status == 'active':\n return True\n elif status == 'stopped':\n return False",
"def index():\n logging.debug('Healthy check.')\n pass # healthy check",
"def index():\n logging.debug('Healthy check.')\n pass # healthy check",
"def is_running(self) -> bool:\n return False",
"def lantern_check():\n if not app.config.get(\"ENABLE_LANTERN\", False):\n print \"[{x}] Not checking Lantern jobs - interface disabled\".format(x=dates.now())\n return\n print \"[{x}] Checking Lantern jobs\".format(x=dates.now())\n LanternApi.check_jobs()",
"def check_status(con):\n try:\n status = con.sudo('su - splunk -c \"/opt/splunk/bin/splunk status\"', hide=True)\n if 'is running' in status.stdout:\n return True\n else:\n return False\n except (ConnectionError, AuthenticationException, NoValidConnectionsError, UnexpectedExit):\n return False",
"def status_check(self):\n try:\n client = self.connect()\n client.sys.is_initialized() # make an actual network connection\n return True\n except:\n return False",
"def check(self):\n if self.backend.poll():\n raise RuntimeError('Backend process died.')\n\n if self.esp.poll():\n raise RuntimeError('ESP process died.')"
] | [
"0.6630676",
"0.6584592",
"0.64988655",
"0.6488543",
"0.6485963",
"0.6476117",
"0.6447399",
"0.63786393",
"0.62928927",
"0.6289477",
"0.6250488",
"0.62484485",
"0.6197466",
"0.6183795",
"0.6161205",
"0.61303514",
"0.61219287",
"0.61159056",
"0.6114376",
"0.609758",
"0.6067191",
"0.60650265",
"0.6048285",
"0.60220516",
"0.60220516",
"0.60218185",
"0.60207057",
"0.6017335",
"0.60001916",
"0.5999674"
] | 0.6729618 | 0 |
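
The hard negatives in the record above are mostly liveness probes: a MongoDB `ismaster` ping, a `docker container inspect` shell-out, psutil zombie checks, and plain HTTP status calls. A minimal sketch that combines two of those styles (a container check plus an HTTP check) is given below purely for illustration; the container name, health URL, and timeout values are hypothetical and do not come from any of the quoted snippets.

```python
import subprocess
import requests

def service_is_healthy(container_name: str, health_url: str, timeout: float = 3.0) -> bool:
    """True only if the container reports Running and its HTTP endpoint answers 200."""
    # Process-level check, in the spirit of the `docker container inspect` snippet above.
    inspect = subprocess.run(
        ["docker", "container", "inspect", "-f", "{{.State.Running}}", container_name],
        capture_output=True, text=True,
    )
    if inspect.returncode != 0 or inspect.stdout.strip() != "true":
        return False
    # Application-level check, in the spirit of the HTTP status probes above.
    try:
        return requests.get(health_url, timeout=timeout).status_code == 200
    except requests.RequestException:
        return False

# Hypothetical usage, reusing the container name from the snippet above:
# service_is_healthy("bg_changer", "http://localhost:8000/health")
```
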
Shift the process from master to slave, shifting data as needed. | def master_to_slave():
print("Shifting from master to slave")
stop_master_worker()
setup_slave_web()
prepare_push()
push_to_slave()
stop_slave_web()
start_slave_worker()
print("DONE!") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def slave_to_master():\n print(\"Shifting from slave to master\")\n stop_slave_worker()\n setup_slave_web()\n pull_from_slave()\n commit_pull_to_db()\n stop_slave_web()\n start_master_worker()\n print(\"DONE!\")",
"def runSlaveRun():\n\n np.set_printoptions(linewidth=1000)\n function = None\n options = None\n\n # print(\"Process {}/{} reporting for duty!\".format(rank, size))\n\n function = comm.bcast(function, root=0)\n arguments = comm.scatter(options, root=0)\n\n results = function(*arguments)\n\n comm.Barrier()\n comm.gather(results, root=0)\n comm.Disconnect()",
"def onSlave(self):",
"def write_master(self, data):\n self._write(self.master_fd, data)",
"def reset_slave():\n\n # Confirm slave status in case we need to refer to the values later\n slave_status()\n run_mysql_command(\"STOP SLAVE;\")\n\n with hide('everything'):\n # Store last known log file and position\n master_log_file = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Relay_Master_Log_File:' | awk '{ print $2 }'\")\n master_log_pos = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Exec_Master_Log_Pos:' | awk '{ print $2 }'\")\n\n if not master_log_file or not master_log_pos:\n abort(\"Failed to determine replication log file and position, aborting.\")\n\n # Forget log file and position\n run_mysql_command(\"RESET SLAVE;\")\n\n # Repoint log file and position to last known values\n run_mysql_command(\"CHANGE MASTER TO MASTER_LOG_FILE='{}', MASTER_LOG_POS={};\"\n .format(master_log_file, master_log_pos))\n run_mysql_command(\"START SLAVE;\")\n\n with hide('everything'):\n seconds_behind_master = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Seconds_Behind_Master:' | awk '{ print $2 }'\")\n\n # Compare as a string to ensure we got a non-nil value from MySQL\n if seconds_behind_master != '0':\n abort(\"Slave is still behind master by {} seconds; run mysql.slave_status to check status\"\n .format(seconds_behind_master))",
"def _standby_clone():\n # manualy:\n # $ mkdir -p /var/lib/postgresql/9.1/testscluster/\n # $ rsync -avz --rsh='ssh -p2222' [email protected]:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n puts(green('Start cloning the master'))\n repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n res = sudo(repmgr_clone_command, user='postgres')\n if 'Can not connect to the remote host' in res or 'Connection to database failed' in res:\n puts(\"-\" * 40)\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n puts(\"Master server is %s reachable.\" % red(\"NOT\"))\n puts(\"%s you can try to CLONE the slave manually [%s]:\" % (green(\"BUT\"), red(\"at your own risk\")))\n puts(\"On the slave server:\")\n puts(\"$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid\" % env)\n puts(\"Here:\")\n puts(\"$ fab <cluster_task_name> finish_configuring_slave\")\n abort(\"STOP...\")",
"def replicate_slave_from_master(master):\n if len(env.hosts) > 1:\n exit('This job is currently only setup to run against one slave at a time')\n\n with settings(host_string=master):\n # `--single-transaction` in conjunction with `--master-data` avoids\n # locking tables for any significant length of time. See\n # https://web.archive.org/web/20160308163516/https://dev.mysql.com/doc/refman/5.5/en/mysqldump.html#option_mysqldump_single-transaction\n run('sudo -i mysqldump -u root --all-databases --master-data --single-transaction --quick --add-drop-database > dump.sql')\n\n with settings(host_string=master, forward_agent=True):\n run('scp dump.sql {0}:~'.format(env.hosts[0]))\n\n with settings(host_string=master):\n run('rm dump.sql')\n\n run_mysql_command(\"STOP SLAVE\")\n run_mysql_command(\"SET GLOBAL slow_query_log=OFF\")\n\n with hide('running', 'stdout'):\n database_file_size = run(\"stat --format='%s' dump.sql\")\n\n print('Importing MySQL database which is {0}GB, this might take a while...'.format(round(int(database_file_size) / (1024 * 1024 * 1024 * 1.0), 1)))\n run('sudo -i mysql -uroot < dump.sql')\n\n run('rm dump.sql')\n\n run_mysql_command(\"START SLAVE\")\n run_mysql_command(\"SET GLOBAL slow_query_log=ON\")\n\n slave_status()",
"def promote_slave_commands(self):\n return [\n \"RESET MASTER\",\n \"STOP SLAVE\",\n \"RESET SLAVE\",\n \"CHANGE MASTER TO MASTER_HOST = ''\",\n ]",
"def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass",
"def swap_master(qtile):\n grp = qtile.current_group\n if grp.layout.clients.current_index > 0:\n grp.layout.cmd_swap_main()\n elif grp.layout.clients.current_index == 0 and len(grp.layout.clients.clients) > 0:\n grp.layout.cmd_shuffle_down()\n c = grp.layout.clients.focus_first()\n grp.focus(c, True)",
"def sim_process(self, process):\n if (self.data[process].protocol==[]) or \\\n (not self.data[process].protocol[-1]['crossed']):\n time_left = getattr(self, 'initial_' + process)()\n if not time_left:\n return False\n\n # Main loop for replica exchange\n if (self.args.params[process]['repX_cycles'] is not None) and \\\n ((self.data[process].cycle < \\\n self.args.params[process]['repX_cycles'])):\n\n # Load configurations to score from another program\n if (process=='CD') and (self.data['CD'].cycle==1) and \\\n (self.args.params['CD']['pose'] == -1) and \\\n (self.args.FNs['score'] is not None) and \\\n (self.args.FNs['score']!='default'):\n self.log.set_lock('CD')\n self.log.tee(\"\\n>>> Reinitializing replica exchange configurations\")\n self.system.setParams(self.system.paramsFromAlpha(1.0, 'CD'))\n confs = self._get_confs_to_rescore(\\\n nconfs=len(self.data['CD'].protocol), site=True, minimize=True)[0]\n self.log.clear_lock('CD')\n if len(confs) > 0:\n self.data['CD'].confs['replicas'] = confs\n\n self.log.tee(\"\\n>>> Replica exchange for {0}, starting at {1}\\n\".format(\\\n process, time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())), \\\n process=process)\n self.log.recordStart(process + '_repX_start')\n start_cycle = self.data[process].cycle\n cycle_times = []\n while (self.data[process].cycle <\n self.args.params[process]['repX_cycles']):\n from AlGDock.replica_exchange import ReplicaExchange\n ReplicaExchange(self.args, self.log, self.top, self.system,\n self.iterator, self.data, self.save, self._u_kln).run(process)\n self.SIRS(process)\n cycle_times.append(self.log.timeSince('repX cycle'))\n if process == 'CD':\n self._insert_CD_state_between_low_acc()\n if not self.log.isTimeForTask(cycle_times):\n return False\n self.log.tee(\"Elapsed time for %d cycles of replica exchange: %s\"%(\\\n (self.data[process].cycle - start_cycle), \\\n HMStime(self.log.timeSince(process+'_repX_start'))), \\\n process=process)\n\n # If there are insufficient configurations,\n # do additional replica exchange on the BC process\n if (process == 'BC'):\n E_MM = []\n for k in range(len(self.data['BC'].Es[0])):\n E_MM += list(self.data['BC'].Es[0][k]['MM'])\n while len(E_MM) < self.args.params['CD']['seeds_per_state']:\n self.log.tee(\n \"More samples from high temperature ligand simulation needed\",\n process='BC')\n from AlGDock.replica_exchange import ReplicaExchange\n ReplicaExchange(self.args, self.log, self.top, self.system,\n self.iterator, self.data, self.save, self._u_kln).run('BC')\n self.SIRS(process)\n cycle_times.append(self.log.timeSince('repX cycle'))\n if not self.log.isTimeForTask(cycle_times):\n return False\n E_MM = []\n for k in range(len(self.data['BC'].Es[0])):\n E_MM += list(self.data['BC'].Es[0][k]['MM'])\n\n # Clear evaluators to save memory\n self.system.clear_evaluators()\n\n return True # The process has completed",
"def run(self):\n\t\t#check if master and slave are there\n\t\tinfo_file_name = self.CFG['INFO_FILE_NAME']\n\t\tbase_dir = self.CFG['BASE_DIR']\n\t\tsync_file_name = self.CFG['SYNC_FILE_NAME']\n\t\tmaster = self.getMaster(base_dir, info_file_name)\n\t\tslave = None\n\n\t\tif master != None :\n\t\t\tlogging.info(\"We have a master in {0}\".format(master['path']))\n\t\t\tslave = self.getSlave(base_dir, info_file_name, master['info']['signature'])\n\n\t\tif slave != None:\n\t\t\tlogging.info(\"We have a slave in {0}\".format(slave['path']))\n\n\t\tif master!= None and slave != None:\n\t\t\totherProcess = self.checkRunningProcessAndMark(sync_file_name)\n\n\t\t\tif otherProcess is False:\n\t\t\t\t#we can sync\n\t\t\t\t#TODO: what if the rsync command fails? \n\t\t\t\tcommand = \"rsync -avz --exclude={2} {0}/ {1}/\"\n\t\t\t\tcommand = command.format(master['path'], slave['path'], info_file_name)\n\t\t\t\tlogging.debug(command)\n\t\t\t\tresp = os.system(command)\n\t\t\t\tlogging.debug(resp)\n\t\t\t\tself.markSynced(master['path'], info_file_name, master['info'])\n\t\t\t\tself.markSynced(slave['path'], info_file_name, slave['info'])\n\t\t\t\tresp = os.system(\"rm {0}\".format(sync_file_name))\n\n\t\t\t\t#print command",
"def onSlaveLost(self):",
"def addSlavePid(self, pid):\n if self._logger is not None:\n self._logger.debug('Adding slave PID ' + str(pid))\n if not pid in self._all_processes_pid: # Make sure we don't add twice a PID\n self._all_processes_pid += [pid] # Add",
"def loop(self):\n log = logging.getLogger('mailman.runner')\n log.info('Master started')\n self._pause()\n while True:\n try:\n pid, status = os.wait()\n except OSError as error:\n # No children? We're done.\n if error.errno == errno.ECHILD:\n break\n # If the system call got interrupted, just restart it.\n elif error.errno == errno.EINTR:\n continue\n else:\n raise\n # Find out why the subprocess exited by getting the signal\n # received or exit status.\n if os.WIFSIGNALED(status):\n why = os.WTERMSIG(status)\n elif os.WIFEXITED(status):\n why = os.WEXITSTATUS(status)\n else:\n why = None\n # We'll restart the subprocess if it exited with a SIGUSR1 or\n # because of a failure (i.e. no exit signal), and the no-restart\n # command line switch was not given. This lets us better handle\n # runaway restarts (e.g. if the subprocess had a syntax error!)\n rname, slice_number, count, restarts = self._kids.pop(pid)\n config_name = 'runner.' + rname\n restart = False\n if why == signal.SIGUSR1 and self._restartable:\n restart = True\n # Have we hit the maximum number of restarts?\n restarts += 1\n max_restarts = int(getattr(config, config_name).max_restarts)\n if restarts > max_restarts:\n restart = False\n # Are we permanently non-restartable?\n log.debug(\"\"\"\\\nMaster detected subprocess exit\n(pid: {0:d}, why: {1}, class: {2}, slice: {3:d}/{4:d}) {5}\"\"\".format(\n pid, why, rname, slice_number + 1, count,\n ('[restarting]' if restart else '')))\n # See if we've reached the maximum number of allowable restarts.\n if restarts > max_restarts:\n log.info(\"\"\"\\\nRunner {0} reached maximum restart limit of {1:d}, not restarting.\"\"\",\n rname, max_restarts)\n # Now perhaps restart the process unless it exited with a\n # SIGTERM or we aren't restarting.\n if restart:\n spec = '{0}:{1:d}:{2:d}'.format(rname, slice_number, count)\n new_pid = self._start_runner(spec)\n new_info = (rname, slice_number, count, restarts)\n self._kids.add(new_pid, new_info)\n log.info('Master stopped')",
"def start_slave_worker():\n print(\"Starting slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to start the worker dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the worker process to start up\n print(\"Waiting a bit\")\n time.sleep(10)\n return True",
"def TransferMemorySequence():\r\n pass",
"def ExtractInfoAndCopyMaster(self):\n self.ExtractandWriteInfo()\n self.CreateMasterCopy()\n return \"TurnOffMirror\"",
"def _rotate_workers(self, worker):\n raise NotImplementedError",
"def pg_copy_master(self, master_db, user, password):\n\n self.pg_cmd(\"stop\")\n sudo(\"rm -rf {0}\".format(self.pg_data))\n pgpass_line = \":\".join(\n [master_db.internal_ip, \"*\", \"replication\", user, password]\n )\n sudo(\n 'echo \"{line}\" > {file_}' \"\".format(file_=self.pgpass, line=pgpass_line),\n user=\"postgres\",\n )\n sudo(\"chmod 600 {0}\".format(self.pgpass), user=\"postgres\")\n sudo(\n \"{pg_bin}/pg_basebackup -X stream -D {pg_data} -P -h {host} -U {user}\"\n \"\".format(\n pg_bin=self.pg_bin,\n pg_data=self.pg_data,\n host=master_db.internal_ip,\n user=user,\n ),\n user=\"postgres\",\n )\n with cd(self.pg_data):\n signal = \"standby.signal\"\n sudo(\"touch {file_}\".format(file_=signal), user=\"postgres\")\n self.pg_set_str(\n \"primary_conninfo\",\n \"host={host} user={user} password={password}\"\n \"\".format(host=master_db.internal_ip, user=user, password=password),\n )\n sudo(\"ln -s /etc/ssl/certs/ssl-cert-snakeoil.pem server.crt\")\n sudo(\"ln -s /etc/ssl/private/ssl-cert-snakeoil.key server.key\")\n self.pg_cmd(\"start\")",
"def _move_chunk(self, args: MigrationArgs) -> None:\n def move_command():\n self._mongo_client.admin.command(\"moveChunk\", args.collection, find={SHARD_KEY: args.shard_key},\n to=args.shard, _secondaryThrottle=False, _waitForDelete=True)\n self._try_until_done(move_command)\n self._chunks[args.collection][args.shard_key] = args.shard\n logging.info(f\"MongoAgent: Moved chunk {args.shard_key} of collection {args.collection} to {args.shard}\")",
"def __init__(self, master):\n super().__init__()\n self.master = master\n self.proc = None\n self.start()",
"def test_failover_to_second_master(\n event_listener,\n salt_mm_failover_master_1,\n salt_mm_failover_master_2,\n salt_mm_failover_minion_1,\n salt_mm_failover_minion_2,\n mm_failover_master_1_salt_cli,\n mm_failover_master_2_salt_cli,\n run_salt_cmds,\n):\n event_patterns = [\n (\n salt_mm_failover_master_2.id,\n \"salt/minion/{}/start\".format(salt_mm_failover_minion_1.id),\n )\n ]\n\n start_time = time.time()\n with salt_mm_failover_master_1.stopped():\n assert salt_mm_failover_master_2.is_running()\n # We need to wait for them to realize that the master is not alive\n # At this point, only the first minion will need to change masters\n events = event_listener.wait_for_events(\n event_patterns,\n timeout=salt_mm_failover_minion_1.config[\"master_alive_interval\"] * 4,\n after_time=start_time,\n )\n\n assert salt_mm_failover_minion_1.is_running()\n assert not events.missed\n\n returns = run_salt_cmds(\n [mm_failover_master_1_salt_cli, mm_failover_master_2_salt_cli],\n [salt_mm_failover_minion_1, salt_mm_failover_minion_2],\n )\n\n assert len(returns) == 2\n assert (mm_failover_master_2_salt_cli, salt_mm_failover_minion_1) in returns\n assert (mm_failover_master_2_salt_cli, salt_mm_failover_minion_2) in returns",
"def do_work(self, data):\n rank = MPI.COMM_WORLD.Get_rank()\n name = MPI.Get_processor_name()\n\n print(' Slave %s rank %d executing task %s' % (name, rank, data['task']))\n\n if data['task'] == 'initial_sim':\n # define explicit assimulo problem\n sim_obj = data['sim_obj']\n rhs_fun = sim_obj.rhs_fun # data['rhs_fun']\n y_initial = data['y0']\n estimate_id = data['id']\n ode_opts = sim_obj.ode_opts # data['ode_opts']\n ode_sys_opts = data['ode_sys_opts']\n t_final = sim_obj.t_final # data['t_final']\n all_options = [ode_opts, ode_sys_opts]\n\n print(' Slave %s rank %d executing initial_sim for estimate: %s sample: %s, data set: %s' %\n (name, rank, estimate_id[0], estimate_id[1], estimate_id[2]))\n slave_tout, slave_yout, _, _ = simulate_ode(rhs_fun, y_initial, tf=t_final, opts=all_options)\n print(' ode simulation complete ')\n\n # calculate flux\n flux_fun = sim_obj.flux_fun # data['flux_fun']\n slave_flux = np.array(list(map(lambda x: flux_fun(x, ode_sys_opts), slave_yout)))\n\n result = (slave_tout, slave_yout, slave_flux, estimate_id[0], estimate_id[1], estimate_id[2], sim_obj,\n ode_sys_opts)\n\n elif data['task'] == 'perturbation_sim':\n\n sim_obj = data['sim_obj']\n rhs_fun = sim_obj.rhs_fun # data['rhs_fun']\n y_initial = data['y0']\n estimate_id = data['id']\n perturbation_id = data['perturbation_id']\n ode_opts = sim_obj.ode_opts # data['ode_opts']\n ode_sys_opts = data['ode_sys_opts']\n t_final = sim_obj.t_final # data['t_final']\n all_options = [ode_opts, ode_sys_opts]\n\n print(' Slave %s rank %d executing initial_sim for estimate: %s sample: %s, data set: %s '\n 'perturbation: %s' %\n (name, rank, estimate_id[0], estimate_id[1], estimate_id[2], perturbation_id))\n slave_tout, slave_yout, _, _ = simulate_ode(rhs_fun, y_initial, tf=t_final, opts=all_options)\n print(' ode perturbation simulation complete ')\n\n # calculate flux\n flux_fun = sim_obj.flux_fun # data['flux_fun']\n slave_flux = np.array(list(map(lambda x: flux_fun(x, ode_sys_opts), slave_yout)))\n\n result = (slave_tout, slave_yout, slave_flux, estimate_id[0], estimate_id[1], estimate_id[2],\n perturbation_id)\n\n return data['task'], result",
"def finish_command(self):\n\n # remap source\n orig_nodes = self.data.keys()\n for orig_node in orig_nodes:\n new_node = self.mapping[orig_node] or orig_node\n\n if new_node != orig_node:\n self.data[new_node] = self.data[orig_node]\n del self.data[orig_node]\n\n set_data(self.data,\n create_attrs=self.create_attrs,\n set_values=self.set_values,\n set_values_on_all=self.set_values_on_all)",
"def master(self, value):\n\n self._master = value",
"def master_read(self, data):\n self.write_stdout(data)",
"def update_master(self):\n\n\n #GET MASTER DATASET\n master = pd.read_csv('/home/austin/Desktop/Falcon/realestate/Falcon/Datasets/master.csv')\n\n #UNION DF WITH MASTER W/O DUPLICATES *** NOT WORKING\n master = pd.concat([\n master, self.df\n ], sort = True).drop_duplicates().reset_index(drop=True)\n\n #WRITE TO MASTER\n master.to_csv('/home/austin/Desktop/Falcon/realestate/Falcon/Datasets/master.csv')",
"def reproject_image_to_master ( master, slave, res=None ):\n slave_ds = gdal.Open( slave )\n if slave_ds is None:\n raise IOError, \"GDAL could not open slave file %s \" \\\n % slave\n slave_proj = slave_ds.GetProjection()\n slave_geotrans = slave_ds.GetGeoTransform()\n data_type = slave_ds.GetRasterBand(1).DataType\n n_bands = slave_ds.RasterCount\n\n master_ds = gdal.Open( master )\n if master_ds is None:\n raise IOError, \"GDAL could not open master file %s \" \\\n % master\n master_proj = master_ds.GetProjection()\n master_geotrans = master_ds.GetGeoTransform()\n w = master_ds.RasterXSize\n h = master_ds.RasterYSize\n if res is not None:\n master_geotrans[1] = float( res )\n master_geotrans[-1] = - float ( res )\n\n dst_filename = slave.replace( \".tif\", \"_crop.vrt\" )\n dst_ds = gdal.GetDriverByName('VRT').Create(dst_filename,\n w, h, n_bands, data_type)\n dst_ds.SetGeoTransform( master_geotrans )\n dst_ds.SetProjection( master_proj)\n\n gdal.ReprojectImage( slave_ds, dst_ds, slave_proj,\n master_proj, gdal.GRA_NearestNeighbour)\n dst_ds = None # Flush to disk\n return dst_filename",
"def update_worker():\n from test import get_remote_runner\n runner = get_remote_runner()\n runner.run(\"python2.7 /vagrant/bootstrap_lxc_manager.py --update_only=True\")"
] | [
"0.72516125",
"0.5815426",
"0.567222",
"0.5663159",
"0.54625714",
"0.5429651",
"0.53810555",
"0.53091127",
"0.5294686",
"0.5285994",
"0.51691014",
"0.5157244",
"0.5134187",
"0.512011",
"0.50734735",
"0.50418085",
"0.5016566",
"0.5001079",
"0.4958527",
"0.49188",
"0.49104735",
"0.49086466",
"0.49071345",
"0.48784205",
"0.48526478",
"0.48410785",
"0.48261112",
"0.48174596",
"0.48113924",
"0.4809835"
] | 0.7314937 | 0 |
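
The positive document above, and negatives such as `start_slave_worker` and `stop_slave_worker`, all drive the slave app through the same `PATCH .../formation/<process type>` call and vary only the process type and payload. The sketch below shows the shared helper they appear to imply; it is an assumption, not code from the record — `SLAVE_API_URL` and `SLAVE_API_HEADERS` are stand-in constants, and the `{"quantity": n}` body is a guess at what the opaque `API_PAYLOAD_0`/`API_PAYLOAD_1` constants hold.

```python
import time
import requests as req

# Stand-in constants; the records only reference these names, not their values.
SLAVE_API_URL = "https://api.example.com/apps/slave-app"   # hypothetical
SLAVE_API_HEADERS = {"Accept": "application/json"}         # hypothetical

def scale_slave_dyno(process_type: str, quantity: int, settle_seconds: float = 10.0) -> bool:
    """Scale one slave process type ('web' or 'worker') and wait briefly for it to settle."""
    payload = {"quantity": quantity}  # assumed shape of API_PAYLOAD_0 / API_PAYLOAD_1
    r = req.patch(f"{SLAVE_API_URL}/formation/{process_type}",
                  json=payload, headers=SLAVE_API_HEADERS)
    if r.status_code != req.codes.ok:
        print(f"Unable to scale {process_type} to {quantity} on slave")
        print(r.text)
        return False
    print("Waiting a bit")
    time.sleep(settle_seconds)
    return True

# With such a helper, master_to_slave()'s steps reduce to calls like
# scale_slave_dyno("worker", 0) followed by scale_slave_dyno("web", 1).
```
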
Shift the process from slave to master, shifting data as needed. | def slave_to_master():
print("Shifting from slave to master")
stop_slave_worker()
setup_slave_web()
pull_from_slave()
commit_pull_to_db()
stop_slave_web()
start_master_worker()
print("DONE!") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def master_to_slave():\n print(\"Shifting from master to slave\")\n stop_master_worker()\n setup_slave_web()\n prepare_push()\n push_to_slave()\n stop_slave_web()\n start_slave_worker()\n print(\"DONE!\")",
"def write_master(self, data):\n self._write(self.master_fd, data)",
"def runSlaveRun():\n\n np.set_printoptions(linewidth=1000)\n function = None\n options = None\n\n # print(\"Process {}/{} reporting for duty!\".format(rank, size))\n\n function = comm.bcast(function, root=0)\n arguments = comm.scatter(options, root=0)\n\n results = function(*arguments)\n\n comm.Barrier()\n comm.gather(results, root=0)\n comm.Disconnect()",
"def onSlave(self):",
"def _standby_clone():\n # manualy:\n # $ mkdir -p /var/lib/postgresql/9.1/testscluster/\n # $ rsync -avz --rsh='ssh -p2222' [email protected]:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n puts(green('Start cloning the master'))\n repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n res = sudo(repmgr_clone_command, user='postgres')\n if 'Can not connect to the remote host' in res or 'Connection to database failed' in res:\n puts(\"-\" * 40)\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n puts(\"Master server is %s reachable.\" % red(\"NOT\"))\n puts(\"%s you can try to CLONE the slave manually [%s]:\" % (green(\"BUT\"), red(\"at your own risk\")))\n puts(\"On the slave server:\")\n puts(\"$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid\" % env)\n puts(\"Here:\")\n puts(\"$ fab <cluster_task_name> finish_configuring_slave\")\n abort(\"STOP...\")",
"def reset_slave():\n\n # Confirm slave status in case we need to refer to the values later\n slave_status()\n run_mysql_command(\"STOP SLAVE;\")\n\n with hide('everything'):\n # Store last known log file and position\n master_log_file = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Relay_Master_Log_File:' | awk '{ print $2 }'\")\n master_log_pos = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Exec_Master_Log_Pos:' | awk '{ print $2 }'\")\n\n if not master_log_file or not master_log_pos:\n abort(\"Failed to determine replication log file and position, aborting.\")\n\n # Forget log file and position\n run_mysql_command(\"RESET SLAVE;\")\n\n # Repoint log file and position to last known values\n run_mysql_command(\"CHANGE MASTER TO MASTER_LOG_FILE='{}', MASTER_LOG_POS={};\"\n .format(master_log_file, master_log_pos))\n run_mysql_command(\"START SLAVE;\")\n\n with hide('everything'):\n seconds_behind_master = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Seconds_Behind_Master:' | awk '{ print $2 }'\")\n\n # Compare as a string to ensure we got a non-nil value from MySQL\n if seconds_behind_master != '0':\n abort(\"Slave is still behind master by {} seconds; run mysql.slave_status to check status\"\n .format(seconds_behind_master))",
"def replicate_slave_from_master(master):\n if len(env.hosts) > 1:\n exit('This job is currently only setup to run against one slave at a time')\n\n with settings(host_string=master):\n # `--single-transaction` in conjunction with `--master-data` avoids\n # locking tables for any significant length of time. See\n # https://web.archive.org/web/20160308163516/https://dev.mysql.com/doc/refman/5.5/en/mysqldump.html#option_mysqldump_single-transaction\n run('sudo -i mysqldump -u root --all-databases --master-data --single-transaction --quick --add-drop-database > dump.sql')\n\n with settings(host_string=master, forward_agent=True):\n run('scp dump.sql {0}:~'.format(env.hosts[0]))\n\n with settings(host_string=master):\n run('rm dump.sql')\n\n run_mysql_command(\"STOP SLAVE\")\n run_mysql_command(\"SET GLOBAL slow_query_log=OFF\")\n\n with hide('running', 'stdout'):\n database_file_size = run(\"stat --format='%s' dump.sql\")\n\n print('Importing MySQL database which is {0}GB, this might take a while...'.format(round(int(database_file_size) / (1024 * 1024 * 1024 * 1.0), 1)))\n run('sudo -i mysql -uroot < dump.sql')\n\n run('rm dump.sql')\n\n run_mysql_command(\"START SLAVE\")\n run_mysql_command(\"SET GLOBAL slow_query_log=ON\")\n\n slave_status()",
"def swap_master(qtile):\n grp = qtile.current_group\n if grp.layout.clients.current_index > 0:\n grp.layout.cmd_swap_main()\n elif grp.layout.clients.current_index == 0 and len(grp.layout.clients.clients) > 0:\n grp.layout.cmd_shuffle_down()\n c = grp.layout.clients.focus_first()\n grp.focus(c, True)",
"def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass",
"def promote_slave_commands(self):\n return [\n \"RESET MASTER\",\n \"STOP SLAVE\",\n \"RESET SLAVE\",\n \"CHANGE MASTER TO MASTER_HOST = ''\",\n ]",
"def sim_process(self, process):\n if (self.data[process].protocol==[]) or \\\n (not self.data[process].protocol[-1]['crossed']):\n time_left = getattr(self, 'initial_' + process)()\n if not time_left:\n return False\n\n # Main loop for replica exchange\n if (self.args.params[process]['repX_cycles'] is not None) and \\\n ((self.data[process].cycle < \\\n self.args.params[process]['repX_cycles'])):\n\n # Load configurations to score from another program\n if (process=='CD') and (self.data['CD'].cycle==1) and \\\n (self.args.params['CD']['pose'] == -1) and \\\n (self.args.FNs['score'] is not None) and \\\n (self.args.FNs['score']!='default'):\n self.log.set_lock('CD')\n self.log.tee(\"\\n>>> Reinitializing replica exchange configurations\")\n self.system.setParams(self.system.paramsFromAlpha(1.0, 'CD'))\n confs = self._get_confs_to_rescore(\\\n nconfs=len(self.data['CD'].protocol), site=True, minimize=True)[0]\n self.log.clear_lock('CD')\n if len(confs) > 0:\n self.data['CD'].confs['replicas'] = confs\n\n self.log.tee(\"\\n>>> Replica exchange for {0}, starting at {1}\\n\".format(\\\n process, time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())), \\\n process=process)\n self.log.recordStart(process + '_repX_start')\n start_cycle = self.data[process].cycle\n cycle_times = []\n while (self.data[process].cycle <\n self.args.params[process]['repX_cycles']):\n from AlGDock.replica_exchange import ReplicaExchange\n ReplicaExchange(self.args, self.log, self.top, self.system,\n self.iterator, self.data, self.save, self._u_kln).run(process)\n self.SIRS(process)\n cycle_times.append(self.log.timeSince('repX cycle'))\n if process == 'CD':\n self._insert_CD_state_between_low_acc()\n if not self.log.isTimeForTask(cycle_times):\n return False\n self.log.tee(\"Elapsed time for %d cycles of replica exchange: %s\"%(\\\n (self.data[process].cycle - start_cycle), \\\n HMStime(self.log.timeSince(process+'_repX_start'))), \\\n process=process)\n\n # If there are insufficient configurations,\n # do additional replica exchange on the BC process\n if (process == 'BC'):\n E_MM = []\n for k in range(len(self.data['BC'].Es[0])):\n E_MM += list(self.data['BC'].Es[0][k]['MM'])\n while len(E_MM) < self.args.params['CD']['seeds_per_state']:\n self.log.tee(\n \"More samples from high temperature ligand simulation needed\",\n process='BC')\n from AlGDock.replica_exchange import ReplicaExchange\n ReplicaExchange(self.args, self.log, self.top, self.system,\n self.iterator, self.data, self.save, self._u_kln).run('BC')\n self.SIRS(process)\n cycle_times.append(self.log.timeSince('repX cycle'))\n if not self.log.isTimeForTask(cycle_times):\n return False\n E_MM = []\n for k in range(len(self.data['BC'].Es[0])):\n E_MM += list(self.data['BC'].Es[0][k]['MM'])\n\n # Clear evaluators to save memory\n self.system.clear_evaluators()\n\n return True # The process has completed",
"def onSlaveLost(self):",
"def run(self):\n\t\t#check if master and slave are there\n\t\tinfo_file_name = self.CFG['INFO_FILE_NAME']\n\t\tbase_dir = self.CFG['BASE_DIR']\n\t\tsync_file_name = self.CFG['SYNC_FILE_NAME']\n\t\tmaster = self.getMaster(base_dir, info_file_name)\n\t\tslave = None\n\n\t\tif master != None :\n\t\t\tlogging.info(\"We have a master in {0}\".format(master['path']))\n\t\t\tslave = self.getSlave(base_dir, info_file_name, master['info']['signature'])\n\n\t\tif slave != None:\n\t\t\tlogging.info(\"We have a slave in {0}\".format(slave['path']))\n\n\t\tif master!= None and slave != None:\n\t\t\totherProcess = self.checkRunningProcessAndMark(sync_file_name)\n\n\t\t\tif otherProcess is False:\n\t\t\t\t#we can sync\n\t\t\t\t#TODO: what if the rsync command fails? \n\t\t\t\tcommand = \"rsync -avz --exclude={2} {0}/ {1}/\"\n\t\t\t\tcommand = command.format(master['path'], slave['path'], info_file_name)\n\t\t\t\tlogging.debug(command)\n\t\t\t\tresp = os.system(command)\n\t\t\t\tlogging.debug(resp)\n\t\t\t\tself.markSynced(master['path'], info_file_name, master['info'])\n\t\t\t\tself.markSynced(slave['path'], info_file_name, slave['info'])\n\t\t\t\tresp = os.system(\"rm {0}\".format(sync_file_name))\n\n\t\t\t\t#print command",
"def TransferMemorySequence():\r\n pass",
"def loop(self):\n log = logging.getLogger('mailman.runner')\n log.info('Master started')\n self._pause()\n while True:\n try:\n pid, status = os.wait()\n except OSError as error:\n # No children? We're done.\n if error.errno == errno.ECHILD:\n break\n # If the system call got interrupted, just restart it.\n elif error.errno == errno.EINTR:\n continue\n else:\n raise\n # Find out why the subprocess exited by getting the signal\n # received or exit status.\n if os.WIFSIGNALED(status):\n why = os.WTERMSIG(status)\n elif os.WIFEXITED(status):\n why = os.WEXITSTATUS(status)\n else:\n why = None\n # We'll restart the subprocess if it exited with a SIGUSR1 or\n # because of a failure (i.e. no exit signal), and the no-restart\n # command line switch was not given. This lets us better handle\n # runaway restarts (e.g. if the subprocess had a syntax error!)\n rname, slice_number, count, restarts = self._kids.pop(pid)\n config_name = 'runner.' + rname\n restart = False\n if why == signal.SIGUSR1 and self._restartable:\n restart = True\n # Have we hit the maximum number of restarts?\n restarts += 1\n max_restarts = int(getattr(config, config_name).max_restarts)\n if restarts > max_restarts:\n restart = False\n # Are we permanently non-restartable?\n log.debug(\"\"\"\\\nMaster detected subprocess exit\n(pid: {0:d}, why: {1}, class: {2}, slice: {3:d}/{4:d}) {5}\"\"\".format(\n pid, why, rname, slice_number + 1, count,\n ('[restarting]' if restart else '')))\n # See if we've reached the maximum number of allowable restarts.\n if restarts > max_restarts:\n log.info(\"\"\"\\\nRunner {0} reached maximum restart limit of {1:d}, not restarting.\"\"\",\n rname, max_restarts)\n # Now perhaps restart the process unless it exited with a\n # SIGTERM or we aren't restarting.\n if restart:\n spec = '{0}:{1:d}:{2:d}'.format(rname, slice_number, count)\n new_pid = self._start_runner(spec)\n new_info = (rname, slice_number, count, restarts)\n self._kids.add(new_pid, new_info)\n log.info('Master stopped')",
"def ExtractInfoAndCopyMaster(self):\n self.ExtractandWriteInfo()\n self.CreateMasterCopy()\n return \"TurnOffMirror\"",
"def _move_chunk(self, args: MigrationArgs) -> None:\n def move_command():\n self._mongo_client.admin.command(\"moveChunk\", args.collection, find={SHARD_KEY: args.shard_key},\n to=args.shard, _secondaryThrottle=False, _waitForDelete=True)\n self._try_until_done(move_command)\n self._chunks[args.collection][args.shard_key] = args.shard\n logging.info(f\"MongoAgent: Moved chunk {args.shard_key} of collection {args.collection} to {args.shard}\")",
"def addSlavePid(self, pid):\n if self._logger is not None:\n self._logger.debug('Adding slave PID ' + str(pid))\n if not pid in self._all_processes_pid: # Make sure we don't add twice a PID\n self._all_processes_pid += [pid] # Add",
"def start_slave_worker():\n print(\"Starting slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to start the worker dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the worker process to start up\n print(\"Waiting a bit\")\n time.sleep(10)\n return True",
"def _rotate_workers(self, worker):\n raise NotImplementedError",
"def pg_copy_master(self, master_db, user, password):\n\n self.pg_cmd(\"stop\")\n sudo(\"rm -rf {0}\".format(self.pg_data))\n pgpass_line = \":\".join(\n [master_db.internal_ip, \"*\", \"replication\", user, password]\n )\n sudo(\n 'echo \"{line}\" > {file_}' \"\".format(file_=self.pgpass, line=pgpass_line),\n user=\"postgres\",\n )\n sudo(\"chmod 600 {0}\".format(self.pgpass), user=\"postgres\")\n sudo(\n \"{pg_bin}/pg_basebackup -X stream -D {pg_data} -P -h {host} -U {user}\"\n \"\".format(\n pg_bin=self.pg_bin,\n pg_data=self.pg_data,\n host=master_db.internal_ip,\n user=user,\n ),\n user=\"postgres\",\n )\n with cd(self.pg_data):\n signal = \"standby.signal\"\n sudo(\"touch {file_}\".format(file_=signal), user=\"postgres\")\n self.pg_set_str(\n \"primary_conninfo\",\n \"host={host} user={user} password={password}\"\n \"\".format(host=master_db.internal_ip, user=user, password=password),\n )\n sudo(\"ln -s /etc/ssl/certs/ssl-cert-snakeoil.pem server.crt\")\n sudo(\"ln -s /etc/ssl/private/ssl-cert-snakeoil.key server.key\")\n self.pg_cmd(\"start\")",
"def __init__(self, master):\n super().__init__()\n self.master = master\n self.proc = None\n self.start()",
"def _send_sequence(self):\n # For new processes that may spawn\n _SHARED_SEQUENCES[self.uid] = self.sequence",
"def master(self, value):\n\n self._master = value",
"def ForceMaster(node, is_testver):\n gsaport = core_utils.GSAMasterPort(is_testver)\n # ignore the result of forcemaster\n port_talker.TCPTalk(node, gsaport, 30, command='GET /forcemaster\\n')",
"def finish_command(self):\n\n # remap source\n orig_nodes = self.data.keys()\n for orig_node in orig_nodes:\n new_node = self.mapping[orig_node] or orig_node\n\n if new_node != orig_node:\n self.data[new_node] = self.data[orig_node]\n del self.data[orig_node]\n\n set_data(self.data,\n create_attrs=self.create_attrs,\n set_values=self.set_values,\n set_values_on_all=self.set_values_on_all)",
"def test_failover_to_second_master(\n event_listener,\n salt_mm_failover_master_1,\n salt_mm_failover_master_2,\n salt_mm_failover_minion_1,\n salt_mm_failover_minion_2,\n mm_failover_master_1_salt_cli,\n mm_failover_master_2_salt_cli,\n run_salt_cmds,\n):\n event_patterns = [\n (\n salt_mm_failover_master_2.id,\n \"salt/minion/{}/start\".format(salt_mm_failover_minion_1.id),\n )\n ]\n\n start_time = time.time()\n with salt_mm_failover_master_1.stopped():\n assert salt_mm_failover_master_2.is_running()\n # We need to wait for them to realize that the master is not alive\n # At this point, only the first minion will need to change masters\n events = event_listener.wait_for_events(\n event_patterns,\n timeout=salt_mm_failover_minion_1.config[\"master_alive_interval\"] * 4,\n after_time=start_time,\n )\n\n assert salt_mm_failover_minion_1.is_running()\n assert not events.missed\n\n returns = run_salt_cmds(\n [mm_failover_master_1_salt_cli, mm_failover_master_2_salt_cli],\n [salt_mm_failover_minion_1, salt_mm_failover_minion_2],\n )\n\n assert len(returns) == 2\n assert (mm_failover_master_2_salt_cli, salt_mm_failover_minion_1) in returns\n assert (mm_failover_master_2_salt_cli, salt_mm_failover_minion_2) in returns",
"def _sync_bootstrap_to_masters(\n cluster: Cluster,\n dcos_checkout_dir: Path,\n sudo: bool,\n) -> None:\n local_packages = dcos_checkout_dir / 'packages'\n local_bootstrap_dir = (\n local_packages / 'bootstrap' / 'extra' / 'dcos_internal_utils'\n )\n node_lib_dir = Path('/opt/mesosphere/active/bootstrap/lib')\n # Different versions of DC/OS have different versions of Python.\n master = next(iter(cluster.masters))\n ls_result = master.run(args=['ls', str(node_lib_dir)])\n python_version = ls_result.stdout.decode().strip()\n node_python_dir = node_lib_dir / python_version\n node_bootstrap_dir = (\n node_python_dir / 'site-packages' / 'dcos_internal_utils'\n )\n bootstrap_tarstream = _tar_with_filter(\n path=local_bootstrap_dir,\n tar_filter=_cache_filter,\n )\n\n for master in cluster.masters:\n _send_tarstream_to_node_and_extract(\n tarstream=bootstrap_tarstream,\n node=master,\n remote_path=node_bootstrap_dir,\n sudo=sudo,\n )",
"def migrate_to_dest(self):\n self.executor.loader.build_graph()\n self.executor.migrate(self.migrate_to)",
"def update_master(self):\n\n\n #GET MASTER DATASET\n master = pd.read_csv('/home/austin/Desktop/Falcon/realestate/Falcon/Datasets/master.csv')\n\n #UNION DF WITH MASTER W/O DUPLICATES *** NOT WORKING\n master = pd.concat([\n master, self.df\n ], sort = True).drop_duplicates().reset_index(drop=True)\n\n #WRITE TO MASTER\n master.to_csv('/home/austin/Desktop/Falcon/realestate/Falcon/Datasets/master.csv')"
] | [
"0.72428024",
"0.5692221",
"0.56806463",
"0.55942285",
"0.5393446",
"0.5345593",
"0.53245896",
"0.53045934",
"0.5239213",
"0.5215958",
"0.5118116",
"0.5114614",
"0.50847006",
"0.5059445",
"0.5055491",
"0.5023818",
"0.5010442",
"0.5004643",
"0.49860305",
"0.49616227",
"0.49346024",
"0.492914",
"0.48935518",
"0.488998",
"0.48651117",
"0.4858509",
"0.48527628",
"0.48409417",
"0.483884",
"0.48026586"
] | 0.7228141 | 1 |
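
`slave_to_master` above mirrors `master_to_slave` from the previous record step for step, and both discard the booleans that helpers like `stop_slave_web` return. A hypothetical driver that runs either sequence and stops at the first failed step is sketched below; the step functions named in the usage comment are the ones from the two documents, but wiring them through a generic runner is a proposed refactor, not something present in the dataset.

```python
from typing import Callable, Iterable, Optional

def run_shift(label: str, steps: Iterable[Callable[[], Optional[bool]]]) -> bool:
    """Run replication-shift steps in order, aborting on the first one that reports failure."""
    print(f"Shifting {label}")
    for step in steps:
        if step() is False:  # the helpers in these records return False on API errors
            print(f"Aborting: {step.__name__} failed")
            return False
    print("DONE!")
    return True

# Hypothetical usage with the step functions named in the two documents:
# run_shift("from slave to master",
#           [stop_slave_worker, setup_slave_web, pull_from_slave,
#            commit_pull_to_db, stop_slave_web, start_master_worker])
```
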
Sets up the web server on the slave, then checks it. | def setup_slave_web():
print("Starting slave web")
r = req.patch(f"{SLAVE_API_URL}/formation/web", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to start the web dyno on slave")
print(r.text)
return False
#wait a bit for the web process to start up
print("Waiting a bit")
time.sleep(10)
r = req.get(SLAVE_URL)
if not r.text.startswith("Index"):
print("Something is wrong with slave:")
print(r.text)
return False
print("Got response from slave:", r.text)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup_server(manager):\n if not manager.is_daemon:\n return\n\n web_server_config = manager.config.get('web_server')\n\n if not web_server_config:\n return\n\n web_server = WebServer(\n bind=web_server_config['bind'],\n port=web_server_config['port'],\n )\n\n if web_server.is_alive():\n web_server.stop()\n\n if _app_register:\n web_server.start()",
"def webserver_start():\n run(_webserver_command())",
"def run(self):\n log.debug(\"start web server running\")\n webDir = self.config.webDir\n self.root.putChild(\"images\", static.File(webDir+\"/images\"))\n self.root.putChild(\"css\", static.File(webDir+\"/css\")) \n self.root.putChild(\"scripts\", static.File(webDir+\"/scripts\"))\n self.root.putChild(\"style\", static.File(webDir+\"/style\"))\n self.root.putChild(\"docs\", static.File(webDir+\"/docs\"))\n xulDir = self.config.xulDir\n self.root.putChild(\"xulscripts\", static.File(xulDir+\"/scripts\"))\n self.root.putChild(\"xultemplates\", static.File(xulDir+\"/templates\"))\n self.root.putChild(\"templates\", static.File(webDir+\"/templates\"))\n self.root.putChild(\"editor\", self.editor)\n self.root.putChild(\"preferences\", self.preferences)\n self.root.putChild(\"about\", self.about)\n verbose_port_search = 0\n port_test_done = 0\n found_port = 0\n test_port_num = self.config.port\n test_port_count = 0\n max_port_tests = 5000\n while not port_test_done:\n test_port_num = self.config.port + test_port_count\n try:\n if verbose_port_search:\n print \"trying to listenTCP on port# \", test_port_num\n reactor.listenTCP(test_port_num, appserver.NevowSite(self.root),\n interface=\"127.0.0.1\")\n if verbose_port_search:\n print \"still here after listenTCP on port# \", test_port_num\n found_port = 1\n port_test_done = 1\n except CannotListenError, exc:\n if verbose_port_search:\n print \"caught exception after listenTCP on port# \", test_port_num\n last_exception = exc\n test_port_count += 1\n if test_port_count >= max_port_tests:\n port_test_done = 1\n if found_port:\n self.config.port = test_port_num\n if verbose_port_search:\n print \"found available eXe port# \", self.config.port\n reactor.run()\n else:\n print \"Sorry, unable to find an available port in the range of: \", self.config.port, \" - \", test_port_num\n print \"last exception: \", unicode(last_exception)\n log.error(\"Can't listen on interface 127.0.0.1, ports %s-%s, last exception: %s\" % \n (self.config.port, test_port_num, unicode(last_exception)))",
"def local_webserver_start():\n if not _is_webserver_running():\n local(_webserver_command())",
"async def serve_web(self):\n interface = \"0.0.0.0\" if settings.PUBLIC_ACCESS else \"127.0.0.1\"\n port = settings.WEB_PORT\n self.logger.info(f\"web: starting the server on {interface}:{port}...\")\n await self.runner.setup()\n site = aioweb.TCPSite(self.runner, interface, port)\n await site.start()\n self.preparing_task = None",
"def run_webserver():\n\tglobal hostname, portnum\n\t#bottle.debug(True)\t# While in development, we want the data\n\tbottle.run(host=hostname, port=portnum) \n\tlogging.info(\"Exiting server.\")",
"def setup_server():\n cherrypy.config.update('server.conf')\n cherrypy.tree.mount(StringGeneratorWebService(), '/', 'server.conf')",
"def test_server_is_up_and_running(self):\n r = requests.get(self.get_server_url())\n self.assertEqual(r.status_code, 200)",
"def server_init(self):\n if not self.web_interface_thread.isAlive():\n # spawn the web interface.\n self.web_interface_thread.start()",
"def setup_server(manager, session=None):\n if not manager.is_daemon:\n return\n\n web_server_config = manager.config.get('web_server')\n\n if not web_server_config:\n return\n\n web_server = WebServer(\n bind=web_server_config['bind'],\n port=web_server_config['port'],\n )\n\n _default_app.secret_key = get_secret()\n\n # Create default flexget user\n if session.query(User).count() == 0:\n session.add(User(name=\"flexget\", password=\"flexget\"))\n session.commit()\n\n if web_server.is_alive():\n web_server.stop()\n\n if _app_register:\n web_server.start()",
"def server_init(self):\n if not self._web_interface_thread.isAlive():\n # spawn the web interface.\n self._web_interface_thread.start()",
"def start_slave_worker():\n print(\"Starting slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to start the worker dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the worker process to start up\n print(\"Waiting a bit\")\n time.sleep(10)\n return True",
"def test_starts_http_api_server(self):\n options = ControlOptions()\n options.parseOptions(\n [b\"--port\", b\"tcp:8001\", b\"--data-path\", self.mktemp()])\n reactor = MemoryCoreReactor()\n ControlScript().main(reactor, options)\n server = reactor.tcpServers[0]\n port = server[0]\n factory = server[1].__class__\n self.assertEqual((port, factory), (8001, Site))",
"def bootstrapFrontend(serverName, serverPort, sslPublicCertPath,\n sslPrivateCertPath):\n # Upload files\n put(sslPublicCertPath, 'fluidinfo.pem')\n put(sslPrivateCertPath, 'fluidinfo.key')\n\n # Install requirements.\n sudo('DEBIAN_FRONTEND=noninteractive apt-get install -y nginx haproxy')\n\n # Set up haproxy.\n sudo('/etc/init.d/haproxy stop')\n deployConfigFiles(\n {'server-name': serverName},\n\n ('haproxy/haproxy.cfg', '/etc/haproxy/haproxy.cfg'),\n ('haproxy/haproxy-default', '/etc/default/haproxy'))\n\n sudo('mkdir -p ../var/run/haproxy')\n sudo('chown haproxy:haproxy ../var/run/haproxy')\n sudo('/etc/init.d/haproxy start')\n sudo('curl --silent http://127.0.0.1:9000 > /dev/null && echo Works!')\n\n # Set up nginx.\n sudo('/etc/init.d/nginx stop')\n sudo('mkdir -p /etc/nginx/ssl')\n sudo('mv fluidinfo.pem /etc/nginx/ssl')\n sudo('chmod 600 /etc/nginx/ssl/fluidinfo.pem')\n sudo('mkdir -p /var/lib/fluidinfo/logs')\n\n sudo('mv fluidinfo.key /etc/nginx/ssl')\n sudo('chmod 600 /etc/nginx/ssl/fluidinfo.key')\n deployConfigFiles(\n {'server-name': serverName},\n\n ('nginx/fluidinfo-secure.conf.template',\n '/etc/nginx/sites-available/{server-name}'))\n\n sudo('ln -sf /etc/nginx/sites-available/{0} '\n '/etc/nginx/sites-enabled/{0}'.format(serverName))\n sudo('rm -f /etc/nginx/sites-enabled/default')\n sudo('/etc/init.d/nginx start')\n time.sleep(1)\n sudo('curl --silent http://127.0.0.1:%d > /dev/null && echo Works!'\n % serverPort)",
"def start_server(self):\n if not self._server:",
"def start(self):\n #url = '{}://{}:{}/'.format('http',\n # self.ip,\n # self.port)\n #self.service_info = ServiceInfo(\n # '_webthing._sub._http._tcp.local.',\n # '{}._http._tcp.local.'.format(self.name),\n # address=socket.inet_aton(self.ip),\n # port=self.port,\n # properties={\n # 'url': url,\n # },\n # server='{}.local.'.format(socket.gethostname()))\n #self.zeroconf = Zeroconf()\n #self.zeroconf.register_service(self.service_info)\n\n # If WebSocketS used and NOT running in thread, and WebServer IS\n # running in thread make shure WebServer has enough stack size to\n # handle also the WebSocket requests.\n log.info('Starting Web Server')\n self.server.Start(threaded=srv_run_in_thread, stackSize=8192)",
"def setup():\n global server, app\n \n galaxy_test_host = os.environ.get( 'GALAXY_TEST_HOST', default_galaxy_test_host )\n galaxy_test_port = os.environ.get( 'GALAXY_TEST_PORT', default_galaxy_test_port )\n \n start_server = 'GALAXY_TEST_EXTERNAL' not in os.environ \n \n if start_server:\n \n tempdir = tempfile.mkdtemp()\n file_path = os.path.join( tempdir, 'database', 'files' )\n os.makedirs( file_path )\n if 'GALAXY_TEST_DBURI' in os.environ:\n database_connection = os.environ['GALAXY_TEST_DBURI']\n else:\n database_connection = 'sqlite:///' + os.path.join( tempdir, 'database', 'universe.sqlite' )\n \n app = UniverseApplication( job_queue_workers = 5,\n template_path = \"templates\",\n database_connection = database_connection,\n file_path = file_path,\n tool_config_file = \"tool_conf.xml\",\n tool_path = \"tools\",\n test_conf = \"test.conf\",\n log_destination = \"stdout\",\n use_heartbeat=True )\n \n log.info( \"Embedded Universe application started\" )\n\n webapp = universe_wsgi.app_factory( dict(),\n use_translogger = False,\n app=app )\n\n server = galaxy.web.server.serve( webapp, dict(), \n host=galaxy_test_host, \n port=galaxy_test_port, \n start_loop=False )\n \n atexit.register( teardown )\n \n import threading\n t = threading.Thread( target=server.serve_forever )\n t.start()\n\n time.sleep( 2 )\n \n log.info( \"Embedded web server started\" )\n \n if app:\n # TODO: provisions for loading toolbox from file when using external server\n import test_toolbox\n test_toolbox.toolbox = app.toolbox\n else:\n from galaxy import tools\n import test_toolbox\n test_toolbox.toolbox = tools.ToolBox( 'tool_conf.xml', 'tools' )\n \n # Test if the server is up\n import httplib\n conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_port )\n conn.request( \"GET\", \"/\" )\n assert conn.getresponse().status == 200, \"Test HTTP server did not return '200 OK'\"\n \n os.environ['GALAXY_TEST_HOST'] = galaxy_test_host\n os.environ['GALAXY_TEST_PORT'] = galaxy_test_port\n os.environ['GALAXY_TEST_FILE_DIR'] = galaxy_test_file_dir",
"def setup_remote_site(self):\n raise NotImplementedError",
"def StartServer(serve_webapp=True, serve_static_web=True, serve_admin=True):\n client = db_client.DBClient.Instance()\n\n settings = {\n 'gzip': True,\n 'login_url': '/',\n 'admin_login_url': '/admin/otp',\n 'domain': options.options.domain,\n 'server_version': options.options.server_version,\n 'cookie_secret': secrets.GetSecret('cookie_secret'),\n 'facebook_api_key': secrets.GetSecret('facebook_api_key'),\n 'facebook_secret': secrets.GetSecret('facebook_secret'),\n 'google_client_id': secrets.GetSecret('google_client_id'),\n 'google_client_secret': secrets.GetSecret('google_client_secret'),\n 'google_client_mobile_id': secrets.GetSecret('google_client_mobile_id'),\n 'google_client_mobile_secret': secrets.GetSecret('google_client_mobile_secret'),\n 'template_path': ResourcesManager.Instance().template_path,\n 'ui_modules': uimodules,\n 'xsrf_cookies' : options.options.enable_xsrf,\n 'debug': options.options.server_debug,\n 'static_path': ResourcesManager.Instance().static_path,\n }\n\n if options.options.log_file_prefix:\n settings['logs_dir'] = os.path.dirname(options.options.log_file_prefix)\n\n # Configure metrics uploading.\n if options.options.upload_metrics:\n for interval in metric.METRIC_INTERVALS:\n metric.Metric.StartMetricUpload(client, metric.DEFAULT_CLUSTER_NAME, interval)\n\n # Setup application and SSL HTTP server.\n handlers = deepcopy(COMMON_HANDLERS)\n if serve_webapp:\n # Configure web application handlers.\n webapp_handlers = deepcopy(WEBAPP_HANDLERS)\n\n # Initialize the file object store if specified.\n obj_store = ObjectStore.GetInstance(ObjectStore.PHOTO)\n settings['obj_store'] = obj_store\n if options.options.fileobjstore:\n for store_name, content_type in ((ObjectStore.PHOTO, r'image/jpeg'),\n (ObjectStore.USER_LOG, r'text/plain'),\n (ObjectStore.USER_ZIPS, r'application/zip')):\n webapp_handlers.append((r'/fileobjstore/%s/(.*)' % store_name,\n file_object_store.FileObjectStoreHandler,\n { 'storename': store_name, 'contenttype': content_type}))\n\n if ServerEnvironment.IsDevBox():\n webapp_handlers.append((r'/(link|login|register)/fakeviewfinder', auth_viewfinder.FakeAuthViewfinderHandler))\n # Set the testing directories.\n if options.options.testing_path is not None:\n webapp_handlers.append((r'/testing/hook/(.*)', test_hook.TestHookHandler))\n webapp_handlers.append((r'/testing/static/(.*)',\n web.StaticFileHandler,\n {'path': '%s' % options.options.testing_path}))\n\n handlers.extend(webapp_handlers)\n\n if serve_static_web:\n # Configure static web handlers.\n static_web_handlers = deepcopy(STATIC_WEB_HANDLERS)\n handlers.extend(static_web_handlers)\n\n if serve_admin:\n # Configure and verify admin handlers.\n admin_handlers = deepcopy(ADMIN_HANDLERS)\n for path, handler in admin_handlers:\n if not issubclass(handler, basic_auth.BasicAuthHandler):\n raise TypeError('Administration handlers must '\n 'subclass BasicAuthHandler')\n handlers.extend(admin_handlers)\n\n # Catch-all handler for 404 pages.\n handlers.extend([(r'/.*', base.PageNotFoundHandler)])\n\n # Create application and separately add handlers for the short domain and the \n # regular domain. 
\n # \n # Note that, although the short-domain handlers are added after the initial construction\n # of the Application, those routes will take priority over the routes in the handlers \n # array.\n application = web.Application(handlers, **settings)\n application.add_handlers(re.escape(options.options.short_domain), SHORT_DOMAIN_HANDLERS)\n\n # Start the HTTP server.\n http_server = httpserver.HTTPServer(\n application, xheaders=options.options.xheaders,\n ssl_options={\n 'certfile': secrets.GetSecretFile('%s.crt' % settings['domain']),\n 'keyfile': secrets.GetSecretFile('%s.key' % settings['domain']),\n } if options.options.ssl else None)\n with stack_context.NullContext():\n http_server.listen(options.options.port)\n\n # Setup redirect server for HTTP -> HTTPS.\n if options.options.ssl:\n http_settings = {\n 'host': ServerEnvironment.GetHost(),\n 'redirect_port': options.options.redirect_port,\n 'xheaders': options.options.xheaders,\n }\n\n redirect_handlers = [\n (r'/(.*)', index.RedirectHandler),\n ]\n redirect_server = httpserver.HTTPServer(web.Application(\n redirect_handlers, **http_settings))\n with stack_context.NullContext():\n redirect_server.listen(options.options.insecure_port)\n\n # Ensure that system users have been created if running with a local db (needs server to be running).\n if options.options.localdb:\n yield CreateSystemUsers(client)\n\n # Run the server until it hits an exception or stop signal.\n yield gen.Task(lambda callback: None)",
"def setup_server():\n\n require('environment', provided_by=env.environments)\n upgrade_packages()\n # Install required system packages for deployment, plus some extras\n # Install pip, and use it to install virtualenv\n install_packages()\n sudo(\"easy_install -i http://d.pypi.python.org/simple -U pip\")\n sudo(\"pip install -i http://d.pypi.python.org/simple -U virtualenv\")\n create_postgis_template()\n create_db_user()\n create_db()\n create_webserver_user()",
"def main():\r\n LOG.info('Starting server build.')\r\n web.run_app(init_app(),\r\n host=os.environ.get('APP_HOST', CONFIG.registry.get('app_host', '0.0.0.0')),\r\n port=int(os.environ.get('APP_PORT', CONFIG.registry.get('app_port', 8080))),\r\n shutdown_timeout=0,\r\n ssl_context=application_security())",
"def Run(self):\n self.BuildWebAppSite()\n\n self.BuildRPCSite(self.env.umpire_cli_port, self.methods_for_cli, '0.0.0.0')\n self.BuildRPCSite(self.env.umpire_rpc_port, self.methods_for_dut)\n\n # Start services.\n reactor.callWhenRunning(self.OnStart)\n # And start reactor loop.\n reactor.run()",
"def startup(req=None):\n global started\n if not started:\n started = True\n cherrypy.server.start(init_only=True, server_class=None)\n return 0 # apache.OK",
"async def prepare_web(self):\n try:\n await self.serve_web()\n except asyncio.CancelledError:\n pass\n except Exception:\n self.logger.exception(\"web: an error occurred while serving:\")",
"def _configure_webservers(self, node_roles):\n logger.info(\"Configuring uwsgi\")\n with hide(*fab_quiet):\n # Configure the uwsgi app\n context = {\n 'project_root': env.project_root,\n 'domain': env.pstat_url,\n }\n upload_template(\n '../config/tpl/newrelic/policystat.ini',\n '/etc/newrelic/policystat.ini',\n context,\n use_sudo=True\n )\n upload_template(\n '../config/tpl/uwsgi/policystat.yaml',\n '/etc/uwsgi/policystat.yaml',\n context,\n use_sudo=True\n )\n\n # Configure the supervisord config for uwsgi\n newrelic_conf = self.conf.get('newrelic', {})\n new_relic_environment = newrelic_conf.get('environment', None)\n context = {\n 'new_relic_environment': new_relic_environment,\n }\n changed = upload_template_changed(\n '../config/tpl/uwsgi/etc/supervisor/conf.d/uwsgi.conf',\n '/etc/supervisor/conf.d/uwsgi.conf',\n use_sudo=True,\n mode=0600,\n use_jinja=True,\n context=context,\n )\n if changed:\n self.modified_services.append(SUPERVISORD)\n\n # Give user policystat access to configuration files\n files = [\n '/etc/uwsgi/policystat.yaml',\n '/etc/newrelic/policystat.ini',\n ]\n sudo('chown %s %s' % (F_CHOWN, ' '.join(files)))\n\n logger.info(\"Configuring nginx\")\n # Configure the nginx host\n context = {\n 'project_root': env.project_root,\n 'domain': env.pstat_url,\n }\n upload_template(\n '../config/tpl/nginx/pstat',\n '/etc/nginx/sites-available/%s' % env.pstat_url,\n context,\n use_sudo=True,\n )\n\n # Make sure no other sites are enabled\n sudo('rm -f /etc/nginx/sites-enabled/*')\n\n # Enable our site\n sudo(\n 'ln -s '\n '/etc/nginx/sites-available/%(pstat_url)s '\n '/etc/nginx/sites-enabled/%(pstat_url)s' % env\n )",
"def setup():\n require('hosts', provided_by=[prod])\n require('code_root')\n sudo('apt-get update')\n sudo('apt-get install -y python-setuptools')\n sudo('easy_install pip')\n sudo('pip install virtualenv')\n sudo('aptitude install -y apache2')\n sudo('aptitude install -y libapache2-mod-wsgi')\n sudo('apt-get install -y nginx')\n update_webserver_config()\n sudo('mkdir -p %s; cd %s; virtualenv .;' % (env.code_root, env.code_root))\n sudo('cd %s;mkdir releases; mkdir shared; mkdir packages; mkdir shared/media; mkdir shared/media/file;' % (env.code_root))\n deploy()",
"def web():\n env['remote_port'] = env['port_map']['8000']\n\n sys.stdout.write('Launching browser on remote port %(remote_port)s\\n' % env)\n\n run('open http://%(relay_server)s:%(remote_port)s' % env)",
"def stop_slave_web():\n print(\"Stopping slave web\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/web\", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to stop the web dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the web process to stop\n print(\"Waiting a bit\")\n time.sleep(2)\n return True",
"def setup_client():\n webtest.WebCase.PORT = cherrypy.server.socket_port\n webtest.WebCase.HOST = cherrypy.server.socket_host\n if cherrypy.server.ssl_certificate:\n CPWebCase.scheme = 'https'",
"def server():\n package('apache2')\n require_started('apache2')"
] | [
"0.6809068",
"0.67656696",
"0.6656338",
"0.6610853",
"0.65221626",
"0.6354455",
"0.62950474",
"0.6216161",
"0.62008417",
"0.62003654",
"0.6176954",
"0.601398",
"0.6004903",
"0.5986498",
"0.59861344",
"0.5985435",
"0.5959048",
"0.5956801",
"0.5941647",
"0.58429754",
"0.58145416",
"0.5787388",
"0.5786717",
"0.57728636",
"0.5770331",
"0.5760817",
"0.5730788",
"0.5726301",
"0.5689836",
"0.5688232"
] | 0.81821865 | 0 |
Stops the web process on the slave. | def stop_slave_web():
print("Stopping slave web")
r = req.patch(f"{SLAVE_API_URL}/formation/web", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to stop the web dyno on slave")
print(r.text)
return False
# wait a bit for the web process to stop
print("Waiting a bit")
time.sleep(2)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def webserver_stop():\n run(\"kill $(cat %s)\" % GUNICORN_PIDFILE)\n run(\"rm %s\" % GUNICORN_PIDFILE)",
"def stop_slave_worker():\n print(\"Stopping slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to stop the worker dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the worker process to stop\n print(\"Waiting a bit\")\n time.sleep(2)\n return True",
"def stop():\n server = current_server()\n server.stop()",
"def stop(self):\n\n self._stop_server = True\n\n self.join()\n self.httpd.server_close()",
"def stop(self):\n self.scion_sh('stop')",
"def stop(self):\n # print \"process shutdown complete\"",
"def stop(self):\n self.api.stop()",
"def stop(self):\n response = self._shell_client.exec_command('{} stop'.format(self._executable_path), async=False)\n\n if not response.is_success():\n self._logger.error('clusterrunner stop failed on host {} with output: {}, error: {}'.format(\n self.host, response.raw_output, response.raw_error))",
"def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=True,\n kill_string=IperfServer.KILL_STRING)\n self.child_pid = None",
"def stop_server(request):\n def stop_callback():\n global process\n process.terminate()\n request.addfinalizer(stop_callback)",
"def stop(self):\n if self._process is not None:\n self._process.terminate()",
"def stop(self):\n return self._send_command(\"stop\")",
"def stop(self):\n\n log_info(\"Stopping LiteServ ...\")\n\n self.logfile.flush()\n self.logfile.close()\n self.process.kill()\n self.process.wait()\n\n self._verify_not_running()",
"def stop(self):\n self.shutdown_ = True\n if self.running():\n os.kill(self.server_pid_, signal.SIGTERM)",
"def stop(self) -> str:\n return self.rpc_call(\"stop\")",
"def stop(self):\n if self._proc_is_alive():\n\n if os.name == 'nt':\n # os.killpg is not available on Windows\n # See: https://bugs.python.org/issue5115\n self._proc.kill()\n\n # When we kill the child process on Windows the return code\n # gets set to 1, so we want to reset the return code back to 0\n # in order to avoid creating logging output in the download(...)\n # method\n self._proc.returncode = 0\n else:\n os.killpg(self._proc.pid, signal.SIGKILL)\n\n self._set_returncode(self.STOPPED)",
"def stop():\n global server_handle\n server_handle.kill()\n server_handle = None",
"def stop(self):\n shutdown_url = self._env[\"DATASTORE_HOST\"] + \"/shutdown\"\n req = urllib.request.Request(shutdown_url, method=\"POST\")\n urllib.request.urlopen(req)",
"def stopwasp():\n\n\trespond = send_command('stopwasp')",
"def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=(not self.length),\n kill_string=IperfClient.KILL_STRING)\n self.child_pid = None",
"def stop(self):\n self.logger.info('Shutting down SimpleHTTPServer')\n stop_cmd = \"pkill -9 -f '{0}'\".format(self.server_cmd)\n self._execute_command(stop_cmd)",
"def Stop(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('stop', payload=payload, response_object=None)",
"def stop():\n with cd(env.directory):\n sudo('./bin/supervisorctl stop all', user=env.deploy_user)",
"def stop(self):\n os.system(\"taskkill /T /F /PID %s\" % self.process.pid)\n self.running = False",
"def stop(self):\n if not self.process_pid:\n raise Exception('why is this being called? %s' % self.server_name)\n\n if self.stop_kill:\n os.kill(self.process_pid, signal.SIGTERM)\n rc = wait_for_fork(self.process_pid, raise_error=False)\n return (rc, '', '')",
"def stop() -> None:\n global _server\n if _server:\n try:\n _server.shutdown()\n except Exception:\n pass",
"def stop_server(self):\r\n # TODO-SDH Add way to stop the server from running.\r",
"def stop_test(self, request):\n request.worker.stop_test(request.message.test_id)\n\n return SuccessReply()",
"def stop(self):\n try:\n self.process.terminate()\n self.process = None\n except AttributeError:\n return",
"def processStop(name):\n imrclient.update_server_info()\n imrclient.process_stop(name)"
] | [
"0.7413745",
"0.7409462",
"0.70902383",
"0.7082552",
"0.68931115",
"0.68733066",
"0.68609893",
"0.6817112",
"0.67590433",
"0.67129856",
"0.6705761",
"0.6702287",
"0.66915804",
"0.6671726",
"0.6669743",
"0.6660702",
"0.66476685",
"0.66400963",
"0.6629474",
"0.6618239",
"0.6569697",
"0.6561379",
"0.65600526",
"0.65489125",
"0.6530412",
"0.652372",
"0.65151525",
"0.6506462",
"0.6502744",
"0.6497385"
] | 0.8396582 | 0 |
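
The dyno helpers in this dump (stop_slave_web in the record above, plus the start/stop worker variants that follow) all repeat the same PATCH-then-sleep pattern against a Heroku-style formation endpoint. A minimal shared helper is sketched below; it assumes `req` is the `requests` module under an alias and that the URL, header, and payload constants are supplied by the caller, as in the snippets.

import time
import requests as req  # assumption: the snippets appear to alias requests as `req`

def set_formation(base_url, headers, proc_type, payload, settle_seconds=2):
    """PATCH <base_url>/formation/<proc_type> and pause while the dyno settles."""
    r = req.patch(f"{base_url}/formation/{proc_type}", json=payload, headers=headers)
    if r.status_code != req.codes.ok:
        print(f"Unable to update the {proc_type} dyno")
        print(r.text)
        return False
    # mirror the fixed wait used by the original helpers
    print("Waiting a bit")
    time.sleep(settle_seconds)
    return True

# e.g. stop_slave_web() is equivalent to:
# set_formation(SLAVE_API_URL, SLAVE_API_HEADERS, "web", API_PAYLOAD_0, settle_seconds=2)
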
Starts the worker process on the master. | def start_master_worker():
print("Starting master worker")
r = req.patch(f"{MASTER_API_URL}/formation/worker", json=API_PAYLOAD_1, headers=MASTER_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to start the worker dyno on master")
print(r.text)
return False
# wait a bit for the worker process to start
print("Waiting a bit")
time.sleep(10)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def worker(ctx_obj):\n execute(start_worker_command(settings=ctx_obj['settings']))",
"def start(self):\n control_process = mp.Process(target = self._start, args = [])\n control_process.start()",
"def start_slave_worker():\n print(\"Starting slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to start the worker dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the worker process to start up\n print(\"Waiting a bit\")\n time.sleep(10)\n return True",
"def run_worker(self):\n # TODO(xiejw): To allow execution framework to add train hooks.\n return self._start_distributed_training()",
"def start(self):\n self._do_work.set()\n self._worker_thread.start()",
"def start(self) -> None:\n JavaGate().exec_process_instance(\n self._user,\n self._project,\n self.name,\n \"\",\n self.worker_group,\n self.warning_type,\n self.warning_group_id,\n 24 * 3600,\n )",
"def __init__(self, master):\n super().__init__()\n self.master = master\n self.proc = None\n self.start()",
"def start(self):\n \n rpc = self.smartstarter.rpcsystem\n \n process = yield self.smartstarter.start()\n \n try:\n \n make_worker_url = yield process.get_function_url(make_worker)\n make_worker_stub = rpc.create_function_stub(make_worker_url)\n \n worker = yield make_worker_stub(\"local\") # TODO remove network\n \n worker.get_function_url = process.get_function_url_stub\n \n worker.reset = rpc.create_local_function_stub(process.reset)\n worker.stop = rpc.create_local_function_stub(process.stop)\n worker.kill = rpc.create_local_function_stub(process.kill)\n worker.stdout = process.stdout.make_stub(rpc)\n worker.stderr = process.stderr.make_stub(rpc)\n worker.exited = process.exited.make_stub(rpc)\n\n except:\n process.kill()\n raise \n \n\n \n # worker.stdout.add_callback(stdout)\n # worker.stderr.add_callback(stderr)\n \n# receiver_stub = rpc.create_local_function_stub(hook.receiver)\n# hookinstall_url = yield process.get_function_url(hook.install_hook)\n# hookinstall_url_stub = rpc.create_function_stub(hookinstall_url)\n# yield hookinstall_url_stub(receiver_stub)\n \n defer.returnValue(worker)",
"def main():\n setup()\n master = Master()\n master.start()",
"def run(self, worker_num=1):\n worker_proc = Process(target=self._start_worker, args=(worker_num,))\n worker_proc.start()\n\n beat_proc = Process(target=self._start_beat, args=())\n beat_proc.start()\n\n beat_proc.join()\n worker_proc.join()",
"def start(self):\n if not self._worker:\n # the worker might be already created in case of deserialization\n self._worker = APIWorker(self.queue)\n self._worker.start()",
"def _StartWorkerProcess(self, process_name):",
"def run(self):\n self.process.start()",
"def start(self):\r\n thread = threading.Thread(target=self.run)\r\n try:\r\n thread.start()\r\n except RuntimeError as e:\r\n raise SchedulerError(f\"Failed to start worker '{self.WORKER_ID}': \" + str(e))",
"def start(self):\n\n dburl = dbconn.DbURL()\n gparray = GpArray.initFromCatalog(dburl, utility=True)\n numcontent = gparray.getNumSegmentContents()\n standby = gparray.standbyMaster\n master = gp.MasterStart(\"Starting Master Standby\",\n self.datadir, self.port, standby.dbid,\n 0, numcontent, None, None, None)\n # -w option would wait forever.\n master.cmdStr = master.cmdStr.replace(' -w', '')\n master.run(validateAfter=True)\n\n return master.get_results()",
"def start():\n\n start_server()",
"def start_work(self):\n self.worker_thread = WorkerThread(self.feedback_log, self.job_list) # only created when processing begins. May be recreated\n self.worker_thread.daemon = True\n self.worker_thread.start()",
"def init_worker (self):\n print(\"initializing map worker in directory: \", os.getcwd ())\n\n context = zmq.Context()\n\n # Socket to receive messages on. Worker uses PULL from the master\n # To that end, we connect to the server. The map worker pulls info\n # from the base port of the master\n self.receiver = context.socket (zmq.PULL)\n self.receiver.setsockopt (zmq.RCVHWM, 0)\n connect_addr = \"tcp://\"+ self.master_ip + \":\" + str (self.master_port)\n print(\"Using PULL, map worker connecting to \", connect_addr)\n self.receiver.connect (connect_addr)\n \n # As part of the initialization, we tell the master that we are up.\n # This information is to be pushed to the master at a port which is\n # 2 more than the base of the master.\n self.init_sender = context.socket (zmq.PUSH)\n self.init_sender.setsockopt (zmq.LINGER, -1)\n connect_addr = \"tcp://\" + self.master_ip + \":\" + str (self.master_port+2)\n print(\"Using PUSH, map worker connecting to worker up barrier at \", connect_addr)\n self.init_sender.connect (connect_addr)\n #bind_addr = \"tcp://\" + self.master_ip + \":\" + str (self.master_port+2)\n #print \"Using PUSH, map worker binding to worker up barrier at \", bind_addr\n #self.init_sender.bind (bind_addr)\n\n # now send an ACK to the barrier to let it know that we are up\n self.init_sender.send (b'0')\n\n # close the socket\n # self.init_sender.close ()\n\n # To send the results, we need to initialize the send address to point\n # to the map results barrier\n #\n # Note that the port number of the maps result barrier is 3 more than\n # the port of the master. Initialize it so we can send results \n self.results_sender = context.socket (zmq.PUSH)\n self.results_sender.setsockopt (zmq.LINGER, -1)\n self.results_sender.setsockopt (zmq.SNDHWM, 0)\n connect_addr = \"tcp://\" + self.master_ip + \":\" + str (self.master_port+3)\n print(\"Using PUSH, map worker connecting to map results barrier at \", connect_addr)\n self.results_sender.connect (connect_addr)\n #bind_addr = \"tcp://\" + self.master_ip + \":\" + str (self.master_port+3)\n #print \"Using PUSH, map worker binding to map results barrier at \", bind_addr\n #self.results_sender.bind (bind_addr)",
"def start():\n events.bind('jobs.cancel', 'slicer_cli_web_worker', _manageWorkers)\n events.bind('jobs.schedule', 'slicer_cli_web_worker', _manageWorkers)\n events.bind('jobs.job.update.after', 'slicer_cli_web_worker', _manageWorkers)\n events.bind('model.job.save.after', 'slicer_cli_web_worker', _manageWorkers)\n\n events.bind('model.setting.save.after', 'slicer_cli_web_worker', _manageWorkersConfig)\n events.bind('model.file.save.after', 'slicer_cli_web_worker', _manageWorkersConfigFile)\n _manageWorkers(None)",
"def workers_start(self, properties=None):\n self._post('agents/start-workers', properties)",
"def start(self):\n for workload in self._workloads:\n self.log.info(\"%-20s STARTING port=%s\" % (workload.name(), workload.port()))\n workload.pre_start()\n workload.start()\n self._monitor_loop()\n self._cleanup()",
"def _run(self):\n self._worker = _stash.runtime.run(\n input_=self.cmd,\n final_ins=self._sp_stdin,\n final_outs=self._sp_stdout,\n final_errs=self._sp_stderr,\n add_to_history=None,\n persistent_level=2,\n is_background=False,\n cwd=self._cwd,\n environ=self._environ\n )\n self.pid = self._worker.job_id",
"def start():\n Networker.stop()\n Networker.Instance = Networker()",
"def start(self):\n\n self._task.start()",
"def start(self):\n self.p.start()",
"def run(self):\n # Daemonize and continue the work.\n if not self.daemonize():\n return\n \n # run the work.\n syslog.syslog('This is worker daemon and we will now begin the work.')\n self._do_work()\n\n # shall never reach.\n return",
"def startService(self):\n super(MasterService, self).startService()\n self.dispatcher.startDispatching()",
"def main() -> None:\n worker = Worker()\n worker.do_work()",
"def main():\n server = ThreadedServer(MasterControllerService, port=5000)\n server.start()",
"def run(self):\n self.submit()\n self.start()"
] | [
"0.74038696",
"0.7210624",
"0.70151126",
"0.7001591",
"0.6949406",
"0.6915681",
"0.68317443",
"0.67932683",
"0.6752919",
"0.6713696",
"0.6707904",
"0.66620183",
"0.66078997",
"0.6578205",
"0.647364",
"0.6429986",
"0.63926333",
"0.6350077",
"0.6317507",
"0.6302991",
"0.6230232",
"0.6217646",
"0.6204072",
"0.6202991",
"0.62004286",
"0.6170012",
"0.6158643",
"0.61457145",
"0.61429983",
"0.6136926"
] | 0.7759477 | 0 |
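
start_master_worker sleeps a fixed 10 seconds and hopes the dyno is up by then. If these URLs point at the Heroku Platform API (which the /formation/<type> path suggests), the dyno list can be polled until the new worker reports "up" instead. The /dynos endpoint and its "type"/"state" fields below are assumptions based on Heroku's public API, not taken from the source.

import time
import requests as req  # assumption: same alias as in the snippets

def wait_for_dyno(base_url, headers, proc_type, timeout=60, interval=2):
    """Poll <base_url>/dynos until a dyno of the given type reports state 'up'."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        r = req.get(f"{base_url}/dynos", headers=headers)
        if r.status_code == req.codes.ok:
            states = [d.get("state") for d in r.json() if d.get("type") == proc_type]
            if "up" in states:
                return True
        time.sleep(interval)
    return False

# e.g. start_master_worker() could replace its time.sleep(10) with:
# wait_for_dyno(MASTER_API_URL, MASTER_API_HEADERS, "worker")
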
Stops the worker process on the master. | def stop_master_worker():
print("Stopping master worker")
r = req.patch(f"{MASTER_API_URL}/formation/worker", json=API_PAYLOAD_0, headers=MASTER_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to stop the worker dyno on master")
print(r.text)
return False
# wait a bit for the worker process to stop
print("Waiting a bit")
time.sleep(2)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stop(self):\n\n # immediate is necessary if it's in recovery (for now).\n # we don't care the result.\n master = gp.MasterStop(\"Stopping Master Standby\",\n self.datadir, mode='immediate')\n master.run()",
"def stop_slave_worker():\n print(\"Stopping slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to stop the worker dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the worker process to stop\n print(\"Waiting a bit\")\n time.sleep(2)\n return True",
"def terminate(self):\n self._worker.kill()",
"def stop(self):\n if self._process is not None:\n self._process.terminate()",
"def stop(self):\n # print \"process shutdown complete\"",
"def stop():\n if Networker.Instance is not None:\n Networker.Instance.end()\n Networker.Instance = None",
"def stop(self):\n self.scion_sh('stop')",
"def stop(self):\n debug(\"CBA4.__worker_thread.stop()\")\n self.__run = False\n #end stop()",
"def stop(self):\n try:\n self.process.terminate()\n self.process = None\n except AttributeError:\n return",
"async def stop(self):\n await self.node._send(op='stop', guildId=self.guild_id)\n self.current = None",
"def stop(self):\n os.system(\"taskkill /T /F /PID %s\" % self.process.pid)\n self.running = False",
"def stop(self):\n with open(self.pid_file, 'w') as fhandle:\n fhandle.write('exit')\n self.queue.close()\n self.queue.join_thread()\n self.monitor_process.join()\n self.remove_pid_file()",
"def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=True,\n kill_string=IperfServer.KILL_STRING)\n self.child_pid = None",
"def stop(self):\r\n self.stopped = True\r\n #self.worker.join()\r\n self.FPSThread.join()",
"def stop(self):\n if self._proc_is_alive():\n\n if os.name == 'nt':\n # os.killpg is not available on Windows\n # See: https://bugs.python.org/issue5115\n self._proc.kill()\n\n # When we kill the child process on Windows the return code\n # gets set to 1, so we want to reset the return code back to 0\n # in order to avoid creating logging output in the download(...)\n # method\n self._proc.returncode = 0\n else:\n os.killpg(self._proc.pid, signal.SIGKILL)\n\n self._set_returncode(self.STOPPED)",
"def stop(self):\n response = self._shell_client.exec_command('{} stop'.format(self._executable_path), async=False)\n\n if not response.is_success():\n self._logger.error('clusterrunner stop failed on host {} with output: {}, error: {}'.format(\n self.host, response.raw_output, response.raw_error))",
"def stop(self):\n self.api.stop()",
"def stop():\n server = current_server()\n server.stop()",
"def stop_thread(self):\n t, e = self.workers[0]\n e = e.set() # put event to set True for stop thread\n del self.workers[0]",
"def stop(self):\n self.send_stop()\n self.join()",
"def stopzmq(self):\n\n self.context.destroy()",
"def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=(not self.length),\n kill_string=IperfClient.KILL_STRING)\n self.child_pid = None",
"def stop(self) -> None:\n self._client.terminate_job(jobId = self.id, reason = self.STOP_REASON)",
"def _stopWorker(idx):\n cmd = _workerConfig['workers'][idx]['stop']\n _workerConfig['started'] -= {idx}\n _commandQueue.add(cmd)",
"def stop(self):\n\n with open(self.pidfile, 'r') as pidfile:\n pid = int(pidfile.read().strip())\n\n proc = psutil.Process(pid)\n proc.terminate()",
"def stop_force(self):\n if self._worker:\n self._worker.stop_immediate()",
"def stop(self):\n if self.p.is_alive():\n self.p.terminate()",
"def stop(self):\n os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)",
"def stop(self):\n self.running = False\n self.join()",
"async def stop(self):\n await self._bot.lavalink.ws.send(op='stop', guildId=self.guild_id)\n self.current = None"
] | [
"0.7388482",
"0.7279379",
"0.7237633",
"0.6967597",
"0.6921898",
"0.68627954",
"0.6821255",
"0.68206173",
"0.67904496",
"0.6728565",
"0.67254424",
"0.67202437",
"0.6709311",
"0.67050105",
"0.6670142",
"0.66601145",
"0.66253245",
"0.6620445",
"0.6605411",
"0.66041267",
"0.65776855",
"0.6569228",
"0.6551724",
"0.65516335",
"0.6542407",
"0.6540424",
"0.6538308",
"0.65205914",
"0.6502034",
"0.6500104"
] | 0.76747125 | 0 |
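
Each of these helpers returns False on any non-200 response and leaves retrying to the caller. A generic retry wrapper with linear backoff — a sketch, not something present in the source — can be layered over any of the boolean-returning functions such as stop_master_worker.

import time

def with_retries(action, attempts=3, backoff=2.0):
    """Call a boolean-returning action until it succeeds or attempts run out."""
    for attempt in range(1, attempts + 1):
        if action():
            return True
        if attempt < attempts:
            # back off a little longer after each failed attempt
            time.sleep(backoff * attempt)
    return False

# e.g. with_retries(stop_master_worker)
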
Starts the worker process on the slave. | def start_slave_worker():
print("Starting slave worker")
r = req.patch(f"{SLAVE_API_URL}/formation/worker", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to start the worker dyno on slave")
print(r.text)
return False
# wait a bit for the worker process to start up
print("Waiting a bit")
time.sleep(10)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_worker(self):\n # TODO(xiejw): To allow execution framework to add train hooks.\n return self._start_distributed_training()",
"def worker(ctx_obj):\n execute(start_worker_command(settings=ctx_obj['settings']))",
"def start_master_worker():\n print(\"Starting master worker\")\n r = req.patch(f\"{MASTER_API_URL}/formation/worker\", json=API_PAYLOAD_1, headers=MASTER_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to start the worker dyno on master\")\n print(r.text)\n return False\n #wait a bit for the worker process to start\n print(\"Waiting a bit\")\n time.sleep(10)\n return True",
"def start(self):\n control_process = mp.Process(target = self._start, args = [])\n control_process.start()",
"def master_to_slave():\n print(\"Shifting from master to slave\")\n stop_master_worker()\n setup_slave_web()\n prepare_push()\n push_to_slave()\n stop_slave_web()\n start_slave_worker()\n print(\"DONE!\")",
"def slave_to_master():\n print(\"Shifting from slave to master\")\n stop_slave_worker()\n setup_slave_web()\n pull_from_slave()\n commit_pull_to_db()\n stop_slave_web()\n start_master_worker()\n print(\"DONE!\")",
"def run(self):\n self.process.start()",
"def __init__(self, master):\n super().__init__()\n self.master = master\n self.proc = None\n self.start()",
"def start(self) -> None:\n JavaGate().exec_process_instance(\n self._user,\n self._project,\n self.name,\n \"\",\n self.worker_group,\n self.warning_type,\n self.warning_group_id,\n 24 * 3600,\n )",
"def start(self):\n \n rpc = self.smartstarter.rpcsystem\n \n process = yield self.smartstarter.start()\n \n try:\n \n make_worker_url = yield process.get_function_url(make_worker)\n make_worker_stub = rpc.create_function_stub(make_worker_url)\n \n worker = yield make_worker_stub(\"local\") # TODO remove network\n \n worker.get_function_url = process.get_function_url_stub\n \n worker.reset = rpc.create_local_function_stub(process.reset)\n worker.stop = rpc.create_local_function_stub(process.stop)\n worker.kill = rpc.create_local_function_stub(process.kill)\n worker.stdout = process.stdout.make_stub(rpc)\n worker.stderr = process.stderr.make_stub(rpc)\n worker.exited = process.exited.make_stub(rpc)\n\n except:\n process.kill()\n raise \n \n\n \n # worker.stdout.add_callback(stdout)\n # worker.stderr.add_callback(stderr)\n \n# receiver_stub = rpc.create_local_function_stub(hook.receiver)\n# hookinstall_url = yield process.get_function_url(hook.install_hook)\n# hookinstall_url_stub = rpc.create_function_stub(hookinstall_url)\n# yield hookinstall_url_stub(receiver_stub)\n \n defer.returnValue(worker)",
"def runSlaveRun():\n\n np.set_printoptions(linewidth=1000)\n function = None\n options = None\n\n # print(\"Process {}/{} reporting for duty!\".format(rank, size))\n\n function = comm.bcast(function, root=0)\n arguments = comm.scatter(options, root=0)\n\n results = function(*arguments)\n\n comm.Barrier()\n comm.gather(results, root=0)\n comm.Disconnect()",
"def start(self):\r\n return self.start_subprocess()",
"def _start_child(self):\n parent_pipe, child_pipe = mp.Pipe()\n self._poll.register(parent_pipe.fileno(), select.POLLIN | select.POLLPRI)\n\n pid = os.fork()\n if not pid:\n ch = Worker(child_pipe, self.server_socket)\n parent_pipe.close()\n ch.run()\n else:\n self._children[parent_pipe.fileno()] = ManagerChild(pid, parent_pipe)\n child_pipe.close()",
"def onSlave(self):",
"def _start_runner(self, spec):\n pid = os.fork()\n if pid:\n # Parent.\n return pid\n # Child.\n #\n # Set the environment variable which tells the runner that it's\n # running under bin/master control. This subtly changes the error\n # behavior of bin/runner.\n env = {'MAILMAN_UNDER_MASTER_CONTROL': '1'}\n # Craft the command line arguments for the exec() call.\n rswitch = '--runner=' + spec\n # Wherever master lives, so too must live the runner script.\n exe = os.path.join(config.BIN_DIR, 'runner')\n # config.PYTHON, which is the absolute path to the Python interpreter,\n # must be given as argv[0] due to Python's library search algorithm.\n args = [sys.executable, sys.executable, exe, rswitch]\n # Always pass the explicit path to the configuration file to the\n # sub-runners. This avoids any debate about which cfg file is used.\n config_file = (config.filename if self._config_file is None\n else self._config_file)\n args.extend(['-C', config_file])\n log = logging.getLogger('mailman.runner')\n log.debug('starting: %s', args)\n # We must pass this environment variable through if it's set,\n # otherwise runner processes will not have the correct VAR_DIR.\n var_dir = os.environ.get('MAILMAN_VAR_DIR')\n if var_dir is not None:\n env['MAILMAN_VAR_DIR'] = var_dir\n # For the testing framework, if these environment variables are set,\n # pass them on to the subprocess.\n for envvar in PRESERVE_ENVS:\n if envvar in os.environ:\n env[envvar] = os.environ[envvar]\n args.append(env)\n os.execle(*args)\n # We should never get here.\n raise RuntimeError('os.execle() failed')",
"def start(self):\n self._proc = self._get_subprocess()\n self._pid = self._proc.pid\n self._return_code = None",
"def _run(self):\n self._worker = _stash.runtime.run(\n input_=self.cmd,\n final_ins=self._sp_stdin,\n final_outs=self._sp_stdout,\n final_errs=self._sp_stderr,\n add_to_history=None,\n persistent_level=2,\n is_background=False,\n cwd=self._cwd,\n environ=self._environ\n )\n self.pid = self._worker.job_id",
"def start_bot(self):\n self.proc = subprocess.Popen(\"./start\", stdin=subprocess.PIPE,\n\t\t\t\t\t\t\t\t\t stdout=subprocess.PIPE,\n\t\t\t\t\t\t\t\t\t cwd=os.path.abspath(self.path))",
"def start(self):\n self._do_work.set()\n self._worker_thread.start()",
"def _run_server_cycle(self) -> None:\n\n self._prep_subprocess_environment()\n\n # Launch the binary and grab its stdin;\n # we'll use this to feed it commands.\n self._subprocess_launch_time = time.time()\n\n # Set an environment var so the server process knows its being\n # run under us. This causes it to ignore ctrl-c presses and other\n # slight behavior tweaks. Hmm; should this be an argument instead?\n os.environ['BA_SERVER_WRAPPER_MANAGED'] = '1'\n\n print(f'{Clr.CYN}Launching server subprocess...{Clr.RST}')\n binary_name = ('ballisticacore_headless.exe'\n if os.name == 'nt' else './ballisticacore_headless')\n self._subprocess = subprocess.Popen(\n [binary_name, '-cfgdir', 'ba_root'],\n stdin=subprocess.PIPE,\n cwd='dist')\n\n # Do the thing.\n # No matter how this ends up, make sure the process is dead after.\n try:\n self._run_subprocess_until_exit()\n finally:\n self._kill_subprocess()\n\n # If we want to die completely after this subprocess has ended,\n # tell the main thread to die.\n if self._wrapper_shutdown_desired:\n\n # Only do this if the main thread is not already waiting for\n # us to die; otherwise it can lead to deadlock.\n if not self._done:\n self._done = True\n\n # This should break the main thread out of its blocking\n # interpreter call.\n os.kill(os.getpid(), signal.SIGTERM)",
"def startJob(self):\n\n logging.info(\"STARTING job with args %r\" % self.args)\n self.pid = os.fork()\n\n if self.pid == 0:\n # Redirect the stdout output to dev null in the child.\n logPath = self.args['resourcePrefix'] + self.args['path'] + self.args['filename'] + '_log.txt'\n logFile = open(logPath, 'wb')\n\n # A set of jobs. Currently [0 0] is flat ground, [1 0] is a block field, [0 1] is hilly terrain, and [1 1] is both\n # This will expand in the future.\n terrainMatrix = self.args['terrain']\n # Update this if the subprocess call gets changed\n if len(terrainMatrix[0]) < 4:\n raise NTRTMasterError(\"Not enough terrain args!\")\n \n # Run through a set of binary job options. Currently handles terrain switches\n for run in terrainMatrix:\n if (len(run)) >= 5:\n trialLength = run[4]\n else:\n trialLength = self.args['length']\n #TODO improve error handling here\n subprocess.check_call([self.args['executable'], \"-l\", self.args['filename'], \"-s\", str(trialLength), \"-b\", str(run[0]), \"-H\", str(run[1]), \"-a\", str(run[2]), \"-B\", str(run[3])], stdout=logFile)\n sys.exit()",
"def run(self, worker_num=1):\n worker_proc = Process(target=self._start_worker, args=(worker_num,))\n worker_proc.start()\n\n beat_proc = Process(target=self._start_beat, args=())\n beat_proc.start()\n\n beat_proc.join()\n worker_proc.join()",
"def main():\n setup()\n master = Master()\n master.start()",
"def start():\n\n start_server()",
"def run(self):\n self.node_id = CONFIG.node_id\n self.running = Event()\n if not CONFIG.master_pub or not CONFIG.master_repl:\n print colors.yellow(\"Master IP:port is not set in config file (%s)\"\n % CONFIG._fn)\n master_pub = raw_input(\"Enter Master PUB uri (IP or IP:port):\")\n if \":\" in master_pub:\n ip, _, port = master_pub.rpartition(\":\")\n else:\n ip = master_pub\n port = 5551\n CONFIG.update(\"General\", \"master_pub\", \"%s:%s\" % (ip,\n port))\n master_repl = raw_input(\"Enter Master REPLY uri (IP or IP:port), \"\n \"hit ENTER for default(%s:5552):\" % ip)\n if not master_repl:\n port = 5552\n elif \":\" in master_repl:\n ip, _, port = master_repl.rpartition(\":\")\n else:\n ip = master_repl\n port = 5552\n CONFIG.update(\"General\", \"master_repl\", \"%s:%s\" % (ip,\n port))\n CONFIG.reload()\n\n if not validate_address(CONFIG.master_pub) or \\\n not validate_address(CONFIG.master_repl):\n LOG.error('Server IP not present in config or is not valid.\\n'\n 'Check your config')\n exit(1)\n\n if not self.node_id:\n LOG.error(\"The node id not set in config. \"\n \"Run program with config option first\")\n exit(1)\n\n self.backend = self.transport_class.from_config(\n CONFIG, **vars(self.args))\n load_plugins(CONFIG)\n self.sessions = {}\n self.matcher = Matcher(self.node_id, self.backend.meta())\n\n LOG.info(\"Starting node\")\n self.details()\n self._sig_int = signal.getsignal(signal.SIGINT)\n self._sig_term = signal.getsignal(signal.SIGTERM)\n\n if os.name == 'nt':\n # Use Ctrl+C to invoke clean on Windows\n import win32api\n win32api.SetConsoleCtrlHandler(self.clean, True)\n else:\n signal.signal(signal.SIGINT, self._handle_terminate)\n signal.signal(signal.SIGTERM, self._handle_terminate)\n\n # Invoke clean for sessions\n signal.signal(signal.SIGHUP, self.clean)\n\n if not self.backend.prepare():\n LOG.info(\"Cannot start transport backend\")\n self._handle_terminate()\n exit(1)\n\n def request_processor():\n req_queue = self.backend.consume_queue('requests',\n ident=\"DISPATCHER\")\n poller = self.backend.create_poller(req_queue)\n while not self.running.is_set():\n try:\n ready = poller.poll(200)\n if not ready:\n continue\n if req_queue in ready:\n message = req_queue.recv()[0]\n if not message:\n continue\n job = JobTarget.build(message)\n if job:\n self.target_match(job)\n except ConnectionError:\n break\n except Exception:\n continue\n req_queue.close()\n\n Thread(target=request_processor).start()\n\n self.backend.loop()\n\n LOG.info(\"Node exited\")",
"def start(self):\n if config['port'] or config['host']:\n port = config['port'] or 5222\n host = config['host'] or sleekxmpp.JID(config['jid']).host\n addr = (host, port)\n else:\n addr = tuple()\n self.connect(addr)\n self.process(threaded=True)",
"def start(self):\n if self._start_event is None:\n _call_spawn_callbacks(self)\n hub = get_my_hub(self) # pylint:disable=undefined-variable\n self._start_event = hub.loop.run_callback(self.switch)",
"def start(self):\n\n dburl = dbconn.DbURL()\n gparray = GpArray.initFromCatalog(dburl, utility=True)\n numcontent = gparray.getNumSegmentContents()\n standby = gparray.standbyMaster\n master = gp.MasterStart(\"Starting Master Standby\",\n self.datadir, self.port, standby.dbid,\n 0, numcontent, None, None, None)\n # -w option would wait forever.\n master.cmdStr = master.cmdStr.replace(' -w', '')\n master.run(validateAfter=True)\n\n return master.get_results()",
"def start(self):\n for workload in self._workloads:\n self.log.info(\"%-20s STARTING port=%s\" % (workload.name(), workload.port()))\n workload.pre_start()\n workload.start()\n self._monitor_loop()\n self._cleanup()",
"def Start(self):\n\n\n\n assert not self._process, 'Start() can only be called once'\n self._process = subprocess.Popen(self._args)"
] | [
"0.7050969",
"0.68271756",
"0.6755335",
"0.67251605",
"0.65629065",
"0.65401715",
"0.6436881",
"0.6374149",
"0.6337312",
"0.6323815",
"0.6307696",
"0.6272211",
"0.62686425",
"0.62494606",
"0.6231326",
"0.61917204",
"0.61633587",
"0.61614466",
"0.6132043",
"0.61305064",
"0.6127619",
"0.6089708",
"0.6079754",
"0.60447276",
"0.6020899",
"0.6017201",
"0.5988872",
"0.5979403",
"0.59737325",
"0.59626025"
] | 0.7931532 | 0 |
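
None of the constants used by the start/stop helpers above are shown in the dump. A plausible shape for them, assuming the Heroku Platform API, is sketched below purely for orientation; every value here is a guess and none of it appears in the source.

# hypothetical configuration module for the helpers above
MASTER_API_URL = "https://api.heroku.com/apps/<master-app-name>"
SLAVE_API_URL = "https://api.heroku.com/apps/<slave-app-name>"

_COMMON_HEADERS = {
    "Accept": "application/vnd.heroku+json; version=3",  # Heroku Platform API version header
    "Content-Type": "application/json",
}
MASTER_API_HEADERS = {**_COMMON_HEADERS, "Authorization": "Bearer <master-token>"}
SLAVE_API_HEADERS = {**_COMMON_HEADERS, "Authorization": "Bearer <slave-token>"}

# formation payloads: scale the dyno type down to 0 or up to 1
API_PAYLOAD_0 = {"quantity": 0}
API_PAYLOAD_1 = {"quantity": 1}
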
Stops the worker process on the slave. | def stop_slave_worker():
print("Stopping slave worker")
r = req.patch(f"{SLAVE_API_URL}/formation/worker", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to stop the worker dyno on slave")
print(r.text)
return False
# wait a bit for the worker process to stop
print("Waiting a bit")
time.sleep(2)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=True,\n kill_string=IperfServer.KILL_STRING)\n self.child_pid = None",
"def stop(self):\n\n # immediate is necessary if it's in recovery (for now).\n # we don't care the result.\n master = gp.MasterStop(\"Stopping Master Standby\",\n self.datadir, mode='immediate')\n master.run()",
"def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=(not self.length),\n kill_string=IperfClient.KILL_STRING)\n self.child_pid = None",
"def stop(self):\n response = self._shell_client.exec_command('{} stop'.format(self._executable_path), async=False)\n\n if not response.is_success():\n self._logger.error('clusterrunner stop failed on host {} with output: {}, error: {}'.format(\n self.host, response.raw_output, response.raw_error))",
"def stop_master_worker():\n print(\"Stopping master worker\")\n r = req.patch(f\"{MASTER_API_URL}/formation/worker\", json=API_PAYLOAD_0, headers=MASTER_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to stop the worker dyno on master\")\n print(r.text)\n return False\n #wait a bit for the worker process to stop\n print(\"Waiting a bit\")\n time.sleep(2)\n return True",
"def stop(self):\n # print \"process shutdown complete\"",
"def stop_slave_web():\n print(\"Stopping slave web\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/web\", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to stop the web dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the web process to stop\n print(\"Waiting a bit\")\n time.sleep(2)\n return True",
"def stop(self):\n if self._process is not None:\n self._process.terminate()",
"def stop(self):\n if not self.process_pid:\n raise Exception('why is this being called? %s' % self.server_name)\n\n if self.stop_kill:\n os.kill(self.process_pid, signal.SIGTERM)\n rc = wait_for_fork(self.process_pid, raise_error=False)\n return (rc, '', '')",
"def kill(self):\n \n self.killSlavePids()",
"def stop(self):\n self.scion_sh('stop')",
"def stop():\n if Networker.Instance is not None:\n Networker.Instance.end()\n Networker.Instance = None",
"def stop(self):\n if self._proc_is_alive():\n\n if os.name == 'nt':\n # os.killpg is not available on Windows\n # See: https://bugs.python.org/issue5115\n self._proc.kill()\n\n # When we kill the child process on Windows the return code\n # gets set to 1, so we want to reset the return code back to 0\n # in order to avoid creating logging output in the download(...)\n # method\n self._proc.returncode = 0\n else:\n os.killpg(self._proc.pid, signal.SIGKILL)\n\n self._set_returncode(self.STOPPED)",
"def stop(self):\n with open(self.pid_file, 'w') as fhandle:\n fhandle.write('exit')\n self.queue.close()\n self.queue.join_thread()\n self.monitor_process.join()\n self.remove_pid_file()",
"def stop(self):\n try:\n self.process.terminate()\n self.process = None\n except AttributeError:\n return",
"def stop(self):\n\n log_info(\"Stopping LiteServ ...\")\n\n self.logfile.flush()\n self.logfile.close()\n self.process.kill()\n self.process.wait()\n\n self._verify_not_running()",
"def stop(self):\n self.send_stop()\n self.join()",
"def terminate_slaves(self):\n self.master.terminate_slaves()",
"def terminate(self):\n self._worker.kill()",
"def stop(self):\n debug(\"CBA4.__worker_thread.stop()\")\n self.__run = False\n #end stop()",
"def stop_monitor(self):\n self._logger.info(\"Stopping monitor...\")\n if self.monitor_lc:\n self.monitor_lc.cancel()\n if self.monitor_process:\n self.monitor_process.terminate()\n os.system(\"pkill -f listenblock\") # To kill the spawned Go run subprocess",
"def stop():\n server = current_server()\n server.stop()",
"def stop(self):\n os.system(\"taskkill /T /F /PID %s\" % self.process.pid)\n self.running = False",
"def stop(self):\n\n with open(self.pidfile, 'r') as pidfile:\n pid = int(pidfile.read().strip())\n\n proc = psutil.Process(pid)\n proc.terminate()",
"def stop(self):\n self._listeners = None\n\n try:\n if self._started_daemon:\n logging.info('Stopping Transmission daemon')\n exec_cmd(['transmission-remote', '--exit'], wait_after=2)\n\n except subprocess.CalledProcessError:\n logging.error('Unable to stop daemon')\n logging.debug('Error details', stack_info=True, exc_info=True)\n\n self._done = True",
"def _stopWorker(idx):\n cmd = _workerConfig['workers'][idx]['stop']\n _workerConfig['started'] -= {idx}\n _commandQueue.add(cmd)",
"def stop(self):\n if self.running:\n log.info('Stopping sub process (pid {}).'.format(self.sub_process.pid))\n self.sub_process.terminate()\n self.sub_process.join()\n log.info('Stopped sub process (pid {}).'.format(self.sub_process.pid))\n self.daemon.cancel()\n log.info('Cancelled polling daemon for sub process {}.'.format(self.sub_process.pid))\n\n # Cleanup the stream\n log.info('Cleaning sub-process (pid {}).'.format(self.sub_process.pid))\n self.mp_queue.close()\n self.mp_queue = None\n self.sub_process = None\n self.daemon = None",
"def stop(self) -> None:\n self._client.terminate_job(jobId = self.id, reason = self.STOP_REASON)",
"def stop(self):\n if self.p.is_alive():\n self.p.terminate()",
"def stop(self):\n self.shutdown_ = True\n if self.running():\n os.kill(self.server_pid_, signal.SIGTERM)"
] | [
"0.7121769",
"0.7078228",
"0.70730764",
"0.703866",
"0.69969434",
"0.6940482",
"0.6888461",
"0.6873332",
"0.6864292",
"0.6846163",
"0.6844958",
"0.6808371",
"0.6796847",
"0.6751896",
"0.6735001",
"0.6733168",
"0.6719779",
"0.67189586",
"0.66959256",
"0.6666449",
"0.6627743",
"0.6627396",
"0.66111016",
"0.66055304",
"0.6592373",
"0.65863246",
"0.6580113",
"0.6573053",
"0.657187",
"0.6565466"
] | 0.817291 | 0 |
Uses the current cursor position, which is in a code view, and gets the corresponding instruction address associated with that code. Returns the start of the function if the address cannot be determined. | def get_src_to_inst(self) -> int:
# get the Qt document
doc: QCodeDocument = self.document()
# get the current position of the cursor
cursor = self.textCursor()
pos = cursor.position()
# get the node at the associated cursor position
current_node = doc.get_stmt_node_at_position(pos)
if (
current_node is not None
and hasattr(current_node, "tags")
and current_node.tags is not None
and "ins_addr" in current_node.tags
):
asm_ins_addr = current_node.tags["ins_addr"]
else:
# the top of the function decompiled
asm_ins_addr = self._code_view.function.addr
return asm_ins_addr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_code(func):\n import inspect\n\n raw = \"\".join(inspect.getsource(func))\n found = re.findall(\"(k = .*)\", raw)\n\n if any(found):\n code = found[0]\n return code\n else:\n return \"\"",
"def getInstructionBefore(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Instruction:\n ...",
"def line_at_cursor(code: str, cursor_pos: int = 0):\n offset = 0\n lines = code.splitlines(True)\n for line in lines:\n next_offset = offset + len(line)\n if not line.endswith('\\n'):\n # If the last line doesn't have a trailing newline, treat it as if\n # it does so that the cursor at the end of the line still counts\n # as being on that line.\n next_offset += 1\n if next_offset > cursor_pos:\n break\n offset = next_offset\n else:\n line = \"\"\n return (line, offset)",
"def get_jump_code(tdiff, pdiff, poffset):\n return FH.get_jump_code(tdiff, pdiff, poffset)",
"def code_ptr(runtime_addr, runtime_addr_high=None, offset=0):\n\n if runtime_addr_high is None:\n runtime_addr_high = runtime_addr + 1\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n runtime_addr_high = memorymanager.RuntimeAddr(runtime_addr_high)\n binary_addr, _ = movemanager.r2b(runtime_addr)\n binary_addr_high, _ = movemanager.r2b(runtime_addr_high)\n assert memorymanager.is_data_loaded_at_binary_addr(binary_addr)\n assert memorymanager.is_data_loaded_at_binary_addr(binary_addr_high)\n code_at_runtime_addr = ((memory_binary[binary_addr_high] << 8) | memory_binary[binary_addr]) + offset\n # Label and trace the code at code_at\n label = entry(code_at_runtime_addr, warn=False) # ENHANCE: allow optional user-specified label?\n # Reference that label at addr/addr_high.\n offset_string = \"\" if offset == 0 else (\"%+d\" % -offset)\n if binary_addr_high == binary_addr + 1:\n # The general code in the \"else\" branch would work for this case as\n # well, but since the assembler has support for emitting a little-endian\n # 16-bit word it's nice to use it when we can.\n assert runtime_addr_high == runtime_addr + 1\n # TODO: Use word()/expr() variants which take a binary addr directly?\n word(runtime_addr)\n expr(runtime_addr, utils.LazyString(\"%s%s\", label, offset_string))\n else:\n # TODO: Use byte()/expr() variants which take a binary addr directly?\n byte(runtime_addr)\n expr(runtime_addr, make_lo(utils.LazyString(\"%s%s\", label, offset_string)))\n byte(runtime_addr_high)\n expr(runtime_addr_high, make_hi(utils.LazyString(\"%s%s\", label, offset_string)))\n if abs(runtime_addr_high - runtime_addr) == 1:\n return max(runtime_addr, runtime_addr_high) + 1\n return None",
"def _get_first_code_line():\n return min(_code_lines)",
"def get_current_instruction(self) -> Dict:\n\n instructions = self.environment.code.instruction_list\n return instructions[self.mstate.pc]",
"def get_code(self, data_start, data_size, offset):\n first_block = 0x1000 - data_start % 0x1000\n full_blocks = ((data_size + (data_start % 0x1000)) / 0x1000) - 1\n left_over = (data_size + data_start) % 0x1000\n\n code = \"\"\n\n # Deal with reads that are smaller than a block\n if data_size < first_block:\n data_read = self.obj_vm.zread(data_start, data_size)\n code += data_read\n return (offset, code)\n\n data_read = self.obj_vm.zread(data_start, first_block)\n code += data_read\n\n # The middle part of the read\n new_vaddr = data_start + first_block\n\n for _i in range(0, full_blocks):\n data_read = self.obj_vm.zread(new_vaddr, 0x1000)\n code += data_read\n new_vaddr = new_vaddr + 0x1000\n\n # The last part of the read\n if left_over > 0:\n data_read = self.obj_vm.zread(new_vaddr, left_over)\n code += data_read\n return (offset, code)",
"def get_code_first_line(f):\n # todo maybe use inspect.unwrap instead?\n if hasattr(f, '__wrapped__'):\n return get_code_first_line(f.__wrapped__)\n elif hasattr(f, '__code__'):\n # a function\n return f.__code__.co_firstlineno\n else:\n # a class ?\n try:\n _, lineno = findsource(f)\n return lineno\n except: # noqa\n raise ValueError(\"Cannot get code information for function or class %r\" % f)",
"def _get_address_calculation(segment, index, file_name):\n\n if segment == \"constant\": # Temp starts at 5\n load_bytecode = [f\"@{index}\", \"D=A\"]\n\n elif segment == \"temp\":\n load_bytecode = [f\"@{int(index) + 5}\", \"D=A\"]\n\n elif segment == \"static\":\n variable_name = file_name + \".\" + index\n load_bytecode = [f\"@{variable_name}\", \"D=A\"]\n\n elif segment == \"pointer\":\n if index == \"0\":\n register = \"THIS\"\n else:\n register = \"THAT\"\n\n load_bytecode = [f\"@{register}\", \"D=A\"]\n\n else:\n load_bytecode = [f\"@{VirtualMachineLibrary._get_symbolic_symbol(segment)}\", \"D=M\", f\"@{index}\", \"D=D+A\"]\n\n full_address_bytecode = load_bytecode + [\"@R13\", \"M=D\"]\n return full_address_bytecode",
"def get_location(self):\n self.location = self.func(0)",
"def get_offset_address():\n return command(\"O\")",
"def getInstructionAt(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Instruction:\n ...",
"def getaddr(self, space):\n return space.newint(rffi.cast(rffi.LONG, self.func.funcsym))",
"def get_offset(code, line, key):\n offset = 0\n while True:\n if key in code[line + offset].replace(\" \", \"\"):\n break\n offset += 1\n return offset",
"def GetCurrentOffset():\r\n return GetData().offsetCurrent",
"def get_linenumber():\n\n # inspect.stack()[0][2] returns line number in this function\n lineno = str(inspect.stack()[1][2])\n\n return lineno",
"def getFirstInstruction(self, function: ghidra.program.model.listing.Function) -> ghidra.program.model.listing.Instruction:\n ...",
"def run_code(code: List) -> Tuple[int, int]:\n executed_lines = set()\n\n prv_ptr, ins_ptr, acc = -1, 0, 0\n\n while True:\n if ins_ptr in executed_lines:\n break\n\n executed_lines.add(ins_ptr)\n\n cmd, args = code[ins_ptr]\n\n if cmd == \"acc\":\n acc += int(args)\n\n elif cmd == \"nop\":\n pass\n\n elif cmd == \"jmp\":\n prv_ptr = ins_ptr\n ins_ptr += int(args)\n continue\n\n prv_ptr = ins_ptr\n ins_ptr += 1\n\n else:\n # No loop detected\n return acc, -1\n\n return acc, ins_ptr",
"def get_func_code(func):\r\n source_file = None\r\n try:\r\n code = func.__code__\r\n source_file = code.co_filename\r\n if not os.path.exists(source_file):\r\n # Use inspect for lambda functions and functions defined in an\r\n # interactive shell, or in doctests\r\n source_code = ''.join(inspect.getsourcelines(func)[0])\r\n line_no = 1\r\n if source_file.startswith('<doctest '):\r\n source_file, line_no = re.match(\r\n '\\<doctest (.*\\.rst)\\[(.*)\\]\\>',\r\n source_file).groups()\r\n line_no = int(line_no)\r\n source_file = '<doctest %s>' % source_file\r\n return source_code, source_file, line_no\r\n # Try to retrieve the source code.\r\n with open(source_file) as source_file_obj:\r\n first_line = code.co_firstlineno\r\n # All the lines after the function definition:\r\n source_lines = list(islice(source_file_obj, first_line - 1, None))\r\n return ''.join(inspect.getblock(source_lines)), source_file, first_line\r\n except:\r\n # If the source code fails, we use the hash. This is fragile and\r\n # might change from one session to another.\r\n if hasattr(func, '__code__'):\r\n # Python 3.X\r\n return str(func.__code__.__hash__()), source_file, -1\r\n else:\r\n # Weird objects like numpy ufunc don't have __code__\r\n # This is fragile, as quite often the id of the object is\r\n # in the repr, so it might not persist across sessions,\r\n # however it will work for ufuncs.\r\n return repr(func), source_file, -1",
"def get_function_loc(self):\n return Gumtree.gumtree.getFunctionLoc()",
"def rts_code_ptr(runtime_addr, runtime_addr_high=None):\n return code_ptr(runtime_addr, runtime_addr_high, offset=1)",
"def fetch_execute(self):\n\n op_code = self.mem.read(self.reg.ip)\n self.reg.ip_inc()\n addr = self.mem.read(self.reg.ip)\n self.reg.ip_inc()\n\n # Execute the instruction on addr.\n self.op_codes[op_code.num](addr)",
"def extract_first_line(func_code):\r\n if func_code.startswith(FIRST_LINE_TEXT):\r\n func_code = func_code.split('\\n')\r\n first_line = int(func_code[0][len(FIRST_LINE_TEXT):])\r\n func_code = '\\n'.join(func_code[1:])\r\n else:\r\n first_line = -1\r\n return func_code, first_line",
"def Start_Code(self):\r\n if len(self.Code_Lines[len(self.Code_Lines) - 1]) == 2:\r\n if (self.Code_Lines[self.Code_Lines.__len__() - 1][0] == \"end\") and ((self.Code_Lines[len(self.Code_Lines) - 1][1]) in self.Functions_names):\r\n self.Registers.update({\"eip\": self.Functions_names[self.Code_Lines[len(self.Code_Lines) - 1][1]]})\r\n self.Registers.update({\"eip\": self.Registers[\"eip\"] + 1})\r\n self.Stack_segment.append(-1)\r\n while self.Registers[\"eip\"] < len(self.Code_segment):\r\n if self.Max_Memory < len(self.Memory_data_segment) + len(self.Stack_segment):\r\n self.State = \"ML\"\r\n return False\r\n if self.Max_Instructions < self.Instructions:\r\n self.State = \"TL\"\r\n return False\r\n self.Instructions += 1\r\n if self.Registers[\"eip\"] == -1:\r\n return True\r\n if (self.Code_segment[self.Registers[\"eip\"]] == \"\") and (self.Search_lable(self.Registers[\"eip\"]) == False):\r\n return False\r\n if self.Code_segment[self.Registers[\"eip\"]] == \"\":\r\n self.Registers.update({\"eip\": self.Registers[\"eip\"] + 1})\r\n self.Instructions -= 1\r\n continue\r\n elif self.Special_Names_no_Operands.__contains__(self.Code_segment[self.Registers[\"eip\"]]):\r\n if self.Code_segment[self.Registers[\"eip\"]] == \"exit\":\r\n return True\r\n elif self.Code_segment[self.Registers[\"eip\"]] == \"cbw\":\r\n a=self.Get_value_from_reg_X(\"al\")\r\n if bool(a & pow(2, (8) - 1)):\r\n self.Save_value_in_reg_X(\"ah\",pow(2, (8) - 1))\r\n else:\r\n self.Save_value_in_reg_X(\"ah\", 0)\r\n elif self.Code_segment[self.Registers[\"eip\"]] == \"cwd\":\r\n a = self.Get_value_from_reg_X(\"ax\")\r\n if bool(a & pow(2, (2*8) - 1)):\r\n self.Save_value_in_reg_X(\"dx\", pow(2, (2*8) - 1))\r\n else:\r\n self.Save_value_in_reg_X(\"dx\", 0)\r\n elif self.Code_segment[self.Registers[\"eip\"]] == \"cdq\":\r\n a = self.Registers[\"eax\"]\r\n if bool(a & pow(2, (4*8) - 1)):\r\n self.Registers[\"edx\"]=pow(2, (4*8) - 1)\r\n else:\r\n self.Registers[\"edx\"]=0\r\n elif self.Code_segment[self.Registers[\"eip\"]] == \"cld\":\r\n self.Flags.update({\"df\": 0})\r\n elif self.Code_segment[self.Registers[\"eip\"]] == \"std\":\r\n self.Flags.update({\"df\": 1})\r\n elif self.Code_segment[self.Registers[\"eip\"]] == \"stc\":\r\n self.Flags.update({\"cf\": 1})\r\n elif self.Code_segment[self.Registers[\"eip\"]] == \"clc\":\r\n self.Flags.update({\"cf\": 0})\r\n elif self.Code_segment[self.Registers[\"eip\"]] == \"ret\":\r\n if self.Use_Uses.__len__() != 0:\r\n\r\n reg_32 = {\"edi\": 0, \"esi\": 0, \"ebp\": 0, \"esp\": 0, \"ebx\": 0, \"edx\": 0, \"ecx\": 0,\"eax\": 0}\r\n\r\n i = self.Use_Uses.__len__() - 1\r\n while (i >= 0):\r\n if (len(self.Stack_segment) == 0) or (self.Registers[\"esp\"] < 0):\r\n self.State = \"RTE\"\r\n return False\r\n reg_32.update({self.Use_Uses[i]: self.Stack_segment[self.Registers[\"esp\"]]})\r\n self.Stack_segment = self.Stack_segment[:-1]\r\n self.Registers.update({\"esp\": self.Registers[\"esp\"] - 1})\r\n i -= 1\r\n\r\n for i in self.Use_Uses:\r\n if (i.__len__() == 3) and (i != 'eip'):\r\n self.Registers.update({i: reg_32[i]})\r\n\r\n self.Use_Uses=[]\r\n self.Registers.update({\"eip\": self.Stack_segment[self.Registers[\"esp\"]]})\r\n self.Stack_segment = self.Stack_segment[:-1]\r\n self.Registers.update({\"esp\": self.Registers[\"esp\"] - 1})\r\n continue\r\n elif self.Special_Names_one_Operands.__contains__(self.Code_segment[self.Registers[\"eip\"]][0]):\r\n if (self.Code_segment[self.Registers[\"eip\"]][0][0] == 'j') or (self.Code_segment[self.Registers[\"eip\"]][0][0] == 'l'):\r\n 
tmp = self.Jmp_X(self.Code_segment[self.Registers[\"eip\"]][0])\r\n if tmp:\r\n self.Registers.update({\"eip\": self.Labels_names[self.Code_segment[self.Registers[\"eip\"]][1]]})\r\n continue\r\n elif (self.Code_segment[self.Registers[\"eip\"]][0] == 'mul') or (self.Code_segment[self.Registers[\"eip\"]][0] == 'imul'):\r\n if not self.Mul_X(self.Code_segment[self.Registers[\"eip\"]][0],self.Code_segment[self.Registers[\"eip\"]][1]):\r\n return False\r\n elif (self.Code_segment[self.Registers[\"eip\"]][0] == 'div') or (self.Code_segment[self.Registers[\"eip\"]][0] == 'idiv'):\r\n if not self.Div_X(self.Code_segment[self.Registers[\"eip\"]][0],self.Code_segment[self.Registers[\"eip\"]][1]):\r\n return False\r\n elif (self.Code_segment[self.Registers[\"eip\"]][0] == 'neg') or (self.Code_segment[self.Registers[\"eip\"]][0] == 'inc') or (self.Code_segment[self.Registers[\"eip\"]][0] == 'dec'):\r\n if not self.Neg_inc_dec(self.Code_segment[self.Registers[\"eip\"]][0], self.Code_segment[self.Registers[\"eip\"]][1]):\r\n return False\r\n elif self.Code_segment[self.Registers[\"eip\"]][0] == 'call':\r\n if self.Functions_names.__contains__(self.Code_segment[self.Registers[\"eip\"]][1]):\r\n self.Stack_segment.append(self.Registers[\"eip\"] + 1)\r\n self.Registers.update({\"esp\": self.Registers[\"esp\"] + 1})\r\n self.Registers.update({\"eip\": self.Functions_names[self.Code_segment[self.Registers[\"eip\"]][1]]})\r\n else:\r\n if not self.Irvine32(self.Code_segment[self.Registers[\"eip\"]][1]):\r\n return False\r\n elif self.Special_Names_two_Operands.__contains__(self.Code_segment[self.Registers[\"eip\"]][0]):\r\n L1 = [\"add\", \"sub\", \"sbb\", \"acd\"]\r\n L2 = [\"test\", \"xor\", \"and\", \"or\"]\r\n L4 = [\"shl\", \"shr\", \"sal\", \"sar\", \"rol\", \"ror\", \"rcl\", \"rcr\"]\r\n if self.Code_segment[self.Registers[\"eip\"]][0][0] == 'm':\r\n if not self.Mov_X(self.Code_segment[self.Registers[\"eip\"]][0],self.Code_segment[self.Registers[\"eip\"]][1]):\r\n return False\r\n elif self.Code_segment[self.Registers[\"eip\"]][0][0] == 'c':\r\n if not self.Cmp(self.Code_segment[self.Registers[\"eip\"]][0],self.Code_segment[self.Registers[\"eip\"]][1]):\r\n return False\r\n elif self.Code_segment[self.Registers[\"eip\"]][0] == 'xchg':\r\n if not self.Xchg(self.Code_segment[self.Registers[\"eip\"]][0],self.Code_segment[self.Registers[\"eip\"]][1]):\r\n return False\r\n elif L1.__contains__(self.Code_segment[self.Registers[\"eip\"]][0]):\r\n if not self.Add_sub(self.Code_segment[self.Registers[\"eip\"]][0],self.Code_segment[self.Registers[\"eip\"]][1]):\r\n return False\r\n elif L2.__contains__(self.Code_segment[self.Registers[\"eip\"]][0]):\r\n if not self.Test(self.Code_segment[self.Registers[\"eip\"]][0],self.Code_segment[self.Registers[\"eip\"]][1]):\r\n return False\r\n elif L4.__contains__(self.Code_segment[self.Registers[\"eip\"]][0]):\r\n if not self.Shift(self.Code_segment[self.Registers[\"eip\"]][0],self.Code_segment[self.Registers[\"eip\"]][1]):\r\n return False\r\n elif self.Code_segment[self.Registers[\"eip\"]][0]=='uses':\r\n if self.Use_Uses.__len__()!=0:\r\n return False\r\n else:\r\n self.Use_Uses=self.Code_segment[self.Registers[\"eip\"]][1:]\r\n\r\n reg_32 = {\"eax\": 0, \"ecx\": 0, \"edx\": 0, \"ebx\": 0, \"esp\": 0, \"ebp\": 0, \"esi\": 0, \"edi\": 0}\r\n for i in self.Use_Uses:\r\n if (i.__len__() == 3) and (i != 'eip'):\r\n reg_32.update({i: self.Registers[i]})\r\n for i in self.Use_Uses:\r\n if (i.__len__() == 3) and (i != 'eip'):\r\n self.Stack_segment.append(reg_32[i])\r\n 
self.Registers.update({\"esp\": self.Registers[\"esp\"] + 1})\r\n\r\n\r\n self.Registers.update({\"eip\": self.Registers[\"eip\"] + 1})\r\n\r\n if (self.Registers[\"eip\"] < 0) or (self.Registers[\"eip\"] >= self.Code_segment.__len__()):\r\n self.State = \"RTE\"\r\n return False\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\n return True",
"def getStage(code):\n loc = code.find('x')\n if loc < 0: loc = 4\n if code == \"XXXX\": loc = 0\n return loc",
"def code(self):\n code = self._code\n if code is None:\n raise IOError('source code not available')\n return code",
"def code(self):\n code = self._code\n if code is None:\n raise IOError('source code not available')\n return code",
"def read_address(self, opcode: int) -> int:\n\n if self.insight:\n self.insight.address(opcode)\n\n return opcode & 0xFFF",
"def get_address(self, mode, offset):\n\t\taddress = None\n\t\tif mode == 0:\n\t\t\taddress = self.data[ self.pos + offset ]\n\t\telif mode == 1:\n\t\t\taddress = self.pos + offset\n\t\telif mode == 2:\n\t\t\taddress = self.rel_pos + self.data[ self.pos + offset ]\n\t\telse:\n\t\t\tprint(\"FAIL - wrong mode parameter\")\n\t\treturn address"
] | [
"0.6299851",
"0.6007909",
"0.60040885",
"0.6001951",
"0.5926025",
"0.59078115",
"0.5856952",
"0.5770866",
"0.57665575",
"0.57530594",
"0.57365364",
"0.5735565",
"0.5707904",
"0.5673848",
"0.56713",
"0.5665861",
"0.5659602",
"0.5657597",
"0.5647901",
"0.5603577",
"0.5597595",
"0.558757",
"0.5585015",
"0.5582053",
"0.5576437",
"0.55503577",
"0.55300725",
"0.55300725",
"0.55263305",
"0.55124974"
] | 0.7334392 | 0 |
Calculate a checksum for the given path. Will eventually use this to ensure config has changed before reloading. | def checksum(path):
with open(path, 'r') as f:
return md5(f.read()).digest() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_checksum(path: Union[Path, str]) -> str:\n path = Path(path)\n if not (path.is_file() or path.is_dir()):\n msg.fail(f\"Can't get checksum for {path}: not a file or directory\", exits=1)\n if path.is_file():\n return hashlib.md5(Path(path).read_bytes()).hexdigest()\n else:\n # TODO: this is currently pretty slow\n dir_checksum = hashlib.md5()\n for sub_file in sorted(fp for fp in path.rglob(\"*\") if fp.is_file()):\n dir_checksum.update(sub_file.read_bytes())\n return dir_checksum.hexdigest()",
"def compute(self):\n self.checksum = self.get_files_hashes_in_path()\n self.real_checksum = self.checksum\n # This appends the filename when checksum was made for a single file.\n # We need to get this when testing the consistency on the moment of\n # restore.\n if self.count == 1:\n self.checksum = self.real_checksum + os.path.basename(self.path)\n return self.checksum",
"def _calc_sha1(path):\n calc = hashlib.sha1()\n with open(path, 'r') as f:\n calc.update(f.read())\n return calc.hexdigest()",
"def checksum(self, filepath) -> str:\n if os.path.exists(filepath):\n hash_md5 = md5()\n with open(filepath, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return urlsafe_b64encode(hash_md5.digest()).decode('utf-8')\n\n return \"\"",
"def checksum(self):\n hasher = md5()\n with self.open('rb') as fd:\n buf = fd.read(_BLOCKSIZE)\n while len(buf) > 0:\n # TODO Could cancel work here.\n hasher.update(buf)\n buf = fd.read(_BLOCKSIZE)\n digest = safetype(hasher.hexdigest())\n return digest",
"def path_checksum(paths):\n\n if not hasattr(paths, '__iter__'):\n raise TypeError('sequence or iterable expected not %r!' % type(paths))\n\n def _update_checksum(checksum, dirname, filenames):\n for filename in sorted(filenames):\n path = path_join(dirname, filename)\n if isfile(path):\n fh = open(path, 'rb')\n while 1:\n buf = fh.read(4096)\n if not buf : break\n checksum.update(buf)\n fh.close()\n\n chksum = sha1()\n\n for path in sorted([normpath(f) for f in paths]):\n if path_exists(path):\n if isdir(path):\n walk(path, _update_checksum, chksum)\n elif isfile(path):\n _update_checksum(chksum, dirname(path), basename(path))\n\n return chksum.hexdigest()",
"def file_checksum(file_path, block_size=65536):\n path = Path(file_path)\n h = xxhash.xxh64()\n with path.open(\"rb\") as f:\n for chunk in iter(lambda: f.read(block_size), b\"\"):\n h.update(chunk)\n return h.hexdigest()",
"def md5_checksum(file_path):\n with open(file_path, 'rb') as fh:\n m = hashlib.md5()\n while True:\n data = fh.read(8192)\n if not data:\n break\n m.update(data)\n return m.hexdigest()",
"def md5_sum_file(path):\n with open(path, 'rb') as f:\n m = hashlib.md5()\n while True:\n data = f.read(8192)\n if not data:\n break\n m.update(data)\n return m.hexdigest()",
"def get_file_checksum(file_path):\n with open(file_path) as f:\n content = f.read()\n return md5(content.encode()).hexdigest()",
"def checksum(self):\n def stat_string(path):\n stat = os.stat(path)\n return '%s,%s' % (str(stat.st_size), str(stat.st_mtime))\n\n return dict((path, stat_string(path))\n for path in self.crawl()\n if os.path.exists(path))",
"def compute_digest(path):\n hash = hashlib.sha512()\n for part in DiskCrawler.partial_reader(path, 4 * 1024 * 1024):\n hash.update(part)\n return hash.digest()",
"def _get_checksum(self, text):\n # Compute the new checksum over everything but the sha1sum line.\n # This will fail if sha1sum appears for some other reason. It won't ;-)\n text = \"\".join([line for line in text.splitlines(True) if \"sha1sum\" not in line])\n return utils.str_checksum(text)",
"def checksum_of(filepath):\n bfsz = 10240000 # 10 MB buffer\n sum = hashlib.sha256()\n with open(filepath) as fd:\n while True:\n buf = fd.read(bfsz)\n if not buf: break\n sum.update(buf)\n return sum.hexdigest()",
"def file_digest(path, algo=hashlib.md5):\n checksum = algo()\n with open(path, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n checksum.update(chunk)\n return checksum.hexdigest()",
"def get_checksum(file_path: str) -> str:\n\n # Open the file in binary mode\n with open(file_path, \"rb\") as file:\n # Create a SHA-256 hash object\n hash_object = hashlib.sha256()\n\n # Iterate over the file in chunks\n for chunk in iter(lambda: file.read(4096), b\"\"):\n # Feed the chunk to the hash object\n hash_object.update(chunk)\n\n # Obtain the checksum in hexadecimal format\n checksum = hash_object.hexdigest()\n\n return checksum",
"def calc_file_md5(file_path):\n hash_md5 = str()\n method = hashlib.md5()\n if not os.path.exists(file_path):\n logger.error(\"File(%s) don not exist, can not calculation file hash\" % file_path)\n return hash_md5\n\n with open(file_path, 'rb') as f:\n for chunk in read_chunks(f, 1024 * 1024):\n method.update(chunk)\n return method.hexdigest()",
"def checksum(self):\n checksums = {\n \"slug\": hashlib.sha256(\n self.slug.encode(\"utf-8\")\n ).hexdigest(),\n \"files\": {},\n }\n\n def file_hash(filepath):\n running_hash = hashlib.sha256()\n with open(filepath, \"rb\") as IN:\n while True:\n # Read file in as little chunks.\n buf = IN.read(4096)\n if not buf:\n break\n running_hash.update(buf)\n return running_hash.hexdigest()\n\n # iterate over the direcory and calucalte the hash\n for root, dirs, files in os.walk(self.thawed_dir):\n for file_path in sorted(files):\n full_path = str(Path(root) / file_path)\n # Calculate a relative path to the freezable object\n rel_path = full_path.replace(str(self.thawed_dir) + \"/\", \"\")\n # calculate and store the checksums\n phash = file_hash(full_path)\n filesize = os.path.getsize(full_path)\n checksums[\"files\"][rel_path] = {\n \"checksum\": phash,\n \"size\": filesize,\n }\n # calculate the total\n total = hashlib.sha256(checksums[\"slug\"].encode(\"utf-8\"))\n # Iterate over filenames AND hashes and update checksum\n for filename, data in checksums[\"files\"].items():\n total.update(filename.encode(\"utf-8\"))\n total.update(data[\"checksum\"].encode(\"utf-8\"))\n checksums[\"total\"] = total.hexdigest()\n return checksums",
"def checksum(self) -> str:\n return self.workspace.get_checksum(self)",
"def checksum(self):\r\n return self._checksum",
"def checksumFile(filename):\n return md5File(filename)",
"def _hash_file_content(self, path):\n hasher = hashlib.sha1()\n with open(path, 'rb') as file:\n buffer = file.read(self.hash_block_size)\n while len(buffer) > 0:\n hasher.update(buffer)\n buffer = file.read(self.hash_block_size)\n return hasher.hexdigest()",
"def compute_checksum(filename):\n cmd = 'md5sum ' + filename\n return pipe(cmd)",
"def fsum(fpath):\n import hashlib\n import codecs\n with codecs.open(fpath, \"r\", \"utf-8\") as filep:\n buff = filep.read()\n cksum = hashlib.md5(buff.encode(\"utf-8\"))\n return cksum.hexdigest()",
"def _get_hash(self, path):\n with open(path, \"r\") as fp:\n content = fp.read()\n\n return sha256(content).hexdigest()",
"def checksum(self):\n\n return self.__checksum",
"def get_hash(path: Path) -> str:\n m = hashlib.sha256()\n m.update(path.read_bytes())\n return m.hexdigest()",
"def get_checksum(input_fname):\n with open(input_fname, \"rb\") as infile:\n file_contents = infile.read()\n\n checksum = hashlib.md5(file_contents).hexdigest()\n return checksum",
"def calculate_md5sum_of_a_file(context, file_name, file_path):\n command = \"md5sum \" + file_path + \"/\" + file_name + \" | awk {'print $1'}\"\n return context.cme_session.send_ssh_command(command=command)",
"def checksum(self) -> str:\n return self._checksum"
] | [
"0.7271267",
"0.6893249",
"0.67964584",
"0.6699954",
"0.65897936",
"0.6586321",
"0.6519239",
"0.65081114",
"0.6485526",
"0.6472071",
"0.646679",
"0.6462213",
"0.64450413",
"0.64431435",
"0.63804424",
"0.6329311",
"0.6290257",
"0.62881494",
"0.6251396",
"0.6229306",
"0.6219351",
"0.6216641",
"0.62120277",
"0.61829376",
"0.6161841",
"0.6145024",
"0.6141205",
"0.61404085",
"0.61185324",
"0.6115524"
] | 0.7938589 | 0 |
Set up inotify if requested. | def _setup_inotify(self, flag):
i = None
if flag:
try:
import inotify.adapters
except ImportError:
raise AssertionError(
'cannot use inotify, package not installed')
else:
i = inotify.adapters.Inotify(paths=[self.watch],
block_duration_s=0)
return (flag, i) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start(self):\n self._class_setup()\n\n self._inotify_fd = InotifyFileWatcher._libc.inotify_init()\n if self._inotify_fd < 0:\n error = OSError('failed call to inotify_init')\n error.errno = ctypes.get_errno()\n error.strerror = errno.errorcode[ctypes.get_errno()]\n raise error\n self._inotify_poll = select.poll()\n self._inotify_poll.register(self._inotify_fd, select.POLLIN)\n self._add_watch_for_path(self._directory)",
"def SetupFileWatcher(filename, cb):\n wm = pyinotify.WatchManager()\n handler = FileEventHandler(wm, filename, cb)\n asyncnotifier.AsyncNotifier(wm, default_proc_fun=handler)",
"def inotify_init(flags=0, closefd=CLOEXEC_DEFAULT):\n assert isinstance(flags, int), 'Flags must be an integer'\n\n if closefd:\n flags |= IN_CLOEXEC\n\n fd = lib.inotify_init1(flags)\n \n if fd < 0:\n err = ffi.errno\n if err == errno.EINVAL:\n raise ValueError(\"Invalid argument or flag\")\n elif err == errno.EMFILE:\n raise OSError(\"Maximum inotify instances reached\")\n elif err == errno.ENFILE:\n raise OSError(\"File descriptor limit hit\")\n elif err == errno.ENOMEM:\n raise MemoryError(\"Insufficent kernel memory avalible\")\n else:\n # If you are here, its a bug. send us the traceback\n raise UnknownError(err)\n\n return fd",
"def watch(self):\n wm = pyinotify.WatchManager()\n self.notifier = pyinotify.Notifier(wm, default_proc_fun=self.callback)\n wm.add_watch(self.directory, pyinotify.ALL_EVENTS)\n try:\n self.notifier.loop()\n except (KeyboardInterrupt, AttributeError):\n print_notification(\"Stopping\")\n finally:\n self.notifier.stop()\n self.terminate_processes()",
"def test_inotify(self):\n self.fail(\"write a test\")",
"def _setup(self):\n # Look for ini file\n if not os.path.isfile(self.ini_file):\n self._fail('Cannot find ini file')\n\n self._setup_logging()\n\n # Import debexpo root directory\n sys.path.append(os.path.dirname(self.ini_file))\n\n # Initialize Pylons app\n conf = appconfig('config:' + self.ini_file)\n pylons.config = load_environment(conf.global_conf, conf.local_conf)\n\n # Change into the incoming directory\n incoming_dir = pylons.config['debexpo.upload.incoming']\n logging.info(\"Changing dir to %s\", incoming_dir)\n os.chdir(incoming_dir)\n\n # Look for the changes file\n if not os.path.isfile(self.changes_file):\n self._fail('Cannot find changes file')",
"def process_default(self, event):\n if event.name == self._filename:\n logging.debug(\"Received inotify event %s\", event)\n self._cb()",
"async def watchForFileSystemEvents(self):\n\n # Things that can throw this off:\n #\n # * Moving a watched directory out of the watch tree (will still\n # generate events even when outside of directory tree)\n #\n # * Doing two changes on a directory or something before the program\n # has a time to handle it (this will also throw off a lot of inotify\n # code, though)\n #\n # * Moving a watched directory within a watched directory will get the\n # wrong path. This needs to use the cookie system to link events\n # together and complete the move properly, which can still make some\n # events get the wrong path if you get file events during the move or\n # something silly like that, since MOVED_FROM and MOVED_TO aren't\n # guaranteed to be contiguous. That exercise is left up to the\n # reader.\n #\n # * Trying to watch a path that doesn't exist won't automatically\n # create it or anything of the sort.\n #\n # * Deleting and recreating or moving the watched directory won't do\n # anything special, but it probably should.\n #\n async for event in self.inotify:\n\n if not self.continueWatchingFS :\n return\n\n # If this is a creation event, add a watch for the new path (and its\n # subdirectories if any)\n #\n if Mask.CREATE in event.mask and event.path is not None :\n await self.watchAPath(event.path)\n\n if Mask.DELETE_SELF in event.mask and event.path is not None :\n await self.unWatchAPath(event.path, event.watch)\n\n # If there are some bits in the cpMask in the event.mask yield this\n # event\n #\n if event.mask & self.cpMask:\n yield event\n else:\n # Note that these events are needed for cleanup purposes.\n # We'll always get IGNORED events so the watch can be removed\n # from the inotify. We don't need to do anything with the\n # events, but they do need to be generated for cleanup.\n # We don't need to pass IGNORED events up, because the end-user\n # doesn't have the inotify instance anyway, and IGNORED is just\n # used for management purposes.\n #\n self.logger.debug(f'UNYIELDED EVENT: {event}')",
"def start(self):\n print('start watching {}'.format(self.conf_directory))\n self.conf_observer.start()",
"def _setup_watch(self, watch):\n assert not isfile(watch), 'watch dir is a file'\n\n if pathexists(watch):\n return watch\n\n os.makedirs(watch)\n\n if self.chown:\n try:\n os.chown(watch, *self.chown)\n\n except OSError:\n pass # Non-fatal\n\n if self.chmod:\n try:\n os.chmod(watch, self.chmod)\n\n except OSError:\n pass # Non-fatal\n\n return watch",
"def _initialize_watcher(self):\n self._watcher = Watcher(\n Path(self._git.git_dir) / \"refs\", _watcher_cls=_GitRefWatcher\n )\n\n def _schedule(change=None):\n IOLoop.current().add_callback(self._on_ref_change, change)\n\n self._watcher.observe(_schedule, \"changes\")\n\n _schedule()",
"def setup():\n\tglobal config_parser, config_file\n\tglobal prefix\n\n\tif os.path.islink(sys.argv[0]):\n\t\tlink = os.readlink(sys.argv[0])\n\n\t\tif not os.path.isabs(link):\n\t\t\tlink = os.path.join(os.path.dirname(sys.argv[0]), link)\n\n\t\tprefix = os.path.dirname(os.path.abspath(link))\n\telse:\n\t\tprefix = os.path.dirname(os.path.abspath(sys.argv[0]))\n\n\tconfig_parser = ConfigParser.ConfigParser()\n\tset_defaults()\n\n\tconfig_file = os.path.join (xdg_config_home, \"sushi\", \"nigiri\")\n\n\tif not check_config_file(config_file):\n\t\tprint \"Config file creation failed. Aborting.\"\n\t\treturn\n\n\tread_config_file()",
"def start(self):\n self._pref_decls = {}\n def_path = os.path.join(MODULE_PATH, 'default.ini')\n def_pref_path = os.path.join(MODULE_PATH, 'default_prefs.ini')\n self._prefs = ConfigObj()\n if os.path.isfile(def_path):\n defaults = ConfigObj(def_path)\n self.default_folder = defaults['folder']\n self.default_file = defaults['file']\n pref_path = os.path.join(defaults['folder'], defaults['file'])\n if os.path.isfile(pref_path):\n self._prefs.merge(ConfigObj(pref_path))\n self._prefs.filename = pref_path\n elif os.path.isfile(def_pref_path):\n defaults = ConfigObj(def_pref_path)\n self._prefs.merge(defaults)\n\n elif os.path.isfile(def_pref_path):\n defaults = ConfigObj(def_pref_path)\n self._prefs.merge(defaults)\n\n self._refresh_pref_decls()\n self._bind_observers()",
"def setup(self):\n # Set bashrc file\n self._bashrc()\n\n # Return if not running script as root user\n if self.running_as_root is False:\n return\n\n # Return if user prompted doesn't exist\n if self.infoset_user_exists is False:\n return\n\n # Set file permissions\n self._file_permissions()\n\n # Setup systemd\n self._systemd()",
"def init_managers(endpoints_file: Optional[Text]) -> None:",
"async def setup(self) -> None:\n if self.args.sync:\n self._processing = threading.Thread(target=self.start_events_sync)\n self._processing.daemon = True\n self._processing.start()\n else:\n self._processing = asyncio.ensure_future(self.start_events_async())",
"def listen_folders(config):\n i = inotify.adapters.Inotify()\n\n new_config = {}\n for watch_path in config:\n i.add_watch(watch_path)\n # Se é recursivo, adiciona watch a cada subdiretoria\n if config[watch_path][0]:\n subdirs = [x[0] for x in os.walk(watch_path)]\n for dir_path in subdirs:\n if dir_path not in config and dir_path not in new_config:\n i.add_watch(dir_path)\n entry = {dir_path: config[watch_path]}\n new_config = concat_config(new_config, entry)\n config = concat_config(config, new_config)\n\n try:\n for event in i.event_gen(yield_nones=False):\n (_, type_names, watch_path, filename) = event\n if 'IN_CREATE' in type_names and os.path.isdir(watch_path + '/' + filename) \\\n and config[watch_path][0]:\n dir_path = watch_path + '/' + filename\n if dir_path not in config:\n i.add_watch(dir_path)\n entry = {dir_path: config[watch_path]}\n config = concat_config(config, entry)\n # Obter todos os nomes de ficheiros sobre os quais atuar\n filename_matches = []\n for name_regex in config[watch_path].keys():\n if name_regex != 0 and re.search(name_regex, filename):\n filename_matches.append(name_regex)\n # Executar ações\n for name_regex in filename_matches:\n types = intersection(type_names, config[watch_path][name_regex].keys())\n for type_name in types:\n exec_actions(config[watch_path][name_regex][type_name], watch_path, filename)\n\n except KeyboardInterrupt:\n for watch_path in config:\n i.remove_watch(watch_path)\n print('FINISHED')",
"def setup_hooks(self):\n pass",
"def setup(self):\n # TODO : figure out how to make the map interface a singleton class\n\n if not hasattr(self, 'mapInterface'):\n self.mapInterface = MapInterface(settings['FILE_CONFIG']['filename'])",
"def Init(self):\n # First iteration over all the files in root searching for symlinks and\n # non-regular files.\n seen_inodes = {}\n for basepath, _, filenames in sorted(os.walk(self._root)):\n for filename in sorted(filenames):\n full_path = os.path.join(basepath, filename)\n rel_path = full_path[len(self._root):]\n st = os.lstat(full_path)\n\n file_data = {\n 'size': st.st_size,\n }\n self._files[rel_path] = file_data\n\n # Track symlinks.\n if stat.S_ISLNK(st.st_mode):\n link_path = os.readlink(full_path)\n # lddtree's normpath handles a little more cases than the os.path\n # version. In particular, it handles the '//' case.\n self._symlinks[rel_path] = (\n link_path.lstrip('/') if link_path and link_path[0] == '/' else\n lddtree.normpath(os.path.join(os.path.dirname(rel_path),\n link_path)))\n file_data['deps'] = {\n 'symlink': [self._symlinks[rel_path]]\n }\n\n # Track hardlinks.\n if st.st_ino in seen_inodes:\n self._hardlinks[rel_path] = seen_inodes[st.st_ino]\n continue\n seen_inodes[st.st_ino] = rel_path",
"def connect_inotify(loop=None):\n if not loop:\n loop = asyncio.get_event_loop()\n _, protocol = yield from _connect_inotify(loop)\n return protocol",
"async def setup(self):\n\t\tlogging.config.dictConfig(self.log_settings['log'])\n\t\tself.logger = logging.getLogger('Responder3')\n\t\tself.create_dir_strucutre()\n\n\t\tif 'handlers' in self.log_settings:\n\t\t\tasync for handlerclass, handler in self.get_handlers():\n\t\t\t\tawait self.start_extension(handlerclass, self.log_settings[self.log_settings['handlers'][handler]])",
"def _setup_watchers(self, folders):\n \n return {'observer': Observer()}",
"def __init__(self):\n super().__init__()\n\n etc_conf_names = ('app.conf', 'app.local.conf')\n conf_paths = [os.path.join(APP_DIR, 'etc', c) for c in etc_conf_names]\n\n user_config_path = os.path.join(\n os.path.expanduser('~'),\n '.config',\n 'url_manager.conf'\n )\n conf_paths.append(user_config_path)\n\n self.read(conf_paths)\n self.set('DEFAULT', 'app_dir', APP_DIR)",
"def setUp(self):\n self.setUpPyfakefs()\n self.fake_os = fake_filesystem.FakeOsModule(self.fs)\n\n populate_fakefs(self)",
"def _setup_applications(self):\n if 'host_nfs_path' in self.config['settings'] and 'guest_nfs_path' in self.config['settings']:\n self.settings['nfs'] = NFSSettings(host_vm_nfs_path=self.config['settings']['host_nfs_path'],\n guest_vm_nfs_path=self.config['settings']['guest_nfs_path'])\n\n self._setup_printer()",
"def setup(self):\n\n self.select_file.on_change(\"value\", self.callback_select_file)",
"def __init__(self, notify_filter = 'UnionChange', **kwargs):\n FileMonitor.__init__(self, notify_filter, **kwargs)\n valid_notify_filter = (\n 'FileNameChange',\n 'DirNameChange',\n 'LastWriteChange',\n 'UnionChange'\n )\n if self._notify_filter not in valid_notify_filter:\n raise FileMonitorError(\n 'Watcher installation error.'\n 'The notify_filter value cannot be: \"{}\".'.\n format(self._notify_filter)\n )\n try:\n self._directory = win32file.CreateFile(\n self._kwargs['Path'],\n winnt.FILE_LIST_DIRECTORY,\n win32con.FILE_SHARE_READ |\n win32con.FILE_SHARE_WRITE,\n None,\n win32con.OPEN_EXISTING,\n win32con.FILE_FLAG_BACKUP_SEMANTICS |\n win32con.FILE_FLAG_OVERLAPPED,\n None\n )\n except pywintypes.error as err:\n raise FileMonitorError(\n 'Failed to open directory. Error code: {}.'\n .format(err.winerror)\n ) from err\n self._overlapped = pywintypes.OVERLAPPED()\n self._overlapped.hEvent = win32event.CreateEvent(\n None,\n False,\n False,\n None\n )\n if not self._overlapped.hEvent:\n raise FileMonitorError(\n 'Failed to create event. Error code: {}.'\n .format(self._overlapped.hEvent)\n )\n self._buffer = win32file.AllocateReadBuffer(1024)\n self._num_bytes_returned = 0\n self._set_watcher()",
"def setup():\n # Create the Dallinger config file if it does not already exist.\n config_name = \".dallingerconfig\"\n config_path = os.path.join(os.path.expanduser(\"~\"), config_name)\n\n if os.path.isfile(config_path):\n log(\"Dallinger config file already exists.\", chevrons=False)\n\n else:\n log(\"Creating Dallinger config file at ~/.dallingerconfig...\", chevrons=False)\n src = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n \"..\",\n \"default_configs\",\n config_name,\n )\n shutil.copyfile(src, config_path)",
"def __init__(self, notify_obj='File', notify_filter='Operation', **kwargs):\n FileMonitor.__init__(self, notify_filter, **kwargs)\n valid_notify_filter = (\n 'Operation',\n 'Creation',\n 'Deletion',\n 'Modification'\n )\n if self._notify_filter not in valid_notify_filter:\n raise FileMonitorError(\n 'Watcher installation error.'\n 'The notify_filter value cannot be: \"{}\".'.\n format(self._notify_filter)\n )\n valid_notify_obj = ('File', 'Directory')\n if notify_obj not in valid_notify_obj:\n raise FileMonitorError(\n 'Watcher installation error.'\n 'The notify_obj value cannot be: \"{}\".'.\n format(notify_obj)\n )\n wmi_obj = wmi.WMI(namespace='root/CIMv2')\n if notify_obj == 'File':\n try:\n self._watcher = wmi_obj.CIM_DataFile.watch_for(\n self._notify_filter,\n **kwargs\n )\n except wmi.x_wmi as err:\n raise FileMonitorError(\n 'Watcher installation error. Error code: {}.'.\n format(err.com_error.hresult or 'unknown')\n ) from err\n elif notify_obj == 'Directory':\n try:\n self._watcher = wmi_obj.CIM_Directory.watch_for(\n self._notify_filter,\n **kwargs\n )\n except wmi.x_wmi as err:\n raise FileMonitorError(\n 'Watcher installation error. Error code: {}.'.\n format(err.com_error.hresult or 'unknown')\n ) from err\n else:\n raise FileMonitorError('Watcher installation error.')"
] | [
"0.68859357",
"0.6521524",
"0.5825635",
"0.5661374",
"0.56051093",
"0.5601565",
"0.55341774",
"0.5521033",
"0.54301476",
"0.53801805",
"0.53308344",
"0.521609",
"0.5213829",
"0.5188312",
"0.51532716",
"0.5120239",
"0.51025105",
"0.5082971",
"0.507481",
"0.50697386",
"0.5063522",
"0.505565",
"0.5052569",
"0.50293666",
"0.5001923",
"0.4991477",
"0.4971326",
"0.49547386",
"0.49344993",
"0.4920117"
] | 0.7264749 | 0 |
Create watch directory if it does not exist. | def _setup_watch(self, watch):
assert not isfile(watch), 'watch dir is a file'
if pathexists(watch):
return watch
os.makedirs(watch)
if self.chown:
try:
os.chown(watch, *self.chown)
except OSError:
pass # Non-fatal
if self.chmod:
try:
os.chmod(watch, self.chmod)
except OSError:
pass # Non-fatal
return watch | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_dir(self):\n\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory, 755)\n if not os.path.exists(self.log_file):\n from pathlib import Path\n Path(self.log_file).touch()",
"def start_check():\n if not os.path.exists(outfancy_temp_files):\n os.mkdir(outfancy_temp_files)\n if not os.path.exists(outfancy_temp_files + log_file):\n os.system('touch ' + outfancy_temp_files + log_file)",
"def newdir(self, path, watch=True):\n assert False\n if watch:\n wm.add_watch(path, mask, rec=True)\n # the following may result in double-processed files but prevents a different race condition\n for d,sds,fns in os.walk(path):\n for fn in fns:\n self.newfile(os.path.join(d,fn))",
"def create_working_directory(self):\n os.makedirs(self.working_directory, exist_ok=True)",
"def create_dirs(self):\n for new_directory in [self.event_dir, self.event_dir / 'videos']:\n new_directory.mkdir(exist_ok=self.overwrite)\n logger.debug('Dir {} created', new_directory)",
"def _check_or_create_dir(directory):\n if not tf.gfile.Exists(directory):\n tf.gfile.MakeDirs(directory)",
"def dirChecking(dir):\n if not os.path.exists(dir):\n os.mkdir(dir)",
"def checkDir(directory):\n ## test if directory is there\n if not os.path.exists(directory):\n os.mkdir(directory)\n sys.out = open(directory + '/' + str(time.time()) + '.log', 'w')\n print(\"Making new directory: \" + directory + \"\\n\")\n else:\n sys.out = open(directory + '/' + str(time.time()) + '.log', 'w')\n print(\"Found directory: \" + directory + \"\\n\")",
"def _check_dirs(self):\r\n for dir in [self.papers_dir,\r\n self.buffer_dir]:\r\n if not os.path.exists(dir):\r\n message = f'Dir not exists: {dir}. Making it.'\r\n logging.warning(message)\r\n os.mkdir(dir)",
"def ensure_working_dir(self):\n # @todo: Log the error/exception if there is one\n # Try to make the directory(s) \n \n path = self.config.working_dir\n \n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise",
"def CreateTrackerDirIfNeeded():\n tracker_dir = config.get(\n 'GSUtil', 'resumable_tracker_dir',\n os.path.join(GetGsutilStateDir(), 'tracker-files'))\n CreateDirIfNeeded(tracker_dir)\n return tracker_dir",
"def make_experiment_directory(path='',config=None,default_dir='_runs'):\n directory = path\n if not path:\n timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S-%f')\n directory = os.path.join(default_dir,timestamp)\n directory = os.path.abspath(directory) \n if os.path.isdir(directory) and not config.override and not config.cloud:\n raise ValueError(\n 'directory already exists, use --override option: %s'\n % directory)\n elif os.path.isdir(directory) and not config.cloud: \n rmtree(directory)\n if not config.cloud: \n os.makedirs(directory)\n if config:\n config.wdir = directory \n return directory",
"def should_watch_dir(self, entry):\n return True",
"def create_cache_dir(self) -> None:\n try:\n os.makedirs(self.cache_folder)\n except FileExistsError:\n pass",
"def make_dir(self):\n if not os.path.exists(self.d):\n try:\n os.mkdir(self.d)\n except OSError, e:\n if e.errno != 17:\n raise\n pass",
"def _ensure_directory(self, dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)",
"def mkdir_if_not_exists(self, allow_dirty_run : bool = False):\n try:\n self.path.mkdir(parents=True, exist_ok=False)\n _already_exists = False\n except FileExistsError:\n _already_exists = True\n\n if _already_exists and not allow_dirty_run:\n error(\n f\"Daemon '{self.daemon_id}' already exists. \" +\n f\"To allow this daemon to run, do one of the following:\\n\"\n + \" - Execute `daemon.cleanup()`.\\n\"\n + f\" - Delete the directory '{self.path}'.\\n\"\n + \" - Pass `allow_dirty_run=True` to `daemon.run()`.\\n\",\n FileExistsError,\n )",
"def _makeDir(self):\n try:\n os.mkdir(self.dir)\n # log('created directory: %s\\n' % self.dir)\n except OSError, err:\n if err.errno != errno.EEXIST:\n raise",
"def _check_app_dir():\n if not os.path.exists(os.path.expanduser('~/.config/scheduler')):\n os.mkdir(os.path.expanduser('~/.config/scheduler'))",
"def make_workdir():\n workdir = tempfile.mkdtemp(prefix='cerise_runner_')\n _workdirs.append(workdir)\n return workdir",
"def create_dir(working_dir):\n if not os.path.exists(working_dir):\n os.makedirs(working_dir)",
"def mkdir_if_missing(d):\n if not os.path.exists(d):\n os.makedirs(d)",
"def ifnotexistmkdir(directory):\n if not os.path.exists(directory):\n os.mkdir(directory)\n return Path(directory)",
"def _make_new_directory(self, *, file_path: Path, need_init: bool):\r\n\r\n if Path(file_path).exists():\r\n raise FileExistsError(f\"The directory at {file_path} already exists.\")\r\n\r\n Path(file_path).mkdir(parents=True)\r\n\r\n if need_init:\r\n\r\n Path(file_path / \"__init__.py\").touch()\r\n\r\n self._logger.info(\"The directory %s has been created.\", file_path)",
"def fresh_directory():\n os.chdir(tempfile.mkdtemp())",
"def directory_setup(self):\n if not os.path.exists(self.settings_dir):\n os.makedirs(self.settings_dir)\n\n if not os.path.exists(self.sync_dir):\n os.makedirs(self.sync_dir)",
"def check_dir(dir):\n if not os.path.exists(dir):\n print(\"[+] Creating directory for target..\")\n os.makedirs(dir)",
"def safe_mkdir(path):\n # avoid race condition\n while True:\n try:\n if os.path.isdir(path):\n return\n os.makedirs(path)\n break\n except FileExistsError:\n sleep(0.1)",
"def _check_path(path):\n os.system(\"if [ ! -d \" + path + \" ]; then mkdir -p \" + path + \"; fi\")",
"def create_file_directory():\n\n # Verify if directory exist.\n # If yes, delete it and every thing inside and create it again.\n # If not, just create it.\n\n if os.path.isdir('./file'):\n\n shutil.rmtree('./file')\n\n os.mkdir('./file')"
] | [
"0.6459634",
"0.6346766",
"0.6331183",
"0.6299552",
"0.6197254",
"0.6110146",
"0.6098661",
"0.6089687",
"0.6088672",
"0.60865545",
"0.6084467",
"0.6080845",
"0.6065921",
"0.6042916",
"0.603886",
"0.59679985",
"0.5966238",
"0.5957223",
"0.59505254",
"0.5901048",
"0.5883695",
"0.58800656",
"0.5873469",
"0.5868089",
"0.5819347",
"0.58162314",
"0.58122104",
"0.5798983",
"0.5789843",
"0.5787266"
] | 0.77630746 | 0 |
Reload configuration. If a reload command is given, run it; otherwise, signal the process with HUP. | def reload_command(self):
if self.reload is None:
if not self.check_command():
LOGGER.info('Command dead, restarting...')
self.start_command(wait_for_config=False)
else:
LOGGER.info('Sending HUP signal...')
self.process.send_signal(signal.SIGHUP)
else:
LOGGER.info('Executing reload command...')
subprocess.call(shlex.split(self.reload)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reload_config():\n subprocess.run([SUPERVISOR_CMD, \"reload\"])",
"def nginx_reload():\n log('reload nginx', yellow)\n sudo('/etc/init.d/nginx reload')",
"def reload_config(self):\n pass",
"def command_reload(interface,command,args):\n command_unload(interface,command,args)\n command_load(interface,command,args)",
"def Reload(what):\n \n if what == \"commands\":\n print \"Reloading commands...\"\n main_script.Reload()\n print \"Success!\"\n\n elif what == \"triggers\": \n print \"NOTICE: Currently,only commands can be reloaded.\"\n\n else:\n print \"Error: %s is an invalid option!\"%what",
"def reload_gunicorn():\n puts(yellow(\"Reload gunicorn graceful\"))\n sudo('kill -HUP `cat %s`' % (env.gunicorn_pidpath), user=env.app_user)",
"def graceful_reload(signum, traceback):\n court.close()\n signal.signal(signal.SIGHUP, graceful_reload)",
"def reload_configurations(self) -> None:\n ...",
"def refresh():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"config\", \"reload\")\n else:\n cmd = _traffic_line(\"-x\")\n\n return _subprocess(cmd)",
"def handleReload(self, confInfo=None):",
"def reload(bot, event, *args):\n bot.config.load()\n bot.memory.load()",
"def reload(self):\n self.read(self._cfg_path)",
"def restart_nginx():\n run_command_on_selected_server(_restart_nginx)",
"def handle_adminreloadconfig(bot, event):\n try:\n bot.cfg.reload()\n getmainconfig().reload()\n except Exception, ex: handle_exception()\n event.done()",
"def reload(bot, event, *args):\n\n yield from bot.coro_send_message(event.conv, \"<b>reloading config.json</b>\")\n bot.config.load()\n\n yield from bot.coro_send_message(event.conv, \"<b>reloading memory.json</b>\")\n bot.memory.load()",
"def reload(self):\n puts('Reloading application...')\n local('touch ../reload.txt')",
"def comando_reload(self):\r\n\tif args.opcao == 'gne':\r\n configs = self.reload_gne_framework(args.file, args.loja, args.serie, args.nnf)\r\n return configs\r\n else:\r\n configs = self.reload_daruma_framework(args.file)\r\n return configs",
"def reload_process(self):\n try:\n output = subprocess.check_output([\"pidof\", \"haproxy\"])\n pids = output.strip().split(\" \")\n except Exception as exc:\n command = \"/usr/sbin/haproxy -f {{ dest }} -p /var/run/haproxy.pid\"\n else:\n command = \"/usr/sbin/haproxy -f {{ dest }} -p /var/run/haproxy.pid -sf xyz\"\n command = command.replace(\"xyz\", \" \".join(pids))\n\n command = command.replace(\"{{ dest }}\", self.dest)\n log.debug(\"Running reload_cmd: {}\".format(command))\n\n args = shlex.split(command)\n process = subprocess.Popen(args)",
"def reload_config(self):\n if self.faucet is not None:\n self.faucet.reload_config(None)",
"def rehash(self):\n logging.info(\"Rehashing started\")\n modules = self.cmd_plugins.get_modules()\n CommandBot.pause(self)\n PlugBot.stop(self)\n\n logging.info(\"Reloading config file\")\n self.botconfig = self.load_config(self.config_file)\n for module in modules:\n reload(module)\n CommandBot.reset(self)\n\n PlugBot.start(self)\n CommandBot.resume(self)\n self.join_rooms()",
"def trigger_reload(server):\n log.info(\"Triggering /reload on %s\", server)\n screenCmd(server, 'reload')",
"async def tool_reload(self, ctx, *, cog: str):\n\n try:\n self.bot.unload_extension(cog)\n self.bot.load_extension(cog)\n except Exception as e:\n await zb.bot_errors(ctx,sp.format(e))\n else:\n await ctx.send('**`SUCCESS`**')",
"def reload():\n if not _status_apf():\n return __apf_cmd(\"-r\")",
"def reload():\n xd = display.XKCDDisplayService()\n if xd.is_running():\n click.echo(\"gracefully reloading changes\")\n xd.send_signal(signal.SIGHUP)\n else:\n click.echo(\"xkcd service not running\")",
"def reconfigure(self):\n log.debug('Reconfiguring and restarting the DHCP daemon...')\n\n # Don't set the daemon running status here, but let the status\n # check take care of that.\n\n p = Properties(self.storage, CONFIG_SECTION)\n p.addCallback(self.changed).\\\n addCallback(lambda trigger: p.load()).\\\n addCallback(self.emit_config, p).\\\n addCallback(self.restart_daemon).\\\n addErrback(self.restart_error)",
"def restart_scrapy_daemon():\n global REPO_BASE_PATH\n logger.info('Scrapy daemon restarting...')\n arguments = ['python'] + [REPO_BASE_PATH+'/deploy/sqs_ranking_spiders/scrapy_daemon.py'] + sys.argv[1:]\n if 'restarted' not in arguments:\n arguments += ['restarted']\n else:\n logger.error('Error while restarting scrapy daemon. '\n 'Already restarted.')\n return\n logging.info('Starting %s with args %s' % (sys.executable, arguments))\n os.execv(sys.executable, arguments)",
"def restart():\n run('kill -HUP $(cat /tmp/pyar_web.pid)')",
"def restart():\n log('reiniciando servicos', yellow)\n nginx_stop()\n nginx_start()\n nginx_restart()\n nginx_reload()\n supervisor_stop()\n supervisor_start()",
"def restart_nginx():\n sudo('/etc/init.d/nginx restart')",
"def reload_test(test_name):\n sudo(\"restart %s\" % test_name)"
] | [
"0.75852543",
"0.6688586",
"0.6681827",
"0.66347086",
"0.633552",
"0.6311228",
"0.6310406",
"0.62902033",
"0.6279588",
"0.6202909",
"0.6199834",
"0.61551714",
"0.6126377",
"0.6089492",
"0.6079683",
"0.60628176",
"0.59531575",
"0.5931519",
"0.5927496",
"0.5915352",
"0.5904567",
"0.58952326",
"0.5886798",
"0.58565116",
"0.5840183",
"0.58117783",
"0.58108455",
"0.5788766",
"0.5782809",
"0.5780196"
] | 0.8385148 | 0 |
Return False if command is dead, otherwise True. | def check_command(self):
return self.process is not None and self.process.poll() is None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_dead(self):\n if self.killer:\n if self.killer.stype == 'fire' and not (self.killer in self.pjs.fires):\n return True\n elif self.killer.stype == 'enemy' and self.timeout == 0:\n return True\n else:\n return False",
"def __is_active(self, command):\n return True",
"def is_dead(self):\n return self.hp <= 0",
"def get_death(self):\r\n if self.dead:\r\n self.dead = False\r\n return True\r\n return False",
"def has_commands(self) -> bool:\n return len(self.commands) > 0",
"def isDead(self):\n\n\t\tif self.mCurrentHealthPoints < 1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def still_active(pid: int, cmd: str) -> bool:\n os_cmd = get_command_for_pid(pid)\n return cmd in os_cmd",
"def responds_to(self, command) -> bool:\n return command == self.command and self.active is True and self.command is not None",
"def is_dead(self):\r\n if len(self.parachute) <= 5:\r\n self.parachute.pop(0)\r\n self.parachute.insert(0, \" x\")\r\n return True\r\n else:\r\n return False",
"def are_you_dead(dead):\n if dead:\n quit()",
"def is_dead(self):\n return self.hearts <= 0",
"def is_alive(self):\n if self.health > 0:\n return True\n return False",
"def check( self ):\n\n if ( self.alive is not None ) \\\n and ( time.time() > ( self.alive + self.timeout ) ):\n return False\n return True",
"def alive(self, pid):\n try:\n self.ssh(\"kill -0 %s\" % str(pid), allow_fail=False)\n return True\n except:\n return False",
"def check_finish(self):\r\n return not self.proc.is_alive()",
"def alive(self):\n return True",
"def is_dead(self):\n dead = True if 0 >= self._score else False\n if p.params['ageing'] and self._age > p.params['age_of_death']:\n dead = True\n return dead",
"def should_poll(self):\n return self._command_state is not None",
"def is_alive(self):\n return (self.read_name() != '')",
"def is_alive(self):\n if self.status == 1:\n return True\n else:\n return False",
"def _iscommand(self, key):\r\n\t\tyes = False\r\n\t\tfor i in COMMAND_NAME.keys():\r\n\t\t\tif key == i: \r\n\t\t\t\tyes = True; break\r\n\t\treturn yes",
"def is_alive(self):\n try:\n return self.get_life() > 0\n except KeyError:\n return True",
"def assumed_state(self):\n return self._command_state is False",
"def alive(self):\n\n return self.subprocess.poll() is None and not self.thread_stop.is_set()",
"def _is_alive(self) -> bool:\n\n if self._on:\n return True\n\n try:\n os.kill(self.proc.pid, 0)\n except (OSError, ProcessLookupError):\n return False\n\n return True",
"def finishesCommand(self):\n\n return self.flag in ':fF'",
"def shooting(self):\r\n return not self.stopped",
"def dead(self):\n if not self._weak:\n return False\n cb = self._callback()\n if cb is None:\n return True\n return False",
"def is_alive(self):\n return not (self._find.is_alive() or \n self._sum.is_alive() or\n self._tag.is_alive() or \n self._register.is_alive() or\n self._dispatcher.is_alive())",
"def is_player_dead(self):\n player_rect = self.plane.get_drawables()[0].get_rect()\n for enemy in self.enemies:\n if enemy.collided_with(player_rect):\n return True"
] | [
"0.6922289",
"0.6847067",
"0.68249875",
"0.66671854",
"0.66655314",
"0.6653957",
"0.65789324",
"0.6558258",
"0.6550506",
"0.649371",
"0.64785314",
"0.64250284",
"0.63963515",
"0.6387006",
"0.6375392",
"0.6337235",
"0.62643814",
"0.62609",
"0.6241368",
"0.6222854",
"0.62103415",
"0.6206319",
"0.6201083",
"0.6182915",
"0.6174221",
"0.6174099",
"0.6151375",
"0.61437863",
"0.61029863",
"0.60829115"
] | 0.69605875 | 0 |
Get unique list of new config files in watch dir. | def get_config(self):
config = set()
while True:
filenames = self.get_config_files()
for fn in filenames:
if fn not in self.watch_names:
filenames.remove(fn)
if fn in config:
filenames.remove(fn)
# If we did not find any new config files, exit loop.
if not filenames:
break
# Save the config files we found, sleep, then look again.
config.update(filenames)
# Sleep a bit to allow for settling. We loop until no new
# config files are found.
time.sleep(1.0)
return config | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_config_files(self):\n flag, i = self.inotify\n\n if flag:\n kwargs = {}\n\n if PY3:\n kwargs['timeout_s'] = 0\n\n filenames = set()\n\n for event in i.event_gen(**kwargs):\n if event is None:\n break\n\n filenames.add(event[3])\n\n return list(filenames)\n\n else:\n return os.listdir(self.watch)",
"def watch_list(self) -> list:\n return []",
"def get_config_files(self):\n self.clear_lists()\n print self.abs_directory\n for file in os.listdir(self.abs_directory):\n print file\n if file.endswith('.json') and \"qemii\" in file:\n self.txt_files.append(file)",
"def get_file_list_without_current_log():\n full_list = sorted(filter(os.path.isfile, os.listdir('.')), key=os.path.getmtime)\n full_list.remove(\"connect-log.log\")\n return full_list",
"def _configFiles(self):\n import glob\n ret = [] \n for ext in self.configManager.extensions:\n ret.extend(\n glob.glob(f\"{self.pipelinesDir}/{self.pipeName}/*{ext}\"))\n return ret",
"def get_already_processed_files(config: Config) -> list[str]:\n already_processed_files = []\n if os.path.exists(config.already_processed):\n with open(config.already_processed, 'r') as f:\n already_processed_files = f.read().splitlines()\n\n return already_processed_files",
"def _get_changed_paths(self):\n paths = set()\n while True:\n if not self._inotify_poll.poll(0):\n break\n\n self._inotify_events += os.read(self._inotify_fd, 1024)\n while len(self._inotify_events) > _INOTIFY_EVENT_SIZE:\n wd, mask, cookie, length = _INOTIFY_EVENT.unpack(\n self._inotify_events[:_INOTIFY_EVENT_SIZE])\n if len(self._inotify_events) < _INOTIFY_EVENT_SIZE + length:\n break\n\n name = self._inotify_events[\n _INOTIFY_EVENT_SIZE:_INOTIFY_EVENT_SIZE+length]\n name = name.rstrip('\\0')\n\n logging.debug('wd=%s, mask=%s, cookie=%s, length=%s, name=%r',\n wd, hex(mask), cookie, length, name)\n\n self._inotify_events = self._inotify_events[_INOTIFY_EVENT_SIZE+length:]\n\n if mask & IN_IGNORED:\n continue\n try:\n directory = self._watch_to_directory[wd]\n except KeyError:\n logging.debug('Watch deleted for watch descriptor=%d', wd)\n continue\n\n path = os.path.join(directory, name)\n if os.path.isdir(path) or path in self._directory_to_watch_descriptor:\n if mask & IN_DELETE:\n self._remove_watch_for_path(path)\n elif mask & IN_MOVED_FROM:\n self._remove_watch_for_path(path)\n elif mask & IN_CREATE:\n self._add_watch_for_path(path)\n elif mask & IN_MOVED_TO:\n self._add_watch_for_path(path)\n if path not in paths:\n paths.add(path)\n return paths",
"def find_config_files(create: bool = False) -> List[str]:\n files = [\".wpwatcher/wpwatcher.conf\", \"wpwatcher.conf\"]\n env = [\"HOME\", \"XDG_CONFIG_HOME\", \"APPDATA\", \"PWD\"]\n\n return Config.find_files(\n env, files, Config.TEMPLATE_FILE, create=create\n )",
"def find_config_files(create=False):\n files = [\".wpwatcher/wpwatcher.conf\", \"wpwatcher.conf\"]\n env = [\"HOME\", \"XDG_CONFIG_HOME\", \"APPDATA\", \"PWD\"]\n\n return WPWatcherConfig.find_files(env, files, WPWatcherConfig.TEMPLATE_FILE)",
"def getFileList(self):\n sid = 86400 # change to 3600 for hour-by-hour\n uDays = range(sid*(int(self.uStart)/sid),sid+(sid*(int(self.uStop)/sid)),sid)\n fileList = []\n sep = os.path.sep\n for d in uDays:\n s = unixTimeToString(d)\n ymdPath = 'year' + s[0:4] + sep + 'month' + s[5:7] + sep + 'day' + s[8:10]\n dirname = self.basePath + sep + ymdPath + sep + self.sensor + sep + 'padhist'\n pattern = '*' + self.sensor + '_hstv*.mat'\n nameList = glob.glob1(dirname,pattern)\n for name in nameList:\n uTime = stringTimeToUnix(name[0:13] + '_00_00.000')\n if ( self.uStart <= uTime <= self.uStop ):\n #print 'IN: %s' % unixTimeToString(uTime)\n fileList.append(dirname + sep + name)\n fileList.sort()\n self.fileList = fileList",
"def _list_gen(watchdog_path):\n # Remove all dotfiles and all non-file\n for watchdog in os.listdir(watchdog_path):\n if watchdog[0] == '.':\n continue\n\n filename = os.path.join(watchdog_path, watchdog)\n try:\n filestat = os.lstat(filename)\n except os.error:\n continue\n\n if not stat.S_ISREG(filestat.st_mode):\n continue\n\n yield (watchdog, filename, filestat)",
"def get_update_file_list(directory):\n update_files_list = set(UPDATE_FILES_STATIC)\n update_files_exclude = set(UPDATE_FILES_EXCLUDE)\n\n for root, dirs, files in os.walk(path.join(PATH_ROOT, directory)):\n for filen in files:\n if UPDATE_FILES_RE.match(filen):\n filep = path.join(root, filen)\n update_files_list.add(path.relpath(filep, PATH_ROOT))\n \n return update_files_list - update_files_exclude",
"def getFileList(self):\n print 'getting fileList ...',\n sid = 86400 # change to 3600 for hour-by-hour\n uDays = range(sid*(int(self.uStart)/sid),sid+(sid*(int(self.uStop)/sid)),sid)\n fileList = []\n sep = os.path.sep\n for d in uDays:\n s = unixTimeToString(d)\n ymdPath = 'year' + s[0:4] + sep + 'month' + s[5:7] + sep + 'day' + s[8:10]\n dirname = self.basePath + sep + ymdPath + sep + self.subDir\n pattern = '*' + self.sensor\n nameList = glob.glob1(dirname,pattern)\n for name in nameList:\n ufStart = stringTimeToUnix(name[0:23])\n ufStop = stringTimeToUnix(name[24:47])\n if ( ufStart <= self.uStart <= ufStop ) or ( self.uStart <= ufStart <= self.uStop ) or ( ufStart <= self.uStop <= ufStop ):\n #print 'IN: %s' % unixTimeToString(uTime)\n fileList.append(dirname + sep + name)\n## else:\n## print 'OUT:\\n%s\\n%s\\n%s' % (unixTimeToString(ufStart),unixTimeToString(self.uStart),unixTimeToString(ufStop))\n fileList.sort()\n self.fileList = fileList\n print 'done'",
"def _config_list(res, ctx):\n\n if _has_error_code(res):\n return print_errors(res, ctx)\n\n lines = []\n for config in res['configs']:\n line = '* ' if config['current'] else ' '\n\n if ctx.verbose:\n line += config['mtime'] + ' '\n\n line += config['name']\n lines.append(line)\n\n return \"\\n\".join(lines)",
"def get_new_change_status_files():\n rdict=dict()\n mastcontrol=dirutil.get_mast_control_path()\n mastscratch=dirutil.get_mast_scratch_path()\n olddirs=list()\n recipedirs=dirutil.immediate_subdirs(os.path.join(mastcontrol,\"changestatusfiles\"))\n for recipedir in recipedirs:\n ingreddirs = dirutil.immediate_subdirs(os.path.join(mastcontrol,\"changestatusfiles\",recipedir))\n for ingreddir in ingreddirs:\n olddirs.append(\"%s/%s\" % (recipedir, ingreddir))\n srecdirs = dirutil.immediate_subdirs(mastscratch)\n for srecdir in srecdirs:\n singreddirs = dirutil.immediate_subdirs(os.path.join(mastscratch,srecdir))\n for singreddir in singreddirs:\n csfile = os.path.join(mastscratch, srecdir, singreddir, \"change_status.txt\")\n if os.path.isfile(csfile):\n if not \"%s/%s\" % (srecdir, singreddir) in olddirs:\n if not srecdir in rdict.keys():\n rdict[srecdir]=dict()\n rdict[srecdir][\"MAIN\"]=\"changed\"\n rdict[srecdir][singreddir]=\"send\"\n return rdict",
"def loadRecentFiles(self):\n self.recentFiles.clear()\n for n in range(RECENTFILEMAX):\n rf = self.getSection(CFG_RECENT, str(n))\n if rf:\n self.recentFiles.append(rf)\n else:\n break",
"def files():\n return get_cached(\"files.json\")",
"def make_files(self):\n return []",
"def get_file_list(start):\n valid_files = []\n for root, dirs, files in os.walk(start):\n for name in files:\n if name[-5:] == \".conf\":\n valid_files.append(os.path.join(root,name))\n return valid_files",
"def _get_daemon_logs_files(self):\n for fname in os.listdir('/tmp/'):\n fname = os.path.join('/tmp/', fname)\n if fname.lower().endswith('.log'):\n yield fname",
"def _findChangedFiles(self):\n changedFiles = []\n # calculate and update checksums always for ALL files\n for observedFile in self.observedFiles:\n if os.path.isfile(observedFile.filePath):\n currentChecksum = checksumFile(observedFile.filePath)\n else:\n currentChecksum = None\n # different values with None value checking\n if ((observedFile.lastChecksum is None\n and currentChecksum is not None)\n or observedFile.lastChecksum != currentChecksum):\n changedFiles.append(observedFile) # notify change\n observedFile.lastChecksum = currentChecksum # update checksum\n\n return changedFiles",
"def get_monitored_changes(self) -> List:\n pass",
"def get_files(self, name):\n return self.apps[name]['configuration_files']",
"def get_file_list(self):\n try:\n for filename in os.listdir(SHARED_DIR):\n self.file_list.append(filename)\n except Exception as e:\n print \"Error: retriving file list, %s\" % e",
"def _most_recent_event_files(self):\n regex = re.compile(r\"\\w*events.log\")\n return [\n os.path.join(self._output_dir, x)\n for x in os.listdir(self._output_dir)\n if regex.search(x)\n ]",
"def _get_changed_files():\n if not ci_diff_helper:\n return None\n\n try:\n config = ci_diff_helper.get_config()\n except OSError: # Not on CI.\n return None\n\n changed_files = ci_diff_helper.get_changed_files('HEAD', config.base)\n\n changed_files = set([\n './{}'.format(filename) for filename in changed_files])\n\n return changed_files",
"def invalidate_for_files(self):\r\n return []",
"def all_changed_files(self):\n return [path_to_file_type(os.path.join(self.path, p)) for p in self.changed_paths() if p]",
"def last_videos_recorded(self) -> list:\n return sorted(glob.glob(VIDEOS_DIR), key=os.path.getmtime)[-20:]",
"def _get_files_list(self):\n ts_filepaths = []\n conn_filepaths = []\n ts_filepaths_from_dir = sorted(os.listdir(self.ts_dir))\n conn_filepaths_from_dir = sorted(os.listdir(self.conn_dir))\n for sub_id in self.ids:\n for ts_file in ts_filepaths_from_dir:\n if sub_id in ts_file:\n ts_filepaths += [os.path.join(self.ts_dir, ts_file)]\n ts_filepaths_from_dir.remove(ts_file)\n break\n for conn_file in conn_filepaths_from_dir:\n if sub_id in conn_file:\n conn_filepaths += [os.path.join(self.conn_dir, conn_file)]\n conn_filepaths_from_dir.remove(conn_file)\n break\n\n return ts_filepaths, conn_filepaths"
] | [
"0.7831209",
"0.70418906",
"0.66463995",
"0.6577422",
"0.64742094",
"0.64292943",
"0.64021975",
"0.6402068",
"0.63111544",
"0.62958807",
"0.6260233",
"0.61890477",
"0.61561424",
"0.61066765",
"0.610564",
"0.6097907",
"0.6075739",
"0.6075056",
"0.6053437",
"0.6035494",
"0.60281545",
"0.6017644",
"0.6009015",
"0.5984452",
"0.59477365",
"0.5919009",
"0.589806",
"0.58844763",
"0.5878595",
"0.5856593"
] | 0.7109554 | 1 |
Kill the running command. | def kill(self):
if self.process is not None:
LOGGER.info('Killing command...')
self.process.kill()
self.process = None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def kill(self):\n\n self.proc.kill()",
"def kill(self):\n self._process.kill()",
"def kill(self):\n self._stop_proc(signal.SIGKILL)",
"def kill(self):\n self._update()\n if self.running_mode == \"local\":\n for process in self.processes:\n try:\n process.kill()\n except psutil.NoSuchProcess:\n # The process has just terminated\n # In multiprocess run this is likely to happen when other processes stops.\n pass\n elif self.running_mode == \"grid\":\n subprocess.check_call(\"qdel %d\" % self.job[\"job_number\"], shell=True)\n pass\n else:\n logger.warning(\"Asked for termination of a Run not known to be running.\")",
"def kill(self):\r\n try:\r\n if self.process:\r\n self.process.kill()\r\n self.process.wait()\r\n except WindowsError:\r\n # kill may not be available under windows environment\r\n pass",
"def kill(self):\n if self.transport.pid is not None:\n self.transport.signalProcess('KILL')",
"def kill(self):\n self.send_signal(signal.SIGKILL)",
"def kill(self):\n self.child.kill()",
"def kill(self):\n self.proc.kill()\n self.proc.wait()\n self.thread.join()",
"def kill(self):\n subprocess.check_output(['sudo', 'kill', str(self._relaypid)])\n self.close()",
"def _kill(self) -> None:\n if not hasattr(self, \"proc\"):\n raise FuzzFrontendError(\"Attempted to kill non-running PID.\")\n\n self.proc.terminate()\n try:\n self.proc.wait(timeout=0.5)\n L.info(\"Fuzzer subprocess exited with `%d`\", self.proc.returncode)\n except subprocess.TimeoutExpired:\n raise FuzzFrontendError(\"Subprocess could not terminate in time\")\n\n self._on = False",
"def stop(self):\n os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)",
"def kill(self):\n self.error_code = 'KILLED'\n self.running = False",
"def remote_kill():",
"def kill():\n sb.call(\"Taskkill /IM SLDWORKS.exe /F\")",
"def kill(self):\n return self._raw_execute(\"cancel\", {\"job_id\": self.job_id})",
"def stop(self):\n try:\n self.process.terminate()\n self.process = None\n except AttributeError:\n return",
"def kill(self):\n self.status = Modem.Status.KILL",
"def stop(self):\n if self._process is not None:\n self._process.terminate()",
"def terminate(self):\n if self.proc:\n self.proc.kill()\n self.proc = None",
"def kill(self):\n self._destruct()\n pass",
"def kill(self):\n \n self.killSlavePids()",
"def __del__(self):\n self._proc.kill()",
"def script_kill(self):\n return self._execute([b'SCRIPT', b'KILL'], b'OK')",
"def stop(self):\n os.system(\"taskkill /T /F /PID %s\" % self.process.pid)\n self.running = False",
"def kill(self):\n self._exit = True",
"def stop(self, kill=False):\n if not self._process:\n raise JubaTestFixtureFailedError('this instance has not been started yet')\n\n try:\n if kill:\n log.debug('KILLing process')\n self._process.kill()\n else:\n log.debug('terminating process')\n self._process.terminate()\n except OSError as e:\n if e.errno != errno.ESRCH: # \"No such process\"\n raise e\n # may be a race between poll and signal; just ignore\n log.debug('race between poll and signal detected')\n finally:\n (self.stdout, self.stderr) = self._process.communicate()\n self._process = None",
"def processKill(uPid):\n return processTerminate(uPid);",
"def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=(not self.length),\n kill_string=IperfClient.KILL_STRING)\n self.child_pid = None",
"def kill(self):\n\n #Kill relevant process names\n if self.driver_type != 'firefox_wdm':\n os.system('pkill -f chrome')\n os.system('pkill -f Chrome')\n os.system('pkill -f chromedriver')\n else:\n os.system('pkill -f FireFox')\n #TODO: confirm this -> os.system('pkill -f geckodriver')"
] | [
"0.7861475",
"0.7555022",
"0.749188",
"0.7406674",
"0.72241443",
"0.7057569",
"0.7053252",
"0.7020611",
"0.7016701",
"0.69516635",
"0.693628",
"0.6888209",
"0.6873801",
"0.6854114",
"0.6798234",
"0.6792874",
"0.67909163",
"0.6775448",
"0.674491",
"0.6723775",
"0.6702169",
"0.6690302",
"0.66873866",
"0.6676881",
"0.66537446",
"0.6639816",
"0.6628959",
"0.6610448",
"0.65701115",
"0.6567629"
] | 0.8239272 | 0 |
Backs up entire configuration. | def backup_config(self):
prev_config = set()
for src in self.config:
dst = '%s.prev' % src
LOGGER.debug('Backing up %s to %s', src, dst)
try:
shutil.copy(src, dst)
except IOError as e:
if e.errno != errno.ENOENT:
raise
# If the config file is missing, we can skip backing it up.
LOGGER.warning('File %s missing, skipping backup', src)
else:
prev_config.add(dst)
return prev_config | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def backup(config_file, bakfile):\n return _backup_config(config_file, bakfile)",
"def backup(ctx, project, origin, force):\n\n if not check_main_conf(ctx):\n return\n\n if origin is not None and project is None:\n click.echo(\"--project option is required when --origin is set.\")\n return\n\n bkp = ctx.obj[\"bkp\"]\n\n if not os.path.exists(ctx.obj[\"PROJECTS_DIR\"]):\n click.echo(\"Projects directory doesn't exists at %s\" % ctx.obj[\"PROJECTS_DIR\"])\n return\n\n if project is not None:\n bkp.project_load(project_name=project)\n bkp.backup(origin=origin, force=force)\n else:\n for file in os.listdir(ctx.obj[\"PROJECTS_DIR\"]):\n if file.endswith(\".conf\"):\n project_name = file.replace(\".conf\", \"\")\n bkp.project_load(project_name=project_name)\n bkp.backup(origin=origin, force=force)",
"def backup(self):\n self.logger.info(\"Backing up current version of model...\")\n self.save_checkpoint(filename='backup.pth.tar')",
"def backup(self):\n self.rollback_steps.insert(0, self.mongos.start_balancer)\n self.run_step(self.mongos.stop_balancer, 2)\n\n self.run_step(self.wait_for_locks)\n\n self.rollback_steps.insert(0, self.finish_shards_maintenance)\n self.run_step(self.prepare_shards_maintenance)\n\n self.run_step(self.backup_dump)\n\n self.rollback_steps.remove(self.finish_shards_maintenance)\n self.run_step(self.finish_shards_maintenance, 2)\n\n self.rollback_steps.remove(self.mongos.start_balancer)\n self.run_step(self.mongos.start_balancer, 4) # it usually starts on\n # the second try\n\n if self.backup_bucket is not None:\n run(\"rmdir %s\" % self.backup_path)\n\n logging.info(\"Finished successfully\")",
"def backup(self):\r\n print('Backing up old files...')\r\n\r\n # Connect with SSH-PubKey and execute backup script\r\n subprocess.run(\r\n ['ssh',\r\n '-i', self.ssh_key,\r\n '-o', 'StrictHostKeyChecking=no',\r\n 'robot@{}'.format(self.settings['ip']),\r\n 'robolab-backup'\r\n ])\r\n\r\n print('Done.')",
"def _backup_config(config_file, bak_path=None):\n try:\n if not bak_path:\n bak_path = config_file+\".bak\"\n with open(config_file, 'r') as oldfile, open(bak_path, 'w') as bakfile:\n tmp = oldfile.read(1024)\n while tmp:\n bakfile.write(tmp)\n tmp = oldfile.read(1024)\n except Exception, e:\n return 1, e\n return 0, \"success\"",
"def backup(self):\n import datetime\n suffix = datetime.datetime.now().strftime('%Y-%m-%d--%H-%M-%S')\n self.host.run(\"test -f '%s' && cp --archive '%s' '%s.%s'\" % (\n esc1(self.remote_path), esc1(self.remote_path), esc1(self.remote_path), esc1(suffix)), use_sudo=self.use_sudo)",
"def backup():\n backup_shift(os, config.utils.tasks.backup_depth)\n if config.utils.tasks.secret_key is None:\n shutil.copyfile(config.core.database_name, config.core.database_name+'.1')\n else:\n data = get_encrypted_database()\n with open(config.core.database_name+'.1', 'wb') as f:\n f.write(data)",
"def makeBackup(self):\n #--File Path\n original = self.path\n #--Backup\n backup = self.path+'.bak'\n shutil.copy(original,backup)\n #--First backup\n firstBackup = self.path+'.baf'\n if not os.path.exists(firstBackup):\n shutil.copy(original,firstBackup)",
"def backup(ctx):\n config_path = ctx.obj['config_path']\n logger = ctx.obj['logger']\n\n config = Config(config_path)\n scheduler = BlockingScheduler(\n executors={'default': ThreadPoolExecutor(max_workers=1)}\n )\n\n for job in config.jobs.values():\n logger.info(f'filesystem={job.filesystem} '\n f'cron=\"{job.cron}\" '\n 'msg=\"Adding job.\"')\n scheduler.add_job(job.start, 'cron', **job.cron, coalesce=True)\n\n try:\n scheduler.start()\n except (KeyboardInterrupt, SystemExit):\n pass",
"def __makeBackup(self):\n pass #FIXME!!!",
"def backup_tempest_config(conf_file, res_dir):\n if not os.path.exists(res_dir):\n os.makedirs(res_dir)\n shutil.copyfile(conf_file,\n os.path.join(res_dir, 'tempest.conf'))",
"def test_backup_compact(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n self.backup_compact_validate()",
"def backup_all_db():\n filename = BACKUP_DIR + \"/\" + str(datetime.datetime.now().isoformat()) + \".yaml\"\n with open(filename, 'w+') as base_fp:\n for model in [Framework, Project, Document, Component, Arch, # Meta models\n WorkItem, AutoCase, Linkage, Bug, AutoCaseFailure]:\n base_fp.write(serializers.serialize('yaml', model.objects.all(), fields=model._min_dump))",
"def restore_config(self, config):\n for src in config:\n # Remove .prev\n dst, _ = splitext(src)\n LOGGER.debug('Restoring %s from %s', dst, src)\n shutil.move(src, dst)",
"def save(self):\n for p, c in self.configs_:\n c.write(p)",
"def archive_backup(self):\n\n # Archiving the Training script\n shutil.copyfile(self.script_path, self.save_path + '/0-' + os.path.basename(self.script_path))\n os.chmod(self.save_path + '/0-' + os.path.basename(self.script_path), 0o755)\n # Archiving the src folder\n pkg_path = os.path.dirname(arch_src)\n backup_path = os.path.join(self.save_path, 'src_backup')\n shutil.make_archive(backup_path, 'gztar', pkg_path)\n\n # Archiving the Environment Info\n env_info = collect_env.get_pretty_env_info()\n with open(self.save_path + '/env_info.txt', 'w') as f:\n f.write(env_info)",
"def backup_config(context):\n context.copy_from(DNF_PLUGIN_DATA_PATH, DNF_PLUGIN_DATA_LOG_PATH)",
"def automatic_backup(self):\n\n if self.observationId:\n logging.info(\"automatic backup\")\n self.save_project_activated()",
"def backup_database():\n db_path = os.path.join(config.cum_dir, 'cum.db')\n backup_path = os.path.join(config.cum_dir, 'cum.db.bak')\n copyfile(db_path, backup_path)",
"def do_backup(infile, simulate=False):\n\n # parse the input file\n cp = cparse.ConfigParser()\n cp.optionxform = str\n cp.read(infile)\n\n\n # store the list of files and directories we will backup\n\n # in each dictionary, the key is the root directory to copy from and the\n # list it indexes is the list of files/directories under that root to copy\n dirs = {}\n files = {}\n\n for sec in cp.sections():\n\n if sec == \"main\":\n\n # defaults\n root = \"/backup\"\n prefix = \"my-backup-\"\n nstore = 3\n email_sender = \"root\"\n email_receiver = \"root\"\n\n for opt in cp.options(\"main\"):\n if opt == \"root\":\n root = cp.get(sec, opt)\n elif opt == \"prefix\":\n prefix = cp.get(sec, opt)\n elif opt == \"nstore\":\n nstore = cp.get(sec, opt)\n elif opt == \"email_sender\":\n email_sender = cp.get(sec, opt)\n elif opt == \"email_receiver\":\n email_receiver = cp.get(sec, opt)\n else:\n sys.exit(\"invalid option in [main]\")\n\n bo = Backup(root, prefix, nstore,\n email_sender, email_receiver)\n else:\n\n for opt in cp.options(sec):\n value = cp.get(sec, opt)\n\n if opt == \"files\":\n flist = [f.strip() for f in value.split(',')]\n files[sec] = flist\n\n if opt == \"dirs\":\n dlist = [d.strip() for d in value.split(',')]\n dirs[sec] = dlist\n\n\n # log the output\n out_msg = f\"Output from backup-machine.py, inputs file: {infile}\\n\"\n\n blog = Log(out_msg)\n\n # make sure that the output directory exists and if so, get all the\n # subdirectories in it\n try:\n old_dirs = os.listdir(bo.root)\n except:\n blog.log(\"destination directory is not readable/doesn't exist\\n\")\n report(blog.ostr, SUBJECT_FAIL, bo.sender, bo.receiver)\n sys.exit(\"directory not readable\")\n\n\n # how many existing backups are in that directory?\n backup_dirs = [o for o in old_dirs if o.startswith(bo.prefix) and\n os.path.isdir(f\"{bo.root}/{o}\")]\n\n backup_dirs.sort()\n backup_dirs.reverse()\n\n\n # backup_dirs now contains a list of all the currently stored backups.\n # The most recent backups are at the start of the list.\n print(\"currently stored backups: \")\n for bdir in backup_dirs:\n print(bdir)\n\n # get ready for the new backups\n backup_dest = os.path.normpath(bo.root) + '/' + bo.prefix + bo.date\n\n if not simulate:\n try:\n os.mkdir(backup_dest)\n except:\n blog.log(\"error making directory\\n\")\n report(blog.ostr, SUBJECT_FAIL, bo.sender, bo.receiver)\n sys.exit(\"Error making dir\")\n else:\n blog.log(f\"mkdir {backup_dest}\\n\")\n\n\n blog.log(f\"writing to: {backup_dest}\\n\\n\")\n\n failure = 0\n\n # backup all the directories\n for root_dir in dirs:\n for d in dirs[root_dir]:\n\n mydir = os.path.normpath(root_dir + '/' + d)\n if not os.path.isdir(mydir):\n blog.log(f\"WARNING: directory {mydir} does not exist... skipping.\\n\")\n continue\n\n blog.log(f\"copying {mydir} ...\\n\")\n\n if not simulate:\n try:\n shutil.copytree(mydir,\n os.path.normpath(backup_dest) + '/' + d,\n symlinks=True)\n except:\n blog.log(f\"ERROR copying {mydir}\\n\")\n blog.log(\"aborting\\n\")\n failure = 1\n break\n\n blog.log(\"done with directories\\n\\n\")\n\n # backup all the files\n for root_dir in files.keys():\n for f in files[root_dir]:\n\n myfile = os.path.normpath(root_dir + '/' + f)\n if not os.path.isfile(myfile):\n blog.log(f\"WARNING: file {myfile} does not exist... 
skipping.\\n\")\n continue\n\n blog.log(f\"copying {root_dir}/{f} ...\\n\")\n\n if not simulate:\n try:\n shutil.copy(myfile,\n os.path.normpath(backup_dest) + '/' + f)\n except:\n blog.log(\"ERROR copying\\n\")\n blog.log(\"aborting\\n\")\n failure = 1\n break\n\n blog.log(\"done with individual files\\n\\n\")\n\n # if we were successful, then remove any old backups, as necessary\n if not failure:\n\n # keep in mind that we just stored another backup\n if len(backup_dirs) > bo.nstore-1:\n for n in range(bo.nstore-1, len(backup_dirs)):\n rm_dir = bo.root + '/' + backup_dirs[n]\n\n blog.log(f\"removing old backup: {rm_dir}\\n\")\n\n if not simulate:\n try:\n shutil.rmtree(rm_dir)\n except:\n blog.log(f\"ERROR removing {rm_dir}\\n\")\n\n subject = f\"summary from backup-machine.py, infile: {infile}\"\n if simulate:\n subject = \"[simulate] \" + subject\n else:\n subject = f\"ERROR from backup-machine.py, infile: {infile}\"\n\n\n report(blog.ostr, subject, bo.sender, bo.receiver)",
"def backup_database():\n logger.info(\"start database_backup\")\n management.call_command('dbbackup', compress=True)\n logger.info(\"end database_backup\")",
"def run(self):\n try:\n print \"# Obiba backup started (%s)\" % datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n self.__loadConfig()\n self.__setup()\n self.__backupRemoteProjects()\n self.__backupProjects()\n except Exception, e:\n print '*' * 80\n print \"* ERROR\"\n print\n print traceback.format_exc()\n print '*' * 80\n finally:\n print \"# Obiba backup completed (%s)\" % datetime.now().strftime('%Y-%m-%d %H:%M:%S')",
"def SaveConfig(self):\n config_value = getattr(self, APPDATA)\n path_value = config_value.AbsolutePaths[0]\n default_cfg_file = os.path.join(path_value, CONFIG_FILE_NAME)\n temp_file = default_cfg_file + '.TEMP'\n if os.path.exists(default_cfg_file):\n json.dump(type(self)._CURRENT_CONFIG,\n open(temp_file.lower(),\n mode='w'),\n cls=ConfigEncoder,\n sort_keys=False,\n indent=4)\n EnsureBackup(temp_file, default_cfg_file)\n else:\n if not os.path.isdir(path_value):\n os.mkdir(path_value)\n json.dump(type(self)._CURRENT_CONFIG,\n open(default_cfg_file.lower(),\n mode='w'),\n cls=ConfigEncoder,\n sort_keys=False,\n indent=4)",
"def backup():\n # Backup the WordPress database.\n db('backup')\n\n # Copy teh wp-config.php file from the server.\n get(os.path.join(env.wordpress_path, 'wp-config.php'),\n './backups/wp-config.php')\n\n now = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n\n theme_list = wp_cli('theme list --format=csv')\n plugin_list = wp_cli('plugin list --format=csv')\n\n # Backup the installed themes\n #with open('./backups/themes.csv', 'w') as f:\n # f.write(theme_list)\n\n # Backup the installed plugins\n #with open('./backups/plugins.csv', 'w') as f:\n # f.write(plugin_list)",
"def _save_changes(self):\n copy2(self._cfg_filename, self._cfg_filename + \".bak\")\n with open(self._cfg_filename, \"w\", encoding=\"utf-8\") as self._cfg_file:\n self.write(self._cfg_file)",
"def fusion_api_update_backup_config(self, body, api=None, headers=None):\n return self.backup.update(body, api=api, headers=headers)",
"def __setup(self):\n\n backupFolder = self.config['destination']\n self.__createBackupFolder(backupFolder)\n\n # create the project based backup folder\n today = date.today()\n\n if 'projects' in self.config:\n for project in self.config['projects'].iterkeys():\n timestamp = datetime.now().strftime('%d-%H-%M-%S')\n backupDestination = os.path.join(backupFolder, project, str(today.year), today.strftime('%m'), timestamp)\n self.__createBackupFolder(backupDestination)\n self.config['projects'][project]['destination'] = backupDestination",
"def backup(self):\n\n for filename in self.filenames[:]:\n if not filename.endswith(\".\"+self.PYTHON_EXTENSION):\n continue\n origfilename = filename + \".\" + self.BACKUP_EXTENSION\n if origfilename not in self.filenames:\n shutil.copy(filename, origfilename)\n self.filenames.append(origfilename)",
"def __restoreBackup(self):\n pass #FIXME!!!"
] | [
"0.6370239",
"0.62242675",
"0.61583614",
"0.61377156",
"0.6079129",
"0.60359734",
"0.59865093",
"0.59671223",
"0.59083843",
"0.5764068",
"0.5725047",
"0.5714281",
"0.56883514",
"0.56292564",
"0.5621002",
"0.5616206",
"0.5596354",
"0.5594662",
"0.55940187",
"0.5514033",
"0.5496969",
"0.5484424",
"0.54832053",
"0.5438956",
"0.5419573",
"0.54167485",
"0.5414309",
"0.540266",
"0.5394158",
"0.5376204"
] | 0.6756827 | 0 |
Backup old config, write new config, test config, HUP or restore. | def test_and_swap(self, config):
LOGGER.info('Attempting to apply new configuration')
backup = self.backup_config()
# We have backed up ALL config files (not just the ones we might
# replace). If any error occurs from here out, we will need to restore
# our config, so we will use exception handling.
try:
self.install_config(config)
# We have now merged in our new configuration files, lets test this
# config.
if self.test_command(quiet=False):
LOGGER.debug('Configuration good, reloading')
self.reload_command()
self.remove_config(backup)
else:
LOGGER.info('Configuration bad, restoring')
self.restore_config(backup)
except Exception:
LOGGER.exception('Failure, restoring config', exc_info=True)
self.restore_config(backup) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _backup_config(config_file, bak_path=None):\n try:\n if not bak_path:\n bak_path = config_file+\".bak\"\n with open(config_file, 'r') as oldfile, open(bak_path, 'w') as bakfile:\n tmp = oldfile.read(1024)\n while tmp:\n bakfile.write(tmp)\n tmp = oldfile.read(1024)\n except Exception, e:\n return 1, e\n return 0, \"success\"",
"def backup_config(self):\n prev_config = set()\n for src in self.config:\n dst = '%s.prev' % src\n LOGGER.debug('Backing up %s to %s', src, dst)\n\n try:\n shutil.copy(src, dst)\n\n except IOError as e:\n if e.errno != errno.ENOENT:\n raise\n\n # If the config file is missing, we can skip backing it up.\n LOGGER.warning('File %s missing, skipping backup', src)\n\n else:\n prev_config.add(dst)\n return prev_config",
"def backup(config_file, bakfile):\n return _backup_config(config_file, bakfile)",
"def test_config_save_restore(self):\n\n config_filename_initial = 'test_configuration'\n config_filename_save = 'save_configuration'\n\n # Get config path\n local_dir = os.path.dirname(__file__)\n config_path_initial = os.path.join(local_dir, config_filename_initial)\n config_path_save = os.path.join(local_dir, config_filename_save)\n\n # Load initial configuration from file\n config_initial = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path_initial)\n\n config1 = config_initial.genome_config\n names1 = [p.name for p in config1._params]\n for n in names1:\n assert hasattr(config1, n)\n\n # Save configuration to another file\n config_initial.save(config_path_save)\n\n # Obtain configuration from saved file\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path_save)\n\n config2 = config.genome_config\n names2 = [p.name for p in config2._params]\n for n in names2:\n assert hasattr(config2, n)\n\n self.assertEqual(names1, names2)\n\n for n in names1:\n v1 = getattr(config1, n)\n v2 = getattr(config2, n)\n self.assertEqual(v1, v2)",
"def restore_config(self, config):\n for src in config:\n # Remove .prev\n dst, _ = splitext(src)\n LOGGER.debug('Restoring %s from %s', dst, src)\n shutil.move(src, dst)",
"def sanitize_new_config(self):\n config_log = self._load_config_log()\n if 'new' in config_log:\n for cfg in config_log['new']:\n with open(cfg, 'r+') as f:\n data = yaml.load(f)\n f.seek(0)\n yaml.safe_dump(data, f, default_flow_style=False)\n f.truncate()\n del config_log['new']\n\n self._save_config_log(config_log)",
"def restore_backup():\n\n # restore vim configuration folder\n if exists('.vim-bkp'):\n print(green('Restoring your vim configuration folder.'))\n cmd = 'rm -rf .vim'\n run(cmd)\n cmd = 'mv .vim-bkp .vim'\n run(cmd)\n else:\n print(red('vim-bkp folder not found.'))\n\n # restore vim configuration file\n if exists('.vimrc-bkp'):\n print(green('Restoring your vim configuration file.'))\n cmd = 'rm -rf .vimrc'\n run(cmd)\n cmd = 'mv .vimrc-bkp .vimrc'\n run(cmd)\n else:\n print(red('vimrc-bkp file not found.'))",
"def test_config_save_restore1(self):\n\n config_filename_initial = 'test_configuration2'\n config_filename_save = 'save_configuration2'\n\n # Get config path\n local_dir = os.path.dirname(__file__)\n config_path_initial = os.path.join(local_dir, config_filename_initial)\n config_path_save = os.path.join(local_dir, config_filename_save)\n\n # Load initial configuration from file\n config_initial = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path_initial)\n\n config1 = config_initial.genome_config\n names1 = [p.name for p in config1._params]\n for n in names1:\n assert hasattr(config1, n)\n\n # Save configuration to another file\n config_initial.save(config_path_save)\n\n # Obtain configuration from saved file\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path_save)\n\n config2 = config.genome_config\n names2 = [p.name for p in config2._params]\n for n in names2:\n assert hasattr(config2, n)\n\n self.assertEqual(names1, names2)\n\n for n in names1:\n v1 = getattr(config1, n)\n v2 = getattr(config2, n)\n self.assertEqual(v1, v2)",
"def backup_config(context):\n context.copy_from(DNF_PLUGIN_DATA_PATH, DNF_PLUGIN_DATA_LOG_PATH)",
"def backup_tempest_config(conf_file, res_dir):\n if not os.path.exists(res_dir):\n os.makedirs(res_dir)\n shutil.copyfile(conf_file,\n os.path.join(res_dir, 'tempest.conf'))",
"def _save_changes(self):\n copy2(self._cfg_filename, self._cfg_filename + \".bak\")\n with open(self._cfg_filename, \"w\", encoding=\"utf-8\") as self._cfg_file:\n self.write(self._cfg_file)",
"def save(self):\n if self.changed:\n logger.info(\"Overwriting Redis config\")\n self.client.config_rewrite()\n self.changed = False",
"def update(self):\n self.save_config_file()",
"def backup(self):\r\n print('Backing up old files...')\r\n\r\n # Connect with SSH-PubKey and execute backup script\r\n subprocess.run(\r\n ['ssh',\r\n '-i', self.ssh_key,\r\n '-o', 'StrictHostKeyChecking=no',\r\n 'robot@{}'.format(self.settings['ip']),\r\n 'robolab-backup'\r\n ])\r\n\r\n print('Done.')",
"def SaveConfig(self):\n config_value = getattr(self, APPDATA)\n path_value = config_value.AbsolutePaths[0]\n default_cfg_file = os.path.join(path_value, CONFIG_FILE_NAME)\n temp_file = default_cfg_file + '.TEMP'\n if os.path.exists(default_cfg_file):\n json.dump(type(self)._CURRENT_CONFIG,\n open(temp_file.lower(),\n mode='w'),\n cls=ConfigEncoder,\n sort_keys=False,\n indent=4)\n EnsureBackup(temp_file, default_cfg_file)\n else:\n if not os.path.isdir(path_value):\n os.mkdir(path_value)\n json.dump(type(self)._CURRENT_CONFIG,\n open(default_cfg_file.lower(),\n mode='w'),\n cls=ConfigEncoder,\n sort_keys=False,\n indent=4)",
"def disable(self):\n logging.debug(\"Restoring autologin configuration...\")\n backup_filename = None\n for filename in self.generate_backup_filename():\n if not os.path.exists(filename):\n break\n backup_filename = filename\n\n if backup_filename:\n shutil.copy(backup_filename, self.CONFIG_FILENAME)\n os.remove(backup_filename)\n else:\n os.remove(self.CONFIG_FILENAME)",
"def backup():\n backup_shift(os, config.utils.tasks.backup_depth)\n if config.utils.tasks.secret_key is None:\n shutil.copyfile(config.core.database_name, config.core.database_name+'.1')\n else:\n data = get_encrypted_database()\n with open(config.core.database_name+'.1', 'wb') as f:\n f.write(data)",
"def upgrade_config_format(self):\n # migrate older config files\n if self.version == 1:\n # capture_init()\n self.version = 3\n\n # If token exists check still valid and can login\n if self.token and self.token != DEFAULT_TOKEN:\n from .api import ping\n\n with suppress(Exception):\n self.username = ping(config=self, cli_login=True, verbose=False)\n\n self.save()\n elif self.version == 2:\n # re-init against new server\n # capture_init()\n self.version = 3\n self.save()",
"def save():\n print(\"Saving config file..\")\n\n res = yaml.round_trip_dump(_conf, indent=2, block_seq_indent=1)\n\n with open(__config_file, 'w', encoding='utf-8') as stream:\n stream.write(res)",
"def backup(self):\n self.logger.info(\"Backing up current version of model...\")\n self.save_checkpoint(filename='backup.pth.tar')",
"def save_config(self, new_config, filename=None):\n self.cfg.update(new_config)\n if filename is None:\n self.cfg.filename = self.cfg_filename\n else:\n self.cfg.filename = filename\n self.cfg.write()\n logger.info(\"Config file %s written out\" % self.cfg.filename)",
"def backup(self):\n self.rollback_steps.insert(0, self.mongos.start_balancer)\n self.run_step(self.mongos.stop_balancer, 2)\n\n self.run_step(self.wait_for_locks)\n\n self.rollback_steps.insert(0, self.finish_shards_maintenance)\n self.run_step(self.prepare_shards_maintenance)\n\n self.run_step(self.backup_dump)\n\n self.rollback_steps.remove(self.finish_shards_maintenance)\n self.run_step(self.finish_shards_maintenance, 2)\n\n self.rollback_steps.remove(self.mongos.start_balancer)\n self.run_step(self.mongos.start_balancer, 4) # it usually starts on\n # the second try\n\n if self.backup_bucket is not None:\n run(\"rmdir %s\" % self.backup_path)\n\n logging.info(\"Finished successfully\")",
"def restore_config(self):\n self._clear_previous_windows_assigment()\n self._restart_i3_config()",
"def saveCurrentConfig():\n cf = ConfigParser.ConfigParser()\n cf.add_section(\"dir_config\")\n cf.set(\"dir_config\", \"7zpath\", SW_CONFIG['7zpath'])\n cf.set(\"dir_config\", \"sharefolder\", SW_CONFIG['sharefolder'])\n cf.set(\"dir_config\", \"distpath\", SW_CONFIG['distpath'])\n cf.add_section(\"sw_config\")\n cf.set(\"sw_config\", \"version\", SW_CONFIG['sw_version'])\n cf.set(\"sw_config\", \"startup\", SW_CONFIG['startup'])\n cf.add_section(\"run_config\")\n cf.set(\"run_config\", \"pop\", RUN_CONFIG['pop'])\n cf.set(\"run_config\", \"backup\", RUN_CONFIG['backup'])\n cf.add_section(\"hook_config'\")\n for k, v in HOOK_CONFIG:\n cf.set(\"hook_config\", k, v)\n fp = open(CONFIG_FILE, \"w\")\n cf.write(fp)\n fp.close()",
"def saveConfig(config):\n global SW_CONFIG\n cf = ConfigParser.ConfigParser()\n cf.add_section(\"dir_config\")\n cf.set(\"dir_config\", \"7zpath\", config['7zpath'])\n cf.set(\"dir_config\", \"sharefolder\", config['sharefolder'])\n cf.set(\"dir_config\", \"distpath\", config['distpath'])\n cf.add_section(\"sw_config\")\n cf.set(\"sw_config\", \"version\", config['sw_version'])\n cf.set(\"sw_config\", \"startup\", config['startup'])\n cf.add_section(\"run_config\")\n cf.set(\"run_config\", \"pop\", False)\n cf.set(\"run_config\", \"backup\", False)\n fp = open(CONFIG_FILE, \"w\")\n cf.write(fp)\n fp.close()\n SW_CONFIG = config",
"def config_restore(cls):\n for restore_method in cls.restore_methods:\n restore_method()",
"def test_config_overwrite(self):\n inc = REFRESH_COMMANDS.calculate_refresh_commands(\"Rainmeter.exe\", \"test-config\", \"file.inc\", False, True)\n ini = REFRESH_COMMANDS.calculate_refresh_commands(\"Rainmeter.exe\", \"test-config\", \"file.ini\", False, True)\n\n self.assertEquals(inc, ini)",
"def reload_config(self):\n pass",
"def reset_config():\r\n # TODO implement configuration reset\r\n pass",
"def __restoreBackup(self):\n pass #FIXME!!!"
] | [
"0.7156795",
"0.7003359",
"0.67606544",
"0.6459555",
"0.64326864",
"0.63440835",
"0.63262016",
"0.63146156",
"0.62513983",
"0.6244165",
"0.6195551",
"0.61006075",
"0.60527635",
"0.6048218",
"0.60470015",
"0.60405976",
"0.6031093",
"0.59871054",
"0.5954373",
"0.59194183",
"0.5919194",
"0.59069043",
"0.58963144",
"0.58916354",
"0.58892137",
"0.5879218",
"0.5866438",
"0.58635896",
"0.58345664",
"0.5819663"
] | 0.72788405 | 0 |
Plot and save histograms from predicted steerings and real steerings. | def make_and_save_histogramsX(pred_steerings, real_steerings,
img_name = "histogramsX.png"):
pred_steerings = np.array(pred_steerings)
real_steerings = np.array(real_steerings)
max_h = np.maximum(np.max(pred_steerings), np.max(real_steerings))
min_h = np.minimum(np.min(pred_steerings), np.min(real_steerings))
bins = np.linspace(min_h, max_h, num=50)
plt.hist(pred_steerings, bins=bins, alpha=0.5, label='Predicted', color='b')
plt.hist(real_steerings, bins=bins, alpha=0.5, label='Real', color='r')
#plt.title('Steering angle')
plt.legend(fontsize=10)
plt.savefig(img_name, bbox_inches='tight') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_and_save_histogramsY(pred_steerings, real_steerings,\n img_name = \"histogramsY.png\"):\n pred_steerings = np.array(pred_steerings)\n real_steerings = np.array(real_steerings)\n max_h = np.maximum(np.max(pred_steerings), np.max(real_steerings))\n min_h = np.minimum(np.min(pred_steerings), np.min(real_steerings))\n bins = np.linspace(min_h, max_h, num=50)\n plt.hist(pred_steerings, bins=bins, alpha=0.5, label='Predicted', color='b')\n plt.hist(real_steerings, bins=bins, alpha=0.5, label='Real', color='r')\n #plt.title('Steering angle')\n plt.legend(fontsize=10)\n plt.savefig(img_name, bbox_inches='tight')",
"def featuresHist(self, **kwargs):\n\n # Selecting bins automatically:\n bins_onpower = np.arange(self.onpower_train.min().values[0],\n self.onpower_train.max().values[0],\n (self.onpower_train.max().values[0] -\n self.onpower_train.min().values[0]) / 50)\n\n bins_offpower = np.arange(self.offpower_train.min().values[0],\n self.offpower_train.max().values[0],\n (self.offpower_train.max().values[0] -\n self.offpower_train.min().values[0]) / 50)\n\n bins_duration = np.arange(self.duration_train.min().values[0],\n self.duration_train.max().values[0],\n (self.duration_train.max().values[0] -\n self.duration_train.min().values[0]) / 50)\n\n # If a bin has been specified update the bin sizes.\n for key in kwargs:\n if key == 'bins_onpower':\n bins_onpower = kwargs[key]\n elif key == 'bins_offpower':\n bins_offpower = kwargs[key]\n elif key == 'bins_duration':\n bins_duration = kwargs[key]\n else:\n print(\"Non valid kwarg\")\n\n # Plot structure:\n fig = plt.figure()\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312)\n ax3 = fig.add_subplot(313)\n\n # Evaluating score for:\n # Onpower\n x = np.arange(bins_onpower.min(), bins_onpower.max() + \\\n np.diff(bins_onpower)[0], np.diff(bins_onpower)[0] / float(1000)).reshape(-1, 1)\n y = self.__pdf(self.onpower, x)\n norm = pd.cut(\n self.onpower_train.onpower, bins=bins_onpower).value_counts().max() / max(y)\n # Plots for Onpower\n ax1.hist(\n self.onpower_train.onpower.values, bins=bins_onpower, alpha=0.5)\n ax1.plot(x, y * norm)\n #ax1.set_title(\"Feature: Onpower\")\n #ax1.set_ylabel(\"Counts\")\n #ax1.set_xlabel(\"On power (W)\")\n ax1.set_ylabel(\"On power counts\")\n\n # Offpower\n x = np.arange(bins_offpower.min(), bins_offpower.max() + \\\n np.diff(bins_offpower)[0], np.diff(bins_offpower)[0] / float(1000)).reshape(-1, 1)\n y = self.__pdf(self.offpower, x)\n norm = pd.cut(self.offpower_train.offpower,\n bins=bins_offpower).value_counts().max() / max(y)\n # Plots for Offpower\n ax2.hist(self.offpower_train.offpower.values,\n bins=bins_offpower, alpha=0.5)\n ax2.plot(x, y * norm)\n #ax2.set_title(\"Feature: Offpower\")\n #ax2.set_ylabel(\"Counts\")\n #ax2.set_xlabel(\"Off power (W)\")\n ax2.set_ylabel(\"Off power counts\")\n\n # Duration\n x = np.arange(bins_duration.min(), bins_duration.max() + \\\n np.diff(bins_duration)[0], np.diff(bins_duration)[0] / float(1000)).reshape(-1, 1)\n y = self.__pdf(self.duration, x)\n norm = pd.cut(self.duration_train.duration,\n bins=bins_duration).value_counts().max() / max(y)\n # Plots for duration\n ax3.hist(self.duration_train.duration.values,\n bins=bins_duration, alpha=0.5)\n ax3.plot(x, y * norm)\n #ax3.set_title(\"Feature: Duration\")\n #ax3.set_ylabel(\"Counts\")\n #ax3.set_xlabel(\"Duration (seconds)\")\n ax3.set_ylabel(\"Duration counts\")",
"def plot_hist_snfit_meta(self): \n \n self.read_meta()\n self.read_snfit_results()\n\n \n self.diff_x0 = []\n self.diff_x0_err = []\n self.diff_x1 = []\n self.diff_x1_err = [] \n self.diff_c = []\n self.diff_c_err = [] \n self.diff_mb = []\n self.diff_mb_err = [] \n self.diff_cov_x0_x1 = []\n self.diff_cov_x0_c = []\n self.diff_cov_x1_c = []\n self.diff_cov_mb_x1 = []\n self.diff_cov_mb_c = []\n\n for i in range (len(self.sn_name)):\n for j in range (len(self.meta_sn_name_list)):\n if self.sn_name[i] == self.meta_sn_name_list[j]:\n if np.abs(self.mb[i] - self.meta_mb[j]) < 0.0001:\n self.diff_x0.append(self.x0[i] - self.meta_x0[j])\n self.diff_x0_err.append(self.x0_err[i] - self.meta_x0_err[j])\n self.diff_x1.append(self.x1[i] - self.meta_x1[j])\n self.diff_x1_err.append(self.x1_err[i] - self.meta_x1_err[j]) \n self.diff_c.append(self.c[i] - self.meta_c[j])\n self.diff_c_err.append(self.c_err[i] - self.meta_c_err[j]) \n self.diff_mb.append(self.mb[i] - self.meta_mb[j])\n self.diff_mb_err.append(self.mb_err[i] - self.meta_mb_err[j])\n# self.diff_cov_x0_x1.append()\n# self.diff_cov_x0_c.append()\n# self.diff_cov_x1_c.append()\n# self.diff_cov_mb_x1.append()\n# self.diff_cov_mb_c.append()\n else:\n print self.x1[i] - self.meta_x1[j], self.sn_name[i],self.meta_sn_name_list[j], self.x1[i], self.meta_x1[j]\n\n\n fig = plt.figure(figsize=(8.,8.)) \n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x0,25,label='$\\Delta$ X0')\n ax0_2.hist(self.diff_x0_err,25,label='$\\Delta$ X0 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/x0_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x1,25,label='$\\Delta$ X1')\n ax0_2.hist(self.diff_x1_err,25,label='$\\Delta$ X1 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/x1_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_c,25,label='$\\Delta$ Color')\n ax0_2.hist(self.diff_c_err,25,label='$\\Delta$ Color error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/color_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n\n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_mb,50,label='$\\Delta$ mb')\n ax0_2.hist(self.diff_mb_err,50,label='$\\Delta$ mb error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/mb_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()",
"def generate_plots():\n\n hmp = homemonitor_plot()\n hmp.load_data()\n hmp.plot_day()\n hmp.plot_hist()",
"def hist_save(self, d, bin1, name, no):\n\t\tfor i in range(0,no):\n\t\t\ts = d[:,i]\n\t\t\tplt.hist(s, bin1, normed=True, color='c')\t# Extracting the parameters from the histogram\n\t\t\tplt.title('Probability Distribution Fnction of %s' %name, fontsize=20)\n\t\t\tplt.xlabel(\"Filter tap values\", fontsize=20)\n\t\t\tplt.ylabel(\"Probability Distribution\", fontsize=20)\n#\t\t\tplt.xlim(0,0.10)\n\t\t\tplt.ylim(0,100)\n#\t\t\tplt.legend(fontsize = 'xx-large')\n\t\t\tplt.savefig('/home/abhishek/Results/comparison_all_sets/Curve fitting/test/set_1/hist_%s_index_%d' %(name,i))\n\t\t\tplt.close()",
"def plot_predictions_histogram(df):\n\n fig = go.Figure()\n fig.add_trace(go.Histogram(x=df[\"preds\"], name=\"preds\"))\n fig.add_trace(go.Histogram(x=df[\"truth\"], name=\"truth\"))\n\n # Overlay both histograms\n fig.update_layout(barmode=\"overlay\")\n # Reduce opacity to see both histograms\n fig.update_traces(opacity=0.5)\n fig.update_layout(xaxis_title=r\"HOMO-LUMO\", yaxis_title=r\"count.\")\n wandb.log({f\"Predictions Hist\": fig})",
"def plot_residuals(turnstile_weather, predictions):\n plt.figure()\n (turnstile_weather['ENTRIESn_hourly'] - predictions).hist()\n return plt",
"def plot_hist(self):\n labels = [self.get_class_str(action, obj)\n for (action, obj, subj, rec, beg, end) in self.action_clips]\n visualize.plot_hist(labels, proportion=True)",
"def plot_onemitexample_R2N_hist_paperfigure(eg_netseed,eg_mitnum,resultsdir='../results/odor_morphs'):\n fig = figure(figsize=(columnwidth,columnwidth/2.0),dpi=300,facecolor='w') # 'none' is transparent\n ax3 = fig.add_subplot(2,3,1)\n ax4 = fig.add_subplot(2,3,2)\n ax5 = fig.add_subplot(2,3,4)\n ax6 = fig.add_subplot(2,3,5)\n ax1 = fig.add_subplot(2,3,3)\n ax2 = fig.add_subplot(2,3,6)\n ## inh = (no_singles,no_joints,no_lat,no_PGs,varyRMP)\n inh_options = [ (0,(False,False,False,False,False),'lat inh') ]\n for ploti,(inhi,inh,inhstr) in enumerate(inh_options):\n R2Ns = []\n lin_R2Ns = []\n chilist = []\n n_accept = 0\n for stimi,stimseed in enumerate(stim_seeds):\n if not salient: net_seeds = [stimseed]\n for neti,netseed in enumerate(net_seeds):\n for ngi,num_gloms in enumerate([3]):\n\n filename, switch_strs \\\n = get_filename(netseed,stimseed,inh,num_gloms,stimi,neti,inhi,resultsdir=resultsdir)\n switches_str = string.join(switch_strs,'')\n ## if the result file for these seeds & tweaks doesn't exist,\n ## then carry on to the next.\n if not os.path.exists(filename): continue\n print filename\n for fitted_mitral in [0,1]:\n ## First the weighted-linear sigmoid:\n ## If the fitted params file does not exist, create it (them).\n if not os.path.exists(filename+'_params'+str(fitted_mitral)):\n print \"fitting file\",filename\n refit = True\n else: refit = False\n ## read in params & responses for this result file\n mit_fit_params = \\\n fit_om.fit_morphs(filename, fitted_mitral, 'arb', refit=refit)\n params,chisq,inputsA,inputsB,fitted_responses,\\\n numavgs,firingbinsmeanList,firingbinserrList = mit_fit_params\n S2N,S2R = forR2N.residual2noise(fitted_responses[-2],firingbinsmeanList[-2],\\\n firingbinserrList[-2]*sqrt(numavgs),starti=0) # odor A\n R2N_A = S2N/S2R\n if isnan(R2N_A): continue\n S2N,S2R = forR2N.residual2noise(fitted_responses[0],firingbinsmeanList[0],\\\n firingbinserrList[0]*sqrt(numavgs),starti=0) # odor B\n R2N_B = S2N/S2R\n if isnan(R2N_B): continue\n R2Ns.append(R2N_A)\n R2Ns.append(R2N_B)\n if netseed == eg_netseed and fitted_mitral == eg_mitnum:\n fit_om.plot_example_onemit(ax3,ax4,eg_mitnum,mit_fit_params)\n \n ## Linear-rectifier or Linear-sigmoid depending on FULLlin variable above.\n ## If the fitted params file does not exist, create it (them).\n if not os.path.exists(filename+'_params'+linextn+str(fitted_mitral)):\n print \"fitting FULLlin file\",filename\n refit = True\n else: refit = False\n ## fit/get the params and responses for this result file\n mit_fit_params = \\\n fit_om.fit_morphs(filename, fitted_mitral, 'lin', refit=refit)\n params,chisq,inputsA,inputsB,fitted_responses,\\\n numavgs,firingbinsmeanList,firingbinserrList = mit_fit_params\n S2N,S2R = forR2N.residual2noise(fitted_responses[-2],firingbinsmeanList[-2],\\\n firingbinserrList[-2]*sqrt(numavgs),starti=0) # odor A\n R2N_A = S2N/S2R\n if isnan(R2N_A): continue\n S2N,S2R = forR2N.residual2noise(fitted_responses[0],firingbinsmeanList[0],\\\n firingbinserrList[0]*sqrt(numavgs),starti=0) # odor B\n R2N_B = S2N/S2R\n if isnan(R2N_B): continue\n lin_R2Ns.append(R2N_A)\n lin_R2Ns.append(R2N_B)\n chilist.append(sqrt(chisq))\n if netseed == eg_netseed and fitted_mitral == eg_mitnum:\n fit_om.plot_example_onemit(ax5,ax6,eg_mitnum,mit_fit_params)\n\n n_accept += 1\n\n R2N_max = 1.0\n ax1.hist(clip(R2Ns,0,R2N_max),20,normed=True,edgecolor='b',facecolor='b')\n _,y1 = ax1.get_ylim()\n ax2.hist(clip(lin_R2Ns,0,R2N_max),20,normed=True,edgecolor='b',facecolor='b')\n 
#ax2.hist(clip(chilist,0,R2N_max),20,normed=True,edgecolor='b',facecolor='b')\n _,y2 = ax2.get_ylim()\n yR2Nmax = max(y1,y2)\n print \"Number of mitral cells accepted =\",n_accept\n \n ## beautify plots\n for axnum,ax in enumerate([ax1,ax2]):\n xmin,xmax,ymin,ymax = \\\n beautify_plot(ax,x0min=True,y0min=True,xticksposn='bottom',yticksposn='left')\n ax.set_xlim([0,R2N_max])\n ax.set_xticks([0,R2N_max])\n ax.set_ylim([0,yR2Nmax])\n ax.set_yticks([0,yR2Nmax])\n for ax in [ax1,ax3,ax4]:\n ax.set_xticklabels(['',''])\n ## axes_labels() sets sizes of tick labels too.\n axes_labels(ax1,'','prob. density',adjustpos=False,xpad=0,ypad=0)\n ax1.yaxis.set_label_coords(-0.29,-0.3)\n axes_labels(ax2,'$\\sqrt{residual/noise}$','',adjustpos=False,xpad=1,ypad=0)\n\n axes_labels(ax3,'','firing rate (Hz)',adjustpos=False,xpad=0,ypad=0)\n ax3.yaxis.set_label_coords(-0.29,-0.3)\n axes_labels(ax5,'time (s)','',adjustpos=False,xpad=3,ypad=0)\n\n axes_labels(ax4,'','fitted weight',adjustpos=False,xpad=0,ypad=0)\n ax4.yaxis.set_label_coords(-0.24,-0.3)\n axes_labels(ax6,'conc (% SV)','',adjustpos=False,xpad=3,ypad=0)\n\n fig_clip_off(fig)\n fig.tight_layout()\n fig.subplots_adjust(hspace=0.3,wspace=0.5) # has to be after tight_layout()\n fig.savefig('../figures/morphs_R2Ns.svg',dpi=fig.dpi)\n fig.savefig('../figures/morphs_R2Ns.png',dpi=fig.dpi)",
"def save_dataset_visual(lines, output_dir):\n\n # Ensure output directory exists\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n cols = 3\n rows = 3\n fig_size = (7 * cols, 4 * rows) # Figure width and height, in inches\n # Random sample of images\n save_name = \"training_data_sample.png\"\n fig, ax = plt.subplots(rows, cols, figsize=fig_size)\n for row in range(rows):\n for col in range(cols):\n idx = np.random.randint(0, len(lines))\n ax[row, col].imshow(ndimage.imread(lines[idx][0]))\n ax[row, col].set_title(\"Angle = \" + str(round(lines[idx][1], 3)))\n plt.savefig(output_dir + save_name, bbox_inches='tight')\n # Distribution of steering angles\n save_name = \"data_histogram.png\"\n fig_size = (5, 3) # Figure width and height, in inches\n num_bins = 100\n angles = np.array([line[1] for line in lines])\n hist, bins = np.histogram(angles, bins=num_bins)\n fig = plt.figure(figsize=fig_size)\n plt.bar(bins[:-1], hist)\n plt.xlabel(\"Steering Angle\")\n plt.ylabel(\"Frequency\")\n plt.title(\"Distribution of Steering Angles in Training Data\")\n plt.savefig(output_dir + save_name, bbox_inches='tight')",
"def featuresHist_colors(self, **kwargs):\n # Selecting bins automatically:\n bins_onpower = np.arange(self.onpower_train.min().values[0],\n self.onpower_train.max().values[0],\n (self.onpower_train.max().values[0] -\n self.onpower_train.min().values[0]) / 50)\n\n bins_offpower = np.arange(self.offpower_train.min().values[0],\n self.offpower_train.max().values[0],\n (self.offpower_train.max().values[0] -\n self.offpower_train.min().values[0]) / 50)\n\n bins_duration = np.arange(self.duration_train.min().values[0],\n self.duration_train.max().values[0],\n (self.duration_train.max().values[0] -\n self.duration_train.min().values[0]) / 50)\n\n # If a bin has been specified update the bin sizes.\n # Updating bins with specified values.\n for key in kwargs:\n if key == 'bins_onpower':\n bins_onpower = kwargs[key]\n elif key == 'bins_offpower':\n bins_offpower = kwargs[key]\n elif key == 'bins_duration':\n bins_duration = kwargs[key]\n else:\n print(\"Non valid kwarg\")\n\n # Plot:\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(311)\n ax2 = fig1.add_subplot(312)\n ax3 = fig1.add_subplot(313)\n\n start = 0\n end = 0\n for ind in np.arange(len(self.stats)):\n\n if self.stats[ind]['Nevents'] != 0:\n if ind == 0:\n start = 0\n else:\n start = end\n end += self.stats[ind]['Nevents']\n ax1.hist(\n self.onpower_train[start:end].onpower.values, bins=bins_onpower, alpha=0.5)\n ax2.hist(\n self.offpower_train[start:end].offpower.values, bins=bins_offpower, alpha=0.5)\n ax3.hist(\n self.duration_train[start:end].duration.values, bins=bins_duration, alpha=0.5)\n\n ax1.set_title(\"Feature: Onpower\")\n ax1.set_xlabel(\"Watts\")\n ax1.set_ylabel(\"Counts\")\n\n ax2.set_title(\"Feature: Offpower\")\n ax2.set_xlabel(\"Watts\")\n ax2.set_ylabel(\"Counts\")\n\n ax3.set_title(\"Feature: Duration\")\n ax3.set_xlabel(\"Seconds\")\n ax3.set_ylabel(\"Counts\")",
"def sSFR_hist(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n method = p.method\n fig,ax = plt.subplots(figsize = (8,4))\n xlim = [-4,0.5] \n\n # Plot 25 Mpc box? \n if p.select == '_25Mpc':\n GR = glo.global_results(sim_run='_25Mpc',nGal=240,grid_ext='_ext_ism_BPASS')\n M_star,SFR,Zsfr = getattr(GR,'M_star'),getattr(GR,'SFR'),getattr(GR,'Zsfr')\n sSFR = SFR/M_star\n ax.hist(np.log10(1e9*sSFR),bins=50,color='deepskyblue',alpha=1,label='Simba-25 galaxy sample',zorder=10)\n\n # Plot current sample\n GR = glo.global_results()\n M_star,SFR,Zsfr = getattr(GR,'M_star'),getattr(GR,'SFR'),getattr(GR,'Zsfr')\n sSFR = SFR/M_star\n ax.hist(np.log10(1e9*sSFR),bins=50,color='green',alpha=0.5,label='Simba-100 galaxy sample',zorder=10)\n\n # Plot all galaxies in simulation volume\n df_all = pd.read_pickle(p.d_data + 'galaxy_selection/z0_all_galaxies%s' % p.sim_runs[0])\n print('%i galaxies in Simba-%s' % (len(df_all),p.sim_runs[0]))\n df_all1 = df_all[(df_all['SFR_'+method] > 0) & (df_all['SFR_'+method] != 1)]\n df_all = pd.read_pickle(p.d_data + 'galaxy_selection/z0_all_galaxies%s' % p.sim_runs[1])\n print('%i galaxies in Simba-%s' % (len(df_all),p.sim_runs[1]))\n df_all2 = df_all[df_all['SFR_'+method] > 0]\n df_all = df_all1.append(df_all2, ignore_index=True)\n\n sSFR = df_all['SFR_'+method].values/df_all['M_star_'+method].values\n ax2 = ax.twinx()\n sSFR = sSFR[(df_all['SFR_'+method] > 0) & (df_all['M_star_'+method] > 0)].astype('float')\n #ax2.hist(np.log10(1e9*sSFR[sSFR > 10**xlim[0]]),bins=100,color='grey',alpha=0.5,label='All SF galaxies in Simba-25 and Simba-100',zorder=10)\n ax2.hist(np.log10(1e9*sSFR[sSFR > 10.**xlim[0]/1e9]),fill=False,bins=100,histtype='stepfilled',fc=None,ec='k',alpha=0.8,label='All SF galaxies in Simba-25 and Simba-100',zorder=10)\n\n ax.set_xlabel(r'log sSFR [Gyr$^{-1}$]')\n ax.set_ylabel('Number of selected galaxies')\n ax2.set_ylabel('Number of galaxies in Simba')\n ax.set_yscale('log')\n ax.set_xlim(xlim)\n ax.set_ylim([0.8,5e1])\n \n handles,labels = ax.get_legend_handles_labels()\n handles2,labels2 = ax2.get_legend_handles_labels()\n handles = np.append(handles,handles2)\n labels = np.append(labels,labels2)\n #ax.legend(fontsize=12)\n ax.legend(handles,labels,fontsize=11)\n plt.tight_layout()\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/') \n plt.savefig('plots/sim_data/sSFR_%s_%s%s' % (method,p.sim_name,p.sim_run),dpi=250,facecolor='w')",
"def bench_plotter(self):\n\n # plot random as histogram, upper en lower bound as a red line\n minima = []\n for i in range(1, 4):\n cost_list = []\n with open(f\"../output_runs/text_info_random{i}_10k.txt\", \"r\") as f:\n text = f.read().split('\\n')\n counter = 0\n for number in text:\n counter += 1\n if number is not \"\":\n cost_list.append(int(number))\n if counter == 1000:\n break\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random:\", minim, maxim)\n plt.axvline(x=53188, color='r')\n plt.axvline(x=103030, color=\"r\")\n plt.hist(cost_list, bins=20, alpha=0.5, label=f\"Random walk\")\n\n # plot histogram of priority and hillclimber\n cost_list = []\n with open(f\"../output_runs/text_info_prior_hill{i}_\\\n 1k.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"prior hill:\", minim, maxim)\n plt.hist(cost_list, bins=20, alpha=0.5, label=f\"Priority + Hill\")\n\n # plot histogram of simulated annealing\n cost_list = []\n with open(f\"../output_runs/simulated_annealing{i}_1000.txt\",\n \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random+anneal:\", minim, maxim)\n plt.hist(cost_list, bins=20, alpha=0.5,\n label=f\"Random + sim anneal\")\n\n # plot histogram of random plus hillclimber\n cost_list = []\n with open(f\"../output_runs/random_hill{i}_1000.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random+hill:\", minim, maxim)\n plt.hist(cost_list, bins=100, alpha=0.5,\n label=f\"Random + Hillclimber\")\n\n # plot histogram of kmeans plus hillclimber\n cost_list = []\n with open(f\"../output_runs/text_k-means_hill{i}_\\\n 1000.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n plt.hist(cost_list, bins=20, alpha=0.5,\n label=f\"Kmean and hill {i}\")\n totalmin = min(minima)\n plt.axvline(x=totalmin, color=\"g\")\n plt.title(f\"4 algorithms Wijk {i}, lowest cost: {totalmin}\")\n plt.xlabel(\"Cost\")\n plt.ylabel(\"Frequency\")\n plt.legend(loc='upper right')\n plt.show()",
"def test_traj () :\n samples = getAllTraj()\n states = []\n for t in samples : \n states.extend([toInternalStateRep(s) for s, _, _ in t])\n states = np.stack(states)\n xRange = np.linspace(-np.pi, np.pi, 100)\n yRange = np.linspace(-np.pi, np.pi, 100)\n plotHist(states, xRange, yRange, 'theta1', 'theta2', 'S Count')",
"def plot_hist(datasets, bins, labels, alphas):\n assert len(labels) == len(datasets)\n assert len(alphas) == len(datasets)\n plt.figure(figsize=[9,6])\n for idx, data in enumerate(datasets):\n plt.hist(data, bins=bins[idx], density=True, label=labels[idx], alpha=alphas[idx])\n plt.xlabel(\"PHQ score\")\n plt.ylabel(\"Probability\")\n plt.legend()\n plt.savefig(\"saved_plots/hist_\"+\"_\".join(labels)+\".png\")\n plt.show()",
"def plot_loss(self):\n #x = [k for k in range(self.rep)]\n loss = self.min_list[:,0]//100 #For clarity\n #plt.plot(x,self.min_list[:,0])\n plt.hist(loss,density=True)\n plt.xlabel(self.list_name + '_loss//100')\n plt.ylabel('Frequency')\n #plt.xticks(range(8),[0,250,500,750,1000,1250,1500,1750])\n plt.title('Distribution of '+self.list_name+'_loss ('+str(self.rep)+' iterations)')\n plt.savefig('img/stats/'+self.list_name+'_lossFrequency_'+self.model_name+'.png')\n plt.show()",
"def generate_history_plot(data, labels_dict, file_title, plot_title):\n fig = plt.figure()\n ax = sns.histplot(data)\n\n if labels_dict:\n ax.set_xlabel(labels_dict[\"x\"])\n if plot_title:\n ax.set_title(plot_title)\n\n plt.savefig(file_title)",
"def plot_and_save(data, prefix, name):\n plt.figure()\n plt.hist(data)\n plt.title(name)\n plt.xlabel(\"Value\")\n plt.ylabel(\"Frequency\")\n plt.savefig(prefix + name + '.png')\n plt.close()",
"def ex1_plots(instance, destination, prefix, save, animate):\n \n plts = ukf_plots(instance, destination, prefix, save, animate)\n\n truths = truth_parser(instance)\n nan_array= nan_array_parser(instance, truths, instance.base_model)\n #obs, obs_key = obs_parser(instance, True)\n obs_key = obs_key_parser(instance, True)\n preds = preds_parser(instance, True)\n #forecasts = forecasts_parser(instance, True)\n \n ukf_params = instance.ukf_params\n index2 = ukf_params[\"index2\"]\n \n \"remove agents not in model to avoid wierd plots\"\n #obs *= nan_array\n truths *= nan_array\n preds *= nan_array\n #forecasts*= nan_array\n \n \"indices for unobserved agents\"\n not_index2 = np.array([i for i in np.arange(truths.shape[1]) if i not in index2])\n plts.pair_frame(truths, preds, obs_key, 10, destination)\n plts.error_hist(truths[::instance.sample_rate,index2], \n preds[::instance.sample_rate,index2],\"Observed Errors\")\n if len(not_index2)>0:\n plts.error_hist(truths[::instance.sample_rate, not_index2], \n preds[::instance.sample_rate, not_index2],\"Unobserved Errors\")\n \n #plts.path_plots(obs[::instance.sample_rate] , \"Observed\")\n plts.path_plots(preds[::instance.sample_rate], \"Predicted\")\n plts.path_plots(truths, \"True\")\n #plts.path_plots(forecasts[::instance.sample_rate], \"Forecasts\")\n\n if animate:\n #plts.trajectories(truths, \"plots/\")\n plts.pair_frames(truths, preds, obs_key,\n truths.shape[0], \"../../plots/\")",
"def _plot(self, step, rewards, losses):\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('Total Episode Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('MSE Loss')\n plt.plot(losses)\n plt.show()",
"def analyze_hrv(self, save_plot=False, save_csv=False):\n\n df = self.df_epoch.copy()\n\n plt.subplots(1, 2, figsize=(self.fig_width, self.fig_height))\n plt.title(\"HRV with interpretation (Shaffer & Ginsberg, 2017)\")\n\n plt.subplot(1, 2, 1)\n h = plt.hist(df[\"RR_SD\"].dropna(), bins=np.arange(0, 250, 10),\n weights=100*np.ones(len(df[\"RR_SD\"].dropna())) / len(df[\"RR_SD\"].dropna()),\n edgecolor='black', color='grey', alpha=.5, cumulative=False)\n plt.ylabel(\"% of epochs\")\n plt.xlabel(\"RR SD (ms)\")\n plt.title(\"All data\")\n\n # Shaffer & Ginsberg, 2017 interpretation\n plt.fill_betweenx(x1=0, x2=50, y=[0, plt.ylim()[1]], color='red', alpha=.5,\n label=\"Unhealthy ({}%)\".format(round(sum(h[0][0:5]), 1)))\n plt.fill_betweenx(x1=50, x2=100, y=[0, plt.ylim()[1]], color='orange', alpha=.5,\n label=\"Compromised ({}%)\".format(round(sum(h[0][5:10]), 1)))\n plt.fill_betweenx(x1=100, x2=250, y=[0, plt.ylim()[1]], color='green', alpha=.5,\n label=\"Healthy ({}%)\".format(round(sum(h[0][10:]), 1)))\n plt.legend()\n\n df = self.df_epoch.dropna()\n df = df.loc[df[\"HR_Intensity\"] == 0]\n\n plt.subplot(1, 2, 2)\n h = plt.hist(df[\"RR_SD\"].dropna(), bins=np.arange(0, 250, 10),\n weights=100*np.ones(len(df[\"RR_SD\"].dropna())) / len(df[\"RR_SD\"].dropna()),\n edgecolor='black', color='grey', alpha=.5, cumulative=False)\n plt.xlabel(\"RR SD (ms)\")\n plt.title(\"Sedentary only\")\n\n # Shaffer & Ginsberg, 2017 interpretation\n plt.fill_betweenx(x1=0, x2=50, y=[0, plt.ylim()[1]], color='red', alpha=.5,\n label=\"Unhealthy ({}%)\".format(round(sum(h[0][0:5]), 1)))\n plt.fill_betweenx(x1=50, x2=100, y=[0, plt.ylim()[1]], color='orange', alpha=.5,\n label=\"Compromised ({}%)\".format(round(sum(h[0][5:10]), 1)))\n plt.fill_betweenx(x1=100, x2=250, y=[0, plt.ylim()[1]], color='green', alpha=.5,\n label=\"Healthy ({}%)\".format(round(sum(h[0][10:]), 1)))\n plt.legend()\n\n if save_plot:\n print(\"Saving plot as HRV_Histogram.png\")\n plt.savefig(\"HRV_Histogram.png\")\n\n if save_csv:\n print(\"Data saved as HRV_FrequencyData.csv\")\n bins = [i for i in h[1][:-1]]\n freqs = [i for i in h[0]]\n data = pd.DataFrame(list(zip(bins, freqs)), columns=[\"Bin\", \"Frequency_%\"])\n data.to_csv(\"HRV_FrequencyData.csv\", index=False)",
"def counts_histogram(predicted=False):\n log.info(\"loading the results from the joint fit to predict the counts\")\n results = load_yaml(\n f\"{config.repo_path}/results/fit/gammapy/joint/fit_results_logparabola.yaml\"\n )\n parameters = results[\"parameters\"]\n\n model_lp = LogParabola.from_log10(\n amplitude=parameters[0][\"value\"] * u.Unit(parameters[0][\"unit\"]),\n reference=parameters[1][\"value\"] * u.Unit(parameters[1][\"unit\"]),\n alpha=parameters[2][\"value\"] * u.Unit(parameters[2][\"unit\"]),\n beta=parameters[3][\"value\"] * u.Unit(parameters[3][\"unit\"]),\n )\n\n # defining the figure\n dict_color = {\n \"fermi\": COLORS[0],\n \"magic\": COLORS[1],\n \"veritas\": COLORS[2],\n \"fact\": COLORS[3],\n \"hess\": COLORS[4],\n }\n fig, ax = plt.subplots()\n\n for which in config.all_datasets:\n log.info(f\"predicting counts for {which} dataset\")\n dataset = config.get_dataset(which)\n obs = dataset.get_SpectrumObservationList().stack()\n cts_pred = CountsPredictor(\n model=model_lp, aeff=obs.aeff, edisp=obs.edisp, livetime=obs.livetime\n )\n cts_pred.run()\n\n e_max = dataset.energy_range[1].to(\"TeV\").value\n e_min = dataset.energy_range[0].to(\"TeV\").value\n\n kwargs_mdl = dict(ls=\":\", range=(e_min, e_max), lw=2.2, color=dict_color[which])\n kwargs_data = dict(\n ls=\"-\", range=(e_min, e_max), lw=2.2, color=dict_color[which]\n )\n\n # CountsSpectrum with observed and predicted excesses\n ex_pred = cts_pred.npred\n ex_obs = obs.excess_vector\n # if it is an IACT rebin the counts before plotting\n if which != \"fermi\":\n ex_pred = ex_pred.rebin(2)\n ex_obs = ex_obs.rebin(2)\n\n if predicted: # if you want to display the predicted counts\n ex_pred.plot_hist(ax, **kwargs_mdl)\n ex_obs.plot_hist(ax, **kwargs_data)\n\n # custom legend\n legend_observed = mlines.Line2D(\n [], [], color=\"gray\", marker=\"\", ls=\"-\", lw=2, label=\"observed\"\n )\n legend_expected = mlines.Line2D(\n [], [], color=\"gray\", marker=\"\", ls=\":\", lw=2, label=\"expected\"\n )\n legend_fermi = mlines.Line2D(\n [], [], color=COLORS[0], marker=\"\", ls=\"-\", lw=2, label=\"Fermi-LAT\"\n )\n legend_magic = mlines.Line2D(\n [], [], color=COLORS[1], marker=\"\", ls=\"-\", lw=2, label=\"MAGIC\"\n )\n legend_veritas = mlines.Line2D(\n [], [], color=COLORS[2], marker=\"\", ls=\"-\", lw=2, label=\"VERITAS\"\n )\n legend_fact = mlines.Line2D(\n [], [], color=COLORS[3], marker=\"\", ls=\"-\", lw=2, label=\"FACT\"\n )\n legend_hess = mlines.Line2D(\n [], [], color=COLORS[4], marker=\"\", ls=\"-\", lw=2, label=\"H.E.S.S.\"\n )\n legend_handles = [\n legend_fermi,\n legend_magic,\n legend_veritas,\n legend_fact,\n legend_hess,\n ]\n if predicted: # if you want to display the predicted counts\n legend_handles = [legend_observed, legend_expected] + legend_handles\n\n ax.legend(handles=legend_handles, fontsize=FONTSIZE)\n\n ax.set_xscale(\"log\")\n ax.set_ylabel(\"Excess counts\", size=FONTSIZE)\n ax.set_xlabel(E_UNIT_LABEL, size=FONTSIZE)\n\n # make axis thicker\n for axis in [\"top\", \"bottom\", \"left\", \"right\"]:\n ax.spines[axis].set_linewidth(1.6)\n ax.tick_params(\"both\", length=7, width=1.6, which=\"major\", labelsize=FONTSIZE)\n ax.tick_params(\"both\", length=4, width=1.6, which=\"minor\", labelsize=FONTSIZE)\n\n plt.tight_layout()\n\n filename = f\"{config.repo_path}/results/figures/counts_spectra.png\"\n filename_pdf = f\"{config.repo_path}/results/figures/counts_spectra.pdf\"\n log.info(f\"saving figure in {filename}\")\n fig.savefig(filename)\n fig.savefig(filename_pdf)",
"def plot_heldout_prediction(input_val,\n y_val,\n mu_val,\n sigma_val,\n fname=None,\n n=1,\n title=\"\"):\n fig = figure.Figure(figsize=(9, 3 * n))\n canvas = backend_agg.FigureCanvasAgg(fig)\n for i in range(n):\n ax = fig.add_subplot(n, i + 1, 1)\n ax.plot(input_val, y_val, label='True data')\n ax.plot(input_val, mu_val, label='Predictive mean')\n lower = mu_val - 1.96 * sigma_val\n upper = mu_val + 1.96 * sigma_val\n ax.fill_between(\n input_val, lower, upper, label='95% confidence interval')\n\n plt.legend()\n fig.suptitle(title)\n fig.tight_layout()\n\n if fname is not None:\n canvas.print_figure(fname, format=\"png\")\n print(\"saved {}\".format(fname))",
"def visualize(self):\r\n self.aggregator.plot_loss()\r\n self.save_figure()",
"def getSFsFromPostPreFitPlots( plots , plotDir , saveDir , bins = [] , keys = [ 'Fakes', 'WJets', 'TTJets' , 'stack' ] , name = \"PostPre\", hist_colors=h_colors, dOpt=\"hist text\") :\n plot_values = degTools.dict_function( plots, lambda x: degTools.getTH1FbinContent( x , get_errors=True) ) #if type( x ) in [ ROOT.TH1F , ROOT.TH2F] else None ) \n #SFs = {'WJets':{}, 'TTJets':{} , 'Fakes':{} , 'stack':{}}\n SFs = {k:{} for k in keys}\n SF_hists = degTools.deepcopy(SFs)\n canv_sfs = ROOT.TCanvas( \"SFs\", \"SFs\", 1000,800 )\n dOpt_ = \"%s\"%dOpt\n ROOT.gStyle.SetPaintTextFormat(\"0.2f\")\n hsh = degTools.uniqueHash()\n for bkg in keys:\n SFs[bkg] = degTools.dict_manipulator( [ plot_values[f][bkg] for f in ['fit_b', 'prefit' ] ] , lambda a,b: a/b.val if b.val else u_float(1.0) )\n SF_hists[bkg] = degTools.makeHistoFromDict( SFs[bkg], bin_order = bins, name = \"TF_%s_%s\"%(bkg,hsh))\n SF_hists[bkg].GetXaxis().SetLabelSize(0.05)\n SF_hists[bkg].SetLineColor( hist_colors[bkg] )\n SF_hists[bkg].SetMarkerColor( hist_colors[bkg] )\n SF_hists[bkg].SetMarkerColor( hist_colors[bkg] )\n SF_hists[bkg].Draw(dOpt_)\n SF_hists[bkg].SetMinimum(0.65)\n SF_hists[bkg].SetMaximum(1.7)\n dOpt_='same %s'%dOpt\n #output_name = name.replace(\".pkl\", \"_SFs.pkl\")\n name = name if name.endswith(\".pkl\") else \"%s.pkl\"%name\n degTools.pickle.dump( SFs, file('%s/%s'%(saveDir, name) , 'w') )\n degTools.saveCanvas( canv_sfs, plotDir, name.replace(\".pkl\",\"\") )\n return SFs, SF_hists",
"def plot_hist_snfit_sncosmo(self):\n \n self.read_sncosmo(path='../sugar_analysis_data/results/res_salt2_SNF_'+str(self.width)+'.txt')\n self.read_snfit_results(snfit_res_path='../sugar_analysis_data/results/results_snfit_'+str(self.width)+'.txt')\n\n# self.read_sncosmo(path='../sugar_analysis_data/results/res_salt2_SNF_GF.txt')\n# self.read_snfit_results(snfit_res_path='../sugar_analysis_data/results/results_snfit_GF.txt')\n\n self.diff_x0_sncosmo = []\n self.diff_x0_err_sncosmo = []\n self.diff_x1_sncosmo = []\n self.diff_x1_err_sncosmo = [] \n self.diff_c_sncosmo = []\n self.diff_c_err_sncosmo = [] \n self.diff_mb_sncosmo = []\n self.diff_mb_err_sncosmo = [] \n self.diff_cov_x0_x1_sncosmo = []\n self.diff_cov_x0_c_sncosmo = []\n self.diff_cov_x1_c_sncosmo = []\n self.diff_cov_mb_x1_sncosmo = []\n self.diff_cov_mb_c_sncosmo = []\n self.diff_chi2 = []\n for i in range (len(self.sn_name)):\n for j in range (len(self.sncosmo_sn_name)):\n if self.sn_name[i] == self.sncosmo_sn_name[j]:\n if np.abs(self.x1[i] - self.sncosmo_x1[j]) < 0.02:\n self.diff_x0_sncosmo.append(self.x0[i] - self.sncosmo_x0[j])\n self.diff_x0_err_sncosmo.append(self.x0_err[i] - self.sncosmo_x0_err[j])\n self.diff_x1_sncosmo.append(self.x1[i] - self.sncosmo_x1[j])\n self.diff_x1_err_sncosmo.append(self.x1_err[i] - self.sncosmo_x1_err[j]) \n self.diff_c_sncosmo.append(self.c[i] - self.sncosmo_c[j])\n self.diff_c_err_sncosmo.append(self.c_err[i] - self.sncosmo_c_err[j]) \n self.diff_mb_sncosmo.append(self.mb[i] - self.sncosmo_mb[j])\n self.diff_mb_err_sncosmo.append(self.mb_err[i] - self.sncosmo_mb_err[j])\n self.diff_chi2.append(self.snfit_chi2[i] - self.sncosmo_chi2[j])\n# self.diff_cov_x0_x1_sncosmo.append()\n# self.diff_cov_x0_c_sncosmo.append()\n# self.diff_cov_x1_c_sncosmo.append()\n# self.diff_cov_mb_x1_sncosmo.append()\n# self.diff_cov_mb_c_sncosmo.append()\n else:\n print self.x1[i] - self.sncosmo_x1[j], self.sn_name[i],self.sncosmo_sn_name[j], self.x1[i], self.sncosmo_x1[j]\n\n# rcParams['font.size'] = 16.\n# font = {'family': 'normal', 'size': 16}\n# rc('axes', linewidth=1.5)\n# rc(\"text\", usetex=True)\n# rc('font', family='serif')\n# rc('font', serif='Times')\n# rc('legend', fontsize=25)\n# rc('xtick.major', size=5, width=1.5)\n# rc('ytick.major', size=5, width=1.5)\n# rc('xtick.minor', size=3, width=1)\n# rc('ytick.minor', size=3, width=1)\n# fig = plt.figure(figsize=(8.,8.)) \n# \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x0_sncosmo,50,label='$\\Delta$ x0_'+str(self.width))\n ax0_2.hist(self.diff_x0_err_sncosmo,50,label='$\\Delta$ x0 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n# ax0_1.ticklabel_format(axis='x', style='scientific', scilimits=(-1, 2))\n pdffile = '../sugar_analysis_data/results/x0_plot_'+str(self.width)+'.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x1_sncosmo,50,label='$\\Delta$ X1_'+str(self.width))\n ax0_2.hist(self.diff_x1_err_sncosmo,50,label='$\\Delta$ X1 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n plt.ticklabel_format(axis='x', style='scientific', scilimits=(-1, 2))\n pdffile = '../sugar_analysis_data/results/x1_plot_'+str(self.width)+'.pdf'\n 
plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_c_sncosmo,50,label='$\\Delta$ Color_'+str(self.width))\n ax0_2.hist(self.diff_c_err_sncosmo,50,label='$\\Delta$ Color error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n plt.ticklabel_format(axis='x', style='scientific', scilimits=(-1, 2))\n pdffile = '../sugar_analysis_data/results/color_plot_'+str(self.width)+'.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n\n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_mb_sncosmo,50,label='$\\Delta$ mb_'+str(self.width))\n ax0_2.hist(self.diff_mb_err_sncosmo,50,label='$\\Delta$ mb error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n plt.ticklabel_format(axis='x', style='scientific', scilimits=(-1, 2))\n pdffile = '../sugar_analysis_data/results/mb_plot_'+str(self.width)+'.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n\n plt.hist(self.diff_chi2,50,label='$\\Delta$ chi2_'+str(self.width))\n pdffile = '../sugar_analysis_data/results/chi2_'+str(self.width)+'.pdf'\n plt.legend()\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()",
"def save_figs(self):\n if not self._fitted:\n self.fit()\n #self._message(\"Saving plots...\")\n # 1. Generate the required PNG plots\n # 1.1 Truncation plots\n for i,s in enumerate(self.samples):\n fig,ax=plt.subplots(1,2,figsize=(8,4))\n cyct=np.arange(self.nvalues)\n cycf=np.arange(self._cutoffidx[i])\n cycd=0.5*(cyct[1:]+cyct[:-1])\n ax[0].plot(cyct,self.samplesdata[:,i],'k.-',linewidth=0.5,label=\"Full series\")\n ax[0].plot(cycf,self.samplesdata[:self._cutoffidx[i],i],'r-',linewidth=1,label=\"Truncated\")\n ax[0].set_xlim([0,self.nvalues-1])\n ax[0].set_ylim([0,self.samplesdata.max()*1.1])\n ax[0].set_xlabel(\"Cycle\")\n ax[0].set_ylabel(\"Fluorescence (a.u.)\")\n ax[0].set_title(\"Detected fluorescence\")\n plt.legend(loc='upper left',frameon=False)\n # First derivative\n ax[1].plot(cycd,self.samplesdatadiff[:,i],'k.-',linewidth=0.5)\n ax[1].axvline(self._cutoffidx[i],color='r')\n ax[1].set_xlim([0,self.nvalues-1])\n ax[1].set_ylim([self.samplesdatadiff.min()*1.1,self.samplesdatadiff.max()*1.1])\n ax[1].set_xlabel(\"Cycle\")\n ax[1].set_ylabel(\"dF/dCycle (a.u.)\")\n ax[1].set_title(\"Fluorescence rate\")\n plt.tight_layout()\n fn=get_valid_fname(self.samples[i])\n figname=\"%s_%s_%s.svg\"%(self.ID,\"01truncation\",fn)\n self.info['samples'][s]['Data truncation for fitting']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close() \n # 1.2 Fitting plots\n for i,s in enumerate(self.samples):\n fig,ax=plt.subplots(1,3,figsize=(12,4))\n cyct=np.arange(self.nvalues)\n cycf=np.arange(self._cutoffidx[i])\n ax[0].plot(cyct,self.samplesdata[:,i],'k:',linewidth=0.5,label=\"Full series\")\n ax[0].plot(cycf,self.samplesdata[:self._cutoffidx[i],i],'r.-',linewidth=0.5,label=\"Truncated\")\n #ax[0].plot(cycf,self.mak3fpre[s],'y-',linewidth=1,label=\"prefit\")\n ax[0].plot(cycf,self.mak3fluorescence[s],'g-',linewidth=1,label=\"MAK3 fit\")\n ax[0].axvline(self._cutoffidx[i],color='k')\n ax[0].set_xlim([0,self.nvalues-1])\n ax[0].set_ylim([0,self.samplesdata.max()*1.1])\n ax[0].set_xlabel(\"Cycle\")\n ax[0].set_ylabel(\"Fluorescence (a.u.)\")\n ax[0].set_title(\"Detected fluorescence\")\n ax[0].legend(loc='upper left',frameon=False)\n # DNA levels\n ax[1].plot(cycf,self.mak3concentration[s],'g-',linewidth=1,label=\"MAK3\")\n ax[1].axvline(self._cutoffidx[i],color='k')\n ax[1].set_xlim([0,self.nvalues-1])\n ax[1].set_ylim([0,self.mak3concentration[s].max()*1.1])\n ax[1].set_xlabel(\"Cycle\")\n ax[1].set_ylabel(\"concentration (a.u.)\")\n ax[1].set_title(\"estimated cDNA levels\")\n # Efficiency\n ax[2].plot(cycf,self.mak3efficiency[s],'b-',linewidth=1,label=\"MAK3\")\n ax[2].axvline(self._cutoffidx[i],color='k')\n ax[2].set_xlim([0,self.nvalues-1])\n ax[2].set_ylim([0,1.1])\n ax[2].set_xlabel(\"Cycle\")\n ax[2].set_ylabel(\"Efficiency\")\n ax[2].set_title(\"Amplification efficiency\") \n plt.tight_layout()\n fn=get_valid_fname(self.samples[i])\n figname=\"%s_%s_%s.svg\"%(self.ID,\"02mak3\",fn)\n self.info['samples'][s]['MAK3 Fitting']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close()\n # 2 Initial concentrations\n figwdth=np.maximum(5,0.4*self.nsamples+1)\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.initialConcentration.values())\n k=list(self.initialConcentration.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00initialConcentration\")\n 
self.info['figname_initialConcentration']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close()\n # 3 Fitting Error\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.fitting_error.values())\n k=list(self.fitting_error.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n ax.set_ylim([0,1e-2])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00fittingError\")\n self.info['figname_fittingError']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 4 kinetic constant\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.k.values())\n k=list(self.k.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00kineticConstant\")\n self.info['figname_k']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 5 background fluorescence\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.Fb.values())\n k=list(self.Fb.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00bkgFluorescence\")\n self.info['figname_Fb']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 6 slope\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.slope.values())\n k=list(self.slope.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n ax.set_ylim([0,0.025])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00fluorescenceSlope\")\n self.info['figname_slope']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))",
"def plot_histogram(bins, data, title, saving_path, hist_name):\n\n x = np.asarray(data)\n plt.figure()\n plt.hist(x[np.isfinite(x)], bins)\n plt.title(title)\n if not os.path.exists(saving_path):\n os.mkdir(saving_path)\n plt.savefig(saving_path + hist_name)",
"def parameters_histograms(w, dw, a, da, b, db):\n w = w.cpu()\n dw = dw.cpu()\n a = a.cpu()\n da = da.cpu()\n b = b.cpu()\n db = db.cpu()\n \n fig = plt.figure(figsize=(10,6))\n ax = fig.add_subplot(231)\n ax.hist(w.reshape(1, w.shape[0] * w.shape[1]))\n ax.set_title('Weights', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(232)\n ax.hist(dw.reshape(1, dw.shape[0] * dw.shape[1]))\n ax.set_title('Weights variations', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(233)\n ax.hist(a)\n ax.set_title('Visible bias', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(234)\n ax.hist(da)\n ax.set_title('Visible bias variations', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(235)\n ax.hist(b)\n ax.set_title('Hidden bias', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(236)\n ax.hist(db)\n ax.set_title('Hidden bias variations', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.subplots_adjust(hspace=0.25)\n plt.show()\n plt.close('all')",
"def plotResultsNoNoise(inputfile, title, bins=10):\n path = datetime.datetime.now().isoformat()\n os.mkdir(path)\n path += '/'\n\n results = cPickle.load(open(inputfile))\n #copy input to the path\n try:\n shutil.copy2(inputfile, path+inputfile)\n except:\n pass\n\n print '\\n\\n\\n\\nFitted centre:'\n\n e = results['eclean'] - results['eCTI']\n e1 = results['e1clean'] - results['e1CTI']\n e2 = results['e2clean'] - results['e2CTI']\n x = results['xclean'] - results['xCTI']\n y = results['yclean'] - results['yCTI']\n r2 = (results['R2clean'] - results['R2CTI']) / results['R2clean']\n meane = np.mean(e)\n meane1 = np.mean(e1)\n meane2 = np.mean(e2)\n meanx = np.mean(x)\n meany = np.mean(y)\n meanr2 = np.mean(r2)\n\n print 'Delta e, e_1, e_2:', meane, meane1, meane2\n #print 'std e, e_1, e_2:', np.std(e), np.std(e1), np.std(e2)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(e, bins=bins, color='b', label='$e$', alpha=0.5)\n ax.hist(e1, bins=bins, color='r', label='$e_{1}$', alpha=0.5)\n ax.hist(e2, bins=bins, color='g', label='$e_{2}$', alpha=0.5)\n ax.axvline(x=meane, color='b', label='%.2e' % meane)\n ax.axvline(x=meane1, color='r', label='%.2e' % meane1)\n ax.axvline(x=meane2, color='g', label='%.2e' % meane2)\n ax.set_xlabel(r'$\\delta e$ [w/o - w/ CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'ellipticityDeltaFittedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(e1, e2, s=8, color='r', marker='o', alpha=0.5, label='w/o - w/ CTI')\n ax.set_xlabel(r'$\\delta e_{1}$')\n ax.set_ylabel(r'$\\delta e_{2}$')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'ellipticityFittedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(results['e1clean'], results['e2clean'], s=8, color='k', marker='s', alpha=0.1, label='no CTI')\n ax.scatter(results['e1CTI'], results['e2CTI'], s=8, color='r', marker='o', alpha=0.4, label='CTI')\n ax.set_xlim(-1, 1)\n ax.set_ylim(-1, 1)\n ax.set_xlabel(r'$e_{1}$')\n ax.set_ylabel(r'$e_{2}$')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'e1vse2FittedCentre.pdf')\n plt.close()\n\n print 'delta R2 / R2: mean, std ', meanr2, np.std(r2)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(r2, bins=bins, color='b', label='$R^{2}$')\n ax.axvline(x=meanr2,color='b', label='%.2e' % meanr2)\n ax.set_xlabel(r'$\\frac{\\delta R^{2}}{R^{2}_{ref}}$ [w/o - w CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'sizeDeltaFittedCentre.pdf')\n plt.close()\n\n print 'delta x: mean, std ', meanx, np.std(x)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(x, bins=bins, color='b', label='X Centre')\n ax.axvline(x=meanx,color='b', label='%.2e' % meanx)\n ax.set_xlabel(r'$\\delta X - X_{CTI}$ [w/o - w CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'xDeltaFittedCentre.pdf')\n plt.close()\n\n print 'delta y: mean, std ', meany, np.std(y)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(y, bins=bins, color='b', label='Y Centre')\n ax.axvline(x=meany,color='b', label='%.2e' % meany)\n ax.set_xlabel(r'$\\delta Y - Y_{CTI}$ [w/o - w CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'yDeltaFittedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(x, y, s=15, color='k', marker='s', alpha=0.5, 
label='w/o - w/ CTI')\n ax.set_xlabel(r'$\\delta X$')\n ax.set_ylabel(r'$\\delta Y$')\n plt.legend(shadow=True, fancybox=True, scatterpoints=1)\n plt.savefig(path+'coordinatesFittedCentre.pdf')\n plt.close()\n\n print '\\n\\n\\n\\nFixed centre:'\n\n e = results['eclean'] - results['eCTIfixed']\n e1 = results['e1clean'] - results['e1CTIfixed']\n e2 = results['e2clean'] - results['e2CTIfixed']\n x = results['xclean'] - results['xCTIfixed']\n y = results['yclean'] - results['yCTIfixed']\n r2 = (results['R2clean'] - results['R2CTIfixed']) / results['R2clean']\n meane = np.mean(e)\n meane1 = np.mean(e1)\n meane2 = np.mean(e2)\n meanx = np.mean(x)\n meany = np.mean(y)\n meanr2 = np.mean(r2)\n\n print 'Delta e, e_1, e_2:', meane, meane1, meane2\n #print 'std e, e_1, e_2:', np.std(e), np.std(e1), np.std(e2)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(e, bins=bins, color='b', label='$e$', alpha=0.5)\n ax.hist(e1, bins=bins, color='r', label='$e_{1}$', alpha=0.5)\n ax.hist(e2, bins=bins, color='g', label='$e_{2}$', alpha=0.5)\n ax.axvline(x=meane, color='b', label='%.2e' % meane)\n ax.axvline(x=meane1, color='r', label='%.2e' % meane1)\n ax.axvline(x=meane2, color='g', label='%.2e' % meane2)\n ax.set_xlabel(r'$\\delta e$ [w/o - w/ CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'ellipticityDeltaFixedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(e1, e2, s=8, color='r', marker='o', alpha=0.5, label='w/o - w/ CTI')\n ax.set_xlabel(r'$\\delta e_{1}$')\n ax.set_ylabel(r'$\\delta e_{2}$')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'ellipticityFixedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(results['e1clean'], results['e2clean'], s=8, color='k', marker='s', alpha=0.1, label='no CTI')\n ax.scatter(results['e1CTIfixed'], results['e2CTIfixed'], s=8, color='r', marker='o', alpha=0.4, label='CTI')\n ax.set_xlabel(r'$e_{1}$')\n ax.set_ylabel(r'$e_{2}$')\n ax.set_xlim(-1, 1)\n ax.set_ylim(-1, 1)\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'e1vse2FixedCentre.pdf')\n plt.close()\n\n print 'delta R2 / R2: mean, std ', meanr2, np.std(r2)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(r2, bins=bins, color='b', label='$R^{2}$')\n ax.axvline(x=meanr2, color='b', label='%.2e' % meanr2)\n ax.set_xlabel(r'$\\frac{\\delta R^{2}}{R^{2}_{ref}}$ [w/o - w CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'sizeDeltaFixedCentre.pdf')\n plt.close()\n\n print 'delta x: mean, std ', meanx, np.std(x)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(x, bins=bins, color='b', label='X Centre')\n ax.axvline(x=meanx, color='b', label='%.2e' % meanx)\n ax.set_xlabel(r'$X - X_{CTI}$')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'xDeltaFixedCentre.pdf')\n plt.close()\n\n print 'delta y: mean, std ', meany, np.std(y)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(y, bins=bins, color='b', label='Y Centre')\n ax.axvline(x=meany, color='b', label='%.2e' % meany)\n ax.set_xlabel(r'$Y - Y_{CTI}$')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'yDeltaFixedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(x, y, s=15, color='k', marker='s', alpha=0.5, label='w/o - w/ CTI')\n ax.set_xlabel(r'$\\delta X$')\n 
ax.set_ylabel(r'$\\delta Y$')\n plt.legend(shadow=True, fancybox=True, scatterpoints=1)\n plt.savefig(path+'coordinatesFixedCentre.pdf')\n plt.close()"
] | [
"0.76754117",
"0.64281666",
"0.6419849",
"0.63231015",
"0.6262315",
"0.62221205",
"0.6151655",
"0.6125331",
"0.6123557",
"0.61125845",
"0.6079311",
"0.60636973",
"0.6062547",
"0.6060509",
"0.6034784",
"0.6033427",
"0.60138935",
"0.6013693",
"0.6010012",
"0.59823984",
"0.5972633",
"0.5950683",
"0.5926307",
"0.591979",
"0.5912986",
"0.5908566",
"0.59066075",
"0.58881277",
"0.5873746",
"0.5869676"
] | 0.7740116 | 0 |
Plot and save histograms from predicted steerings and real steerings. Arguments | def make_and_save_histogramsY(pred_steerings, real_steerings,
img_name = "histogramsY.png"):
    pred_steerings = np.array(pred_steerings)
    real_steerings = np.array(real_steerings)
    # Shared bin edges spanning the combined range of predicted and real angles
    max_h = np.maximum(np.max(pred_steerings), np.max(real_steerings))
    min_h = np.minimum(np.min(pred_steerings), np.min(real_steerings))
    bins = np.linspace(min_h, max_h, num=50)
    # Overlaid, semi-transparent histograms allow direct comparison of the two distributions
    plt.hist(pred_steerings, bins=bins, alpha=0.5, label='Predicted', color='b')
    plt.hist(real_steerings, bins=bins, alpha=0.5, label='Real', color='r')
#plt.title('Steering angle')
plt.legend(fontsize=10)
plt.savefig(img_name, bbox_inches='tight') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_and_save_histogramsX(pred_steerings, real_steerings,\n img_name = \"histogramsX.png\"):\n pred_steerings = np.array(pred_steerings)\n real_steerings = np.array(real_steerings)\n max_h = np.maximum(np.max(pred_steerings), np.max(real_steerings))\n min_h = np.minimum(np.min(pred_steerings), np.min(real_steerings))\n bins = np.linspace(min_h, max_h, num=50)\n plt.hist(pred_steerings, bins=bins, alpha=0.5, label='Predicted', color='b')\n plt.hist(real_steerings, bins=bins, alpha=0.5, label='Real', color='r')\n #plt.title('Steering angle')\n plt.legend(fontsize=10)\n plt.savefig(img_name, bbox_inches='tight')",
"def featuresHist(self, **kwargs):\n\n # Selecting bins automatically:\n bins_onpower = np.arange(self.onpower_train.min().values[0],\n self.onpower_train.max().values[0],\n (self.onpower_train.max().values[0] -\n self.onpower_train.min().values[0]) / 50)\n\n bins_offpower = np.arange(self.offpower_train.min().values[0],\n self.offpower_train.max().values[0],\n (self.offpower_train.max().values[0] -\n self.offpower_train.min().values[0]) / 50)\n\n bins_duration = np.arange(self.duration_train.min().values[0],\n self.duration_train.max().values[0],\n (self.duration_train.max().values[0] -\n self.duration_train.min().values[0]) / 50)\n\n # If a bin has been specified update the bin sizes.\n for key in kwargs:\n if key == 'bins_onpower':\n bins_onpower = kwargs[key]\n elif key == 'bins_offpower':\n bins_offpower = kwargs[key]\n elif key == 'bins_duration':\n bins_duration = kwargs[key]\n else:\n print(\"Non valid kwarg\")\n\n # Plot structure:\n fig = plt.figure()\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312)\n ax3 = fig.add_subplot(313)\n\n # Evaluating score for:\n # Onpower\n x = np.arange(bins_onpower.min(), bins_onpower.max() + \\\n np.diff(bins_onpower)[0], np.diff(bins_onpower)[0] / float(1000)).reshape(-1, 1)\n y = self.__pdf(self.onpower, x)\n norm = pd.cut(\n self.onpower_train.onpower, bins=bins_onpower).value_counts().max() / max(y)\n # Plots for Onpower\n ax1.hist(\n self.onpower_train.onpower.values, bins=bins_onpower, alpha=0.5)\n ax1.plot(x, y * norm)\n #ax1.set_title(\"Feature: Onpower\")\n #ax1.set_ylabel(\"Counts\")\n #ax1.set_xlabel(\"On power (W)\")\n ax1.set_ylabel(\"On power counts\")\n\n # Offpower\n x = np.arange(bins_offpower.min(), bins_offpower.max() + \\\n np.diff(bins_offpower)[0], np.diff(bins_offpower)[0] / float(1000)).reshape(-1, 1)\n y = self.__pdf(self.offpower, x)\n norm = pd.cut(self.offpower_train.offpower,\n bins=bins_offpower).value_counts().max() / max(y)\n # Plots for Offpower\n ax2.hist(self.offpower_train.offpower.values,\n bins=bins_offpower, alpha=0.5)\n ax2.plot(x, y * norm)\n #ax2.set_title(\"Feature: Offpower\")\n #ax2.set_ylabel(\"Counts\")\n #ax2.set_xlabel(\"Off power (W)\")\n ax2.set_ylabel(\"Off power counts\")\n\n # Duration\n x = np.arange(bins_duration.min(), bins_duration.max() + \\\n np.diff(bins_duration)[0], np.diff(bins_duration)[0] / float(1000)).reshape(-1, 1)\n y = self.__pdf(self.duration, x)\n norm = pd.cut(self.duration_train.duration,\n bins=bins_duration).value_counts().max() / max(y)\n # Plots for duration\n ax3.hist(self.duration_train.duration.values,\n bins=bins_duration, alpha=0.5)\n ax3.plot(x, y * norm)\n #ax3.set_title(\"Feature: Duration\")\n #ax3.set_ylabel(\"Counts\")\n #ax3.set_xlabel(\"Duration (seconds)\")\n ax3.set_ylabel(\"Duration counts\")",
"def plot_hist_snfit_meta(self): \n \n self.read_meta()\n self.read_snfit_results()\n\n \n self.diff_x0 = []\n self.diff_x0_err = []\n self.diff_x1 = []\n self.diff_x1_err = [] \n self.diff_c = []\n self.diff_c_err = [] \n self.diff_mb = []\n self.diff_mb_err = [] \n self.diff_cov_x0_x1 = []\n self.diff_cov_x0_c = []\n self.diff_cov_x1_c = []\n self.diff_cov_mb_x1 = []\n self.diff_cov_mb_c = []\n\n for i in range (len(self.sn_name)):\n for j in range (len(self.meta_sn_name_list)):\n if self.sn_name[i] == self.meta_sn_name_list[j]:\n if np.abs(self.mb[i] - self.meta_mb[j]) < 0.0001:\n self.diff_x0.append(self.x0[i] - self.meta_x0[j])\n self.diff_x0_err.append(self.x0_err[i] - self.meta_x0_err[j])\n self.diff_x1.append(self.x1[i] - self.meta_x1[j])\n self.diff_x1_err.append(self.x1_err[i] - self.meta_x1_err[j]) \n self.diff_c.append(self.c[i] - self.meta_c[j])\n self.diff_c_err.append(self.c_err[i] - self.meta_c_err[j]) \n self.diff_mb.append(self.mb[i] - self.meta_mb[j])\n self.diff_mb_err.append(self.mb_err[i] - self.meta_mb_err[j])\n# self.diff_cov_x0_x1.append()\n# self.diff_cov_x0_c.append()\n# self.diff_cov_x1_c.append()\n# self.diff_cov_mb_x1.append()\n# self.diff_cov_mb_c.append()\n else:\n print self.x1[i] - self.meta_x1[j], self.sn_name[i],self.meta_sn_name_list[j], self.x1[i], self.meta_x1[j]\n\n\n fig = plt.figure(figsize=(8.,8.)) \n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x0,25,label='$\\Delta$ X0')\n ax0_2.hist(self.diff_x0_err,25,label='$\\Delta$ X0 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/x0_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x1,25,label='$\\Delta$ X1')\n ax0_2.hist(self.diff_x1_err,25,label='$\\Delta$ X1 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/x1_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_c,25,label='$\\Delta$ Color')\n ax0_2.hist(self.diff_c_err,25,label='$\\Delta$ Color error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/color_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n\n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_mb,50,label='$\\Delta$ mb')\n ax0_2.hist(self.diff_mb_err,50,label='$\\Delta$ mb error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/mb_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()",
"def generate_plots():\n\n hmp = homemonitor_plot()\n hmp.load_data()\n hmp.plot_day()\n hmp.plot_hist()",
"def hist_save(self, d, bin1, name, no):\n\t\tfor i in range(0,no):\n\t\t\ts = d[:,i]\n\t\t\tplt.hist(s, bin1, normed=True, color='c')\t# Extracting the parameters from the histogram\n\t\t\tplt.title('Probability Distribution Fnction of %s' %name, fontsize=20)\n\t\t\tplt.xlabel(\"Filter tap values\", fontsize=20)\n\t\t\tplt.ylabel(\"Probability Distribution\", fontsize=20)\n#\t\t\tplt.xlim(0,0.10)\n\t\t\tplt.ylim(0,100)\n#\t\t\tplt.legend(fontsize = 'xx-large')\n\t\t\tplt.savefig('/home/abhishek/Results/comparison_all_sets/Curve fitting/test/set_1/hist_%s_index_%d' %(name,i))\n\t\t\tplt.close()",
"def plot_predictions_histogram(df):\n\n fig = go.Figure()\n fig.add_trace(go.Histogram(x=df[\"preds\"], name=\"preds\"))\n fig.add_trace(go.Histogram(x=df[\"truth\"], name=\"truth\"))\n\n # Overlay both histograms\n fig.update_layout(barmode=\"overlay\")\n # Reduce opacity to see both histograms\n fig.update_traces(opacity=0.5)\n fig.update_layout(xaxis_title=r\"HOMO-LUMO\", yaxis_title=r\"count.\")\n wandb.log({f\"Predictions Hist\": fig})",
"def plot_residuals(turnstile_weather, predictions):\n plt.figure()\n (turnstile_weather['ENTRIESn_hourly'] - predictions).hist()\n return plt",
"def plot_hist(self):\n labels = [self.get_class_str(action, obj)\n for (action, obj, subj, rec, beg, end) in self.action_clips]\n visualize.plot_hist(labels, proportion=True)",
"def plot_onemitexample_R2N_hist_paperfigure(eg_netseed,eg_mitnum,resultsdir='../results/odor_morphs'):\n fig = figure(figsize=(columnwidth,columnwidth/2.0),dpi=300,facecolor='w') # 'none' is transparent\n ax3 = fig.add_subplot(2,3,1)\n ax4 = fig.add_subplot(2,3,2)\n ax5 = fig.add_subplot(2,3,4)\n ax6 = fig.add_subplot(2,3,5)\n ax1 = fig.add_subplot(2,3,3)\n ax2 = fig.add_subplot(2,3,6)\n ## inh = (no_singles,no_joints,no_lat,no_PGs,varyRMP)\n inh_options = [ (0,(False,False,False,False,False),'lat inh') ]\n for ploti,(inhi,inh,inhstr) in enumerate(inh_options):\n R2Ns = []\n lin_R2Ns = []\n chilist = []\n n_accept = 0\n for stimi,stimseed in enumerate(stim_seeds):\n if not salient: net_seeds = [stimseed]\n for neti,netseed in enumerate(net_seeds):\n for ngi,num_gloms in enumerate([3]):\n\n filename, switch_strs \\\n = get_filename(netseed,stimseed,inh,num_gloms,stimi,neti,inhi,resultsdir=resultsdir)\n switches_str = string.join(switch_strs,'')\n ## if the result file for these seeds & tweaks doesn't exist,\n ## then carry on to the next.\n if not os.path.exists(filename): continue\n print filename\n for fitted_mitral in [0,1]:\n ## First the weighted-linear sigmoid:\n ## If the fitted params file does not exist, create it (them).\n if not os.path.exists(filename+'_params'+str(fitted_mitral)):\n print \"fitting file\",filename\n refit = True\n else: refit = False\n ## read in params & responses for this result file\n mit_fit_params = \\\n fit_om.fit_morphs(filename, fitted_mitral, 'arb', refit=refit)\n params,chisq,inputsA,inputsB,fitted_responses,\\\n numavgs,firingbinsmeanList,firingbinserrList = mit_fit_params\n S2N,S2R = forR2N.residual2noise(fitted_responses[-2],firingbinsmeanList[-2],\\\n firingbinserrList[-2]*sqrt(numavgs),starti=0) # odor A\n R2N_A = S2N/S2R\n if isnan(R2N_A): continue\n S2N,S2R = forR2N.residual2noise(fitted_responses[0],firingbinsmeanList[0],\\\n firingbinserrList[0]*sqrt(numavgs),starti=0) # odor B\n R2N_B = S2N/S2R\n if isnan(R2N_B): continue\n R2Ns.append(R2N_A)\n R2Ns.append(R2N_B)\n if netseed == eg_netseed and fitted_mitral == eg_mitnum:\n fit_om.plot_example_onemit(ax3,ax4,eg_mitnum,mit_fit_params)\n \n ## Linear-rectifier or Linear-sigmoid depending on FULLlin variable above.\n ## If the fitted params file does not exist, create it (them).\n if not os.path.exists(filename+'_params'+linextn+str(fitted_mitral)):\n print \"fitting FULLlin file\",filename\n refit = True\n else: refit = False\n ## fit/get the params and responses for this result file\n mit_fit_params = \\\n fit_om.fit_morphs(filename, fitted_mitral, 'lin', refit=refit)\n params,chisq,inputsA,inputsB,fitted_responses,\\\n numavgs,firingbinsmeanList,firingbinserrList = mit_fit_params\n S2N,S2R = forR2N.residual2noise(fitted_responses[-2],firingbinsmeanList[-2],\\\n firingbinserrList[-2]*sqrt(numavgs),starti=0) # odor A\n R2N_A = S2N/S2R\n if isnan(R2N_A): continue\n S2N,S2R = forR2N.residual2noise(fitted_responses[0],firingbinsmeanList[0],\\\n firingbinserrList[0]*sqrt(numavgs),starti=0) # odor B\n R2N_B = S2N/S2R\n if isnan(R2N_B): continue\n lin_R2Ns.append(R2N_A)\n lin_R2Ns.append(R2N_B)\n chilist.append(sqrt(chisq))\n if netseed == eg_netseed and fitted_mitral == eg_mitnum:\n fit_om.plot_example_onemit(ax5,ax6,eg_mitnum,mit_fit_params)\n\n n_accept += 1\n\n R2N_max = 1.0\n ax1.hist(clip(R2Ns,0,R2N_max),20,normed=True,edgecolor='b',facecolor='b')\n _,y1 = ax1.get_ylim()\n ax2.hist(clip(lin_R2Ns,0,R2N_max),20,normed=True,edgecolor='b',facecolor='b')\n 
#ax2.hist(clip(chilist,0,R2N_max),20,normed=True,edgecolor='b',facecolor='b')\n _,y2 = ax2.get_ylim()\n yR2Nmax = max(y1,y2)\n print \"Number of mitral cells accepted =\",n_accept\n \n ## beautify plots\n for axnum,ax in enumerate([ax1,ax2]):\n xmin,xmax,ymin,ymax = \\\n beautify_plot(ax,x0min=True,y0min=True,xticksposn='bottom',yticksposn='left')\n ax.set_xlim([0,R2N_max])\n ax.set_xticks([0,R2N_max])\n ax.set_ylim([0,yR2Nmax])\n ax.set_yticks([0,yR2Nmax])\n for ax in [ax1,ax3,ax4]:\n ax.set_xticklabels(['',''])\n ## axes_labels() sets sizes of tick labels too.\n axes_labels(ax1,'','prob. density',adjustpos=False,xpad=0,ypad=0)\n ax1.yaxis.set_label_coords(-0.29,-0.3)\n axes_labels(ax2,'$\\sqrt{residual/noise}$','',adjustpos=False,xpad=1,ypad=0)\n\n axes_labels(ax3,'','firing rate (Hz)',adjustpos=False,xpad=0,ypad=0)\n ax3.yaxis.set_label_coords(-0.29,-0.3)\n axes_labels(ax5,'time (s)','',adjustpos=False,xpad=3,ypad=0)\n\n axes_labels(ax4,'','fitted weight',adjustpos=False,xpad=0,ypad=0)\n ax4.yaxis.set_label_coords(-0.24,-0.3)\n axes_labels(ax6,'conc (% SV)','',adjustpos=False,xpad=3,ypad=0)\n\n fig_clip_off(fig)\n fig.tight_layout()\n fig.subplots_adjust(hspace=0.3,wspace=0.5) # has to be after tight_layout()\n fig.savefig('../figures/morphs_R2Ns.svg',dpi=fig.dpi)\n fig.savefig('../figures/morphs_R2Ns.png',dpi=fig.dpi)",
"def save_dataset_visual(lines, output_dir):\n\n # Ensure output directory exists\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n cols = 3\n rows = 3\n fig_size = (7 * cols, 4 * rows) # Figure width and height, in inches\n # Random sample of images\n save_name = \"training_data_sample.png\"\n fig, ax = plt.subplots(rows, cols, figsize=fig_size)\n for row in range(rows):\n for col in range(cols):\n idx = np.random.randint(0, len(lines))\n ax[row, col].imshow(ndimage.imread(lines[idx][0]))\n ax[row, col].set_title(\"Angle = \" + str(round(lines[idx][1], 3)))\n plt.savefig(output_dir + save_name, bbox_inches='tight')\n # Distribution of steering angles\n save_name = \"data_histogram.png\"\n fig_size = (5, 3) # Figure width and height, in inches\n num_bins = 100\n angles = np.array([line[1] for line in lines])\n hist, bins = np.histogram(angles, bins=num_bins)\n fig = plt.figure(figsize=fig_size)\n plt.bar(bins[:-1], hist)\n plt.xlabel(\"Steering Angle\")\n plt.ylabel(\"Frequency\")\n plt.title(\"Distribution of Steering Angles in Training Data\")\n plt.savefig(output_dir + save_name, bbox_inches='tight')",
"def featuresHist_colors(self, **kwargs):\n # Selecting bins automatically:\n bins_onpower = np.arange(self.onpower_train.min().values[0],\n self.onpower_train.max().values[0],\n (self.onpower_train.max().values[0] -\n self.onpower_train.min().values[0]) / 50)\n\n bins_offpower = np.arange(self.offpower_train.min().values[0],\n self.offpower_train.max().values[0],\n (self.offpower_train.max().values[0] -\n self.offpower_train.min().values[0]) / 50)\n\n bins_duration = np.arange(self.duration_train.min().values[0],\n self.duration_train.max().values[0],\n (self.duration_train.max().values[0] -\n self.duration_train.min().values[0]) / 50)\n\n # If a bin has been specified update the bin sizes.\n # Updating bins with specified values.\n for key in kwargs:\n if key == 'bins_onpower':\n bins_onpower = kwargs[key]\n elif key == 'bins_offpower':\n bins_offpower = kwargs[key]\n elif key == 'bins_duration':\n bins_duration = kwargs[key]\n else:\n print(\"Non valid kwarg\")\n\n # Plot:\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(311)\n ax2 = fig1.add_subplot(312)\n ax3 = fig1.add_subplot(313)\n\n start = 0\n end = 0\n for ind in np.arange(len(self.stats)):\n\n if self.stats[ind]['Nevents'] != 0:\n if ind == 0:\n start = 0\n else:\n start = end\n end += self.stats[ind]['Nevents']\n ax1.hist(\n self.onpower_train[start:end].onpower.values, bins=bins_onpower, alpha=0.5)\n ax2.hist(\n self.offpower_train[start:end].offpower.values, bins=bins_offpower, alpha=0.5)\n ax3.hist(\n self.duration_train[start:end].duration.values, bins=bins_duration, alpha=0.5)\n\n ax1.set_title(\"Feature: Onpower\")\n ax1.set_xlabel(\"Watts\")\n ax1.set_ylabel(\"Counts\")\n\n ax2.set_title(\"Feature: Offpower\")\n ax2.set_xlabel(\"Watts\")\n ax2.set_ylabel(\"Counts\")\n\n ax3.set_title(\"Feature: Duration\")\n ax3.set_xlabel(\"Seconds\")\n ax3.set_ylabel(\"Counts\")",
"def sSFR_hist(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n method = p.method\n fig,ax = plt.subplots(figsize = (8,4))\n xlim = [-4,0.5] \n\n # Plot 25 Mpc box? \n if p.select == '_25Mpc':\n GR = glo.global_results(sim_run='_25Mpc',nGal=240,grid_ext='_ext_ism_BPASS')\n M_star,SFR,Zsfr = getattr(GR,'M_star'),getattr(GR,'SFR'),getattr(GR,'Zsfr')\n sSFR = SFR/M_star\n ax.hist(np.log10(1e9*sSFR),bins=50,color='deepskyblue',alpha=1,label='Simba-25 galaxy sample',zorder=10)\n\n # Plot current sample\n GR = glo.global_results()\n M_star,SFR,Zsfr = getattr(GR,'M_star'),getattr(GR,'SFR'),getattr(GR,'Zsfr')\n sSFR = SFR/M_star\n ax.hist(np.log10(1e9*sSFR),bins=50,color='green',alpha=0.5,label='Simba-100 galaxy sample',zorder=10)\n\n # Plot all galaxies in simulation volume\n df_all = pd.read_pickle(p.d_data + 'galaxy_selection/z0_all_galaxies%s' % p.sim_runs[0])\n print('%i galaxies in Simba-%s' % (len(df_all),p.sim_runs[0]))\n df_all1 = df_all[(df_all['SFR_'+method] > 0) & (df_all['SFR_'+method] != 1)]\n df_all = pd.read_pickle(p.d_data + 'galaxy_selection/z0_all_galaxies%s' % p.sim_runs[1])\n print('%i galaxies in Simba-%s' % (len(df_all),p.sim_runs[1]))\n df_all2 = df_all[df_all['SFR_'+method] > 0]\n df_all = df_all1.append(df_all2, ignore_index=True)\n\n sSFR = df_all['SFR_'+method].values/df_all['M_star_'+method].values\n ax2 = ax.twinx()\n sSFR = sSFR[(df_all['SFR_'+method] > 0) & (df_all['M_star_'+method] > 0)].astype('float')\n #ax2.hist(np.log10(1e9*sSFR[sSFR > 10**xlim[0]]),bins=100,color='grey',alpha=0.5,label='All SF galaxies in Simba-25 and Simba-100',zorder=10)\n ax2.hist(np.log10(1e9*sSFR[sSFR > 10.**xlim[0]/1e9]),fill=False,bins=100,histtype='stepfilled',fc=None,ec='k',alpha=0.8,label='All SF galaxies in Simba-25 and Simba-100',zorder=10)\n\n ax.set_xlabel(r'log sSFR [Gyr$^{-1}$]')\n ax.set_ylabel('Number of selected galaxies')\n ax2.set_ylabel('Number of galaxies in Simba')\n ax.set_yscale('log')\n ax.set_xlim(xlim)\n ax.set_ylim([0.8,5e1])\n \n handles,labels = ax.get_legend_handles_labels()\n handles2,labels2 = ax2.get_legend_handles_labels()\n handles = np.append(handles,handles2)\n labels = np.append(labels,labels2)\n #ax.legend(fontsize=12)\n ax.legend(handles,labels,fontsize=11)\n plt.tight_layout()\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/') \n plt.savefig('plots/sim_data/sSFR_%s_%s%s' % (method,p.sim_name,p.sim_run),dpi=250,facecolor='w')",
"def bench_plotter(self):\n\n # plot random as histogram, upper en lower bound as a red line\n minima = []\n for i in range(1, 4):\n cost_list = []\n with open(f\"../output_runs/text_info_random{i}_10k.txt\", \"r\") as f:\n text = f.read().split('\\n')\n counter = 0\n for number in text:\n counter += 1\n if number is not \"\":\n cost_list.append(int(number))\n if counter == 1000:\n break\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random:\", minim, maxim)\n plt.axvline(x=53188, color='r')\n plt.axvline(x=103030, color=\"r\")\n plt.hist(cost_list, bins=20, alpha=0.5, label=f\"Random walk\")\n\n # plot histogram of priority and hillclimber\n cost_list = []\n with open(f\"../output_runs/text_info_prior_hill{i}_\\\n 1k.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"prior hill:\", minim, maxim)\n plt.hist(cost_list, bins=20, alpha=0.5, label=f\"Priority + Hill\")\n\n # plot histogram of simulated annealing\n cost_list = []\n with open(f\"../output_runs/simulated_annealing{i}_1000.txt\",\n \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random+anneal:\", minim, maxim)\n plt.hist(cost_list, bins=20, alpha=0.5,\n label=f\"Random + sim anneal\")\n\n # plot histogram of random plus hillclimber\n cost_list = []\n with open(f\"../output_runs/random_hill{i}_1000.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random+hill:\", minim, maxim)\n plt.hist(cost_list, bins=100, alpha=0.5,\n label=f\"Random + Hillclimber\")\n\n # plot histogram of kmeans plus hillclimber\n cost_list = []\n with open(f\"../output_runs/text_k-means_hill{i}_\\\n 1000.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n plt.hist(cost_list, bins=20, alpha=0.5,\n label=f\"Kmean and hill {i}\")\n totalmin = min(minima)\n plt.axvline(x=totalmin, color=\"g\")\n plt.title(f\"4 algorithms Wijk {i}, lowest cost: {totalmin}\")\n plt.xlabel(\"Cost\")\n plt.ylabel(\"Frequency\")\n plt.legend(loc='upper right')\n plt.show()",
"def test_traj () :\n samples = getAllTraj()\n states = []\n for t in samples : \n states.extend([toInternalStateRep(s) for s, _, _ in t])\n states = np.stack(states)\n xRange = np.linspace(-np.pi, np.pi, 100)\n yRange = np.linspace(-np.pi, np.pi, 100)\n plotHist(states, xRange, yRange, 'theta1', 'theta2', 'S Count')",
"def plot_hist(datasets, bins, labels, alphas):\n assert len(labels) == len(datasets)\n assert len(alphas) == len(datasets)\n plt.figure(figsize=[9,6])\n for idx, data in enumerate(datasets):\n plt.hist(data, bins=bins[idx], density=True, label=labels[idx], alpha=alphas[idx])\n plt.xlabel(\"PHQ score\")\n plt.ylabel(\"Probability\")\n plt.legend()\n plt.savefig(\"saved_plots/hist_\"+\"_\".join(labels)+\".png\")\n plt.show()",
"def plot_loss(self):\n #x = [k for k in range(self.rep)]\n loss = self.min_list[:,0]//100 #For clarity\n #plt.plot(x,self.min_list[:,0])\n plt.hist(loss,density=True)\n plt.xlabel(self.list_name + '_loss//100')\n plt.ylabel('Frequency')\n #plt.xticks(range(8),[0,250,500,750,1000,1250,1500,1750])\n plt.title('Distribution of '+self.list_name+'_loss ('+str(self.rep)+' iterations)')\n plt.savefig('img/stats/'+self.list_name+'_lossFrequency_'+self.model_name+'.png')\n plt.show()",
"def generate_history_plot(data, labels_dict, file_title, plot_title):\n fig = plt.figure()\n ax = sns.histplot(data)\n\n if labels_dict:\n ax.set_xlabel(labels_dict[\"x\"])\n if plot_title:\n ax.set_title(plot_title)\n\n plt.savefig(file_title)",
"def plot_and_save(data, prefix, name):\n plt.figure()\n plt.hist(data)\n plt.title(name)\n plt.xlabel(\"Value\")\n plt.ylabel(\"Frequency\")\n plt.savefig(prefix + name + '.png')\n plt.close()",
"def ex1_plots(instance, destination, prefix, save, animate):\n \n plts = ukf_plots(instance, destination, prefix, save, animate)\n\n truths = truth_parser(instance)\n nan_array= nan_array_parser(instance, truths, instance.base_model)\n #obs, obs_key = obs_parser(instance, True)\n obs_key = obs_key_parser(instance, True)\n preds = preds_parser(instance, True)\n #forecasts = forecasts_parser(instance, True)\n \n ukf_params = instance.ukf_params\n index2 = ukf_params[\"index2\"]\n \n \"remove agents not in model to avoid wierd plots\"\n #obs *= nan_array\n truths *= nan_array\n preds *= nan_array\n #forecasts*= nan_array\n \n \"indices for unobserved agents\"\n not_index2 = np.array([i for i in np.arange(truths.shape[1]) if i not in index2])\n plts.pair_frame(truths, preds, obs_key, 10, destination)\n plts.error_hist(truths[::instance.sample_rate,index2], \n preds[::instance.sample_rate,index2],\"Observed Errors\")\n if len(not_index2)>0:\n plts.error_hist(truths[::instance.sample_rate, not_index2], \n preds[::instance.sample_rate, not_index2],\"Unobserved Errors\")\n \n #plts.path_plots(obs[::instance.sample_rate] , \"Observed\")\n plts.path_plots(preds[::instance.sample_rate], \"Predicted\")\n plts.path_plots(truths, \"True\")\n #plts.path_plots(forecasts[::instance.sample_rate], \"Forecasts\")\n\n if animate:\n #plts.trajectories(truths, \"plots/\")\n plts.pair_frames(truths, preds, obs_key,\n truths.shape[0], \"../../plots/\")",
"def _plot(self, step, rewards, losses):\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('Total Episode Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('MSE Loss')\n plt.plot(losses)\n plt.show()",
"def analyze_hrv(self, save_plot=False, save_csv=False):\n\n df = self.df_epoch.copy()\n\n plt.subplots(1, 2, figsize=(self.fig_width, self.fig_height))\n plt.title(\"HRV with interpretation (Shaffer & Ginsberg, 2017)\")\n\n plt.subplot(1, 2, 1)\n h = plt.hist(df[\"RR_SD\"].dropna(), bins=np.arange(0, 250, 10),\n weights=100*np.ones(len(df[\"RR_SD\"].dropna())) / len(df[\"RR_SD\"].dropna()),\n edgecolor='black', color='grey', alpha=.5, cumulative=False)\n plt.ylabel(\"% of epochs\")\n plt.xlabel(\"RR SD (ms)\")\n plt.title(\"All data\")\n\n # Shaffer & Ginsberg, 2017 interpretation\n plt.fill_betweenx(x1=0, x2=50, y=[0, plt.ylim()[1]], color='red', alpha=.5,\n label=\"Unhealthy ({}%)\".format(round(sum(h[0][0:5]), 1)))\n plt.fill_betweenx(x1=50, x2=100, y=[0, plt.ylim()[1]], color='orange', alpha=.5,\n label=\"Compromised ({}%)\".format(round(sum(h[0][5:10]), 1)))\n plt.fill_betweenx(x1=100, x2=250, y=[0, plt.ylim()[1]], color='green', alpha=.5,\n label=\"Healthy ({}%)\".format(round(sum(h[0][10:]), 1)))\n plt.legend()\n\n df = self.df_epoch.dropna()\n df = df.loc[df[\"HR_Intensity\"] == 0]\n\n plt.subplot(1, 2, 2)\n h = plt.hist(df[\"RR_SD\"].dropna(), bins=np.arange(0, 250, 10),\n weights=100*np.ones(len(df[\"RR_SD\"].dropna())) / len(df[\"RR_SD\"].dropna()),\n edgecolor='black', color='grey', alpha=.5, cumulative=False)\n plt.xlabel(\"RR SD (ms)\")\n plt.title(\"Sedentary only\")\n\n # Shaffer & Ginsberg, 2017 interpretation\n plt.fill_betweenx(x1=0, x2=50, y=[0, plt.ylim()[1]], color='red', alpha=.5,\n label=\"Unhealthy ({}%)\".format(round(sum(h[0][0:5]), 1)))\n plt.fill_betweenx(x1=50, x2=100, y=[0, plt.ylim()[1]], color='orange', alpha=.5,\n label=\"Compromised ({}%)\".format(round(sum(h[0][5:10]), 1)))\n plt.fill_betweenx(x1=100, x2=250, y=[0, plt.ylim()[1]], color='green', alpha=.5,\n label=\"Healthy ({}%)\".format(round(sum(h[0][10:]), 1)))\n plt.legend()\n\n if save_plot:\n print(\"Saving plot as HRV_Histogram.png\")\n plt.savefig(\"HRV_Histogram.png\")\n\n if save_csv:\n print(\"Data saved as HRV_FrequencyData.csv\")\n bins = [i for i in h[1][:-1]]\n freqs = [i for i in h[0]]\n data = pd.DataFrame(list(zip(bins, freqs)), columns=[\"Bin\", \"Frequency_%\"])\n data.to_csv(\"HRV_FrequencyData.csv\", index=False)",
"def counts_histogram(predicted=False):\n log.info(\"loading the results from the joint fit to predict the counts\")\n results = load_yaml(\n f\"{config.repo_path}/results/fit/gammapy/joint/fit_results_logparabola.yaml\"\n )\n parameters = results[\"parameters\"]\n\n model_lp = LogParabola.from_log10(\n amplitude=parameters[0][\"value\"] * u.Unit(parameters[0][\"unit\"]),\n reference=parameters[1][\"value\"] * u.Unit(parameters[1][\"unit\"]),\n alpha=parameters[2][\"value\"] * u.Unit(parameters[2][\"unit\"]),\n beta=parameters[3][\"value\"] * u.Unit(parameters[3][\"unit\"]),\n )\n\n # defining the figure\n dict_color = {\n \"fermi\": COLORS[0],\n \"magic\": COLORS[1],\n \"veritas\": COLORS[2],\n \"fact\": COLORS[3],\n \"hess\": COLORS[4],\n }\n fig, ax = plt.subplots()\n\n for which in config.all_datasets:\n log.info(f\"predicting counts for {which} dataset\")\n dataset = config.get_dataset(which)\n obs = dataset.get_SpectrumObservationList().stack()\n cts_pred = CountsPredictor(\n model=model_lp, aeff=obs.aeff, edisp=obs.edisp, livetime=obs.livetime\n )\n cts_pred.run()\n\n e_max = dataset.energy_range[1].to(\"TeV\").value\n e_min = dataset.energy_range[0].to(\"TeV\").value\n\n kwargs_mdl = dict(ls=\":\", range=(e_min, e_max), lw=2.2, color=dict_color[which])\n kwargs_data = dict(\n ls=\"-\", range=(e_min, e_max), lw=2.2, color=dict_color[which]\n )\n\n # CountsSpectrum with observed and predicted excesses\n ex_pred = cts_pred.npred\n ex_obs = obs.excess_vector\n # if it is an IACT rebin the counts before plotting\n if which != \"fermi\":\n ex_pred = ex_pred.rebin(2)\n ex_obs = ex_obs.rebin(2)\n\n if predicted: # if you want to display the predicted counts\n ex_pred.plot_hist(ax, **kwargs_mdl)\n ex_obs.plot_hist(ax, **kwargs_data)\n\n # custom legend\n legend_observed = mlines.Line2D(\n [], [], color=\"gray\", marker=\"\", ls=\"-\", lw=2, label=\"observed\"\n )\n legend_expected = mlines.Line2D(\n [], [], color=\"gray\", marker=\"\", ls=\":\", lw=2, label=\"expected\"\n )\n legend_fermi = mlines.Line2D(\n [], [], color=COLORS[0], marker=\"\", ls=\"-\", lw=2, label=\"Fermi-LAT\"\n )\n legend_magic = mlines.Line2D(\n [], [], color=COLORS[1], marker=\"\", ls=\"-\", lw=2, label=\"MAGIC\"\n )\n legend_veritas = mlines.Line2D(\n [], [], color=COLORS[2], marker=\"\", ls=\"-\", lw=2, label=\"VERITAS\"\n )\n legend_fact = mlines.Line2D(\n [], [], color=COLORS[3], marker=\"\", ls=\"-\", lw=2, label=\"FACT\"\n )\n legend_hess = mlines.Line2D(\n [], [], color=COLORS[4], marker=\"\", ls=\"-\", lw=2, label=\"H.E.S.S.\"\n )\n legend_handles = [\n legend_fermi,\n legend_magic,\n legend_veritas,\n legend_fact,\n legend_hess,\n ]\n if predicted: # if you want to display the predicted counts\n legend_handles = [legend_observed, legend_expected] + legend_handles\n\n ax.legend(handles=legend_handles, fontsize=FONTSIZE)\n\n ax.set_xscale(\"log\")\n ax.set_ylabel(\"Excess counts\", size=FONTSIZE)\n ax.set_xlabel(E_UNIT_LABEL, size=FONTSIZE)\n\n # make axis thicker\n for axis in [\"top\", \"bottom\", \"left\", \"right\"]:\n ax.spines[axis].set_linewidth(1.6)\n ax.tick_params(\"both\", length=7, width=1.6, which=\"major\", labelsize=FONTSIZE)\n ax.tick_params(\"both\", length=4, width=1.6, which=\"minor\", labelsize=FONTSIZE)\n\n plt.tight_layout()\n\n filename = f\"{config.repo_path}/results/figures/counts_spectra.png\"\n filename_pdf = f\"{config.repo_path}/results/figures/counts_spectra.pdf\"\n log.info(f\"saving figure in {filename}\")\n fig.savefig(filename)\n fig.savefig(filename_pdf)",
"def plot_heldout_prediction(input_val,\n y_val,\n mu_val,\n sigma_val,\n fname=None,\n n=1,\n title=\"\"):\n fig = figure.Figure(figsize=(9, 3 * n))\n canvas = backend_agg.FigureCanvasAgg(fig)\n for i in range(n):\n ax = fig.add_subplot(n, i + 1, 1)\n ax.plot(input_val, y_val, label='True data')\n ax.plot(input_val, mu_val, label='Predictive mean')\n lower = mu_val - 1.96 * sigma_val\n upper = mu_val + 1.96 * sigma_val\n ax.fill_between(\n input_val, lower, upper, label='95% confidence interval')\n\n plt.legend()\n fig.suptitle(title)\n fig.tight_layout()\n\n if fname is not None:\n canvas.print_figure(fname, format=\"png\")\n print(\"saved {}\".format(fname))",
"def visualize(self):\r\n self.aggregator.plot_loss()\r\n self.save_figure()",
"def getSFsFromPostPreFitPlots( plots , plotDir , saveDir , bins = [] , keys = [ 'Fakes', 'WJets', 'TTJets' , 'stack' ] , name = \"PostPre\", hist_colors=h_colors, dOpt=\"hist text\") :\n plot_values = degTools.dict_function( plots, lambda x: degTools.getTH1FbinContent( x , get_errors=True) ) #if type( x ) in [ ROOT.TH1F , ROOT.TH2F] else None ) \n #SFs = {'WJets':{}, 'TTJets':{} , 'Fakes':{} , 'stack':{}}\n SFs = {k:{} for k in keys}\n SF_hists = degTools.deepcopy(SFs)\n canv_sfs = ROOT.TCanvas( \"SFs\", \"SFs\", 1000,800 )\n dOpt_ = \"%s\"%dOpt\n ROOT.gStyle.SetPaintTextFormat(\"0.2f\")\n hsh = degTools.uniqueHash()\n for bkg in keys:\n SFs[bkg] = degTools.dict_manipulator( [ plot_values[f][bkg] for f in ['fit_b', 'prefit' ] ] , lambda a,b: a/b.val if b.val else u_float(1.0) )\n SF_hists[bkg] = degTools.makeHistoFromDict( SFs[bkg], bin_order = bins, name = \"TF_%s_%s\"%(bkg,hsh))\n SF_hists[bkg].GetXaxis().SetLabelSize(0.05)\n SF_hists[bkg].SetLineColor( hist_colors[bkg] )\n SF_hists[bkg].SetMarkerColor( hist_colors[bkg] )\n SF_hists[bkg].SetMarkerColor( hist_colors[bkg] )\n SF_hists[bkg].Draw(dOpt_)\n SF_hists[bkg].SetMinimum(0.65)\n SF_hists[bkg].SetMaximum(1.7)\n dOpt_='same %s'%dOpt\n #output_name = name.replace(\".pkl\", \"_SFs.pkl\")\n name = name if name.endswith(\".pkl\") else \"%s.pkl\"%name\n degTools.pickle.dump( SFs, file('%s/%s'%(saveDir, name) , 'w') )\n degTools.saveCanvas( canv_sfs, plotDir, name.replace(\".pkl\",\"\") )\n return SFs, SF_hists",
"def plot_hist_snfit_sncosmo(self):\n \n self.read_sncosmo(path='../sugar_analysis_data/results/res_salt2_SNF_'+str(self.width)+'.txt')\n self.read_snfit_results(snfit_res_path='../sugar_analysis_data/results/results_snfit_'+str(self.width)+'.txt')\n\n# self.read_sncosmo(path='../sugar_analysis_data/results/res_salt2_SNF_GF.txt')\n# self.read_snfit_results(snfit_res_path='../sugar_analysis_data/results/results_snfit_GF.txt')\n\n self.diff_x0_sncosmo = []\n self.diff_x0_err_sncosmo = []\n self.diff_x1_sncosmo = []\n self.diff_x1_err_sncosmo = [] \n self.diff_c_sncosmo = []\n self.diff_c_err_sncosmo = [] \n self.diff_mb_sncosmo = []\n self.diff_mb_err_sncosmo = [] \n self.diff_cov_x0_x1_sncosmo = []\n self.diff_cov_x0_c_sncosmo = []\n self.diff_cov_x1_c_sncosmo = []\n self.diff_cov_mb_x1_sncosmo = []\n self.diff_cov_mb_c_sncosmo = []\n self.diff_chi2 = []\n for i in range (len(self.sn_name)):\n for j in range (len(self.sncosmo_sn_name)):\n if self.sn_name[i] == self.sncosmo_sn_name[j]:\n if np.abs(self.x1[i] - self.sncosmo_x1[j]) < 0.02:\n self.diff_x0_sncosmo.append(self.x0[i] - self.sncosmo_x0[j])\n self.diff_x0_err_sncosmo.append(self.x0_err[i] - self.sncosmo_x0_err[j])\n self.diff_x1_sncosmo.append(self.x1[i] - self.sncosmo_x1[j])\n self.diff_x1_err_sncosmo.append(self.x1_err[i] - self.sncosmo_x1_err[j]) \n self.diff_c_sncosmo.append(self.c[i] - self.sncosmo_c[j])\n self.diff_c_err_sncosmo.append(self.c_err[i] - self.sncosmo_c_err[j]) \n self.diff_mb_sncosmo.append(self.mb[i] - self.sncosmo_mb[j])\n self.diff_mb_err_sncosmo.append(self.mb_err[i] - self.sncosmo_mb_err[j])\n self.diff_chi2.append(self.snfit_chi2[i] - self.sncosmo_chi2[j])\n# self.diff_cov_x0_x1_sncosmo.append()\n# self.diff_cov_x0_c_sncosmo.append()\n# self.diff_cov_x1_c_sncosmo.append()\n# self.diff_cov_mb_x1_sncosmo.append()\n# self.diff_cov_mb_c_sncosmo.append()\n else:\n print self.x1[i] - self.sncosmo_x1[j], self.sn_name[i],self.sncosmo_sn_name[j], self.x1[i], self.sncosmo_x1[j]\n\n# rcParams['font.size'] = 16.\n# font = {'family': 'normal', 'size': 16}\n# rc('axes', linewidth=1.5)\n# rc(\"text\", usetex=True)\n# rc('font', family='serif')\n# rc('font', serif='Times')\n# rc('legend', fontsize=25)\n# rc('xtick.major', size=5, width=1.5)\n# rc('ytick.major', size=5, width=1.5)\n# rc('xtick.minor', size=3, width=1)\n# rc('ytick.minor', size=3, width=1)\n# fig = plt.figure(figsize=(8.,8.)) \n# \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x0_sncosmo,50,label='$\\Delta$ x0_'+str(self.width))\n ax0_2.hist(self.diff_x0_err_sncosmo,50,label='$\\Delta$ x0 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n# ax0_1.ticklabel_format(axis='x', style='scientific', scilimits=(-1, 2))\n pdffile = '../sugar_analysis_data/results/x0_plot_'+str(self.width)+'.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x1_sncosmo,50,label='$\\Delta$ X1_'+str(self.width))\n ax0_2.hist(self.diff_x1_err_sncosmo,50,label='$\\Delta$ X1 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n plt.ticklabel_format(axis='x', style='scientific', scilimits=(-1, 2))\n pdffile = '../sugar_analysis_data/results/x1_plot_'+str(self.width)+'.pdf'\n 
plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_c_sncosmo,50,label='$\\Delta$ Color_'+str(self.width))\n ax0_2.hist(self.diff_c_err_sncosmo,50,label='$\\Delta$ Color error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n plt.ticklabel_format(axis='x', style='scientific', scilimits=(-1, 2))\n pdffile = '../sugar_analysis_data/results/color_plot_'+str(self.width)+'.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n\n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_mb_sncosmo,50,label='$\\Delta$ mb_'+str(self.width))\n ax0_2.hist(self.diff_mb_err_sncosmo,50,label='$\\Delta$ mb error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n plt.ticklabel_format(axis='x', style='scientific', scilimits=(-1, 2))\n pdffile = '../sugar_analysis_data/results/mb_plot_'+str(self.width)+'.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n\n plt.hist(self.diff_chi2,50,label='$\\Delta$ chi2_'+str(self.width))\n pdffile = '../sugar_analysis_data/results/chi2_'+str(self.width)+'.pdf'\n plt.legend()\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()",
"def save_figs(self):\n if not self._fitted:\n self.fit()\n #self._message(\"Saving plots...\")\n # 1. Generate the required PNG plots\n # 1.1 Truncation plots\n for i,s in enumerate(self.samples):\n fig,ax=plt.subplots(1,2,figsize=(8,4))\n cyct=np.arange(self.nvalues)\n cycf=np.arange(self._cutoffidx[i])\n cycd=0.5*(cyct[1:]+cyct[:-1])\n ax[0].plot(cyct,self.samplesdata[:,i],'k.-',linewidth=0.5,label=\"Full series\")\n ax[0].plot(cycf,self.samplesdata[:self._cutoffidx[i],i],'r-',linewidth=1,label=\"Truncated\")\n ax[0].set_xlim([0,self.nvalues-1])\n ax[0].set_ylim([0,self.samplesdata.max()*1.1])\n ax[0].set_xlabel(\"Cycle\")\n ax[0].set_ylabel(\"Fluorescence (a.u.)\")\n ax[0].set_title(\"Detected fluorescence\")\n plt.legend(loc='upper left',frameon=False)\n # First derivative\n ax[1].plot(cycd,self.samplesdatadiff[:,i],'k.-',linewidth=0.5)\n ax[1].axvline(self._cutoffidx[i],color='r')\n ax[1].set_xlim([0,self.nvalues-1])\n ax[1].set_ylim([self.samplesdatadiff.min()*1.1,self.samplesdatadiff.max()*1.1])\n ax[1].set_xlabel(\"Cycle\")\n ax[1].set_ylabel(\"dF/dCycle (a.u.)\")\n ax[1].set_title(\"Fluorescence rate\")\n plt.tight_layout()\n fn=get_valid_fname(self.samples[i])\n figname=\"%s_%s_%s.svg\"%(self.ID,\"01truncation\",fn)\n self.info['samples'][s]['Data truncation for fitting']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close() \n # 1.2 Fitting plots\n for i,s in enumerate(self.samples):\n fig,ax=plt.subplots(1,3,figsize=(12,4))\n cyct=np.arange(self.nvalues)\n cycf=np.arange(self._cutoffidx[i])\n ax[0].plot(cyct,self.samplesdata[:,i],'k:',linewidth=0.5,label=\"Full series\")\n ax[0].plot(cycf,self.samplesdata[:self._cutoffidx[i],i],'r.-',linewidth=0.5,label=\"Truncated\")\n #ax[0].plot(cycf,self.mak3fpre[s],'y-',linewidth=1,label=\"prefit\")\n ax[0].plot(cycf,self.mak3fluorescence[s],'g-',linewidth=1,label=\"MAK3 fit\")\n ax[0].axvline(self._cutoffidx[i],color='k')\n ax[0].set_xlim([0,self.nvalues-1])\n ax[0].set_ylim([0,self.samplesdata.max()*1.1])\n ax[0].set_xlabel(\"Cycle\")\n ax[0].set_ylabel(\"Fluorescence (a.u.)\")\n ax[0].set_title(\"Detected fluorescence\")\n ax[0].legend(loc='upper left',frameon=False)\n # DNA levels\n ax[1].plot(cycf,self.mak3concentration[s],'g-',linewidth=1,label=\"MAK3\")\n ax[1].axvline(self._cutoffidx[i],color='k')\n ax[1].set_xlim([0,self.nvalues-1])\n ax[1].set_ylim([0,self.mak3concentration[s].max()*1.1])\n ax[1].set_xlabel(\"Cycle\")\n ax[1].set_ylabel(\"concentration (a.u.)\")\n ax[1].set_title(\"estimated cDNA levels\")\n # Efficiency\n ax[2].plot(cycf,self.mak3efficiency[s],'b-',linewidth=1,label=\"MAK3\")\n ax[2].axvline(self._cutoffidx[i],color='k')\n ax[2].set_xlim([0,self.nvalues-1])\n ax[2].set_ylim([0,1.1])\n ax[2].set_xlabel(\"Cycle\")\n ax[2].set_ylabel(\"Efficiency\")\n ax[2].set_title(\"Amplification efficiency\") \n plt.tight_layout()\n fn=get_valid_fname(self.samples[i])\n figname=\"%s_%s_%s.svg\"%(self.ID,\"02mak3\",fn)\n self.info['samples'][s]['MAK3 Fitting']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close()\n # 2 Initial concentrations\n figwdth=np.maximum(5,0.4*self.nsamples+1)\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.initialConcentration.values())\n k=list(self.initialConcentration.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00initialConcentration\")\n 
self.info['figname_initialConcentration']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close()\n # 3 Fitting Error\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.fitting_error.values())\n k=list(self.fitting_error.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n ax.set_ylim([0,1e-2])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00fittingError\")\n self.info['figname_fittingError']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 4 kinetic constant\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.k.values())\n k=list(self.k.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00kineticConstant\")\n self.info['figname_k']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 5 background fluorescence\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.Fb.values())\n k=list(self.Fb.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00bkgFluorescence\")\n self.info['figname_Fb']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 6 slope\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.slope.values())\n k=list(self.slope.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n ax.set_ylim([0,0.025])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00fluorescenceSlope\")\n self.info['figname_slope']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))",
"def plot_histogram(bins, data, title, saving_path, hist_name):\n\n x = np.asarray(data)\n plt.figure()\n plt.hist(x[np.isfinite(x)], bins)\n plt.title(title)\n if not os.path.exists(saving_path):\n os.mkdir(saving_path)\n plt.savefig(saving_path + hist_name)",
"def parameters_histograms(w, dw, a, da, b, db):\n w = w.cpu()\n dw = dw.cpu()\n a = a.cpu()\n da = da.cpu()\n b = b.cpu()\n db = db.cpu()\n \n fig = plt.figure(figsize=(10,6))\n ax = fig.add_subplot(231)\n ax.hist(w.reshape(1, w.shape[0] * w.shape[1]))\n ax.set_title('Weights', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(232)\n ax.hist(dw.reshape(1, dw.shape[0] * dw.shape[1]))\n ax.set_title('Weights variations', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(233)\n ax.hist(a)\n ax.set_title('Visible bias', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(234)\n ax.hist(da)\n ax.set_title('Visible bias variations', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(235)\n ax.hist(b)\n ax.set_title('Hidden bias', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(236)\n ax.hist(db)\n ax.set_title('Hidden bias variations', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.subplots_adjust(hspace=0.25)\n plt.show()\n plt.close('all')",
"def plotResultsNoNoise(inputfile, title, bins=10):\n path = datetime.datetime.now().isoformat()\n os.mkdir(path)\n path += '/'\n\n results = cPickle.load(open(inputfile))\n #copy input to the path\n try:\n shutil.copy2(inputfile, path+inputfile)\n except:\n pass\n\n print '\\n\\n\\n\\nFitted centre:'\n\n e = results['eclean'] - results['eCTI']\n e1 = results['e1clean'] - results['e1CTI']\n e2 = results['e2clean'] - results['e2CTI']\n x = results['xclean'] - results['xCTI']\n y = results['yclean'] - results['yCTI']\n r2 = (results['R2clean'] - results['R2CTI']) / results['R2clean']\n meane = np.mean(e)\n meane1 = np.mean(e1)\n meane2 = np.mean(e2)\n meanx = np.mean(x)\n meany = np.mean(y)\n meanr2 = np.mean(r2)\n\n print 'Delta e, e_1, e_2:', meane, meane1, meane2\n #print 'std e, e_1, e_2:', np.std(e), np.std(e1), np.std(e2)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(e, bins=bins, color='b', label='$e$', alpha=0.5)\n ax.hist(e1, bins=bins, color='r', label='$e_{1}$', alpha=0.5)\n ax.hist(e2, bins=bins, color='g', label='$e_{2}$', alpha=0.5)\n ax.axvline(x=meane, color='b', label='%.2e' % meane)\n ax.axvline(x=meane1, color='r', label='%.2e' % meane1)\n ax.axvline(x=meane2, color='g', label='%.2e' % meane2)\n ax.set_xlabel(r'$\\delta e$ [w/o - w/ CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'ellipticityDeltaFittedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(e1, e2, s=8, color='r', marker='o', alpha=0.5, label='w/o - w/ CTI')\n ax.set_xlabel(r'$\\delta e_{1}$')\n ax.set_ylabel(r'$\\delta e_{2}$')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'ellipticityFittedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(results['e1clean'], results['e2clean'], s=8, color='k', marker='s', alpha=0.1, label='no CTI')\n ax.scatter(results['e1CTI'], results['e2CTI'], s=8, color='r', marker='o', alpha=0.4, label='CTI')\n ax.set_xlim(-1, 1)\n ax.set_ylim(-1, 1)\n ax.set_xlabel(r'$e_{1}$')\n ax.set_ylabel(r'$e_{2}$')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'e1vse2FittedCentre.pdf')\n plt.close()\n\n print 'delta R2 / R2: mean, std ', meanr2, np.std(r2)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(r2, bins=bins, color='b', label='$R^{2}$')\n ax.axvline(x=meanr2,color='b', label='%.2e' % meanr2)\n ax.set_xlabel(r'$\\frac{\\delta R^{2}}{R^{2}_{ref}}$ [w/o - w CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'sizeDeltaFittedCentre.pdf')\n plt.close()\n\n print 'delta x: mean, std ', meanx, np.std(x)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(x, bins=bins, color='b', label='X Centre')\n ax.axvline(x=meanx,color='b', label='%.2e' % meanx)\n ax.set_xlabel(r'$\\delta X - X_{CTI}$ [w/o - w CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'xDeltaFittedCentre.pdf')\n plt.close()\n\n print 'delta y: mean, std ', meany, np.std(y)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(y, bins=bins, color='b', label='Y Centre')\n ax.axvline(x=meany,color='b', label='%.2e' % meany)\n ax.set_xlabel(r'$\\delta Y - Y_{CTI}$ [w/o - w CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'yDeltaFittedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(x, y, s=15, color='k', marker='s', alpha=0.5, 
label='w/o - w/ CTI')\n ax.set_xlabel(r'$\\delta X$')\n ax.set_ylabel(r'$\\delta Y$')\n plt.legend(shadow=True, fancybox=True, scatterpoints=1)\n plt.savefig(path+'coordinatesFittedCentre.pdf')\n plt.close()\n\n print '\\n\\n\\n\\nFixed centre:'\n\n e = results['eclean'] - results['eCTIfixed']\n e1 = results['e1clean'] - results['e1CTIfixed']\n e2 = results['e2clean'] - results['e2CTIfixed']\n x = results['xclean'] - results['xCTIfixed']\n y = results['yclean'] - results['yCTIfixed']\n r2 = (results['R2clean'] - results['R2CTIfixed']) / results['R2clean']\n meane = np.mean(e)\n meane1 = np.mean(e1)\n meane2 = np.mean(e2)\n meanx = np.mean(x)\n meany = np.mean(y)\n meanr2 = np.mean(r2)\n\n print 'Delta e, e_1, e_2:', meane, meane1, meane2\n #print 'std e, e_1, e_2:', np.std(e), np.std(e1), np.std(e2)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(e, bins=bins, color='b', label='$e$', alpha=0.5)\n ax.hist(e1, bins=bins, color='r', label='$e_{1}$', alpha=0.5)\n ax.hist(e2, bins=bins, color='g', label='$e_{2}$', alpha=0.5)\n ax.axvline(x=meane, color='b', label='%.2e' % meane)\n ax.axvline(x=meane1, color='r', label='%.2e' % meane1)\n ax.axvline(x=meane2, color='g', label='%.2e' % meane2)\n ax.set_xlabel(r'$\\delta e$ [w/o - w/ CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'ellipticityDeltaFixedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(e1, e2, s=8, color='r', marker='o', alpha=0.5, label='w/o - w/ CTI')\n ax.set_xlabel(r'$\\delta e_{1}$')\n ax.set_ylabel(r'$\\delta e_{2}$')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'ellipticityFixedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(results['e1clean'], results['e2clean'], s=8, color='k', marker='s', alpha=0.1, label='no CTI')\n ax.scatter(results['e1CTIfixed'], results['e2CTIfixed'], s=8, color='r', marker='o', alpha=0.4, label='CTI')\n ax.set_xlabel(r'$e_{1}$')\n ax.set_ylabel(r'$e_{2}$')\n ax.set_xlim(-1, 1)\n ax.set_ylim(-1, 1)\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'e1vse2FixedCentre.pdf')\n plt.close()\n\n print 'delta R2 / R2: mean, std ', meanr2, np.std(r2)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(r2, bins=bins, color='b', label='$R^{2}$')\n ax.axvline(x=meanr2, color='b', label='%.2e' % meanr2)\n ax.set_xlabel(r'$\\frac{\\delta R^{2}}{R^{2}_{ref}}$ [w/o - w CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'sizeDeltaFixedCentre.pdf')\n plt.close()\n\n print 'delta x: mean, std ', meanx, np.std(x)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(x, bins=bins, color='b', label='X Centre')\n ax.axvline(x=meanx, color='b', label='%.2e' % meanx)\n ax.set_xlabel(r'$X - X_{CTI}$')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'xDeltaFixedCentre.pdf')\n plt.close()\n\n print 'delta y: mean, std ', meany, np.std(y)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(y, bins=bins, color='b', label='Y Centre')\n ax.axvline(x=meany, color='b', label='%.2e' % meany)\n ax.set_xlabel(r'$Y - Y_{CTI}$')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'yDeltaFixedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(x, y, s=15, color='k', marker='s', alpha=0.5, label='w/o - w/ CTI')\n ax.set_xlabel(r'$\\delta X$')\n 
ax.set_ylabel(r'$\\delta Y$')\n plt.legend(shadow=True, fancybox=True, scatterpoints=1)\n plt.savefig(path+'coordinatesFixedCentre.pdf')\n plt.close()"
] | [
"0.7740603",
"0.6428022",
"0.6419536",
"0.6322531",
"0.62623787",
"0.62226415",
"0.61514246",
"0.61257786",
"0.6123551",
"0.6112254",
"0.6079733",
"0.6064892",
"0.6062117",
"0.60601425",
"0.6035143",
"0.6033051",
"0.601378",
"0.6013595",
"0.6009719",
"0.5982897",
"0.59727293",
"0.59513307",
"0.59256643",
"0.5920028",
"0.59134245",
"0.59081846",
"0.59062093",
"0.5888135",
"0.587446",
"0.5869458"
] | 0.7675895 | 1 |
Plot and save confusion matrix computed from predicted and real labels. Arguments | def plot_confusion_matrix(real_labels, pred_prob, classes,
normalize=False,
img_name="confusion.png"):
real_labels = np.array(real_labels)
# Binarize predicted probabilities
pred_prob = np.array(pred_prob)
pred_labels = np.zeros_like(pred_prob)
pred_labels[pred_prob >= 0.5] = 1
    cm = confusion_matrix(real_labels, pred_labels)
    # Normalize before plotting so the heatmap, the cell annotations and the
    # colour threshold all use the same matrix
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    #plt.title("Confusion matrix")
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes)
    plt.yticks(tick_marks, classes, rotation=90)
    thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig(img_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_confusion_matrix(self, y_true, y_pred, title=None):\r\n\r\n if not title:\r\n title = 'confusion matrix'\r\n\r\n # Compute confusion matrix\r\n\r\n y_pred = np.array(y_pred)\r\n y_true = np.array(y_true)\r\n cm = confusion_matrix(y_true, y_pred)\r\n # Only use the labels that appear in the data\r\n classes = self.classes\r\n print('Confusion matrix')\r\n\r\n print(cm)\r\n fig2, ax = plt.subplots()\r\n im = ax.imshow(cm, interpolation='nearest')\r\n ax.figure.colorbar(im, ax=ax)\r\n # We want to show all ticks...\r\n ax.set(xticks=np.arange(cm.shape[1]),\r\n yticks=np.arange(cm.shape[0]),\r\n # ... and label them with the respective list entries\r\n xticklabels=classes, yticklabels=classes,\r\n title=title,\r\n ylabel='True label',\r\n xlabel='Predicted label')\r\n\r\n # Rotate the tick labels and set their alignment.\r\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\r\n rotation_mode=\"anchor\")\r\n\r\n # Loop over data dimensions and create text annotations.\r\n fmt = '.2f'\r\n thresh = cm.max() / 2.\r\n for i in range(cm.shape[0]):\r\n for j in range(cm.shape[1]):\r\n ax.text(j, i, format(cm[i, j], fmt),\r\n ha=\"center\", va=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n fig2.tight_layout()\r\n file_loc = [str(self.training_path) +\r\n '\\\\checkpoints\\\\confusion_matrix.jpg'] # NEED TO FIX\r\n s = \"\"\r\n s = s.join(file_loc)\r\n conf_path = Path(s)\r\n plt.savefig(conf_path)\r\n plt.show()\r\n\r\n return ax",
"def plotConfusionMatrix(y, pred, title, labels, outfile, cmap=plt.cm.Blues):\n \n cm = confusion_matrix(y, pred);\n ncm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n accuracy = accuracy_score(y, pred)\n \n fig = plt.figure(figsize=(10, 10))\n plt.imshow(ncm, interpolation='nearest', cmap=cmap, vmin=0, vmax=1)\n plt.title(title+\" Acc: \"+str(accuracy)+\")\")\n plt.colorbar()\n for i in range(0,len(labels)):\n for j in range(0,len(labels)):\n plt.text(j,i,cm[i,j],va='center',ha='center')\n tick_marks = np.arange(len(labels))\n plt.xticks(tick_marks, labels, rotation=45)\n plt.yticks(tick_marks, labels)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n pdfplot = PdfPages(outfile);\n pdfplot.savefig(fig)\n pdfplot.close()",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap= cm.Blues,\n save:bool = False):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cmat = confusion_matrix(y_true, y_pred,labels = classes)\n # Only use the labels that appear in the data\n if normalize:\n cmat = cmat.astype('float') / cmat.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cmat)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cmat, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cmat.shape[1]),\n yticks=np.arange(cmat.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cmat.max() / 2.\n for i in range(cmat.shape[0]):\n for j in range(cmat.shape[1]):\n ax.text(j, i, format(cmat[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cmat[i, j] > thresh else \"black\")\n fig.tight_layout()\n plt.show()\n if save:\n cwd=os.getcwd()\n fig.savefig(os.path.join(cwd, 'Keras\\\\Model_images', title +'_CM.png'))\n return ax",
"def plot_confusion_matrix(save_name, y_true, y_pred, normalize=False, title=None, cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = ['0 (C)', '1 (C#)', '2 (D)', '3 (D#)', '4 (E)', '5 (F)', '6 (F#)','7 (G)', '8 (G#)', '9 (A)', '10 (A#)', '11 (B)']\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n if save_name != None:\n plt.savefig(save_name)\n return ax",
"def plot_confusion_matrix(y_pred, y_true, classes_list):\n fig = plt.figure(figsize=(8, 8))\n cm = confusion_matrix(y_pred, y_true)\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title('Confusion matrix')\n plt.colorbar()\n tick_marks = np.arange(len(classes_list))\n plt.xticks(tick_marks, classes_list, rotation=45)\n plt.yticks(tick_marks, classes_list)\n cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)\n\n thresh = cm.max() / 2.0\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n return fig",
"def plot(self):\n plt.imshow(self.cm, interpolation='nearest', cmap=self.cmap)\n plt.title(self.title)\n plt.colorbar()\n tick_marks = np.arange(len(self.classes))\n plt.xticks(tick_marks, self.classes, rotation=45)\n plt.yticks(tick_marks, self.classes)\n \n if self.normalize:\n self.cm = self.cm.astype('float') / self.cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(self.cm)\n \n thresh = self.cm.max() / 2.\n for i, j in itertools.product(range(self.cm.shape[0]), range(self.cm.shape[1])):\n plt.text(j, i, self.cm[i, j], horizontalalignment=\"center\", color=\"white\" if self.cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True Label')\n plt.xlabel('Predicted label')",
"def confusion_matrix(y_true, y_pred):\n skplt.plot_confusion_matrix(y_true, y_pred, normalize=True)\n plt.show()",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots(figsize=(8, 8))\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes)\n ax.set_title(title,size = 20)\n ax.set_ylabel('True label',size = 20)\n ax.set_xlabel('Predicted label',size = 20)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\",size = 18)\n plt.setp(ax.get_yticklabels(),size = 18)\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n name = OUTFOLDER + \"/confusion_matrix_batch%d_layers%d_epochs%d_f1%d\" % (BATCH_SIZE,LAYERS,EPOCHS,f1_mean_test*100)\n if normalize:\n name = name + \"_norm\"\n plt.savefig(name)\n plt.close()\n return ax",
"def plot_confusion_matrix1(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n #classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues,\n directory=\"plots\",\n session_name=\"\"):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = unique_labels(y_true, y_pred)\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n plot_filepath = \"{}/{}_function_LOC_hist.png\".format(directory, session_name)\n plt.savefig(plot_filepath)\n return ax",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return fig, ax",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=True,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix (in %)'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n #tmp = unique_labels(y_true, y_pred)\n #classes = classes[tmp]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n cm = cm * 100 # affichage en % pour une meilleure visibilité\n print(\"Normalized confusion matrix (in %)\")\n else:\n print('Confusion matrix, without normalization')\n\n #print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label',\n # xlim = (-0.5,len(classes)-0.5),\n # ylim = (-0.5,len(classes)-0.5)\n )\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=90, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n # fmt = '.2f' if normalize else 'd'\n fmt = '.0f' if normalize else 'd' #pour une meilleure visibilité\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n #fig.tight_layout()\n plt.show()\n return ax",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n ul = unique_labels(y_true, y_pred)\n if np.sum(ul)==0:\n \tul = [0, 1]\n classes = classes[ul]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n #classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=90, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n fig.set_size_inches(18.5, 10.5)\n return ax",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n #print(cm)\n\n fig, ax = plt.subplots( figsize=(12,8), dpi=120)\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return fig",
"def plot_confusion_matrix(y_true, y_pred, classes=None,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n if classes is not None:\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n else:\n classes = unique_labels(y_true, y_pred)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return fig",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n #classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=[0, 1], yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return fig, cm",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n labels = [int(x) for x in unique_labels(y_true, y_pred)]\n classes = classes[labels]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax, cm",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n #classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n \n \n # Only use the labels that appear in the data\n \n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n #print(\"Normalized confusion matrix\")\n else:\n pass\n #print('Confusion matrix, without normalization')\n\n #print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n # classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n # print(\"Normalized confusion matrix\")\n # else:\n # print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.4f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax",
"def plot_confusion_matrix(y_true, y_pred,\n labels=None,\n normalize=False,\n title=None,\n cmap='YlGn'):\n import pandas as pd\n from sklearn.metrics import confusion_matrix\n from sklearn.utils.multiclass import unique_labels\n\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n\n # Only use the labels that appear in the data\n classes = unique_labels(y_true, y_pred)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n if isinstance(labels, pd.DataFrame):\n id = []\n for i in classes:\n test = np.flatnonzero(labels.iloc[:,-1] == i)\n if test.size != 0:\n id.append(int(test))\n labels = list(labels.iloc[id, 0])\n else:\n labels = classes\n\n fig = plt.figure(figsize=(12,9))\n ax = plt.gca()\n im = ax.imshow(cm, interpolation='nearest', aspect='equal', origin='lower', cmap=plt.get_cmap(cmap))\n ax.figure.colorbar(im, ax=ax)\n # ax.xaxis.tick_top()\n\n ax.set(xticks=np.arange(len(labels)),\n yticks=np.arange(len(labels)),\n xticklabels=labels, yticklabels=labels,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax",
"def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=True,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n #classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = 100*cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n \n \n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.1f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax"
] | [
"0.7909803",
"0.77706945",
"0.7693289",
"0.7669506",
"0.7644297",
"0.7616977",
"0.757757",
"0.75686",
"0.7555622",
"0.7483602",
"0.74831176",
"0.74774474",
"0.7476126",
"0.7471495",
"0.7468893",
"0.7465456",
"0.74556404",
"0.7446722",
"0.74444157",
"0.7443814",
"0.74419767",
"0.7438832",
"0.7430613",
"0.7430613",
"0.7430613",
"0.7430613",
"0.7430613",
"0.7430613",
"0.74304605",
"0.74278754"
] | 0.7841449 | 1 |
Discover and add a Tasmota sensor. | async def async_discover_sensor(tasmota_entity, discovery_hash):
async_add_entities(
[
TasmotaSensor(
tasmota_entity=tasmota_entity, discovery_hash=discovery_hash
)
]
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def async_add_binary_sensor(mac):\n if USB_MOTION_ID in api_stick.devices[mac].features:\n _LOGGER.debug(\"Add binary_sensors for %s\", mac)\n async_add_entities([USBBinarySensor(api_stick.devices[mac])])\n\n # Register services\n platform.async_register_entity_service(\n SERVICE_CONFIGURE_SCAN,\n {\n vol.Required(ATTR_SCAN_SENSITIVITY_MODE): vol.In(\n SCAN_SENSITIVITY_MODES\n ),\n vol.Required(ATTR_SCAN_RESET_TIMER): vol.All(\n vol.Coerce(int), vol.Range(min=1, max=240)\n ),\n vol.Required(ATTR_SCAN_DAYLIGHT_MODE): cv.boolean,\n },\n \"_service_configure_scan\",\n )\n platform.async_register_entity_service(\n SERVICE_CONFIGURE_BATTERY,\n {\n vol.Required(ATTR_SED_STAY_ACTIVE): vol.All(\n vol.Coerce(int), vol.Range(min=1, max=120)\n ),\n vol.Required(ATTR_SED_SLEEP_FOR): vol.All(\n vol.Coerce(int), vol.Range(min=10, max=60)\n ),\n vol.Required(ATTR_SED_MAINTENANCE_INTERVAL): vol.All(\n vol.Coerce(int), vol.Range(min=5, max=1440)\n ),\n vol.Required(ATTR_SED_CLOCK_SYNC): cv.boolean,\n vol.Required(ATTR_SED_CLOCK_INTERVAL): vol.All(\n vol.Coerce(int), vol.Range(min=60, max=10080)\n ),\n },\n \"_service_configure_battery_savings\",\n )",
"def discoved_sensor(mac):\n hass.async_create_task(async_add_sensor(mac))",
"async def async_setup_entry(hass, config_entry, async_add_entities):\n\n async def async_discover_sensor(tasmota_entity, discovery_hash):\n \"\"\"Discover and add a Tasmota sensor.\"\"\"\n async_add_entities(\n [\n TasmotaSensor(\n tasmota_entity=tasmota_entity, discovery_hash=discovery_hash\n )\n ]\n )\n\n hass.data[\n DATA_REMOVE_DISCOVER_COMPONENT.format(sensor.DOMAIN)\n ] = async_dispatcher_connect(\n hass,\n TASMOTA_DISCOVERY_ENTITY_NEW.format(sensor.DOMAIN, TASMOTA_DOMAIN),\n async_discover_sensor,\n )",
"def flash_tasmota(self, flash_mode, serial_port):\n # Make sure device is tasmota\n if self.software != 'tasmota':\n print('{f_name} is {software}, not tasmota'.format(**self))\n return(False)\n if current_tasmota_version != get_tasmota_version():\n print('{RED}Error: Tasmota version mismatch expected: \"{expected}\", got \"{current}\"{NC}'.format(**colors, expected=current_tasmota_version, current= get_tasmota_version()))\n return(False)\n self.write_tasmota_config()\n\n correctPIO = os.path.join(espqdir, 'platformio_override.ini')\n tasmotaPIO = os.path.join(tasmota_dir, 'platformio_override.ini')\n if not os.path.exists(tasmotaPIO) or not cmp(correctPIO, tasmotaPIO):\n copyfile(correctPIO, tasmotaPIO)\n\n\n os.chdir(tasmota_dir)\n\n pio_call = 'platformio run -e tasmota-{flash_mode} -t upload'.format(flash_mode=flash_mode);\n\n # if we're flashing via wifi or serial port is specified to us,\n # specify it to pio\n if flash_mode == 'wifi' or serial_port:\n pio_call += ' --upload-port {port}'\n\n if flash_mode == 'wifi':\n self.flashing_notice(flash_mode, self.ip_addr)\n # If we don't know the IP address, ask device\n if not 'ip_addr' in self or not self.ip_addr:\n print('No IP address for this device in the config.'\n 'Querying device...')\n self.query_tas_status()\n if 'ip' in self.reported:\n print('{name} is online at {ip}'.format(name=self.f_name,\n ip=self.reported['ip']))\n self.ip_addr = self.reported['ip']\n else:\n print('{f_name} did not respond at {c_topic}. IP address '\n 'unavailable. Skipping device...'.format(**self))\n return(False)\n pio_call = pio_call.format(port=(self.ip_addr + '/u2'))\n elif flash_mode == 'serial':\n self.flashing_notice(flash_mode, serial_port)\n pio_call = pio_call.format(port=serial_port)\n print('{BLUE}{f_name}\\'s MQTT topic is '\n '{topic}{NC}'.format(**colors, **self))\n print(pio_call)\n flash_result = call(pio_call, shell=True)\n return(True if flash_result == 0 else False)",
"def flash_tasmota(self):\n # Make sure device is tasmota\n if self.software != 'tasmota':\n print('{f_name} is {software}, not tasmota'.format(**self))\n return(False)\n if current_tasmota_version != get_tasmota_version():\n print('{RED}Error: Tasmota version mismatch{NOCOLOR}'.format(**colors))\n return(False)\n self.write_tasmota_config()\n\n correctPIO = os.path.join(espqdir, 'platformio.ini')\n tasmotaPIO = os.path.join(tasmotadir, 'platformio.ini')\n if filecmp.cmp(correctPIO, tasmotaPIO) == False:\n shutil.copyfile(correctPIO, tasmotaPIO)\n\n os.chdir(tasmotadir)\n pio_call = 'platformio run -e {environment} -t upload --upload-port {port}'\n if self.flash_mode == 'wifi':\n pio_call = pio_call.format(environment='sonoff-wifi', port=(self.ip_addr + '/u2'))\n print(('{BLUE}Now flashing {module} {f_name} with {software} via '\n '{flash_mode} at {ip_addr}{NOCOLOR}'.format(**colors, **self)))\n elif self.flash_mode == 'serial':\n pio_call = pio_call.format(environment='sonoff-serial', port=self.serial_port)\n print(('{BLUE}Now flashing {module} {f_name} with {software} via '\n '{flash_mode} at {serial_port}{NOCOLOR}'.format(**colors, **self)))\n print('{BLUE}{f_name}\\'s MQTT topic is {base_topic}/{topic}{NOCOLOR}'.format(**colors, **self))\n print(pio_call)\n flash_result = call(pio_call, shell=True)\n os.chdir(espqdir)\n return(True if flash_result == 0 else False)",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n host: str = config[CONF_HOST]\n port: int = config[CONF_PORT]\n name: str = config[CONF_NAME]\n url = f\"http://{host}:{port}/api/LiveData.xml\"\n\n gateway = Ted5000Gateway(url)\n\n # Get MUT information to create the sensors.\n gateway.update()\n\n entities = []\n for mtu in gateway.data:\n for description in SENSORS:\n entities.append(Ted5000Sensor(gateway, name, mtu, description))\n\n add_entities(entities)",
"def LSM_acquisition(add):\n # control register\n CTRL0 = 0x1f # p.34, accelerator\n CTRL1 = 0x20\n CTRL2 = 0x21\n CTRL5 = 0x24 # p.36, magnetic\n CTRL6 = 0x25\n CTRL7 = 0x26\n FIFO_CTRL = 0x2e # p.40\n # accelerater\n OUT_X_L_A = 0x28\n OUT_X_H_A = 0x29\n OUT_Y_L_A = 0x2a\n OUT_Y_H_A = 0x2b\n OUT_Z_L_A = 0x2c\n OUT_Z_H_A = 0x2d\n # magentic\n OUT_X_L_M = 0x08\n OUT_X_H_M = 0x09\n OUT_Y_L_M = 0x0a\n OUT_Y_H_M = 0x0b\n OUT_Z_L_M = 0x0c\n OUT_Z_H_M = 0x0d\n\n # follow lsm303D arduino library\n # AFS = 0, +-2g scale\n bus.write_byte_data(add, CTRL2, 0x00)\n # 50 Hz AODR, all axis enable\n bus.write_byte_data(add, CTRL1, 0x57)\n # high resolution, 6.25Hz MODR\n bus.write_byte_data(add, CTRL5, 0x64)\n # +-4 gauss scale\n bus.write_byte_data(add, CTRL6, 0x20)\n # low power mode off, continuous conversion mode\n bus.write_byte_data(add, CTRL7, 0x00)\n # # FIFO mode\n # bus.write_byte_data(add, CTRL0, 0b01000000)\n # bus.write_byte_data(add, FIFO_CTRL, 0b01000000)\n # # accelerator with 12.5Hz, all axis enable\n # bus.write_byte_data(add, CTRL1, 0b00110111)\n # # magnetic 12.5Hz, high resolutn, temp en\n # bus.write_byte_data(add, CTRL5, 0b11100000)\n # # full scale range \\pm 12 gauss\n # bus.write_byte_data(add, CTRL6, 0b01101000)\n # # enable magnetic\n # bus.write_byte_data(add, CTRL7, 0x00)\n\n # accelerator accumulate\n while True:\n uint16_ax = (bus.read_byte_data(add, OUT_X_H_A) << 8) + \\\n bus.read_byte_data(add, OUT_X_L_A)\n uint16_ay = (bus.read_byte_data(add, OUT_Y_H_A) << 8) + \\\n bus.read_byte_data(add, OUT_Y_L_A)\n uint16_az = (bus.read_byte_data(add, OUT_Z_H_A) << 8) + \\\n bus.read_byte_data(add, OUT_Z_L_A)\n\n uint16_mx = (bus.read_byte_data(add, OUT_X_H_M) << 8) + \\\n bus.read_byte_data(add, OUT_X_L_M)\n uint16_my = (bus.read_byte_data(add, OUT_Y_H_M) << 8) + \\\n bus.read_byte_data(add, OUT_Y_L_M)\n uint16_mz = (bus.read_byte_data(add, OUT_Z_H_M) << 8) + \\\n bus.read_byte_data(add, OUT_Z_L_M)\n # accelerometer 12 bit left aligned\n # ax = twos_comp(uint16_ax>>4, 12)\n # ay = twos_comp(uint16_ay>>4, 12)\n # az = twos_comp(uint16_az>>4, 12)\n ax = twos_comp(uint16_ax, 16)\n ay = twos_comp(uint16_ay, 16)\n az = twos_comp(uint16_az, 16)\n\n mx = twos_comp(uint16_mx, 16)\n my = twos_comp(uint16_my, 16)\n mz = twos_comp(uint16_mz, 16)\n\n yield [ax, ay, az, mx, my, mz]",
"def registerDevice(self):\n\t\tr = req.post(\"http://localhost:9090/devices?id={}&sensors={}_{}&board={}\".format(\n\t\t\tBOARD_ID,\n\t\t\tSENSOR1,\n\t\t\tSENSOR2,\n\t\t\tBOARD\n\t\t))\n\t\tprint (\"[{}] Device Registered on Room Catalog\".format(\n\t\t\tint(time.time()),\n\t\t))",
"async def async_setup_platform(\n hass, config, async_add_entities, discovery_info=None):\n data = await nooa.get_data()\n entities = [\n NooaSensor(data, \"R\"),\n NooaSensor(data, \"S\"),\n NooaSensor(data, \"G\"),\n ]\n async_add_entities(entities)",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n name = config.get(CONF_NAME)\n code = config.get(CONF_CODE)\n username = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n\n add_devices([SimpliSafeAlarm(name, username, password, code)])",
"def setup(hass, config):\n\n logger = logging.getLogger(__name__)\n\n try:\n import tellcore.telldus as telldus\n import tellcore.constants as tellcore_constants\n except ImportError:\n logger.exception(\n \"Failed to import tellcore\")\n return False\n\n core = telldus.TelldusCore()\n\n sensors = core.sensors()\n\n if len(sensors) == 0:\n logger.error(\"No Tellstick sensors found\")\n return False\n\n sensor_value_descriptions = {\n tellcore_constants.TELLSTICK_TEMPERATURE:\n DatatypeDescription(\n 'temperature', config[DOMAIN]['temperature_scale']),\n\n tellcore_constants.TELLSTICK_HUMIDITY:\n DatatypeDescription('humidity', ' %'),\n\n tellcore_constants.TELLSTICK_RAINRATE:\n DatatypeDescription('rain rate', ''),\n\n tellcore_constants.TELLSTICK_RAINTOTAL:\n DatatypeDescription('rain total', ''),\n\n tellcore_constants.TELLSTICK_WINDDIRECTION:\n DatatypeDescription('wind direction', ''),\n\n tellcore_constants.TELLSTICK_WINDAVERAGE:\n DatatypeDescription('wind average', ''),\n\n tellcore_constants.TELLSTICK_WINDGUST:\n DatatypeDescription('wind gust', '')\n }\n\n def update_sensor_value_state(sensor_name, sensor_value):\n \"Update the state of a sensor value\"\n sensor_value_description = \\\n sensor_value_descriptions[sensor_value.datatype]\n sensor_value_name = '{} {}'.format(\n sensor_name, sensor_value_description.name)\n\n entity_id = ENTITY_ID_FORMAT.format(\n util.slugify(sensor_value_name))\n\n state = sensor_value.value\n\n state_attr = {\n ATTR_FRIENDLY_NAME: sensor_value_name,\n ATTR_UNIT_OF_MEASUREMENT: sensor_value_description.unit\n }\n\n hass.states.set(entity_id, state, state_attr)\n\n sensor_value_datatypes = [\n tellcore_constants.TELLSTICK_TEMPERATURE,\n tellcore_constants.TELLSTICK_HUMIDITY,\n tellcore_constants.TELLSTICK_RAINRATE,\n tellcore_constants.TELLSTICK_RAINTOTAL,\n tellcore_constants.TELLSTICK_WINDDIRECTION,\n tellcore_constants.TELLSTICK_WINDAVERAGE,\n tellcore_constants.TELLSTICK_WINDGUST\n ]\n\n def update_sensor_state(sensor):\n \"Updates all the sensor values from the sensor\"\n try:\n sensor_name = config[DOMAIN][str(sensor.id)]\n except KeyError:\n if 'only_named' in config[DOMAIN]:\n return\n sensor_name = str(sensor.id)\n\n for datatype in sensor_value_datatypes:\n if datatype & int(config[DOMAIN]['datatype_mask']) and \\\n sensor.has_value(datatype):\n update_sensor_value_state(sensor_name, sensor.value(datatype))\n\n # pylint: disable=unused-argument\n def update_sensors_state(time):\n \"Update the state of all sensors\"\n for sensor in sensors:\n update_sensor_state(sensor)\n\n update_sensors_state(None)\n\n hass.track_time_change(update_sensors_state, second=[0, 30])\n\n return True",
"async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n\n # Check config again during load - dependency available\n config = _check_sensor_schema(config)\n\n # Init all default sensors\n sensor_def = pysma.Sensors()\n\n # Sensor from the custom config\n sensor_def.add(\n [\n pysma.Sensor(o[CONF_KEY], n, o[CONF_UNIT], o[CONF_FACTOR], o.get(CONF_PATH))\n for n, o in config[CONF_CUSTOM].items()\n ]\n )\n\n # Use all sensors by default\n config_sensors = config[CONF_SENSORS]\n hass_sensors = []\n used_sensors = []\n\n if isinstance(config_sensors, dict): # will be remove from 0.99\n if not config_sensors: # Use all sensors by default\n config_sensors = {s.name: [] for s in sensor_def}\n\n # Prepare all Home Assistant sensor entities\n for name, attr in config_sensors.items():\n sub_sensors = [sensor_def[s] for s in attr]\n hass_sensors.append(SMAsensor(sensor_def[name], sub_sensors))\n used_sensors.append(name)\n used_sensors.extend(attr)\n\n if isinstance(config_sensors, list):\n if not config_sensors: # Use all sensors by default\n config_sensors = [s.name for s in sensor_def]\n used_sensors = list(set(config_sensors + list(config[CONF_CUSTOM].keys())))\n for sensor in used_sensors:\n hass_sensors.append(SMAsensor(sensor_def[sensor], []))\n\n used_sensors = [sensor_def[s] for s in set(used_sensors)]\n async_add_entities(hass_sensors)\n\n # Init the SMA interface\n session = async_get_clientsession(hass, verify_ssl=config[CONF_VERIFY_SSL])\n grp = config[CONF_GROUP]\n\n protocol = \"https\" if config[CONF_SSL] else \"http\"\n url = f\"{protocol}://{config[CONF_HOST]}\"\n\n sma = pysma.SMA(session, url, config[CONF_PASSWORD], group=grp)\n\n # Ensure we logout on shutdown\n async def async_close_session(event):\n \"\"\"Close the session.\"\"\"\n await sma.close_session()\n\n hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, async_close_session)\n\n backoff = 0\n backoff_step = 0\n\n async def async_sma(event):\n \"\"\"Update all the SMA sensors.\"\"\"\n nonlocal backoff, backoff_step\n if backoff > 1:\n backoff -= 1\n return\n\n values = await sma.read(used_sensors)\n if not values:\n try:\n backoff = [1, 1, 1, 6, 30][backoff_step]\n backoff_step += 1\n except IndexError:\n backoff = 60\n return\n backoff_step = 0\n\n for sensor in hass_sensors:\n sensor.async_update_values()\n\n interval = config.get(CONF_SCAN_INTERVAL) or timedelta(seconds=5)\n async_track_time_interval(hass, async_sma, interval)",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n dev_id = config.get(CONF_ID, None)\n devname = config.get(CONF_NAME, \"EnOcean binary sensor\")\n add_devices([EnOceanBinarySensor(dev_id, devname)])",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n bt_device_id: int = config[CONF_BT_DEVICE_ID]\n\n beacons: dict[str, dict[str, str]] = config[CONF_BEACONS]\n devices: list[EddystoneTemp] = []\n\n for dev_name, properties in beacons.items():\n namespace = get_from_conf(properties, CONF_NAMESPACE, 20)\n instance = get_from_conf(properties, CONF_INSTANCE, 12)\n name = properties.get(CONF_NAME, dev_name)\n\n if instance is None or namespace is None:\n _LOGGER.error(\"Skipping %s\", dev_name)\n continue\n\n devices.append(EddystoneTemp(name, namespace, instance))\n\n if devices:\n mon = Monitor(hass, devices, bt_device_id)\n\n def monitor_stop(event: Event) -> None:\n \"\"\"Stop the monitor thread.\"\"\"\n _LOGGER.info(\"Stopping scanner for Eddystone beacons\")\n mon.stop()\n\n def monitor_start(event: Event) -> None:\n \"\"\"Start the monitor thread.\"\"\"\n _LOGGER.info(\"Starting scanner for Eddystone beacons\")\n mon.start()\n\n add_entities(devices)\n mon.start()\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)\n hass.bus.listen_once(EVENT_HOMEASSISTANT_START, monitor_start)\n else:\n _LOGGER.warning(\"No devices were added\")",
"def setup_platform(\n hass: HomeAssistant,\n config: Dict,\n add_devices: Callable,\n discovery_info: Optional[Dict] = None,\n) -> None:\n havdalah = config[HAVDALAH_MINUTES]\n candle_light = config[CANDLE_LIGHT_MINUTES]\n cities = config[GEONAMES]\n cities_list = cities.split(\",\")\n\n add_devices(\n [\n ShabbatTimes(\n hass,\n city,\n \"Shabbat Times {}\".format(city.replace(\"-\", \"_\")),\n havdalah,\n candle_light,\n )\n for city in cities_list\n ]\n )",
"def setup(port, baud = int('9600'), apn = 'internet.movistar.com.co'):\n try:\n module = serial.Serial('/dev/tty{}'.format(port.upper(), '{}'.format(baud)))\n time.sleep(0.1)\n if module.isOpen():\n print ('Serial Port Available')\n else:\n print ('Serial Port not Available')\n except serial.SerialException:\n print ('Something goes wrong')\n module.close()\n try:\n module.write('AT+CGATT=1\\r\\n'.encode())\n time.sleep(0.01)\n module.write(('AT+CGDCONT=1,\\\"IP\\\",\\\"{}\\\"\\r\\n').format(apn).encode()) \n time.sleep(0.01)\n module.write(('AT+CGSOCKCONT=1,\\\"IP\\\",\\\"{}\\\"\\r\\n').format(apn).encode())\n module.write(('AT+CSOCKSETPN=1\\r\\n').encode())\n time.sleep(0.01)\n module.write(('AT+CGPSURL=\\\"supl.google.com:7276\\\"\\r\\n').encode())\n time.sleep(0.1)\n module.write(('AT+CGPSSSL=1\\r\\n').encode())\n time.sleep(0.1)\n #module.write(('AT+CGPS=1,3\\r\\n').encode())\n #time.sleep(0.2)\n #if _valid_gps(module):\n # print ('GPS configurated')\n #else:\n # print ('GPS not configurated')\n print ('SIM53XX Configurated!')\n except serial.SerialException:\n print ('Something failed during configuration\\rPlase try again...')\n\n return module",
"async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n if discovery_info is None:\n return\n _LOGGER.debug(\"Setting up sensor platform.\")\n coordinator = hass.data[DOMAIN][\"coordinator\"]\n appliances = coordinator.data[\"appliances\"]\n async_add_entities(\n [\n NatureRemoE(coordinator, appliance)\n for appliance in appliances.values()\n if appliance[\"type\"] == \"EL_SMART_METER\"\n ]\n )",
"async def async_discover_sensor(dev, instance):\r\n if isinstance(dev, dict):\r\n if 'sensor_type' in dev:\r\n sensor_type = dev['sensor_type']\r\n async_add_entities([ShellyBinaryInfoSensor(dev['itm'], instance,\r\n sensor_type, sensor_type, dev['ukey'])])\r\n return\r\n if dev.device_type == \"SWITCH\":\r\n async_add_entities([ShellySwitch(dev, instance)])\r\n elif dev.device_type == \"BINARY_SENSOR\":\r\n async_add_entities([\r\n ShellyBinarySensor(dev, instance, dev.sensor_type,\r\n dev.sensor_type)\r\n ])",
"async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n if hass.config.time_zone is None:\n _LOGGER.error(\"Timezone is not set in Home Assistant configuration\")\n return False\n\n sensors = []\n\n for device, device_config in config[CONF_SENSORS].items():\n date_str = device_config.get(ATTR_DATE)\n is_lunar = device_config.get(CONF_IS_LUNAR_DATE)\n is_intercalation = device_config.get(CONF_INTERCALATION)\n anniv_type = device_config.get(CONF_TYPE)\n\n name = device_config.get(CONF_NAME)\n if name == '':\n name = device\n\n is_mmdd = False\n if dt_util.parse_date(date_str) is None:\n year_added_date_str = str(dt_util.as_local(dt_util.utcnow()).date().year) + \"-\" + date_str\n if dt_util.parse_date(year_added_date_str) is not None:\n date_str = year_added_date_str\n is_mmdd = True\n else:\n continue\n\n sensor = AnniversarySensor(hass, device, name, date_str, is_lunar, is_intercalation, anniv_type, is_mmdd)\n async_track_point_in_utc_time(\n hass, sensor.point_in_time_listener, sensor.get_next_interval())\n\n sensors.append(sensor)\n\n sensor = AnniversaryTTSSensor(hass, \"anniversary_tts\", config.get(CONF_TTS_DAYS), config.get(CONF_TTS_SCAN_INTERVAL))\n async_track_point_in_utc_time(\n hass, sensor.point_in_time_listener, sensor.get_next_interval())\n sensors.append(sensor)\n\n async_add_entities(sensors, True)",
"def connect(self):\n self.arduino = Serial(self.port, self.baud_rate, timeout=self.timeout)",
"def setup_platform(hass, config, add_entities, discovery_info=None):\n devices = []\n dev = discovery_info.get(\"dev\")\n param = discovery_info.get(\"param\")\n devices = []\n for idx in dev['data']:\n if dev['devtype'] in OT_SENSOR_TYPES and idx in [\"Z\",\"V\",\"P3\",\"P4\"]:\n devices.append(LifeSmartSensor(dev,idx,dev['data'][idx],param))\n else:\n devices.append(LifeSmartSensor(dev,idx,dev['data'][idx],param))\n add_entities(devices)",
"def async_add_devices_discovery(hass, discovery_info, async_add_devices):\n items = discovery_info[CONF_ITEMS]\n for item in items:\n async_add_devices([AmpioSwitch(hass, item)])",
"def continue_setup_platform(hass, config, token, add_devices, discovery_info=None):\n if \"trakt\" in _CONFIGURING:\n hass.components.configurator.request_done(_CONFIGURING.pop(\"trakt\"))\n \n add_devices([TraktMyShowCalendarSensor(hass, config, token)], True)",
"def setup_platform(hass, config, add_entities, discovery_info=None):\n sensors = hass.data[COMPONENT_DOMAIN][SENSORS]\n actuators = hass.data[COMPONENT_DOMAIN][ACTUATORS]\n\n sensor_entities = []\n for sensor in sensors:\n belongs_to_climate_actuator = False\n for actuator in actuators:\n if (\n actuator.type() == ActuatorType.TEMPERATURE\n and actuator.name() in sensor.name()\n ):\n belongs_to_climate_actuator = True\n break\n\n if not belongs_to_climate_actuator:\n sensor_entities.append(XS1Sensor(sensor))\n\n add_entities(sensor_entities)",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n host = config[CONF_HOST]\n monitored_variables = config[CONF_MONITORED_VARIABLES]\n\n charger = openevsewifi.Charger(host)\n\n entities = [\n OpenEVSESensor(charger, description)\n for description in SENSOR_TYPES\n if description.key in monitored_variables\n ]\n\n add_entities(entities, True)",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n import sharp_aquos_rc\n\n name = config.get(CONF_NAME)\n port = config.get(CONF_PORT)\n username = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n\n if discovery_info:\n _LOGGER.debug('%s', discovery_info)\n vals = discovery_info.split(':')\n if len(vals) > 1:\n port = vals[1]\n\n host = vals[0]\n remote = sharp_aquos_rc.TV(host,\n port,\n username,\n password)\n add_devices([SharpAquosTVDevice(name, remote)])\n return True\n\n host = config.get(CONF_HOST)\n remote = sharp_aquos_rc.TV(host,\n port,\n username,\n password)\n\n add_devices([SharpAquosTVDevice(name, remote)])\n return True",
"def send_AStoDataBase(id_arduino, actuator_name, actuator_status):\n #Corentin : Ecrire code",
"def add_station(self, station_id=None, time=None, location=None):",
"async def async_setup_entry(hass, entry, async_add_entities):\n stick = hass.data[DOMAIN][entry.entry_id][\"stick\"]\n\n async def async_add_sensor(mac):\n \"\"\"Add plugwise sensor.\"\"\"\n node = stick.node(mac)\n for sensor_type in node.get_sensors():\n if sensor_type in SENSORS and sensor_type != AVAILABLE_SENSOR_ID:\n async_add_entities([PlugwiseSensor(node, mac, sensor_type)])\n\n for mac in hass.data[DOMAIN][entry.entry_id][\"sensor\"]:\n hass.async_create_task(async_add_sensor(mac))\n\n def discoved_sensor(mac):\n \"\"\"Add newly discovered sensor\"\"\"\n hass.async_create_task(async_add_sensor(mac))\n\n #Listen for discovered nodes\n stick.subscribe_stick_callback(discoved_sensor, CB_NEW_NODE)",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n resource = config.get(CONF_RESOURCE)\n pin = config.get(CONF_PIN)\n\n if None in (resource, pin):\n _LOGGER.error('Not all required config keys present: %s',\n ', '.join((CONF_RESOURCE, CONF_PIN)))\n return False\n\n try:\n response = requests.get(resource, timeout=10).json()\n except requests.exceptions.MissingSchema:\n _LOGGER.error('Missing resource or schema in configuration. '\n 'Add http:// to your URL.')\n return False\n except requests.exceptions.ConnectionError:\n _LOGGER.error('No route to device at %s. '\n 'Please check the IP address in the configuration file.',\n resource)\n return False\n\n arest = ArestData(resource, pin)\n\n add_devices([ArestBinarySensor(arest,\n resource,\n config.get('name', response['name']),\n pin)])"
] | [
"0.6059688",
"0.583049",
"0.5694654",
"0.5584414",
"0.5579093",
"0.55689627",
"0.55304766",
"0.5517644",
"0.55015105",
"0.550125",
"0.5478783",
"0.54301524",
"0.54066986",
"0.5383559",
"0.5372278",
"0.5371746",
"0.5358791",
"0.53568757",
"0.53443986",
"0.5335553",
"0.5331908",
"0.53258544",
"0.5317057",
"0.5299774",
"0.5281674",
"0.5280486",
"0.526922",
"0.52417165",
"0.52397054",
"0.52049094"
] | 0.65760076 | 0 |
Installs the OpenMPI package on the VM. | def _Install(vm):
vm.Install('build_tools')
vm.Install('wget')
vm.RemoteCommand('wget %s -P %s' % (MPI_URL, INSTALL_DIR))
vm.RemoteCommand('cd %s && tar xvfz %s' % (INSTALL_DIR, MPI_TAR))
make_jobs = vm.NumCpusForBenchmark()
shared_lib_command = ('--enable-shared' if FLAGS.openmpi_enable_shared
else '--disable-shared')
if FLAGS.openmpi_with_cuda_support:
cuda_cmd = ('--with-cuda=/usr/local/cuda-{version}/ '
'--with-cuda-libdir=/usr/local/cuda-{version}/lib64/'.format(
version=FLAGS.cuda_toolkit_version))
else:
cuda_cmd = ''
config_cmd = (
'./configure --enable-static {shared_lib_cmd} --prefix=/usr '
'{cuda_cmd}'.format(shared_lib_cmd=shared_lib_command,
cuda_cmd=cuda_cmd))
vm.RobustRemoteCommand(
'cd %s && %s && make -j %s && sudo make install' %
(MPI_DIR, config_cmd, make_jobs)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def AptInstall(vm):\n vm.RobustRemoteCommand('sudo apt-get {}'.format(REMOVE_MPI_CMD))\n _Install(vm)",
"def YumInstall(vm):\n vm.RobustRemoteCommand('sudo yum {}'.format(REMOVE_MPI_CMD))\n _Install(vm)",
"def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()",
"def install(self):\n\n self.clean_git_checkout(self.git_repo, '/src')\n\n self.__copy_config_templates();\n\n self.local(\"sudo pip install -r src/requirements.txt --upgrade\")\n\n if not self.is_local():\n PiService.install(self) #copy to remote\n\n self.sudo(\"pip install -r src/requirements.txt --upgrade\")",
"def pip_install():\n _require_environment()\n remote(PIP_INSTALL_PREFIX)",
"def AptInstall(vm):\n _Install(vm)",
"def AptInstall(vm):\n _Install(vm)",
"def install():\n deploy()\n configure()",
"def _Install(vm):\n nthreads = vm.NumCpusForBenchmark() * 2\n vm.Install('build_tools')\n vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, SILO_DIR))\n vm.RemoteCommand('cd {0} && git checkout {1}'.format(SILO_DIR,\n GIT_TAG))\n # This is due to a failing clone command when executing behind a proxy.\n # Replacing the protocol to https instead of git fixes the issue.\n vm.RemoteCommand('git config --global url.\"https://\".insteadOf git://')\n # Disable -Wmaybe-uninitialized errors when GCC has the option to workaround\n # a spurious error in masstree.\n cxx = '\"g++ -std=gnu++0x \\\n $(echo | gcc -Wmaybe-uninitialized -E - >/dev/null 2>&1 && \\\n echo -Wno-error=maybe-uninitialized)\"'\n vm.RemoteCommand(\n 'cd {0} && CXX={2} MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make -j{1} dbtest'\n .format(SILO_DIR, nthreads, cxx))",
"def _Install(vm):\n if vm.OS_TYPE not in MOFED_OS_MAPPING:\n raise ValueError('OS type {} not in {}'.format(vm.OS_TYPE,\n sorted(MOFED_OS_MAPPING)))\n driver = MOFED_DRIVER.format(version=FLAGS.mofed_version,\n os=MOFED_OS_MAPPING[vm.OS_TYPE])\n vm.InstallPackages('libdapl2 libmlx4-1')\n try:\n vm.RemoteCommand('curl -fSsL {} | tar -zxpf -'.format(driver))\n except:\n raise errors.Setup.InvalidSetupError('Failed to download {}'.format(driver))\n stdout, _ = vm.RemoteCommand('cd MLNX_OFED_LINUX-* && sudo ./mlnxofedinstall '\n '--force')\n if not regex_util.ExtractExactlyOneMatch(r'Installation passed successfully',\n stdout):\n raise errors.Benchmarks.PrepareException(\n 'Mellanox OpenFabrics driver isn\\'t installed successfully.')\n vm.RemoteCommand('sudo /etc/init.d/openibd restart')\n vm.RemoteCommand(\"sudo sed -i -e 's/# OS.EnableRDMA=y/\"\n \"OS.EnableRDMA=y/g' /etc/waagent.conf\")\n vm.RemoteCommand(\"sudo sed -i -e 's/# OS.UpdateRdmaDriver=y/\"\n \"OS.UpdateRdmaDriver=y/g' /etc/waagent.conf\")\n # https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes-hpc#rdma-capable-instances\n vm.RemoteCommand('cat << EOF | sudo tee -a /etc/security/limits.conf\\n'\n '* hard memlock unlimited\\n'\n '* soft memlock unlimited\\n'\n '* hard nofile 65535\\n'\n '* soft nofile 65535\\n'\n 'EOF')",
"def install(cluster=False):\n \"\"\"Configure openvwsitch and neutron packages\"\"\"\n package_ensure('python-amqp')\n package_ensure('neutron-server')\n package_ensure('neutron-plugin-openvswitch')\n package_ensure('python-pyparsing')\n package_ensure('python-mysqldb')\n if cluster:\n stop()",
"def AptInstall(vm):\n for package in APT_PACKAGES:\n vm.InstallPackages(package)",
"def install():\n execute(generate)\n execute(upload)",
"def AptInstall(vm):\n vm.InstallPackages(APT_PACKAGES)\n _Install(vm)",
"def install(version=minv.__version__, release=\"1\"):\n sudo(\"yum install -y %s\" % \" \".join(RPMS))\n sudo(\"yum install -y minv-%s-%s.noarch.rpm\" % (version, release))\n sudo(\n 'printf \"abcdefghijklmnopq\\nabcdefghijklmnopq\" '\n '| sh minv_install_postgresql.sh --tablespace /disk/minv_tablespace/'\n )",
"def install_local(self) -> None:\n pass",
"def install(self):\n PiService.install(self)\n self.sudo('svn co https://svn.code.sf.net/p/mjpg-streamer/code /etc/mjpg-streamer')\n self.run('cd /etc/mjpg-streamer/mjpg-streamer && sudo make USE_LIB4VL=true clean all && sudo make DESTDIR=/usr install')",
"def YumInstall(vm):\n _Install(vm)",
"def YumInstall(vm):\n _Install(vm)",
"def install():\n sudo('apt-get install python')",
"def install(self) -> None:\n if self.local_packages:\n self.prepare_install_local()\n self.install_local()\n if self.remote_packages:\n self.install_from_url()\n if self.repository_packages:\n self.install_from_repository()\n if self.debuginfo_packages:\n self.install_debuginfo()",
"def install():\n remote_egg_path = os.path.join(remote_egg_dir, get_egg_name())\n sudo('easy_install -U %s' % remote_egg_path)\n sudo('rm %s' % remote_egg_path)",
"def _InstallOSReqs(self, vm):\n if 'ubuntu' in vm.OS_TYPE:\n vm.InstallPackages(' '.join(PREREQ_UBUNTU))\n elif 'centos' in vm.OS_TYPE:\n vm.InstallPackages(' '.join(PREREQ_CENTOS))\n else:\n raise errors.VirtualMachine.VirtualMachineError('OS not supported')",
"def install():\n verun('pip install -r {0}'.format(requirements))",
"def setup():\n sudo(\"minv_setup.sh\")",
"def test_defaults_centos(self):\n ompi = openmpi()\n self.assertEqual(str(ompi),\nr'''# OpenMPI version 4.0.5\nRUN yum install -y \\\n bzip2 \\\n file \\\n hwloc \\\n make \\\n numactl-devel \\\n openssh-clients \\\n perl \\\n tar \\\n wget && \\\n rm -rf /var/cache/yum/*\nRUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://www.open-mpi.org/software/ompi/v4.0/downloads/openmpi-4.0.5.tar.bz2 && \\\n mkdir -p /var/tmp && tar -x -f /var/tmp/openmpi-4.0.5.tar.bz2 -C /var/tmp -j && \\\n cd /var/tmp/openmpi-4.0.5 && ./configure --prefix=/usr/local/openmpi --disable-getpwuid --enable-orterun-prefix-by-default --with-cuda --with-verbs && \\\n make -j$(nproc) && \\\n make -j$(nproc) install && \\\n rm -rf /var/tmp/openmpi-4.0.5 /var/tmp/openmpi-4.0.5.tar.bz2\nENV LD_LIBRARY_PATH=/usr/local/openmpi/lib:$LD_LIBRARY_PATH \\\n PATH=/usr/local/openmpi/bin:$PATH''')",
"def install_system_packages():\n print(\"Installiere notwendige Pakete...\")\n _run('sudo apt update')\n _run(\n \"sudo apt install \"\n \"apache2 apache2-dev python3-dev python3-venv python3-pip postgresql-contrib libpq-dev\"\n )\n print(\"Fertig!\", end=\"\\n\\n\")",
"def local_install(self):\n import subprocess\n\n print(\"Making local install\")\n from pathlib import Path\n\n root = Path(__file__).parent.parent\n\n def run(args, shell=False):\n print(\"---\", \" \".join(args))\n return subprocess.check_call(args, cwd=curdir, shell=shell)\n\n def get_version():\n import json\n\n p = Path(curdir / \"package.json\")\n contents = json.loads(p.read_text())\n return contents[\"version\"]\n\n print(\"--- installing RobotFramework Language Server\")\n curdir = root / \"robotframework-ls\"\n run(\"python -m dev vendor_robocorp_ls_core\".split())\n run(\"vsce package\".split(), shell=sys.platform == \"win32\")\n run(\n f\"code --install-extension robotframework-lsp-{get_version()}.vsix\".split(),\n shell=sys.platform == \"win32\",\n )\n run(\"python -m dev remove_vendor_robocorp_ls_core\".split())\n\n print(\"\\n--- installing Robocorp Code\")\n curdir = root / \"robocorp-code\"\n run(\"python -m dev vendor_robocorp_ls_core\".split())\n run(\"vsce package\".split(), shell=sys.platform == \"win32\")\n run(\n f\"code --install-extension robocorp-code-{get_version()}.vsix\".split(),\n shell=sys.platform == \"win32\",\n )\n run(\"python -m dev remove_vendor_robocorp_ls_core\".split())",
"def AptInstall(vm):\n vm.Install('build_tools')\n vm.InstallPackages(APT_PACKAGES)",
"def install_ssh(app):\n os.system('lxc-attach -n %s -- apk update' % app)\n os.system('lxc-attach -n %s -- apk add openssh' % app)\n # Config sshd\n config = '/var/lib/lxc/%s/rootfs/etc/ssh/sshd_config' % app\n with open(config, \"a\") as myfile:\n myfile.write(\"RSAAuthentication yes\\nPubkeyAuthentication yes\\nPermitRootLogin yes\\nPermitEmptyPasswords yes\")\n os.system('lxc-attach -n %s -- /etc/init.d/sshd start' % app)"
] | [
"0.749848",
"0.6820002",
"0.6406495",
"0.6319433",
"0.62312424",
"0.61590135",
"0.61590135",
"0.6028888",
"0.59898764",
"0.5981365",
"0.5926291",
"0.59055406",
"0.5884631",
"0.58507264",
"0.5765704",
"0.5713537",
"0.57048243",
"0.5699972",
"0.5699972",
"0.5667024",
"0.5660338",
"0.5635533",
"0.56315666",
"0.55915576",
"0.5573308",
"0.5564503",
"0.5563866",
"0.55606675",
"0.5559384",
"0.55526215"
] | 0.80011517 | 0 |
Installs the OpenMPI package on the VM. | def AptInstall(vm):
vm.RobustRemoteCommand('sudo apt-get {}'.format(REMOVE_MPI_CMD))
_Install(vm) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _Install(vm):\n vm.Install('build_tools')\n vm.Install('wget')\n vm.RemoteCommand('wget %s -P %s' % (MPI_URL, INSTALL_DIR))\n vm.RemoteCommand('cd %s && tar xvfz %s' % (INSTALL_DIR, MPI_TAR))\n make_jobs = vm.NumCpusForBenchmark()\n shared_lib_command = ('--enable-shared' if FLAGS.openmpi_enable_shared\n else '--disable-shared')\n if FLAGS.openmpi_with_cuda_support:\n cuda_cmd = ('--with-cuda=/usr/local/cuda-{version}/ '\n '--with-cuda-libdir=/usr/local/cuda-{version}/lib64/'.format(\n version=FLAGS.cuda_toolkit_version))\n else:\n cuda_cmd = ''\n config_cmd = (\n './configure --enable-static {shared_lib_cmd} --prefix=/usr '\n '{cuda_cmd}'.format(shared_lib_cmd=shared_lib_command,\n cuda_cmd=cuda_cmd))\n vm.RobustRemoteCommand(\n 'cd %s && %s && make -j %s && sudo make install' %\n (MPI_DIR, config_cmd, make_jobs))",
"def YumInstall(vm):\n vm.RobustRemoteCommand('sudo yum {}'.format(REMOVE_MPI_CMD))\n _Install(vm)",
"def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()",
"def install(self):\n\n self.clean_git_checkout(self.git_repo, '/src')\n\n self.__copy_config_templates();\n\n self.local(\"sudo pip install -r src/requirements.txt --upgrade\")\n\n if not self.is_local():\n PiService.install(self) #copy to remote\n\n self.sudo(\"pip install -r src/requirements.txt --upgrade\")",
"def pip_install():\n _require_environment()\n remote(PIP_INSTALL_PREFIX)",
"def AptInstall(vm):\n _Install(vm)",
"def AptInstall(vm):\n _Install(vm)",
"def install():\n deploy()\n configure()",
"def _Install(vm):\n nthreads = vm.NumCpusForBenchmark() * 2\n vm.Install('build_tools')\n vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, SILO_DIR))\n vm.RemoteCommand('cd {0} && git checkout {1}'.format(SILO_DIR,\n GIT_TAG))\n # This is due to a failing clone command when executing behind a proxy.\n # Replacing the protocol to https instead of git fixes the issue.\n vm.RemoteCommand('git config --global url.\"https://\".insteadOf git://')\n # Disable -Wmaybe-uninitialized errors when GCC has the option to workaround\n # a spurious error in masstree.\n cxx = '\"g++ -std=gnu++0x \\\n $(echo | gcc -Wmaybe-uninitialized -E - >/dev/null 2>&1 && \\\n echo -Wno-error=maybe-uninitialized)\"'\n vm.RemoteCommand(\n 'cd {0} && CXX={2} MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make -j{1} dbtest'\n .format(SILO_DIR, nthreads, cxx))",
"def _Install(vm):\n if vm.OS_TYPE not in MOFED_OS_MAPPING:\n raise ValueError('OS type {} not in {}'.format(vm.OS_TYPE,\n sorted(MOFED_OS_MAPPING)))\n driver = MOFED_DRIVER.format(version=FLAGS.mofed_version,\n os=MOFED_OS_MAPPING[vm.OS_TYPE])\n vm.InstallPackages('libdapl2 libmlx4-1')\n try:\n vm.RemoteCommand('curl -fSsL {} | tar -zxpf -'.format(driver))\n except:\n raise errors.Setup.InvalidSetupError('Failed to download {}'.format(driver))\n stdout, _ = vm.RemoteCommand('cd MLNX_OFED_LINUX-* && sudo ./mlnxofedinstall '\n '--force')\n if not regex_util.ExtractExactlyOneMatch(r'Installation passed successfully',\n stdout):\n raise errors.Benchmarks.PrepareException(\n 'Mellanox OpenFabrics driver isn\\'t installed successfully.')\n vm.RemoteCommand('sudo /etc/init.d/openibd restart')\n vm.RemoteCommand(\"sudo sed -i -e 's/# OS.EnableRDMA=y/\"\n \"OS.EnableRDMA=y/g' /etc/waagent.conf\")\n vm.RemoteCommand(\"sudo sed -i -e 's/# OS.UpdateRdmaDriver=y/\"\n \"OS.UpdateRdmaDriver=y/g' /etc/waagent.conf\")\n # https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes-hpc#rdma-capable-instances\n vm.RemoteCommand('cat << EOF | sudo tee -a /etc/security/limits.conf\\n'\n '* hard memlock unlimited\\n'\n '* soft memlock unlimited\\n'\n '* hard nofile 65535\\n'\n '* soft nofile 65535\\n'\n 'EOF')",
"def install(cluster=False):\n \"\"\"Configure openvwsitch and neutron packages\"\"\"\n package_ensure('python-amqp')\n package_ensure('neutron-server')\n package_ensure('neutron-plugin-openvswitch')\n package_ensure('python-pyparsing')\n package_ensure('python-mysqldb')\n if cluster:\n stop()",
"def AptInstall(vm):\n for package in APT_PACKAGES:\n vm.InstallPackages(package)",
"def install():\n execute(generate)\n execute(upload)",
"def AptInstall(vm):\n vm.InstallPackages(APT_PACKAGES)\n _Install(vm)",
"def install(version=minv.__version__, release=\"1\"):\n sudo(\"yum install -y %s\" % \" \".join(RPMS))\n sudo(\"yum install -y minv-%s-%s.noarch.rpm\" % (version, release))\n sudo(\n 'printf \"abcdefghijklmnopq\\nabcdefghijklmnopq\" '\n '| sh minv_install_postgresql.sh --tablespace /disk/minv_tablespace/'\n )",
"def install_local(self) -> None:\n pass",
"def install(self):\n PiService.install(self)\n self.sudo('svn co https://svn.code.sf.net/p/mjpg-streamer/code /etc/mjpg-streamer')\n self.run('cd /etc/mjpg-streamer/mjpg-streamer && sudo make USE_LIB4VL=true clean all && sudo make DESTDIR=/usr install')",
"def YumInstall(vm):\n _Install(vm)",
"def YumInstall(vm):\n _Install(vm)",
"def install():\n sudo('apt-get install python')",
"def install(self) -> None:\n if self.local_packages:\n self.prepare_install_local()\n self.install_local()\n if self.remote_packages:\n self.install_from_url()\n if self.repository_packages:\n self.install_from_repository()\n if self.debuginfo_packages:\n self.install_debuginfo()",
"def install():\n remote_egg_path = os.path.join(remote_egg_dir, get_egg_name())\n sudo('easy_install -U %s' % remote_egg_path)\n sudo('rm %s' % remote_egg_path)",
"def _InstallOSReqs(self, vm):\n if 'ubuntu' in vm.OS_TYPE:\n vm.InstallPackages(' '.join(PREREQ_UBUNTU))\n elif 'centos' in vm.OS_TYPE:\n vm.InstallPackages(' '.join(PREREQ_CENTOS))\n else:\n raise errors.VirtualMachine.VirtualMachineError('OS not supported')",
"def install():\n verun('pip install -r {0}'.format(requirements))",
"def setup():\n sudo(\"minv_setup.sh\")",
"def test_defaults_centos(self):\n ompi = openmpi()\n self.assertEqual(str(ompi),\nr'''# OpenMPI version 4.0.5\nRUN yum install -y \\\n bzip2 \\\n file \\\n hwloc \\\n make \\\n numactl-devel \\\n openssh-clients \\\n perl \\\n tar \\\n wget && \\\n rm -rf /var/cache/yum/*\nRUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://www.open-mpi.org/software/ompi/v4.0/downloads/openmpi-4.0.5.tar.bz2 && \\\n mkdir -p /var/tmp && tar -x -f /var/tmp/openmpi-4.0.5.tar.bz2 -C /var/tmp -j && \\\n cd /var/tmp/openmpi-4.0.5 && ./configure --prefix=/usr/local/openmpi --disable-getpwuid --enable-orterun-prefix-by-default --with-cuda --with-verbs && \\\n make -j$(nproc) && \\\n make -j$(nproc) install && \\\n rm -rf /var/tmp/openmpi-4.0.5 /var/tmp/openmpi-4.0.5.tar.bz2\nENV LD_LIBRARY_PATH=/usr/local/openmpi/lib:$LD_LIBRARY_PATH \\\n PATH=/usr/local/openmpi/bin:$PATH''')",
"def install_system_packages():\n print(\"Installiere notwendige Pakete...\")\n _run('sudo apt update')\n _run(\n \"sudo apt install \"\n \"apache2 apache2-dev python3-dev python3-venv python3-pip postgresql-contrib libpq-dev\"\n )\n print(\"Fertig!\", end=\"\\n\\n\")",
"def local_install(self):\n import subprocess\n\n print(\"Making local install\")\n from pathlib import Path\n\n root = Path(__file__).parent.parent\n\n def run(args, shell=False):\n print(\"---\", \" \".join(args))\n return subprocess.check_call(args, cwd=curdir, shell=shell)\n\n def get_version():\n import json\n\n p = Path(curdir / \"package.json\")\n contents = json.loads(p.read_text())\n return contents[\"version\"]\n\n print(\"--- installing RobotFramework Language Server\")\n curdir = root / \"robotframework-ls\"\n run(\"python -m dev vendor_robocorp_ls_core\".split())\n run(\"vsce package\".split(), shell=sys.platform == \"win32\")\n run(\n f\"code --install-extension robotframework-lsp-{get_version()}.vsix\".split(),\n shell=sys.platform == \"win32\",\n )\n run(\"python -m dev remove_vendor_robocorp_ls_core\".split())\n\n print(\"\\n--- installing Robocorp Code\")\n curdir = root / \"robocorp-code\"\n run(\"python -m dev vendor_robocorp_ls_core\".split())\n run(\"vsce package\".split(), shell=sys.platform == \"win32\")\n run(\n f\"code --install-extension robocorp-code-{get_version()}.vsix\".split(),\n shell=sys.platform == \"win32\",\n )\n run(\"python -m dev remove_vendor_robocorp_ls_core\".split())",
"def AptInstall(vm):\n vm.Install('build_tools')\n vm.InstallPackages(APT_PACKAGES)",
"def install_ssh(app):\n os.system('lxc-attach -n %s -- apk update' % app)\n os.system('lxc-attach -n %s -- apk add openssh' % app)\n # Config sshd\n config = '/var/lib/lxc/%s/rootfs/etc/ssh/sshd_config' % app\n with open(config, \"a\") as myfile:\n myfile.write(\"RSAAuthentication yes\\nPubkeyAuthentication yes\\nPermitRootLogin yes\\nPermitEmptyPasswords yes\")\n os.system('lxc-attach -n %s -- /etc/init.d/sshd start' % app)"
] | [
"0.80011517",
"0.6820002",
"0.6406495",
"0.6319433",
"0.62312424",
"0.61590135",
"0.61590135",
"0.6028888",
"0.59898764",
"0.5981365",
"0.5926291",
"0.59055406",
"0.5884631",
"0.58507264",
"0.5765704",
"0.5713537",
"0.57048243",
"0.5699972",
"0.5699972",
"0.5667024",
"0.5660338",
"0.5635533",
"0.56315666",
"0.55915576",
"0.5573308",
"0.5564503",
"0.5563866",
"0.55606675",
"0.5559384",
"0.55526215"
] | 0.749848 | 1 |
Uninstalls the OpenMPI package on the VM. | def _Uninstall(vm):
vm.RemoteCommand('cd {0} && sudo make uninstall'.format(MPI_DIR)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def AptUninstall(vm):\n remove_str = 'sudo apt-get --purge autoremove -y '\n for package in APT_PACKAGES:\n vm.RemoteCommand(remove_str + package)",
"def AptUninstall(vm):\n _Uninstall(vm)",
"def YumUninstall(vm):\n _Uninstall(vm)",
"def _unprovision_node(self, conn):\n conn.run(f\"rm -rf {EXPORTER_HOME}\")",
"def remove(self):\n self.model_or_sim.remove_package(self)",
"def remove_package(package, remote):\n flavor = remote.os.package_type\n if flavor == 'deb':\n pkgcmd = ['DEBIAN_FRONTEND=noninteractive',\n 'sudo',\n '-E',\n 'apt-get',\n '-y',\n 'purge',\n '{package}'.format(package=package)]\n elif flavor == 'rpm':\n # FIXME: zypper\n pkgcmd = ['sudo',\n 'yum',\n '-y',\n 'erase',\n '{package}'.format(package=package)]\n else:\n log.error('remove_package: bad flavor ' + flavor + '\\n')\n return False\n return remote.run(args=pkgcmd)",
"def uninstall_ubuntu_packages():\n package_clean('python-amqp')\n package_clean('neutron-server')\n package_clean('neutron-plugin-openvswitch')\n package_clean('python-pyparsing')\n package_clean('python-mysqldb')",
"def remove():\n run('pew rm {0}'.format(package_name()))",
"def uninstall(package):\n return G.DEVICE.uninstall_app(package)",
"def PluginUninstall(self, packageName):\n pass",
"def local_uninstall(environment):\n environment.remove_cleanup(\n environment.cfy.local.execute,\n args=['uninstall'],\n )\n result = environment.cfy.local.execute('uninstall')\n assert result['returncode'] == 0, (\n 'Uninstall workflow failed!'\n )",
"def uninstall(self):\n return PackageHelper.uninstall_package(name=self.name)",
"def unintallpack(package_name: str) -> None:\n\tresp = subprocess.call(['pip', 'uninstall', '-y', package_name])",
"def uninstall():\n global _task\n _task.stop()\n _task = None",
"def _uninstall(package_name, remove_all, app_id, cli, app):\n\n package_manager = _get_package_manager()\n err = package.uninstall(\n package_manager, package_name, remove_all, app_id, cli, app)\n if err is not None:\n emitter.publish(err)\n return 1\n\n return 0",
"def uninstall(repo, package, yes):\n uinfo = repo.uninstall(package)\n if not uinfo.installed:\n click.echo('%s is not installed' % package)\n else:\n click.echo('The following paths will be removed:')\n for path in uinfo.paths:\n click.echo(' %s' % click.format_filename(path))\n click.echo()\n if yes or click.confirm('Do you want to uninstall %s?' % package):\n uinfo.perform()\n click.echo('Done!')\n else:\n click.echo('Aborted!')",
"def test_ospf_interface_uninstall(self):\n self._common_uninstall_external_and_unintialized(\n 'some_id', ospf_interface.delete,\n {'interface': {}}\n )",
"def uninstall_mac_processor(interface, mac_profile):\n pass",
"def uninstall(package:str, path:str=None):\r\n logging.info(\"Uninstalling Package {}...\".format(package))\r\n if path is None:\r\n path = get_site_packages_path()\r\n dst = os.path.join(path, package)\r\n\r\n if not os.path.isdir(dst):\r\n raise IOError(\"The Package You Have Attempted To Install Is Not Installed On Your Machine. Use 'install' To Install Packages\")\r\n\r\n shutil.rmtree(dst)\r\n logging.info(\"Finished Uninstalling Package {}\".format(package))",
"def remove():\n vbox = Vbox(env.vm_name)\n vbox.remove()",
"def _uninstall():\n\tif not \"SCRIPTS\" in os.environ:\n\t\tprint \"Please set SCRIPTS environment variable.\"\n\t\tsys.exit(1)\n\t\n\tscript_dir = os.environ[\"SCRIPTS\"]\n\t\n\tif SCRIPT_NAME in os.listdir(script_dir):\n\t\tshutil.rmtree(os.path.join(script_dir, SCRIPT_NAME))\n\tfor name in EXEC_NAMES:\n\t\tif name in os.listdir(\"/bin/\"):\n\t\t\tos.system(\"sudo rm -f /bin/{}\".format(name))",
"def prepareUninstall(self):\n e5App().unregisterPluginObject(pluginTypename)",
"def remove(name):\n if name==\"autopy\":\n print(\"\\n\\tUNINSTALLING WORKING MODULE WILL CAUSE ERRORS AND MAKE YOUR CODE UNUSABLE\\n\")\n choice=input(f\"Are you sure to remove {name}?\\nEnter YES,PROCEED to continue:\")\n if choice == 'YES,PROCEED':os.system(f'python -m pip uninstall {name}')\n else:print(\"Operetion Cancelled\")",
"def uninstall(args):\n scripts = get_console_scripts(args.package)\n for script in scripts:\n path = os.path.join(args.destination, script)\n logger.info('removing {0}'.format(path))\n os.remove(path)",
"def _remove(self):\n self._system.remove(self.get_install_path())\n self._system.remove(self._source_path)",
"def uninstall_rpm_remotely(self, rpm_filename, host, rpm_database = RPM_DATABASE):\n rpm_package_name = rpm_filename[:rpm_filename.index('.')]\n run_remote_command(\"rpm -e %s --dbpath %s\" % (rpm_package_name, rpm_database), host)\n self.check_remote_rpm_uninstall(rpm_package_name, host)",
"def uninstall_python_packages():\n\n if no_python_uninstall():\n print(NO_PYTHON_UNINSTALL_MESSAGE)\n return\n\n # So that we don't constantly uninstall things, use a hash of the packages\n # to be uninstalled. Check it, and skip this if we're up to date.\n hasher = hashlib.sha1()\n hasher.update(repr(PACKAGES_TO_UNINSTALL).encode('utf-8'))\n expected_version = hasher.hexdigest()\n state_file_path = os.path.join(PREREQS_STATE_DIR, \"Python_uninstall.sha1\")\n create_prereqs_cache_dir()\n\n if os.path.isfile(state_file_path):\n with open(state_file_path) as state_file:\n version = state_file.read()\n if version == expected_version:\n print('Python uninstalls unchanged, skipping...')\n return\n\n # Run pip to find the packages we need to get rid of. Believe it or not,\n # edx-val is installed in a way that it is present twice, so we have a loop\n # to really really get rid of it.\n for _ in range(3):\n uninstalled = False\n frozen = sh(\"pip freeze\", capture=True)\n\n for package_name in PACKAGES_TO_UNINSTALL:\n if package_in_frozen(package_name, frozen):\n # Uninstall the pacakge\n sh(f\"pip uninstall --disable-pip-version-check -y {package_name}\")\n uninstalled = True\n if not uninstalled:\n break\n else:\n # We tried three times and didn't manage to get rid of the pests.\n print(\"Couldn't uninstall unwanted Python packages!\")\n return\n\n # Write our version.\n with open(state_file_path, \"wb\") as state_file:\n state_file.write(expected_version.encode('utf-8'))",
"def productUninstall(command):\n try:\n Executingbysubprocess(command)\n print \"uninstallcommand\". command\n except Exception as er:\n print \"Not able to uninstall the product\"",
"def UndeployModel(self, request, global_params=None):\n config = self.GetMethodConfig('UndeployModel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def AptInstall(vm):\n vm.RobustRemoteCommand('sudo apt-get {}'.format(REMOVE_MPI_CMD))\n _Install(vm)"
] | [
"0.6643343",
"0.6554691",
"0.6437639",
"0.61634016",
"0.61421573",
"0.6033786",
"0.6024719",
"0.5982157",
"0.59412205",
"0.59231645",
"0.5891756",
"0.5881228",
"0.5796456",
"0.5779653",
"0.57771873",
"0.57449394",
"0.5722418",
"0.57108957",
"0.5706674",
"0.56761074",
"0.5650325",
"0.56096953",
"0.5608291",
"0.5584738",
"0.5556681",
"0.55310917",
"0.55267656",
"0.55102324",
"0.54782355",
"0.54749805"
] | 0.8364465 | 0 |
This method provides the ability to set the order book's deep (depth) on the fly. If any of the deep's parameters (bid_count or ask_count) is < 0, the method raises the custom ChangeOrderBookDeepError exception. | def set_deep(self, deep: Deep) -> None:
def is_deep_invalid(var: Deep):
return not isinstance(var, Deep) \
or False in [str(value).isdigit() for value in deep.__dict__.values()] \
or deep.bid_count < 0 \
or deep.ask_count < 0
# Exit rule
if is_deep_invalid(deep):
raise ChangeOrderBookDeepError(deep)
self.deep = deep | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_overflow_bids_market_default_depth(new_order_book: Callable[[], OrderBook]) -> NoReturn:\n book = new_order_book\n\n for _ in range(book.depth):\n book.add_offer('bids', 1, 1)\n\n assert book.depth == len(book.bids)\n assert not book.asks\n\n # try to put 21th lot into bids\n with pytest.raises(TradeTypeOverflowedException):\n book.add_offer('bids', 1, 1)",
"def test_overflow_bids_market_custom_depth() -> NoReturn:\n book = OrderBook(10)\n\n for _ in range(book.depth):\n book.add_offer('bids', 1, 1)\n\n assert book.depth == len(book.bids)\n assert not book.asks\n\n # try to put 11th lot into bids\n with pytest.raises(TradeTypeOverflowedException):\n book.add_offer('bids', 1, 1)",
"def test_overflow_asks_market_default_depth(new_order_book: Callable[[], OrderBook]) -> NoReturn:\n book = new_order_book\n\n for _ in range(book.depth):\n book.add_offer('asks', 1, 1)\n\n assert book.depth == len(book.asks)\n assert not book.bids\n\n # try to put 21th lot into asks\n with pytest.raises(TradeTypeOverflowedException):\n book.add_offer('asks', 1, 1)",
"def test_overflow_asks_market_custom_depth() -> NoReturn:\n book = OrderBook(10)\n\n for _ in range(book.depth):\n book.add_offer('asks', 1, 1)\n\n assert book.depth == len(book.asks)\n assert not book.bids\n\n # try to put 11th lot into asks\n with pytest.raises(TradeTypeOverflowedException):\n book.add_offer('asks', 1, 1)",
"def test_e2e_override_depth_amount_greater_than_from_order_book(self):\n\n cli = \"--balance 1 --override_depth_amount 0.5 offline --test -ob test_data/order_books.csv \"\n deal = self._run_bot_offine(cli)\n\n self.assertEqual(0.5, float(deal.data_row[\"_config_override_depth_amount\"]), 4)\n self.assertEqual(0.5, float(deal.data_row[\"start-qty\"]))\n self.assertEqual(float(deal.data_row[\"ob_result\"]), float(deal.data_row[\"result\"]))\n self.assertEqual(0.024282400000000093, float(deal.data_row[\"result-fact-diff\"]))\n\n # check if prices are from tickers\n self.assertEqual(float(deal.data_row[\"leg1-price\"]), float(deal.data_row[\"leg1-ob-price\"]))\n self.assertEqual(float(deal.data_row[\"leg2-price\"]), float(deal.data_row[\"leg2-ob-price\"]))\n self.assertEqual(float(deal.data_row[\"leg3-price\"]), float(deal.data_row[\"leg3-ob-price\"]))",
"def test_get_fails_when_setting_nested_object(self):\n with pytest.raises(\n ClickException,\n match=r\"Attribute `non_existing_attribute.dummy` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute.dummy\",\n \"new_value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_e2e_override_depth_amount_less_than_from_order_book(self):\n\n cli = \"--balance 1 --override_depth_amount 0.03 offline --test -ob test_data/order_books.csv \"\n deal = self._run_bot_offine(cli)\n\n self.assertAlmostEqual(0.06000734789047485, float(deal.data_row[\"start-qty\"]), 4)\n self.assertEqual(0.002407822109525136, float(deal.data_row[\"result-fact-diff\"]))\n\n # prices from order book\n self.assertNotEqual(float(deal.data_row[\"leg1-price\"]), float(deal.data_row[\"leg1-ob-price\"]))",
"def test_too_deeply_nested(self) -> None:\n nested_action = TestNestedMenuAction()\n nested2_action = TestNested2MenuAction()\n nested3_action = TooDeeplyNestedAction()\n\n actions_registry.register(self.test_menu_action)\n actions_registry.register(nested_action)\n actions_registry.register(nested2_action)\n\n with self.assertRaises(DepthLimitExceededError):\n actions_registry.register(nested3_action)",
"def parse_l2_depth(cls, instmt, raw):\n l2_depth = instmt.get_l2_depth()\n keys = list(raw.keys())\n if cls.get_bids_field_name() in keys and \\\n cls.get_asks_field_name() in keys:\n\n # Date time\n timestamp = raw['timestamp']\n l2_depth.date_time = datetime.utcfromtimestamp(timestamp/1000.0).strftime(\"%Y%m%d %H:%M:%S.%f\")\n\n # Bids\n bids = raw[cls.get_bids_field_name()]\n bids_len = min(l2_depth.depth, len(bids))\n for i in range(0, bids_len):\n l2_depth.bids[i].price = float(bids[i][0]) if type(bids[i][0]) != float else bids[i][0]\n l2_depth.bids[i].volume = float(bids[i][1]) if type(bids[i][1]) != float else bids[i][1]\n\n # Asks\n asks = raw[cls.get_asks_field_name()]\n asks_len = min(l2_depth.depth, len(asks))\n for i in range(0, asks_len):\n l2_depth.asks[i].price = float(asks[i][0]) if type(asks[i][0]) != float else asks[i][0]\n l2_depth.asks[i].volume = float(asks[i][1]) if type(asks[i][1]) != float else asks[i][1]\n else:\n raise Exception('Does not contain order book keys in instmt %s-%s.\\nOriginal:\\n%s' % \\\n (instmt.get_exchange_name(), instmt.get_instmt_name(), \\\n raw))\n return l2_depth",
"def test_deep_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('deep')",
"def test_get_bid_offer_data(filled_order_book: Callable[[], OrderBook]) -> NoReturn:\n book = filled_order_book\n\n bid_keys = list(book.bids.keys())\n offer_key = choice(bid_keys)\n\n received_offer = book.get_offers_data(offer_key)\n\n assert isinstance(received_offer, dict)\n\n try:\n offer_price = received_offer['price']\n offer_quantity = received_offer['quantity']\n\n except KeyError:\n pytest.fail('While parsing received_offer KeyError occured')\n\n assert isinstance(offer_price, int)\n assert isinstance(offer_quantity, int)\n\n try:\n bid_offer = book.bids[offer_key]\n\n except KeyError:\n pytest.fail('While parsing book.bids KeyError occured')\n\n assert bid_offer == received_offer\n\n with pytest.raises(TradeTypeOverflowedException):\n book.add_offer('bids', 1, 1)",
"def slot_fulldepth(self, dummy_sender, data):\r\n (depth) = data\r\n self.debug(\"### got full depth, updating orderbook...\")\r\n self.bids = []\r\n self.asks = []\r\n self.total_ask = 0\r\n self.total_bid = 0\r\n if \"error\" in depth:\r\n self.debug(\"### \", depth[\"error\"])\r\n return\r\n for order in depth[\"data\"][\"asks\"]:\r\n price = int(order[\"price_int\"])\r\n volume = int(order[\"amount_int\"])\r\n self._update_total_ask(volume)\r\n self.asks.append(Level(price, volume))\r\n for order in depth[\"data\"][\"bids\"]:\r\n price = int(order[\"price_int\"])\r\n volume = int(order[\"amount_int\"])\r\n self._update_total_bid(volume, price)\r\n self.bids.insert(0, Level(price, volume))\r\n\r\n # update own volume cache\r\n for order in self.owns:\r\n self._update_level_own_volume(\r\n order.typ, order.price, self.get_own_volume_at(order.price, order.typ))\r\n\r\n if len(self.bids):\r\n self.bid = self.bids[0].price\r\n if len(self.asks):\r\n self.ask = self.asks[0].price\r\n\r\n self._valid_ask_cache = -1\r\n self._valid_bid_cache = -1\r\n self.ready_depth = True\r\n self.signal_fulldepth_processed(self, None)\r\n self.signal_changed(self, None)",
"def test_depth_limit(self):\n with self.assertRaisesRegexp(\n RemoteException,\n r'.*DepthLimitExceeded: Depth limit of 2 ' +\n 'exceeded at localhost -> localhost -> localhost'):\n recursive()",
"def test_deep_update_illegal_update(self):\n # Update with an illegal type\n for update_with in [42, None, [42], \"bar\"]:\n with self.assertRaisesRegex(\n SaltInvocationError,\n r\"Cannot update {} with a {}.\" \"\".format(type({}), type(update_with)),\n ):\n dictupdate.update_dict_key_value({}, \"foo\", update_with)\n # Again, but now using OrderedDicts\n for update_with in [42, None, [42], \"bar\"]:\n with self.assertRaisesRegex(\n SaltInvocationError,\n r\"Cannot update {} with a {}.\"\n \"\".format(type(OrderedDict()), type(update_with)),\n ):\n dictupdate.update_dict_key_value(\n {}, \"foo\", update_with, ordered_dict=True\n )",
"def _set_level_depth(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n\n def _init_level(rec):\n if rec.level is None:\n if rec.parents:\n rec.level = min(_init_level(rec) for rec in rec.parents) + 1\n else:\n rec.level = 0\n return rec.level\n\n def _init_depth(rec):\n if rec.depth is None:\n if rec.parents:\n rec.depth = max(_init_depth(rec) for rec in rec.parents) + 1\n else:\n rec.depth = 0\n return rec.depth\n\n def _init_reldepth(rec):\n if not hasattr(rec, 'reldepth'):\n up_terms = rec.get_goterms_upper()\n if up_terms:\n rec.reldepth = max(_init_reldepth(rec) for rec in up_terms) + 1\n else:\n rec.reldepth = 0\n return rec.reldepth\n\n for rec in self.values():\n\n # Add invert relationships\n if has_relationship:\n if rec.depth is None:\n _init_reldepth(rec)\n\n # print(\"BBBBBBBBBBB1\", rec.id, rec.relationship)\n #for (typedef, terms) in rec.relationship.items():\n # invert_typedef = self.typedefs[typedef].inverse_of\n # # print(\"BBBBBBBBBBB2 {} ({}) ({}) ({})\".format(\n # # rec.id, rec.relationship, typedef, invert_typedef))\n # if invert_typedef:\n # # Add inverted relationship\n # for term in terms:\n # if not hasattr(term, 'relationship'):\n # term.relationship = defaultdict(set)\n # term.relationship[invert_typedef].add(rec)\n # print(\"BBBBBBBBBBB3\", rec.id, rec.relationship)\n\n if rec.level is None:\n _init_level(rec)\n\n if rec.depth is None:\n _init_depth(rec)",
"def _duplicate_fields_with_name_ending_with_chain(params, cls, depth):\n if not isinstance(params, cls) or not isinstance(params, pecos.BaseParams):\n raise ValueError(\"invalid type(params)!\")\n\n for f in dc.fields(cls):\n old_val = getattr(params, f.name)\n if isinstance(old_val, f.type) and f.name.endswith(\"_chain\"):\n setattr(params, f.name, [copy.deepcopy(old_val) for _ in range(depth)])\n elif isinstance(old_val, (list, tuple)):\n if len(old_val) != depth:\n raise ValueError(f\"len(params.{f.name})={len(old_val)} != {depth}\")\n if any(not isinstance(cur_param, f.type) for cur_param in old_val):\n raise ValueError(\"invalid params!\")\n return params",
"def pad_book(book, depth, price, side):\n orders = []\n sign = -1.0 if side == Side.BUY else 1.0\n if depth < MAX_DEPTH:\n num = MAX_DEPTH - depth\n best = price\n offset = 1\n for _ in itertools.repeat(None, num):\n orders += [Order(secid=book.security,\n side=side,\n price=round(best + sign * offset *\n MIN_TICK, DECIMALS),\n qty=random.randint(1, 10))]\n offset += random.randint(1, 3)\n return orders",
"def deepupdate(self, other, copy=False):\n for k in other:\n if isinstance(other[k], self.__class__):\n if not k in self:\n self[k] = self.__class__()\n elif isinstance(self[k], self.__class__):\n pass\n elif isinstance(self[k], dict):\n self[k] = self.__class__(self[k]).rconvert()\n else:\n self[k] = self.__class__()\n self[k].deepupdate(other[k])\n else:\n if copy: self[k] = copymod.deepcopy(other[k])\n else: self[k] = other[k]\n return self",
"def test_get_ask_offer_data(filled_order_book: Callable[[], OrderBook]) -> NoReturn:\n book = filled_order_book\n\n ask_keys = list(book.asks.keys())\n offer_key = choice(ask_keys)\n\n received_offer = book.get_offers_data(offer_key)\n\n assert isinstance(received_offer, dict)\n\n try:\n offer_price = received_offer['price']\n offer_quantity = received_offer['quantity']\n\n except KeyError:\n pytest.fail('While parsing received_offer KeyError occured')\n\n assert isinstance(offer_price, int)\n assert isinstance(offer_quantity, int)\n\n try:\n ask_offer = book.asks[offer_key]\n\n except KeyError:\n pytest.fail('While parsing book.asks KeyError occured')\n\n assert ask_offer == received_offer\n\n with pytest.raises(TradeTypeOverflowedException):\n book.add_offer('asks', 1, 1)",
"def _assert_valid_deep(value):\n if isinstance(value, dict):\n for v in value.itervalues():\n _assert_valid_deep(v)\n elif isinstance(value, list):\n for v in value:\n _assert_valid_deep(v)\n else:\n if hasattr(value, \"assert_valid\"):\n value.assert_valid()",
"def add_order(self, order):\n if order.is_bid:\n if order.price in self.buy_levels:\n limit = self.buy_levels[order.price]\n if limit.size == 0:\n self.buy_tree.size += 1\n limit.add(order)\n self.buy_map[order.uid] = order\n order.parent_limit = limit\n else:\n limit = Limit(order.price)\n limit.add(order)\n self.buy_map[order.uid] = order\n self.buy_tree.insert(limit)\n self.buy_tree.size += 1\n self.buy_levels[order.price] = limit\n order.parent_limit = self.buy_levels[order.price]\n if self.highest_buy is None or order.price > self.highest_buy:\n self.highest_buy = order.price\n else:\n if order.price in self.sell_levels:\n limit = self.sell_levels[order.price]\n if limit.size == 0:\n self.sell_tree.size += 1\n limit.add(order)\n self.sell_map[order.uid] = order\n order.parent_limit = self.sell_levels[order.price]\n else:\n limit = Limit(order.price)\n limit.add(order)\n self.sell_map[order.uid] = order\n self.sell_tree.insert(limit)\n self.sell_tree.size += 1\n self.sell_levels[order.price] = limit\n order.parent_limit = self.sell_levels[order.price]\n if self.lowest_sell is None or order.price < self.lowest_sell:\n self.lowest_sell = order.price\n self.update_book()",
"def find_deep(neighs_info):\n if '__len__' not in dir(neighs_info):\n deep = 0\n else:\n if len(neighs_info) == 0:\n deep = 1\n elif '__len__' not in dir(neighs_info[0]):\n deep = 1\n else:\n logi = [len(neighs_info[i]) == 0 for i in range(len(neighs_info))]\n if all(logi):\n deep = 2\n elif '__len__' not in dir(neighs_info[0][0]):\n deep = 2\n else:\n deep = 3\n return deep",
"def test_set_with_deep_key_path_with_list():\n deep_key_path = ('second', 'deep', 'key', 'path')\n test_value = 'second deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('second'), dict)\n assert config.get(deep_key_path) == test_value",
"def order_book_builder(self, data, timestamp, datetime, symbol):\n if isinstance(data[1], list):\n data = data[1]\n # Price, Count, Amount\n bids = {\n str(level[0]): [str(level[1]), str(level[2])]\n for level in data if level[2] > 0\n }\n asks = {\n str(level[0]): [str(level[1]), str(abs(level[2]))]\n for level in data if level[2] < 0\n }\n self.orderbooks[symbol].update({'bids': bids})\n self.orderbooks[symbol].update({'asks': asks})\n self.orderbooks[symbol].update({'timestamp': timestamp})\n self.orderbooks[symbol].update({'datetime': datetime})\n\n else:\n # Example update message structure [1765.2, 0, 1] where we have [price, count, amount].\n # Update algorithm pseudocode from Bitfinex documentation:\n # 1. - When count > 0 then you have to add or update the price level.\n # 1.1- If amount > 0 then add/update bids.\n # 1.2- If amount < 0 then add/update asks.\n # 2. - When count = 0 then you have to delete the price level.\n # 2.1- If amount = 1 then remove from bids\n # 2.2- If amount = -1 then remove from asks\n data = data[1:]\n data = [str(data[0]), str(data[1]), str(data[2])]\n if int(data[1]) > 0: # 1.\n\n if float(data[2]) > 0: # 1.1\n self.orderbooks[symbol]['bids'].update({data[0]: [data[1], data[2]]})\n\n elif float(data[2]) < 0: # 1.2\n self.orderbooks[symbol]['asks'].update({data[0]: [data[1], str(abs(float(data[2])))]})\n\n elif data[1] == '0': # 2.\n\n if data[2] == '1': # 2.1\n if self.orderbooks[symbol]['bids'].get(data[0]):\n del self.orderbooks[symbol]['bids'][data[0]]\n\n elif data[2] == '-1': # 2.2\n if self.orderbooks[symbol]['asks'].get(data[0]):\n del self.orderbooks[symbol]['asks'][data[0]]",
"def test_set_with_deep_key_path_with_string():\n deep_key_path = 'deep.key.path'\n test_value = 'deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('deep'), dict)\n assert config.get(deep_key_path) == test_value",
"def slot_depth(self, dummy_sender, data):\r\n (typ, price, _voldiff, total_vol) = data\r\n if self._update_book(typ, price, total_vol):\r\n self.signal_changed(self, None)",
"def test_blind_sig_chain_wrong_intermediary(self): # pylint: disable=too-many-locals\n\n test_levels = 4\n msg = os.urandom(1024)\n wrong_level = 2\n\n ca = ECCBlind()\n signer_obj = ca\n fake_intermediary = ECCBlind()\n\n output = bytearray()\n\n for level in range(test_levels):\n if not level:\n output.extend(ca.pubkey())\n requester_obj = ECCBlind(pubkey=signer_obj.pubkey())\n child_obj = ECCBlind()\n point_r = signer_obj.signer_init()\n pubkey = child_obj.pubkey()\n\n if level == test_levels - 1:\n msg_blinded = requester_obj.create_signing_request(point_r,\n msg)\n else:\n msg_blinded = requester_obj.create_signing_request(point_r,\n pubkey)\n signature_blinded = signer_obj.blind_sign(msg_blinded)\n signature = requester_obj.unblind(signature_blinded)\n if level == wrong_level:\n output.extend(fake_intermediary.pubkey())\n elif level != test_levels - 1:\n output.extend(pubkey)\n output.extend(signature)\n signer_obj = child_obj\n verifychain = ECCBlindChain(ca=ca.pubkey(), chain=str(output))\n self.assertFalse(verifychain.verify(msg, 1))",
"def __init__(self, real_path, first_path, second_path):\n\t\tsuper(RecursionError, self).__init__(real_path, first_path, second_path)",
"def updateError(self, traversed=[], updateNeis=False):\n if not self.checkForErrors:\n return\n nodePins = set([self])\n if self.constraint:\n nodePins = set(self.owningNode().constraints[self.constraint])\n for connectedPin in getConnectedPins(self):\n if connectedPin.isAny():\n nodePins.add(connectedPin)\n for neighbor in nodePins:\n if neighbor not in traversed:\n if all([neighbor.activeDataType == \"AnyPin\",\n neighbor.canChangeTypeOnConnection([], neighbor.optionEnabled(PinOptions.ChangeTypeOnConnection), []),\n not neighbor.optionEnabled(PinOptions.AllowAny)]):\n neighbor.setError(\"AnyPin Not Initialized\")\n neighbor.super = None\n else:\n neighbor.clearError()\n if neighbor.activeDataType == \"AnyPin\":\n neighbor.super = AnyPin\n traversed.append(neighbor)\n if neighbor.isAny():\n neighbor.updateError(traversed, updateNeis)\n if updateNeis:\n neighbor.owningNode().checkForErrors()",
"def test_edge_driver_errors(self):\n\n with pytest.raises(\n ValueError, match=r\"Encountered invalid entry in 'reward', expected 2-bit bitstrings.\"\n ):\n qaoa.edge_driver(Graph([(0, 1), (1, 2)]), [\"10\", \"11\", 21, \"g\"])\n\n with pytest.raises(\n ValueError,\n match=r\"'reward' cannot contain either '10' or '01', must contain neither or both.\",\n ):\n qaoa.edge_driver(Graph([(0, 1), (1, 2)]), [\"11\", \"00\", \"01\"])\n\n with pytest.raises(ValueError, match=r\"Input graph must be a nx.Graph\"):\n qaoa.edge_driver([(0, 1), (1, 2)], [\"00\", \"11\"])"
] | [
"0.5391954",
"0.52650684",
"0.5231182",
"0.5148186",
"0.46605176",
"0.46045893",
"0.4553652",
"0.44586703",
"0.43845412",
"0.43609735",
"0.4305438",
"0.42446724",
"0.42142078",
"0.40942338",
"0.40583956",
"0.40488294",
"0.40381512",
"0.4022892",
"0.4010043",
"0.40016043",
"0.39973825",
"0.39793435",
"0.39618516",
"0.3952347",
"0.39069355",
"0.38854676",
"0.38814378",
"0.3876726",
"0.38742697",
"0.38408175"
] | 0.79812324 | 0 |
Private method that provides the ability to sort the orders by price | def __sort_orders_by_price(self):
self.orders = sorted(self.orders, key=lambda o: o.price, reverse=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_ordering_by_price_desc(self):\n request = self.factory.get('/api/v1/cars', {'distance': 10000,\n 'ordering': '-price'})\n response = CarAdViewSet.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, HTTPStatus.OK._value_)\n cars = response.data['results'][0:2]\n self.assertGreater(cars[0]['price'], cars[1]['price'])\n self.assertNotEqual(cars[0], cars[1])",
"def select_sort_by_price_ascendant(self):\n msg = \"The new order of the items is by ascendant price\"\n with self.allure.step(msg):\n self.__product_sort.select_by_text('Price (low to high)')\n self.allure.attach_image(self.driver, msg)",
"def _sort_by_price(self, data):\n # Separate the data by currency\n alch = []\n fusing = []\n chaos = []\n exalted = []\n \n for item in data:\n price = item['price']\n if \"alchemy\" in price:\n alch.append(item)\n elif \"fusing\" in price:\n fusing.append(item)\n elif \"chaos\" in price:\n chaos.append(item)\n elif \"exalted\" in price:\n exalted.append(item)\n \n alch = natsorted(alch, key=lambda item: item['price'])\n fusing = natsorted(fusing, key=lambda item: item['price'])\n chaos = natsorted(chaos, key=lambda item: item['price'])\n exalted = natsorted(exalted, key=lambda item: item['price'])\n \n result = []\n result.extend(alch)\n result.extend(fusing)\n result.extend(chaos)\n result.extend(exalted)\n return result",
"def orderby():\n pass",
"def _get_open_orders_by_price(self):\n log.debug(\"Getting open orders sorted by price-based priority\")\n\n # time_created ordering will be preserved as the secondary sort key.\n unprioritized_open_orders = list(Order.objects.filter(\n status=Order.STATUS_OPEN).order_by('time_created'))\n fulfilled_orders = list(Order.objects.filter(\n status=Order.STATUS_FULFILLED))\n\n sorted_orders_dict = self._sort_open_orders_by_price(\n unprioritized_open_orders,\n fulfilled_orders)\n return sorted_orders_dict['sorted_open_orders']",
"def sortby(self):\n ...",
"def select_sort_by_price_descendant(self):\n msg = \"The new order of the items is by descendant price\"\n with self.allure.step(msg):\n self.__product_sort.select_by_text('Price (high to low)')\n self.allure.attach_image(self.driver, msg)",
"def sort_by_price(children_events_options_list: List[ChildrenEventOption], sort_type: SortType = SortType.ASCENDING):\n return _sort_by(children_events_options_list, sort_type, key=attrgetter('price_in_uah'))",
"def _sort_open_orders_by_price(self,\n open_orders,\n fulfilled_orders):\n priorities_stats = self._compute_order_priorities_stats(\n open_orders + fulfilled_orders)\n median_demand, order_prices, tab_limits, total_fulfilled_prices = \\\n (priorities_stats['median_demand'],\n priorities_stats['order_prices'],\n priorities_stats['tab_limits'],\n priorities_stats['total_fulfilled_prices'])\n\n # The get_priority function also does a write to the database to update\n # tab_based_priority field for each order. This is because we use that\n # as a cached field to show the user the order's last known priority.\n # This is a side-effect of the function\n def get_priority(open_order):\n \"\"\"Compute an open order's price-based priority.\n\n The floor and 20% fudge keep FIFO as a small component of priority\n instead of severely penalizing people who ordered early but want\n just a bit more than average demand.\n\n Maintenance orders are a special case and always priced at 0.0\n to be processed early.\n \"\"\"\n priority = 0.0\n if not open_order.maintenance:\n order_price = order_prices[open_order.sid]\n tab = open_order.tab\n owner_total_fulfilled_price = \\\n total_fulfilled_prices.get(tab.id, 0.0)\n tab_limit = tab_limits[tab.sid]\n priority = floor(\n ((order_price + owner_total_fulfilled_price) / tab_limit) /\n (1.2 * median_demand))\n\n open_order.tab_based_priority = priority\n open_order.save(update_fields=['tab_based_priority'])\n\n return priority\n\n order_priorities = {\n order.sid: get_priority(order) for order in open_orders\n }\n\n log.debug('Open order price-based priorities: %s' % order_priorities)\n\n sorted_open_orders = \\\n sorted(open_orders,\n key=lambda o: order_priorities[o.sid])\n\n return {\n 'sorted_open_orders': sorted_open_orders,\n 'open_order_priorities': order_priorities\n }",
"def test_get_order(self):\n pass",
"def get_order_price(self):\r\n if self.price is not None:\r\n return self.price #typical limit price order\r\n else:\r\n #Check the orderbook\r\n logger.info(\"floating price\")\r\n self.get_orderbook()\r\n logger.info(self.orderbook_snapshot)\r\n\r\n pass",
"def place_limit_order(self, side, symbol, size, price, **kwargs):\n pass",
"def order(self, order):\n\n #print(\"Evaluating order: \", order)\n if self.security != order.secid:\n raise (\"Cannot place order for security \"\n \"%s on book[%s]\" % (order.security, self.security))\n\n levels = self.bid\n if order.side == Side.SELL:\n levels = self.offer\n\n new_level = OrderBookLevel(price=order.price, qty=order.qty, order_count=1)\n start_index = levels.bisect_right(new_level)\n levels.insert(start_index, new_level)\n OrderBookUtils.compact(levels, start_index)\n\n # Trim list\n if order.side == Side.SELL:\n # Delete from end of list - highest offers\n size = len(self.offer)\n if size > MAX_DEPTH:\n for _ in itertools.repeat(None, size - MAX_DEPTH):\n del self.offer[-1]\n else:\n # Delete from start of list - lowest bids\n size = len(self.bid)\n if size > MAX_DEPTH:\n for _ in itertools.repeat(None, size - MAX_DEPTH):\n del self.bid[0]\n\n return self.match(order.side)",
"def _set_order_price(self):\n\n price = self.calculate_price()\n self.price = price\n return self.price",
"def order(self, order_id, symbol, **kwargs):\n pass",
"def sort_products(param: str, querySet: QuerySet):\n products_list = list(querySet)\n products_list.sort(\n key=lambda product: (-product.calculate_score(param), product.price)\n )\n return products_list",
"def sort_vendor_price_lists_grid_column(self, column_name, descending_order):\n self.sort_grid_column(self.view_price_list_div_id, column_name, descending_order)",
"def sort_docs_by_price(self, reverse=False):\n self.documents_predicted_relevant.sort(key=lambda doc: doc.price, reverse=reverse)",
"def get_sort_query(self, kind, order, is_number):\n pass",
"def update_order():",
"def update_order():",
"def sort_prices(list_of_tuples):\n list_of_tuples.sort(key = get_price, reverse = True)\n return list_of_tuples",
"def sort(self, order):\r\n params = base.get_params(None, locals())\r\n url = '{0}/sort'.format(self.get_url())\r\n\r\n request = http.Request('PUT', url, params)\r\n\r\n return request, parsers.parse_json",
"def sort(request):\n stars = Product.objects.annotate(\n avg_review=Avg('productreview__rating'),\n )\n select = request.GET['sort']\n if select == 'LtoH':\n results = Product.objects.order_by('price')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})\n elif select == 'HtoL':\n results = Product.objects.order_by('-price')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})\n elif select == 'AtoZ':\n results = Product.objects.order_by('name')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})\n elif select == 'ZtoA':\n results = Product.objects.order_by('-name')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})",
"def data_for_sorting() -> NoReturn:\n raise NotImplementedError",
"def data_for_sorting() -> NoReturn:\n raise NotImplementedError",
"def test_get_order_items(self):\n pass",
"def sorted_data():\n stock_data = scrape_data()\n filtered_data = list(filter(sort_func, stock_data))\n return filtered_data",
"def test_get_orders(self):\n pass",
"def sort_vendor_price_list_detail_grid_column(self, column_name, descending_order):\n self.wait_for_ajax_spinner_load(300)\n self.sort_grid_column(self.vendor_price_list_detail_rates_grid_div_id, column_name, descending_order)"
] | [
"0.6997947",
"0.6924603",
"0.68009967",
"0.64891934",
"0.64143217",
"0.6405557",
"0.63602364",
"0.62634486",
"0.6196882",
"0.61302906",
"0.60694",
"0.6041694",
"0.5944023",
"0.5943299",
"0.5924566",
"0.59217286",
"0.5914415",
"0.59045845",
"0.58919334",
"0.58767736",
"0.58767736",
"0.58214194",
"0.58181745",
"0.58101285",
"0.57990116",
"0.57990116",
"0.57840747",
"0.575617",
"0.5754199",
"0.5753358"
] | 0.8428332 | 0 |
This method provides the ability to find an order by id and reject it. | def reject_order(self, order: Order) -> None:
order = self.get_order_by_id(order.id)
order.status = OrderStatus.REJECT | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _on_order_not_found(self, msg):\r\n parts = msg[\"id\"].split(\":\")\r\n oid = parts[1]\r\n self.debug(\"### got 'Order not found' for\", oid)\r\n # we are now going to fake a user_order message (the one we\r\n # obviously missed earlier) that will have the effect of\r\n # removing the order cleanly.\r\n fakemsg = {\"user_order\": {\"oid\": oid, \"reason\": \"requested\"}}\r\n self._on_op_private_user_order(fakemsg)",
"def test_get_non_existing_order(self):\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.assertEqual(self.orders_list.get_order(2),\n \"order does not exist\")",
"def resolve_order(info, id):\n order = get_node(info, id, only_type=Order)\n user = info.context.user\n if (order.user == user or user.get_all_permissions() & {\n 'order.view_order', 'order.edit_order'}):\n return order",
"def test_non_existing_order(self):\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.assertEqual(self.orders_list.change_status(\n 2, \"pending\"), \"order not found\")",
"def _order_not_found():\n pecan.abort(404, u._('Order not found.'))",
"def test_fetch_specific_order_when_does_not_exist(self):\n response = self.api_test_client.get(\n '{}/orders/100'.format(self.BASE_URL))\n self.assertEqual(response.status_code, 404)\n self.assertEqual(\n 'Order with id 100 not found', response_as_json(\n response)['message'])",
"def test_delete_non_existing_order(self):\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.assertEqual(self.orders_list.deletes_order(2),\n \"order not found\")\n self.assertEqual(len(self.orders_list.get_orders()), 2)",
"async def get_order_by_id(request: web.Request, order_id) -> web.Response:\n return web.Response(status=200)",
"async def fetch_closed_order(self, id: str, symbol: Optional[str] = None, params={}):\n request = {\n 'id': [int(id)],\n }\n orders = await self.fetch_closed_orders(symbol, None, None, self.extend(request, params))\n order = self.safe_value(orders, 0)\n if order is None:\n raise OrderNotFound(self.id + ' order ' + id + ' not found')\n return order",
"def test_order_cannot_be_deleted_if_dont_exist(self):\n\n\t\tres = self.login_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().delete(\n\t\t\t'/api/v2/orders/5',\n\t\t\theaders={\"x-access-token\": access_token})\n\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(response.status_code, 404)\n\t\tself.assertEqual(result[\"message\"], \"That order is not available\")",
"def delete_order():",
"def get_order_by_id(self, order_id: uuid4) -> Order:\n return next(filter(lambda order: order.id == order_id, self.orders), None)",
"async def test_retrieve_order_by_id(self):\n order = {\n 'id': '46871284',\n 'type': 'ORDER_TYPE_BUY_LIMIT',\n 'state': 'ORDER_STATE_PLACED',\n 'symbol': 'AUDNZD',\n 'magic': 123456,\n 'platform': 'mt5',\n 'time': '2020-04-20T08:38:58.270Z',\n 'openPrice': 1.03,\n 'currentPrice': 1.05206,\n 'volume': 0.01,\n 'currentVolume': 0.01,\n 'comment': 'COMMENT2'\n }\n client.get_order = AsyncMock(return_value=order)\n actual = await api.get_order('46871284')\n assert actual == order\n client.get_order.assert_called_with('accountId', '46871284')",
"def test_get_specific_order(self):\n # Test with wrong parcel id\n # Correct format but not there\n response = self.client.get(\n 'api/v1/parcels/24034', headers=self.user_token_dict)\n data = json.loads(response.data)\n self.assertEqual(\n data, {'message': 'No Parcel delivery order with that id'})\n self.assertEqual(response.status_code, 400)\n # Test with wrong parcel id format\n response = self.client.get(\n 'api/v1/parcels/24034u', headers=self.user_token_dict) # Incorrect id format\n data = json.loads(response.data)\n self.assertEqual(data, {'message': 'Wrong id format'})\n self.assertEqual(response.status_code, 400)",
"def get_order(self, order_id):\n for o in self.order_lst:\n if o.get_orderId() == order_id:\n return o",
"def replace_order(self, custom_id=None, **params):\n self.conn.send('cancelReplaceOrder', custom_id=custom_id, **params)",
"def cancel(self, id):\n self.__init_client()\n order = self.get_open_order(id)\n\n if order is None:\n return False\n\n try:\n retry(lambda: self.client.futures_cancel_order(symbol=self.pair, origClientOrderId=order['clientOrderId']))\n except HTTPNotFound:\n return False\n logger.info(f\"Cancel Order : (clientOrderId, type, side, quantity, price, stop) = \"\n f\"({order['clientOrderId']}, {order['type']}, {order['side']}, {order['origQty']}, \"\n f\"{order['price']}, {order['stopPrice']})\")\n return True",
"def test_get_order(self):\n pass",
"def __clean_orders(self):\n canceled_id = []\n for order_id, order in self.orders_dict.items():\n if order[\"status\"] == \"canceled\":\n canceled_id.append(order_id)\n for id in canceled_id:\n del self.orders_dict[id]",
"def validate_order_id(self, value):\n\n if not Order.objects.filter(order_id=value).exists():\n raise ValidationError(f'Order with id {value} does not exist.')\n order_obj = Order.objects.get(order_id=value)\n if order_obj.assign_time is None:\n raise ValidationError(f'Order with id {value} was not assigned to any courier.')\n if order_obj.complete_time is not None:\n raise ValidationError(f'Order with id {value} has already been completed.')\n return value",
"def get_or_raise(self, _id):\n res = super(CustomQuery, self).get(_id)\n if not res:\n raise NotFoundException\n return res",
"def cancel_order(self, walletId, orderId):\n return",
"def order_w_order_id(order_id):\n # Megnyutjuk a kapcsolatot\n conn = get_db()\n try:\n # Keszitunk egy cursort\n cur = conn.cursor()\n try:\n # Ezt a parameteres SQL lekerdezest hajtjuk vegre, mellyel megkapjuk az adott\n # order_id-ju megrendelest.\n cur.execute('SELECT description, vehicle_type, quantity, origin, destination,' +\n ' order_date, deadline_date, comment_text FROM orders WHERE' +\n ' order_id = :order_id', order_id=order_id)\n # Ebben a valtozoban lesz az eredmenytabla egyetlen\n # sora (Biztosan 1 lesz, mert az order_id egyedi)\n result = cur.fetchone()\n # Ha nem talaltunk ilyen megrendelest, szolunk a felhasznalonak\n if result is None:\n abort(404)\n else:\n # 2. feladat - lekerdezzuk az adott orszag valutajat\n #\n # Az origin illetve destination mezokben megkeressuk az orszag betujelet\n # Ez mindig a string vegen, ( es ) jelek kozott allo 2 betu.\n # Mivel ezek nagybetuvel irodtak at kell konvertalnunk kisbeture.\n # Ezek futtatjuk a kerest, majd a kapott eredmenyt JSON formatumra parsoljuk.\n # Ebbol kiolvassuk a valuta erteket, amit majd atadunk a kimeneti mezonknek.\n origin001 = result[3]\n origin_len = len(origin001)\n origin_tmp = origin001[origin_len-3:origin_len-1]\n origin_url = \"http://rapid.eik.bme.hu:9080/currency_ws/currencies/\" + origin_tmp.lower() + \".json\"\n r1 = requests.get(origin_url)\n var1 = r1.json()\n origin_currency = var1['currency']\n \n destination001 = result[4]\n destination_len = len(destination001)\n destination_tmp = destination001[destination_len-3:destination_len-1]\n destination_url = \"http://rapid.eik.bme.hu:9080/currency_ws/currencies/\" + destination_tmp.lower() + \".json\"\n r2 = requests.get(destination_url)\n var2 = r2.json()\n destination_currency = var2['currency']\n # Visszaterunk a JSON formatumu dictionary-vel,\n # ami mindent a megfelelo formatumban tarol\n return jsonify({\"description\": result[0],\n \"vehicle_type\": result[1],\n \"quantity\": result[2],\n \"origin\": result[3],\n \"destination\": result[4],\n \"order_date\": result[5].date().isoformat(),\n \"deadline_date\": result[6].date().isoformat(),\n \"comment_text\": result[7],\n \"origin_currency\": origin_currency,\n\"destination_currency\": destination_currency})\n finally:\n cur.close()\n finally:\n conn.close()",
"def test_404_if_order_doesnt_exist(self, public_omis_api_client):\n url = reverse(\n 'api-v3:public-omis:payment:collection',\n kwargs={'public_token': ('1234-abcd-' * 5)}, # len(token) == 50\n )\n response = public_omis_api_client.get(url)\n\n assert response.status_code == status.HTTP_404_NOT_FOUND",
"def remove_order(self, order_id):\n for idx, o in enumerate(self.order_lst):\n if o.get_orderId() == order_id:\n rem_idx = idx\n \n self.order_lst.pop(rem_idx)",
"def api_delete_order(request, id):\n\n close_old_connections()\n\n # Not marking it as served if it isn't even ready yet.\n if not request.user.is_authenticated:\n return HttpResponseForbidden(\"You're not authenticated.\")\n \n # Delete the order.\n Order.objects.get(id=id).delete()\n\n close_old_connections()\n \n return HttpResponse('Deleted.')",
"def test_exception_invalid_sort_order(self):\n self.assertRaises(ValueError, self.conn.query, \"id:\" + \"abc\",\n **{\"sort\":\"id\", \"sort_order\":\"invalid_sort_order\"})",
"def test_404_if_in_disallowed_status(self, order_status, public_omis_api_client):\n order = OrderFactory(status=order_status)\n\n url = reverse(\n 'api-v3:public-omis:payment:collection',\n kwargs={'public_token': order.public_token},\n )\n response = public_omis_api_client.get(url)\n\n assert response.status_code == status.HTTP_404_NOT_FOUND",
"def checked_classified(self, order):\n assert (order.get_status() is OrderStatus.Created)\n assert (order.direction is not Direction.Cancel)\n if order.exec_type not in [Exectype.Market, Exectype.Stop]:\n assert (order.price is not None)\n check_left = np.round(order.price / self.table.tick_size, self.tick_size_decimals)\n check_right = np.round(order.price / self.table.tick_size, self.tick_size_decimals)\n assert check_left == check_right\n # Check expiration\n order = self.checked_expired(order)\n if order.get_status() is OrderStatus.Expired:\n self.debug(\"Order expired: order.reject()\")\n order.reject(self.table.get_current_time())\n else:\n if not self.table.allow_duplicated_ids and order.m_orderId in self.queue_observer.get_order_ids():\n self.debug(\"Order implies duplicated id: order.reject()\")\n order.reject(self.table.get_current_time())\n else:\n self.debug(\"Order can be accepted: order.accept()\")\n order.accept(self.table.get_current_time())\n return order",
"def test_get_by_id_wrong_type(self):\n assert ExampleUserModel.get_by_id(\"xyz\") is None"
] | [
"0.6331195",
"0.6186556",
"0.6181075",
"0.6084242",
"0.5875732",
"0.57537323",
"0.5748262",
"0.5568031",
"0.5513399",
"0.5510577",
"0.5493943",
"0.54261667",
"0.54258156",
"0.5360014",
"0.52910525",
"0.5274176",
"0.52361274",
"0.51995444",
"0.5193858",
"0.51862836",
"0.51693845",
"0.51672065",
"0.5160975",
"0.5160119",
"0.5159642",
"0.51552516",
"0.51521134",
"0.51489794",
"0.51361513",
"0.5129024"
] | 0.65955037 | 0 |
This method provides the ability to fill an order in the order book. This action means that the order is completed. | def fill_order(self, order: Order) -> None:
order = self.get_order_by_id(order.id)
order.status = OrderStatus.FILL | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def complete_order(order):\n enroll_user_in_order_items(order)\n\n # If this order included assigned coupons, update them to indicate that they're redeemed\n order_coupon_ids = order.couponredemption_set.values_list(\n \"coupon_version__coupon__id\", flat=True\n )\n if order_coupon_ids:\n set_coupons_to_redeemed(order.purchaser.email, order_coupon_ids)\n\n # clear the basket\n with transaction.atomic():\n BasketItem.objects.filter(basket__user=order.purchaser).delete()\n CourseRunSelection.objects.filter(basket__user=order.purchaser).delete()\n CouponSelection.objects.filter(basket__user=order.purchaser).delete()",
"def mark_completed(self, order_id=None):\n self.status = \"paid\"\n if order_id and not self.order_id:\n self.order_id = order_id\n print(\"Order completed\")\n self.save()",
"def fulfill_order(self, **kwargs):\n return self.client.execute(\"order/fulfill-one\", \"POST\", kwargs)",
"def test_create_confirm_order_details(self):\n pass",
"def finish_order(self, order_id):\n request_name = \"get_order_info\"\n\n orders = self.make_request(request_name, url_id=order_id)\n if orders is None:\n print(\"Unsuccessful updating order\")\n return\n order = orders[0]\n update_dict = dict()\n for key in order:\n if str(key)[0] == \"_\":\n continue\n try:\n update_dict[key.encode('utf-8')] = order[key].encode('utf-8')\n except AttributeError:\n update_dict[key.encode('utf-8')] = order[key]\n\n update_dict['status'] = 'Complete'\n resp = self.make_request('set_inventory_order', url_id=order_id, arguments=update_dict)",
"def set_order_done():\n data = select_data_source()\n user = data['user']\n order_id = data['id']\n \n if check_user_permission(user) : return permission_denied_return\n \n db = database.getdb()\n \n ### Check if is valid.\n \n cmd = 'select passed from orders where id==\"{0}\"'.format(order_id)\n order_valid = db.execute(cmd).fetchall()[0][0]\n if order_valid == 0 :\n return finish_invalid_return\n \n ### Check if is done.\n cmd = 'select done from orders where id==\"{0}\"'.format(order_id)\n order_done = db.execute(cmd).fetchall()[0][0]\n if order_done != 0 :\n return finish_done_return\n \n ### All check done.\n ### Set it to done.\n cmd = 'update orders set done=1 where id==\"{0}\"'.format(order_id)\n db.execute(cmd)\n db.commit()\n print('user sets order {0} to be done.'.format(user))\n \n return finish_complete_return",
"def test_process_order(self):\n checkout_form = self.get_checkout_form()\n checkout_form.submit()\n\n sleep(0.5)\n result = self.browser.find_element_by_id('result')\n self.assertIn(\n \"Your order was placed.\",\n result.text\n )\n self.assertTrue(len(self.client.session['cart']) == 0)\n OrderInfo.objects.get()",
"def _is_order_filled(self):\r\n if self.filled_quantity == self.quantity:\r\n self.order_finish()",
"def m_ts_OrderFilled(self, sender, e):\r\n if e.FillType == ttapi.FillType.Full:\r\n print(\"Order was fully filled for {0} at {1}.\".format(e.Fill.Quantity, e.Fill.MatchPrice))\r\n else:\r\n print(\"Order was partially filled for {0} at {1}.\".format(e.Fill.Quantity, e.Fill.MatchPrice))\r\n print(\"Average Buy Price = {0} : Net Position = {1} : P&L = {2}\".format(self.m_ts.ProfitLossStatistics.BuyAveragePrice, self.m_ts.ProfitLossStatistics.NetPosition, self.m_ts.ProfitLoss.AsPrimaryCurrency))",
"def set_filled_order(self):\n self.set_values(\n start_phrase='Filled Orders',\n end_phrase=None,\n start_with=2,\n end_until=-1,\n prop_keys=self.filled_order_keys,\n prop_name='filled_order'\n )\n\n self.filled_order = map(self.del_empty_keys, self.filled_order)\n self.fillna_dict_with_exists(\n self.filled_order,\n 'exec_time',\n ('exec_time', 'spread', 'order')\n )\n\n self.replace_nan(self.filled_order)\n self.convert_type(self.filled_order, 'exec_time', self.convert_datetime, 0)\n\n self.convert_type(self.filled_order, 'quantity', int, 0)\n self.convert_type(self.filled_order, 'strike', float, 0.0)\n self.convert_type(self.filled_order, 'price', float, 0.0)\n self.convert_type(self.filled_order, 'net_price', float, 0.0)\n self.convert_type(self.filled_order, 'expire_date', str, '')",
"def fulfill_order(request_data):\n # First, save this information in a receipt\n receipt = Receipt.objects.create(data=request_data)\n\n # Link the order with the receipt if we can parse it\n reference_number = request_data[\"req_reference_number\"]\n req_bill_to_email = request_data.get(\"req_bill_to_email\")\n order = Order.objects.get_by_reference_number(reference_number)\n receipt.order = order\n receipt.save()\n\n new_order_status = determine_order_status_change(order, request_data[\"decision\"])\n if new_order_status is None:\n # This is a duplicate message, ignore since it's already handled\n return\n\n order.status = new_order_status\n order.save()\n sync_hubspot_deal(order)\n\n if order.status == Order.FULFILLED:\n complete_order(order)\n if settings.ENABLE_ORDER_RECEIPTS:\n send_ecommerce_order_receipt(\n order=order, cyber_source_provided_email=req_bill_to_email\n )\n\n # Save to log everything to an audit table including enrollments created in complete_order\n order.save_and_log(None)",
"def test_order_complete_order_completed(self):\n u = User.objects.get(username=\"test1\")\n u.userplan.expire = date.today() + timedelta(days=50)\n u.userplan.active = False\n u.userplan.save()\n plan_pricing = PlanPricing.objects.get(plan=u.userplan.plan, pricing__period=30)\n order = Order.objects.create(\n user=u,\n pricing=plan_pricing.pricing,\n amount=100,\n plan=plan_pricing.plan,\n completed=date(2010, 10, 10),\n )\n self.assertFalse(order.complete_order())",
"def on_order(self, order: OrderData):\n # print(\"on_order\")\n # print(order)\n pass",
"def complete_order(order_id, filename, url, proxy):\n order_json = read_work_order(order_id, filename)\n if order_json is None:\n raise LookupError(\"No order found with id %r\"%order_id)\n order = json.loads(order_json)\n message = make_complete(order)\n data = json.dumps(message, indent=4)\n print data\n if url:\n send_request(data, url, proxy)\n else:\n print \"\\nUse the --url argument to specify destination\"",
"def order_ready(request):\n\tcs , status = CookStatus.objects.get_or_create(cook_name=request.user)\n\tif cs.current_order is not None:\n\t\tcs.current_order.status = 'ready-to-serve'\n\t\tcs.current_order.save()\n\t\tcs.current_order = None\n\t\tcs.save()\n\n\treturn HttpResponseRedirect(\"/staff/cook_order_list/\")",
"def fillOrder(self,orderID=None,order=None):\n\t\t#locate where the order resides\n\t\tif order is not None:\n\t\t\torderID=order.ID\n\t\tside=self.Stack[orderID].Side\n\t\tamount=0\n\t\t#calculate the amount to deposit basing on the side of the order\n\t\tif side is 'sell':\n\t\t\tamount=self[orderID].Price*self.Stack[orderID].Amount\n\t\telse:\n\t\t\tamount=self[orderID].Amount\n\t\t#deposit to the opposite account, if buy order, deposit into sell account,and vice\n\t\tside=self.invertSide(side)\n\t\tself[orderID].Account.deposit(side,amount)\n\t\t#Take order off the stack\n\t\tprint(\"\\n\"+order.Side.upper()+\" order \"+str(self[orderID].ID)+\" of \"+ str(self[orderID].Price) +\" has been filled\")\n\t\tself.destroyOrder(orderID)",
"def fulfilled_order(test_data):\n user, _, bootcamp_run = test_data\n order = OrderFactory.create(user=user, status=Order.FULFILLED)\n LineFactory.create(order=order, bootcamp_run=bootcamp_run, price=123.45)\n return order",
"def on_fill(self, oid, body):\n\t\tlogger.info('Consuming filled Order')\n\t\tfill = body['fill']\n\n\t\t# update the position first\n\t\tself.pos[fill.symbol].on_fill(fill)\n\n\t\t# getting data from the fill event\n\t\tQ = fill.quantity\n\t\tK, D, C = fill.fill_cost, fill.fill_type, fill.commission\n\n\t\tcost = D.value * K * Q\n\n\t\tself.commission += C\n\t\tself.cash -= cost + C",
"def execute_order(self, event):\n if event.type == 'ORDER':\n fill_event = FillEvent(datetime.datetime.utcnow(), event.symbol,\n 'ARCA', event.quantity, event.direction, None)\n self.events.put(fill_event)",
"def fill_order_info(self, fill_order_info):\n\n self._fill_order_info = fill_order_info",
"def completed(request):\n order_id = ''\n try:\n order_id = request.session['order_id']\n except:\n pass\n if order_id != '':\n auth = HTTPBasicAuth(klarna_un, klarna_pw)\n headers = {'content-type': 'application/json'}\n response = requests.get(\n settings.KLARNA_BASE_URL + '/checkout/v3/orders/' +\n order_id,\n auth=auth,\n headers=headers,\n )\n klarna_order = response.json()\n order = Order(\n order_id=klarna_order['order_id'],\n status=klarna_order['status'],\n given_name=klarna_order['billing_address']['given_name'],\n family_name=klarna_order['billing_address']['family_name'],\n email=klarna_order['billing_address']['email'],\n phone_number=klarna_order['billing_address']['phone'],\n country=klarna_order['billing_address']['country'],\n postcode=klarna_order['billing_address']['postal_code'],\n town_or_city=klarna_order['billing_address']['city'],\n street_address1=klarna_order['billing_address']['street_address'],\n order_total=klarna_order['order_amount'],\n klarna_line_items=klarna_order['order_lines']\n )\n order.save()\n request.session['cart'] = {}\n request.session['order_id'] = ''\n \n context = {\n 'klarna_order': klarna_order\n }\n\n return render(request, 'checkout/completed.html', context)\n else:\n return redirect(reverse(view_cart))",
"def order_success(self, request):\n order = self.order_from_request(request)\n\n if not order:\n return self.order_new(request)\n\n if not order.balance_remaining:\n self.set_order_on_request(request, order=None)\n\n\n order_data = OrderData.objects.get(order=order)\n o_data = simplejson.loads(order_data.data)\n\n paymentData = {}\n paymentData['delivery_address2'] = o_data['delivery_address2']\n paymentData['billing_address2'] = o_data['billing_address2']\n paymentData['delivery_date'] = o_data['delivery_date']\n paymentData['delivery_state'] = o_data['delivery_state']\n paymentData['billing_state'] = o_data['billing_state']\n paymentData['salutation'] = o_data['salutation']\n paymentData['contact_number'] = o_data['billing_contact_number']\n\n #try:\n oPayment = OrderPayment.objects.get(order=order)\n oPayment.payment_method = o_data['order_payment_method']\n oPayment.data = simplejson.dumps(paymentData)\n oPayment.save()\n #except:\n # pass\n\n \"\"\"\n order update note\n \"\"\"\n notes = o_data['order_notes']\n order.notes = notes\n order.save()\n\n # st_save_helper(request, order)\n\n \"\"\"\n sbid = None\n\n if 'customer_styleboard' in request.session:\n sbid = request.session.get('customer_styleboard').id\n\n if 'personalize_id' in request.session:\n print \"There's a personalize_id\"\n \"\"\"\n\n current_user = User.objects.get(id=int(request.user.id))\n\n if 'ipn_emailed' in o_data and o_data['ipn_emailed']:\n\n pass\n \n else:\n\n emailed = send_email_order(order, current_user, notes, paymentData['contact_number'], self)\n\n logr.info('emailed order confirmation to : %s from order success' % current_user.email)\n\n\n order_data.delete() # not needed after saving to order payment\\\n \n clear_styleboard_session(request)\n\n try:\n del request.session['customer_styleboard']\n del request.session['personalize_id']\n except:\n pass\n\n return self.render(request, 'plata/shop_order_success.html',\n self.get_context(request, {\n 'order': order,\n 'progress': 'success',\n }))",
"def test_acknowledge_orders(self):\n pass",
"def on_order(self, order: OrderData):\n pass",
"def on_order(self, order: OrderData):\n pass",
"def on_order(self, order: OrderData):\n pass",
"def post_save_order_receiver(sender, instance, created, *args, **kwargs):\n sender_email = instance.cargo.sender.email\n recepient_email = instance.cargo.recepient.email\n\n booking_agent = instance.cargo.booking_station.branch_agent.email\n\n if created:\n instance._set_order_price()\n instance._set_time_approximations()\n price = instance.price\n\n subject = \"Order Finalized and ready to go.\"\n message = f\"Your cargo has been booked and is ready for delivery. You will be notified whenever the status changes. It is currently {instance.get_status_display().title()}. It cost a total of ${price:.3f}. Your booking agent is {booking_agent}\"\n\n send_async_email(\n subject=subject,\n message=message,\n sender=booking_agent,\n recepients=[sender_email, recepient_email],\n )",
"def test_process_order(self):\n expected_contents = self.fill_session_cart()\n\n response = self.client.post(\n self.CHECKOUT_URL, self.build_checkout_form())\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Your order was placed.\")\n\n placed_order = OrderInfo.objects.get()\n order_contents = placed_order.ordercontents_set.all()\n # arbitrary 5 seconds to account for some fault\n self.assertTrue(\n timezone.now() - placed_order.ordered < timedelta(seconds=5))\n self.assertEqual(len(expected_contents), len(order_contents))\n for expected in expected_contents:\n db_contents = order_contents.get(menu_item__id=expected['id'])\n dict_from_db = {\n 'id': db_contents.menu_item.id,\n 'name': db_contents.menu_item.name,\n 'price': db_contents.menu_item.price,\n 'amount': db_contents.amount,\n 'cost': db_contents.cost,\n }\n self.assertEqual(expected, dict_from_db)",
"def notify_order(self, order):\n if order.status in [order.Submitted, order.Accepted]:\n return # active buy/sell order submitted/accepted - do nothing\n\n # check if order has been completed (could reject if not enough cash)\n if order.status in [order.Completed]:\n if order.isbuy():\n self.log(f'BUY EXECUTED, {order.executed.price:.2f}')\n elif order.issell():\n self.log(f'SELL EXECUTED, {order.executed.price:.2f}')\n elif order.status in [order.Canceled, order.Margin, order.Rejected]:\n self.log('Order Canceled/Margin/Rejected')\n\n self.bar_executed = len(self)\n\n self.order = None # reset orders",
"def order_finish(self):\r\n logger.info(f'Remaining qty:{self.quantity-self.filled_quantity}')\r\n self.is_active = False\r\n self.is_finished = True\r\n self.is_trading = False\r\n schedule.clear(tag=self.id)\r\n logger.info(f'Order {self.id} is finished')"
] | [
"0.69658357",
"0.6763817",
"0.6571123",
"0.6434398",
"0.642819",
"0.637576",
"0.63689506",
"0.6323911",
"0.6295991",
"0.6225115",
"0.6218789",
"0.6211016",
"0.619821",
"0.61803144",
"0.61738944",
"0.61684597",
"0.61589706",
"0.6155346",
"0.61527157",
"0.61496073",
"0.61088544",
"0.600536",
"0.60035604",
"0.5950763",
"0.5950763",
"0.5950763",
"0.5932399",
"0.5906284",
"0.5889912",
"0.5877747"
] | 0.72761214 | 0 |
Converts the auto-incremented id of a ShortURL object into a short-URL hash | def encode(shorturl_id: int) -> str:
short_resource = []
while shorturl_id > 0:
character_index = shorturl_id % BASE
short_resource.append(CHARACTER_SPACE[character_index])
shorturl_id //= BASE
return "".join(short_resource[::-1]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shortURLToId(self, shortURL):\n id = 0\n for i in shortURL: \n val_i = ord(i) \n if(val_i >= ord('a') and val_i <= ord('z')): \n id = id*62 + val_i - ord('a') \n elif(val_i >= ord('A') and val_i <= ord('Z')): \n id = id*62 + val_i - ord('Z') + 26\n else: \n id = id*62 + val_i - ord('0') + 52\n return id",
"def short_url(lastid):\n number = lastid +100000000000\n bs62encoded = base62.encode(number)\n return 'https://abc.com/{id}'.format(id=str(bs62encoded))",
"def get_or_create_short_url(self, url):\n hash = utils.gen_hash()\n url_short_obj, _ = self.get_or_create(url=url, defaults={'hash': hash})\n return url_short_obj",
"def __create_short_url(self):\n last_short_url = Url.__load_last_short_url()\n short_url = self.__increment_string(last_short_url)\n Url.__save_last_short_url(short_url)\n return short_url",
"def gen_shorter_url(long_url):\n if long_url in URL_PAIR_STORE.long_url:\n return URL_PAIR_STORE.short_url[\n URL_PAIR_STORE.long_url == long_url]\n else:\n short_url = DOMAIN_NAME + '/' + do_hashing(long_url)\n new_entry = URLPair(\n id=gen_unique_id(),\n long_url=long_url,\n short_url=short_url,\n )\n insert_new_pairs(new_entry)\n return short_url",
"def encode(self, longUrl):\n self.hash = {}\n if longUrl not in self.hash:\n idx = hash(longUrl)\n self.hash[idx] = longUrl\n final_string = 'https://tinyurl.com/' + str(idx)\n return (final_string)",
"def decode(self, shortUrl):\n v = shortUrl[20:len(shortUrl)]\n return (self.hash[int(v)])",
"def self_assign_short_url(self):\n self.image_short_url = short_url.encode_url(self.id)\n return self.image_short_url",
"def encode(self, longUrl):\n shortUrl = \"http://tinyurl.com/\" + str(hash(longUrl))\n self.decode_map[shortUrl] = longUrl\n return shortUrl",
"def encode(self, longUrl: str) -> str:\n while True:\n result = hashlib.sha256(longUrl.encode()).hexdigest()\n shortUrl = result[:7]\n if longUrl not in self.bucket.get(shortUrl):\n self.bucket.put(shortUrl, longUrl)\n break \n return shortUrl",
"def _shortenUrl(self, url):\n posturi = \"https://www.googleapis.com/urlshortener/v1/url\"\n headers = {'Content-Type' : 'application/json'}\n data = {'longUrl' : url}\n data = json.dumps(data)\n request = urllib2.Request(posturi,data,headers)\n response = urllib2.urlopen(request)\n response_data = response.read()\n shorturi = json.loads(response_data)['id']\n return shorturi",
"def shortener(url_hash: str) -> TResponse:\n shortened_id = decode(url_hash)\n tags = db.session.query(Shortener).get(shortened_id)\n if tags is None:\n return jsonify(error='/@%s not found' % str(url_hash)), 404\n\n tags = dict(tags.__dict__)\n tags.pop('_sa_instance_state', None)\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(hash=url_hash, short_url='https://fanlens.io/@%s' % url_hash, tags=tags)\n else:\n user_agent = request.headers.get('User-Agent', '').lower()\n if user_agent.startswith('twitterbot') or user_agent.startswith('facebo') or user_agent.startswith('LinkedIn'):\n return render_template('shortener.html', **tags)\n return redirect(tags['url'], code=302)",
"def retrieve(short_id):\n try:\n url = Url.get(short_id)\n\n url.update(actions=[\n url.hits.set(url.hits + 1),\n url.lastHit.set(datetime.utcnow())\n ])\n\n return jsonify({\n \"statusCode\": 301,\n \"location\": url.longURL\n })\n\n except:\n return jsonify({\"Error\", \"No Such ID\"})",
"def long_to_short(self, url, url_mobile=None, url_tablet=None):\n\n temp_short = uuid4() #temporary short code so we can get lastworid after insert\n query = 'INSERT into urls(short,default_url,mobile_url,tablet_url) VALUES (\"{short}\",\"{url}\",\"{mobile}\",\"{tablet}\");'.\\\n format(short=temp_short, url=url,\n mobile=url_mobile, tablet=url_tablet)\n with sq.connect(self.DB) as conn:\n cursor = conn.cursor()\n try:\n cursor.execute(query)\n url_id = cursor.lastrowid + 1\n based_id = base36.encode(url_id)\n #Update to the definitive short url\n update_query = 'UPDATE urls SET short = \"{new_short}\" WHERE short = \"{temp_uuid}\";'.\\\n format(new_short=based_id, temp_uuid=temp_short)\n cursor.execute(update_query)\n return based_id\n except sq.OperationalError:\n print(\"ERROR\")\n return False\n except ValueError:\n return False",
"def encode(self, longUrl: str) -> str:\n ans = \"http://tinyurl.com/\" + hex(abs(hash(longUrl)))\n self.lookup[ans] = longUrl\n return ans",
"def short_id(self):\n if self.short_id_missing:\n return \"0\" * settings.ID_LENGTH\n return str(self.id)[0:settings.ID_LENGTH]",
"def encode(self, longUrl):\n url_list = []\n md5 = hashlib.md5()\n md5.update(longUrl.encode('UTF-8'))\n hash_bytes = md5.hexdigest()\n for i in range(0, 32, 8):\n url_bytes = hash_bytes[i:i + 8]\n n = int(url_bytes, 16)\n n &= 0x3FFFFFFF\n short_url = \"\"\n for j in range(0, 6):\n k = n & 0x1F\n # print(k)\n short_url += Codec.chars[k]\n n >>= 5\n url_list.append(short_url)\n short_url = url_list[random.randint(0, 3)]\n Codec.url_map[short_url] = longUrl\n return short_url",
"def track_to_hash(track):\n return hashlib.sha1(track.encode('utf-8')).hexdigest()",
"def banner_hash(self) -> undefined.UndefinedNoneOr[str]:",
"def _calculate_hash(self, entry):\n entry.pop('id', None)\n return hashlib.sha224(json.dumps(\n entry, cls=DjangoJSONEncoder).encode('utf-8')).hexdigest()",
"def decode(self, shortUrl):\n cleanedID = shortUrl[len(self.baseUrl)+len(self.prefix):]\n long_URL = self.storage[cleanedID]\n return long_URL",
"def encodeUrl(self, id):\n characters = \"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n # base = 62\n base = len(characters)\n ret = []\n while id > 0:\n val = id % base\n ret.append(characters[val])\n id = id // base\n # reverse and return\n return \"\".join(ret[::-1])",
"def getHash():\n return str(uuid.uuid4())[-17:].replace(\"-\", \"\")",
"def _short_id(video_id):\n return '-'.join(video_id.split('-')[0:2])",
"def get_key(self, obj):\n if hasattr(obj, \"id\"):\n hashed_id = hashlib.md5(str(obj.id).encode(\"utf-8\")).hexdigest()\n return hashed_id\n else:\n return None",
"def _extract_image_short_id(scan_result: dict[str, Any]) -> str:\n\n if \"id\" not in scan_result:\n return \"sha256:unknown\"\n\n image_id: str = scan_result[\"id\"]\n\n if image_id.startswith(\"sha256:\"):\n return image_id[:17]\n return image_id[:10]",
"def encode(self, longUrl):\n if self.map.get(longUrl) is None:\n tiny_url = \"http://tinyurl.com/\" + str(self.counter)\n self.demap[tiny_url] = longUrl\n self.map[longUrl] = tiny_url\n self.counter += 1\n return tiny_url\n else:\n return self.map[longUrl]",
"def url_generator(request):\n if request.method == \"POST\":\n data = json.loads(request.body.decode(\"utf-8\"))\n url_received = data.get(\"urlToShorten\")\n shortened_url = check_available_short_url()\n new_url = Url.objects.create(long_url=url_received, short_url=shortened_url)\n new_url.save()\n\n return JsonResponse(\n {\"short_url\": new_url.short_url, \"long_url\": new_url.long_url}\n )",
"def make_hash(self, long_url: str, hash_length: int):\n hasher = hashlib.md5(long_url.encode())\n bytes_hash = base64.urlsafe_b64encode(hasher.digest())[:hash_length]\n str_hash = bytes_hash.decode()\n return str_hash",
"def get_id_shortlink(link = None):\n choppedLink = legacy_check(link)\n id = None\n try:\n id = choppedLink[3] # or -1 instead of 3\n except:\n pass #dont care bout issues here\n return id"
] | [
"0.7222362",
"0.6813473",
"0.6670428",
"0.6613821",
"0.6553491",
"0.6440487",
"0.6438503",
"0.6381131",
"0.6311566",
"0.62947756",
"0.6259774",
"0.62407327",
"0.620263",
"0.61301494",
"0.61228126",
"0.6112265",
"0.6034276",
"0.59468937",
"0.5900785",
"0.5891555",
"0.58526796",
"0.58479637",
"0.5830146",
"0.577403",
"0.5746225",
"0.57419556",
"0.5739081",
"0.5698361",
"0.56901735",
"0.56738794"
] | 0.69266266 | 1 |
Attempts to place the mover into contents. Returns a Boolean indicating success. | def contain(self, mover):
# Check if mover can exit old location
old_location = mover.location
if(not old_location):
return False
if(not old_location.allow_exit(mover)):
return False
# Check if mover can enter current location
if(not self.allow_entry(mover)):
return False
# Set new location
if(not self.contents):
self.contents = []
self.contents.append(mover)
mover.location = self
# Inform both locations of movement
if(old_location):
old_location.exited(mover)
self.entered(mover)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def move(self) -> bool:\n pass",
"def allow_entry(self, mover):\n return True",
"def test_object_move(self):\n self.assertTrue(self.obj1 in self.room1.contents)\n # use move_to hook\n self.obj1.move_to(self.room2)\n self.assertFalse(self.obj1 in self.room1.contents)\n self.assertTrue(self.obj1 in self.room2.contents)\n\n # move back via direct setting of .location\n self.obj1.location = self.room1\n self.assertTrue(self.obj1 in self.room1.contents)\n self.assertFalse(self.obj1 in self.room2.contents)",
"def allow_exit(self, mover):\n return True",
"def entered(self, mover):\n pass",
"def _ispinnedmove(self, from_, to_):\n return False",
"def metropolis_accept_move(self):\n return self.mc.metropolis(self)",
"def move_atoms(self):\n return self.abivars.ionmov != 0",
"def take_control_over(self, other):\n a = self\n if a == other: return\n if util.onechancein(6): #make a master of b\n if other.master is not None:\n if other.master != a and a.master != other: #if b already had master, make a enemy of b.master\n a.history.append('In year %d %s tried to overtake the control over %s, but failed' % (world.year, a.name, other.name))\n other.master.conflict_with(a)\n else:\n if a.master == other: #if we overtook controll\n a.master = None\n try:\n other.minions.remove(a)\n except ValueError: pass\n try:\n other.master.minions.remove(other)\n except Exception : pass\n a.minions.append(other)\n other.master = a\n a.history.append('In year %d %s became boss over %s' %(world.year, a.name, other.name))",
"def has_moved(self):\n return self.move_count > 0",
"def can_be_moved(self, card):\n return is_one_rank_apart(self.waste_card, card)",
"def test_verify_move(self):\n self._verify([self.applied_commands['move']])",
"def _maybe_move(self, source_chunk, target_chunk, path_index, move_func):\n if len(source_chunk.paths) <= 1:\n return False\n\n move_time = source_chunk.paths[path_index].time\n\n new_source_badness = self._badness(source_chunk.time - move_time)\n new_target_badness = self._badness(target_chunk.time + move_time)\n\n delta_badness = ((new_source_badness + new_target_badness) -\n (source_chunk.badness + target_chunk.badness))\n if delta_badness < 0:\n move_func()\n return True\n\n return False",
"def apply_move(self, move):\n if self.check_move(move=move):\n self.board_list[move] = self.current_player.marker # changes value in the board to player which is either X or O\n self.moves_made += str(move) # keeps track of all moves\n return True\n else:\n return False",
"def attach(self, _place):\n\n\t\tself.kill()\n\t\tif not _place or not _place.putItem(self):\n\t\t\treturn False\n\n\t\tself.rect.center = (-1000, -1000) # trzeba go wyrzucić poza obszar rysowania(żeby nie wisiał w dziwnym miejscu póki się ekran nie odświeży)\n\t\tself._attached = _place\n\t\tself._detached = False\n\t\treturn True",
"def move_piece(self, addr_from: str, addr_to: str) -> bool:\n\n pos_from = self.get_pos(addr_from)\n pos_to = self.get_pos(addr_to)\n piece = pos_from.piece\n\n if isinstance(piece, Piece):\n res = piece.move(pos_to)\n # print(f\"moved {piece} from {addr_from} to {addr_to}\")\n if res:\n self._push_move(res)\n return True\n return False",
"def has_moved(self):\n return bool(self.rename_phases)",
"def move(self, orig_pos, new_pos):\n orig_x, orig_y = orig_pos\n new_x, new_y = new_pos\n\n orig_i = orig_y * self.ncols + orig_x\n new_i = new_y * self.ncols + new_x\n\n orig_piece = self.squares[orig_i]\n new_piece = self.squares[new_i]\n\n # ensure there is no vertical or horizontal movement\n if orig_piece.can_move(orig_pos, [(new_pos, new_piece)]):\n self.squares[new_i] = self.squares[orig_i]\n self.squares[orig_i] = None\n return True\n return False",
"def _is_valid_move(self, vector, current_piece, other_piece):\n return True",
"def move_to(self, dest, force_move=False):\n origin = self.location\n if self.fixed and force_move == False:\n if hasattr(self, 'is_liquid'):\n if not dest.liquid:\n return False\n elif not hasattr(dest, 'exits'):\n return False # cannot move an object that is fixed in place\n if origin:\n origin.extract(self)\n # if cannot insert into destination, return to where it came from\n # (dest.insert returns True if insertion fails)\n if not dest.insert(self, force_insert=force_move): \n return True\n else:\n if (origin):\n origin.insert(self, force_insert=True)\n return False",
"def testCheckMoveOperation_FailStagnantBlocks(self):\n payload_checker = checker.PayloadChecker(self.MockPayload())\n op = update_metadata_pb2.InstallOperation()\n op.type = common.OpType.MOVE\n\n self.AddToMessage(op.src_extents,\n self.NewExtentList((1, 4), (12, 2), (1024, 128)))\n self.AddToMessage(op.dst_extents,\n self.NewExtentList((8, 128), (512, 6)))\n self.assertRaises(\n PayloadError, payload_checker._CheckMoveOperation,\n op, None, 134, 134, 'foo')",
"def can_undo(self) -> bool:\n\n return self.position > 0",
"def move_to_inspect_pose(self, inspect_target):\n # calculate the arm_lift_link which must be sent\n z_head = inspect_target.z() + self.z_over\n\n # check whether moving the arm is necessary\n if z_head < 1.3:\n rospy.logdebug(\"Entity is low enough. we don't need to move the arm\")\n return True\n\n # saturate the arm lift goal\n z_arm = (z_head - self.z_hh) * self.torso_to_arm_ratio\n z_arm = min(0.69, max(z_arm, 0.0)) # arm_lift_joint limit\n\n arm = self.get_arm(required_goals=['arm_out_of_way'])\n\n # noinspection PyProtectedMember\n pose = arm._arm.default_configurations['arm_out_of_way']\n pose[0] = z_arm\n # noinspection PyProtectedMember\n arm._arm._send_joint_trajectory([pose])\n\n self.base.turn_towards(inspect_target.x(), inspect_target.y(), \"map\", 1.57)\n arm.wait_for_motion_done()\n self.base.wait_for_motion_done()\n return True",
"def canMove(self):\n\n if self.index == len(self.path):\n self.move = False\n return self.move",
"def test_rover_position(self):\n rover = Rover(self.plateau_dimensions, self.rover_initial_position, Rover.DIRECTIONS.get('E'))\n rover.execute_instructions(\"LMLM\")\n self.assertEqual(rover._position.x, 1)\n self.assertEqual(rover._position.y, 2)\n self.assertEqual(rover.get_heading, 'W')",
"def is_valid(self, layer: int, index: int, tower) -> bool:\r\n tower = copy.deepcopy(tower)\r\n tower.move_piece(layer, index)\r\n \r\n if tower.will_fall():\r\n del tower\r\n return False\r\n else:\r\n del tower\r\n return True",
"def move_valid(move):\n return True",
"def test_contents_order(self):\n self.assertEqual(\n self.room1.contents, [self.exit, self.obj1, self.obj2, self.char1, self.char2]\n )\n self.assertEqual(self.room2.contents, [])\n\n # use move_to hook to move obj1\n self.obj1.move_to(self.room2)\n self.assertEqual(self.room1.contents, [self.exit, self.obj2, self.char1, self.char2])\n self.assertEqual(self.room2.contents, [self.obj1])\n\n # move obj2\n self.obj2.move_to(self.room2)\n self.assertEqual(self.room1.contents, [self.exit, self.char1, self.char2])\n self.assertEqual(self.room2.contents, [self.obj1, self.obj2])\n\n # move back and forth - it should\n self.obj1.move_to(self.room1)\n self.assertEqual(self.room1.contents, [self.exit, self.char1, self.char2, self.obj1])\n self.obj1.move_to(self.room2)\n self.assertEqual(self.room2.contents, [self.obj2, self.obj1])\n\n # use move_to hook\n self.obj2.move_to(self.room1)\n self.obj2.move_to(self.room2)\n self.assertEqual(self.room2.contents, [self.obj1, self.obj2])",
"def attempt_move(self, move_input):\n # handle undo move\n if move_input == ['UN', 0, 'UN']:\n self.undo_move()\n return True\n\n # handle stock draw Special Action first\n if move_input == ['S0', 0, 'S0']:\n self.save_board_state()\n self.stock.deal_to_wp(self.wp)\n self.moves += 1\n return True\n\n # handle basic cases\n if len(move_input) != 3:\n return False\n if move_input[0] not in self.move_dict or move_input[2] not in self.move_dict:\n return False\n if type(move_input[1]) is not int:\n return False\n if move_input[2] == \"W0\":\n return False\n\n orig_pile = self.move_dict[move_input[0]]\n orig_ind = move_input[1]\n dest_pile = self.move_dict[move_input[2]]\n if orig_ind >= orig_pile.get_length():\n return False\n\n # handle flip tableau card Special Action\n if move_input[0][0] == 'T' and orig_pile == dest_pile and orig_ind == 0:\n orig_pile.reveal_top_card()\n\n # basic conditions have been met\n adj_ind = orig_pile.get_length() - orig_ind - 1\n if orig_pile.is_valid_retrieval(orig_ind):\n self.save_board_state()\n move_pile = orig_pile.remove_cards(orig_ind + 1)\n if dest_pile.is_valid_placement(move_pile):\n dest_pile.merge_pile(move_pile)\n if move_input[0][0] == 'T' and self.auto_flip_tab:\n orig_pile.reveal_top_card()\n self.moves += 1\n return True\n else:\n orig_pile.merge_pile(move_pile)\n self.board_states.pop()\n return False\n return False",
"def game_over(self):\n self.over = True"
] | [
"0.5737424",
"0.5638439",
"0.5441351",
"0.5409434",
"0.5399845",
"0.5309671",
"0.52931213",
"0.5276102",
"0.5168468",
"0.5082271",
"0.50646865",
"0.505076",
"0.50131536",
"0.50104046",
"0.50004804",
"0.4979135",
"0.49734983",
"0.4963041",
"0.49610388",
"0.49543136",
"0.49519753",
"0.4899124",
"0.48883027",
"0.48705944",
"0.4868707",
"0.48290384",
"0.4824386",
"0.48162547",
"0.48109806",
"0.480459"
] | 0.7234833 | 0 |
Determines if the mover is permitted to enter the room | def allow_entry(self, mover):
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def allowedToEnter(self):\n if base.cr.isPaid():\n return True\n place = base.cr.playGame.getPlace()\n myHoodId = ZoneUtil.getCanonicalHoodId(place.zoneId)\n if myHoodId in \\\n (ToontownGlobals.ToontownCentral,\n ToontownGlobals.MyEstate,\n ToontownGlobals.GoofySpeedway,\n ):\n # trialer going to TTC/Estate/Goofy Speedway, let them through\n return True\n return False",
"def won(self):\n if self.current_room.name == \"Victory\":\n return True\n else:\n return False",
"def can_act(self, **kwargs):\n source_entity = kwargs[action.SOURCE_ENTITY]\n item = self._get_item_on_floor(source_entity)\n return (not item is None and\n source_entity.inventory.has_room_for_item(item))",
"def is_legal(self, start, end) -> bool:\n return self.board(end) == 0 \\\n and self.board(start) > 0 \\\n and self._check_zone_locks(start, end) \\\n and self.exists_path(start, end)",
"def allow_exit(self, mover):\n return True",
"def enter_night_club(individual):\n if individual.age > LEGAL_DRINKING_AGE:\n print(\"Allowed to enter.\")\n else:\n print(\"Enterance of minors is denited.\")",
"def validate_can_enter(self, user, contest_pool):\n\n # the contest attempting to be joined\n target_skill_level = contest_pool.skill_level\n if target_skill_level.enforced == False:\n return # the skill level of this contest is not enforced -- anyone can join no matter what\n\n # find any enforced skill_levels we have an entry in not matching our target.\n # if any are found, that means we cant join and must raise exception\n entries = Entry.objects.filter(\n user=user,\n contest_pool__draft_group=contest_pool.draft_group,\n contest_pool__skill_level__enforced=True\n ).exclude(contest_pool__skill_level=target_skill_level)\n\n if entries.count() > 0:\n raise self.CanNotEnterSkillLevel()",
"def _checkPlayer(self):\r\n pawn = self.startCell.getPawn()\r\n if(not pawn.owner == self.player):\r\n message = (\"Player (%r) is not allowed to move that pawn (%r)\" %\r\n (self.player, pawn))\r\n raise IllegalMoveException(message)",
"def can_exist_outside_of_game(self):\n return True",
"def can_exist_outside_of_game(self):\n return True",
"def is_legal_move(self, house_num):\n return True",
"def contain(self, mover):\n # Check if mover can exit old location\n old_location = mover.location\n if(not old_location):\n return False\n if(not old_location.allow_exit(mover)):\n return False\n # Check if mover can enter current location\n if(not self.allow_entry(mover)):\n return False\n # Set new location\n if(not self.contents):\n self.contents = []\n self.contents.append(mover)\n mover.location = self\n # Inform both locations of movement\n if(old_location):\n old_location.exited(mover)\n self.entered(mover)\n return True",
"def is_legal(self, move, player, board):\r\n if(self.is_valid(move)==False):\r\n return False\r\n if(board[move]!=core.EMPTY):\r\n return False\r\n return True",
"def _check_for_win(self):\n slots_available = any(\n [slot.available for slot in self.board.iter_slots() if not slot.mine]\n )\n if not slots_available:\n self.status = GameStatusEnum.won\n self.end_time = datetime.utcnow()",
"def canAct(self) -> bool:\n return self.cooldown < 1",
"def still_valid(self) -> bool:\n return self._data.player_alive(self._data.player_turn)",
"def is_allowed(self, cpos):\n if self.step is None:\n return True\n \n # has the player clicked on one of the allowed cells?\n if (cpos in self.step.toclick):\n # mark step as finished\n self.step.finished = True\n return True\n return False",
"def is_valid_room(self, x, y):\r\n return 0 <= x < self.__nx and 0 <= y < self.__ny",
"def is_allowed(self) -> bool:\n return self.effect == ALLOW_ACCESS",
"def can_act(self) -> bool:\n return self.cooldown < 1",
"def can_act(self) -> bool:\n return self.cooldown < 1",
"def test_change_classroom_specific_for_coach_pt2(self):\n self.assertFalse(self.coach2.has_perm('auth.change_classroom', self.classrooms[0]))",
"def permit(self):\n in_role = self.in_role(self.role)\n if type(in_role) is not bool:\n raise TilesError(\"Function '{0}' must return a boolean value.\".format(self.in_role.__name__))\n return in_role",
"def can_spawn(self, entities, player):\n if self.spawner_cooldown_level > 0:\n self.spawner_cooldown_level -= 1\n return False\n else:\n if player.distance_to(self.owner) <= self.spawn_radius:\n self.spawner_cooldown_level = self.spawner_cooldown\n return self.room.monster_limit_reached(entities)",
"async def _is_room_accessible(\n self, room_id: str, requester: Optional[str], origin: Optional[str]\n ) -> bool:\n state_ids = await self._store.get_current_state_ids(room_id)\n\n # If there's no state for the room, it isn't known.\n if not state_ids:\n logger.info(\"room %s is unknown, omitting from summary\", room_id)\n return False\n\n room_version = await self._store.get_room_version(room_id)\n\n # if we have an authenticated requesting user, first check if they are able to view\n # stripped state in the room.\n if requester:\n member_event_id = state_ids.get((EventTypes.Member, requester), None)\n\n # If they're in the room they can see info on it.\n member_event = None\n if member_event_id:\n member_event = await self._store.get_event(member_event_id)\n if member_event.membership in (Membership.JOIN, Membership.INVITE):\n return True\n\n # Otherwise, check if they should be allowed access via membership in a space.\n try:\n await self._event_auth_handler.check_restricted_join_rules(\n state_ids, room_version, requester, member_event\n )\n except AuthError:\n # The user doesn't have access due to spaces, but might have access\n # another way. Keep trying.\n pass\n else:\n return True\n\n # If this is a request over federation, check if the host is in the room or\n # is in one of the spaces specified via the join rules.\n elif origin:\n if await self._auth.check_host_in_room(room_id, origin):\n return True\n\n # Alternately, if the host has a user in any of the spaces specified\n # for access, then the host can see this room (and should do filtering\n # if the requester cannot see it).\n if await self._event_auth_handler.has_restricted_join_rules(\n state_ids, room_version\n ):\n allowed_spaces = (\n await self._event_auth_handler.get_spaces_that_allow_join(state_ids)\n )\n for space_id in allowed_spaces:\n if await self._auth.check_host_in_room(space_id, origin):\n return True\n\n # otherwise, check if the room is peekable\n hist_vis_event_id = state_ids.get((EventTypes.RoomHistoryVisibility, \"\"), None)\n if hist_vis_event_id:\n hist_vis_ev = await self._store.get_event(hist_vis_event_id)\n hist_vis = hist_vis_ev.content.get(\"history_visibility\")\n if hist_vis == HistoryVisibility.WORLD_READABLE:\n return True\n\n logger.info(\n \"room %s is unpeekable and user %s is not a member / not allowed to join, omitting from summary\",\n room_id,\n requester,\n )\n return False",
"def test_change_classroom_specific_for_coach_pt1(self):\n self.assertTrue(self.coach2.has_perm('auth.change_classroom', self.classrooms[1]))",
"def validate_login(self, request):\n\n if 'id' not in request.session or 'steam_id' not in request.session:\n raise PermissionDenied('You need to login')\n\n # if self.mode9:\n # if 'team' not in PlayerList[request.session['id']]:\n # raise PermissionDenied('Player is not in a team!')",
"def entered(self, mover):\n pass",
"def level_unlocked(self) -> bool:\r\n return self.player_profile.is_level_unlocked(self.level_num)",
"def verify_privileged(self):\n community_text = self.fetch(self.base_url + \"/community\")\n return \"You must be logged in to see this page.\" not in community_text"
] | [
"0.6933369",
"0.6317256",
"0.6274393",
"0.6119734",
"0.6043591",
"0.6006229",
"0.59973824",
"0.5966571",
"0.59650975",
"0.59650975",
"0.5949079",
"0.590017",
"0.58607984",
"0.5843126",
"0.5832102",
"0.58177775",
"0.57856125",
"0.57780486",
"0.5743955",
"0.57223856",
"0.57223856",
"0.57216465",
"0.5716824",
"0.57089305",
"0.56972337",
"0.5678387",
"0.5654636",
"0.5647491",
"0.56461704",
"0.5635287"
] | 0.66670233 | 1 |
Determines if the mover is permitted to exit the room. | def allow_exit(self, mover):
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_exited(self):\n agents = self.board[self.agent_locs_idx]\n return agents & (CellTypes.agent | CellTypes.exit) == CellTypes.exit",
"def can_exit(self) -> bool:\n return False",
"def check_exit(self, position, direction):\n if self.get_room((position[0] + direction[0], position[1] + direction[1])):\n return True\n return False",
"def can_reach_exit(self, position):\n return self.__verify_exit_path(position)",
"def allowedToEnter(self):\n if base.cr.isPaid():\n return True\n place = base.cr.playGame.getPlace()\n myHoodId = ZoneUtil.getCanonicalHoodId(place.zoneId)\n if myHoodId in \\\n (ToontownGlobals.ToontownCentral,\n ToontownGlobals.MyEstate,\n ToontownGlobals.GoofySpeedway,\n ):\n # trialer going to TTC/Estate/Goofy Speedway, let them through\n return True\n return False",
"def EndGame(self):\n check_endgame = not self.player.getPlayer().isGeneralExist()\n\n return check_endgame",
"def isExit(self):\n return self.exit",
"def is_exit(self, x_coordinate, y_coordinate):\n if self.grid[x_coordinate][y_coordinate] == POINT_OF_EXIT:\n return True\n else:\n return False",
"def endState(self):\n return not(self.state.winner() == None and len(self.state.get_actions()) > 0)",
"def can_exist_outside_of_game(self):\n return True",
"def can_exist_outside_of_game(self):\n return True",
"def won(self):\n if self.current_room.name == \"Victory\":\n return True\n else:\n return False",
"def place_exit(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__exit_room = x, y\r\n if self.exit_room() == self.pillar_a_room() or \\\r\n self.exit_room() == self.pillar_e_room() or \\\r\n self.exit_room() == self.pillar_i_room() or \\\r\n self.exit_room() == self.pillar_p_room() or \\\r\n self.exit_room() == self.entrance_room():\r\n return self.place_exit()\r\n self.__maze[x][y].set_exit(True)",
"def reservation_mark_exit(user: User, reservation: Reservation):\n owns_restaurant = reservation.restaurant.operator == user\n if owns_restaurant and reservation.status is ReservationState.SEATED:\n #Might want to add user notification\n reservation.exit_time = datetime.datetime.now()\n reservation.status = ReservationState.DONE\n db.session.commit()\n return True\n\n return False",
"def level_unlocked(self) -> bool:\r\n return self.player_profile.is_level_unlocked(self.level_num)",
"def is_legal(self, start, end) -> bool:\n return self.board(end) == 0 \\\n and self.board(start) > 0 \\\n and self._check_zone_locks(start, end) \\\n and self.exists_path(start, end)",
"def checkMissionEnd(self) -> bool:\n if getTimestamp() - self.mission['timestamp'] < self.TAKE_OFF_DELAY:\n return False\n drone: Drone\n for drone in self.dronesSet.getDrones().values():\n if drone['state'] != 'onTheGround' and drone['state'] != 'crashed':\n return False\n\n self.endMission()\n return True",
"def exited(self, mover):\n pass",
"def allow_entry(self, mover):\n return True",
"def is_exit(self):\n return self._exit",
"def leave(self):\n self.pleaseQuit=1",
"def can_leave_team(uid):\n current_user = get_user(uid=uid)\n current_team = api.team.get_team(current_user[\"tid\"])\n if current_team[\"team_name\"] == current_user[\"username\"]:\n return False\n if current_team[\"creator\"] == uid and current_team[\"size\"] != 1:\n return False\n if len(api.submissions.get_submissions(uid=uid)) > 0:\n return False\n return True",
"def check_end_game(self):\n return False if (any(self.p1_pits()) and any(self.p2_pits())) else True",
"def still_valid(self) -> bool:\n return self._data.player_alive(self._data.player_turn)",
"def is_legal_move(self, house_num):\n return True",
"def checkEndLevel(self, player):\n\n playerCoordinates = (player.positionRect.x, player.positionRect.y)\n\n #If the player coordinates match the end coordinates, he/she goes to\n #next level\n if playerCoordinates == self._get_end():\n #We return True because the player has finished the level\n return True\n\n else:\n #We return False\n return False",
"def is_valid_exit(exits, chosen_exit):\r\n return chosen_exit in exits",
"def is_end_game(self):\n win = self.is_game_won()\n tie = self.game_is_tied()\n return win or tie",
"def leave_in_play(self):\n return self._leave_in_play",
"def is_exclusive(self):\n return self.exclusive"
] | [
"0.6301413",
"0.6263654",
"0.62400985",
"0.60136616",
"0.58825636",
"0.58812755",
"0.58409286",
"0.5794512",
"0.5793184",
"0.5782265",
"0.5782265",
"0.57529366",
"0.5692975",
"0.5673591",
"0.56546485",
"0.56517553",
"0.5650926",
"0.5650613",
"0.5642109",
"0.56175417",
"0.5615448",
"0.5610452",
"0.56074816",
"0.5606647",
"0.5593452",
"0.5582624",
"0.5571237",
"0.55623186",
"0.5521368",
"0.5478422"
] | 0.7196453 | 0 |
Called after the mover has entered the room. | def entered(self, mover):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exited(self, mover):\n pass",
"def after_turn(self):\n pass",
"def game_over(self):\n self.end_of_level()\n self.message_holder.add_widget(self.you_lose_label)\n Clock.schedule_once(self.goto_menu, 5)",
"def end_of_level(self):\n Clock.unschedule(self.update)\n self.no_events()",
"def on_pre_leave(self):\n Logger.info('Application: Leaving the Intro screen.')",
"def on_pre_leave(self):\n Logger.info('Application: Leaving the Intro screen.')",
"def onMasterLost(self):",
"def on_enter(self):\n raise NotImplemented(\"on_enter method should be implemented.\")",
"def onenterready(self, event):\n print('onenterready; event: %s, %s->%s' % (event.event, event.src, event.dst))",
"def notify_game_over(self):\n self.is_game_over = True",
"def updateComplete(self):\n self.livesScreen()\n if self.getWave().getLives() == 0:\n self.deathScreen()\n else:\n self.winScreen()",
"def on_pre_enter(self):\n Logger.info('Application: Changed to the Return screen.')",
"def event_game_over(self):\n print('Game over!')\n self._cmd_exit()",
"def you_won(self):\n self.end_of_level()\n self.message_holder.add_widget(self.you_win_label)\n Clock.schedule_once(self.goto_next_level, 5)",
"def won_battle(self) -> None:\n\n self._battle_scene.pop_scene()\n\n if self._battle_over_callback:\n self._battle_over_callback()",
"def on_client_enter(self, game) -> None:\n pass",
"def on_pre_leave(self):\n Logger.info('Application: Leaving the Combat screen.')\n self.updater.cancel() # Clear the event interval.\n self.stop_soundtrack()",
"def onEnd(self, agent):\n\n pass",
"def on_enter(self, userdata):\n pass",
"def state_finish_enter(cfg, app, win):",
"def _handle_disconnected(self, event):\n self.roster = {}",
"def on_finish(self):\n pass",
"def update_monster(self):\n\n\t\t# if nothing else gets added to this (no other changes to update) you could delete\n\t\t# this function and simply call self.choose_guard() in its place\n\t\tself.guarded_area = self.choose_guard()",
"def on_step(self) -> None:\r\n\r\n if self.board == None:\r\n return\r\n\r\n TkState.disable(self.edit_menu.winfo_children())\r\n TkState.enable([self.reset_button])\r\n self.anim_board.next_gen()\r\n self.on_new_generation()\r\n self.painter.draw_board()",
"def game_over(self):\n self.over = True",
"def onMessageEnd(self):",
"def _on_walk(self):\n pass",
"def do_after(self):\r\n pass",
"def on_event_finished(self, event):",
"def unaway(self):\n self.away()"
] | [
"0.6746905",
"0.6695229",
"0.66767555",
"0.63322276",
"0.627838",
"0.627838",
"0.6254879",
"0.6229721",
"0.6191655",
"0.61626804",
"0.60663325",
"0.6041078",
"0.598377",
"0.59339255",
"0.5903108",
"0.585522",
"0.5828244",
"0.5815816",
"0.5759303",
"0.57528126",
"0.5750263",
"0.5746356",
"0.5740736",
"0.57302654",
"0.572367",
"0.5723495",
"0.5717267",
"0.56988055",
"0.56900376",
"0.56773406"
] | 0.75459206 | 0 |
Called after the mover has exited the room. | def exited(self, mover):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def leave(self):\n self.pleaseQuit=1",
"def on_pre_leave(self):\n Logger.info('Application: Leaving the Intro screen.')",
"def on_pre_leave(self):\n Logger.info('Application: Leaving the Intro screen.')",
"def endGame(self):\n pass",
"def on_client_exit(self, game) -> None:\n pass",
"def onMasterLost(self):",
"def on_unload(self):\n pass",
"def after_turn(self):\n pass",
"def on_exit(self):\n pass",
"def leave_loose_game(self):\n self.update_json_file()\n self.end = True\n self.root.destroy()\n GameOver()",
"def entered(self, mover):\n pass",
"def end_of_level(self):\n Clock.unschedule(self.update)\n self.no_events()",
"def onEnd(self, agent):\n\n pass",
"def death(self):\n print \"{0} has died, like many before. {0} survived {1} rooms.\".format(self.name, self.roomCt)\n exit()",
"def on_exit(self, next_scene):",
"def game_exit(self):\n self.set_state(GameState.EXITING)\n self.game_stop()\n self.game_log_statistics()",
"def on_pre_leave(self):\n Logger.info('Application: Leaving the Combat screen.')\n self.updater.cancel() # Clear the event interval.\n self.stop_soundtrack()",
"def _leave(self, *args):\n if not self.game:\n raise ServerException('not playing a game')\n self.game.leave(self)\n self.game = self.player = None",
"async def on_room_deinit(self, room_obj):\n pass",
"def end(self, won, reason):\n pass\n # replace with your end logic",
"def _handle_disconnected(self, event):\n self.roster = {}",
"def game_over(self):\n self.end_of_level()\n self.message_holder.add_widget(self.you_lose_label)\n Clock.schedule_once(self.goto_menu, 5)",
"def exit(self) -> None:\n self.on_exit(None)",
"def on_finish(self):\n pass",
"def leave(self):\n p = GameOverPopup(self)\n p.open()",
"def endCompetition(self):\n self.robot_exit = True",
"def end_of_game(self, winner):\n pass",
"def event_game_over(self):\n print('Game over!')\n self._cmd_exit()",
"def received_CLOSING(self):\n\n\t\tself.player_frame.notify_rival_closing()\n\t\tself.player_frame.master.go_to_previous_screen(False)",
"def endGame(self, message):\n print(self.board)\n print(\"Game over! \" + message)\n self.gameOver = True"
] | [
"0.6788173",
"0.65788233",
"0.65788233",
"0.65397877",
"0.6511395",
"0.6485533",
"0.6424921",
"0.6411908",
"0.64101607",
"0.6407878",
"0.637953",
"0.63781476",
"0.6357803",
"0.6351377",
"0.63508695",
"0.6343258",
"0.6334145",
"0.63243484",
"0.62887913",
"0.6288441",
"0.6277276",
"0.6260994",
"0.6245237",
"0.62429816",
"0.62426525",
"0.6226485",
"0.61882496",
"0.61756784",
"0.61584896",
"0.6158285"
] | 0.78373396 | 0 |
Creates a tree from list. First element is root value, others are children nodes. (values or subtrees). | def construct(lst):
t = Tree()
t.root = lst[0]
for node in lst[1:]:
if isinstance(node, list):
t.nodes.append(construct(node))
else:
t.nodes.append(node)
return t | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_tree(tree_list, nums):\n\n if len(nums) == 0:\n return tree_list\n next_list = []\n j = 0\n for i in tree_list:\n for _ in range(2):\n if i is not None and j < len(nums):\n if nums[j] is None:\n next_list.append(None)\n else:\n next_list.append(TreeNode(nums[j]))\n j += 1\n res_list = build_tree(next_list, nums[j:])\n j = 0\n for i in tree_list:\n if i is None:\n continue\n if j < len(res_list):\n if next_list[j] is not None:\n next_list[j].parent = i\n i.left = next_list[j]\n j += 1\n if j < len(res_list):\n if next_list[j] is not None:\n next_list[j].parent = i\n i.right = next_list[j]\n j += 1\n return tree_list",
"def _treeify(values):\n if len(values) == 1: # this case causes problems later\n return values\n tree = np.empty_like(values)\n # Tree indices work as follows:\n # 0 is the root\n # 2n+1 is the left child of n\n # 2n+2 is the right child of n\n # So we now rearrange `values` into that format...\n\n # The first step is to remove the bottom row of leaves, which might not be exactly full\n last_full_row = int(np.log2(len(values) + 1) - 1)\n len_ragged_row = len(values) - (2 ** (last_full_row + 1) - 1)\n if len_ragged_row > 0:\n bottom_row_ix = np.s_[:2 * len_ragged_row:2]\n tree[-len_ragged_row:] = values[bottom_row_ix]\n values = np.delete(values, bottom_row_ix)\n\n # Now `values` is length 2**n - 1, so can be packed efficiently into a tree\n # Last row of nodes is indices 0, 2, ..., 2**n - 2\n # Second-last row is indices 1, 5, ..., 2**n - 3\n # nth-last row is indices (2**n - 1)::(2**(n+1))\n values_start = 0\n values_space = 2\n values_len = 2 ** last_full_row\n while values_start < len(values):\n tree[values_len - 1:2 * values_len - 1] = values[values_start::values_space]\n values_start += int(values_space / 2)\n values_space *= 2\n values_len = int(values_len / 2)\n return tree",
"def build_tree_from_preorder(values): \r\n \r\n if len(values) == 0 or values[0] == None:\r\n return None\r\n root = TreeNode(values[0])\r\n if len(values) == 1:\r\n return root\r\n root.left = build_tree_from_preorder(values[1:((len(values)-1) // 2 + 1)])\r\n root.right = build_tree_from_preorder(values[((len(values)-1) // 2 + 1):]) \r\n if root.left != None:\r\n root.left.parent = root\r\n if root.right != None:\r\n root.right.parent = root\r\n \r\n return root",
"def create_tree(nums):\n def build_tree(tree_list, nums):\n \"\"\"\n This fuction is to build the next level nodes\n and link them to the previous level nodes\n \"\"\"\n\n if len(nums) == 0:\n return tree_list\n next_list = []\n j = 0\n for i in tree_list:\n for _ in range(2):\n if i is not None and j < len(nums):\n if nums[j] is None:\n next_list.append(None)\n else:\n next_list.append(TreeNode(nums[j]))\n j += 1\n res_list = build_tree(next_list, nums[j:])\n j = 0\n for i in tree_list:\n if i is None:\n continue\n if j < len(res_list):\n if next_list[j] is not None:\n next_list[j].parent = i\n i.left = next_list[j]\n j += 1\n if j < len(res_list):\n if next_list[j] is not None:\n next_list[j].parent = i\n i.right = next_list[j]\n j += 1\n return tree_list\n\n root = TreeNode(nums[0])\n return build_tree([root], nums[1:])[0]",
"def _treeify(values):\n if len(values) == 1: # this case causes problems later\n return values\n tree = np.empty_like(values)\n\n # The first step is to remove the bottom row of leaves, which might not be exactly full\n last_full_row = int(np.log2(len(values) + 1) - 1)\n len_ragged_row = len(values) - (2 ** (last_full_row + 1) - 1)\n if len_ragged_row > 0:\n bottom_row_ix = np.s_[: 2 * len_ragged_row : 2]\n tree[-len_ragged_row:] = values[bottom_row_ix]\n values = np.delete(values, bottom_row_ix)\n\n # Now `values` is length 2**n - 1, so can be packed efficiently into a tree\n # Last row of nodes is indices 0, 2, ..., 2**n - 2\n # Second-last row is indices 1, 5, ..., 2**n - 3\n # nth-last row is indices (2**n - 1)::(2**(n+1))\n values_start = 0\n values_space = 2\n values_len = 2 ** last_full_row\n while values_start < len(values):\n tree[values_len - 1 : 2 * values_len - 1] = values[values_start::values_space]\n values_start += int(values_space / 2)\n values_space *= 2\n values_len = int(values_len / 2)\n return tree",
"def genTree(lst, i=1):\n if lst and i <= len(lst) and lst[i-1] is not None:\n node = TreeNode(lst[i-1])\n node.left = genTree(lst, i*2)\n node.right = genTree(lst, i*2+1)\n return node",
"def list_to_binary_tree(list_input):\n pass",
"def valueSetTree(root, klist):\n try:\n if (root is not None):\n valueSetTree(root['left'], klist)\n lt.addLast(klist, root['value'])\n valueSetTree(root['right'], klist)\n return klist\n except Exception as exp:\n error.reraise(exp, 'RBT:valueSetTree')",
"def construct_tree():\n root = TreeNode(5)\n root.left = TreeNode(3)\n root.right = TreeNode(8)\n root.left.left = TreeNode(2)\n root.left.right = TreeNode(4)\n root.right.left = TreeNode(7)\n return root",
"def _sorted_list_to_bst(cls, items=[], start=None, end=None, parent=None):\n if start > end:\n return None\n mid = start + (end - start) // 2\n node = Node(items[mid], parent)\n node.left = cls._sorted_list_to_bst(items, start, mid - 1, node)\n node.right = cls._sorted_list_to_bst(items, mid + 1, end, node)\n return node",
"def build():\n # root = TreeNode(5)\n # root.left = TreeNode(2)\n # root.right = TreeNode(7)\n # return root\n\n \"\"\"\n 5\n / \\\n 2 6\n / \\\n 1 3\n [5,2,1,3,6]\n \"\"\"\n _5 = TreeNode(5)\n _2 = TreeNode(2)\n _6 = TreeNode(6)\n _1 = TreeNode(1)\n _3 = TreeNode(3)\n _5.left = _2\n _5.right = _6\n _2.left = _1\n _2.right = _3\n return _5",
"def create_bst(lst, start, end):\n if end < start:\n return None\n mid = (start + end) // 2\n root = BinaryTree(lst[mid])\n root.left_child = create_bst(lst, start, mid - 1)\n root.right_child = create_bst(lst, mid + 1, end)\n # post-order traversal\n print(root.get_root_val())\n return root",
"def deserialize(self, data):\n if data == \"[]\": return\n vals, i = data[1:-1].split(','), 1\n root = TreeNode(int(vals[0]))\n queue = collections.deque()\n queue.append(root)\n while queue:\n node = queue.popleft()\n if vals[i] != \"null\":\n node.left = TreeNode(int(vals[i]))\n queue.append(node.left)\n i += 1\n if vals[i] != \"null\":\n node.right = TreeNode(int(vals[i]))\n queue.append(node.right)\n i += 1\n return root",
"def __init__(self, value: T):\n self.value = value\n self.children: List[Tree] = []",
"def deserialize(self, data):\n arr = data[1:-1].split(',')\n self.index = 0\n def construct():\n \n if arr[self.index] == 'null':\n self.index += 1\n return None\n root = TreeNode(int(str(arr[self.index])))\n self.index += 1\n if self.index >= len(arr):\n return root\n root.left = construct()\n root.right = construct()\n return root\n return construct()",
"def deserialize(self, data):\n vals = collections.deque(data.strip('[]').split(','))\n firstVal = vals.popleft()\n if firstVal == '':\n return None\n root = TreeNode(int(firstVal))\n queue = collections.deque([root])\n while queue:\n node = queue.popleft()\n lv, rv = vals.popleft(), vals.popleft()\n if lv != 'null':\n node.left = TreeNode(int(lv))\n queue.append(node.left)\n if rv != 'null':\n node.right = TreeNode(int(rv))\n queue.append(node.right)\n return root",
"def make_tree(self, l):\n\t\tfor el in l:\n\t\t\tself.insert(el)",
"def make_tree(arr):\n\n for i in range(len(arr)):\n arr, val = mid(arr)\n\n if i == 0: \n binary = BinaryNode(val)\n\n else:\n binary.insert(val)\n\n return binary",
"def construct_tree(serialized_data):\n\n if 0 == len(serialized_data):\n return None\n root = TreeNode(serialized_data[0][0])\n prev = [root]\n for row in serialized_data[1:]:\n nodes, cur = [], []\n for el in row:\n if el is not None:\n node = TreeNode(el)\n cur.append(node)\n else:\n node = None\n nodes.append(node)\n prev = prev[::-1]\n nodes = nodes[::-1]\n while prev and nodes:\n parent = prev.pop()\n parent.left = nodes.pop()\n if nodes:\n parent.right = nodes.pop()\n prev = cur\n return root",
"def deserialize(self, data):\n if not data:\n return None\n lst = data.split(\",\")\n lst = [int(i) if i != \"None\" else None for i in lst]\n level = []\n level.append(lst[0])\n index = 0 + 1\n dummy_root = TreeNode(None)\n roots = [dummy_root]\n def build_level(roots, nodes):\n i = 0\n for r in roots:\n if r is None:\n pass\n else:\n left = nodes[i:i+1]\n right = nodes[i+1:i+2]\n r.left = left[0] if left else None\n r.right = right[0] if right else None\n i += 2\n\n while level:\n nodes = []\n nodes = [TreeNode(v) if v is not None else None for v in level]\n # for v in level:\n # if v is not None:\n # nodes.append(TreeNode(v))\n ed = len(nodes) - 1\n while nodes[ed] is None:\n nodes.pop()\n ed -= 1\n build_level(roots, nodes)\n roots = nodes\n real_nodes = []\n for n in nodes:\n if n is not None:\n real_nodes.append(n)\n count = 2 * len(real_nodes)\n nxt = lst[index:index+count]\n index = index + count\n level = nxt\n return dummy_root.left",
"def build_tree(elements):\n print(\"Building tree with these elements:\",elements)\n root = BinarySearchTreeNode(elements[0])\n\n for i in range(1, len(elements)):\n root.add_child(elements[i])\n\n return root",
"def deserialize1(self, data):\n if data == \"\":\n return None\n serial = data.split(\",\")\n stack = []\n for val in serial:\n if not stack:\n root = TreeNode(int(val))\n stack.append([root, True])\n else:\n node, state = stack[-1]\n if not node.left and state:\n if val == \"null\":\n stack[-1][1] = False\n else:\n left_node = TreeNode(int(val))\n node.left = left_node\n print(left_node.val)\n stack.append([left_node, True])\n else:\n stack.pop()\n if not val == \"null\":\n right_node = TreeNode(int(val))\n node.right = right_node\n print(right_node.val)\n stack.append([right_node, True])\n return root",
"def construct_tree(data):\n\troot = Node(\"root\")\n\tcurrent_parent = root\n\tstack = []\n\n\tfor token in data:\n\t\t# push tokens unto stack until \")\" is detected\n\t\t# when \")\" is detected, pop the stack and create children to be inserted\n\t\tif token == \")\":\n\t\t\tn = Node(\"\")\n\n\t\t\tsymbol = stack.pop()\n\n\t\t\twhile (symbol) != \"(\":\n\t\t\t\tn.add_child_front(symbol)\n\t\t\t\tsymbol = stack.pop()\n\n\t\t\t# semantic analyzer can be implemented here\n\n\t\t\t#n = code_generator(n)\n\n\t\t\tstack.append(n)\n\n\t\telse:\n\t\t\tstack.append(token)\n\n\treturn stack.pop()",
"def deserialize(self, data):\n if data == '[]':\n return None\n \n strs = data[1:-1].split(',')\n root = TreeNode(int(strs[0]))\n Q = collections.deque([root])\n leftChild = True\n for s in strs[1:]:\n if s != 'null':\n node = TreeNode(int(s))\n if leftChild:\n Q[0].left = node\n else:\n Q[0].right = node\n Q.append(node)\n if not leftChild:\n Q.popleft()\n leftChild = not leftChild\n return root",
"def deserialize(self, data):\n self.index += 1\n if self.index == 0:\n self.li = data.split(',')\n root = None\n if self.li[self.index] != 'null':\n root = TreeNode(int(self.li[self.index]))\n root.left = self.deserialize(data)\n root.right = self.deserialize(data)\n return root",
"def deserialize(self, data):\n l = []\n _tmp = data.split(\",\")\n for item in _tmp:\n if item:\n l.append(item)\n l.reverse()\n\n def rdeserialize():\n if not l:\n return None\n val = l.pop()\n if val == \"null\":\n return None\n root = TreeNode(val)\n root.left = rdeserialize()\n root.right = rdeserialize()\n return root\n\n return rdeserialize()",
"def deserialize(self, data):\n\n def doit():\n val = next(vals)\n if val == '*':\n return None\n root = TreeNode(int(val))\n root.left = doit() # during recursion, vals can be called iteratively\n root.right = doit()\n return root\n\n vals = iter(data.split(\",\")) # an iteration object\n return doit()",
"def test_binarytree_instantiate_list():\n input = [13, 42, 7]\n c = BinaryTree(input)\n assert isinstance(c, BinaryTree)",
"def level_deserialize(self, data):\n if not data: return None\n data = data.split(',')\n\n def make_node(i):\n if i >= len(data): return None\n if data[i] == 'None': return None\n return TreeNode(int(data[i]))\n\n root = make_node(0)\n q = [root]\n i = 1\n while q:\n new_q = []\n for node in q:\n left_node, i = make_node(i), i + 1\n new_q.append(left_node)\n if node: node.left = left_node\n\n right_node, i = make_node(i), i + 1\n new_q.append(right_node)\n if node: node.right = right_node\n\n if i >= len(data): break\n q = new_q\n return root",
"def tree(plist, l, a, f):\n if l > 3:\n lst = []\n for p in plist:\n p.forward(l)\n q = p.clone()\n p.left(a)\n q.right(a)\n lst.append(p)\n lst.append(q)\n\n tree(lst,l*f,a,f)"
] | [
"0.70470774",
"0.69341266",
"0.6903691",
"0.6826804",
"0.67401844",
"0.6710746",
"0.65991884",
"0.6522682",
"0.63863814",
"0.6339975",
"0.63145685",
"0.63064444",
"0.6274908",
"0.6264302",
"0.62484676",
"0.62479275",
"0.6221631",
"0.62191725",
"0.6189883",
"0.6159182",
"0.61578023",
"0.61497265",
"0.613509",
"0.6122198",
"0.6116635",
"0.60937244",
"0.60894984",
"0.60826313",
"0.6082611",
"0.60720617"
] | 0.7338503 | 0 |
convert list of dicts to ndarray of type np.float32 | def dicts2ndarray(data_dicts):
# NEVER make any assumption about the order of .keys() return
aps = [ap for ap in data_dicts[0].keys() if ap != 'tag']
aps.sort()
data_num = len(data_dicts)
data_len = len(data_dicts[0][aps[0]])
ndary = np.zeros([data_num, len(aps), data_len], dtype=np.float32)
for idx, d in enumerate(data_dicts):
for aidx, ap in enumerate(aps):
ndary[idx, aidx, :] = d[ap]
return ndary | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def translate_pandas_to_numpy(data_list:list) -> list:\n list_size = len(data_list)\n for i in range(list_size):\n data_list[i] = data_list[i].values.astype('float32')\n return data_list",
"def convert_dict_to_ndarray(*dictionaries):\n\n array_list = []\n\n # Loop all dicts\n for dictionary in dictionaries:\n # Loop all keys\n for key in dictionary.keys():\n # Skip non-ndarray types\n if not isinstance(dictionary[key], np.ndarray):\n continue\n # Append each item to a list\n array_list.append(dictionary[key])\n\n # Check non-uniform length between arrays\n for item in array_list:\n assert len(item) == len(array_list[0]), 'All arrays must have the same length'\n\n return np.vstack(array_list) # .swapaxes(0, 1)",
"def to_ndarray(item):\n \n return type(item), sp.array(item, sp.float64, ndmin=1)",
"def dict_to_array(self, d):\n n_fit_p = len(self.fit_parameters)\n n_nui_p = len(self.nuisance_parameters)\n n_wc = len(self.fit_wc_names)\n arr = np.zeros(n_fit_p + n_nui_p + n_wc)\n arr[:n_fit_p] = [d['fit_parameters'][p] for p in self.fit_parameters]\n arr[n_fit_p:n_fit_p+n_nui_p] = [d['nuisance_parameters'][p] for p in self.nuisance_parameters]\n arr[n_fit_p+n_nui_p:] = [d['fit_wc'][c] for c in self.fit_wc_names]\n return arr",
"def convert_from_list_to_numpy(data_as_list):\n\n if len(data_as_list['points']) == 0:\n return {'points': None, 'evaluations': None, 'var_noise': None}\n\n data = {}\n n_points = len(data_as_list['points'])\n dim_point = len(data_as_list['points'][0])\n\n points = np.zeros((n_points, dim_point))\n evaluations = np.zeros(n_points)\n var_noise = None\n\n if len(data_as_list['var_noise']) > 0:\n var_noise = np.zeros(n_points)\n iterate = zip(data_as_list['points'], data_as_list['evaluations'],\n data_as_list['var_noise'])\n else:\n iterate = zip(data_as_list['points'], data_as_list['evaluations'])\n\n for index, point in enumerate(iterate):\n points[index, :] = point[0]\n evaluations[index] = point[1]\n if len(data_as_list['var_noise']) > 0:\n var_noise[index] = point[2]\n\n data['points'] = points\n data['evaluations'] = evaluations\n data['var_noise'] = var_noise\n\n return data",
"def _to_ndarray(data):\n return np.atleast_1d(getattr(data, 'values', data))",
"def jsonify(data):\n\n for key in data:\n if type(data[key]) == numpy.ndarray:\n data[key] = data[key].tolist()\n\n if isinstance(data[key], list):\n data[key] = [0 if isinstance(x, float) and math.isnan(x) else x for x in data[key]]\n\n return data",
"def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct",
"def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct",
"def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct",
"def to_numpy(x: torch.Tensor) -> np.ndarray:\n if isinstance(x, dict):\n r = {}\n for k, v in x.items():\n if isinstance(v, torch.Tensor):\n if v.device.type == 'cuda':\n r.update({k: v.detach().cpu().numpy()})\n else:\n r.update({k: v.detach().numpy()})\n else:\n r.update({k: v})\n return r\n else:\n if x.device.type == 'cuda':\n return x.detach().cpu().numpy()\n else:\n return x.detach().numpy()",
"def convert_dict_to_arr(features: {}, labels: {}) -> ([], []):\n\n features_arr = []\n labels_arr = []\n\n for id, featuresList in features.items():\n\n labels_arr.append([labels.get(str(id))[0], labels.get(str(id))[1], id])\n\n # Elementary features\n v = featuresList[\"volume\"]\n a = featuresList[\"area\"]\n c = featuresList[\"compactness\"]\n bb = featuresList[\"bbox_volume\"]\n d = featuresList[\"diameter\"]\n e = featuresList[\"eccentricity\"]\n elem_features = [v, a, c, bb, d, e]\n \n # Global features\n a3, d1, d2, d3, d4 = [], [], [], [], []\n for x in featuresList[\"A3\"][0]:\n a3.append(x)\n for x in featuresList[\"D1\"][0]:\n d1.append(x)\n for x in featuresList[\"D2\"][0]:\n d2.append(x)\n for x in featuresList[\"D3\"][0]:\n d3.append(x)\n for x in featuresList[\"D4\"][0]:\n d4.append(x)\n glob_features = np.concatenate((a3, d1, d2, d3, d4))\n features_arr.append(np.concatenate((elem_features, glob_features)))\n\n np.savetxt(s.SAVED_DATA + 'features_arr.txt', np.asarray(features_arr), delimiter=',')\n\n return np.asarray(features_arr), np.asarray(labels_arr)",
"def _json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct",
"def make_ndarray(data: list, convert=False):\n data_height = data[0].shape[0]\n data_width = data[0].shape[1]\n if len(data[0].shape) == 3:\n data_channels = data[0].shape[2]\n nd_data = np.zeros((len(data), data_height, data_width, data_channels), dtype=np.float32)\n\n else:\n nd_data = np.zeros((len(data), data_height, data_width), dtype=np.float32)\n\n if convert:\n for _ in range(len(data)):\n nd_data[_] = tf.keras.layers.Lambda(lambda x: x / 255)(data[_])\n\n else:\n for _ in range(len(data)):\n nd_data[_] = data[_]\n\n return nd_data",
"def dict_to_array(dic):\n # From dictionary of lists\n if type(list(dic.items())[0][1]) == list:\n list_items = list(dic.items())\n col_names = []\n col_data = []\n for i in range(0, len(list_items)):\n col_names.append(list_items[i][0])\n col_data.append(list_items[i][1])\n return col_data, col_names\n\n # From dictionary of numpy arrays\n elif type(list(dic.items())[0][1]) is np.ndarray:\n list_items = list(dic.items())\n col_names = []\n col_data = []\n for i in range(0, len(list_items)):\n col_names.append(list_items[i][0])\n col_data.append(list_items[i][1].tolist())\n return col_data, col_names",
"def to_np_arr_and_then_mean(list_of_lists):\n # print(list_of_lists)\n np_arr = np.array(list_of_lists)\n return np_arr.mean(axis=0)",
"def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n todecode = dct['__ndarray__'].encode(\"ascii\")\n data = base64.b64decode(todecode)\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct",
"def serialize_ndarrays(d):\n def dict_handler(d):\n return d.items()\n\n handlers = {list: enumerate, tuple: enumerate,\n set: enumerate, frozenset: enumerate,\n dict: dict_handler}\n\n def serialize(o):\n for typ, handler in handlers.items():\n if isinstance(o, typ):\n for key, val in handler(o):\n if isinstance(val, np.ndarray):\n o[key] = val.tolist()\n else:\n o[key] = serialize_ndarrays(o[key])\n return o\n\n return serialize(d)",
"def cvt2array(tuples):\n rc = []\n for t in tuples:\n rc.append(point3d(np.float32(t[X]), np.float32(t[Y]), np.float32(t[Z])))\n return rc",
"def dict_to_array(dict_array):\n plottable_array = []\n for k in dict_array:\n for i in range(len(dict_array[k])):\n plottable_array.append(dict_array[k][i])\n\n return np.array(plottable_array)",
"def to_np_arr_and_then_mean(list_of_lists):\n np_arr = np.array(list_of_lists)\n return np_arr.mean(axis=0)",
"def flatten_features_array(features: {}) -> []:\n flattened = []\n flattened.append(features[\"volume\"])\n flattened.append(features[\"area\"])\n flattened.append(features[\"compactness\"])\n flattened.append(features[\"bbox_volume\"])\n flattened.append(features[\"diameter\"])\n flattened.append(features[\"eccentricity\"])\n for i in features[\"A3\"][0]:\n flattened.append(i)\n for i in features[\"D1\"][0]:\n flattened.append(i)\n for i in features[\"D2\"][0]:\n flattened.append(i)\n for i in features[\"D3\"][0]:\n flattened.append(i)\n for i in features[\"D4\"][0]:\n flattened.append(i)\n\n return flattened",
"def _dict2arr(self, key):\r\n # Prepare the matrix for the output:\r\n arr = np.empty((self._n_process,\r\n self._n_process,\r\n self.frequencies.shape[0]))\r\n\r\n arr.fill(np.nan)\r\n\r\n # 'Translate' from dict form into matrix form:\r\n for i, j in self.ij:\r\n arr[j, i, :] = self._granger_causality[key][i, j]\r\n return arr",
"def nanify_dict_of_lists(dict_):\n return {k: [float('nan')]*len(v) for k, v in dict_.items()}",
"def to_ndarray(sample_list: List[CIFARSample], normalize: bool=False, flatten: bool=False):\n x = np.asarray([s.image for s in sample_list])\n if normalize:\n x = x.astype(np.float32) / 255. - 0.5 # zero centering\n if flatten:\n x = x.reshape(-1, 32 * 32 * 3)\n y = np.asarray([s.label for s in sample_list])\n return x, y",
"def sur_dict2mat(dicts):\n n_dicts = len(dicts.keys())\n mat = np.vstack((dicts[t] for t in range(n_dicts)))\n return(mat)",
"def dict(self) -> Dict[str, List[NumericType]]:\n return {r.name: r.floats() for r in self._records}",
"def scalarise(dct):\n d = dct.copy()\n for subkey in [DEP, INDEP]:\n for k, v in d[subkey].items():\n if isinstance(v, np.ndarray) and np.size(v) == 1:\n dct[subkey][k] = v.item()",
"def _get_list_from_dict(d, l):\n\n new_list = []\n\n for val in l:\n subdict = d[val]\n inner_list = []\n for subval in l:\n inner_list.append(subdict[subval])\n new_list.append(inner_list)\n\n return np.array(new_list)",
"def dict_to_vector(self, dictionary):\n vector = []\n for key in dictionary:\n vector = np.concatenate((vector,dictionary[f'{key}'].flatten()))\n return vector"
] | [
"0.6651815",
"0.65400195",
"0.61362433",
"0.6098978",
"0.60989004",
"0.60879093",
"0.60031056",
"0.59976625",
"0.59976625",
"0.59976625",
"0.59841895",
"0.59794885",
"0.5843597",
"0.5822316",
"0.5761593",
"0.57518655",
"0.57207453",
"0.57119364",
"0.571017",
"0.570412",
"0.5686313",
"0.5645657",
"0.5641301",
"0.5631575",
"0.5602915",
"0.55933464",
"0.5568045",
"0.55630785",
"0.5559473",
"0.55090874"
] | 0.70575464 | 0 |
Assembles a list of circuits into a qobj which can be run on the backend. | def assemble_circuits(circuits, run_config=None, qobj_header=None, qobj_id=None):
qobj_header = qobj_header or QobjHeader()
run_config = run_config or RunConfig()
if isinstance(circuits, QuantumCircuit):
circuits = [circuits]
userconfig = QobjConfig(**run_config.to_dict())
experiments = []
max_n_qubits = 0
max_memory_slots = 0
for circuit in circuits:
# header stuff
n_qubits = 0
memory_slots = 0
qubit_labels = []
clbit_labels = []
qreg_sizes = []
creg_sizes = []
for qreg in circuit.qregs:
qreg_sizes.append([qreg.name, qreg.size])
for j in range(qreg.size):
qubit_labels.append([qreg.name, j])
n_qubits += qreg.size
for creg in circuit.cregs:
creg_sizes.append([creg.name, creg.size])
for j in range(creg.size):
clbit_labels.append([creg.name, j])
memory_slots += creg.size
# TODO: why do we need creq_sizes and qreg_sizes in header
# TODO: we need to rethink memory_slots as they are tied to classical bit
experimentheader = QobjExperimentHeader(qubit_labels=qubit_labels,
n_qubits=n_qubits,
qreg_sizes=qreg_sizes,
clbit_labels=clbit_labels,
memory_slots=memory_slots,
creg_sizes=creg_sizes,
name=circuit.name)
# TODO: why do we need n_qubits and memory_slots in both the header and the config
experimentconfig = QobjExperimentConfig(n_qubits=n_qubits, memory_slots=memory_slots)
instructions = []
for opt in circuit.data:
current_instruction = QobjInstruction(name=opt.name)
if opt.qargs:
qubit_indices = [qubit_labels.index([qubit[0].name, qubit[1]])
for qubit in opt.qargs]
current_instruction.qubits = qubit_indices
if opt.cargs:
clbit_indices = [clbit_labels.index([clbit[0].name, clbit[1]])
for clbit in opt.cargs]
current_instruction.memory = clbit_indices
if opt.params:
params = list(map(lambda x: x.evalf(), opt.params))
params = [sympy.matrix2numpy(x, dtype=complex)
if isinstance(x, sympy.Matrix) else x for x in params]
if len(params) == 1 and isinstance(params[0], numpy.ndarray):
# TODO: Aer expects list of rows for unitary instruction params;
# change to matrix in Aer.
params = params[0]
current_instruction.params = params
# TODO (jay): I really dont like this for snapshot. I also think we should change
# type to snap_type
if opt.name == "snapshot":
current_instruction.label = str(opt.params[0])
current_instruction.type = str(opt.params[1])
if opt.control:
mask = 0
for clbit in clbit_labels:
if clbit[0] == opt.control[0].name:
mask |= (1 << clbit_labels.index(clbit))
current_instruction.conditional = QobjConditional(mask="0x%X" % mask,
type='equals',
val="0x%X" % opt.control[1])
instructions.append(current_instruction)
experiments.append(QobjExperiment(instructions=instructions, header=experimentheader,
config=experimentconfig))
if n_qubits > max_n_qubits:
max_n_qubits = n_qubits
if memory_slots > max_memory_slots:
max_memory_slots = memory_slots
userconfig.memory_slots = max_memory_slots
userconfig.n_qubits = max_n_qubits
return Qobj(qobj_id=qobj_id or str(uuid.uuid4()), config=userconfig,
experiments=experiments, header=qobj_header,
type=QobjType.QASM.value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def assemble_circuits(circuits, qobj_id=None, qobj_header=None, run_config=None):\n qobj_config = QasmQobjConfig()\n if run_config:\n qobj_config = QasmQobjConfig(**run_config.to_dict())\n\n # Pack everything into the Qobj\n experiments = []\n max_n_qubits = 0\n max_memory_slots = 0\n for circuit in circuits:\n # header stuff\n n_qubits = 0\n memory_slots = 0\n qubit_labels = []\n clbit_labels = []\n\n qreg_sizes = []\n creg_sizes = []\n for qreg in circuit.qregs:\n qreg_sizes.append([qreg.name, qreg.size])\n for j in range(qreg.size):\n qubit_labels.append([qreg.name, j])\n n_qubits += qreg.size\n for creg in circuit.cregs:\n creg_sizes.append([creg.name, creg.size])\n for j in range(creg.size):\n clbit_labels.append([creg.name, j])\n memory_slots += creg.size\n\n # TODO: why do we need creq_sizes and qreg_sizes in header\n # TODO: we need to rethink memory_slots as they are tied to classical bit\n experimentheader = QobjExperimentHeader(qubit_labels=qubit_labels,\n n_qubits=n_qubits,\n qreg_sizes=qreg_sizes,\n clbit_labels=clbit_labels,\n memory_slots=memory_slots,\n creg_sizes=creg_sizes,\n name=circuit.name)\n # TODO: why do we need n_qubits and memory_slots in both the header and the config\n experimentconfig = QasmQobjExperimentConfig(n_qubits=n_qubits, memory_slots=memory_slots)\n\n # Convert conditionals from QASM-style (creg ?= int) to qobj-style\n # (register_bit ?= 1), by assuming device has unlimited register slots\n # (supported only for simulators). Map all measures to a register matching\n # their clbit_index, create a new register slot for every conditional gate\n # and add a bfunc to map the creg=val mask onto the gating register bit.\n\n is_conditional_experiment = any(op.control for (op, qargs, cargs) in circuit.data)\n max_conditional_idx = 0\n\n instructions = []\n for op_context in circuit.data:\n instruction = op_context[0].assemble()\n\n # Add register attributes to the instruction\n qargs = op_context[1]\n cargs = op_context[2]\n if qargs:\n qubit_indices = [qubit_labels.index([qubit[0].name, qubit[1]])\n for qubit in qargs]\n instruction.qubits = qubit_indices\n if cargs:\n clbit_indices = [clbit_labels.index([clbit[0].name, clbit[1]])\n for clbit in cargs]\n instruction.memory = clbit_indices\n # If the experiment has conditional instructions, assume every\n # measurement result may be needed for a conditional gate.\n if instruction.name == \"measure\" and is_conditional_experiment:\n instruction.register = clbit_indices\n\n # To convert to a qobj-style conditional, insert a bfunc prior\n # to the conditional instruction to map the creg ?= val condition\n # onto a gating register bit.\n if hasattr(instruction, '_control'):\n ctrl_reg, ctrl_val = instruction._control\n mask = 0\n val = 0\n for clbit in clbit_labels:\n if clbit[0] == ctrl_reg.name:\n mask |= (1 << clbit_labels.index(clbit))\n val |= (((ctrl_val >> clbit[1]) & 1) << clbit_labels.index(clbit))\n\n conditional_reg_idx = memory_slots + max_conditional_idx\n conversion_bfunc = QasmQobjInstruction(name='bfunc',\n mask=\"0x%X\" % mask,\n relation='==',\n val=\"0x%X\" % val,\n register=conditional_reg_idx)\n instructions.append(conversion_bfunc)\n instruction.conditional = conditional_reg_idx\n max_conditional_idx += 1\n # Delete control attribute now that we have replaced it with\n # the conditional and bfuc\n del instruction._control\n\n instructions.append(instruction)\n\n experiments.append(QasmQobjExperiment(instructions=instructions, header=experimentheader,\n config=experimentconfig))\n if n_qubits 
> max_n_qubits:\n max_n_qubits = n_qubits\n if memory_slots > max_memory_slots:\n max_memory_slots = memory_slots\n\n qobj_config.memory_slots = max_memory_slots\n qobj_config.n_qubits = max_n_qubits\n\n return QasmQobj(qobj_id=qobj_id,\n config=qobj_config,\n experiments=experiments,\n header=qobj_header)",
"def qobj_to_circuits(qobj):\n if qobj.experiments:\n circuits = []\n for x in qobj.experiments:\n if hasattr(x.header, 'compiled_circuit_qasm'):\n circuits.append(\n load_qasm_string(x.header.compiled_circuit_qasm))\n return circuits\n # TODO(mtreinish): add support for converting a qobj if the qasm isn't\n # embedded in the header\n return None",
"def execute(circuits, backend,\n config=None, basis_gates=None, coupling_map=None, initial_layout=None,\n shots=1024, max_credits=10, seed=None, qobj_id=None, hpc=None,\n skip_transpiler=False):\n # pylint: disable=missing-param-doc, missing-type-doc\n if isinstance(backend, str):\n backend = _DEFAULT_PROVIDER.get_backend(backend)\n qobj = compile(circuits, backend,\n config, basis_gates, coupling_map, initial_layout,\n shots, max_credits, seed, qobj_id, hpc,\n skip_transpiler)\n return backend.run(qobj)",
"def from_cirq(cls, circuit:cirq.Circuit):\n qubits = quple.get_circuit_qubits(circuit)\n symbols = quple.get_circuit_symbols(circuit)\n cq = cls(qubits)\n cq.append(circuit)\n return cq",
"def circuit_list(self):\r\n return self.circuits.itervalues()",
"def _create_quantum_circuit(self):\n reg_list = []\n for entry in self.regdefs:\n is_qreg = self._match_entry_type(entry, [ASTType.QREG])\n\n if is_qreg:\n reg_list.append(QuantumRegister(entry.get('qreg_num'), entry.get('qreg_name')))\n else:\n reg_list.append(ClassicalRegister(entry.get('creg_num'), entry.get('creg_name')))\n\n self.circuit = QuantumCircuit(*reg_list)\n return self.circuit",
"def circuits(self, backend: Optional[Backend] = None) -> List[QuantumCircuit]:\n schedule = self.experiment_options.get(\"schedule\", None)\n\n if schedule is None:\n schedule = self._default_gate_schedule(backend=backend)\n else:\n if self.physical_qubits[0] not in set(ch.index for ch in schedule.channels):\n raise CalibrationError(\n f\"User provided schedule {schedule.name} does not contain a channel \"\n \"for the qubit on which to run Rabi.\"\n )\n\n if len(schedule.parameters) != 1:\n raise CalibrationError(\"Schedule in Rabi must have exactly one free parameter.\")\n\n param = next(iter(schedule.parameters))\n\n # Create template circuit\n circuit = self._template_circuit(param)\n circuit.add_calibration(\n self.__rabi_gate_name__, (self.physical_qubits[0],), schedule, params=[param]\n )\n\n # Create the circuits to run\n circs = []\n for amp in self.experiment_options.amplitudes:\n amp = np.round(amp, decimals=6)\n assigned_circ = circuit.assign_parameters({param: amp}, inplace=False)\n assigned_circ.metadata = {\n \"experiment_type\": self._type,\n \"qubits\": (self.physical_qubits[0],),\n \"xval\": amp,\n \"unit\": \"arb. unit\",\n \"amplitude\": amp,\n \"schedule\": str(schedule),\n }\n\n if backend:\n assigned_circ.metadata[\"dt\"] = getattr(backend.configuration(), \"dt\", \"n.a.\")\n\n circs.append(assigned_circ)\n\n return circs",
"def get_circuits(self) -> List[Circuit]:\n return [Circuit(c, self, self.easee) for c in self[\"circuits\"]]",
"def circuits(self) -> List[QuantumCircuit]:\n circ0 = QuantumCircuit(1, 1)\n circ0.measure(0, 0)\n\n circ1 = QuantumCircuit(1, 1)\n circ1.x(0)\n circ1.measure(0, 0)\n\n for i, circ in enumerate([circ0, circ1]):\n circ.metadata = {\n \"experiment_type\": self._type,\n \"qubit\": self.physical_qubits[0],\n \"xval\": i,\n }\n\n return [circ0, circ1]",
"def pre_defined_circuit(env: 'QEnv', q: List['QRegStorage'], gate_list: List[operatorType]) -> 'QEnv':\n if gate_list:\n for gate in gate_list:\n if gate.bits == 1:\n gate(q[0])\n elif gate.bits == 2:\n gate(q[0], q[1])\n return env",
"def run(self, circuit, **kwargs): \n # If the circuit was created using qiskit.assemble,\n # disassemble into QASM here\n if isinstance(circuit, QasmQobj) or isinstance(circuit, Qobj):\n from qiskit.assembler import disassemble\n circuits, run, _ = disassemble(circuit)\n circuit = circuits[0]\n if kwargs.get(\"shots\") is None:\n # Note that the default number of shots for QObj is 1024\n # unless the user specifies the backend.\n kwargs[\"shots\"] = run[\"shots\"]\n\n ionq_circ, _, meas_map = qiskit_circ_to_ionq_circ(circuit)\n input_data = json.dumps({\n \"qubits\": circuit.num_qubits,\n \"circuit\": ionq_circ,\n })\n\n # Options are mapped to input_params\n # Take also into consideration options passed in the kwargs, as the take precedence\n # over default values:\n input_params = vars(self.options)\n for opt in kwargs.copy():\n if opt in input_params:\n input_params[opt] = kwargs.pop(opt)\n\n logger.info(f\"Submitting new job for backend {self.name()}\")\n job = AzureQuantumJob(\n backend=self,\n name=circuit.name,\n target=self.name(),\n input_data=input_data,\n blob_name=\"inputData\",\n content_type=\"application/json\",\n provider_id=\"ionq\",\n input_data_format=\"ionq.circuit.v1\",\n output_data_format=\"ionq.quantum-results.v1\",\n input_params = input_params,\n metadata= self._job_metadata(circuit=circuit, meas_map=meas_map),\n **kwargs\n )\n\n logger.info(f\"Submitted job with id '{job.id()}' for circuit '{circuit.name}':\")\n logger.info(input_data)\n\n return job",
"def create_qaoa_circuit(graph, params):\n num_of_iterations = int(len(params)/2)\n gammas = params[:num_of_iterations] # Let the first half of the params list be gamma parameters\n betas = params[num_of_iterations:] # Let the second half of the params list be beta parameters\n\n # Initialize Circuit\n qr = QuantumRegister(len(graph.nodes))\n cr = ClassicalRegister(len(graph.nodes))\n circuit = QuantumCircuit(qr, cr)\n\n # Put all qubits in superposition with hadamard gates\n circuit.h(qr)\n for iteration in range(num_of_iterations):\n # Get Cost and Mixer Unitaries\n cost_unitary = create_cost_unitary(graph, gammas[iteration])\n mixer_unitary = create_mixer_unitary(graph, betas[iteration])\n circuit.append(cost_unitary, qr)\n circuit.append(mixer_unitary, qr)\n\n circuit.measure(qr, cr)\n\n return circuit",
"def get_circuits(self):\n circuits = {}\n for circuit in self.load_circuits():\n circuits[circuit.id] = circuit.as_dict()\n\n return jsonify({'circuits': circuits}), 200",
"def assemble_circuit(self, circuit: Circuit):\n\n if self.depth is None:\n return circuit.invert()\n gates_qubits_pairs = find_gates_qubits_pairs(circuit)\n circuit_gates = len(gates_qubits_pairs)\n if circuit_gates == 0:\n raise ValueError(\"The circuit must contain at least a two qubit gate.\")\n repetitions, remainder = divmod(self.depth, circuit_gates)\n assembled_gates_qubits_pairs = []\n for _ in range(repetitions):\n assembled_gates_qubits_pairs += gates_qubits_pairs[:]\n gates_qubits_pairs.reverse()\n assembled_gates_qubits_pairs += gates_qubits_pairs[0:remainder]\n new_circuit = Circuit(circuit.nqubits)\n for qubits in assembled_gates_qubits_pairs:\n # As only the connectivity is important here we can replace everything with CZ gates\n new_circuit.add(gates.CZ(qubits[0], qubits[1]))\n return new_circuit.invert()",
"def test_create_several_circuits_noname(self):\n q_program = QuantumProgram()\n qr1 = q_program.create_quantum_register(size=3)\n cr1 = q_program.create_classical_register(size=3)\n qr2 = q_program.create_quantum_register(size=3)\n cr2 = q_program.create_classical_register(size=3)\n qc1 = q_program.create_circuit(qregisters=[qr1], cregisters=[cr1])\n qc2 = q_program.create_circuit(qregisters=[qr2], cregisters=[cr2])\n qc3 = q_program.create_circuit(qregisters=[qr1, qr2], cregisters=[cr1, cr2])\n self.assertIsInstance(qc1, QuantumCircuit)\n self.assertIsInstance(qc2, QuantumCircuit)\n self.assertIsInstance(qc3, QuantumCircuit)",
"def __init__(self, result, circuits, measured_qubits):\n self._circuits = circuits\n self._result = result\n self._qubit_list = sorted(measured_qubits)\n\n self._meas_basis = None\n self._prep_basis = None\n super().set_measure_basis(\"Pauli\")\n super().set_preparation_basis(\"Pauli\")\n self._data = {}",
"def circuits(self) -> List[QuantumCircuit]:\n # Convert interleaved element to transpiled circuit operation and store it for speed\n self.__set_up_interleaved_op()\n\n # Build circuits of reference sequences\n reference_sequences = self._sample_sequences()\n reference_circuits = self._sequences_to_circuits(reference_sequences)\n for circ, seq in zip(reference_circuits, reference_sequences):\n circ.metadata = {\n \"xval\": len(seq),\n \"group\": \"Clifford\",\n \"physical_qubits\": self.physical_qubits,\n \"interleaved\": False,\n }\n # Build circuits of interleaved sequences\n interleaved_sequences = []\n for seq in reference_sequences:\n new_seq = []\n for elem in seq:\n new_seq.append(elem)\n new_seq.append(self._interleaved_cliff)\n interleaved_sequences.append(new_seq)\n interleaved_circuits = self._sequences_to_circuits(interleaved_sequences)\n for circ, seq in zip(interleaved_circuits, reference_sequences):\n circ.metadata = {\n \"xval\": len(seq), # set length of the reference sequence\n \"group\": \"Clifford\",\n \"physical_qubits\": self.physical_qubits,\n \"interleaved\": True,\n }\n\n if self.experiment_options.circuit_order == \"RRRIII\":\n return reference_circuits + interleaved_circuits\n # Default order: RIRIRI\n return list(itertools.chain.from_iterable(zip(reference_circuits, interleaved_circuits)))",
"def construct_qcbm(circuit, n_qubits, depth):\n\n for d in range(depth):\n for i in range(n_qubits):\n circuit.append_gate(Gate('X', target = i, angle = np.random.random()*np.pi*2))\n circuit.append_gate(Gate('Z', target = i, angle = np.random.random()*np.pi*2))\n if n_qubits != 1:\n for i in range(n_qubits):\n circuit.append_gate(Gate('CNOT', control = i, target = (i+1)%n_qubits))\n return circuit",
"def get_large_circuit(backend: IBMQBackend) -> QuantumCircuit:\n n_qubits = min(backend.configuration().n_qubits, 20)\n circuit = QuantumCircuit(n_qubits, n_qubits)\n for n in range(n_qubits-1):\n circuit.h(n)\n circuit.cx(n, n+1)\n circuit.measure(list(range(n_qubits)), list(range(n_qubits)))\n\n return circuit",
"def compile(circuits, backend,\n config=None, basis_gates=None, coupling_map=None, initial_layout=None,\n shots=1024, max_credits=10, seed=None, qobj_id=None, hpc=None,\n skip_transpiler=False):\n # pylint: disable=redefined-builtin\n if isinstance(backend, str):\n backend = _DEFAULT_PROVIDER.get_backend(backend)\n\n pass_manager = None # default pass manager which executes predetermined passes\n if skip_transpiler: # empty pass manager which does nothing\n pass_manager = transpiler.PassManager()\n\n return transpiler.compile(circuits, backend,\n config, basis_gates, coupling_map, initial_layout,\n shots, max_credits, seed, qobj_id, hpc,\n pass_manager)",
"def qlm_to_qiskit(qlm_circuit, qubits=None):\n # Init measured qubits\n if qubits is None:\n qubits = list(range(qlm_circuit.nbqbits))\n\n qreg = QuantumRegister(qlm_circuit.nbqbits)\n creg = None\n param_list = []\n if qlm_circuit.nbcbits > 0:\n creg = ClassicalRegister(max(qlm_circuit.nbcbits, len(qubits)))\n q_circ = QuantumCircuit(qreg, creg)\n else:\n q_circ = QuantumCircuit(qreg)\n dic = _gen_qiskit_gateset(q_circ)\n for gate_op in qlm_circuit:\n if gate_op.type == OpType.GATETYPE:\n name, params = extract_syntax(\n qlm_circuit.gateDic[gate_op.gate], qlm_circuit.gateDic,\n var_dic=qlm_circuit.var_dic)\n nbctrls = name.count('C-')\n # changes variables and expressions to format used by Qiskit\n for index, param in enumerate(params):\n if isinstance(param, Variable):\n params[index] = _variable_to_parameter(\n param_list, variable=param)\n elif isinstance(param, ArithExpression):\n arith_expr_list = param.to_thrift().split()\n params[index] = _arith_expr_list_to_parameter_expression(\n param_list, arith_expr_list, param)\n try:\n if name == \"MS\":\n q_circ.ms(params[0], [qreg[i] for i in gate_op.qbits])\n else:\n if (nbctrls > 0 and name not in SUPPORTED_CTRLS):\n tmp = name\n count = 0\n gate = None\n while True:\n last = tmp\n tmp = tmp.replace(\"C-\", \"\", 1)\n if last == tmp:\n raise ValueError(\n \"Gate {} not supported by Qiskit API\".format(name)\n )\n else:\n count += 1\n gate = _get_qiskit_gate_from_name(tmp)\n if gate != None:\n gate = gate(*params).control(count)\n break\n if gate != None:\n q_circ.append(gate, [qreg[i] for i in gate_op.qbits])\n else:\n dic[name](* params + [qreg[i] for i in gate_op.qbits])\n except KeyError:\n raise ValueError(\n \"Gate {} not supported by Qiskit API\".format(name)\n )\n elif gate_op.type == OpType.MEASURE:\n for index in range(len(gate_op.qbits)):\n q_circ.measure(gate_op.qbits[index], gate_op.cbits[index])\n\n # Adding measures to unify the interface\n for qbit_index, cbit in zip(qubits, creg):\n q_circ.measure(qreg[qbit_index], cbit)\n return q_circ",
"def unitary_builder(qubit_register, circuit): \n \n no_of_qubits = math.log(next(x for x in qubit_register.shape if x != 1), 2)\n qubit_ordering = []\n operations_in_slice = []\n operation_list = None\n for slice in circuit:\n for step in slice[\"operations\"]:\n qubit_ordering.extend(step[1])\n operations_in_slice.extend([step[0]])\n identity_operation_count = int(no_of_qubits - len(qubit_ordering))\n operations_in_slice.extend([qeye(2)] * identity_operation_count)\n qubit_ordering.extend([x for x in range(int(no_of_qubits)) if x not in qubit_ordering])\n operation_slice = tensor(operations_in_slice).permute(qubit_ordering)\n if operation_list is None:\n operation_list = [operation_slice]\n else:\n operation_list.extend([operation_slice])\n qubit_ordering = []\n operations_in_slice = [] \n \n circuit_unitary = reduce((lambda x, y: x * y), operation_list)\n \n return circuit_unitary",
"def _large_circuit():\n qr = QuantumRegister(9, name='qr')\n cr = ClassicalRegister(9, name='cr')\n circuit = QuantumCircuit(qr, cr)\n\n for i in range(3):\n zero = 3 * i\n first = 3 * i + 1\n second = 3 * i + 2\n\n circuit.x(qr[zero])\n circuit.y(qr[first])\n circuit.z(qr[second])\n\n circuit.h(qr[zero])\n circuit.s(qr[first])\n circuit.sdg(qr[second])\n\n circuit.t(qr[zero])\n circuit.tdg(qr[first])\n circuit.iden(qr[second])\n\n circuit.reset(qr[zero])\n circuit.reset(qr[first])\n circuit.reset(qr[second])\n\n circuit.rx(pi / 8, qr[zero])\n circuit.ry(pi / 8, qr[first])\n circuit.rz(pi / 8, qr[second])\n\n circuit.u1(pi / 8, qr[zero])\n circuit.u2(pi / 8, pi / 8, qr[first])\n circuit.u3(pi / 8, pi / 8, pi / 8, qr[second])\n\n circuit.swap(qr[zero], qr[first])\n\n circuit.cx(qr[zero], qr[first])\n circuit.cy(qr[first], qr[second])\n circuit.cz(qr[second], qr[zero])\n circuit.ch(qr[zero], qr[first])\n\n circuit.cu1(pi / 8, qr[zero], qr[first])\n circuit.cu3(pi / 8, pi / 8, pi / 8, qr[first], qr[second])\n\n circuit.barrier(qr)\n\n circuit.measure(qr, cr)\n\n return circuit",
"def convert_to_circuit(x):\n y = np.arcsin(x)\n z = np.arccos(x**2)\n qubits = cirq.GridQubit.rect(5, 1)\n circuit = cirq.Circuit()\n for i in range(5):\n circuit.append(cirq.ry(y).on(qubits[i]))\n circuit.append(cirq.rz(z).on(qubits[i]))\n return circuit",
"def _make_circuit_instructions(n_qubits, depth, type_circuit):\n\n if type_circuit in [0, 1, 2]:\n\n # if type_circuit == 1:\n # if depth > 8:\n # raise ValueError(\n # \"For type-1 circuits, only at most depth=8 allowed!\"\n # )\n\n # define rotations for circuit in each layer, 0: identity, 1:X, 2:Y 3:Z\n ini_pauli = np.zeros([depth, n_qubits], dtype=int)\n\n # set first and second layer, rest comes later\n ini_pauli[0, :] = 2 # y rotation\n if depth > 1:\n ini_pauli[1, :] = 3 # z rotation\n\n # construct natural parameterized circuit\n # gives which type of entangling gates at each layer -- first entry is\n # first qubit index, second is second qubit index, third entry is type\n # of entangling gate\n entangling_gate_index_list = [[] for i in range(depth)]\n orderList = []\n for i in range(n_qubits//2):\n if i % 2 == 0:\n orderList.append(i//2)\n else:\n orderList.append((n_qubits-i)//2)\n\n if n_qubits > 1:\n shiftList = [orderList[0]]\n else:\n shiftList = []\n for i in range(1, n_qubits//2):\n shiftList.append(orderList[i])\n shiftList += shiftList[:-1]\n\n # this list gives which entangling gates are applied in which layer\n if type_circuit == 0:\n # deep natural PQC, includes non-nearest neighbor gates\n for j in range(min(len(shiftList), int(np.ceil(depth/2))-1)):\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n elif type_circuit == 1:\n # only do 2 entangling layers at max, and only do gates with\n # nearest neighbor and no ring\n for j in range(min(len(shiftList), 3)):\n if j == 0:\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n elif (j == 1 or j == 2):\n # exclude ring gate and gate 0,1 on third entangling layer\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(1, n_qubits//2)\n ]\n\n elif type_circuit == 2:\n # only do 3 regular entangling layers in a ring topology, then two\n # more phase gates with next-nearst neighbor, which requires one\n # swap. 
This adds 4 more parameters\n for j in range(min(len(shiftList), 3)):\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n # entangling_gate_index_list[1+2*3]=[[0,n_qubits-1,1],[0,1,0],[n_qubits-1,n_qubits-2,0]]\n # entangling_gate_index_list[1+2*3]=[[0,n_qubits-1,1],[0,1,0],[n_qubits-1,n_qubits-2,0]]\n entangling_gate_index_list[1+2*3] = [\n [n_qubits-1, 1, 0],\n [0, n_qubits-2, 0]\n ]\n\n for i in range(len(entangling_gate_index_list)-1):\n if len(entangling_gate_index_list[i]) > 0:\n for j in range(len(entangling_gate_index_list[i])):\n qubit_index = entangling_gate_index_list[i][j][0]\n ini_pauli[i+1, qubit_index] = 2\n if i+2 < depth:\n ini_pauli[i+2, qubit_index] = 3\n\n elif type_circuit == 3:\n\n ini_pauli = np.ones([depth, n_qubits], dtype=int)*2\n\n for i in range(1, depth, 2):\n ini_pauli[i, :] = 3\n\n if n_qubits % 2 == 0:\n # even qubits ALT circuit needs to get rid of boundary rotations at\n # even entangling layers\n for i in range(4, depth, 4):\n ini_pauli[i, 0] = 0\n ini_pauli[i, -1] = 0\n if i+1 < depth:\n ini_pauli[i+1, 0] = 0\n ini_pauli[i+1, -1] = 0\n else:\n # for odd qubits, get rid of boundary either on top or bottom qubit\n for i in range(2, depth, 4):\n ini_pauli[i, -1] = 0\n if i+1 < depth:\n ini_pauli[i+1, -1] = 0\n for i in range(4, depth, 4):\n ini_pauli[i, 0] = 0\n if i+1 < depth:\n ini_pauli[i+1, 0] = 0\n\n # CNOT entangling gates\n entangling_gate_index_list = [[] for i in range(depth)]\n counter = 0\n # third index indicates type of entangling gate\n for k in range(1, depth-1, 2):\n\n # place entangler every second layer, do not place any at last\n if counter % 2 == 0:\n # even layer\n entangling_gate_index_list[k] = [\n [2*j, 2*j+1, 1] for j in range(n_qubits//2)\n ]\n else:\n # odd layer\n entangling_gate_index_list[k] = [\n [2*j+1, 2*j+2, 1] for j in range((n_qubits-1)//2)\n ]\n counter += 1\n\n else:\n raise ValueError('type_circuit='+f'{type_circuit}'+' not recognised.')\n\n return ini_pauli, entangling_gate_index_list",
"def test_constructor(self, circuit):\n assert list(circuit.wires) == [jet.Wire(i, 0, False) for i in range(4)]\n assert list(circuit.operations) == [jet.Operation(jet.Qubit(), [i]) for i in range(4)]",
"def construct_circuit(self, parameters, q=None):\n if len(parameters) != self._num_parameters:\n raise ValueError('The number of parameters has to be {}'.format(self._num_parameters))\n\n if q is None:\n q = QuantumRegister(self._num_qubits, name='q')\n if self._initial_state is not None:\n circuit = self._initial_state.construct_circuit('circuit', q)\n else:\n circuit = QuantumCircuit(q)\n \n for b in range(self._depth):\n for i in range(len(self._h_list)):\n if not self._h_list[i].is_empty():\n circuit+=self._h_list[i].evolve(evo_time=parameters[i], quantum_registers=q)\n \n for i in range(len(self._h_list)-1,-1,-1):\n if not self._h_list[i].is_empty():\n circuit+=self._h_list[i].evolve(evo_time=parameters[i], quantum_registers=q)\n return circuit",
"def rabi_schedules(amp_list, qubits, pulse_width, pulse_sigma=None,\n width_sigma_ratio=4, drives=None, cmd_def=None,\n inst_map=None, meas_map=None):\n\n xdata = amp_list\n\n # copy the instruction to schedule mapping\n inst_map = copy.deepcopy(inst_map)\n if not inst_map:\n inst_map = copy.deepcopy(cmd_def)\n\n if pulse_sigma is None:\n pulse_sigma = pulse_width / width_sigma_ratio\n\n # Construct the circuits\n qr = qiskit.QuantumRegister(max(qubits) + 1)\n cr = qiskit.ClassicalRegister(len(qubits))\n\n circuits = []\n\n for circ_index, g_amp in enumerate(amp_list):\n\n circ = qiskit.QuantumCircuit(qr, cr)\n circ.name = 'rabicircuit_%d_0' % circ_index\n\n rabi_pulse = pulse_lib.gaussian(duration=pulse_width,\n amp=g_amp,\n sigma=pulse_sigma,\n name='rabi_pulse_%d' % circ_index)\n\n rabi_gate = Gate(name='rabi_%d' % circ_index, num_qubits=1, params=[])\n\n for _, qubit in enumerate(qubits):\n\n # add commands to schedule\n schedule = pulse.Schedule(name='rabi_pulse_%f_%d' % (g_amp,\n qubit))\n\n schedule += rabi_pulse(drives[qubit])\n\n # append this schedule to the inst_map\n inst_map.add('rabi_%d' % circ_index, qubits=[qubit],\n schedule=schedule)\n\n circ.append(rabi_gate, [qr[qubit]])\n\n for qind, qubit in enumerate(qubits):\n circ.measure(qr[qubit], cr[qind])\n\n circuits.append(circ)\n\n # schedule\n schedule_config = ScheduleConfig(inst_map, meas_map)\n rabi_sched = [schedule_circuit(qcirc,\n schedule_config)\n for qcirc in circuits]\n\n return rabi_sched, xdata",
"def translate_cirq_to_qsim(\n self, qubit_order: cirq.QubitOrderOrList = cirq.QubitOrder.DEFAULT\n ) -> qsim.Circuit:\n\n qsim_circuit = qsim.Circuit()\n ordered_qubits = cirq.QubitOrder.as_qubit_order(qubit_order).order_for(\n self.all_qubits()\n )\n qsim_circuit.num_qubits = len(ordered_qubits)\n\n # qsim numbers qubits in reverse order from cirq\n ordered_qubits = list(reversed(ordered_qubits))\n\n def to_matrix(op: cirq.GateOperation):\n mat = cirq.unitary(op.gate, None)\n if mat is None:\n return NotImplemented\n\n return cirq.MatrixGate(mat).on(*op.qubits)\n\n qubit_to_index_dict = {q: i for i, q in enumerate(ordered_qubits)}\n time_offset = 0\n gate_count = 0\n moment_indices = []\n for moment in self:\n ops_by_gate = [\n cirq.decompose(\n op, fallback_decomposer=to_matrix, keep=_has_cirq_gate_kind\n )\n for op in moment\n ]\n moment_length = max((len(gate_ops) for gate_ops in ops_by_gate), default=0)\n\n # Gates must be added in time order.\n for gi in range(moment_length):\n for gate_ops in ops_by_gate:\n if gi >= len(gate_ops):\n continue\n qsim_op = gate_ops[gi]\n time = time_offset + gi\n add_op_to_circuit(qsim_op, time, qubit_to_index_dict, qsim_circuit)\n gate_count += 1\n time_offset += moment_length\n moment_indices.append(gate_count)\n\n return qsim_circuit, moment_indices",
"def construct_circuit(self, x, qr=None, inverse=False):\n if not isinstance(x, np.ndarray):\n raise TypeError(\"x must be numpy array.\")\n if x.ndim != 1:\n raise ValueError(\"x must be 1-D array.\")\n if x.shape[0] != self._num_qubits:\n raise ValueError(\"number of qubits and data dimension must be the same.\")\n if qr is None:\n qr = QuantumRegister(self._num_qubits, name='q')\n qc = self._constructor_function(x, qr, inverse, *self._feature_param)\n #qc.draw(output='mpl', filename='C:/Users/RaphaelLambert/Pictures/custom.png')\n return qc"
] | [
"0.7279348",
"0.6496389",
"0.59102285",
"0.5772845",
"0.57442355",
"0.5727163",
"0.55746835",
"0.55432576",
"0.55362433",
"0.5509834",
"0.5474253",
"0.5412955",
"0.5407767",
"0.53782344",
"0.53507787",
"0.53283745",
"0.5326846",
"0.5310684",
"0.52896637",
"0.52684003",
"0.52645165",
"0.52475506",
"0.52405316",
"0.5232066",
"0.5181159",
"0.51756585",
"0.5167512",
"0.5130543",
"0.5115967",
"0.5075754"
] | 0.69339204 | 1 |
Convert all config sections to have unique names. Adds unique suffixes to config sections for compability with configparser. | def unique_config_sections(config_file):
section_counters = defaultdict(int)
output_stream = io.StringIO()
with open(config_file) as fin:
for line in fin:
if line.startswith('['):
section = line.strip().strip('[]')
_section = section + '_' + str(section_counters[section])
section_counters[section] += 1
line = line.replace(section, _section)
output_stream.write(line)
output_stream.seek(0)
return output_stream | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fix_compname_configs(ibs):\n # ibs.MANUAL_CONFIG_SUFFIX = '_MANUAL_' #+ ut.get_computer_name()\n # ibs.MANUAL_CONFIGID = ibs.add_config(ibs.MANUAL_CONFIG_SUFFIX)\n # We need to fix the manual config suffix to not use computer names anymore\n\n configid_list = ibs.get_valid_configids()\n cfgsuffix_list = ibs.get_config_suffixes(configid_list)\n\n ibs.MANUAL_CONFIG_SUFFIX = 'MANUAL_CONFIG'\n ibs.MANUAL_CONFIGID = ibs.add_config(ibs.MANUAL_CONFIG_SUFFIX)\n\n for rowid, suffix in filter(\n lambda tup: tup[1].startswith('_MANUAL_'), zip(configid_list, cfgsuffix_list)\n ):\n logger.info('EVALUATING: {!r}, {!r}'.format(rowid, suffix))\n # Fix the tables with bad config_rowids\n ibs.db.executeone(\n \"\"\"\n UPDATE {AL_RELATION_TABLE}\n SET config_rowid=?\n WHERE config_rowid=?\n \"\"\".format(\n **const.__dict__\n ),\n params=(ibs.MANUAL_CONFIGID, rowid),\n )\n\n # Delete the bad config_suffixes\n ibs.db.executeone(\n \"\"\"\n DELETE\n FROM {CONFIG_TABLE}\n WHERE config_rowid=?\n \"\"\".format(\n **const.__dict__\n ),\n params=(rowid,),\n )",
"def find_unique_keys(base_config, comp_config, base_name):\n unique_keys = []\n unique_sections = []\n\n for section in base_config:\n if str(section) == 'DEFAULT':\n continue #.cfg has DEFAULT key, we do not use\n if not comp_config.has_section(section):\n unique_label = base_name + '.' + str(section)\n unique_sections.append(unique_label)\n continue\n\n for key in base_config[section]:\n if not comp_config.has_option(section, key):\n unique_label = str(section) + '.' + str(key)\n unique_keys.append(unique_label)\n continue\n #TODO: compare values?\n return unique_sections, unique_keys",
"def get_suffix_configuration(lst):\n suffix_conf = ''\n for elem in lst: \n suffix_conf += '_'\n if type(elem) != str: \n elem = str(elem)\n suffix_conf += elem\n return suffix_conf",
"def _move_all_to_config_section(self):\n for section in self.OLD_SECTIONS:\n if not self.has_section(section):\n continue\n\n all_configs = self.keys(section)\n for key in all_configs:\n self.set('config',\n key,\n super().getraw(section, key))\n\n self._conf.remove_section(section)",
"def _format_bases_config(bases_config: BasesConfiguration) -> str:\n return \"_\".join([_format_run_on_base(r) for r in bases_config.run_on])",
"def loadconfigtable(ui, extname, configtable):\n for section, items in sorted(configtable.items()):\n knownitems = ui._knownconfig.setdefault(section, itemregister())\n knownkeys = set(knownitems)\n newkeys = set(items)\n for key in sorted(knownkeys & newkeys):\n msg = b\"extension '%s' overwrite config item '%s.%s'\"\n msg %= (extname, section, key)\n ui.develwarn(msg, config=b'warn-config')\n\n knownitems.update(items)",
"def _parse_yaml_configs(args, anon_component_prefix=\"anon_app\"):\n # Configuration files are basically nested dictionaries and the command-line arguments\n # are a list with each element being a dictionary. If the dict in the args has the key\n # 'class', then it is anonymous and we should just give it a sequential unique name to\n # ensure it is run. If, however, it does not, then we should assume that it's a NAMED\n # configuration and so we can actually use that to overwrite/modify the configurations\n # pulled in from a file.\n\n new_configs = {}\n for arg in args:\n try:\n arg = yaml.load(arg)\n except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:\n raise ValueError(\"error parsing manual configuration: %s\\nError:%s\" % (arg, e))\n\n # If this config is anonymous, give it a unique name and add it to configs\n # since it couldn't possibly overwrite another config entry.\n # NOTE: if user specified a 'name' entry directly, we will still take that later on...\n if 'class' in arg:\n # TODO: perhaps register these names somewhere to ensure uniqueness?\n global __scale_client_n_anon_apps_added__\n unique_key = anon_component_prefix + str(__scale_client_n_anon_apps_added__)\n __scale_client_n_anon_apps_added__ += 1\n new_configs[unique_key] = arg\n else:\n try:\n new_configs.update(arg)\n except TypeError as e:\n raise ValueError(\"error in your manual configuration: %s\\n\"\n \"couldn't be interpreted as a dict due to error: %s\" % (arg, e))\n\n return new_configs",
"def create_name_mappings(\n config: Dict[str, Dict[str, Union[str, List]]], map_full_to_short: bool = True\n) -> Dict:\n\n csv_to_excel = {}\n for name, params in config.items():\n try:\n csv_to_excel[name] = params[\"short_name\"]\n except KeyError:\n if len(name) > 31:\n logger.info(f\"{name} does not have a 'short_name'\")\n continue\n\n if map_full_to_short:\n return csv_to_excel\n else:\n return {v: k for k, v in csv_to_excel.items()}",
"def make_unique_index(config, stash_code):\n for data in get_section_new_indices(config):\n section_base, old_index, new_index = data\n key = config.value.keys()\n isec_item = stash_code['section']+stash_code['item']\n old_index_sections = old_index.split('_')\n old_section = SECTION_FORMAT.format(section_base, old_index_sections[0], old_index_sections[1])\n new_section = SECTION_FORMAT.format(section_base, isec_item, new_index)\n print 'old_section, new', old_section, new_section\n\n old_node = config.unset([old_section])\n old_id_opt_values = []\n for opt, node in old_node.value.items():\n old_id = rose.CONFIG_DELIMITER.join([old_section, opt])\n old_id_opt_values.append((old_id, opt, node.value))\n # update key value\n config.value.update({new_section: old_node})",
"def sanitize_configuration_option_name(self, name: str) -> str:\n\n sanitized = name.replace(\"-\", \"_\")\n prefix = self.parent.objtype.split(\"_\")[-1]\n\n if prefix not in sanitized:\n sanitized = f\"{prefix}_{sanitized}\"\n\n return f\"autodoc_pydantic_{sanitized}\"",
"def section_name(name, n, prefix='py-{pid}'.format(pid=os.getpid())):\n return '.'.join(filter(bool, [prefix, name, str(n)]))",
"def suffixes(self):\n suffixes = []\n for constraint, suffix in self.conf.get(\"suffixes\", {}).items():\n if constraint in self.spec:\n suffixes.append(suffix)\n suffixes = list(dedupe(suffixes))\n if self.hash:\n suffixes.append(self.hash)\n return suffixes",
"def reverse_aliases():\n result = {}\n aliases = construct_aliases()\n for key in aliases:\n cat, idx = key.split(':')\n prp = ':'.join(aliases[key].split(':')[1:])\n # TODO TODO\n result[cat + '.' + prp] = cat + ':' + idx\n return result",
"def create_config(ta_map, aliasfile, newaliasfile):\n\n def config_split(l):\n k, v = l.split(\":\")\n return k, [x.strip() for x in v.split(\",\")]\n\n config_dict = defaultdict(set)\n\n with open(aliasfile, \"r\") as config:\n for l in config:\n k, v = config_split(l)\n config_dict[k] = v\n\n with open(newaliasfile, \"w\") as config:\n\n config.truncate()\n\n while ta_map:\n k, v = ta_map.popitem()\n\n if len(v) == 1:\n config.write(k + \":\\n\")\n else:\n k_decide_key = k[:]\n v_decide_key = set(v)\n ta_name, ta_aliases = decide_on_key(\n k_decide_key, v_decide_key, config_dict\n )\n config.write(\"{}: {}\\n\".format(ta_name, \",\".join(ta_aliases)))\n v.remove(k)\n for x in v:\n ta_map.pop(x)",
"def save_devices_names_file(self, all_lamps):\n self._devices_names = {}\n for lamp_object in all_lamps.values():\n self._devices_names[lamp_object.short_address.address] = {\n \"friendly_name\": str(lamp_object.short_address.address)\n }\n try:\n with open(self._path, \"w\") as outfile:\n yaml.dump(\n self._devices_names,\n outfile,\n default_flow_style=False,\n allow_unicode=True,\n )\n except Exception as err:\n logger.error(\"Could not save device names config: %s\", err)",
"def guard_from_parts(parts):\n name = parts[-1]\n\n # Remove .in suffix from config files\n if name.endswith(\".in\"):\n name = name[:-3]\n\n parts[-1] = name.replace(\".\", \"_\")\n return \"_\".join(part.upper() for part in parts)",
"def add_latesettings_aliases(self):\n aliases = {\n mconst.DEF_SETTINGNAME_siteurl_absolute: self.settings.get_subvalue(mconst.DEF_SETTINGSEC_config, mconst.DEF_SETTINGNAME_siteurl_absolute),\n mconst.DEF_SETTINGNAME_siteurl_relative: self.settings.get_subvalue(mconst.DEF_SETTINGSEC_config, mconst.DEF_SETTINGNAME_siteurl_relative,''),\n mconst.DEF_SETTINGNAME_sitefilepath: self.settings.get_subvalue(mconst.DEF_SETTINGSEC_config, mconst.DEF_SETTINGNAME_sitefilepath),\n mconst.DEF_SETTINGNAME_sitename: self.settings.get_subvalue(mconst.DEF_SETTINGSEC_config, mconst.DEF_SETTINGNAME_sitename),\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_aliases, aliases)\n self.alias_settings_change()",
"def override(self, **kwds):\n # Replace prelude section name if it has been changed.\n self._rename_prelude_section(**kwds)\n\n for name, val in kwds.items():\n if name not in self._OPTS:\n LOG.warning('ignoring unknown configuration value %r = %r',\n name, val)\n else:\n if name == \"sections\":\n val = Section.from_raw_yaml(val)\n setattr(self, name, val)",
"def test_replace_namespaced_build_config(self):\n pass",
"def test_load_config_with_aliases(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\"image: bosybux\\n\")\n f.write(\"aliases:\\n\")\n f.write(\" foo: bar\\n\")\n f.write(\" snap: crackle pop\\n\")\n\n config = scuba.config.load_config(\".scuba.yml\")\n assert config.image == \"bosybux\"\n assert len(config.aliases) == 2\n assert config.aliases[\"foo\"].script == [\"bar\"]\n assert config.aliases[\"snap\"].script == [\"crackle pop\"]",
"def restoreConfigName( ):\n subNo = s.getSubarrayNo()\n configMpName = \"Control.Subarray%d.configName\" %subNo\n retries = 24 \n arrayConfig = queryString( configMpName, retries )\n configName(arrayConfig)",
"def prepare_filenames(config: Dict[str, Any]) -> Dict[str, Any]:\n for handler_name in config[\"handlers\"].keys():\n handler_config = config[\"handlers\"][handler_name]\n if \"filename\" in handler_config:\n filename = Path(handler_config[\"filename\"]).name\n handler_config[\"filename\"] = str(LOGS_DIR.joinpath(filename))\n return config",
"def handle_config_inited(app, config):\n\n def handle_legacy(new, orig):\n if getattr(config, new) is None and getattr(config, orig) is not None:\n config[new] = config[orig]\n\n # copy over deprecated configuration names to new names (if any)\n handle_legacy('confluence_publish_allowlist', 'confluence_publish_subset')\n handle_legacy('confluence_purge_from_root', 'confluence_purge_from_master')\n handle_legacy('confluence_root_homepage', 'confluence_master_homepage')\n handle_legacy('confluence_space_key', 'confluence_space_name')",
"def config_section_map(configer):\n conf_dict = {}\n for section in configer.sections():\n conf_dict[section] = {}\n for key, val in configer.items(section):\n conf_dict[section][key] = val\n return conf_dict",
"def test_config_from_ini(self):\n\n # Make ini-file\n path = self.write_temp_file(\"\"\"\n[section1]\nstring1:\nstring2: string2\nint1: 0\nint2: 1\nfloat1: 0.0\nfloat2: 1.1\nboolean1: false\nboolean2: true\n\n[section2]\nstring2: string2\nint2: 2\nfloat2: 2.2\nboolean2: false\n\"\"\")\n\n for namespace in [None, 'namespace']:\n config = Config()\n config.load_from_ini(path, namespace=namespace)\n\n namespace_prefix = '%s.' % namespace if namespace is not None else ''\n\n # Test section 1\n self.assert_equal_deep(8, len(config('%ssection1' % namespace_prefix)))\n self.assert_equal_deep('', config('%ssection1.string1' % namespace_prefix))\n self.assert_equal_deep('string2', config('%ssection1.string2' % namespace_prefix))\n self.assert_equal_deep(0, config('%ssection1.int1' % namespace_prefix))\n self.assert_equal_deep(1, config('%ssection1.int2' % namespace_prefix))\n self.assert_equal_deep(0.0, config('%ssection1.float1' % namespace_prefix))\n self.assert_equal_deep(1.1, config('%ssection1.float2' % namespace_prefix))\n self.assert_equal_deep(False, config('%ssection1.boolean1' % namespace_prefix))\n self.assert_equal_deep(True, config('%ssection1.boolean2' % namespace_prefix))\n\n # Test section 2\n self.assert_equal_deep(4, len(config('%ssection2' % namespace_prefix)))\n self.assert_equal_deep('string2', config('%ssection2.string2' % namespace_prefix))\n self.assert_equal_deep(2, config('%ssection2.int2' % namespace_prefix))\n self.assert_equal_deep(2.2, config('%ssection2.float2' % namespace_prefix))\n self.assert_equal_deep(False, config('%ssection2.boolean2' % namespace_prefix))\n\n # Test section 3\n self.assert_equal(None, config('%ssection3' % namespace_prefix))",
"def _getConfigName(self):\n pass",
"def get_config_names():\r\n return sorted(CONFIGS.keys())",
"def parse_config_sections(self, namespace, sections):\n with patch(\"snakeoil.cli.arghparse.ArgumentParser.error\", self._config_error):\n for section in (x for x in sections if x in self.config):\n config_args = [\n f\"--{k}={v}\" if v else f\"--{k}\" for k, v in self.config.items(section)\n ]\n namespace, args = self.parser.parse_known_optionals(config_args, namespace)\n if args:\n self.parser.error(f\"unknown arguments: {' '.join(args)}\")\n return namespace",
"def generate_name(config):\n\n name = basename(config.name)\n if config.prepro is not None:\n name += \"_\" + config.prepro\n if config.extract_pos:\n name += \"_pos\"\n return name",
"def _add_parsed_config_file(self, filename, sections, normalized):\n for s in sections:\n self._sections_to_file[s] = filename\n self._parsed.insert(0, sections)\n self._normalized.insert(0, normalized)"
] | [
"0.6045993",
"0.5563847",
"0.5368636",
"0.5358432",
"0.5267798",
"0.51614577",
"0.51502544",
"0.5139199",
"0.5124081",
"0.50915194",
"0.507346",
"0.50681233",
"0.49954256",
"0.4976946",
"0.49577245",
"0.49545503",
"0.4944312",
"0.492063",
"0.4905497",
"0.490392",
"0.48970628",
"0.4877987",
"0.48711643",
"0.48378214",
"0.48243266",
"0.48201296",
"0.48185",
"0.48156133",
"0.48056394",
"0.4781314"
] | 0.5965138 | 1 |
The last value for the W array is correct | def test_W_end(self):
self.assertAlmostEqual(attempt.W[-1], 9.494852380803035) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def w(self) -> np.ndarray:\n return self.array[:, 0] if self.scalar_vector else self.array[:, 3]",
"def get_w(self, i_order):\n\n return np.array([]), np.array([])",
"def w(self) -> float:\n return self.A[0] if self.scalar_vector else self.A[3]",
"def new_w(w, d):\n\n if w.sum() > 0:\n next_w = w.copy()\n next_w[next_w > 0] -= 1\n return next_w\n else:\n if d[0] == 1:\n return np.array([51,0,0])\n elif d[1] == 1:\n return np.array([0,51,0])\n else:\n return np.array([0,0,51])",
"def uw(self):\n return sm.unitvec(self.w)",
"def w(self):\n return self._data[3]",
"def get_lw_to_sw_array(self):\n if self.lw_to_sw_array is None:\n lw_to_sw_array = self.basis.get_dO_I_ddelta_alpha(self.sw_survey.geo,self.sw_survey.get_dO_I_ddelta_bar_array())\n else:\n lw_to_sw_array = self.lw_to_sw_array\n return lw_to_sw_array",
"def _mpo_get_d(self, W):\n din = W.shape[3]\n dout = W.shape[1]\n return dout, din",
"def get_shape(self):\n if not self.channels_first:\n return [None] + self.w + [self.numoffeatures]\n else:\n return [None] + [self.numoffeatures] + self.w",
"def get_hardwired_speed_weights(self):\n \n phase_shift=self.speed_phase_shift\n \n # row 1 has the weights of speed cells to grid cell 1\n self.W_speed_east=np.zeros_like(self.W_ee) \n self.W_speed_west=np.zeros_like(self.W_ee) \n self.W_speed_north=np.zeros_like(self.W_ee) \n self.W_speed_south=np.zeros_like(self.W_ee) \n\n if self.use_eight_directions is True:\n self.W_speed_north_east=np.zeros_like(self.W_ee) \n self.W_speed_north_west=np.zeros_like(self.W_ee) \n self.W_speed_south_east=np.zeros_like(self.W_ee) \n self.W_speed_south_west=np.zeros_like(self.W_ee) \n\n\n for phase_idx,phase in enumerate(self.gp.phases):\n shifted_north_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/2.),self.gp.phases)\n shifted_south_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/2.),self.gp.phases)\n shifted_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(0),self.gp.phases)\n shifted_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi),self.gp.phases)\n\n self.W_speed_north[phase_idx,:]=self.W_ee[shifted_north_phase_idx,:]\n self.W_speed_south[phase_idx,:]=self.W_ee[shifted_south_phase_idx,:]\n self.W_speed_east[phase_idx,:]=self.W_ee[shifted_east_phase_idx,:]\n self.W_speed_west[phase_idx,:]=self.W_ee[shifted_west_phase_idx,:] \n \n if self.use_eight_directions is True:\n shifted_north_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/4),self.gp.phases)\n shifted_north_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi*3/4),self.gp.phases)\n shifted_south_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/4),self.gp.phases)\n shifted_south_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi*3/4),self.gp.phases)\n \n self.W_speed_north_east[phase_idx,:]=self.W_ee[shifted_north_east_phase_idx,:]\n self.W_speed_north_west[phase_idx,:]=self.W_ee[shifted_north_west_phase_idx,:]\n self.W_speed_south_east[phase_idx,:]=self.W_ee[shifted_south_east_phase_idx,:]\n self.W_speed_south_west[phase_idx,:]=self.W_ee[shifted_south_west_phase_idx,:]",
"def W(self):\n if not self.isVaild():\n pass\n return self.Wq() + 1.0/self.muy",
"def _get_wimage(self, arr_np):\n #return result\n raise NotImplementedError",
"def get_wm_ws_Gx_bot(self):\n # BASICALLY SETS self.Gm1_bot, self.dGm1_dS_bot, self.Gt1_bot, self.dGt1_dS_bot \n z_u_r = self.grid_dict['z_u_r']\n z_u_w = self.grid_dict['z_u_w']\n [Ly,N] = self.b.shape\n #---> j-loop\n for j in range(Ly): \n self.kbl[j] = N # initialize search\n #-> end j-loop\n\n #--> k-loop\n for k in range(N-1,0,-1):\n k_w = k\n k_r = k-1\n # --> j loop \n for j in range(Ly):\n if z_u_r[j,k_r] - z_u_w[j,0] > self.hbbl[j]:\n self.kbl[j] = k_w\n\n #--> end k\n # --> end j\n\n\n '''\n Compute nondimenisonal shape function coefficeints Gx() by\n matching values and vertical derivatives of interior mixing\n coefficients at hbbl (sigma=1)\n '''\n\n self.Gm1_bot = np.zeros([Ly])\n self.dGm1_dS_bot = np.zeros([Ly])\n self.Gt1_bot = np.zeros([Ly])\n self.dGt1_dS_bot = np.zeros([Ly]) \n self.Av_bl_bot = np.zeros([Ly])\n self.dAv_bl_bot = np.zeros([Ly]) \n self.cff_up_bot = np.zeros([Ly])\n self.cff_dn_bot = np.zeros([Ly])\n\n\n\n\n\n self.wm_bot = np.zeros([Ly])\n self.ws_bot = np.zeros([Ly]) \n\n # CALCULATE ustar for the bottom based on bototm velocities\n \n \n \n # CALCULATE r_D\n self.r_D = TTTW_func.get_r_D(self.u,self.v,self.Zob,self.grid_dict) \n u = self.u\n v_upts = TTTW_func.v2u(self.v)\n \n ubar = np.mean(u,axis=1)\n vbar = np.mean(v_upts,axis=1)\n\n # --> j loop\n for j in range(Ly):\n # turbulent velocity sclaes with buoyancy effects neglected\n if self.CD_SWITCH:\n # DEPTH AVERAGED APPROACH\n uref = u[j,0]\n vref = v_upts[j,0]\n ustar2 = self.C_D * (uref**2 + vref**2)\n else:\n ustar2 = self.r_D[j] * np.sqrt(u[j,0]**2 + v_upts[j,0]**2)\n wm = self.vonKar * np.sqrt(ustar2)\n ws = wm\n\n self.wm_bot[j] = wm\n self.ws_bot[j] = ws\n \n k_w = self.kbl[j] \n z_bl = z_u_w[j,0] + self.hbbl[j]\n\n if z_bl < z_u_w[j,k_w-1]:\n k_w = k_w-1\n\n cff = 1. / (z_u_w[j,k_w] - z_u_w[j,k_w-1])\n cff_up = cff * (z_bl - z_u_w[j,k_w])\n cff_dn = cff * (z_u_w[j,k_w] - z_bl)\n \n Av_bl = cff_up * self.Kv_old[j,k_w] + cff_dn * self.Kv_old[j,k_w-1]\n dAv_bl = cff * ( self.Kv_old[j,k_w] - self.Kv_old[j,k_w-1])\n self.Av_bl_bot[j] = Av_bl\n self.dAv_bl_bot[j] = dAv_bl\n\n\n self.Gm1_bot[j] = Av_bl / (self.hbbl[j] * wm + self.eps)\n self.dGm1_dS_bot[j] = np.min([0,-dAv_bl/(ws+self.eps)])\n\n At_bl = cff_up * self.Kt_old[j,k_w] + cff_dn * self.Kt_old[j,k_w-1]\n dAt_bl = cff * ( self.Kt_old[j,k_w] - self.Kt_old[j,k_w-1])\n self.Gt1_bot[j] = At_bl / (self.hbbl[j] * ws + self.eps)\n self.dGt1_dS_bot[j] = np.min([0,-dAt_bl/(ws+self.eps)])",
"def __init__(self, w):\n self.w = np.array(w) if isinstance(w, list) else w\n self.c = np.zeros_like(self.w)",
"def y(self):\n return self._arr[1]",
"def worddist(self):\n #return (self.n_z_t + self.beta) / (self.n_z[:, numpy.newaxis] + self.V * self.beta), len(self.n_z)\n return (self.n_z_t + self.n_w_d + self.beta) / (self.n_z[:, numpy.newaxis] + self.n_w[:, numpy.newaxis] + self.V * self.beta), len(self.n_z)",
"def set_initial_wb(self):\n self.w = np.append(self.iniW, self.iniB)",
"def z(self):\n return self._arr[2]",
"def array(self):\n return np.array([self.w, self.x, self.y, self.z])",
"def initW_ard(self, alpha=None):\n if alpha is None:\n alpha = self.initAlpha()\n W = [ s.zeros((self.D[m],self.K)) for m in range(self.M) ]\n for m in range(self.M):\n for k in range(self.K):\n W[m][:,k] = norm.rvs(loc=0, scale=1/s.sqrt(alpha[m][k]), size=self.D[m])\n return W,alpha",
"def get_main_array_bottom(self):\n return self.bitcell_array_inst.by()",
"def w(self):\n # w must be a CArray\n raise NotImplementedError(\"Linear normalizer should define the slope.\")",
"def unkW(self):\n if self._unkW is None:\n self._unkW = self.W.mean(0)\n return self._unkW",
"def w(self):\n return self._w",
"def Create_Constant_WavelengthArray(spec_cube,final_wave_start,final_wave_end):\n\tdwave = np.zeros(len(spec_cube))\n\tfor n in xrange(len(spec_cube)):\n\t\ttemp_final_wave = spec_cube[n][0] # Take one of the spectrum use its resolution\n\t\tdwave[n] = np.median(temp_final_wave[1:] - temp_final_wave[:-1])\n\tdwave = np.max(dwave)\n\tfinal_wave = np.arange(final_wave_start,final_wave_end,dwave)\n\tprint 'Since input dv = 0 -> median resolution (constant) dwave = %f angstrom is used.' % dwave\n\treturn final_wave",
"def __len__(self):\r\n\r\n return self.yInput.shape[1]",
"def get_tuned_excitatory_weights(self):\n \n self.W_ee=np.zeros((self.N_e,self.N_e))\n \n if not hasattr(self,'fixed_connectivity_tuning'):\n self.fixed_connectivity_tuning=1\n \n num_tuned_conns=int(np.floor(self.fixed_connectivity_tuning*self.num_conns_ee))\n num_untuned_conns=self.num_conns_ee-num_tuned_conns\n \n for i in xrange(self.N_e):\n ref_phase=self.gp.phases[i,:]\n dists=gl.get_periodic_dist_on_rhombus(self.n_e,ref_phase,self.gp.phases,self.gp.u1,self.gp.u2)\n sorted_idxs=np.argsort(dists)\n \n tuned_idxs=sorted_idxs[:self.num_conns_ee]\n np.random.shuffle(tuned_idxs)\n\n #untuned_idxs=np.setdiff1d(np.arange(self.N_e),tuned_idxs)\n all_idxs=np.arange(self.N_e)\n np.random.shuffle(all_idxs)\n \n self.W_ee[i,tuned_idxs[0:num_tuned_conns]]=self.W_max_ee\n self.W_ee[i,all_idxs[:num_untuned_conns]]=self.W_max_ee\n \n \n self.W[:self.N_e,:self.N_e]=self.W_ee",
"def w(self):\r\n return self.size.x",
"def get_main_array_right(self):\n return self.bitcell_array_inst.rx()",
"def build_W(points):\n return None"
] | [
"0.6439141",
"0.6317358",
"0.6140997",
"0.60708946",
"0.60476446",
"0.6045793",
"0.6034233",
"0.5948805",
"0.5945211",
"0.58441013",
"0.58261454",
"0.5818163",
"0.57634544",
"0.5760683",
"0.5717286",
"0.5704453",
"0.5694879",
"0.5665707",
"0.5622648",
"0.56216794",
"0.56203514",
"0.56194425",
"0.56021523",
"0.55950195",
"0.5576624",
"0.5571594",
"0.5561269",
"0.5559168",
"0.5556991",
"0.5547157"
] | 0.639363 | 1 |
The maxIndex variables are correct | def test_maxIndex(self):
self.assertEqual(attempt.maxIndexZ, 113)
self.assertEqual(attempt.maxIndexW, 134) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def maxQualifiedIndex(self, indices):\n entry = self.getConfig()\n # the leader keep its own record updated to the newest\n indices[self.datacenter_id] = len(self.log) - 1\n # print('!!!!!', indices)\n if entry['config'] == 'single':\n return sorted([indices[x] for x in entry['data']])[(len(entry['data'])-1)/2]\n maxOld = sorted([indices[x] for x in entry['data'][0]])[(len(entry['data'][0])-1)/2]\n maxNew = sorted([indices[x] for x in entry['data'][1]])[(len(entry['data'][1])-1)/2]\n return min(maxOld, maxNew)",
"def get_max_index(a):\n return a.argmax()",
"def get_max_index(a):\n return a.argmax()",
"def get_max_index(a):\n return a.argmax()",
"def worstVectorIndex(self):\n return max(range(len(self.costs)), key=self.costs.__getitem__)",
"def getIndexRef(row, col, frame, midRange, maxIndex): \n indexRef = numpy.zeros(12).reshape(4,3)\n indexRef[:,0] = numpy.arange(-1, 3) * midRange + row\n indexRef[:,1] = numpy.arange(-1, 3) * midRange + col\n indexRef[:,2] = numpy.arange(-1, 3) * midRange + frame\n \n for i in range(4):\n for j in range(3):\n if indexRef[i,j] < 0 or indexRef[i,j] > maxIndex:\n indexRef[i,j] = -1\n return indexRef",
"def max_position(self):\n raise NotImplementedError",
"def _findMaxIndex(data, mark):\n # assume the maximum value is at initial mark position\n maxIndex = mark\n # loop over the remaining positions greater than the mark\n for mark in range(mark+1, len(data)):\n # if a bigger value is found, record its index\n if data[mark][1][2] > data[maxIndex][1][2]:\n maxIndex = mark\n return maxIndex",
"def storage_upper_bound(index):\n i = index[0]\n return storage_para[i].pmax",
"def max_pos(self, start, end, header) -> int:",
"def index_max(v):\n return max(range(len(v)), key=v.__getitem__)",
"def max_positions(self):\n return None",
"def _get_end_index(self):\n return max(self.index + self.source_window,\n self._get_target_index() + self.target_window)",
"def maxIG(data):\n \n index = -1\n max_gain = -1\n \n for i in range(len(data[0]) - 1):\n gain = informationGain2(data, i)\n if gain > max_gain:\n index = i\n max_gain = gain\n \n return (index, max_gain)",
"def max_positions(self):\n return int(100000.0)",
"def updateMax(self, maxIndex=None):\n \n if not maxIndex is None:\n self.maxIndex = maxIndex\n \n if (self.indexMode == True):\n maxValue = self.maxIndex + self.startIndex\n else:\n maxValue = self.axisValues[self.maxIndex]\n\n self.bottomLabel.setText(str(maxValue))\n self.axisCombo.setMaxValue(maxValue)",
"def calcMaxIDX(fls, noct):\n freq_l = fls[-1] / (2.0 ** (1 / (2.0 * noct)))\n max_idx = np.array(abs(fls - freq_l)).argmin()\n return max_idx",
"def indexOfMax(list):\n max = -np.Infinity\n index = 0\n i = 0\n for value in list:\n if value > max:\n max = value\n index = i\n i += 1\n return index",
"def N_states_for_learner(self):\n idx_max = []\n limits = 50, 2*_math.pi, 50, 50, 50, 50, 50, 50, 50\n for idx, limit in enumerate(limits):\n test = [0 for i in xrange(len(limits))]\n check = _arange(-limit,limit,limit/1000.)\n maxi = 0\n for v in check:\n test[idx]=v\n ret = self._state_index(*test)\n maxi = max((maxi, ret[idx]))\n idx_max.append(maxi)\n\n return tuple([idx+1 for idx in idx_max])",
"def _getBestIndex(self, validQvals):\r\n maxVal = max(validQvals) # FIXME\r\n bestMoves = [index for index, move in enumerate(validQvals) if move == maxVal]\r\n\r\n # heuristic: choose last bucket\r\n return int(bestMoves[-1])",
"def find_longest_axis(self, vector):\n max_value_index = 0\n for i in range(1, 5):\n if abs(vector[i]) > abs(vector[max_value_index]):\n max_value_index = i\n return max_value_index",
"def max_op(*substrate_index_arrays):\n result = numpy.max(\n numpy.stack([x.flatten() for x in substrate_index_arrays]) *\n self.species_substrate_suitability_index_array, axis=0)\n result = result.reshape(substrate_index_arrays[0].shape)\n result[substrate_index_arrays[0] == _INDEX_NODATA] = _INDEX_NODATA\n return result",
"def find_max_row_idx(self) -> int:\n return np.argmax([r.free_spots for r in self.rows])",
"def imax(self):\n return self.elem.index(max(self))",
"def max_level(self):\n return self.__max",
"def get_max(bij, exploration, bij_bool):\n\n#\tbij[bij_bool] = -sys.maxint - 1\n\n\tm = bij.argmax()\n\tc = np.unravel_index(m, bij.shape)\n\t#c = np.unravel_index(bij.argmax(), bij.shape)\n\n############################## A MODIFIER EVENTUELLEMENT #################\n#\tb = bij[bij_bool]\n#\tm = b.argmax()\n#\tind = np.unravel_index(m, b.shape)\n#\tc = np.where(bij == b[ind])\n#\tc = (c[0][0], c[1][0])\n#\tprint('mMAXx', bij[c])\n\treturn (c)",
"def _get_max_group_index(self):\n cursor = self.mongo.db.userfield.find({}).sort('index', -1)\n model = []\n for group in cursor:\n model = group\n break\n if not model:\n return 0\n else:\n return model['index']",
"def max(self, i):\n x=self.val(i,0)\n lm=len(self)\n t=1\n while t<lm:\n y=self.val(i,t)\n if x<y:\n x=y\n t+=1\n return x",
"def max_positions(self):\r\n return (self.args.max_source_positions, self.args.max_target_positions)",
"def create_random_index(self, max:int):\n return random.randint(0, max - 1)"
] | [
"0.7089196",
"0.67561334",
"0.67561334",
"0.67561334",
"0.6644873",
"0.6621024",
"0.65653",
"0.6489624",
"0.64309424",
"0.6412204",
"0.6361595",
"0.63488233",
"0.63382834",
"0.63159776",
"0.62829936",
"0.6254711",
"0.6254558",
"0.6239701",
"0.6237676",
"0.61731935",
"0.6165807",
"0.61589736",
"0.6146927",
"0.61468446",
"0.61393917",
"0.6127306",
"0.61269987",
"0.61156785",
"0.6078371",
"0.60670316"
] | 0.7708999 | 0 |
Checks mount point is owned by swift | def is_ug_swift(d, r):
stats = os.stat(d.mount)
uid = stats.st_uid
gid = stats.st_gid
user = pwd.getpwuid(uid).pw_name
group = grp.getgrgid(gid).gr_name
if user == group == 'swift':
return True
else:
r.msgkey('user', user)
r.msgkey('group', group)
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False",
"def _check_mounted_system(self):\n res = self.su_cmd('touch /system/.dwarf_check')\n if res == '':\n res = self._do_adb_command('shell ls -la /system')\n if '.dwarf_check' in res:\n res = self.su_cmd('rm /system/.dwarf_check')\n if res == '':\n return True\n elif res == 'Read-only file system':\n return False\n\n return False",
"def _mount_point_exists(self, mountpoint):\n cmd = ['dir', mountpoint]\n logger.debug('running command: %s' % (' '.join(cmd)))\n stdout, stderr, retval = self._run_cli_process(cmd)\n\n if not retval:\n logger.debug(\"mountpoint %s ready\" % mountpoint)\n else:\n logger.debug(\"mountpoint %s reported not ready with error '%s'\" %\n (mountpoint, stderr.strip()))\n\n return not retval",
"def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)",
"def mounted(self):\n return os.path.ismount(self.get(\"~mountpoint\", \"/\"))",
"def is_mounted(self):\n try:\n _ = openmediavault.subprocess.check_output(\n [\n 'findmnt',\n '--canonicalize',\n '--first-only',\n '--noheadings',\n '--raw',\n '--nofsroot',\n self.canonical_device_file,\n ]\n )\n return True\n except subprocess.CalledProcessError:\n pass\n return False",
"def is_still_owner(self):\n raise tooz.NotImplemented",
"async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner",
"def is_sys(self):\n if self.mountpoint is not None and self.mountpoint in ['/', '/boot']:\n return True\n return False",
"def is_sys(self):\n if self.mountpoint is not None and self.mountpoint in ['/', '/boot']:\n return True\n return False",
"def is_owner(self):\n return self._is_owner",
"def is_mountpoint(path):\r\n return path in [m['dest'] for m in mounts()]",
"def is_mountpoint(path: str) -> bool:\n mtpt = subprocess.run([\"mountpoint\", path], check=False, capture_output=True)\n return mtpt.returncode == 0",
"def test_mount_status_nas_share(self):\n pass",
"def isowner(self, o):\n return self._owner is o",
"def is_owner(self, resource: Model) -> bool:\n\n try:\n self.raise_for_ownership(resource)\n except SupersetSecurityException:\n return False\n\n return True",
"def assert_same_owner(path):\n try:\n assert find_owner(path) == getuser(), f\"{path} must be owned by {getuser()}\"\n except AssertionError as error:\n raise click.UsageError(str(error))\n except FileNotFoundError:\n pass",
"def ismount(path):\n return True if not get_instance(path).relpath(path) else False",
"def test_mount_status_nas_share_by_pool(self):\n pass",
"def test_mount_status_nas_share_by_nas(self):\n pass",
"def _is_pool_owned(self, pdata):\n svc = '/api/system/v1/version'\n ret = self.rest_get(svc, restclient.Status.OK)\n vdata = jsonutils.loads(ret.data)\n return (vdata['version']['asn'] == pdata['pool']['asn'] and\n vdata['version']['nodename'] == pdata['pool']['owner'])",
"def hasRootAccessToDisplay(display):\n # not necessary on windows\n return True",
"def fs_ok(fs_info):\n if fs_info.mountpoint == '/':\n return True\n\n if (fs_info.device == fs_info.fstype or fs_info.fstype == 'nullfs' or\n '/docker' in fs_info.mountpoint or\n fs_info.mountpoint.startswith('/etc') or\n fs_info.mountpoint.startswith('/lib/modules')):\n return False\n\n if fs_info.device.startswith('/dev/'):\n return True\n\n return False",
"def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.check(check)",
"def is_mounted(volume):\n mounts = sudo(\"mount\", quiet=True).split(\"\\n\")\n for m in mounts:\n if m.startswith(volume + \" \"):\n return True\n return False",
"def check_valid_device(self, path, run_as_root=True):\n sheepdog_handle = path\n\n if sheepdog_handle is None:\n return False\n\n original_offset = sheepdog_handle.tell()\n\n try:\n sheepdog_handle.read(4096)\n except Exception as e:\n LOG.error(\"Failed to access sheepdog device \"\n \"handle: %(error)s\",\n {\"error\": e})\n return False\n finally:\n sheepdog_handle.seek(original_offset, 0)\n\n return True",
"async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True",
"def is_owner_or_privileged_user(obj_user, request):\n return (\n obj_user == request.user or request.user.is_superuser or is_admin_user(request)\n )",
"def available(self, o):\n return not self.locked() or self.isowner(o)",
"def has_ownership(self):\n user = self.request.user\n object = self.get_object()\n if object.owned_by(user):\n return True\n else:\n return False"
] | [
"0.6451464",
"0.6307461",
"0.6153694",
"0.6151752",
"0.61308235",
"0.6095205",
"0.60757345",
"0.60000616",
"0.5997545",
"0.5997545",
"0.5976451",
"0.59735596",
"0.5972847",
"0.5931066",
"0.59120023",
"0.59047663",
"0.58646756",
"0.58345026",
"0.5809635",
"0.5780444",
"0.5762077",
"0.5753108",
"0.57131326",
"0.5705592",
"0.56952524",
"0.5674087",
"0.5661446",
"0.56251633",
"0.56143355",
"0.5596934"
] | 0.6578928 | 0 |
Checks the relevant swift mount points and diskusage | def main():
results = []
results.extend(check_mounts())
results.extend(diskusage())
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_mounted_system(self):\n res = self.su_cmd('touch /system/.dwarf_check')\n if res == '':\n res = self._do_adb_command('shell ls -la /system')\n if '.dwarf_check' in res:\n res = self.su_cmd('rm /system/.dwarf_check')\n if res == '':\n return True\n elif res == 'Read-only file system':\n return False\n\n return False",
"def disk():\n run(env.disk_usage_command % env)",
"def check_disk_usage(disk):\n du= shutil.disk_usage(disk)\n free =du.free/du.total * 100\n return free > 30",
"def get_mount_usage(paths):\n\n mount_usage = {}\n for mount, stats in get_disk_usage().items():\n for path in paths:\n if (mount == get_mount_point(path)):\n mount_usage[path] = stats\n return mount_usage",
"def disk_usage(self):\n self.monitoring_object['disk_usage'] =\\\n psutil.disk_usage('/')",
"def check_disk_usage(disk):\n du = shutil.disk_usage(disk)\n free = du.free / du.total * 100\n return free > 20",
"def getDiskUsage (self, disksSet = None):\n if disksSet == None:\n disksSet = self._disksSet\n\n if platform.system() == \"Linux\":\n \n totalBlocks = 0\n totalUsedBlocks = 0\n\n if len(disksSet)==0:\n return 0\n \n for disk in disksSet:\n\n if self._isPrediction is False:\n mediaRootDir = os.path.join(self._cfg.mediaBaseDir, \"%02u\" % disk)\n actualPath = os.path.realpath(mediaRootDir)\n # Get the statistics for the file system\n statistics = os.statvfs(actualPath)\n # Sum used and total blocks\n totalUsedBlocks += float(statistics[statvfs.F_BLOCKS]) - float(statistics[statvfs.F_BFREE])\n totalBlocks += float(statistics[statvfs.F_BLOCKS])\n else:\n\n diskData = self.getVirtualDiskData(disk)\n\n if diskData is not None:\n totalUsedBlocks += float(diskData[\"totalUsedBytes\"])\n totalBlocks += float(diskData[\"totalDiskBytes\"])\n else:\n totalBlocks+=1.0\n\n \n # Calculate the \"disk\" usage \n diskUsage = 100*(totalUsedBlocks/totalBlocks)\n\n else:\n self._logGeneral().error(\"not on linux. can't calculate disk usage\")\n raise Exception(\"platformError\",\"not linux\")\n \n self._logGeneral(\"disk-usage\").debug2(\"diskUsage=%s\", diskUsage)\n return diskUsage",
"def DiskUsage(cls):\n\t\t# >> df -iP\n\t\t# Sys. de fich. Inodes IUtil. ILib. IUti% Monte sur\n\t\t# /dev/sda1 915712 241790 673922 27% /\n\t\t# none 210977 788 210189 1% /dev\n\t\t# none 215028 19 215009 1% /dev/shm\n\t\t# none 215028 71 214957 1% /var/run\n\t\t# none 215028 2 215026 1% /var/lock\n\t\t# /dev/sda5 8364032 500833 7863199 6% /home\n\t\t# /home/sebastien/.Private 8364032 500833 7863199 6% /home/sebastien\n\t\tres = {}\n\t\tfor line in popen(\"df -kP\").split(\"\\n\")[1:-1]:\n\t\t\tline = RE_SPACES.sub(\" \", line).strip().split(\" \")\n\t\t\tsystem, inodes, used_inodes, free_inodes, usage, mount = line\n\t\t\ttry:\n\t\t\t\tusage = float(usage[:-1])\n\t\t\texcept ValueError:\n\t\t\t\tusage = 0\n\t\t\tres[mount] = float(usage) / 100.0\n\t\treturn res",
"def disk_usage(path):\n st = os.statvfs(path)\n total = st.f_blocks * st.f_frsize\n used = (st.f_blocks - st.f_bfree) * st.f_frsize\n return total, used",
"def test_check_disk_space_sufficient(self):\n self.assertTrue(self.command.check_disk_space(1, self.temp_dir))\n self.assertTrue(self.command.check_disk_space(\n 1, self.temp_dir,\n label=\"Hello\", context=\"Contextual detail\", die=True))",
"def check_disk_space(self, required_disk_space, fs='/opt'):\n\n stats = admin_tasks.df_stats(fs)\n if stats:\n __, __, available = stats\n\n space_left = available - required_disk_space\n\n if space_left > 0.5:\n self.log.info(\"%.1fG of disk space is available from approximately %.1fG in %s\" %\n (required_disk_space, available, fs))\n elif space_left > 0 and space_left <= 0.5:\n self.log.warning(\"Low disk space. Only %.1fG will be free from approximately available space of %.1fG in %s.\" % (\n space_left, available, fs))\n else:\n self.log.error(\"Not enough disk space. %.1fG is not available from approximately avaiable space of %.1fG in %s.\" % (\n required_disk_space, available, fs))\n sys.exit(1)",
"def is_mounted(self):\n try:\n _ = openmediavault.subprocess.check_output(\n [\n 'findmnt',\n '--canonicalize',\n '--first-only',\n '--noheadings',\n '--raw',\n '--nofsroot',\n self.canonical_device_file,\n ]\n )\n return True\n except subprocess.CalledProcessError:\n pass\n return False",
"def _get_drive_usage(path):\n if sys.version_info >= (3, 3):\n usage = shutil.disk_usage(path)\n return {\"total\": usage.total, \"used\": usage.used, \"free\": usage.free}\n if on_android():\n from jnius import autoclass\n\n StatFs = autoclass(\"android.os.StatFs\")\n AndroidString = autoclass(\"java.lang.String\")\n stats = StatFs(AndroidString(path))\n return {\n \"total\": stats.getBlockCountLong() * stats.getBlockSizeLong(),\n \"free\": stats.getAvailableBlocksLong() * stats.getBlockSizeLong(),\n }\n # with os.statvfs, we need to multiple block sizes by block counts to get bytes\n stats = os.statvfs(path)\n total = stats.f_frsize * stats.f_blocks\n free = stats.f_frsize * stats.f_bavail\n return {\"total\": total, \"free\": free, \"used\": total - free}",
"def disk_usage(path):\n fs.disk_usage(path)",
"def test_903_disk_usage_action(self):\n u.log.info(\"Testing diskusage action\")\n action_id = u.run_action(self.swift_proxy_sentry, \"diskusage\")\n assert u.wait_on_action(action_id), \"diskusage action failed.\"\n\n u.log.info('OK')",
"def disk_usage(path):\n st = os.statvfs(path)\n free = (st.f_bavail * st.f_frsize)\n total = (st.f_blocks * st.f_frsize)\n used = (st.f_blocks - st.f_bfree) * st.f_frsize\n try:\n percent = ret = (float(used) / total) * 100\n except ZeroDivisionError:\n percent = 0\n # NB: the percentage is -5% than what shown by df due to\n # reserved blocks that we are currently not considering:\n # http://goo.gl/sWGbH\n #return usage_ntuple(total, used, free, round(percent, 1))\n return round(percent,1)",
"def main():\n mount_file = '/proc/mounts'\n if os.path.isfile(mount_file):\n try:\n f = open(mount_file, 'r')\n except IOError:\n print 'cannot open', mount_file\n else:\n lines = []\n lines = f.readlines()\n f.close()\n\n matching = [line for line in lines if \"rootfs\" in line]\n #print matching\n \n removed = [lines.remove(m) for m in matching]\n #print removed\n \n for line in lines:\n if line.endswith(\"0 0\\n\"):\n line = line[:-5] \n #print line\n # line = line.rstrip(\" 0\\n\") does not work if\n # the line contains 0. \n # i.e. \"...gid=5,mode=620,ptmxmode=000 0 0\\n\"\n\n fields = line.split(\" \")\n #print fields\n\n if (len(fields) != 4):\n print 'cannot format', line\n else:\n print fields[0], 'on', fields[1], 'type', fields[2], \\\n '('+ fields[3] + ')'\n else:\n print 'cannot find', mount_file\n\n return 0",
"def check_root_full():\n return check_disk_full(disk=\"/\", min_gb=2, min_percent=10)",
"def check_disk_space(self):\n mm = MicroManager(self.hostname)\n drives = mm.get_disks()\n env = mm.get_env()\n for drive in drives:\n if drive['Name'].startswith(env['HOMEDRIVE']):\n if drive['TotalFreeSpace'] >= 367001600:\n return [CheckStatus(self, CheckStatus.CHECK_DISK_SPACE, CheckStatus.STATUS_PASS), ]\n else:\n return [CheckStatus(self, CheckStatus.CHECK_DISK_SPACE, CheckStatus.STATUS_FAIL, \"Only {} bytes of available disk space remain, expecting at least 367001600\"), ]",
"def space_usage(\n self, path=None, warning_level=None, previous_result=None,\n can_fail_build=False, name=None, **kwargs):\n path = path or self.m.path['start_dir']\n name = name or 'disk space usage'\n warning_level = warning_level or 0.9\n kwargs.setdefault(\n 'step_test_data',\n lambda: self.m.json.test_api.output_stream(\n self.test_api.space_usage_result()))\n\n if self.m.platform.is_win:\n # Not supported. Feel free to implement.\n return\n\n step = None\n try:\n step = self.m.python(\n name,\n self.resource('statvfs.py'),\n stdout=self.m.json.output(),\n args=[path],\n **kwargs)\n capacity_mb = step.stdout['capacity'] / 1024.0 / 1024.0\n used_mb = step.stdout['used'] / 1024.0 / 1024.0\n percent = used_mb / capacity_mb\n step.presentation.step_text = '%.2f/%.2f GiB (%d%%) used' % (\n used_mb / 1024.0, capacity_mb / 1024.0, percent * 100)\n if percent >= warning_level:\n step.presentation.status = self.m.step.WARNING\n if previous_result:\n step.presentation.step_text += '. Delta: %+.2f MiB' % (\n used_mb - previous_result['used'])\n return {\n 'capacity': capacity_mb,\n 'used': used_mb,\n }\n except Exception as ex:\n # Do not fail entire build because of a disk space step failure.\n if step:\n step.presentation.logs['exception'] = ['%r' % ex]\n step.presentation.status = self.m.step.WARNING\n if can_fail_build:\n raise recipe_api.StepFailure('Could not get disk info: %s' % ex)\n return",
"def get_disk_usage():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><disk-space></disk-space></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def get_mount_points():\n\n points = []\n t = subprocess.check_output(['mount'])\n t = t.decode()\n\n for line in t.splitlines():\n t = line.find('smbfs')\n if t < 0: continue\n b = line.find(' on ')\n points.append(line[b+4: t-2])\n # //[email protected]/storage on /Volumes/storage (smbfs, nodev, nosuid, mounted by ruan)\n return points",
"def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)",
"def test_mount_status_nas_share(self):\n pass",
"def check_filesystem(ssh_connection, disk_fmt, disk):\n if disk_fmt == \"squashfs\":\n return\n cmd = \"fsck.{} -n {}\".format(disk_fmt, disk)\n exit_code, _, stderr = ssh_connection.run(cmd)\n assert exit_code == 0, stderr",
"def check_disk_usage(disk, min_gb, min_percent):\n\tdu = shutil.disk_usage(disk)\n\t#calculate the percentage of free space\n\tpercent_free = 100 * du.free / du.total\n\t#calculate how many free gigabytes\n\tgigabytes_free = du.free / 2**30\n\tif percent_free < min_percent or gigabytes_free < min_gb:\n\t\treturn False\n\treturn True",
"def check_mount_state(self, nodes=None):\n state = {\n \"mounted\": NodeSet(),\n \"unmounted\": NodeSet(),\n \"nodirectory\": NodeSet()\n }\n if not nodes:\n nodes = NodeSet.fromlist(self.hosts)\n check_mounted = NodeSet()\n\n # Detect which hosts have mount point directories defined\n command = \"test -d {0} -a ! -L {0}\".format(self.mount_dir.value)\n retcodes = pcmd(nodes, command, expect_rc=None)\n for retcode, hosts in list(retcodes.items()):\n if retcode == 0:\n check_mounted.add(hosts)\n else:\n command = \"grep 'dfuse {}' /proc/mounts\" .format(self.mount_dir.value)\n retcodes = pcmd(hosts, command, expect_rc=None)\n for ret_code, host_names in list(retcodes.items()):\n if ret_code == 0:\n check_mounted.add(host_names)\n else:\n state[\"nodirectory\"].add(host_names)\n\n if check_mounted:\n # Detect which hosts with mount point directories have it mounted as a fuseblk device\n command = \"stat -c %T -f {0} | grep -v fuseblk\".format(self.mount_dir.value)\n retcodes = pcmd(check_mounted, command, expect_rc=None)\n for retcode, hosts in list(retcodes.items()):\n if retcode == 1:\n state[\"mounted\"].add(hosts)\n else:\n state[\"unmounted\"].add(hosts)\n\n return state",
"def getSpaceUsage(path):\n st = os.statvfs(path)\n \n flash = { \"free\" : st.f_bavail * st.f_frsize, \"used\":(st.f_blocks - st.f_bfree) * st.f_frsize }\n \n #free = st.f_bavail * st.f_frsize\n #total = st.f_blocks * st.f_frsize\n #used = (st.f_blocks - st.f_bfree) * st.f_frsize\n return flash",
"def _check_controller_multi_fs_data(context, controller_fs_list_new):\n\n cgtsvg_growth_gib = 0\n\n lvdisplay_keys = [constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_DATABASE],\n constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_PLATFORM]]\n\n lvdisplay_dict = pecan.request.rpcapi.get_controllerfs_lv_sizes(context)\n\n for key in lvdisplay_keys:\n if not lvdisplay_dict.get(key, None):\n raise wsme.exc.ClientSideError(_(\"Unable to determine the \"\n \"current size of %s. \"\n \"Rejecting modification \"\n \"request.\" % key))\n\n for fs in controller_fs_list_new:\n lv = fs.logical_volume\n if lvdisplay_dict.get(lv, None):\n orig = int(float(lvdisplay_dict[lv]))\n new = int(fs.size)\n if fs.name == constants.FILESYSTEM_NAME_DATABASE:\n orig = orig // 2\n\n if orig > new:\n raise wsme.exc.ClientSideError(_(\"'%s' must be at least: \"\n \"%s\" % (fs.name, orig)))\n if fs.name == constants.FILESYSTEM_NAME_DATABASE:\n cgtsvg_growth_gib += 2 * (new - orig)\n else:\n cgtsvg_growth_gib += (new - orig)\n\n LOG.info(\"_check_controller_multi_fs_data cgtsvg_growth_gib=%s\" %\n cgtsvg_growth_gib)\n\n return cgtsvg_growth_gib",
"def disk_usage(path):\n if PY3 and isinstance(path, bytes):\n # XXX: do we want to use \"strict\"? Probably yes, in order\n # to fail immediately. After all we are accepting input here...\n path = path.decode(ENCODING, errors=\"strict\")\n total, free = cext.disk_usage(path)\n used = total - free\n percent = usage_percent(used, total, round_=1)\n return _common.sdiskusage(total, used, free, percent)"
] | [
"0.65840626",
"0.6307514",
"0.6279391",
"0.626951",
"0.6082033",
"0.60685736",
"0.60032076",
"0.5917685",
"0.5890955",
"0.58835125",
"0.5840331",
"0.58330125",
"0.5823619",
"0.58203804",
"0.5819363",
"0.581247",
"0.58020556",
"0.5750178",
"0.5736998",
"0.5730349",
"0.5725064",
"0.5693403",
"0.5692493",
"0.56770325",
"0.564714",
"0.5606462",
"0.56020594",
"0.55999625",
"0.55439365",
"0.55371076"
] | 0.6437659 | 1 |
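A hedged usage sketch for the mount/disk-usage record above: the check_mounts() and diskusage() helpers are not included in the record, so the bodies below are assumptions built on the standard library (shutil.disk_usage, os.path.ismount), and the 90% warning threshold is a hypothetical choice rather than the original implementation.

import os
import shutil

def diskusage(paths=("/",)):
    # Report percentage used per path; WARN above an assumed 90% threshold.
    results = []
    for path in paths:
        usage = shutil.disk_usage(path)  # named tuple: total, used, free (bytes)
        percent_used = 100.0 * usage.used / usage.total
        status = "OK" if percent_used < 90.0 else "WARN"
        results.append((path, status, round(percent_used, 1)))
    return results

def check_mounts(mount_points=("/",)):
    # Placeholder mount check: report whether each expected path is a mount point.
    return [(mp, "OK" if os.path.ismount(mp) else "WARN") for mp in mount_points]

def main():
    results = []
    results.extend(check_mounts())
    results.extend(diskusage())
    return results

print(main())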
Sort variables based on their rank and shift. Note that this relies on all variables having a unique rank. | def sort_variables(variables):
return tuple(sorted(variables, key=lambda v: (v.rank, v.shift))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sort(self, varnames):\n varnames = self._find_vars(varnames, unique=True, empty_ok=False)\n var_ind_list = list(map(self._varlist.index, varnames))\n new_srtlist = var_ind_list + [None]*(self._nvar - len(varnames))\n if self._srtlist == new_srtlist:\n return\n sort_key = lambda row: [row[i] for i in var_ind_list]\n self._varvals.sort(key = sort_key)\n self._srtlist = new_srtlist\n self._changed = True",
"def set_rank_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:x.get_rank(),reverse=False)\n tot_res = len(self._run[k])\n for r in self._run[k]:\n r.set_score(tot_res - int(r.get_rank()) + 1)\n print r.get_str()",
"def reorder( self ):\n self.sorted.sort(self.compareFunction)",
"def _rank(measure):\n sort_idx = np.argsort(-measure)\n ranks = np.empty(len(measure), int)\n ranks[sort_idx] = np.arange(1, len(measure)+1)\n return ranks",
"def _sort(self):\n self.rows.sort(key=lambda x: (x['PERC1'], x['EQ'], x['PASS'], x['W2']),\n reverse=True)\n\n rank = 0\n prev_perc = 0\n prev_rank = 0\n for row in self.rows:\n if row[\"NR\"] == 0:\n # Something has already populated NR as 0 - so we set rank as\n # 0 too\n row['_RANK'] = 0\n row['_NR'] = 0\n continue\n\n # Increment our count\n rank += 1\n if row['PERC1'] == prev_perc:\n row['NR'] = \"\"\n row['_NR'] = prev_rank # I.e. joint 6th will be 6 here\n row['_RANK'] = rank # I.e. joint 6th could be 7, or 8 etc. here\n else:\n row['NR'] = rank\n row['_NR'] = rank\n row['_RANK'] = rank\n prev_perc = row['PERC1']\n prev_rank = rank",
"def init_sorted_variables(self):\n variables_by_neighbors = [] # A list of (var_name, |neighbors|)\n for variable in self.var_names:\n variables_by_neighbors.append(\n (self.variables[variable].get_name(), len(self.variables[variable].get_neighbors())))\n\n # In this part we sort the variables according to the heuristic:\n variables_by_neighbors = sorted(variables_by_neighbors, key=lambda tup: tup[1], reverse=True)\n # (J) Notice that there can be many variables with same neighbour, thus the order between them isn't determined.\n self.sorted_variables = [*map(lambda x: x[0], variables_by_neighbors)]",
"def tensor_resort(inputs, tensor_order):\n pass",
"def rearrangeMovieArray():\n # using lambda to sort by values of dict and return list \n new_ranked= sorted(movieViewCounts, key=lambda v:movieViewCounts[v], reverse=True)\n moviesRanked = new_ranked",
"def arrange_variables(self, variables='all', measures=None,\n parameters='varied', obj_types='all',\n scenarios='all', index=False):\n return self.arrange(variables=variables, measures=measures,\n parameters=parameters, obj_types=obj_types,\n scenarios=scenarios, index=index)",
"def sort_by_reranker_scores(self):\n self.parses.sort(key=lambda parse: (parse.reranker_score,\n parse.parser_score),\n reverse=True)",
"def _sort(self):\n self.population.sort()\n self.population.reverse()",
"def sort_cards(self):\n self.cards.sort(key=operator.attrgetter('persona', 'rank'))\n self.update_position()",
"def _rank(self):\r\n return sorted(self.player_points.items(),key=lambda x:x[1],reverse=True)",
"def sort(self): # sort all entries to make room for new ones, determine best and worst\n ns = self.num_stored.value\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n for i in range(len(yi)):\n y = ys[yi[i]]\n xs = self.get_x(yi[i])\n sortRuns.append((y, xs))\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = numStored \n return numStored",
"def sort(self):\n\n # momentarily convert into numpy, to take advantage of their easy \n # sorting.\n top_indices = np.argsort([-n for n in self.Nx])\n self.Nx = [self.Nx[i] for i in top_indices]\n self.dictionary = h.dictionary.Dictionary([\n self.dictionary.tokens[i] for i in top_indices])\n\n self.sorted = True\n\n return top_indices",
"def sort_data(self):\n\n # zips the game_list and game_Scores, sorts the result by scores, and then puts them back.\n self.game_list, self.game_scores = zip(*sorted(zip(self.game_list, self.game_scores), key=lambda pair: pair[1]))",
"def sort(self):\r\n\t\t\r\n\t\t# get variables, add i\r\n\t\tv = self.scan(p=False)\r\n\t\tv.append('i')\r\n\t\t\r\n\t\t# reverse so least weighted variables come first\r\n\t\tv.reverse()\r\n\t\t\r\n\t\t# assign a weight to each variable, based on position in list\r\n\t\tw = {}\r\n\t\tfor n,i in enumerate(v):\r\n\t\t\tw[i] = 1000 ** (n + 1)\r\n\t\t\t\r\n\t\t# assign score based on weights and exponents\r\n\t\ts = {}\r\n\t\tfor i in self:\r\n\t\t\t\r\n\t\t\t# sum weights\r\n\t\t\tc = 0\r\n\t\t\tfor k,j in i.items():\r\n\t\t\t\t\r\n\t\t\t\t# adjust weights based on exponent\r\n\t\t\t\tif k != 'i':\r\n\t\t\t\t\tc += w.get(k,0) * (100 + j)\r\n\t\t\t\t\t\r\n\t\t\t\t# i is adjusted based on even or odd exponents\r\n\t\t\t\telse:\r\n\t\t\t\t\tc += w.get(k,0) * (100 + j % 2)\r\n\t\t\t\t\t\r\n\t\t\t# use score as key\r\n\t\t\ts[c] = i\r\n\t\t\t\t\r\n\t\t# sort keys largest to smallest\r\n\t\ty = s.keys()\r\n\t\ty.sort()\r\n\t\ty.reverse()\r\n\t\t\r\n\t\t# new term list\r\n\t\tn = [s[k] for k in y]\r\n\t\t\r\n\t\treturn Li(n,c=False)",
"def double_sort(data, last_var=0):\n \n # doing simply np.sort(np.sort(pairs, axis=1), axis=0)\n # would uncouple first and second elements of pairs\n # during the second sorting (axis=0)\n data = np.sort(data, axis=1)\n x_sort = np.argsort(data[:, 0])\n data = data[x_sort]\n \n return data",
"def rank_vars(xTrain, yTrain, scoreFunc):\r\n funcsDic = {\r\n 'pearsonr': [np.arange(xTrain.shape[1]), 1], \r\n 'mutual_info_score': np.arange(xTrain.shape[0]),\r\n 'ttest_ind': [np.arange(xTrain.shape[1]), 1], \r\n }\r\n \r\n scores = list()\r\n for feat in np.arange(xTrain.shape[1]):\r\n if scoreFunc.func_name == 'pearsonr':\r\n scores.append(scoreFunc(xTrain[:, feat], yTrain))\r\n elif scoreFunc.func_name == 'ttest_ind':\r\n scores.append(scoreFunc(xTrain[yTrain == 1, feat], xTrain[yTrain==-1, feat]))\r\n \r\n scores = np.asarray(scores)\r\n pvals = scores[funcsDic[scoreFunc.func_name]]\r\n sortedIndices = [i[0] for i in sorted(enumerate(pvals), key=lambda x:x[1])]\r\n return sortedIndices",
"def order_players_by_initial_rank(self):\n pass",
"def get_sorted_results(self):\n results = self.results.values()\n return sorted(results, key=lambda r: r.rank(), reverse=True)",
"def reorder(self,order='nodes'):\n if order == 'nodes':\n a = sort(self,axis=-1) # first sort rows\n order = sortByColumns(a)\n elif order == 'reverse':\n order = arange(self.nelems()-1,-1,-1)\n elif order == 'random':\n order = random.permutation(self.nelems())\n else:\n order = asarray(order)\n if not (order.dtype.kind == 'i' and \\\n (sort(order) == arange(order.size)).all()):\n raise ValueError,\"order should be a permutation of range(%s)\" % self.nelems()\n return order",
"def check_order(racers):\n racers_orderd = sorted(racers, key=lambda x: (x.group, x.time))\n a = 1\n b = 1\n for r in racers_orderd:\n if r.group == \"ALL\":\n r.rank = str(a)\n a += 1\n else:\n r.rank = str(b)\n b += 1\n return racers_orderd",
"def radix_sort_rot(self, labels):\n n = len(labels)\n result = 0\n if n == 0:\n return result\n\n for b in range(self.bits):\n # The output array elements that will have sorted arr\n output = [0]*n\n\n # initialize count array as 0\n count = [0, 0]\n\n # Store count of occurrences in count[]\n for i in range(n):\n count[(labels[i] >> b) % 2] += 1\n\n # Change count[i] so that count[i] now contains actual\n # position of this digit in output array\n count[1] += count[0]\n\n # Build the output array\n for i in range(n-1, -1, -1):\n index = (labels[i] >> b)\n output[count[index % 2] - 1] = labels[i]\n count[index % 2] -= 1\n\n # Copying the output array to arr[],\n # so that arr now contains sorted numbers\n labels = output\n\n previous, occ = labels[0], 1\n for i in range(1, len(labels)):\n label = labels[i]\n if label == previous:\n occ += 1\n else:\n result ^= self.ROT(previous ^ occ, occ)\n occ = 1\n previous = label\n if occ > 0:\n result ^= self.ROT(previous ^ occ, occ)\n return result",
"def _sort_data(self, sort_data_by='position'):\n all_mutants = iter(self)\n if sort_data_by=='position':\n sorted_data = sorted(all_mutants, key = lambda m: (m.position, m.IB))\n # x.position here is an Insertion_position object and has a sensible cmp function\n # TODO do unaligned/multi-aligned/unknown positions sort sensibly here?\n elif sort_data_by=='read_count':\n if self.multi_dataset: \n raise MutantError(\"Sorting by readcount in print_data not implemented for multi-datasets!\")\n sorted_data = sorted(all_mutants, key = lambda m: (m.total_read_count, m.perfect_read_count, m.position, m.IB), \n reverse=True)\n else:\n raise MutantError(\"Can't sort mutants by %s - only position or readcount are implemented!\"%sort_data_by)\n return sorted_data",
"def argsort(self, **kwargs): # noqa: PR02\n return SeriesDefault.register(pandas.Series.argsort)(self, **kwargs)",
"def sort(self):\r\n\t\tif ScoreOpt.isGroupVassals():\r\n\t\t\tself._playerScores.sort(lambda x, y: cmp(x.sortKey(), y.sortKey()))\r\n\t\t\tself._playerScores.reverse()\r\n\t\tmaxPlayers = ScoreOpt.getMaxPlayers()\r\n\t\tif maxPlayers > 0 and len(self._playerScores) > maxPlayers:\r\n\t\t\tself._playerScores = self._playerScores[len(self._playerScores) - maxPlayers:]",
"def calculate_movement_order(self):\n # type: () -> List[SquadDrone]\n return _.sortByAll(self.members, 'name')",
"def _apply_rank(U, S, VT, r, verbose=False):\n if r is None:\n r = len(S)\n S_r = S[:r]\n U_r = U[:, :r]\n VT_r = VT[:r]\n if verbose:\n print(\"Rank:\", r, \"SVD shape:\", U_r.shape, S_r.shape, VT_r.shape)\n return U_r, S_r, VT_r",
"def sort(self):\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[0][j].fitness < self.genepool[0][j-1].fitness:\n self.genepool[0][j], self.genepool[0][j-1] = self.genepool[0][j-1], self.genepool[0][j]\n else:\n break\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[1][j].fitness < self.genepool[1][j-1].fitness:\n self.genepool[1][j], self.genepool[1][j-1] = self.genepool[1][j-1], self.genepool[1][j]\n else:\n break"
] | [
"0.6356814",
"0.59954023",
"0.5840146",
"0.57373804",
"0.5699405",
"0.56901157",
"0.5626263",
"0.5604119",
"0.55457306",
"0.55165404",
"0.55016434",
"0.5487663",
"0.5474271",
"0.5438897",
"0.5397592",
"0.5390707",
"0.53346616",
"0.5334493",
"0.53060263",
"0.5277838",
"0.5180582",
"0.5180428",
"0.51724875",
"0.5167429",
"0.5165699",
"0.5141077",
"0.5138314",
"0.51201534",
"0.5108997",
"0.50843745"
] | 0.74932057 | 0 |
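A minimal usage sketch for the sort_variables record above; the Variable namedtuple is a hypothetical stand-in exposing just the rank and shift attributes that the sort key reads.

from collections import namedtuple

Variable = namedtuple("Variable", ["name", "rank", "shift"])

def sort_variables(variables):
    # Identical to the record's document: order by rank, then shift.
    return tuple(sorted(variables, key=lambda v: (v.rank, v.shift)))

vs = [Variable("b", 2, 0), Variable("a", 1, 3), Variable("a_lag1", 1, 1)]
print(sort_variables(vs))
# Ordered by rank first, then shift: a_lag1 (1, 1), a (1, 3), b (2, 0)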
Given a set of criteria, find the matching variable(s). | def get_matching(variables, strict=True, single=True, **criteria):
matching = []
for var in variables:
for crit_name, crit_info in criteria.items():
if getattr(var, crit_name) == crit_info:
continue
else:
break
else:
matching.append(var)
if not matching and strict:
raise RuntimeError("No matching variables were found.")
if single:
if len(matching) > 1:
raise RuntimeError(
f"Expected to find 1 matching variable. Found '{matching}'."
)
if not matching:
return ()
return matching[0]
return tuple(matching) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def query_variables(md):\n\n # save as dictionaries with searchers as keys\n x_searchers = {}\n b_target = {}\n\n t_max = 0\n\n for var in md.getVars():\n my_var_name = var.varName\n my_var_value = var.x\n # print('%s %g' % (my_var_name, my_var_value))\n\n if 'x' in my_var_name:\n s = int(my_var_name[2:my_var_name.find(\",\")])\n v = int(my_var_name[my_var_name.find(\",\") + 1:my_var_name.rfind(\",\")])\n t = int(my_var_name[my_var_name.rfind(\",\") + 1:-1])\n\n # print('%s = %f ' % (my_var_name, my_var_value))\n x_searchers[(s, v, t)] = my_var_value\n\n if t > t_max:\n t_max = t\n\n elif 'beta' in my_var_name and '_s' not in my_var_name:\n # print('%s %g' % (my_var_name, my_var_value))\n # remember: b[0] is probability of capture\n v = int(my_var_name[5:my_var_name.find(\",\")])\n t = int(my_var_name[my_var_name.find(\",\") + 1:my_var_name.rfind(\"]\")])\n b_target[(v, t)] = my_var_value\n\n # make sure x is binary\n x_searchers = enforce_binary(x_searchers, t_max)\n b_target = enforce_sum_1(b_target, t_max)\n\n # x_searchers[(s, v, t)] and b_target[(v, t)]\n return x_searchers, b_target",
"def findall_var(formula, variable):\n res = []\n s = Solver()\n s.add(formula)\n while True:\n if s.check() == sat:\n m = s.model()\n res.append(m)\n value = m[variable]\n if value == None:\n return res\n s.add(variable != value)\n else:\n return res",
"def _find_vars(self, varnames, unique=False, evars=False, all_ok=False, \n empty_ok=False, single=False):\n if isinstance(varnames, str):\n varnames = (varnames,)\n elif not isinstance(varnames, collections.Iterable):\n raise TypeError(\"variable names should be str or iterable of str\")\n \n # first split into list of single abbrevs per str\n split_names = []\n for name in varnames:\n if not isinstance(name, str):\n raise TypeError(\"must specify variables as string(s)\")\n split_names += name.split()\n nnames = len(split_names)\n \n # check for _all, check for proper usage, and return copy of varlist\n # if evars==False or ['_dta'] + varlist if evars==True\n all_specified = False\n if '_all' in split_names:\n if not all_ok:\n raise ValueError(\"\\\"_all\\\" not allowed in this context\")\n elif not nnames == 1:\n raise ValueError(\n \"\\\"_all\\\" may not be combined with other names\")\n all_specified = True\n all_names = (['_dta'] if evars else []) + list(self._varlist)\n nnames = len(all_names)\n \n # check that more than 0 names specified if empty_ok==False, and\n # ignore extras (with message) if single==True\n if not empty_ok and nnames == 0:\n raise ValueError(\"no variables specified\")\n if single and nnames > 1:\n if not self._quiet:\n smcl = \"{err}\" if IN_STATA else \"\"\n msg = smcl + \"only one {}varname allowed; ignoring the rest\"\n print(msg.format('e' if evars else ''))\n split_names = split_names[:1]\n \n # if all_specified, return aleady-constructed all_names\n if all_specified:\n return all_names\n \n # Create match list of [abbrev, match1, match2, ...].\n # The loops below identify when exact varname given, but that varname\n # happens to be abbreviation of other varnames.\n varlist = self._varlist\n matches = []\n append = matches.append\n if evars:\n for name in split_names:\n if name == \"_dta\":\n append([name, name])\n else:\n match = [var for var in varlist if var.startswith(name)]\n append([name, name] if name in match else [name] + match)\n else:\n for name in split_names:\n match = [var for var in varlist if var.startswith(name)]\n append([name, name] if name in match else [name] + match)\n \n # abbreviation was a good, unambiguous abbreviation if exactly\n # one match found, i.e. if the corresponding entry in -matches- \n # is [abbrev, match1]\n if not all(len(m) == 2 for m in matches):\n # there were unmatched or ambiguous abbreviations\n zeros = \" \".join([m[0] for m in matches if len(m) == 1])\n twos = \" \".join([m[0] for m in matches if len(m) >= 3])\n if zeros != \"\" and twos != \"\":\n msg = \"no variables found for {}; multiple found for {}\"\n raise ValueError(msg.format(zeros, twos))\n if zeros != \"\":\n raise ValueError(\n \"no variables found for {}\".format(zeros, twos))\n # if getting here, twos != \"\" and zeros == \"\"\n raise ValueError(\"multiple variables found for '{}'\".format(twos))\n \n if not unique:\n return [m[1] for m in matches]\n seen = set()\n # if name has not been encountered, add to list and set of encountered\n return [m[1] for m in matches \n if m[1] not in seen and not seen.add(m[1])]",
"def check_occuring_variables(formula,variables_to_consider,allowed_variables) :\n variable_set=set(allowed_variables)\n for clause in formula :\n variables_in_clause = {abs(l) for l in clause if abs(l) in variables_to_consider}\n if not variables_in_clause <= variable_set:\n return False, [v for v in variables_in_clause if not v in variable_set] \n return True, []",
"def cnf_variables(cnf):\n variabs = set()\n\n for clause in cnf:\n for var in clause:\n var = abs(var)\n\n if var not in variabs:\n variabs.add(var)\n\n return variabs",
"def basic_find_one_independent_choose(all_set_variables):\n task_list = []\n for choose_keyword in list(all_set_variables):\n # for choose_keyword, set_vars in six.iteritems(value):\n task_list.append(choose_keyword)\n task_list = basic_add_more_important_tasks(\n choose_keyword, all_set_variables, task_list\n )\n logging.debug(task_list)\n return task_list[0]",
"def find_one_independent_choose(all_set_variables):\n task_list = []\n for key in all_set_variables:\n value = all_set_variables[key]\n choose_keywords = list(value)\n for choose_keyword in choose_keywords:\n set_vars = value[choose_keyword]\n task_list.append((key, choose_keyword))\n task_list = add_more_important_tasks(\n choose_keyword, all_set_variables, task_list\n )\n logging.debug(task_list)\n return task_list[0]",
"def query_and_print_variables(md):\n\n # save x variable as dictionary with keys (s, v, t)\n x_searchers = {}\n # save beta variable as dictionary with keys (v, t)\n b_target = {}\n\n for var in md.getVars():\n my_var_name = var.varName\n my_var_value = var.x\n print('%s %g' % (my_var_name, my_var_value))\n\n if 'x' in my_var_name:\n s = int(my_var_name[2])\n v = int(my_var_name[4])\n t = int(my_var_name[6])\n\n if my_var_value >= 0.5:\n x_searchers[(s, v, t)] = 1\n else:\n x_searchers[(s, v, t)] = 0\n\n elif 'beta' in my_var_name:\n # print('%s %g' % (my_var_name, my_var_value))\n # remember b[0] is probability of capture\n v = int(my_var_name[5])\n t = int(my_var_name[7])\n b_target[v, t] = my_var_value\n\n obj = md.getObjective()\n print(obj.getValue())\n\n return x_searchers, b_target",
"def find(self, *args):\n return _ida_hexrays.lvars_t_find(self, *args)",
"def _get_criteria(self):\n for molecule in self.values():\n molecule.get_criterion()",
"def parameter_finder(target_list, search_list, msgflag=False, exact=False):\n target_list = [x.lower() for x in target_list]\n\n indexes = []\n\n if isinstance(search_list, str):\n cont = 0\n search_list = search_list.lower()\n for t in target_list:\n if exact == False and search_list in t:\n indexes.append(cont)\n elif exact == True and search_list == t:\n indexes.append(cont)\n cont += 1\n if isinstance(search_list, list):\n search_list = [x.lower() for x in search_list]\n\n for s in search_list:\n s = str(s)\n for cont, t in enumerate(target_list):\n if exact == False and s in t:\n print((s, t))\n indexes.append(cont)\n elif exact == True and s == t:\n print((s, t))\n indexes.append(cont)\n\n if msgflag == True:\n length = len(indexes)\n if length > 1: print(\"There were several ocurrences\")\n if length == 0: print(\"No ocurrences found\")\n\n return indexes",
"def variables(s):\n result = set([])\n def walk(s):\n if is_variable(s):\n result.add(s)\n else:\n for arg in s.args:\n walk(arg)\n walk(s)\n return result",
"def find_direct_containing(rules, param):\n\n return_list = []\n for rule in rules:\n if param in rules[rule]:\n return_list.append(rule)\n\n return return_list",
"def used_variables(*terms):\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Var:\n return frozenset((t,))\n\n elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff):\n return union(*(used_variables(x) for x in t))\n\n elif type(t) in (ForAll, Exists, Lambda, NamedBinder):\n return union(used_variables(t.body), t.variables)\n\n elif hasattr(t,'args'):\n return union(*(used_variables(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def get_variable_matches(text):\n return _property_pattern.findall(text)",
"def get_variables_binds(self, predicate, bound_variables=None, variables_binds=None, recursion_level=1):\n\n # print(\"EXPLORING\", recursion_level, predicate, variables_binds)\n\n # Set of bound variables in predicate body\n if bound_variables is None:\n bound_variables = set()\n\n # Possible binds\n if variables_binds is None:\n variables_binds = [{}]\n\n recursion_level -= 1\n\n new_possible_binds = []\n\n for body_clause in predicate.body:\n adornments = self.compute_adornments(body_clause.parameters, bound_variables)\n\n # For each fact search if we can match every bound variable and assign free ones\n if body_clause.name in self._facts:\n for fact in self._facts[body_clause.name]:\n possible_binds = self.check_fact_with_adornment(fact, body_clause, adornments, variables_binds)\n if len(possible_binds):\n # A fact matched, we add variables binds to sup\n new_possible_binds.extend(possible_binds)\n\n # if len(new_possible_binds):\n # variables_binds = new_possible_binds\n\n if recursion_level > 0:\n # For each rule\n if body_clause.name in self._rules:\n for applicable_rule in self._rules[body_clause.name]:\n\n n_bound_variables = set()\n n_variables_binds = [{}]\n\n for index, argument in enumerate(body_clause.parameters):\n rule_corresponding_parameter = applicable_rule.head.parameters[index]\n\n if rule_corresponding_parameter.is_constant():\n if argument.is_constant():\n if rule_corresponding_parameter.value != argument.value:\n break\n else:\n if adornments[index]:\n if argument.is_constant():\n n_bound_variables.add(rule_corresponding_parameter.name)\n n_variables_binds[0][rule_corresponding_parameter.name] = argument.value\n elif argument.name in bound_variables and argument.name in variables_binds[0]:\n n_bound_variables.add(rule_corresponding_parameter.name)\n n_variables_binds[0][rule_corresponding_parameter.name] = variables_binds[0][argument.name]\n\n applicable_predicate_binds = self.get_variables_binds(applicable_rule, n_bound_variables, n_variables_binds, recursion_level)\n for n_bind in applicable_predicate_binds:\n adapted_bind = self.substitute_variable_names(n_bind, applicable_rule.head, body_clause)\n new_possible_binds.extend(adapted_bind)\n\n if len(new_possible_binds):\n variables_binds = new_possible_binds.copy()\n new_possible_binds.clear()\n else:\n variables_binds = [{}]\n\n new_possible_binds_no_duplicates = self.remove_duplicate_binds(variables_binds)\n\n if len(new_possible_binds_no_duplicates):\n yield new_possible_binds_no_duplicates",
"def find_matching_varieties(self, a_variety, conditions=[], limit=None):\n\n var_conditions = [\n (\"name =\", a_variety.get_name()),\n ]\n var_conditions.extend(conditions)\n\n conditions = []\n values = []\n for cond, val in var_conditions:\n conditions.append(cond)\n values.append(val)\n\n row_list = []\n for table in self._db.variety_tables:\n rows = self._db.select(\n [\"id\", \"name\", \"hash\", \"content\"], [table], conditions, values, [\"id\"]\n )\n row_list.extend(rows)\n if not (limit is None) and len(row_list) >= limit:\n _logger.info(\"Limit for find_matching_resources reached\")\n break\n\n variety_list = []\n for row in row_list:\n translated_hash = list(row[\"hash\"][1:-2].split(\" \"))\n\n variety = Resource.Variety(\n content=row[\"content\"],\n hash=translated_hash,\n )\n variety.set_name(row[\"name\"])\n variety.set_id(row[\"id\"])\n variety_list.append(variety)\n return variety_list",
"def findall(self, **kwargs):\n found = []\n searches = kwargs.items()\n\n for obj in self.list():\n try:\n if all(getattr(obj, attr) == value\n for (attr, value) in searches):\n found.append(obj)\n except AttributeError:\n continue\n\n return found",
"def findall(self, **kwargs):\n found = []\n searches = kwargs.items()\n\n for obj in self.list():\n try:\n if all(getattr(obj, attr) == value\n for (attr, value) in searches):\n found.append(obj)\n except AttributeError:\n continue\n\n return found",
"def extract_variables(cnf_formula: str) -> list[str]:\n variables = set()\n cnf_notation = identify_notation(cnf_formula)\n\n and_symbol_pattern = ' ' + cnf_notation.value[CNFLogicConnective.AND] + ' '\n clauses = list(map(lambda c: c[1:len(c)-1], cnf_formula.split(and_symbol_pattern))) # Remove initial and final parenthesis\n\n # Remove final parenthesis of last clause (because of the possible end of line: '\\n')\n if ')' in clauses[len(clauses)-1]:\n clauses[len(clauses)-1] = clauses[len(clauses)-1][:-1] \n\n for c in clauses:\n tokens = c.split(' ')\n tokens = list(filter(lambda t: t != cnf_notation.value[CNFLogicConnective.OR], tokens))\n for feature in tokens:\n if feature == cnf_notation.value[CNFLogicConnective.NOT]:\n continue\n elif feature.startswith(cnf_notation.value[CNFLogicConnective.NOT]):\n variables.add(feature.replace(cnf_notation.value[CNFLogicConnective.NOT], '', 1))\n else:\n variables.add(feature)\n return list(variables)",
"def match(self, key):\n position = key.index(RANGE) # which index to skip\n\n def predicate(keys_0, keys_1):\n \"\"\"whether all other indices match search key\"\"\"\n num_matching = 0\n for i, (k_0, k_1) in enumerate(zip(keys_0, keys_1)):\n if i != position and k_0 == k_1:\n num_matching += 1\n return num_matching == len(key) - 1\n\n # all variables\n keys = list(self.variables.keys())\n # only those which match, including any from the RANGE index\n keys = [k for k in keys if predicate(k, key)]\n # sort along the RANGE index\n keys.sort(key=lambda k: k[position])\n\n return [self.variables[k] for k in keys]",
"def check_criteria(self, criteria, case_control=False):\n\n if case_control:\n pts_meeting_criteria = {key : [] for key in ['case', 'control']}\n else:\n pts_meeting_criteria = []\n\n if len(criteria) == 0: # mostly for exclusion criteria.\n return np.array([])\n\n for name, criterion in criteria.items():\n print(name, criterion)\n feature_inds = self.find_feature(name)\n pts_meeting_criterion = self.search_by_chunk(self.dataset, feature_inds, criterion, case_control)\n \n if case_control:\n pts_meeting_criteria['case'].append(pts_meeting_criterion['case'])\n pts_meeting_criteria['control'].append(pts_meeting_criterion['control'])\n else:\n pts_meeting_criteria.append(pts_meeting_criterion)\n\n if case_control:\n return reduce(np.intersect1d, pts_meeting_criteria['case']), \\\n reduce(np.intersect1d, pts_meeting_criteria['control'])\n else:\n return reduce(np.intersect1d, pts_meeting_criteria)",
"def project_soln(variables, model):\n if variables == []:\n return True\n res = []\n for variable in variables:\n res.append(variable == model[variable])\n return And(*res)",
"def get_vars(triple):\n return set([v for k, v in triple.items() if v.startswith('?')])",
"def variables_referenced(text):\n return set(substitution_pattern.findall(text))",
"def _search(self, *args, **kwargs): # should return Formulas obj\n # Find all Matches\n if kwargs:\n col = list(kwargs)[0]\n args = kwargs[col]\n if isinstance(args, str):\n args = (args, )\n else:\n col = self._formula_col\n match = self.data[col].str.contains('|'.join(args))\n return self.data[match]\n\n # creat a subset of data that is a Formulas obj",
"def get_search_criteria_values(**kwargs):\n result = {}\n for item in kwargs.items():\n search_criteria = SearchCriteria.query.filter_by(text=item[0]).first()\n if search_criteria:\n uctm = UserCreatedTextMapper.query.filter_by(\n user_text=item[1],\n search_criteria=search_criteria.id).first()\n if uctm:\n result[item[0]] = uctm.search_criteria_value\n else:\n return None\n else:\n app.logger.error('Wrong search criteria: {}'.format(search_criteria))\n return None\n return result",
"def _select_variables(\n source_ds: xr.Dataset,\n source_gm: GridMapping,\n var_names: Union[None, str, Sequence[str]]\n) -> Mapping[str, xr.DataArray]:\n spatial_var_names = source_gm.xy_var_names\n spatial_shape = tuple(reversed(source_gm.size))\n spatial_dims = tuple(reversed(source_gm.xy_dim_names))\n if var_names is None:\n var_names = [var_name\n for var_name, var in source_ds.data_vars.items()\n if var_name not in spatial_var_names\n and _is_2d_spatial_var(var, spatial_shape, spatial_dims)]\n elif isinstance(var_names, str):\n var_names = (var_names,)\n elif len(var_names) == 0:\n raise ValueError(f'empty var_names')\n src_vars = {}\n for var_name in var_names:\n src_var = source_ds[var_name]\n if not _is_2d_spatial_var(src_var, spatial_shape, spatial_dims):\n raise ValueError(\n f\"cannot rectify variable {var_name!r}\"\n f\" as its shape or dimensions \"\n f\"do not match those of {spatial_var_names[0]!r}\"\n f\" and {spatial_var_names[1]!r}\"\n )\n src_vars[var_name] = src_var\n return src_vars",
"def match(pattern, data, myvars=None):\n if myvars is None:\n myvars = {}\n if type(pattern) is ListType and len(pattern) >= 1:\n # 'variables' are ['varname']\n myvars[pattern[0]] = data\n return 1, myvars\n if type(pattern) is not TupleType:\n return (pattern == data), myvars\n if len(data) != len(pattern):\n return 0, myvars\n for pattern, data in map(None, pattern, data):\n same, myvars = match(pattern, data, myvars)\n if not same:\n break\n return same, myvars",
"def variables(self):\n return np.array(list(self._match_result_dict.keys()))"
] | [
"0.6416395",
"0.6279023",
"0.61213815",
"0.5955996",
"0.5882055",
"0.58465207",
"0.58342683",
"0.5712536",
"0.56871027",
"0.56653214",
"0.54515976",
"0.5413046",
"0.54008603",
"0.5400208",
"0.53811234",
"0.5380468",
"0.53650934",
"0.53530097",
"0.53530097",
"0.5344481",
"0.53388053",
"0.5334782",
"0.53198516",
"0.53178436",
"0.5311006",
"0.52868265",
"0.526297",
"0.52604777",
"0.5243888",
"0.52276653"
] | 0.7268741 | 0 |
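A minimal sketch of the filtering idea behind the get_matching record above; the Variable namedtuple and the criteria values are hypothetical, and only the inner attribute-comparison loop is reproduced, not the strict/single handling.

from collections import namedtuple

Variable = namedtuple("Variable", ["name", "rank", "units"])

variables = (
    Variable("temperature", 2, "K"),
    Variable("pressure", 2, "Pa"),
    Variable("temperature", 3, "K"),
)

criteria = {"name": "temperature", "rank": 2}
# Same filtering idea as the document's inner loop: a variable matches only if
# every criterion equals the corresponding attribute.
matching = [v for v in variables
            if all(getattr(v, k) == val for k, val in criteria.items())]
print(matching)  # [Variable(name='temperature', rank=2, units='K')]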
Match variable to VariableFactory using rank, name, and units. | def match_factory(variable, factories):
if not isinstance(factories, tuple):
factories = (factories,)
for factory in factories:
if (
variable.rank == factory.rank
and variable.name == factory.name
and variable.units == factory.units
):
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def variable_factory(p, variable_name):\n if isinstance(variable_name, (Variable,)):\n return variable_name\n if not hasattr(p, \"variable_mapping\"):\n setattr(p, \"variable_mapping\", {})\n if variable_name not in p.variable_mapping:\n p.variable_mapping[variable_name] = Variable(variable_name)\n return p.variable_mapping[variable_name]",
"def variable(initializer=None, shape=None, dtype=None, name=None, **kwargs):\n return get_var(name, shape=shape, dtype=dtype, initializer=initializer, **kwargs)",
"def _find_or_create_variable(self, cname, vname, source):\n try:\n var = self.model.get_variable_by_name(cname, source.name)\n raise KeyError()\n except KeyError:\n # Have we created it already?\n try:\n var = self.model.get_variable_by_name(cname, vname)\n except KeyError:\n # Create it and add to model\n units = source.component.get_units_by_name(source.units)\n var = self.add_variable(cname, vname, units)\n return var",
"def Variable(name):\n placeholder_node = placeholder_op()\n placeholder_node.name = name\n return placeholder_node",
"def var(self, init_repr, name):\n randomness = self.get_ground_vector('!Var:{}-Var'.format(name))\n return self.varmodel(torch.cat([init_repr, randomness]))",
"def __getitem__(self, varName):\n # Static variables\n if varName in self.statVars:\n staticFV = StaticFileVariable(self, varName)\n return staticFV\n\n # Time variables\n elif varName in self.timeVars:\n timeVariables = TimeFileVariable(self, varName)\n return timeVariables",
"def __init__(self, variables, name='TPUReplicatedVariable'):\n if not isinstance(variables, abc.Sequence) or not variables or any(\n not isinstance(v, variables_lib.Variable) for v in variables):\n raise TypeError('Argument `variables` should be a non-empty list of '\n f'`variables.Variable`s. Received {variables}')\n\n if any(v.dtype != variables[0].dtype for v in variables):\n raise ValueError(\n 'All elements in argument `variables` must have the same dtype. '\n f'Received dtypes: {[v.dtype for v in variables]}')\n\n if any(v.shape != variables[0].shape for v in variables):\n raise ValueError(\n 'All elements in argument `variables` must have the same shape. '\n f'Received shapes: {[v.shape for v in variables]}')\n\n self._vars = variables\n self._name = name\n self._common_name = self._name.split(':')[0]\n self._cached_value = None",
"def var(*args, **kwargs):\n return Variable(*args, **kwargs)",
"def get_var(my_vars: dict, name: str):\n desired_var = my_vars.get(name)\n if desired_var is not None:\n return desired_var\n else:\n var_names = 'x, y, alpha, beta, zeta, psi'\n print('No variable with this name, current model accepts only:' + var_names)\n return None",
"def __init__(self, name, variable, variable_info):\n self._name = name\n self.var_id = variable\n self.var_period = variable_info[0]\n self.var_type = variable_info[1]\n self.var_detail = variable_info[2]\n self.var_units = variable_info[3]\n self.var_icon = variable_info[4]\n self.var_state = None",
"def assert_variable_name(parsed_file: ModelRunInfo, variable_name: str):\n assert parsed_file.variable_name == variable_name",
"def assign_variable(variable_name):\n # check variable name, call function 5 to check type\n if not in_alphabet(variable_name):\n print(f\"Syntax Error.\")\n return\n # prompt user to enter a value\n value = input(f\"Enter a value for {variable_name}: \")\n # check value type, call function 6 to check type\n if not is_digit(value):\n print(f\"Syntax Error.\")\n return\n # update dictionary\n lookUpTable[variable_name] = int(value)",
"def _name_to_variable(self, name: str) -> Parameter:\n return cast(Parameter, super()._name_to_variable(name))",
"def _variable(self, name, vars_set):\n if not re.match(r\"[_a-zA-Z][_a-zA-Z0-9]*$\", name):\n self._syntax_error(\"Not a valid name\", name)\n vars_set.add(name)",
"def _fn(dtype, shape, name, trainable, add_variable_fn):\n loc = add_variable_fn(\n name=name + \"_loc\",\n shape=shape,\n initializer=loc_initializer,\n regularizer=loc_regularizer,\n constraint=loc_constraint,\n dtype=dtype,\n trainable=trainable)\n if is_singular:\n return loc, None\n untransformed_scale = add_variable_fn(\n name=name + \"_untransformed_scale\",\n shape=shape,\n initializer=untransformed_scale_initializer,\n regularizer=untransformed_scale_regularizer,\n constraint=untransformed_scale_constraint,\n dtype=dtype,\n trainable=trainable)\n scale = (np.finfo(dtype.as_numpy_dtype).eps +\n nn_ops.softplus(untransformed_scale))\n return loc, scale",
"def model_variable(name, shape=None, dtype=dtypes.float32, initializer=None,\n regularizer=None, trainable=True, collections=None,\n caching_device=None, device=None, partitioner=None,\n custom_getter=None, use_resource=None):\n collections = list(collections or [])\n collections += [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.MODEL_VARIABLES]\n var = variable(name, shape=shape, dtype=dtype,\n initializer=initializer, regularizer=regularizer,\n trainable=trainable, collections=collections,\n caching_device=caching_device, device=device,\n partitioner=partitioner, custom_getter=custom_getter,\n use_resource=use_resource)\n return var",
"def __get_variable_from_dictionary(dictionary, variable_name):\n if variable_name not in dictionary.keys():\n dictionary[variable_name] = Variable(variable_name, None)\n return dictionary.get(variable_name)",
"def get_var_units(self, var_name, var_val):\n var_val = self._var_units[var_name]\n return True",
"def add_variable(self, name, domain):\n name = str(name)\n vnode = VariableNode(name, domain)\n if name in self.vs:\n raise RuntimeError(\"Variable '{0}' already defined\".format(name))\n self.vs[name] = vnode\n return vnode",
"def getResRatioVarUnit( self, name ):\n\n if not self.resNames:\n self.updateAdb( )\n\n if name not in self.resNames:\n for k, v in self.resNames.items():\n if name == v:\n name = k\n break\n \n unit = self.getVarUnit( name, \"all\" )\n if not unit:\n raise AcuDbAssistError, \"Invalid variable name.\"\n return unit",
"def getVariable( self, name ):\n for uniform in self.uniforms:\n if uniform.name == name:\n return uniform \n return None",
"def getOriVarUnit( self, name ):\n\n if not self.oriVarNames:\n self.getOriVarNames( )\n\n if name not in self.oriVarNames:\n for k, v in self.oriVarNames.items():\n if name == v:\n name = k\n break\n \n unit = self.getVarUnit( name, \"ori\" )\n if not unit:\n raise AcuDbAssistError, \"Invalid variable name.\"\n return unit",
"def make_variable(self, name=None):\r\n return self.Variable(self, name=name)",
"def make_variable(self, name=None):\r\n return self.Variable(self, name=name)",
"def find_variable(self, standard_name, any_scope=True, loop_subst=False):\n if standard_name in self:\n var = self[standard_name]\n elif any_scope and (self._parent_dict is not None):\n var = self._parent_dict.find_variable(standard_name, any_scope)\n else:\n var = None\n # End if\n if (var is None) and loop_subst:\n var = self.find_loop_subst(standard_name, any_scope=any_scope)\n # End if\n return var",
"def _get_existing_variable(name):\n try:\n op = tf.get_default_graph().get_operation_by_name(name)\n except KeyError:\n return None\n\n # Among all cases (partitioned variable, resource variable, or regular one),\n # we assume that there's either a shape attribute to the op or to its output.\n try:\n shape = tf.TensorShape(op.get_attr('shape'))\n except ValueError:\n shape = op.outputs[0].shape\n\n with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n try:\n # tf.Variable and tf.PartitionedVariable are not polymorphic, but\n # both support convert_to_tensor. The result is thus always a\n # tf.Tensor.\n return tf.convert_to_tensor(tf.get_variable(name, shape=shape))\n except ValueError as e:\n if 'Variable %s does not exist' % name in str(e):\n return None\n else:\n raise e # pass through any other exceptions.",
"def get_val(self, name, units=None, indices=None):\n val = self[name]\n\n if indices is not None:\n val = val[indices]\n\n if units is not None:\n base_units = self._get_units(name)\n simp_units = simplify_unit(units)\n\n if base_units is None:\n msg = \"Can't express variable '{}' with units of 'None' in units of '{}'.\"\n raise TypeError(msg.format(name, simp_units))\n\n try:\n scale, offset = unit_conversion(base_units, simp_units)\n except TypeError:\n msg = \"Can't express variable '{}' with units of '{}' in units of '{}'.\"\n raise TypeError(msg.format(name, base_units, simp_units))\n\n val = (val + offset) * scale\n\n return val",
"def make_variable(self, name = None):\r\n return self.Variable(self, name = name)",
"def find_dimension_subst(self, standard_name, any_scope=True, context=None):\n loop_var = standard_name in VarDictionary.__ccpp_dim_subst__\n logger_str = None\n if loop_var:\n # Let us see if we can replace the variable\n dim_name = VarDictionary.__ccpp_dim_subst__[standard_name]\n my_var = self.find_variable(dim_name, any_scope=any_scope)\n if my_var is None:\n raise CCPPError(\"Dimension variable, {} not found{}\".format(dim_name, context_string(context)))\n # End if\n else:\n my_var = None\n # End if\n return my_var",
"def get_variable_object(self, name = None):\n if name is not None and name != \"\":\n if self.fmu is not None:\n try:\n return self.fmu.get_model_variables()[name]\n except Exception:\n logger.error(\"The variable or parameter: {0} is not available in the list: {1}\".format(name, self.fmu.get_model_variables().keys()))\n return None\n else:\n logger.error(\"The FMU model has not yet been set. Impossible return the variable {0}\".format(name))\n return None\n else:\n logger.error(\"Impossible to look for the name because it is None or empty\")\n return None"
] | [
"0.6039259",
"0.5748577",
"0.5715186",
"0.56820124",
"0.56072986",
"0.5535535",
"0.55185264",
"0.53700167",
"0.5367385",
"0.536188",
"0.53586817",
"0.53523105",
"0.5274955",
"0.5252807",
"0.52485764",
"0.5248227",
"0.5243896",
"0.51856565",
"0.5179017",
"0.517816",
"0.51743823",
"0.51737624",
"0.5152167",
"0.5152167",
"0.51487684",
"0.5146344",
"0.51460785",
"0.5141528",
"0.51329786",
"0.5119005"
] | 0.6425409 | 0 |
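A hedged usage sketch for the match_factory record above; Variable and VariableFactory are hypothetical namedtuple stand-ins, and the loop is condensed into any() while keeping the same rank/name/units comparison as the document.

from collections import namedtuple

Variable = namedtuple("Variable", ["name", "rank", "units"])
VariableFactory = namedtuple("VariableFactory", ["name", "rank", "units"])

def match_factory(variable, factories):
    # Accept a single factory or a tuple of factories, as in the document.
    if not isinstance(factories, tuple):
        factories = (factories,)
    return any(variable.rank == f.rank and variable.name == f.name
               and variable.units == f.units
               for f in factories)

v = Variable("soil_moisture", 4, "1")
factories = (VariableFactory("precip", 1, "mm"),
             VariableFactory("soil_moisture", 4, "1"))
print(match_factory(v, factories))  # True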
Get the lags for a given VariableFactory. | def get_variable_lags(var_factory):
if var_factory in shifted_variables:
return lags
return (0,) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_shifted_variables(var_factory):\n shifted = []\n for lag in get_variable_lags(var_factory):\n shifted.append(var_factory[lag])\n return tuple(shifted)",
"def lag(self):\n self._assert_counted_at_lag()\n return self._lag",
"def create_predictors(y): # pragma: no cover\n lags = y[-1:-4:-1]\n\n return lags",
"def lagged_features(df, lags):\n df_list = []\n for lag in lags:\n df_shifted = df.shift(lag)\n df_shifted.columns = [x + \"_lag\" + str(lag) for x in df_shifted.columns]\n df_list.append(df_shifted)\n fea = pd.concat(df_list, axis=1)\n return fea",
"def LDFlags(self):\n return self._g_linkflags",
"def user_iflags_prev(*args):\n return _ida_hexrays.user_iflags_prev(*args)",
"def get_user_iflags(self, *args):\n return _ida_hexrays.cfunc_t_get_user_iflags(self, *args)",
"def _add_lagged_features(self, X: pd.DataFrame, lags: list) -> pd.DataFrame:\n for l in lags:\n X[f'sales_lag_{l + self.shift_days}'] = (X[['id', 'sales', 'd']]\n .groupby('id')['sales']\n .transform(lambda x: x.shift(l + self.shift_days))\n .fillna(0))\n return X",
"def _get_lags_dict(self):\n lags_dict = {}\n for fcst_date in self.dates:\n day_of_year = self.calculate_day_of_year(fcst_date)\n for init_date in self.init_dates:\n lag = day_of_year - self.calculate_day_of_year(init_date)\n days_of_year = lags_dict.get(lag)\n if days_of_year:\n days_of_year.append(day_of_year)\n else:\n lags_dict[lag] = [day_of_year]\n \n return lags_dict",
"def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)",
"def get_user_iflags(self, *args):\n return _ida_hexrays.cfuncptr_t_get_user_iflags(self, *args)",
"def get_lagged_list(binned_series, lags):\n lagged_list = []\n for s in range(lags):\n lagged_list.append(binned_series.shift(s))\n\n lagged_frame = pd.concat(lagged_list, 1).dropna()\n\n train_x = lagged_frame.iloc[:, 1:]\n train_y = lagged_frame.iloc[:, 0]\n return train_x, train_y",
"def get_idx_lag(idx_start, ar_iteration, forecast_cycle, input_k):\n return idx_start + (forecast_cycle * ar_iteration) + input_k",
"def get_Delta_weigths():\n ls = []\n for i_lay in range(1, len(layers)):\n ls.append(layers[i_lay][\"Delta_w\"])\n return ls",
"def make_multi_lagger(lags, groupby_kwargs=None):\n laggers = [SingleLagger(l, groupby_kwargs=groupby_kwargs) for l in lags]\n feature_union = FeatureUnion([\n (repr(lagger), lagger) for lagger in laggers\n ])\n return feature_union",
"def user_iflags_new(*args):\n return _ida_hexrays.user_iflags_new(*args)",
"def get_variables(self) -> np.array:\n pass",
"def get_variables(self):\n return [self.g_t, self.m_t]",
"def features(self, state):\n jdecays = state[\"decays\"]\n cor_mean = state[\"means\"] / (1 - jdecays**(state[\"iteration\"]))\n # longest running decay\n approx_max = cor_mean[1:]\n cor_mean = cor_mean[0:-1]\n running_min = state[\"running_min\"][0:-1]\n\n den = jnp.maximum(1e-8, (approx_max - running_min))\n pre_center = (cor_mean - running_min) / den\n feature1 = (pre_center - 1.0)\n feature1 = jnp.clip(feature1, -1, 1)\n # first couple features are bad.\n return jnp.where(state[\"iteration\"] <= 2, feature1 * 0, feature1)",
"def get_maxlag(self, showall=False):\n params = {\"action\": \"query\", \"meta\": \"siteinfo\", \"siprop\": \"dbrepllag\"}\n if showall:\n params[\"sishowalldb\"] = 1\n with self._api_lock:\n result = self._api_query(params, ignore_maxlag=True)\n if showall:\n return [server[\"lag\"] for server in result[\"query\"][\"dbrepllag\"]]\n return result[\"query\"][\"dbrepllag\"][0][\"lag\"]",
"def feature_func(ims):\n # Set eval mode\n # Force all BN layers to use global mean and variance, also disable\n # dropout.\n utils.may_set_mode(self.modules_optims, 'eval')\n ims = TVT(Variable(torch.from_numpy(ims).float()))\n feats, _ = self.googlenet(ims)\n feats = feats.data.cpu().numpy()\n return feats",
"def _get_dependent_variables(input_ops, output_ops):\n\n # avoids the edge-case when input_ops == output_ops.\n output_ops = nest.map_structure(gen_array_ops.identity, output_ops)\n inbetween_ops = op_selector.get_backward_walk_ops(\n seed_ops=output_ops,\n stop_at_ts=input_ops,\n inclusive=False,\n only_differentiable=True)\n var_ops = (op for op in inbetween_ops if op.type in VAR_OP_TYPES)\n var_names = (op.name for op in var_ops)\n tf_vars = (get_variable_by_name(var_name) for var_name in var_names)\n tf_vars = [v for v in tf_vars if v is not None]\n return tf_vars",
"def _add_lags(self, X, y=None, extrapolate=1, update_features_df=False):\n\n # Add lag target to the features if required\n # This will create an additional feature for each sample i.e. the previous value of y \n if y is not None and self.model.lag_target:\n X[\"previous_y\"] = y.shift(1)\n \n if update_features_df:\n # Check the target's data type\n dt = 'float' if is_numeric_dtype(y.iloc[:,0]) else 'str'\n # Set the preprocessing feature strategy for the lag targets\n if self.model.estimator_type == 'classifier':\n fs = 'one hot encoding' \n elif self.model.scale_lag_target and not self.model.scale_target:\n fs = 'scaling'\n else:\n fs = 'none'\n self.model.scale_lag_target\n # Update feature definitions for the model\n self.model.features_df.loc['previous_y'] = [self.model.name, 'previous_y', 'feature', dt, fs, '']\n\n if self.model.lags:\n # Add the lag observations\n X = utils.add_lags(X, lag=self.model.lags, extrapolate=extrapolate, dropna=True, suffix=\"t\")\n \n if update_features_df:\n # Duplicate the feature definitions by the number of lags\n self.model.features_df = pd.concat([self.model.features_df] * (self.model.lags+extrapolate))\n # Set the new feature names as the index of the feature definitions data frame\n self.model.features_df['name'] = X.columns\n self.model.features_df = self.model.features_df.set_index('name', drop=True)\n\n if self.model.debug:\n self._print_log(11, data=X)\n\n return X",
"def get_lagged_subsequences(\n F,\n sequence: Tensor,\n sequence_length: int,\n indices: List[int],\n subsequences_length: int = 1,\n ) -> Tensor:\n # we must have: sequence_length - lag_index - subsequences_length >= 0\n # for all lag_index, hence the following assert\n assert max(indices) + subsequences_length <= sequence_length, (\n f\"lags cannot go further than history length, \"\n f\"found lag {max(indices)} while history length is only \"\n f\"{sequence_length}\"\n )\n assert all(lag_index >= 0 for lag_index in indices)\n\n lagged_values = []\n for lag_index in indices:\n begin_index = -lag_index - subsequences_length\n end_index = -lag_index if lag_index > 0 else None\n lagged_values.append(\n F.slice_axis(\n sequence, axis=1, begin=begin_index, end=end_index\n )\n )\n\n return F.stack(*lagged_values, axis=-1)",
"def fdm(self,trc,fd_step,lags,noise_scalar):\n ress=[]\n trc_out=trc/np.amax(np.abs(trc))\n noise=np.random.normal(0,1,len(trc_out))*(np.std(trc_out)/noise_scalar)\n trc_out=trc_out+noise\n for i,lag in enumerate(lags):\n trc_cp=trc_out.copy()\n t=len(trc)-1\n trc_cp[0:fd_step]=0\n while t>fd_step-1:\n trc_win=trc_out[t-fd_step:t+1]\n t_win=fd_step-1\n res=0\n while t_win>lag-1:\n res+=np.square(trc_win[t_win-lag]-trc_win[t_win])\n t_win-=1\n res=np.log10(1/(fd_step-lag)*res)\n trc_cp[t]=res\n t-=1\n if len(ress)==0:\n ress=np.reshape(trc_cp,(len(trc_cp),1))\n else:\n ress=np.concatenate((ress,np.reshape(trc_cp,(len(trc_cp),1))),axis=1)\n for i,j in enumerate(ress):\n slope = linregress(lags,ress[i,:])[0]\n trc_out[i]=slope\n \n return trc_out",
"def get_lagged_subsequences_inference(\n sequence: torch.Tensor,\n subsequences_length: int,\n lags_seq: List[int]) -> torch.Tensor:\n sequence_length = sequence.shape[1]\n batch_size = sequence.shape[0]\n lagged_values = []\n for lag_index in lags_seq:\n begin_index = -lag_index - subsequences_length\n end_index = -lag_index if lag_index > 0 else None\n if end_index is not None and end_index < -sequence_length:\n lagged_values.append(torch.zeros([batch_size, subsequences_length, *sequence.shape[2:]]))\n continue\n if begin_index < -sequence_length:\n if end_index is not None:\n pad_shape = [batch_size, subsequences_length - sequence_length - end_index, *sequence.shape[2:]]\n lagged_values.append(torch.cat([torch.zeros(pad_shape), sequence[:, :end_index, ...]], dim=1))\n else:\n pad_shape = [batch_size, subsequences_length - sequence_length, *sequence.shape[2:]]\n lagged_values.append(torch.cat([torch.zeros(pad_shape), sequence], dim=1))\n continue\n else:\n lagged_values.append(sequence[:, begin_index:end_index, ...])\n\n lagged_seq = torch.stack(lagged_values, -1).transpose(-1, -2).reshape(batch_size, subsequences_length, -1)\n return lagged_seq",
"def polynomial_variables(self):\n return self._polynomial_variables",
"def get_dep_funcs(start, var_name):\n func_nodes = []\n \n # Search for all uses of this variable\n for _,f,_ in backward_var_iter(start):\n if var_name in [v.get_variable().name for v in f.inputs]:\n func_nodes.append(f)\n \n if not len(func_nodes):\n raise ValueError('Could not find variable with name %s in graph'%var_name)\n \n return func_nodes",
"def get_dataset_lags(data_wide: pd.DataFrame, target_col: str, win_len: int) -> pd.DataFrame:\n # get range of columns from the target backwards\n cols = get_columns_range(data_wide, target_col, win_len)\n return data_wide.loc[:, cols]",
"def user_iflags_next(*args):\n return _ida_hexrays.user_iflags_next(*args)"
] | [
"0.67575127",
"0.5803747",
"0.5776367",
"0.54579306",
"0.54395133",
"0.5426974",
"0.53509307",
"0.53438425",
"0.5341233",
"0.52827334",
"0.5247959",
"0.5246997",
"0.5176305",
"0.50930464",
"0.5059406",
"0.50513554",
"0.5010745",
"0.4938749",
"0.4918975",
"0.49158582",
"0.48737428",
"0.48645195",
"0.4826547",
"0.47907507",
"0.4768742",
"0.47254503",
"0.47254273",
"0.4714872",
"0.47064048",
"0.4689553"
] | 0.87622905 | 0 |
Get all possible shifted variables given a VariableFactory. | def get_shifted_variables(var_factory):
shifted = []
for lag in get_variable_lags(var_factory):
shifted.append(var_factory[lag])
return tuple(shifted) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_variable_lags(var_factory):\n if var_factory in shifted_variables:\n return lags\n return (0,)",
"def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)",
"def get_all_variables(self):\n out = []\n for i in self.items:\n out += i.get_all_variables()\n return out",
"def get_all_variables(self):\n out = []\n for i in self.items:\n out += i.get_all_variables()\n return out",
"def get_all_variables(self):\n out = []\n for i in self.items:\n out += i.get_all_variables()\n return out",
"def calc_variables ( ):\n\n # In this example we simulate using the shifted-force potential only\n # The values of < p_sf >, < e_sf > and density should be consistent (for this potential)\n # There are no long-range or delta corrections\n\n from averages_module import VariableType\n \n # Preliminary calculations\n vol = box**3 # Volume\n rho = n / vol # Density\n\n # Variables of interest, of class VariableType, containing three attributes:\n # .val: the instantaneous value\n # .nam: used for headings\n # .method: indicating averaging method\n # If not set below, .method adopts its default value of avg\n # The .nam and some other attributes need only be defined once, at the start of the program,\n # but for clarity and readability we assign all the values together below\n\n # Move acceptance ratio\n m_r = VariableType ( nam = 'Move ratio', val = m_ratio, instant = False )\n\n # Internal energy per molecule (shifted-force potential)\n # Ideal gas contribution (assuming nonlinear molecules) plus total PE divided by N\n e_sf = VariableType ( nam = 'E/N shifted force', val = 3.0*temperature + total.pot/n )\n\n # Pressure (shifted-force potential)\n # Ideal gas contribution plus total virial divided by V\n p_sf = VariableType ( nam = 'P shifted force', val = rho*temperature + total.vir/vol )\n\n # Collect together into a list for averaging\n return [ m_r, e_sf, p_sf ]",
"def variables(self):\n for state in self.states:\n yield self.assert_state(state)\n yield self.deassert_state(state)",
"def get_all_variables(self):\n return [self.item]",
"def get_all_variables(self):\n raise NotImplementedError()",
"def variables(self) -> AbstractSet[Variable]:\n return self._variables",
"def get_all_variables(self):\n return []",
"def get_variables(self):\n\n self._enforce_coupling()\n\n dv = []\n for scenario in self.scenarios:\n if scenario.group_master:\n dv.extend(scenario.active_variables())\n else:\n dv.extend(scenario.uncoupled_variables())\n\n for body in self.bodies:\n if body.group_master:\n dv.extend(body.active_variables())\n else:\n dv.extend(body.uncoupled_variables())\n\n return dv",
"def get_transform_vars(self):\n return [v for v in (self.rotation_vars + self.translation_vars)\n if isinstance(v, tf.Variable)]",
"def _get_dependent_variables(input_ops, output_ops):\n\n # avoids the edge-case when input_ops == output_ops.\n output_ops = nest.map_structure(gen_array_ops.identity, output_ops)\n inbetween_ops = op_selector.get_backward_walk_ops(\n seed_ops=output_ops,\n stop_at_ts=input_ops,\n inclusive=False,\n only_differentiable=True)\n var_ops = (op for op in inbetween_ops if op.type in VAR_OP_TYPES)\n var_names = (op.name for op in var_ops)\n tf_vars = (get_variable_by_name(var_name) for var_name in var_names)\n tf_vars = [v for v in tf_vars if v is not None]\n return tf_vars",
"def get_variables(self):\n return [self.variables[key] for key in sorted(self.variables)]",
"def _schedule_per_var(self):\n # sg = self.mg.region_graph.copy() # no mutation\n sg = self.mg.region_graph\n var_elim_schedule = []\n for v in self.elim_order:\n nodes_with_v = [node for node in sg.nodes_iter() if v in sg.node[node]['sc']]\n var_elim_schedule.append(nodes_with_v)\n return var_elim_schedule",
"def get_variables(self):\n return [self.g_t, self.m_t]",
"def variables(self):\n for name in self._nodes:\n if isinstance(self._nodes[name], RandomVariable):\n yield name",
"def get_variables(self) -> typing.List:\n parts = (self.neural_net.encoder, self.neural_net.predictor, self.neural_net.dynamics)\n return [v for v_list in map(lambda n: n.weights, parts) for v in v_list]",
"def get_all_variables(self):\n return self.start.get_all_variables() + self.end.get_all_variables()",
"def getMaskVariables(self, product):\r\n mask_variable_names = self.getMaskVariableNames(product)\r\n mask_variables = [self.createMaskVariable(product, n) for n in mask_variable_names]\r\n mask_variables = [self.editMaskVariable(product, v) for v in mask_variables]\r\n\r\n return mask_variables",
"def all_variables(formula):\n return collect_unique_nodes(formula, lambda x: isinstance(x, Variable))",
"def prepare_for_sat(self):\n\n for state_group in self.state_groups:\n new_variables = set()\n for variable in state_group.variables():\n new_variables.add(variable.variable_name())\n\n self.add_variable_names(new_variables)\n\n for variable in new_variables:\n assert variable not in self.variable_to_state_group\n self.variable_to_state_group[variable] = state_group\n\n for clause in state_group.clauses():\n self.add_clause(clause)\n\n self.variable_names = sorted(self.variable_names)\n self.variable_name_to_index = {}\n\n # Assign SAT variables indicies to variable names\n for idx, variable_name in enumerate(self.variable_names):\n assert variable_name not in self.variable_name_to_index\n self.variable_name_to_index[variable_name] = idx + 1\n\n # Convert abstract clauses using variable names to SAT clauses\n concrete_clauses = set()\n for abstract_clause in self.abstract_clauses:\n for clause in abstract_clause.clauses():\n concrete_clause = []\n for part in clause:\n concrete_clause.append(part.variable(self))\n\n assert len(set(concrete_clause)) == len(concrete_clause)\n concrete_clauses.add(tuple(sorted(concrete_clause)))\n\n return sorted(concrete_clauses)",
"def _extract_tfparams(\n params: Iterable[zfit.Parameter] | zfit.Parameter,\n) -> List[tf.Variable]:\n return params\n # TODO(WrappedVariable): this is needed if we want to use wrapped Variables\n # import zfit\n # params = convert_to_container(params)\n # tf_params = []\n # for param in params:\n # if isinstance(param, tf.Variable):\n #\n # # TODO: reactivate if WrappedVariables are used\n # # if isinstance(param, zfit.Parameter):\n # # raise ValueError(\"The parameter cannot be a tf.Variable and a zfit.Parameter at the same time.\")\n # variable = param\n # else:\n # if not isinstance(param, zfit.Parameter):\n # raise ValueError(\"The parameter has to be either a tf.Variable or a zfit.Parameter.\")\n # variable = param.variable\n # tf_params.append(variable)\n # return tf_params",
"def get_variable_values(self, vars):\n raise NotImplementedError()",
"def get_vars(self):\n return [self.mu, self.var]",
"def get_variables(self) -> np.array:\n return np.array([self.a, self.b, self.c])",
"def fetch_variables(self):\r\n fgraph = self.fgraph\r\n self.inputs = fgraph.inputs\r\n self.outputs = fgraph.outputs\r\n\r\n # list(fgraph.variables)\r\n # We need to include the not used inputs in our variables,\r\n # otherwise we can't pass them to the module.\r\n self.variables = [var for var in self.inputs if not len(var.clients)]\r\n self.variables += graph.variables(self.inputs, self.outputs)\r\n\r\n # The orphans field is listified to ensure a consistent order.\r\n #list(fgraph.orphans.difference(self.outputs))\r\n self.orphans = list(r for r in self.variables\r\n if isinstance(r, graph.Constant) and\r\n r not in self.inputs)\r\n self.temps = list(set(self.variables).difference(\r\n self.inputs).difference(self.outputs).difference(self.orphans))\r\n self.consts = []\r\n self.node_order = self.schedule(fgraph)",
"def get_variables(self) -> np.array:\n pass",
"def _create_variables(self):\n\n \n with tf.name_scope(\"variable\"):\n if self.reg_type == 'L2':\n regularizer = tf.contrib.layers.l2_regularizer(scale=self.reg_scale)\n else:\n regularizer = tf.contrib.layers.l1_regularizer(scale=self.reg_scale)\n \n self.dim_lst = [self.dim_inputs] + self.dim_hidden_lst + [self.number_structures]\n print(self.dim_lst)\n \n self.W_lst = []\n self.b_lst = []\n for i in range(len(self.dim_lst)-1):\n self.W_lst.append(tf.get_variable(\n \"W{}\".format(i+1),\n shape=[self.dim_lst[i], self.dim_lst[i+1]],\n initializer=tf.contrib.layers.xavier_initializer(),\n regularizer=regularizer)\n )\n # not output layer, has bias term\n if i < len(self.dim_lst) - 2:\n self.b_lst.append(tf.get_variable(\"b{}\".format(i+1), shape=[self.dim_lst[i+1]]))"
] | [
"0.69359124",
"0.5861063",
"0.5553798",
"0.5553798",
"0.5553798",
"0.5403311",
"0.53172415",
"0.5283232",
"0.5274266",
"0.5179303",
"0.5173764",
"0.51522267",
"0.51142913",
"0.50858605",
"0.5057335",
"0.5056801",
"0.5049597",
"0.50440574",
"0.49971545",
"0.49796465",
"0.49775988",
"0.49619332",
"0.49505776",
"0.49375126",
"0.49312696",
"0.49252418",
"0.49212652",
"0.49177998",
"0.49002266",
"0.489742"
] | 0.8306546 | 0 |
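A minimal, self-contained sketch of how the get_shifted_variables entry above could be exercised. The SimpleVarFactory class, the shifted_variables registry, and the (0, 1, 7) lag tuple are illustrative assumptions, not part of the retrieved source; only get_variable_lags mirrors the snippet listed in this entry's candidates.

# Hypothetical stand-ins for illustration; get_variable_lags follows the candidate snippet above.
shifted_variables = set()     # factories registered as having shifted/lagged versions
lags = (0, 1, 7)              # assumed lag offsets

def get_variable_lags(var_factory):
    if var_factory in shifted_variables:
        return lags
    return (0,)

class SimpleVarFactory:
    # Indexable stand-in: factory[lag] yields a named lagged variable.
    def __init__(self, name):
        self.name = name
    def __getitem__(self, lag):
        return f"{self.name}_lag{lag}"

def get_shifted_variables(var_factory):
    # Same behaviour as the retrieved document above, written as a comprehension.
    return tuple(var_factory[lag] for lag in get_variable_lags(var_factory))

demand = SimpleVarFactory("demand")
shifted_variables.add(demand)                            # registered as shifted -> all lags apply
print(get_shifted_variables(demand))                     # ('demand_lag0', 'demand_lag1', 'demand_lag7')
print(get_shifted_variables(SimpleVarFactory("price")))  # ('price_lag0',)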
Returns the path to our major ldso symlink, which allows us to change which ldso we are actively using without patching a bunch of binaries. | def ld_linux_path(root):
return os.path.join(root, 'lib', 'ld-linux-xpkg.so') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _find_ld_version():\n if sys.platform == 'darwin':\n return _find_exe_version('ld -v', _MAC_OS_X_LD_VERSION)\n else:\n return _find_exe_version('ld -v')",
"def get_linked_libpython():\n if is_windows():\n return\n libdl = ctypes.CDLL(ctypes.util.find_library(\"dl\"))\n libdl.dladdr.argtypes = [ctypes.c_void_p, ctypes.POINTER(_Dl_info)]\n libdl.dladdr.restype = ctypes.c_int\n\n dlinfo = _Dl_info()\n retcode = libdl.dladdr(\n ctypes.cast(ctypes.pythonapi.Py_GetVersion, ctypes.c_void_p),\n ctypes.pointer(dlinfo))\n if retcode == 0: # means error\n return\n path = os.path.realpath(dlinfo.dli_fname.decode())\n if path == os.path.realpath(sys.executable):\n return\n return path",
"def fixLDPath( root, ldpath, directory ):\n\n if os.path.exists( directory ):\n shutil.rmtree( directory )\n\n start = os.getcwd()\n os.mkdir( directory )\n os.chdir( directory )\n uniqueLD = uniquePath( ldpath )\n\n if DEBUG:\n print 'Unique LD LIBRARY PATH is:'\n print uniqueLD\n sys.stdout.flush()\n\n ldlist = string.split( uniqueLD, ':' )\n if DEBUG:\n print ''\n print 'LD List is:'\n print ldlist\n print ''\n sys.stdout.flush()\n\n for path in ldlist:\n if os.path.exists( path ):\n\n if DEBUG:\n print 'Searching for shared libraries in:'\n print path\n print '-----------------------------------------------'\n res = shellCall( 0, 'ls ' + path + '/*.so*' )\n if res['OK']:\n print res['Value']\n else:\n print res\n print '-----------------------------------------------'\n\n output = shellCall( 0, 'ls ' + path + '/*.so*' )\n #must be tidied for Windows (same below)\n\n if DEBUG:\n if not output['OK']:\n print '**************************'\n print 'Warning, problem with ls:'\n print output\n print '**************************'\n\n if not output['Value'][0]:\n ldlibs = output['Value'][1].split( '\\n' )\n for lib in ldlibs:\n if os.path.exists( lib ):\n filename = os.path.basename( lib )\n output = shellCall( 0, 'ln -s ' + str( lib ) + ' ' + str( filename ) )\n #N.B. for Windows this should be a copy...\n if DEBUG:\n if not output['OK']:\n print '********************************'\n print 'Warning, problem creating link:'\n print 'File: ', filename\n print 'Path: ', lib\n print output\n print '********************************'\n\n if DEBUG:\n print 'Searching for rootmap file in:'\n print path\n print '-----------------------------------------------'\n res = shellCall( 0, 'ls ' + path + '/*rootmap*' )\n if res['OK']:\n print res['Value']\n else:\n print res\n print '-----------------------------------------------'\n\n output = shellCall( 0, 'ls ' + path + '/*rootmap*' )\n\n if DEBUG:\n if not output['OK']:\n print '**************************'\n print 'Warning, problem with rootmap:'\n print output\n print '**************************'\n\n if not output['Value'][0]:\n ldlibs = output['Value'][1].split( '\\n' )\n for lib in ldlibs:\n if os.path.exists( lib ):\n if re.search( 'RELAX', lib ) is not None:\n filename = os.path.basename( lib )\n output = shellCall( 0, 'ln -s ' + str( lib ) + ' ' + str( filename ) )\n if DEBUG:\n if not output['OK']:\n print '********************************'\n print 'Warning, problem creating link:'\n print 'File: ', filename\n print 'Path: ', lib\n print output\n print '********************************'\n\n os.chdir( start )\n sys.stdout.flush()",
"def realPath(self):\n \n return (self.useLink and [self.linkPath] or [self.installPath])[0]",
"def get_dlsym_offset():\n import ctypes\n libdl = ctypes.PyDLL('libdl.so')\n dlopen = ctypes.cast(libdl.dlopen, ctypes.c_void_p).value\n dlsym = ctypes.cast(libdl.dlsym, ctypes.c_void_p).value\n return dlsym - dlopen",
"def get_reference_binary():\n return \"./Binary/linux-x64/astcenc\"",
"def new_realpath(name):\n if name.startswith('link-to-ham'):\n return name[len('link-to-'):]\n else:\n return name",
"def load_linux_so():\n shared_name = get_project_root() / \"build/libastyle.so\"\n\n shared = str(pl.Path(shared_name).absolute())\n # file_ = {f for f in pl.Path().iterdir() if f.name == shared_name}\n\n try:\n libc = cdll.LoadLibrary(shared)\n except OSError as err:\n # \"cannot open shared object file: No such file or directory\"\n print(err)\n raise FileNotFoundError(\"Cannot find \" + shared)\n return libc",
"def _so_symlinks(path):\n if not os.path.isdir(path):\n assert AssertionError(\"Failed to make so symlinks: path '%s' is not a directory.\", path)\n for dirent in os.listdir(path):\n fname = os.path.join(path, dirent)\n if os.path.isdir(fname) or os.path.islink(fname):\n continue\n m = re.match(r'(.+\\.so)\\.(\\d+)\\.(\\d+)\\.(\\d+)$', fname)\n if m:\n so,x,y,z = m.groups()\n symlink(fname, \"%s.%s.%s\" % (so, x, y))\n symlink(fname, \"%s.%s\" % (so, x))\n symlink(fname, so)",
"def get_lib_extension():\r\n if sys.platform == 'win32':\r\n return 'pyd'\r\n else:\r\n return 'so'",
"def get_plato_path():\n\treturn \"/tsi/\"",
"def find_lld(required=True):\n lld_list = []\n major = tvm.target.codegen.llvm_version_major(allow_none=True)\n if major is not None:\n lld_list += [f\"ld.lld-{major}.0\"]\n lld_list += [f\"ld.lld-{major}\"]\n lld_list += [\"ld.lld\"]\n valid_list = [utils.which(x) for x in lld_list]\n valid_list = [x for x in valid_list if x]\n if not valid_list and required:\n raise RuntimeError(\"cannot find ld.lld, candidates are: \" + str(lld_list))\n return valid_list",
"def symlink_target(pth):\n\n if os.path.islink(pth):\n return os.readlink(pth)\n return pth",
"def _GetLibraryPath(self, platform, backupPlatform=''):\n if platform == Environment.GetPlatform() or \\\n (backupPlatform and backupPlatform == Environment.GetPlatform()):\n return os.path.split(self._libraryPath)[1]\n return ''",
"def find_tool():\n return shutil.which('readelf')",
"def getfullnameof(mod, xtrapath=None):\n pywin32_paths = []\n if is_win:\n pywin32_paths = [os.path.join(get_python_lib(), 'pywin32_system32')]\n if is_venv:\n pywin32_paths.append(\n os.path.join(base_prefix, 'Lib', 'site-packages',\n 'pywin32_system32')\n )\n\n epath = (sys.path + # Search sys.path first!\n pywin32_paths +\n winutils.get_system_path() +\n compat.getenv('PATH', '').split(os.pathsep))\n if xtrapath is not None:\n if type(xtrapath) == type(''):\n epath.insert(0, xtrapath)\n else:\n epath = xtrapath + epath\n for p in epath:\n npth = os.path.join(p, mod)\n if os.path.exists(npth) and matchDLLArch(npth):\n return npth\n return ''",
"def _blink_base(self):\n module_path = self._filesystem.path_to_module(self.__module__)\n tools_index = module_path.rfind('tools')\n assert tools_index != -1, 'could not find location of this checkout from %s' % module_path\n return self._filesystem.normpath(module_path[0:tools_index - 1])",
"def get_ext_filename(self, fullname):\n ext_path = fullname.split('.')\n ext_suffix = '.so'\n return os.path.join(*ext_path) + ext_suffix",
"def fix_executable(fname):\n default_encoding = sys.getdefaultencoding()\n try:\n ostype = subprocess.check_output(\n ['uname', '-s']).strip().decode(default_encoding)\n except subprocess.CalledProcessError:\n return\n except OSError as reason:\n if getattr(reason, 'winerror', None) is not None:\n return\n raise reason\n\n if ostype != \"Linux\":\n return\n\n if not os.path.exists(\"/etc/NIXOS\"):\n return\n if os.path.exists(\"/lib\"):\n return\n\n # At this point we're pretty sure the user is running NixOS\n nix_os_msg = \"info: you seem to be running NixOS. Attempting to patch\"\n print(nix_os_msg, fname)\n\n try:\n interpreter = subprocess.check_output(\n [\"patchelf\", \"--print-interpreter\", fname])\n interpreter = interpreter.strip().decode(default_encoding)\n except subprocess.CalledProcessError as reason:\n print(\"warning: failed to call patchelf:\", reason)\n return\n\n loader = interpreter.split(\"/\")[-1]\n\n try:\n ldd_output = subprocess.check_output(\n ['ldd', '/run/current-system/sw/bin/sh'])\n ldd_output = ldd_output.strip().decode(default_encoding)\n except subprocess.CalledProcessError as reason:\n print(\"warning: unable to call ldd:\", reason)\n return\n\n for line in ldd_output.splitlines():\n libname = line.split()[0]\n if libname.endswith(loader):\n loader_path = libname[:len(libname) - len(loader)]\n break\n else:\n print(\"warning: unable to find the path to the dynamic linker\")\n return\n\n correct_interpreter = loader_path + loader\n\n try:\n subprocess.check_output(\n [\"patchelf\", \"--set-interpreter\", correct_interpreter, fname])\n except subprocess.CalledProcessError as reason:\n print(\"warning: failed to call patchelf:\", reason)\n return",
"def get_version():\n version_file = Path(__file__).resolve().parent / \"clinker\" / \"__init__.py\"\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read_text(), re.M\n )\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Failed to find version string\")",
"def get_library_name(self, op):\n if op.startswith(\"/\"):\n return op\n # Check if the library is specified verbatim. If yes, no need to expand.\n if re.match(r'lib.+\\.so(\\..*)?', op):\n return op\n libname = \"lib%s.so\" % (op)\n # Shared object may be linker script, if so, it will tell actual shared object.\n for ii in self.__library_directories:\n current_libname = locate(ii, libname)\n if current_libname and file_is_ascii_text(current_libname):\n fd = open(current_libname, \"r\")\n match = re.search(r'GROUP\\s*\\(\\s*(\\S+)\\s+', fd.read(), re.MULTILINE)\n fd.close()\n if match:\n ret = os.path.basename(match.group(1))\n if is_verbose():\n print(\"Using shared library '%s' instead of '%s'.\" % (ret, libname))\n return ret\n return libname",
"def lightLinkPath(self):\n\t\treturn fl.File( self._path + '/lights.data' )",
"def _make_lib_file_symbolic_links(self):\n so_file_dict = {\n 'rpmio': {\n 'sym_src_dir': self.rpm.lib_dir,\n 'sym_dst_dir': 'rpmio/.libs',\n 'require': True,\n },\n 'rpm': {\n 'sym_src_dir': self.rpm.lib_dir,\n 'sym_dst_dir': 'lib/.libs',\n 'require': True,\n },\n 'rpmbuild': {\n 'sym_src_dir': self.rpm.lib_dir,\n 'sym_dst_dir': 'build/.libs',\n 'require': True,\n },\n 'rpmsign': {\n 'sym_src_dir': self.rpm.lib_dir,\n 'sym_dst_dir': 'sign/.libs',\n },\n }\n\n self._update_sym_src_dirs_conditionally(so_file_dict)\n\n for name in so_file_dict:\n so_dict = so_file_dict[name]\n pattern = 'lib{0}.so*'.format(name)\n so_files = Cmd.find(so_dict['sym_src_dir'], pattern)\n if not so_files:\n is_required = so_dict.get('require', False)\n if not is_required:\n message_format = (\n \"Skip creating symbolic link of \"\n \"not existing so file '{0}'\"\n )\n Log.debug(message_format.format(name))\n continue\n\n message = 'so file pattern {0} not found at {1}'.format(\n pattern, so_dict['sym_src_dir']\n )\n raise InstallError(message)\n sym_dst_dir = os.path.abspath('../{0}'.format(\n so_dict['sym_dst_dir']))\n if not os.path.isdir(sym_dst_dir):\n Cmd.mkdir_p(sym_dst_dir)\n\n cmd = 'ln -sf {0} {1}/lib{2}.so'.format(so_files[0],\n sym_dst_dir,\n name)\n Cmd.sh_e(cmd)",
"def _spdr_engine_location():\n return os.path.realpath(__file__).rpartition('/')[0]",
"def get_sharedlib_suffix():\n suffix = sysconfig.get_config_var(\"SHLIB_SUFFIX\")\n if suffix is None:\n if is_windows():\n suffix = \".dll\"\n else:\n suffix = \".so\"\n if is_apple():\n # sysconfig.get_config_var(\"SHLIB_SUFFIX\") can be \".so\" in macOS.\n # Let's not use the value from sysconfig.\n suffix = \".dylib\"\n return suffix",
"def _get_lsp_config_path_select_primary(self):\n return self.__lsp_config_path_select_primary",
"def symlink():\n releases()\n env.current_path = '/root/your_project/current'\n run('rm %(current_path)s' % env)\n run('ln -s %(current_release)s %(current_path)s' % env)",
"def link(self):\n \n self.__enter__()\n return self.stable_path",
"def get_httpstls_mount():\n if 'dex_https_tlsCert' in DEFINES:\n return os.path.dirname(DEFINES['dex_https_tlsCert'])\n # The default matches oic-auth-apps flucd manifest defaults\n return DEFAULT_HTTPSTLS_MOUNT",
"def find_real_dso_path(dso_path_in_record_file, binary_cache_path):\n if dso_path_in_record_file[0] != '/' or dso_path_in_record_file == '//anon':\n return None\n if binary_cache_path:\n tmp_path = os.path.join(binary_cache_path, dso_path_in_record_file[1:])\n if os.path.isfile(tmp_path):\n return tmp_path\n if os.path.isfile(dso_path_in_record_file):\n return dso_path_in_record_file\n return None"
] | [
"0.66215503",
"0.6279569",
"0.5894043",
"0.5856724",
"0.5856588",
"0.5700655",
"0.56694955",
"0.55614096",
"0.5547654",
"0.55200994",
"0.5495592",
"0.54438263",
"0.5425426",
"0.5324425",
"0.5300233",
"0.5298378",
"0.52957606",
"0.5281993",
"0.52785194",
"0.5256666",
"0.5255568",
"0.5245657",
"0.5240401",
"0.5232594",
"0.5230197",
"0.52212644",
"0.52119803",
"0.5211712",
"0.51871836",
"0.5169672"
] | 0.6632493 | 0 |
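A hedged sketch of how such an interpreter symlink might be created or retargeted. The point_ldso_at helper and the example target path are assumptions for illustration; the source only defines the path helper itself.

import os

def ld_linux_path(root):
    # Same helper as the retrieved document above.
    return os.path.join(root, 'lib', 'ld-linux-xpkg.so')

def point_ldso_at(root, real_ldso):
    # Retarget the symlink in one rename so binaries keep a stable interpreter path.
    link = ld_linux_path(root)
    tmp = link + '.tmp'
    if os.path.lexists(tmp):
        os.remove(tmp)
    os.symlink(real_ldso, tmp)   # e.g. '/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2' (hypothetical target)
    os.replace(tmp, link)        # rename over the old link in a single step

# point_ldso_at('/opt/xpkg/root', '/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2')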
Diagonal distance: h_diagonal(n) = min(abs(n.x - goal.x), abs(n.y - goal.y)); h_straight(n) = (abs(n.x - goal.x) + abs(n.y - goal.y)); h(n) = D_diagonal * h_diagonal(n) + D_straight * (h_straight(n) - 2 * h_diagonal(n)) | def heuristic_cost_estimate(start, goal,d_diagnoal,d_straight):
start_x = start.x
start_y = start.y
goal_x = goal.x
goal_y = goal.y
h_diagonal = min(np.abs(start_x - goal_x),np.abs(start_y - goal_y))
h_straight = np.abs(start_x - goal_x) + np.abs(start_y - goal_y)
h = d_diagnoal * h_diagonal + d_straight * (h_straight - 2 * h_diagonal)
return h | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dist_between(current, neighbor,d_diagnoal,d_straight):\n start_x = current.x\n start_y = current.y\n goal_x = neighbor.x\n goal_y = neighbor.y\n\n h_diagonal = min(np.abs(start_x - goal_x),np.abs(start_y - goal_y))\n h_straight = np.abs(start_x - goal_x) + np.abs(start_y - goal_y)\n h = d_diagnoal * h_diagonal + d_straight * (h_straight - 2 * h_diagonal)\n return h",
"def share_diagonal(x0, y0, x1, y1):\n dy = abs((x0-x1)/(y0-y1))\n return dy",
"def hyperdiagonal(coords):\n \n mini = coords.min(axis=0)\n maxi = coords.max(axis=0)\n dist = (maxi - mini)**2\n dist = np.sqrt(dist.sum())\n return dist",
"def h(p1, p2): # returns diagonal distance\n\n\tx1, y1 = p1\n\tx2, y2 = p2\n\tdx = abs(x1 - x2)\n\tdy = abs(y1 - y2)\n\td = 1 # distance between spots\n\td2 = 1 # diagonal distance between spots\n\n\th = d * (dx + dy) + (d2 - 2 * d) * min(dx, dy)\n\treturn h",
"def diagonal(t, x, y):\n from math import atan2, sqrt, pi\n angle = atan2(y, x) * 180 / pi\n dist = sqrt(x**2 + y**2)\n lt(t, angle)\n fdbk(t, dist)\n rt(t, angle)",
"def diagonal_distance(pa : Tuple[int, int], pb : Tuple[int, int]) -> int:\n (ax, ay) = pa\n (bx, by) = pb\n xdist = abs(ax - bx)\n ydist = abs(ay - by)\n dist = min(xdist, ydist) + abs(xdist - ydist)\n return dist",
"def share_diagonal(x0, y0, x1, y1):\n dy = abs(y1 - y0) # Calc the absolute y distance\n dx = abs(x1 - x0) # CXalc the absolute x distance\n return dx == dy # They clash if dx == dy",
"def share_diagonal(x0, y0, x1, y1):\r\n dy = abs(y1 - y0) # Calc the absolute y distance\r\n dx = abs(x1 - x0) # CXalc the absolute x distance\r\n return dx == dy # They clash if dx == dy\r",
"def share_diagonal(x0, y0, x1, y1):\r\n dy = abs(y1 - y0) # Calc the absolute y distance\r\n dx = abs(x1 - x0) # CXalc the absolute x distance\r\n return dx == dy # They clash if dx == dy\r",
"def share_diagonal(x0, y0, x1, y1):\n dy = abs(y1 - y0) # absolute y distance\n dx = abs(x1 - x0) # absolute x distance\n return dx == dy # they clash if dx == dy, share diagonal",
"def diagonal(step='Metropolis', iters=5000):\n X = mc.Uniform('X', lower=-1., upper=1., value=[0., 0.])\n\n @mc.potential\n def near_diag(X=X):\n if abs(X[0] - X[1]) < .1:\n return 0\n else:\n return -inf\n\n mod = setup_and_sample(vars(), step, iters)\n mod.shape = pl.array([[-1,-1], [-1,-.9], [.9,1], [1,1], [1,.9], [-.9,-1], [-1,-1]])\n mod.true_mean = [0,0]\n mod.true_iqr = ['(-.5,.5)', '(-.5,5)']\n return mod",
"def mrr_diagonal(geom: base.BaseGeometry) -> float:\n if len(geom) <= 1:\n return 0\n if len(geom) == 2:\n return geo.distance( # type: ignore\n lat1=geom[0].y, lon1=geom[0].x, lat2=geom[1].y, lon2=geom[1].x\n )\n mrr = LineString(geom).minimum_rotated_rectangle\n if isinstance(mrr, Point):\n return 0\n try: # in most cases, mrr is a Polygon\n x, y = mrr.exterior.coords.xy\n except AttributeError: # then it should be a LineString\n p0, p1 = mrr.coords[0], mrr.coords[-1]\n return geo.distance(p0[1], p0[0], p1[1], p1[0]) # type: ignore\n return geo.distance(y[0], x[0], y[2], x[2]) # type: ignore",
"def share_diagonal(x0,y0,x1,y1):\r\n return abs(x0 - x1) == abs(y0 - y1)",
"def diagonal(nd):\n assert nd.ndim == 2, \"diagonal requires 2 dimensional ndarray\"\n shape_min = hl.min(nd.shape[0], nd.shape[1])\n return hl.nd.array(hl.range(hl.int32(shape_min)).map(lambda i: nd[i, i]))",
"def diag(B,s,H,ia,ib,ic,chia,chic):\n # Get a guess for the ground state based on the old MPS\n d = B[0].shape[0]\n theta0 = np.tensordot(np.diag(s[ia]),np.tensordot(B[ib],B[ic],axes=(2,1)),axes=(1,1))\n theta0 = np.reshape(np.transpose(theta0,(1,0,2,3)),((chia*chic)*(d**2)))\n\n # Diagonalize Hamiltonian\n e0,v0 = arp.eigsh(H,k=1,which='SA',return_eigenvectors=True,v0=theta0,ncv=20)\n \n return np.reshape(v0.squeeze(),(d*chia,d*chic)),e0",
"def h(self,node):\n \"*** YOUR CODE HERE ***\"\n dist_arr = []\n for goal in self.goals:\n dist_arr.append(manhattan_distance_with_heading(node.state, goal))\n return min(dist_arr)",
"def diagonal(self):\r\n return math.sqrt((self.width ** 2) + (self.height ** 2))",
"def diagonals(self):\n left_top_shifts = map(lambda i: (-(i + 1), -(i + 1)), range(min(\n self.left_distance, self.top_distance)))\n left_bottom_shifts = map(lambda i: (-(i + 1), +(i + 1)), range(min(\n self.left_distance, self.bottom_distance)))\n right_top_shifts = map(lambda i: (+(i + 1), -(i + 1)), range(min(\n self.right_distance, self.top_distance)))\n right_bottom_shifts = map(lambda i: (+(i + 1), +(i + 1)), range(min(\n self.right_distance, self.bottom_distance)))\n return set(chain(\n left_top_shifts, left_bottom_shifts,\n right_top_shifts, right_bottom_shifts))",
"def create_diags(self):\n\n num_diags = self.rows + self.cols - 2\n diag_counts = [0 for i in range(num_diags)]\n for diag_index in range(num_diags):\n first = (0,0)\n second = (0,0)\n if diag_index < self.rows - 1:\n first = (diag_index+1,0)\n elif diag_index == self.rows - 1:\n first = (diag_index,0)\n else:\n first = (self.rows-1,diag_index-self.rows+1)\n if diag_index < self.cols - 1:\n second = (0,diag_index+1)\n elif diag_index == self.cols - 1:\n second = (0,diag_index)\n else:\n second = (diag_index-self.cols+1,self.cols-1)\n #print str(first) + \" \" + str(second)\n diag_counts[diag_index] = dist_points(first,second) \n \n \"\"\"holds the sum of edges in diagonals previous to a given edge\"\"\"\n diag_full = [0 for i in range(num_diags + 1)]\n for i in range(1,num_diags+1):\n diag_full[i] = diag_full[i-1] + diag_counts[i-1]\n\n #print diag_counts\n #print diag_full\n return diag_full",
"def manhattan_distance(state, goal):\r\n hval = 0\r\n for index, value in enumerate(state):\r\n if value == 0: # Underestimate by excluding calculation of the blank tile\r\n continue\r\n abs_x = abs((co_ords[index])[0] - (co_ords[goal.index(value)])[0])\r\n abs_y = abs((co_ords[index])[1] - (co_ords[goal.index(value)])[1])\r\n hval += abs_x + abs_y\r\n return hval",
"def heuristic(current, goal):\r\n # First tried manhattan distance but wasn't good enough so did direct distance which makes sense since the robot came move diagonally \r\n #return abs(current[0]-goal[0])+abs(current[1]-goal[1])\r\n return math.sqrt((current[0]-goal[0])**2+(current[1]-goal[1])**2)",
"def h(self,node):\n \"*** YOUR CODE HERE ***\"\n dist_arr = [] #Initialize Array\n for goal in self.goals: # Iterate through Goals\n dist_arr.append(manhattan_distance_with_heading(node.state, goal)) # Add distance between node and goal\n return min(dist_arr) # Return minimum",
"def heuristic_1(node):\n x_node, y_node = node.state.location()\n goals = node.state.grid.components.white_walkers\n goals.append(node.state.grid.components.dragon_stone)\n distance = [np.sqrt((x_node - x)**2 + (y_node - y)**2) for x, y in goals]\n return distance[np.argmin(distance)]",
"def win_diagonal(playerid):\n if board[0][0] is playerid and board[1][1] is playerid and board[2][2] is playerid:\n return (True, \"Diagonal Left-up to Right-down\")\n\n if board[0][2] is playerid and board[1][1] is playerid and board[2][0] is playerid:\n return (True, \"Diagonal Right-up to Left-down\")\n\n return False",
"def addDiagonal(self, orig, dest, test_vertex=None):\n if orig is None or dest is None:\n return None\n\n if test_vertex is not None:\n if orig.boundary_chain == \"lowest\":\n if dest.boundary_chain == \"left\":\n v_i = HalfEdge(orig, dest).getOriginVector()\n v_j = HalfEdge(orig, test_vertex).getOriginVector()\n else:\n v_i = HalfEdge(orig, test_vertex).getOriginVector()\n v_j = HalfEdge(orig, dest).getOriginVector()\n \n elif dest.boundary_chain == \"highest\":\n if orig.boundary_chain == \"left\":\n v_i = HalfEdge(orig, dest).getOriginVector()\n v_j = HalfEdge(orig, test_vertex).getOriginVector()\n else:\n v_i = HalfEdge(orig, test_vertex).getOriginVector()\n v_j = HalfEdge(orig, dest).getOriginVector()\n\n else:\n if orig.boundary_chain == \"left\":\n v_i = HalfEdge(orig, dest).getOriginVector()\n v_j = HalfEdge(orig, test_vertex).getOriginVector()\n else:\n v_i = HalfEdge(orig, test_vertex).getOriginVector()\n v_j = HalfEdge(orig, dest).getOriginVector()\n \n if det(v_i, v_j) <= 0:\n return None\n\n \n diagonal = self.addEdge(orig, dest, edge_type=\"diagonal\")\n \n return diagonal",
"def is_diagonal(i, j):\n return 1 if i == j else 0",
"def is_diagonal(i, j):\n return 1 if i == j else 0",
"def diagonalIntersection(self):\n l1 = self.diagonalAtPoint(idx=0)\n l2 = self.diagonalAtPoint(idx=1)\n return l1.intersectionWith(l2)",
"def diagonal(self):\n return self.rep.diagonal()",
"def directed_HD(A,B):\n # get coordinates\n coords_A = np.vstack(np.where(A)).transpose()\n coords_B = np.vstack(np.where(B)).transpose()\n if (len(coords_A) == 0) and (len(coords_B)==0):\n return 1.\n if (len(coords_A) == 0) or (len(coords_B)==0):\n return 1.\n\n #normalize by max possible distance\n max_distance = float(np.sqrt(np.sum(np.asarray(A.shape)**2)))\n\n # calculate all distances between points in A and B\n min_dist = []\n for ii in np.arange(coords_A.shape[0]):\n min_dist.append(np.min(np.sqrt(np.sum((coords_B-coords_A[ii,:])**2, axis=1))))\n \n return min_dist"
] | [
"0.8049952",
"0.6934964",
"0.69334024",
"0.6747959",
"0.644159",
"0.62714773",
"0.6160783",
"0.6144977",
"0.6144977",
"0.61420053",
"0.6101649",
"0.6096123",
"0.60647726",
"0.6007809",
"0.59770143",
"0.5892348",
"0.5882317",
"0.5860657",
"0.5833256",
"0.58259183",
"0.57475424",
"0.57441145",
"0.5723509",
"0.5720337",
"0.5714258",
"0.5713492",
"0.5713492",
"0.57130444",
"0.5703405",
"0.56642616"
] | 0.7815696 | 1 |
Diagonal distance: h_diagonal(n) = min(abs(n.x - goal.x), abs(n.y - goal.y)); h_straight(n) = (abs(n.x - goal.x) + abs(n.y - goal.y)); h(n) = D_diagonal * h_diagonal(n) + D_straight * (h_straight(n) - 2 * h_diagonal(n)) | def dist_between(current, neighbor,d_diagnoal,d_straight):
start_x = current.x
start_y = current.y
goal_x = neighbor.x
goal_y = neighbor.y
h_diagonal = min(np.abs(start_x - goal_x),np.abs(start_y - goal_y))
h_straight = np.abs(start_x - goal_x) + np.abs(start_y - goal_y)
h = d_diagnoal * h_diagonal + d_straight * (h_straight - 2 * h_diagonal)
return h | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def heuristic_cost_estimate(start, goal,d_diagnoal,d_straight):\n start_x = start.x\n start_y = start.y\n goal_x = goal.x\n goal_y = goal.y\n\n h_diagonal = min(np.abs(start_x - goal_x),np.abs(start_y - goal_y))\n h_straight = np.abs(start_x - goal_x) + np.abs(start_y - goal_y)\n h = d_diagnoal * h_diagonal + d_straight * (h_straight - 2 * h_diagonal)\n return h",
"def share_diagonal(x0, y0, x1, y1):\n dy = abs((x0-x1)/(y0-y1))\n return dy",
"def hyperdiagonal(coords):\n \n mini = coords.min(axis=0)\n maxi = coords.max(axis=0)\n dist = (maxi - mini)**2\n dist = np.sqrt(dist.sum())\n return dist",
"def h(p1, p2): # returns diagonal distance\n\n\tx1, y1 = p1\n\tx2, y2 = p2\n\tdx = abs(x1 - x2)\n\tdy = abs(y1 - y2)\n\td = 1 # distance between spots\n\td2 = 1 # diagonal distance between spots\n\n\th = d * (dx + dy) + (d2 - 2 * d) * min(dx, dy)\n\treturn h",
"def diagonal(t, x, y):\n from math import atan2, sqrt, pi\n angle = atan2(y, x) * 180 / pi\n dist = sqrt(x**2 + y**2)\n lt(t, angle)\n fdbk(t, dist)\n rt(t, angle)",
"def diagonal_distance(pa : Tuple[int, int], pb : Tuple[int, int]) -> int:\n (ax, ay) = pa\n (bx, by) = pb\n xdist = abs(ax - bx)\n ydist = abs(ay - by)\n dist = min(xdist, ydist) + abs(xdist - ydist)\n return dist",
"def share_diagonal(x0, y0, x1, y1):\n dy = abs(y1 - y0) # Calc the absolute y distance\n dx = abs(x1 - x0) # CXalc the absolute x distance\n return dx == dy # They clash if dx == dy",
"def share_diagonal(x0, y0, x1, y1):\r\n dy = abs(y1 - y0) # Calc the absolute y distance\r\n dx = abs(x1 - x0) # CXalc the absolute x distance\r\n return dx == dy # They clash if dx == dy\r",
"def share_diagonal(x0, y0, x1, y1):\r\n dy = abs(y1 - y0) # Calc the absolute y distance\r\n dx = abs(x1 - x0) # CXalc the absolute x distance\r\n return dx == dy # They clash if dx == dy\r",
"def share_diagonal(x0, y0, x1, y1):\n dy = abs(y1 - y0) # absolute y distance\n dx = abs(x1 - x0) # absolute x distance\n return dx == dy # they clash if dx == dy, share diagonal",
"def diagonal(step='Metropolis', iters=5000):\n X = mc.Uniform('X', lower=-1., upper=1., value=[0., 0.])\n\n @mc.potential\n def near_diag(X=X):\n if abs(X[0] - X[1]) < .1:\n return 0\n else:\n return -inf\n\n mod = setup_and_sample(vars(), step, iters)\n mod.shape = pl.array([[-1,-1], [-1,-.9], [.9,1], [1,1], [1,.9], [-.9,-1], [-1,-1]])\n mod.true_mean = [0,0]\n mod.true_iqr = ['(-.5,.5)', '(-.5,5)']\n return mod",
"def mrr_diagonal(geom: base.BaseGeometry) -> float:\n if len(geom) <= 1:\n return 0\n if len(geom) == 2:\n return geo.distance( # type: ignore\n lat1=geom[0].y, lon1=geom[0].x, lat2=geom[1].y, lon2=geom[1].x\n )\n mrr = LineString(geom).minimum_rotated_rectangle\n if isinstance(mrr, Point):\n return 0\n try: # in most cases, mrr is a Polygon\n x, y = mrr.exterior.coords.xy\n except AttributeError: # then it should be a LineString\n p0, p1 = mrr.coords[0], mrr.coords[-1]\n return geo.distance(p0[1], p0[0], p1[1], p1[0]) # type: ignore\n return geo.distance(y[0], x[0], y[2], x[2]) # type: ignore",
"def share_diagonal(x0,y0,x1,y1):\r\n return abs(x0 - x1) == abs(y0 - y1)",
"def diagonal(nd):\n assert nd.ndim == 2, \"diagonal requires 2 dimensional ndarray\"\n shape_min = hl.min(nd.shape[0], nd.shape[1])\n return hl.nd.array(hl.range(hl.int32(shape_min)).map(lambda i: nd[i, i]))",
"def diag(B,s,H,ia,ib,ic,chia,chic):\n # Get a guess for the ground state based on the old MPS\n d = B[0].shape[0]\n theta0 = np.tensordot(np.diag(s[ia]),np.tensordot(B[ib],B[ic],axes=(2,1)),axes=(1,1))\n theta0 = np.reshape(np.transpose(theta0,(1,0,2,3)),((chia*chic)*(d**2)))\n\n # Diagonalize Hamiltonian\n e0,v0 = arp.eigsh(H,k=1,which='SA',return_eigenvectors=True,v0=theta0,ncv=20)\n \n return np.reshape(v0.squeeze(),(d*chia,d*chic)),e0",
"def h(self,node):\n \"*** YOUR CODE HERE ***\"\n dist_arr = []\n for goal in self.goals:\n dist_arr.append(manhattan_distance_with_heading(node.state, goal))\n return min(dist_arr)",
"def diagonal(self):\r\n return math.sqrt((self.width ** 2) + (self.height ** 2))",
"def diagonals(self):\n left_top_shifts = map(lambda i: (-(i + 1), -(i + 1)), range(min(\n self.left_distance, self.top_distance)))\n left_bottom_shifts = map(lambda i: (-(i + 1), +(i + 1)), range(min(\n self.left_distance, self.bottom_distance)))\n right_top_shifts = map(lambda i: (+(i + 1), -(i + 1)), range(min(\n self.right_distance, self.top_distance)))\n right_bottom_shifts = map(lambda i: (+(i + 1), +(i + 1)), range(min(\n self.right_distance, self.bottom_distance)))\n return set(chain(\n left_top_shifts, left_bottom_shifts,\n right_top_shifts, right_bottom_shifts))",
"def create_diags(self):\n\n num_diags = self.rows + self.cols - 2\n diag_counts = [0 for i in range(num_diags)]\n for diag_index in range(num_diags):\n first = (0,0)\n second = (0,0)\n if diag_index < self.rows - 1:\n first = (diag_index+1,0)\n elif diag_index == self.rows - 1:\n first = (diag_index,0)\n else:\n first = (self.rows-1,diag_index-self.rows+1)\n if diag_index < self.cols - 1:\n second = (0,diag_index+1)\n elif diag_index == self.cols - 1:\n second = (0,diag_index)\n else:\n second = (diag_index-self.cols+1,self.cols-1)\n #print str(first) + \" \" + str(second)\n diag_counts[diag_index] = dist_points(first,second) \n \n \"\"\"holds the sum of edges in diagonals previous to a given edge\"\"\"\n diag_full = [0 for i in range(num_diags + 1)]\n for i in range(1,num_diags+1):\n diag_full[i] = diag_full[i-1] + diag_counts[i-1]\n\n #print diag_counts\n #print diag_full\n return diag_full",
"def manhattan_distance(state, goal):\r\n hval = 0\r\n for index, value in enumerate(state):\r\n if value == 0: # Underestimate by excluding calculation of the blank tile\r\n continue\r\n abs_x = abs((co_ords[index])[0] - (co_ords[goal.index(value)])[0])\r\n abs_y = abs((co_ords[index])[1] - (co_ords[goal.index(value)])[1])\r\n hval += abs_x + abs_y\r\n return hval",
"def heuristic(current, goal):\r\n # First tried manhattan distance but wasn't good enough so did direct distance which makes sense since the robot came move diagonally \r\n #return abs(current[0]-goal[0])+abs(current[1]-goal[1])\r\n return math.sqrt((current[0]-goal[0])**2+(current[1]-goal[1])**2)",
"def h(self,node):\n \"*** YOUR CODE HERE ***\"\n dist_arr = [] #Initialize Array\n for goal in self.goals: # Iterate through Goals\n dist_arr.append(manhattan_distance_with_heading(node.state, goal)) # Add distance between node and goal\n return min(dist_arr) # Return minimum",
"def heuristic_1(node):\n x_node, y_node = node.state.location()\n goals = node.state.grid.components.white_walkers\n goals.append(node.state.grid.components.dragon_stone)\n distance = [np.sqrt((x_node - x)**2 + (y_node - y)**2) for x, y in goals]\n return distance[np.argmin(distance)]",
"def win_diagonal(playerid):\n if board[0][0] is playerid and board[1][1] is playerid and board[2][2] is playerid:\n return (True, \"Diagonal Left-up to Right-down\")\n\n if board[0][2] is playerid and board[1][1] is playerid and board[2][0] is playerid:\n return (True, \"Diagonal Right-up to Left-down\")\n\n return False",
"def addDiagonal(self, orig, dest, test_vertex=None):\n if orig is None or dest is None:\n return None\n\n if test_vertex is not None:\n if orig.boundary_chain == \"lowest\":\n if dest.boundary_chain == \"left\":\n v_i = HalfEdge(orig, dest).getOriginVector()\n v_j = HalfEdge(orig, test_vertex).getOriginVector()\n else:\n v_i = HalfEdge(orig, test_vertex).getOriginVector()\n v_j = HalfEdge(orig, dest).getOriginVector()\n \n elif dest.boundary_chain == \"highest\":\n if orig.boundary_chain == \"left\":\n v_i = HalfEdge(orig, dest).getOriginVector()\n v_j = HalfEdge(orig, test_vertex).getOriginVector()\n else:\n v_i = HalfEdge(orig, test_vertex).getOriginVector()\n v_j = HalfEdge(orig, dest).getOriginVector()\n\n else:\n if orig.boundary_chain == \"left\":\n v_i = HalfEdge(orig, dest).getOriginVector()\n v_j = HalfEdge(orig, test_vertex).getOriginVector()\n else:\n v_i = HalfEdge(orig, test_vertex).getOriginVector()\n v_j = HalfEdge(orig, dest).getOriginVector()\n \n if det(v_i, v_j) <= 0:\n return None\n\n \n diagonal = self.addEdge(orig, dest, edge_type=\"diagonal\")\n \n return diagonal",
"def is_diagonal(i, j):\n return 1 if i == j else 0",
"def is_diagonal(i, j):\n return 1 if i == j else 0",
"def diagonalIntersection(self):\n l1 = self.diagonalAtPoint(idx=0)\n l2 = self.diagonalAtPoint(idx=1)\n return l1.intersectionWith(l2)",
"def diagonal(self):\n return self.rep.diagonal()",
"def directed_HD(A,B):\n # get coordinates\n coords_A = np.vstack(np.where(A)).transpose()\n coords_B = np.vstack(np.where(B)).transpose()\n if (len(coords_A) == 0) and (len(coords_B)==0):\n return 1.\n if (len(coords_A) == 0) or (len(coords_B)==0):\n return 1.\n\n #normalize by max possible distance\n max_distance = float(np.sqrt(np.sum(np.asarray(A.shape)**2)))\n\n # calculate all distances between points in A and B\n min_dist = []\n for ii in np.arange(coords_A.shape[0]):\n min_dist.append(np.min(np.sqrt(np.sum((coords_B-coords_A[ii,:])**2, axis=1))))\n \n return min_dist"
] | [
"0.7815963",
"0.6935118",
"0.69333106",
"0.6748271",
"0.6441967",
"0.6272037",
"0.616098",
"0.614516",
"0.614516",
"0.6142244",
"0.61005026",
"0.6096975",
"0.60649806",
"0.60074985",
"0.59759593",
"0.58921903",
"0.5882251",
"0.58594304",
"0.58325016",
"0.5826039",
"0.57475173",
"0.5744074",
"0.57245",
"0.5719648",
"0.5714199",
"0.5713641",
"0.5713641",
"0.5713254",
"0.57029486",
"0.5665337"
] | 0.80509466 | 0 |
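A quick worked instance of the diagonal-distance formula shared by the two entries above. The 10/14 step costs are an assumption chosen for illustration; the retrieved functions take d_straight and d_diagnoal as parameters instead.

# start (1, 2), goal (5, 4); assumed costs: straight = 10, diagonal = 14
dx, dy = abs(1 - 5), abs(2 - 4)                    # 4, 2
h_diagonal = min(dx, dy)                           # 2 diagonal steps
h_straight = dx + dy                               # 6
h = 14 * h_diagonal + 10 * (h_straight - 2 * h_diagonal)
print(h)                                           # 48: two diagonal moves plus two straight moves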
convert the path in grid to real, e.g. 21 -> 2.15 (sx = ix * reso + reso/2) | def convertGridPathToReal(pathInGrid, sx, sy, gx, gy, grid_reso = 0.1):
pathInReal = (pathInGrid * grid_reso + grid_reso / 2)
stepNum = pathInReal.shape[1]
# Replace head and tail
pathInReal[:, 0] = [sx, sy]
pathInReal[:, stepNum - 1] = [gx, gy]
return pathInReal | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fifteen():\r\n\r\n currentcell = 1.0\r\n cellpaths = 2.0\r\n \r\n while currentcell < 20.0:\r\n currentcell += 1.0\r\n cellpaths = cellpaths * (4.0 - 2.0/currentcell)\r\n \r\n return cellpaths",
"def gen_grids(self):\n self.dx = self.grid_width / self.grid_resol\n self.dk = 2 * np.pi/self.grid_width\n self.grid_x_shifted = -self.grid_width/2 + self.dx * np.arange(0, self.grid_resol)\n self.grid_x = self.grid_x_shifted + self.grid_center\n self.grid_k = - (np.pi * self.grid_resol)/self.grid_width + self.dk * np.arange(0, self.grid_resol)\n self.grid_k = np.roll(self.grid_k, int((self.grid_resol)/2))\n self.grid_kin = np.square(self.h)/ (2*self.m) * np.square(self.grid_k)",
"def cell_path(self,i):\n cell_nodes = self.cell_to_nodes(i)\n cell_codes = np.ones(len(cell_nodes),np.int32)*Path.LINETO\n cell_codes[0] = Path.MOVETO \n cell_codes[-1] = Path.CLOSEPOLY\n return Path(self.nodes['x'][cell_nodes])",
"def CalculatePaths(self):\n agrid = self.agrid \n self.apath = array([0 for y in range(self.T)], dtype=float)\n self.cpath = array([0 for y in range(self.T)], dtype=float)\n self.npath = array([0 for y in range(self.T)], dtype=float)\n # generate each generation's asset, consumption and labor supply forward\n for y in range(self.T-1): # y = 0, 1,..., 58\n self.apath[y+1] = max(0,interp1d(agrid, self.a[y], kind='cubic')(self.apath[y]))\n if y >= self.W:\n self.cpath[y], self.npath[y] = (1+self.r)*self.apath[y] + self.b - self.apath[y+1], 0\n else:\n self.cpath[y], self.npath[y] = self.solve(self.apath[y], self.apath[y+1])\n self.upath[y] = self.util(self.cpath[y], self.npath[y])\n # the oldest generation's consumption and labor supply\n self.cpath[self.T-1], self.npath[self.T-1] = (1+self.r)*self.apath[self.T-1]+self.b, 0\n self.upath[self.T-1] = self.util(self.cpath[self.T-1], self.npath[self.T-1])",
"def adjust_grid(route, grid):\n\n for location in route:\n # If the position in the route is the end destination, dont make it a 1. \n if location == route[-1]:\n grid[location[0]][location[1]][location[2]] = 0\n\n # Else if the location on the grid is not a 1, make it one.\n elif grid[location[0]][location[1]][location[2]] == 0 or grid[location[0]][location[1]][location[2]] == 'x' or grid[location[0]][location[1]][location[2]] == 'y':\n grid[location[0]][location[1]][location[2]] = 1\n else:\n continue\n\n return grid",
"def path_convert(self):\n pub_path = Exp_msg()\n for i in self.path:\n epoint = Cordi()\n (epoint.x, epoint.y) = i\n pub_path.bliss.append(epoint)\n return(pub_path)",
"def create_grid(self):\n\n # initial point of the path\n self.p0 = np.array([self.start_x, self.start_y])\n self.pf = np.array([self.end_x, self.end_y]) # final point of the path\n\n self.l1 = np.linalg.norm(np.subtract(self.pf, self.p0))\n self.l2 = 0.2 * self.l1\n\n # we define the 2 unit vectors self.d1 and self.d2 in which directions we are moving along\n # then we apply to them the size of the desired displacement\n self.d1 = np.subtract(self.pf, self.p0) / \\\n (np.linalg.norm(np.subtract(self.pf, self.p0)))\n\n self.d2 = np.array([-self.d1[1], self.d1[0]])\n self.d2 = self.d2/(np.linalg.norm(self.d2))\n\n # we create our grid moving with self.d1 and self.d2\n # each point of the grid has the following properties:\n # position, distance to self.d1, distance to self.pf, velocity\n n_displacements_2 = 0\n self.lines_list = []\n\n # variables for making easier the plotting afterwards\n self.x_list_grid = []\n self.y_list_grid = []\n time_n = 1\n\n while True:\n current_point = np.subtract(\n self.p0, self.d2*(n_displacements_2*self.displacement))\n current_length_2 = np.linalg.norm(\n np.subtract(current_point, self.p0))\n if current_length_2 > self.l2/2:\n if time_n == 2:\n break\n else:\n time_n += 1\n self.lines_list = list(reversed(self.lines_list))\n self.d2 = -1 * self.d2\n n_displacements_2 = 1\n current_point = np.subtract(\n self.p0, self.d2*(n_displacements_2*self.displacement))\n\n line_points = []\n self.x_list_grid.append(current_point[0])\n self.y_list_grid.append(current_point[1])\n\n # TODO: add fish real prediction\n line_points.append({\n \"position\": current_point.tolist(),\n \"distance_to_l1\": n_displacements_2*self.displacement,\n \"distance_to_pf\": np.linalg.norm(np.subtract(self.pf, current_point)),\n \"fish\": random.random()\n })\n\n initial_point_1 = current_point\n while True:\n current_point = np.sum(\n [current_point, self.d1 * self.displacement], axis=0)\n current_length_1 = np.linalg.norm(\n np.subtract(current_point, initial_point_1))\n if current_length_1 >= self.l1:\n current_point = self.pf - self.d2*n_displacements_2*self.displacement\n self.x_list_grid.append(current_point[0])\n self.y_list_grid.append(current_point[1])\n # TODO: add fish real prediction\n line_points.append({\n \"position\": current_point.tolist(),\n \"distance_to_l1\": n_displacements_2*self.displacement,\n \"distance_to_pf\": np.linalg.norm(np.subtract(self.pf, current_point)),\n \"fish\": random.random()\n })\n break\n\n self.x_list_grid.append(current_point[0])\n self.y_list_grid.append(current_point[1])\n\n # TODO: add fish real prediction\n line_points.append({\n \"position\": current_point.tolist(),\n \"distance_to_l1\": n_displacements_2*self.displacement,\n \"distance_to_pf\": np.linalg.norm(np.subtract(self.pf, current_point)),\n \"fish\": random.random()\n })\n\n self.lines_list.append(line_points)\n n_displacements_2 += 1",
"def draw_grid(grid):\n rows = grid.shape[0]\n cols = grid.shape[1]\n for row in range(rows):\n for col in range(cols):\n if grid[row, col] == 0: # empty\n sys.stdout.write(\" . \")\n elif grid[row, col] == 1: # path\n sys.stdout.write(\" X \")\n elif grid[row, col] == 2:\n sys.stdout.write(\" O \")\n else:\n sys.stdout.write(\" @ \")\n\n if col % cols == cols - 1:\n sys.stdout.write(\"\\n\")",
"def get_grid_size(self, ui, res_dir):\r\n print_it('determining grid size', PrintOpts.lvl1.value)\r\n self.sun.simple_clone()\r\n self.sun.clone.make_profile(PreSol.res_x.value, PreSol.res_y.value,\r\n self.init_force)\r\n self.planet.simple_clone()\r\n self.planet.clone.make_slave_to(self.sun.clone)\r\n\r\n init_displ = hertz_displ(self.sun.clone.e, self.planet.e,\r\n self.sun.clone.ny, self.planet.ny,\r\n self.sun.clone.r_hertz_x,\r\n self.sun.clone.r_hertz_y,\r\n self.planet.clone.r_hertz_x,\r\n self.planet.clone.r_hertz_y,\r\n self.sun.norm_forces[0])\r\n too_many_els_in_y = 1\r\n too_many_els_in_x = 1\r\n contact_width_y = 0.05\r\n contact_width_x = 0.05\r\n while too_many_els_in_y != 0 or \\\r\n too_many_els_in_x != 0:\r\n self.sun.clone.make_profile(self.sun.clone.res_x,\r\n self.sun.clone.res_y, self.init_force,\r\n contact_width=contact_width_y,\r\n contact_length=contact_width_x)\r\n self.planet.clone.make_slave_to(self.sun.clone)\r\n\r\n pressure, init_displ = \\\r\n pre_solve_half_space(self.sun.clone.profile,\r\n self.planet.clone.profile,\r\n self.sun.clone.x_axis,\r\n self.sun.clone.y_axis,\r\n self.sun.clone.res_x, self.sun.clone.res_y,\r\n self.sun.clone.delta_x,\r\n self.sun.clone.delta_y, self.sun.clone.e,\r\n self.planet.clone.e, self.sun.clone.ny,\r\n self.planet.clone.ny,\r\n self.sun.norm_forces[0],\r\n init_displ=init_displ, print_prog=False)\r\n\r\n pressure_els_y = sum(\r\n pressure[math.floor(self.sun.clone.res_y / 2), :] > 0)\r\n too_many_els_in_y = self.sun.clone.res_y - pressure_els_y - 2\r\n if too_many_els_in_y:\r\n contact_width_y += -np.sign(\r\n too_many_els_in_y) * contact_width_y / 25\r\n\r\n pressure_els_x = sum(\r\n pressure[:, math.floor(self.sun.clone.res_x / 2)] > 0)\r\n too_many_els_in_x = self.sun.clone.res_x - pressure_els_x - 2\r\n if too_many_els_in_x:\r\n contact_width_x += -np.sign(\r\n too_many_els_in_x) * contact_width_x / 25\r\n\r\n self.sun.make_profile(self.sun.res_x, self.sun.res_y, self.init_force,\r\n contact_width=contact_width_y,\r\n contact_length=contact_width_x)\r\n self.planet.make_slave_to(self.sun)\r\n return init_displ",
"def test_path7():\n path = [(0, 0, 1)]\n path += [\n [('A', 3, 0)],\n (0, 1, 1),\n [('A', 2, 0)],\n (np.pi/2, 1, 1),\n [('B',3,0)],\n (0, 1, 1),\n [('B',2,0)],\n (np.pi/2, 1, 1),\n [('C',3,0)],\n (0, 1, 1),\n [('C',2,0)],\n (np.pi/2, 1, 1),\n [('D', 3, 0)],\n (0, 1, 1),\n [('D', 2,0)],\n (np.pi/2, 1, 1),\n ] * 4\n execute_path(path,True)",
"def to_grid(point: np.array) -> np.array:\n return np.array((2.5, 2.5)) + point * 5",
"def generate_all_locations(grid, shape):",
"def grid_points(self):\n for i in range(self.rows):\n for j in range(self.cols):\n min_lat,max_lat,min_lon,max_lon = self.coords_to_min_max_lat_lon((i,j))\n if i == 0:\n print_gps(max_lat,max_lon,\"grid\")\n if j == 0:\n print_gps(max_lat,min_lon,\"grid\")\n if j == 0:\n print_gps(min_lat,min_lon,\"grid\")\n print_gps(min_lat,max_lon,\"grid\")",
"def create_grid(tree):\n\t\n\twp = tree['misc']['working precision'] \n\tndim = tree['eqns']['ndim']\n\tgrid = tree['grid']['size']\n\tgeom = tree['grid']['geom']\n\n\tnxgb, nygb, nzgb = grid['nxgb'], grid['nygb'], grid['nzgb'] \n\tLx , Ly , Lz = geom['Lx'] , geom['Ly'] , geom['Lz'] \n\n\tdmpi = tree['mpi']['dMpi']\n\tibeg,jbeg,kbeg = dmpi.ibeg,dmpi.jbeg,dmpi.kbeg\n\tiend,jend,kend = dmpi.iend,dmpi.jend,dmpi.kend\n\n\thlo = tree['num']['hlo'] \n\n\t# create domain\n\t# pt 1 2 n-1 n period\n\t# 0 L |\n\t# full domain is [0,L] but careful: |--o--|--o--| ... |--o--|--o--|--V\n\t# \\___________________________/ \n\t\n\tbc = tree['bc']['allbc']\n\n\tif ('i1' in bc) or ('imax' in bc):\n\t\tdx = Lx/cst(nxgb+2*hlo-1)\n\t\tx = np.linspace(cst(0.0),Lx,nxgb+2*hlo,dtype=wp)\n\telse:\t\n\t\tdx = Lx/cst(nxgb)\n\t\tx = np.arange(dx/cst(2.),Lx,dx,dtype=wp)\n\n\tif nygb > 1:\n\t\tif ('j1' in bc) or ('jmax' in bc):\n\t\t\tdy = Ly/cst(nygb+2*hlo-1)\n\t\t\ty = np.linspace(cst(0.0),Ly,nygb+2*hlo,dtype=wp)\n\t\telse:\t\t\n\t\t\tdy = Ly/cst(nygb)\n\t\t\ty = np.arange(dy/cst(2.),Ly,dy,dtype=wp)\n\telse:\n\t\tLy = cst(0.); y = []; dy = cst(0.)\n\t\t\n\tif nzgb > 1:\n\t\tif ('k1' in bc) or ('kmax' in bc):\n\t\t\tdz = Lz/cst(nzgb+2*hlo-1)\n\t\t\tz = np.linspace(cst(0.0),Lz,nzgb+2*hlo,dtype=wp)\n\t\telse:\t\t\t\n\t\t\tdz = Lz/cst(nzgb)\n\t\t\tz = np.arange(dz/cst(2.),Lz,dz,dtype=wp)\n\telse:\n\t\tLz = cst(0.); z = []; dz = cst(0.)\n\n\tgeom['dx'], geom['dy'], geom['dz'] = dx, dy, dz\n\tgeom['x'] , geom['y'] , geom['z'] = x , y , z\n\n\t# global iend ! ibeg iend ! ibeg\n\t# pt# n-1 n ! 1 2 n-1 n ! 1 2\n\t# ... |--o--|--o--|!|--o--|--o--| ... |--o--|--o--|!|--o--|--o--| ...\n\t# w/ hlo [<------------------------------------------------------->]\n\t# loc py ind 0 1 hlo hlo+n-1 n+2*hlo-1\n\t# glo py ind ibeg+hlo-1 iend+hlo-1\n\t\n\tif ndim == 3:\n\t\txloc = x[ibeg-1:iend] # without halos\n\t\tyloc = y[jbeg-1:jend]\n\t\tzloc = z[kbeg-1:kend]\n\t\txx,yy,zz = np.meshgrid(xloc,yloc,zloc,sparse=False,indexing='ij')\n\t\tgeom['xloc'],geom['yloc'], geom['zloc'] = xloc,yloc,zloc\n\telif ndim == 2:\n\t\txloc = x[ibeg-1:iend] # without halos\n\t\tyloc = y[jbeg-1:jend]\n\t\txx,yy = np.meshgrid(xloc,yloc,sparse=False,indexing='ij')\n\t\tgeom['xloc'],geom['yloc'] = xloc,yloc\n\telse:\n\t\txloc = x[ibeg-1:iend] # without halos\n\t\tgeom['xloc'] = xloc\n\treturn tree",
"def make_grid(self):\n\n\t\tinit_grid = (self.grid_width//2, self.grid_height//2)\n\t\tgrid_list = []\n\n\t\tfor i in range(self.canv_width//self.grid_width):\n\t\t\tfor j in range(self.canv_height//self.grid_height):\n\t\t\t\tif j == 0 or j%2 ==0:\n\t\t\t\t\tgrid_list.append((init_grid[0]+i*self.grid_width, init_grid[1]+j*self.grid_height))\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tgrid_list.append((grid_list[-1][0]+(self.grid_width//2), init_grid[1]+j*self.grid_height))\n\n\t\treturn grid_list",
"def grid_numbering(n, x_0, y_0, x_1, y_1):\n \n if n == 0:\n return \"\"\n\n arg = complex_number(x_0 + 0.5 - x_1, y_0 + 0.5 - y_1).argument()\n\n if arg >= 0 and arg < np.pi / 2: \n x = \"1\"\n x_1 += 2 ** (n - 2)\n y_1 += 2 ** (n - 2)\n elif arg >= np.pi / 2 and arg <= np.pi:\n x = \"2\"\n x_1 -= 2 ** (n - 2)\n y_1 += 2 ** (n - 2)\n elif arg < 0 and arg >= -np.pi / 2:\n x = \"4\"\n x_1 += 2 ** (n - 2)\n y_1 -= 2 ** (n - 2)\n else:\n x = \"3\"\n x_1 -= 2 ** (n - 2)\n y_1 -= 2 ** (n - 2)\n\n return str(x) + grid_numbering(n - 1, x_0, y_0, x_1, y_1)",
"def _build_grid(self):\n n = self.params['n']\n\n x_min, x_max = min(self.node[:, 0]), max(self.node[:, 0])\n y_min, y_max = min(self.node[:, 1]), max(self.node[:, 1])\n xv = np.linspace(x_min, x_max, num=n, endpoint=True)\n yv = np.linspace(y_min, y_max, num=n, endpoint=True)\n xg, yg = np.meshgrid(xv, yv, sparse=False, indexing='xy')\n\n return xg, yg",
"def idx_to_grid(n):\n\n x = n % MAX_Y\n y = int(n / MAX_X)\n return(x, y)",
"def _buildGridPoints(self):\n self.spacings = []\n for level in xrange(self.depth):\n levelSpacings = []\n refLevel = level + 1\n level = 2**level\n axisData = []\n for axis in self.size:\n spacing = axis / (level+1)\n levelSpacings.append(spacing)\n axisData.append([gridValue*spacing for gridValue in xrange(1, level+1)])\n pointList = [((i, j, k), np.array([axisData[0][i], axisData[1][j], axisData[2][k]]))\n for i in xrange(level)\n for j in xrange(level)\n for k in xrange(level)]\n self.grid[refLevel] = {point[0]: point[1] for point in pointList}\n self.spacings.append(levelSpacings)",
"def regex_grid(n):\n cx = 2 ** (n - 1)\n cy = 2 ** (n - 1)\n grid = [[grid_numbering(n, i , j, cx, cy) for i in range(2 ** n)] for j in range(2 ** n)]\n \n return grid",
"def reset_path(self):\n for i in self.grid:\n for y in i:\n y.g = 0\n y.h = 0\n y.f = 0\n y.parent = None\n y.visited = False",
"def make_maps_of_2x1_pix_coordinates (sp) : \n x_rhs = np.arange(sp.colsh)*sp.pixs + sp.pixw - sp.pixsh\n x_rhs[0] = sp.pixwh # set x-coordinate of the wide pixel \n x_arr = np.hstack([-x_rhs[::-1],x_rhs])\n\n y_arr = np.arange(sp.rows) * sp.pixs\n y_arr -= y_arr[-1]/2 # move origin to the center of array\n\n sp.x_map2x1, sp.y_map2x1 = np.meshgrid(x_arr, y_arr)",
"def easegrid(iopt, alat, alon, ascale):\n # ported from easegrid.m by JPB 21 Sept 2011\n pi2 = np.pi / 2.0\n dtr = pi2 / 90.0\n\n if iopt == 11: # ease grid north\n thelon = ascale * sin(alon * dtr) * sin(dtr * (45.0 - 0.5 * alat))\n thelat = ascale * cos(alon * dtr) * sin(dtr * (45.0 - 0.5 * alat))\n elif iopt == 12: # ease grid south\n thelon = ascale * sin(alon * dtr) * cos(dtr * (45.0 - 0.5 * alat))\n thelat = ascale * cos(alon * dtr) * cos(dtr * (45.0 - 0.5 * alat))\n elif iopt == 13: # ease cylindrical\n thelon = ascale * pi2 * alon * cos(30.0 * dtr) / 90.0\n thelat = ascale * sin(alat * dtr) / cos(30.0 * dtr)\n\n return thelon, thelat",
"def to_grid (cart_x, cart_y):\n return (cart_x + cart_y + screen_x) / GRID_WIDTH, (cart_y - cart_x + screen_y) / GRID_HEIGHT",
"def path_plotter(self, res):\n # define edgepoint of the plot\n x_start = np.min(self.trans_path_x) - self.p * res\n x_end = np.max(self.trans_path_x) + (self.f + self.p) * res\n y_start = np.min(self.trans_path_y) - self.p * res\n y_end = np.max(self.trans_path_y) + (self.f + self.p) * res\n\n # define length of arrays\n x_len = int((x_end - x_start) / res)\n y_len = int((y_end - y_start) / res)\n\n # define x- and y-axis\n self.x = np.arange(x_start, x_end, res)\n self.y = np.arange(y_start, y_end, res)\n\n # define matrix that will be plotted\n self.meas_path = np.ones((y_len, x_len))\n\n # fill the matrix with the measured frames\n for k, frame in enumerate(self.trans_frames):\n start = ((self.trans_frame_start[k][0] - x_start) / res,\n (self.trans_frame_start[k][1] - y_start) / res)\n end = ((self.trans_frame_start[k][0] + (2 * self.p + self.f) * res - x_start) / res,\n (self.trans_frame_start[k][1] + (2 * self.p + self.f) * res - y_start) / res)\n # start = (int((self.trans_path_x[k] - self.p * res - x_start) / res),\n # int((self.trans_path_y[k] - self.p * res - y_start) / res))\n # end = (int((self.trans_path_x[k] + (self.p + self.f) * res - x_start) / res),\n # int((self.trans_path_y[k] + (self.p + self.f) * res - y_start) / res))\n self.meas_path[start[1]:end[1], start[0]:end[0]] = frame\n\n # Plot the path\n fig, ax = plt.subplots(1)\n ax.pcolormesh(self.x, self.y, self.meas_path)\n ax.plot(self.trans_path_x, self.trans_path_y, color='red')\n for k in range(len(self.trans_frames)):\n width = self.f * res\n rect = patches.Rectangle((self.trans_path_x[k], self.trans_path_y[k]), width, width,\n linewidth=1, edgecolor='black', facecolor='none')\n ax.add_patch(rect)\n plt.show()",
"def Solution(self):\n self.solver.check()\n m = self.solver.model()\n answer = [[0] * self.width for i in range(self.height)]\n for y in range(self.height):\n for x in range(self.width):\n answer[y][x] = int(str(m[self.grid[(x, y)]]))\n return answer",
"def setSquareGrid(nx,ny):\n dislin.grid(nx,ny)",
"def calc_grid(self):\n return int(self._posn.x / cell_size), int(self._posn.y / cell_size)",
"def displayPathToPrincess(n, grid):\n for i, row in enumerate(grid):\n if 'p' in row:\n princess = [i, row.index('p')]\n if 'm' in row:\n me = [i, row.index('m')]\n\n r, c = find_path(princess, me)\n path = [r]\n path.append(c)\n return '\\n'.join(path)",
"def convert_path_units(self, path_to_conv):\n\n converted_path = copy.deepcopy(path_to_conv) # TODO: figure out if this is the right way to do this- I have not yet understood how data should be handled here\n\n for i, coord in enumerate(converted_path):\n converted_path[i][0] = coord[0] / self.units_in_meter\n converted_path[i][1] = coord[1] / self.units_in_meter\n converted_path[i][2] = coord[2] / self.units_in_meter\n\n return converted_path"
] | [
"0.5953606",
"0.5806127",
"0.57640606",
"0.5755406",
"0.57439196",
"0.5740067",
"0.5653222",
"0.5591101",
"0.55833936",
"0.5580926",
"0.5577852",
"0.5570575",
"0.5565106",
"0.55632424",
"0.5562481",
"0.5536194",
"0.55283296",
"0.5520637",
"0.5520081",
"0.5514781",
"0.55122167",
"0.5494811",
"0.5493348",
"0.5490664",
"0.5470201",
"0.54672366",
"0.5453951",
"0.545372",
"0.54508984",
"0.5448659"
] | 0.7544195 | 0 |
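The grid-construction candidates above range from axis-aligned meshes to EASE projections and recursive quadrant numbering. As a neutral, minimal sketch only — the function name and parameters below are illustrative assumptions, not drawn from any snippet above — a plain rectangular grid of (x, y) points can be built with NumPy as:

    import numpy as np

    def make_point_grid(x_min, x_max, y_min, y_max, nx, ny):
        # Evenly spaced coordinates along each axis, endpoints included.
        xs = np.linspace(x_min, x_max, nx)
        ys = np.linspace(y_min, y_max, ny)
        # meshgrid gives two (ny, nx) arrays; stacking yields an (ny, nx, 2)
        # array where grid[j, i] is the (x, y) position of column i, row j.
        xx, yy = np.meshgrid(xs, ys, indexing="xy")
        return np.stack([xx, yy], axis=-1)

    # Example: a 3x3 grid covering the unit square.
    points = make_point_grid(0.0, 1.0, 0.0, 1.0, 3, 3)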
One dimensional exponential cutoff power law derivative with respect to parameters | def fit_deriv(x, amplitude, x_0, alpha, x_cutoff):
xx = x / x_0
xc = x / x_cutoff
d_amplitude = xx ** (-alpha) * np.exp(-xc)
d_x_0 = alpha * amplitude * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_x_cutoff = amplitude * x * d_amplitude / x_cutoff ** 2
return [d_amplitude, d_x_0, d_alpha, d_x_cutoff] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)",
"def exponential(self, data=[], init_lambdas=[1,0.75], max_iteration=500):\r\n xaxis = np.arange(1, len(data)+1)\r\n data = np.array(data)\r\n idx = 1\r\n lambdas = np.array(init_lambdas)\r\n while idx < max_iteration:\r\n y = [lmbda*np.exp(data*(-lmbda)) for lmbda in lambdas]\r\n weights = y/np.sum(y, axis=0)\r\n coefficients = np.mean(weights, axis=1)\r\n lambdas = np.sum(weights, axis=1)/np.sum(weights*data, axis=1)\r\n idx+=1 \r\n print lambdas, coefficients\r\n return lambdas, coefficients",
"def _de_exp_const_w(z,w):\n return np.log((z+1.)**(3.*(1.+w)))/3.",
"def dd_xpowalpha(cls,grid,alpha,cutoff=False):\n grid.l.info('bc.hom: Setting initial data to (-x)^alpha.')\n grid.l.debug('bc.hom: Parameters to dd_xpowalpha: alpha={},cutoff={}'.format(alpha,cutoff))\n if alpha is 0:\n def tmp(x): return float(x[1]<=0)\n return cls._tpl(grid, tmp) \n\n if cutoff:\n def tmp(x):\n return sum(pow(-1*float(x[i]<0)*x[i],alpha) for i in range(0,grid.u.ndim))\n else:\n def tmp(x):\n return sum(pow(float(x[i]>=0)*x[i],alpha)-pow(-1*float(x[i]<0)*x[i],alpha) for i in range(0,grid.u.ndim))\n return cls._tpl(grid, tmp)",
"def powerlaw(E,alpha,A):\n\n\treturn A*E**alpha",
"def expdiff(x, a=a, n=5):\n return a**n * np.exp(a*x)",
"def fit_exp_decay(x, y):\n def _func(z, z0):\n return np.exp(-z/z0)\n popt, pcov = curve_fit(_func, x, y)\n return popt[0]",
"def decayexp(self, p, x, y, fixed_delay=0., mode=0):\n tm = p[0] * np.exp(-(x-fixed_delay)/p[1])\n if mode == 0:\n return tm - y\n elif mode == 1:\n return np.linalg.norm(tm-y)\n elif mode == -1:\n return tm\n else:\n raise ValueError('doubleexp: Mode must be 0 (diff), 1 (linalg.norm) or -1 (just value)')",
"def fit_deriv(x, amplitude, x_0, alpha):\n\n xx = x / x_0\n\n d_amplitude = xx ** (-alpha)\n d_x_0 = amplitude * alpha * d_amplitude / x_0\n d_alpha = -amplitude * d_amplitude * np.log(xx)\n\n return [d_amplitude, d_x_0, d_alpha]",
"def calculate_exponent():\n pass",
"def func_ludwigson(eps,k1,n1,k2,n2,):\n return k1*eps**n1+np.exp(k2+n2*eps)",
"def epow(data,dt,epow=0,etpow=1,tmin=0):\n nt = data.shape[0]\n t_array = tmin+np.squeeze((dt*np.array([list(range(nt))])))\n etpowfac = t_array**etpow\n data = np.apply_along_axis(lambda m: np.multiply(m, np.exp(epow*etpowfac)), axis=0, arr=data)\n return data",
"def exp(self):\n return Factor().__build( VarSet(self.v) , np.exp(self.t) )",
"def f(x, a, d1, d2):\n A = 10*a\n D1 = 10*d1\n D2 = 10*d2\n y = e * (frequency) * (1e9) * ( np.exp(-np.exp(-A*x+D1)) + np.exp(-np.exp(-A*x+D2)) + N)\n return y",
"def X_DE(self, z):\n return (1.+z)**(3.*(1.+self.w0+self.wa))*np.exp(-3.*self.wa*(z/(1.+z)))",
"def free_energy_function(self, x):\n \n wx_b = T.dot(x, self.W) + self.bhid\n \n return -T.sum(T.log(1 + T.exp(wx_b)), axis=1) -T.dot(x, self.b)",
"def fit_deriv(x, amplitude, x_0, alpha, beta):\n\n xx = x / x_0\n log_xx = np.log(xx)\n exponent = -alpha - beta * log_xx\n\n d_amplitude = xx ** exponent\n d_beta = -amplitude * d_amplitude * log_xx ** 2\n d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0)\n d_alpha = -amplitude * d_amplitude * log_xx\n return [d_amplitude, d_x_0, d_alpha, d_beta]",
"def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2):\n\n alpha = np.where(x < x_break, alpha_1, alpha_2)\n xx = x / x_break\n\n d_amplitude = xx ** (-alpha)\n d_x_break = amplitude * alpha * d_amplitude / x_break\n d_alpha = -amplitude * d_amplitude * np.log(xx)\n d_alpha_1 = np.where(x < x_break, d_alpha, 0)\n d_alpha_2 = np.where(x >= x_break, d_alpha, 0)\n\n return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2]",
"def __pow__(self, a: float) -> np.ndarray:\n return np.e**(a*self.logarithm)",
"def step_vdfdx_exponential(f, dt):\n\n return np.real(\n fft.ifft(np.exp(-1j * kx[:, None] * dt * v) * fft.fft(f, axis=0), axis=0)\n )",
"def __pow__(self,power):\n return Factor().__build( VarSet(self.v) , np.power(self.t,power) )",
"def get_est_exp_discount_function(self,params):\n params = params[0:5]\n df = pd.DataFrame(self.maturity.apply(lambda x: x ** i) for i in range(1, 6)).T\n df.columns = ['M1', 'M2', 'M3', 'M4', 'M5']\n return np.exp(df.dot(params))",
"def f(x):\n return -math.exp(x[0]**3/-3 + x[0] - x[1]**2)",
"def f(x):\n return -math.exp(x[0]**3/-3 + x[0] - x[1]**2)",
"def F(x):\n return math.exp(-0.5 * (x ** 2))",
"def exp_grad(self, xs, *args, **kwargs):\n raise NotImplementedError",
"def evaluate(x, amplitude, x_0, alpha, x_cutoff):\n\n xx = x / x_0\n return amplitude * xx ** (-alpha) * np.exp(-x / x_cutoff)",
"def _func_pen(self, coeffs_ext):\n l_elastic_net = self.l_elastic_net\n eta = self.eta\n n_features = self.n_features\n coeffs = coeffs_ext[:n_features] - coeffs_ext[n_features:]\n return l_elastic_net * ((1. - eta) * coeffs_ext.sum()\n + 0.5 * eta * np.linalg.norm(coeffs) ** 2)",
"def exp_func(x, initial, lifetime):\n return initial * np.exp(-x/lifetime)",
"def df(self, x):\n\n return 2*math.exp(x*2) - math.exp(x)"
] | [
"0.6435805",
"0.641947",
"0.63531476",
"0.6331413",
"0.6303507",
"0.6297258",
"0.62600297",
"0.62595487",
"0.6257403",
"0.6225996",
"0.619326",
"0.6160021",
"0.6141522",
"0.61281365",
"0.6099849",
"0.6092616",
"0.6091917",
"0.6058767",
"0.6058701",
"0.6036378",
"0.60332775",
"0.60297275",
"0.6005953",
"0.6005953",
"0.5995476",
"0.59876645",
"0.5985066",
"0.59799695",
"0.59693414",
"0.59654737"
] | 0.6778236 | 0 |
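The positive document of the entry above gives analytic derivatives of an exponential-cutoff power law. A quick way to sanity-check them is a central finite difference; the standalone wrapper, the model function, and the parameter values below are assumptions made only so the sketch is self-contained:

    import numpy as np

    def fit_deriv(x, amplitude, x_0, alpha, x_cutoff):
        # Same formulas as in the entry above.
        xx = x / x_0
        xc = x / x_cutoff
        d_amplitude = xx ** (-alpha) * np.exp(-xc)
        d_x_0 = alpha * amplitude * d_amplitude / x_0
        d_alpha = -amplitude * d_amplitude * np.log(xx)
        d_x_cutoff = amplitude * x * d_amplitude / x_cutoff ** 2
        return [d_amplitude, d_x_0, d_alpha, d_x_cutoff]

    def model(x, amplitude, x_0, alpha, x_cutoff):
        return amplitude * (x / x_0) ** (-alpha) * np.exp(-x / x_cutoff)

    # Central finite-difference check of the alpha derivative at one point.
    x, amp, x0, a, xc, eps = 2.0, 3.0, 1.5, 1.2, 10.0, 1e-6
    numeric = (model(x, amp, x0, a + eps, xc) - model(x, amp, x0, a - eps, xc)) / (2 * eps)
    analytic = fit_deriv(x, amp, x0, a, xc)[2]
    assert abs(numeric - analytic) < 1e-6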
Generate strong password to add to csv file and clipboard. | def generate_pw():
chars = string.ascii_letters + string.digits + '!@#$%^&*()'
password = ''.join(random.choice(chars) for i in range(16))
pyperclip.copy(password)
print('Password copied to clipboard.')
return password | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def password_generate_strong(self, ctx, delimeter: str = \"\"):\n d = delimeter\n rc = random.choice\n rr = random.randint\n await ctx.send(\n d.join(rc(RANDOM_WORDS).capitalize() for i in range(3)) + f\"{d}{rr(1,1000)}\"\n )",
"def giveReadablePassword():\n import random\n words = [\n 'Alpha',\n 'Bravo',\n 'Charlie',\n 'Delta',\n 'Echo',\n 'Foxtrot',\n 'Golf',\n 'Hotel',\n 'India',\n 'Juliet',\n 'Kilo',\n 'Lima',\n 'Mike',\n 'November',\n 'Oscar',\n 'Papa',\n 'Quebec',\n 'Romeo',\n 'Sierra',\n 'Tango',\n 'Uniform',\n 'Victor',\n 'Whiskey',\n 'Xray',\n 'Yankee',\n 'Zulu']\n\n chars = [\n '!',\n '#',\n '$',\n '%',\n '&',\n '*',\n '-',\n '.',\n ':',\n '?',\n '@' \n ]\n\n\n random.seed()\n pw = ''\n pw += random.choice(words)\n pw += random.choice(words)\n pw += random.choice(chars)\n pw += \"{:04d}\".format(random.randint(0,10000))\n return pw",
"def create_password(self):\r\n alphabet = string.ascii_letters + string.digits\r\n password = ''.join(secrets.choice(alphabet) for i in range(30))\r\n\r\n QtWidgets.QMessageBox.information(self, \"Password generated\", \r\n \"{}\".format(password))",
"def passwordGen() :\n\treturn __randomString(12)",
"async def password_generate_complex(self, ctx):\n await ctx.send(\n \"\".join(\n random.choice(string.ascii_letters[:94]) for i in range(random.randint(20, 35))\n )\n )",
"def shuffle_pass(cls, p):\n password = ''.join(random.sample(p, len(p)))\n print(f\"Generated password is:{password}\")\n pyperclip.copy(password)\n print(f\"Your {len(password)} Digit Password is copied to clipboard!\")",
"def password():\n chars = \"abcdefghijklmnopqsrtuvwxyzABCDEFGHIJKLMNOPQSRTUVWXYZ\"\\\n \"123456890!#%&-_*<>+=()\"\n return ''.join(random.sample(chars, 15))",
"def password(self) -> str:",
"def generate_password():\n selection = string.ascii_letters + string.digits\n\n while True:\n password = \"\".join(secrets.choice(selection) for i in range(16))\n\n if (\n any(c.isupper() for c in password)\n and any(c.islower() for c in password)\n and any(c.isdigit() for c in password)\n ):\n break\n\n return password",
"def _random_password(self):\n return ''.join([\n random.choice(string.ascii_letters + string.digits)\n for _ in range(12)\n ])",
"def new_password():\n new_pass = generate_password()\n entry_pass.delete(0, END)\n entry_pass.insert(0, new_pass)",
"def generate_random_password(self):\r\n self.symbols = self.__set_symbol_dict() # set new symbol subset dict\r\n self.i = randrange(len(self.symbols)) # set new dict key pointer\r\n return \"\".join(self.__get_random_symbol() for _ in range(self.pw_len))",
"def passwordGen(self):\n password = ''\n while len(password) < self.length:\n ls = []\n if self.numeric: ls.append(random.choice(list(string.digits)))\n if self.lower : ls.append(random.choice(list(string.ascii_lowercase)))\n if self.upper : ls.append(random.choice(list(string.ascii_uppercase)))\n if self.symbol : ls.append(random.choice(list(string.punctuation)))\n if not ls: sys.exit(0)\n random.shuffle(ls)\n if self.length - len(password) > len(ls):\n password += ''.join(ls) \n else:\n password += ''.join(ls[:self.length - len(password)])\n\n return password",
"def generate_password():\n chars = string.ascii_letters + string.digits\n key = random.sample(chars, 10)\n keys = \"\".join(key)\n return keys",
"def generate(self):\n\n four_digits = random.choice(string.ascii_uppercase) + random.choice(string.ascii_lowercase) + \\\n random.choice(string.digits) + random.choice(string.punctuation)\n\n if self.pass_length == 4:\n\n # if password is 4 letter long\n self.shuffle_pass(four_digits)\n else:\n\n # if password length is higher than 4 it add some printable letter and add to the four_digit variable\n diff = self.pass_length - 4\n password_long = ''\n i = 1\n while i <= diff:\n i += 1\n p = random.choice(string.printable)\n password_long += p\n self.shuffle_pass(four_digits + password_long)",
"def generate_password(path: str, number: int) -> str:\n password = \"\"\n for i in range(number):\n rand_line = generate_random_numbers_string()\n password += Program.find_string_by_number(rand_line, path)\n\n return password",
"def generate_password() -> str:\n list_letters = [choice(LETTERS) for _ in range(randint(8, 10))]\n list_symbols = [choice(SYMBOLS) for _ in range(randint(2, 4))]\n list_numbers = [choice(NUMBERS) for _ in range(randint(2, 4))]\n password_list = [n for n in list_letters + list_symbols + list_numbers]\n shuffle(password_list)\n return \"\".join(password_list)",
"def password(customer_info, stringLength=5):\n letters = string.ascii_lowercase\n rand_string = ''.join(random.choice(letters) for i in range(stringLength))\n user_password = rand_string + str(customer_info[0][0:2] + customer_info[1][-2:])\n return user_password",
"def main():\n\n print(\"Password Generator Service\")\n # If no input is given by user then the maximum length password is genearted\n lengthOfPassword = int(input(\"Enter length of password (8 or greater) or leave blank to generate a password of maximum length i.e. 77 characters\\n\") or int(77))\n # Additional Input Validation\n if lengthOfPassword < 8 or lengthOfPassword > 77:\n print(\"Invalid Entry. Enter a value that is 8 or greater and less than 77 characters as they make secure passwords. Please try again\")\n sys.exit()\n\n upperCaseLowerLimit = 65\n upperCaseUpperLimit = 90\n\n lowerCaseLowerLimit = 97\n lowerCaseUpperLimit = 122\n\n specialSymbolsLowerLimit = 33\n specialSymbolsUpperLimit = 47\n\n upperCaseList = [chr(i) for i in range(upperCaseLowerLimit, upperCaseUpperLimit + 1)]\n lowerCaseList = [chr(i) for i in range(lowerCaseLowerLimit, lowerCaseUpperLimit + 1)]\n specialSymbolsList = [chr(i) for i in range(specialSymbolsLowerLimit, specialSymbolsUpperLimit + 1)]\n numbersList = [i for i in range(0,10)]\n\n \"\"\"\n To generate random characters of even greater length the list might have to be duplicated\n This has not be done now due to practical reasons.\n Sample code for doing so can be seen below\n random.sample(upperCaseList*2, len(upperCaseList)*2)\n \"\"\"\n possibleSymbols = random.sample(upperCaseList, len(upperCaseList)) + random.sample(lowerCaseList, len(lowerCaseList)) \\\n + random.sample(specialSymbolsList, len(specialSymbolsList)) + random.sample(numbersList, len(numbersList))\n # the core functionality that determines the complex password\n random.shuffle(possibleSymbols)\n\n finalPassword = ''.join(str(s) for s in possibleSymbols[:lengthOfPassword])\n\n print(\"Your new password of length {} is generated ==> {}\".format(lengthOfPassword, finalPassword))",
"def random_password():\n pass_len = secrets.choice(range(32, 49))\n return ''.join(secrets.choice(string.printable)\n for _ in range(pass_len))",
"def generate_password(c, user=\"root\"):\n passw = subprocess.run(\n [\n \"nix\",\n \"run\",\n \"--inputs-from\",\n \".#\",\n \"nixpkgs#xkcdpass\",\n \"--\",\n \"-d-\",\n \"-n3\",\n \"-C\",\n \"capitalize\",\n ],\n text=True,\n check=True,\n stdout=subprocess.PIPE,\n ).stdout.strip()\n hash = subprocess.run(\n [\n \"nix\",\n \"run\",\n \"--inputs-from\",\n \".#\",\n \"nixpkgs#mkpasswd\",\n \"--\",\n \"-m\",\n \"sha-512\",\n \"-s\",\n ],\n text=True,\n check=True,\n stdout=subprocess.PIPE,\n input=passw,\n ).stdout.strip()\n print(\"# Add the following secrets\")\n print(f\"{user}-password: {passw}\")\n print(f\"{user}-password-hash: {hash}\")",
"def generate_pw(self):\n\n chunks = []\n for chunk_no in range(self.CHUNKS):\n if chunk_no < self.chunk:\n chunks.append(self.verified_chunks[chunk_no])\n elif chunk_no == self.chunk:\n chunks.append(str(self.counter).zfill(self.PASSWORD_LENGTH /\n self.CHUNKS))\n else:\n chunks.append(\"000\")\n\n return \"\".join(chunks)",
"def randompassword():\n characters = string.ascii_uppercase + string.ascii_lowercase + string.digits\n size = random.randint(8, 12)\n return ''.join(random.choice(characters) for x in range(size))",
"def generate_passwd(length=6):\n ret = ''\n if length < 6 :\n length = 6\n elif length > 10 :\n length = 10\n for x in xrange(length) :\n if x == 3 :\n ret += '-'\n ret += chr(random.randrange(ord('a'),ord('z'),1))\n return ret",
"def genPwd(alpha, length):\n # be sure that each character is exactly once present\n alpha = list(set(alpha))\n # return the created password\n return \"\".join([random.choice(alpha) for _ in range(length)])",
"def genpass(length):\n password = \"\"\n choice = string.ascii_letters + string.digits\n for i in range(length):\n password += random.choice(choice)\n return password",
"def anypassword():\n\n characters = string.ascii_uppercase + string.ascii_lowercase + string.digits\n size = random.randint(8, 12)\n password = ''.join(random.choice(characters) for x in range(size))\n\n return password",
"def generate_password(self): \n\n password = []\n length = input(\"Enter Length for Password (At least 8): \")\n\n if length.lower().strip() == \"exit\":\n raise UserExits\n elif length.strip() == \"\":\n raise EmptyField\n elif int(length) < 8:\n raise PasswordNotLongEnough\n else:\n # generating a password\n spinner = Halo(text=colored(\"Generating Password\", \"green\"), spinner=self.dots_, color=\"green\")\n spinner.start()\n for i in range(0, int(length)):\n #choose character from one of the lists randomly\n password.append(random.choice(random.choice([string.ascii_lowercase, string.ascii_uppercase, string.digits, self.specialChar_])))\n\n finalPass = \"\".join(password)\n spinner.stop()\n\n return finalPass",
"def cryptate(self):\r\n\r\n intab1 = \"abcdefghijklomnopqrstuvwxyz\"\r\n outtab1 = \"?2p=o)7i(u9/y&t3%r¤5e#w1q!>)\"\r\n# Fetching the writing in textbox\r\n s = self.textbox.toPlainText()\r\n a = s.lower()\r\n# The crypting process, replaces letters in intab1 with outtab1\r\n crypted = (a.translate({ord(x): y for (x, y) in zip(intab1, outtab1)}))\r\n# Clear the textbox\r\n self.textbox.clear()\r\n# Write the crypted text within textbox\r\n self.textbox.setPlainText(crypted)",
"def passphrase():\n a = []\n for i in range(1,6):\n a.append(password())\n # join words into phrase\n p = \" \".join(a)\n # split phrase into a list\n p = list(p)\n # substitute a random character\n rc = \"\"\"1~!#$%^2&*()-=3+[]\\{}4:;\"'<>5?/01236456789\"\"\"\n p[secrets.choice(range(0,len(p)))] = rc[secrets.choice(range(0,len(rc)))]\n # put phrase back together\n p = \"\".join(p)\n return p"
] | [
"0.6699509",
"0.66523397",
"0.66337395",
"0.65883833",
"0.6544499",
"0.651341",
"0.64864033",
"0.6481383",
"0.64227664",
"0.63991195",
"0.63651586",
"0.63424796",
"0.6329188",
"0.6299538",
"0.62843406",
"0.6273658",
"0.62399065",
"0.6230091",
"0.6219999",
"0.6194473",
"0.61783314",
"0.6159625",
"0.6140925",
"0.6133897",
"0.6130855",
"0.6056771",
"0.60514414",
"0.6050881",
"0.60455984",
"0.6037183"
] | 0.7241065 | 0 |
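The `generate_pw` document above assumes `string`, `random`, and `pyperclip` are imported elsewhere in its module. A self-contained variant using the standard-library `secrets` module is sketched below; the default 16-character length matches the entry above, while dropping the clipboard step is an assumption made to avoid the third-party dependency:

    import secrets
    import string

    def generate_pw(length=16):
        # secrets.choice draws from a CSPRNG, which is preferable to
        # random.choice when generating passwords.
        chars = string.ascii_letters + string.digits + '!@#$%^&*()'
        return ''.join(secrets.choice(chars) for _ in range(length))

    print(generate_pw())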
Add new account to pw.csv and generate a strong password. | def main(script):
try:
# ensure user entered account name and user name
account_name = sys.argv[1]
user_name = sys.argv[2]
except IndexError:
print('python add_pw.py [account name] [user name]')
else:
# read in csv file
pw_file = open('pw.csv')
pw_object = csv.reader(pw_file)
# ensure account does not already exist in pw.csv
for row in pw_object:
if row[0] == account_name:
print('Account already exists.')
break
# append account name, user name, and password generated by function
else:
with open('pw.csv', 'a', newline='') as csvfile:
writer = csv.writer(csvfile)
password = generate_pw()
writer.writerow([account_name, user_name, password]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def new_password():\n new_pass = generate_password()\n entry_pass.delete(0, END)\n entry_pass.insert(0, new_pass)",
"def add_user(self, user, pw):\n self.db.execute(\"INSERT INTO user_credentials VALUES (?, ?)\", [user, pw])\n self.db.commit()",
"def save_password(self, new_password):\n # 55 iterations takes about 100 ms on a Netgear WNDR3800 or about 8ms on a\n # Core2 Duo at 1200 MHz.\n hashed = pbkdf2.crypt(new_password, iterations=55)\n self.write(self.password_filename, hashed)",
"def new_account(firstname, lastname, pin):\n pass",
"def set_pass(self, pw):\n\t\tself.passhash = generate_password_hash(pw)",
"def passwd(self, plaintext):\n self._password = bcrypt.generate_password_hash(plaintext.encode('utf8')).decode('utf8')",
"def new_account():\n file_name = 'user_list.txt'\n user_list = {}\n\n file = open(file_name)\n line = file.readline()\n logger.debug(\"Our user-file list was opened and read.\")\n while line:\n logger.debug(\"Each line is being stripped of the '\\ n ' character\")\n index = line.rstrip(\"\\n\")\n line = file.readline()\n logger.debug(\"The password is being set as the value to the username as the key\")\n user_list[index] = line.rstrip(\"\\n\")\n line = file.readline()\n file.close()\n\n logger.debug(\"Asks the users if they would like to create an account.\")\n reg_user = input(\"Would you like to create an account to play the game? \")\n a = True\n while a:\n if reg_user.lower() == \"yes\":\n new_user = input(\"Please enter a username. \")\n logger.debug(\"Creates a new username to be input into the list\")\n logger.debug(\"Valid input would be two passwords that are the same.\")\n if new_user in user_list.keys():\n print(\"I'm sorry, that username already exists. Please try again. \")\n else:\n final_user = new_user\n\n b = True\n while b:\n\n new_pass = input(\"Now, please enter a password. \")\n confirm_pass = input(\"Please type in your password again for confirmation. \")\n\n logger.debug(\"Valid input must be a username that is not already within the users list.\")\n # for k, v in user_list.items():\n\n if new_pass == confirm_pass:\n logger.debug(\"If the new passwords match, then they are saved to a final password variable.\")\n final_pass = new_pass\n b = False\n\n if new_pass != confirm_pass:\n logger.debug(\"Valid input must have matching passwords.\")\n print(\"I'm sorry, your passwords don't match. Please try again.\")\n\n new_users = {final_user: final_pass}\n user_list.update(new_users)\n print(user_list)\n logger.debug(\"The user-list text file is now updated with the new user account.\")\n with open(file_name, 'a') as f:\n f.writelines(final_user + \"\\n\")\n f.writelines(final_pass + \"\\n\")\n logger.debug(\"Program now moves to the game_play() function.\")\n game_play()\n\n elif reg_user.lower() == \"no\":\n logger.debug(\"If the user says no, the program should quit.\")\n print(\"See you next time!\")\n quit()\n else:\n logger.debug(\"Valid input must be yes or no.\")\n logger.warning(\"This is invalid input.\")\n reg_user = input(\"Please enter either yes or no. \")",
"def new_password(self):\n # create new password\n return password_generator.create_password()\n # have password reset",
"def password(self, password):\n self.password_hash = generate_password_hash(password)",
"def add_entry(hostname, port, database, username):\n prefix = \":\".join([hostname, port, database, username])\n with open(pass_file_path(), 'a+') as fs:\n for line in fs:\n if line.startswith(prefix):\n entries = line.split(\":\")\n return entries[4]\n\n # If here, the entry doesn't exist, append to the file:\n passw = create_password()\n passw = passw.strip()\n fs.write(\"{}:{}{}\".format(prefix, passw, os.linesep))\n return passw",
"def add_user(self, username, password): #WORKS\n password_hash = generate_password_hash(password) # Generates a SHA256 hash.\n try:\n self.cur.execute(\"INSERT INTO users VALUES(\\\"{}\\\", \\\"{}\\\")\".format(username, password_hash))\n self.db.commit()\n except:\n self.db.rollback()",
"def write_pass(service, password, user_id):\r\n global sql_cursor\r\n global database\r\n global passwords\r\n\r\n query = f'INSERT INTO passwords(service,pass,user_id) values(\"{service}\",\"{password}\",\"{user_id}\");'\r\n sql_cursor.execute(query)\r\n print(\"Saving ...\")\r\n database.commit()\r\n\r\n passwords = fetch_data(sql_cursor, \"passwords\")\r\n\r\n print(\"Password saved successfully\\n\")",
"def save_password(self):\n Credential.passwords.append(self)",
"def change_password(change_account):\n change_data(change_account, changed_data='password')",
"def add_details(username, password):\n login_details = open(\"login_details.txt\",\"a\")\n login_details.write(username + \"\\n\" + password + \"\\n\")\n login_details.close()\n welcome_user(username)\n start()",
"def add_account(self, log, pword):\r\n #Placeholder : insert variables in sqlite3\r\n self.curs.execute(f\"\"\"INSERT INTO main_table VALUES (?, ?)\"\"\", (log, pword))\r\n self.conn.commit()",
"def cryptsetup_add_password(config, slot):\n\n (password, mainslot) = config.first_password()\n\n pwfile = os.path.join(iotests.test_dir, \"passwd.txt\")\n with open(pwfile, \"w\") as fh:\n fh.write(config.passwords[slot])\n\n try:\n args = [\"luksAddKey\", config.image_path(),\n \"--key-slot\", slot,\n \"--key-file\", \"-\",\n \"--iter-time\", \"10\",\n pwfile]\n\n cryptsetup(args, password)\n finally:\n os.unlink(pwfile)",
"def new_password(self, login, password):\n login = self._sha512('{:s}{:s}'.format(login, self.salt))\n pw = self._pepper_hash(self._get_peppers(login).next(), password, self.salt)\n hashed = bcrypt.hashpw(pw, bcrypt.gensalt(7))\n return login, hashed",
"def add_password():\n website = entry_web.get()\n email = entry_email.get()\n password = entry_pass.get()\n if website == \"\" or email == \"\" or password == \"\":\n message = \"Please don't leave any fields empty!\"\n messagebox.showinfo(title=APP_TITLE, message=message)\n else:\n new_data = {\n website: {\n \"email\": email,\n \"password\": password,\n }\n }\n try:\n with open(\"data.json\", \"r\") as data_file:\n data = json.load(data_file)\n data.update(new_data)\n except FileNotFoundError:\n data = new_data\n with open(\"data.json\", \"w\") as data_file:\n json.dump(data, data_file, indent=4)\n entry_web.delete(0, END)\n entry_pass.delete(0, END)",
"async def _before_save(self) -> None:\n await super()._before_save()\n\n # Convert password to hash if is plain text (works for first insert and updates)\n if self.password is not None and 'argon2' not in self.password:\n self.password = pwd.create(self.password)",
"def password(self, value):\n self.password_hashed = func.crypt(value, func.gen_salt('bf'))",
"def save_password():\n title = core.get_value(TITLE_ID)\n identifier = core.get_value(IDENTIFIER_ID)\n password = core.get_value(PASSWORD_ID)\n note = core.get_value(NOTE_ID)\n\n is_valid = True\n if not title:\n logger.add_error_message('Title is required. Please set the Title.')\n is_valid = False\n if not identifier:\n logger.add_error_message('Identifier is required. Please set the Identifier.')\n is_valid = False\n if not password:\n logger.add_error_message('Password is required. Please set the Password')\n is_valid = False\n\n if not is_valid:\n return\n\n password_info = model.PasswordInfo(\n title=title,\n identifier=identifier,\n password=encrypt(password),\n note=note\n )\n\n try:\n model.insert_one_item(password_info)\n except Exception:\n core.add_error_message('Failed to save password.')\n return\n\n logger.add_info_message('Password was saved successfully.')\n table.update_password_table()",
"def encryptUserPassword(self, instance, username, password):\n\n\t\tself.crypto.setPrivateKeyFile(configuration.get(\"Credentials\", \"private_key\"))\n\t\tself.crypto.setPublicKeyFile(configuration.get(\"Credentials\", \"public_key\"))\n\n\t\tstrToEncrypt = \"%s %s\\n\"%(username, password)\n\t\tencryptedStr = self.crypto.encrypt(strToEncrypt)\n\n\t\tif encryptedStr == None or encryptedStr == \"\":\n\t\t\tlogging.error(\"Cant encrypt username and password. Please contact support\")\n\t\t\tself.remove_temporary_files()\n\t\t\tsys.exit(1)\n\n\t\tsession = self.configDBSession()\n\t\tdbimportInstances = aliased(configSchema.dbimportInstances)\n\n\t\tDBImportInstance = (session.query(\n\t\t\t\tdbimportInstances\n\t\t\t)\n\t\t\t.filter(dbimportInstances.name == instance)\n\t\t\t.one())\n\n\t\tDBImportInstance.db_credentials = encryptedStr\n\t\tsession.commit()",
"def __init__(self, first_name, second_name, gender, account_type):\n self.first_name = first_name\n self.second_name = second_name\n self.gender = gender\n self.account_type = account_type\n self.account_number = '531'+ ''.join(random.choices(string.digits, k=6)) #todo: Generate new number if it exissts in database\n self.account_password = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))\n self.account_balance = 0.0",
"def create_password(self):\r\n alphabet = string.ascii_letters + string.digits\r\n password = ''.join(secrets.choice(alphabet) for i in range(30))\r\n\r\n QtWidgets.QMessageBox.information(self, \"Password generated\", \r\n \"{}\".format(password))",
"def insert_users():\r\n\r\n data = [(\"Dave\", hash_password(\"123\"), 3),\r\n (\"Jeff\", hash_password(\"1234\"), 2),\r\n (\"Fred\", hash_password(\"111\"), 1)]\r\n try:\r\n conn = sqlite3.connect('account.db')\r\n c = conn.cursor()\r\n c.executemany(\"INSERT INTO accounts VALUES (?, ?, ?)\", data)\r\n conn.commit()\r\n except sqlite3.IntegrityError:\r\n print(\"Error. Tried to add duplicate record!\")\r\n else:\r\n print(\"Successfully entered records\")\r\n finally:\r\n if c is not None:\r\n c.close()\r\n if conn is not None:\r\n conn.close()",
"def store_passwd(self, clr_passwd):\n aes_cipher = AESCipher()\n self.__aes_key = aes_cipher.AES_KEY\n self.__password = aes_cipher.encrypt(clr_passwd)",
"def set_password(self, password):\n self.password = generate_password_hash(password, method='pbkdf2:sha256')",
"def _update_password(self, email, new_password):\r\n user = User.objects.get(email=email)\r\n user.set_password(new_password)\r\n user.save()\r\n history = PasswordHistory()\r\n history.create(user)",
"def add_account(insert_dict):\n return ar.add_account(insert_dict)"
] | [
"0.68463534",
"0.6339493",
"0.6226887",
"0.6119043",
"0.6009385",
"0.59483135",
"0.58453876",
"0.5821747",
"0.5782089",
"0.5744898",
"0.5735216",
"0.57338196",
"0.573278",
"0.57301116",
"0.5707284",
"0.5689022",
"0.5674017",
"0.5670386",
"0.56640315",
"0.56365216",
"0.5633996",
"0.56195045",
"0.5554552",
"0.55529064",
"0.55370957",
"0.5512909",
"0.5504897",
"0.5487489",
"0.54727536",
"0.5464616"
] | 0.6759446 | 1 |
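The `main(script)` document above reads `pw.csv` without closing that handle before appending to the same file. Purely as a hedged sketch of the same duplicate-check-then-append flow — the file name, column order, and the idea of a `generate_pw` helper are carried over from that entry, everything else below is an assumption:

    import csv

    def add_account(account_name, user_name, password, path='pw.csv'):
        # Refuse to add a duplicate account name (assumes the file already exists).
        with open(path, newline='') as f:
            for row in csv.reader(f):
                if row and row[0] == account_name:
                    print('Account already exists.')
                    return False
        # Append the new record; both handles close automatically.
        with open(path, 'a', newline='') as f:
            csv.writer(f).writerow([account_name, user_name, password])
        return True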
Add Site Static Resource Directory | def addMobileStaticResourceDir(self, dir: str) -> None:
self.__rootMobileResource.addFileSystemRoot(dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_dirs_to_static(static_webapp_name):\n static_dir = '$HOME/webapps/%s' % static_webapp_name\n with settings(warn_only=True):\n with cd(static_dir):\n run(\"mkdir static && mkdir media\")\n run(\"rm index.html\")\n run(\"touch index.html\")\n with cd(code_dir):\n run(\"mkdir %s/static\" % project_name)",
"def glr_path_static():\n return os.path.join(base_path, \"static\")",
"def add_static_paths(app):\n app.env.book_theme_resources_changed = False\n\n output_static_folder = Path(app.outdir) / \"_static\"\n theme_static_files = resources.contents(theme_static)\n\n if (\n app.config.html_theme_options.get(\"theme_dev_mode\", False)\n and output_static_folder.exists()\n ):\n # during development, the JS/CSS may change, if this is the case,\n # we want to remove the old files and ensure that the new files are loaded\n for path in output_static_folder.glob(\"sphinx-book-theme*\"):\n if path.name not in theme_static_files:\n app.env.book_theme_resources_changed = True\n path.unlink()\n # note sphinx treats theme css different to regular css\n # (it is specified in theme.conf), so we don't directly use app.add_css_file\n for fname in resources.contents(theme_static):\n if fname.endswith(\".css\"):\n if not (output_static_folder / fname).exists():\n (output_static_folder / fname).write_bytes(\n resources.read_binary(theme_static, fname)\n )\n app.env.book_theme_resources_changed = True\n\n # add javascript\n for fname in resources.contents(theme_static):\n if fname.endswith(\".js\"):\n app.add_js_file(fname)",
"def path_static():\n return os.path.abspath(os.path.dirname(__file__))+'/_static'",
"def __get_server_static__(app_path,static_dir):\n import os\n # from . import config_loader\n\n # root_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n _path = (static_dir).replace(\"/\", os.path.sep)\n return os.sep.join([app_path, _path])",
"def copy_static_resources(self):\n if not hasattr(settings, 'STATIC_ROOT'):\n raise MissingStaticRoot()\n destination = os.path.join(STORAGE_PATH, 'static')\n if os.path.exists(destination):\n shutil.rmtree(destination)\n shutil.copytree(settings.STATIC_ROOT, destination)",
"def include_static_files(app):\n file_path = sphinx_prolog.get_static_path(STATIC_FILE)\n if file_path not in app.config.html_static_path:\n app.config.html_static_path.append(file_path)",
"def ensure_static_exists():\n for entry in html_static_path:\n static_path = os.path.join(__repo_docs__, entry)\n if not os.path.isdir(static_path):\n os.makedirs(static_path)",
"def copy_static(self, outdir):\n pass",
"def get_swagger_static_root():\n return os.path.join(CURDIR, \"static\")",
"def assemble_resource_directories(project, base_dir):\n resource_path = os.path.join(base_dir, project.resources_path)\n os.makedirs(os.path.join(resource_path, 'images'))\n os.makedirs(os.path.join(resource_path, 'fonts'))\n os.makedirs(os.path.join(resource_path, 'data'))",
"def update_config(self, config):\n toolkit.add_template_directory(config, 'templates')\n toolkit.add_public_directory(config, 'public')\n toolkit.add_resource('fanstatic', 'syngenta')",
"def setup_output_path(self):\n self.logger.info('setting up output path')\n try:\n self.output_path.mkdir()\n except FileExistsError:\n pass\n try:\n (self.output_path / 'simple').mkdir()\n except FileExistsError:\n pass\n for filename in resource_listdir(__name__, 'static'):\n if filename == 'index.html':\n # Skip template\n continue\n with (self.output_path / filename).open('wb') as f:\n source = resource_stream(__name__, 'static/' + filename)\n f.write(source.read())\n source.close()",
"def lesson_static_generator_dir(lesson_slug, static_dir, search_dir):\n if not search_dir.exists():\n return\n\n for static_file in search_dir.iterdir():\n\n if static_file.is_dir():\n yield from lesson_static_generator_dir(lesson_slug, static_dir, static_file)\n continue\n\n relative = static_file.relative_to(static_dir)\n\n yield (\"lesson_static\", {\"lesson\": lesson_slug, \"path\": str(relative)})",
"def DjangoStaticResource(path, rel_url='static'):\r\n rel_url = rel_url.strip('/')\r\n StaticFilesResource = MediaResource(path)\r\n StaticFilesResource.namespace = rel_url\r\n return StaticFilesResource",
"def static(path):\n return static_file(path, root='media')",
"def copy_static(root_directory, dist_directory, sdk_directory):\n\n for static in configuration.STATICS:\n context = {\n \"root\": root_directory,\n \"sdk\": sdk_directory,\n \"dist\": dist_directory\n }\n\n source = templates.from_string(static[\"source\"], context)\n target = templates.from_string(static[\"target\"], context)\n target = os.path.join(dist_directory, target)\n\n # Perform the action.\n sys.stdout.write(\"Copying '%s'\\n\" % source)\n\n if static[\"type\"] == \"directory\":\n recursive_overwrite(source, target)\n else:\n shutil.copy(source, target)",
"def deploy_static(): \n from fabdeploy.django import collectstatic as django_collectstatic\n# run(\"rm -rf %(root_path)s%(project_name)s/static/*\" % env) # call again git_add_commit_pull\n django_collectstatic()",
"def get_dirurl(self, dirpath):\n paths = dirpath.split(\"/\")\n\n return \"/\".join(paths[paths.index(\"static\"):])",
"def collectstatic():\n sudo(env.activate)\n sudo('cd %s' % env.whole_path_symlinked + '/aurora; python manage.py collectstatic;')",
"def add_latesettings_assets(self):\n\n # setting up static file serving\n assetmanager = self.comp('assetmanager')\n\n # add external asset mount point where we can copy public static files so they can be served by a separate traditional web server\n # presumably this directory is being served by a more traditional webserver, at this url we specify below\n assetmanager.add_assetmount(\n massetmanager.MewloAssetMount_ExternalServer('external_assets', filepath = '${mewlofilepath}/public_assets', urlabs = 'http://127.0.0.1/mewlo/mewlo/public_assets' )\n )\n\n # add internal asset mount point where we will serve files internally; a route will be automatically created for any asset source attached to this mount point; we can choose the path prefix for urls served by the route\n assetmanager.add_assetmount(\n massetmanager.MewloAssetMount_InternalRoute('internal_assets', urlpath='assets')\n )\n\n\n # now that we have some mount points, we can specify some files to be hosted on them\n # note that the ids for all asset sources MUST be unique (ATTN:TODO elaborate on this please)\n # first we mount the files in the staticfilesource/ directory as internal assets that we will serve internally via mewlo; the id will be used for alias creation, and for the route\n assetmanager.add_assetsource(\n massetmanager.MewloAssetSource(id='siteinternal', mountid = 'internal_assets', filepath = '${sitefilepath}/staticfilesource', mnamespace=None)\n )\n # then as a test, lets mount same files on the external mount point -- this will cause mewlo to physically copy the files to the external filepath, where presumably another web server can serve them\n assetmanager.add_assetsource(\n massetmanager.MewloAssetSource(id='siteexternal', mountid = 'external_assets', filepath = '${sitefilepath}/staticfilesource', mnamespace=None)\n )\n\n # remember that one should never refer to the assets by a hardcoded url or file path; always use the aliases created by these functions, which will take the form (where ID is the id of the asset source):\n # 'asset_ID_urlrel' | 'asset_ID_urlabs' | 'asset_ID_filepath'\n # you can also use helper function to build these names, which would be better.",
"def ext_static(context, extension, path):\n return static('ext/%s/%s' % (extension.id, path))",
"def deploy_static_media(env=None, asset_version='', quick=False, haus_vars={}):\n print green('Deploying static media {}'.format('__quick__' if quick else ''))\n collectstatic(no_input=True, skip_admin=quick)",
"def server_static (filename):\n return static_file(filename, root=\"./static\")",
"def static(request):\n return {\n 'STATIC_URL': getattr(settings, 'STATIC_URL', settings.MEDIA_URL)\n }",
"def source_paths(self):\n paths = self.config.get('static_dirs')\n if paths:\n return paths\n return [self.config.get('static_dir')]",
"def mount_static_directory(self, prefix, directory, remote=False,\n index_page=None):\n prefix = tuple(prefix.strip('/').split('/'))\n if remote or re.match(r'https?://', directory):\n directory = RemoteDirectory(directory)\n else:\n directory = abs_path(directory)\n directory = LocalDirectory(directory, index_page=index_page)\n self.register('static_directory', directory, prefix)",
"def collectstatic(where=None):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n run('bin/django collectstatic --noinput')",
"def static(website, request, **etc):\n return website.static.respond(request)",
"def add_static(ext):\n ext = ext.lower()\n\n compiler = StaticCompiler(ext)\n file_list = compiler.get_staticfiles_list()\n\n return render_to_string(\n \"mub/context_%s.html\" % ext,\n {\n \"items\": file_list,\n \"STATIC_URL\": settings.STATIC_URL,\n \"IS_MINIFIED\": compiler.is_minified\n }\n )"
] | [
"0.71481174",
"0.6910498",
"0.66968936",
"0.66562533",
"0.66559666",
"0.65018547",
"0.64497256",
"0.6352136",
"0.6325666",
"0.6246173",
"0.6058434",
"0.6023625",
"0.5898131",
"0.5875398",
"0.58405364",
"0.5814434",
"0.5805225",
"0.57971865",
"0.5774547",
"0.57634854",
"0.5763397",
"0.57417446",
"0.57180226",
"0.57082015",
"0.56528836",
"0.5635937",
"0.5628587",
"0.56232405",
"0.55973685",
"0.5573284"
] | 0.7166486 | 0 |
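The two-line document above delegates to a resource-tree object (`self.__rootMobileResource`) whose class is not shown. As a rough illustration of the pattern it implies — a static resource that resolves paths against several registered filesystem roots — with every class and method name below being a hypothetical stand-in:

    import os

    class StaticResource:
        def __init__(self):
            self._roots = []

        def addFileSystemRoot(self, directory):
            # Roots are searched in the order they were added.
            self._roots.append(directory)

        def resolve(self, relative_path):
            # Return the first existing file under any root, else None.
            for root in self._roots:
                candidate = os.path.join(root, relative_path)
                if os.path.isfile(candidate):
                    return candidate
            return None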
This function is for viewing the plot of your cost history. | def plot_cost_history(alpha, cost_history):
cost_df = pandas.DataFrame({
'Cost_History': cost_history,
'Iteration': range(len(cost_history))
})
return ggplot(cost_df, aes('Iteration', 'Cost_History')) + geom_point() + ggtitle('Cost History for alpha = %.3f' % alpha ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_costs(j_history):\n plt.figure(figsize=(14, 8))\n plt.plot(range(len(j_history)), j_history)\n plt.grid(True)\n plt.title('J (Cost)')\n plt.xlabel('Iteration')\n plt.ylabel('Cost function')\n plt.xlim([0, 1.05 * ITERATIONS])\n plt.ylim([4, 7])\n plt.show()\n plt.close()",
"def plot_cost_history(alpha, cost_history):\n cost_df = pandas.DataFrame({\n 'Cost_History': cost_history,\n 'Iteration': range(len(cost_history))\n })\n return ggplot(cost_df, aes('Iteration', 'Cost_History')) + \\\n geom_point() + ggtitle('Cost History for alpha = %.3f' % alpha)",
"def plot_history(self, filename):\r\n plt.figure(figsize=(12, 9))\r\n plt.plot(self.Objective_value)\r\n plt.xlabel('Iteration')\r\n plt.ylabel('Value')\r\n plt.title('Objective Function Values')\r\n # plt.savefig(filename)\r\n plt.show()\r\n return",
"def plot_cost(self):\n steps = np.arange(len(self.cost_values))\n plt.plot(steps, self.cost_values, '-o')\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Cost value\")\n plt.title(\"Cost value per step using Gradient Descent\")\n plt.show()",
"def plots(self, history):\n print(history.history.keys())\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train','test'], loc='upper left')\n plt.show()\n\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train','test'], loc='upper left')\n plt.show()",
"def curve_plot(self):\n if self.session.active['mode'] == 'database':\n self.curvePlot.set_scroll_interval()\n self.curvePlot.update_depth()\n self.curvePlot.show()",
"def plot_metrics(history):\n\n pyplot.plot(history.history['loss'], label='loss')\n\n pyplot.plot(history.history['val_loss'], label='val_loss')\n\n pyplot.legend()\n\n pyplot.show()",
"def plot_history(H, epochs):\n plt.style.use(\"fivethirtyeight\")\n plt.figure()\n plt.plot(np.arange(0, epochs), H.history[\"loss\"], label=\"train_loss\")\n plt.plot(np.arange(0, epochs), H.history[\"val_loss\"], label=\"val_loss\")\n plt.plot(np.arange(0, epochs), H.history[\"accuracy\"], label=\"train_acc\")\n plt.plot(np.arange(0, epochs), H.history[\"val_accuracy\"], label=\"val_acc\")\n plt.title(\"Training Loss and Accuracy\")\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Loss/Accuracy\")\n plt.legend()\n plt.tight_layout()\n plt.show()",
"def plot_costs(self, threshold=0):\n epochs_range = np.arange(threshold, len(self.costs), 1)\n plt.plot(epochs_range, self.costs[threshold:], color='green', marker='o')\n plt.title('Cost function plot. Eta={:.2f} Lambda={:2.2f}'.format(self.eta, self.lambda_r))\n plt.xlabel('Epochs')\n plt.ylabel('Cost')\n plt.grid(True)\n plt.show()",
"def show_training_history(self):\n hist = [i.history[\"loss\"][0] for i in self.history]\n plt.plot(hist)",
"def plot_history(data):\n fig = go.Figure()\n for col in data.columns:\n fig.add_trace(go.Scatter(x=data.index, y=data[col], mode='lines', name=col))\n fig.update_xaxes(title_text=\"Time\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_yaxes(title_text=\"Share Price ($ USD)\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_layout(legend=dict(orientation=\"h\", yanchor=\"bottom\", y=-0.2, xanchor=\"center\", x=0.5),\n font=dict(family='Times New Roman', size=15), plot_bgcolor='rgba(0,0,0,0)',\n margin_l=20, margin_r=20, margin_t=20, margin_b=20,)\n\n fig.write_image(join('..', 'docs', 'share_prices_all_time.png'), height=700, width=900, engine='kaleido')\n fig.write_html(join('..', 'docs', 'share_prices_all_time.html'))\n fig.show()\n\n recent = data[:data.first_valid_index() - pd.Timedelta(weeks=52)]\n fig = go.Figure()\n for col in data.columns:\n fig.add_trace(go.Scatter(x=recent.index, y=recent[col], mode='lines', name=col))\n fig.update_xaxes(title_text=\"Time\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_yaxes(title_text=\"Share Price ($ USD)\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_layout(legend=dict(orientation=\"h\", yanchor=\"bottom\", y=-0.2, xanchor=\"center\", x=0.5),\n font=dict(family='Times New Roman', size=15), plot_bgcolor='rgba(0,0,0,0)',\n margin_l=20, margin_r=20, margin_t=20, margin_b=20,)\n\n fig.write_image(join('..', 'docs', 'share_prices_past_year.png'), height=700, width=900, engine='kaleido')\n fig.write_html(join('..', 'docs', 'share_prices_past_year.html'))\n fig.show()",
"def plot_opt_history(self, figsize=(15,5)):\n import matplotlib.pyplot as plt\n import seaborn as sns\n sns.set_style(style=\"darkgrid\")\n best_score_ls = []\n opt_df = pd.DataFrame(self.history_trials)\n for i, score in enumerate(opt_df.score_opt):\n if i == 0:\n best_score = score\n best_score_ls.append(score)\n else:\n if self.direction == 'maximize':\n if best_score < score:\n best_score = score\n best_score_ls.append(best_score)\n else:\n best_score_ls.append(best_score)\n else:\n if best_score > score:\n best_score = score\n best_score_ls.append(best_score)\n else:\n best_score_ls.append(best_score)\n\n opt_df['best_score'] = best_score_ls\n opt_df['Id'] = list(opt_df.index)\n\n plt.figure(figsize=figsize) \n points = plt.scatter(x=opt_df.Id, y=opt_df.score_opt, label='Iter Score',\n c=opt_df.score_opt, s=25, cmap=\"coolwarm\")\n plt.colorbar(points)\n plt.plot(opt_df.best_score, color='red', label='Best Score',)\n plt.xlabel(\"Iter\")\n plt.ylabel(\"Score\")\n plt.title('Plot optimization history')\n plt.legend()\n return(plt.show())",
"def plotHistory(history):\n \n acc = history.history['acc']\n val_acc = history.history['val_acc']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n epochs = range(len(acc))\n \n # Make and save the plot for our accuracy\n plt.plot(epochs, acc, 'bo', label='Training acc')\n plt.plot(epochs, val_acc, 'b', label='Validation acc')\n plt.title('Training and validation accuracy')\n plt.legend()\n plt.savefig(\"trainValAccSecond.png\")\n\n # Make and save the plots for our loss \n plt.figure()\n plt.plot(epochs, loss, 'bo', label='Training loss')\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n plt.show()\n plt.savefig(\"trainValLossSecond.png\")",
"def draw(self):\r\n dt = m.get_instance().dt\r\n self.perception_history = m.get_instance().larvae[0].history\r\n t = np.arange(0,len(self.perception_history)*dt,dt)\r\n plt.plot(t,self.perception_history)\r\n plt.title('Perception History')\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Perception (uM)')\r\n plt.show()",
"def plot_history(history):\r\n\r\n fig, axs = plt.subplots(2)\r\n\r\n # create accuracy subplot\r\n axs[0].plot(history.history[\"accuracy\"], label=\"accuracy\")\r\n axs[0].plot(history.history['val_accuracy'], label=\"val_accuracy\")\r\n axs[0].set_ylabel(\"Accuracy\")\r\n axs[0].legend(loc=\"lower right\")\r\n axs[0].set_title(\"Accuracy evaluation\")\r\n\r\n # create loss subplot\r\n axs[1].plot(history.history[\"loss\"], label=\"loss\")\r\n axs[1].plot(history.history['val_loss'], label=\"val_loss\")\r\n axs[1].set_xlabel(\"Epoch\")\r\n axs[1].set_ylabel(\"Loss\")\r\n axs[1].legend(loc=\"upper right\")\r\n axs[1].set_title(\"Loss evaluation\")\r\n\r\n plt.show()",
"def plot(self):\n pass",
"def plot_fit_history(fit_history_obj):\r\n plt.plot(fit_history_obj.history['loss'])\r\n plt.plot(fit_history_obj.history['val_loss'])\r\n plt.title('model mean squared error loss')\r\n plt.ylabel('mean squared error loss')\r\n plt.xlabel('epoch')\r\n plt.legend(['training set', 'validation set'], loc='upper right')\r\n plt.show()",
"def plot_history(history):\n\n fig, axs = plt.subplots(2)\n\n # create accuracy subplot\n axs[0].plot(history.history[\"accuracy\"], label=\"accuracy\")\n axs[0].plot(history.history['val_accuracy'], label=\"val_accuracy\")\n axs[0].set_ylabel(\"Accuracy\")\n axs[0].legend(loc=\"lower right\")\n axs[0].set_title(\"Accuracy evaluation\")\n\n # create loss subplot\n axs[1].plot(history.history[\"loss\"], label=\"loss\")\n axs[1].plot(history.history['val_loss'], label=\"val_loss\")\n axs[1].set_xlabel(\"Epoch\")\n axs[1].set_ylabel(\"Loss\")\n axs[1].legend(loc=\"upper right\")\n axs[1].set_title(\"Loss evaluation\")\n\n plt.show()",
"def plot_history(history):\n ## First retrieve metrics names ## \n metrics_names = [a for a in history.history.keys() if a[:3]!='val']\n \n rows = 1\n cols = len(metrics_names)\n\n for i,a in enumerate(metrics_names,1):\n plt.subplot(rows,cols,i)\n plt.title(a)\n plt.plot(history_epoch.history[a], label=\"Train\")\n plt.plot(history_epoch.history[\"val_\"+a], label=\"Validation\")\n plt.legend()",
"def cost_profile_plot(cost_values):\n \n ax = plt.figure(figsize = (7.5,4.5)).gca()\n cost_values = np.array(cost_values)\n span = np.arange(1,len(cost_values)+1)\n ax.plot(span,cost_values, color = 'k', alpha = 0.7)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.set_xlabel('Epoch')\n ax.set_ylabel('Cost (MSE) value')\n plt.show()\n plt.close('all')",
"def visualize_data(total_rewards):\n\n x_values = arange(0, len(total_rewards), 1)\n y_values = total_rewards\n plot(x_values, y_values)\n xlabel('episodes')\n ylabel('cumulative rewards')\n title('Reward by Episode')\n grid(True)\n show()",
"def plot_distance(distance_history, plot_num):\n\n plt.plot(distance_history)\n plt.title(\"Arm Distance from Object\")\n plt.ylabel(\"Distance\")\n plt.xlabel(\"Number iterations\")\n plt.savefig(\"./plots/distance/distance_history_{}.png\".format(plot_num))\n plt.clf()",
"def plot_graph(costs):\n plt.figure()\n for i in range(len(np.array(costs).T)):\n plt.plot(np.array(costs)[:, i])\n plt.title(\"Costs\")\n plt.show()",
"def plot_cost(c_v, c_t, save_plots_path):\n\n plt.figure()\n plt.plot(c_v, label='Validation loss')\n plt.plot(c_t, label='Training loss')\n plt.legend()\n title = 'Loss per epoch'\n plt.title(title)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.savefig(save_plots_path + \"swag_loss_plot.png\")",
"def learning_viz(self) :\n self.train\n history = self.history\n plot_loss(history)",
"def visualize(dcf_prices, current_share_prices, regress = True):\n # TODO: implement\n return NotImplementedError",
"def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()",
"def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r",
"def visualize(self, time, pred, true):\n plt.plot(time, true, label='Actual')\n plt.plot(time, pred, label='Predicted')\n plt.xlabel('Time')\n plt.ylabel('Price ($)')\n plt.legend(bbox_to_anchor=(0.1, 1), loc=2, borderaxespad=0.,\n prop={'size': 14})\n plt.show()",
"def show_plots(history):\n loss_vals = history['loss']\n val_loss_vals = history['val_loss']\n epochs = range(1, len(history['accuracy'])+1)\n \n f, ax = plt.subplots(nrows=1,ncols=2,figsize=(16,4))\n \n # plot losses on ax[0]\n ax[0].plot(epochs, loss_vals, color='navy',marker='o', linestyle=' ', label='Training Loss')\n ax[0].plot(epochs, val_loss_vals, color='firebrick', marker='*', label='Validation Loss')\n ax[0].set_title('Training & Validation Loss')\n ax[0].set_xlabel('Epochs')\n ax[0].set_ylabel('Loss')\n ax[0].legend(loc='best')\n ax[0].grid(True)\n \n # plot accuracies\n acc_vals = history['accuracy']\n val_acc_vals = history['val_accuracy']\n\n ax[1].plot(epochs, acc_vals, color='navy', marker='o', ls=' ', label='Training Accuracy')\n ax[1].plot(epochs, val_acc_vals, color='firebrick', marker='*', label='Validation Accuracy')\n ax[1].set_title('Training & Validation Accuracy')\n ax[1].set_xlabel('Epochs')\n ax[1].set_ylabel('Accuracy')\n ax[1].legend(loc='best')\n ax[1].grid(True)\n \n plt.show()\n plt.close()\n \n # delete locals from heap before exiting\n del loss_vals, val_loss_vals, epochs, acc_vals, val_acc_vals"
] | [
"0.745946",
"0.7305429",
"0.6926057",
"0.68891627",
"0.6870632",
"0.65292275",
"0.65152884",
"0.65122676",
"0.650241",
"0.6492996",
"0.646902",
"0.6411578",
"0.64004505",
"0.63721967",
"0.63554156",
"0.6354006",
"0.63402677",
"0.6333534",
"0.63082844",
"0.63080597",
"0.62924623",
"0.62901354",
"0.6283304",
"0.62276",
"0.6198868",
"0.61933815",
"0.61822104",
"0.6169212",
"0.6158622",
"0.61553496"
] | 0.7419536 | 1 |
Searches inside the index for umbra3d | def search_umbra(text):
result = _search_blog('umbra3d', text)
_print_results(result)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clustering_dbscan_o3d():\n pass",
"def cloud_index():\n import alltheitems.cloud\n return alltheitems.cloud.index()",
"def test_figure3(self):\n\n ssearcher = SimpleSearcher.from_prebuilt_index('msmarco-passage')\n encoder = TctColBertQueryEncoder('castorini/tct_colbert-msmarco')\n dsearcher = SimpleDenseSearcher.from_prebuilt_index('msmarco-passage-tct_colbert-hnsw', encoder)\n hsearcher = HybridSearcher(dsearcher, ssearcher)\n\n hits = hsearcher.search('what is a lobster roll')\n\n self.assertAlmostEqual(hits[0].score, 71.56023, delta=0.0001)\n self.assertEqual(hits[0].docid, '7157715')\n\n self.assertAlmostEqual(hits[9].score, 70.07635, delta=0.0001)\n self.assertEqual(hits[9].docid, '7157708')\n\n self.assertEqual(len(hits), 10)",
"def search():\n\tif not request.vars.search_term:\n\t\tredirect(URL('index'))\n\tterm = request.vars.search_term\n\torigterm = term\n\tterm = term.replace(' ','|')\n\tartists = db.executesql(\"select distinct(m1.id), m1.art_name, m1.artist_type, m1.country, m1.b_year,m1.b_month,m1.b_date,m1.e_year,m1.e_month,m1.e_day,ts_rank(to_tsvector(m1.art_name),to_tsquery('\"+term+\"')) rank from art_info m1 where to_tsvector('english',m1.art_name) @@ to_tsquery('\"+term+\"') order by rank desc limit 20;\")\n\talbums = db.executesql(\"select distinct(m1.id),m2.name,m1.art_id,m1.art_name,m1.rel_type,m1.count,ts_rank(to_tsvector(m2.name),to_tsquery('\"+term+\"')) rank from rel_art m1, release_name m2, release_group m3 where m3.name = m2.id and m3.id = m1.id and to_tsvector('english',m2.name) @@ to_tsquery('\"+term+\"') order by rank desc limit 20;\")\n\tsongs = db.executesql(\"select m2.id, m1.name, m3.art_id, m3.art_name, m3.rel_id, m3.rel_name from track_name m1, recording m2, rec_rel_art m3 where m1.id = m2.name and m2.id = m3.rec_id and lower(m1.name) LIKE lower('%%\"+origterm+\"%%') limit 20;\")\n\treturn dict(songs=songs, albums=albums, artists=artists)",
"def search_es(es, query_embeddings, k):\n es_query ={\n \"query\": {\n \"knn\": {\n \"embeddings\": {\n \"vector\": query_embeddings,\n \"k\": k\n }\n }\n }\n }\n \n res = es.search(index=ES_INDEX, body=es_query, size=k)\n uris = [hit['_source']['uri'] for hit in res['hits']['hits']]\n return uris",
"def query(args):\n import ruido\n ruido.query('.index', 'find {} return .')\n return \"[]\"",
"def test_3d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n write_readback(dic, data)",
"def dummy_search(query):\n ii = InvertedIndex()\n return ii.lookup_query(query)",
"def _load_labels_3d(self, results):\n results[\"gt_labels_3d\"] = results[\"ann_info\"][\"gt_labels_3d\"]\n return results",
"def test_word_found_in_file(self):\n\n # create indexer object\n indexer = indexing_module.IndexModule()\n\n # index the location (storage/data/test/empty_directory)\n indexer.index(_path_prefix + 'word_not_found')\n\n # search for few words and check that the result is empty\n result = indexer.search(\"unit\")\n self.assertTrue(result != [])\n\n result = indexer.search(\"index\")\n self.assertTrue(result != [])\n print(result)",
"def album_search(query, term):\n table = Album.__table__\n search_statement = or_(\n table.c.name.ilike('%' + term + '%'),\n cast(table.c.release_date, Text).ilike('%' + term + '%'),\n table.c.image.ilike('%' + term + '%'),\n table.c.label.ilike('%' + term + '%'),\n table.c.tracks.ilike('%' + term + '%'),\n table.c.spotify_uri.ilike('%' + term + '%'),\n cast(table.c.id, Text).ilike('%' + term + '%'))\n return query.filter(search_statement)",
"def globes(self, code):\n return {\n 'ariel': 'http://www.wikidata.org/entity/Q3343',\n 'bennu': 'http://www.wikidata.org/entity/Q11558',\n 'callisto': 'http://www.wikidata.org/entity/Q3134',\n 'ceres': 'http://www.wikidata.org/entity/Q596',\n 'deimos': 'http://www.wikidata.org/entity/Q7548',\n 'dione': 'http://www.wikidata.org/entity/Q15040',\n 'earth': 'http://www.wikidata.org/entity/Q2',\n 'enceladus': 'http://www.wikidata.org/entity/Q3303',\n 'eros': 'http://www.wikidata.org/entity/Q16711',\n 'europa': 'http://www.wikidata.org/entity/Q3143',\n 'ganymede': 'http://www.wikidata.org/entity/Q3169',\n 'gaspra': 'http://www.wikidata.org/entity/Q158244',\n 'hyperion': 'http://www.wikidata.org/entity/Q15037',\n 'iapetus': 'http://www.wikidata.org/entity/Q17958',\n 'io': 'http://www.wikidata.org/entity/Q3123',\n 'jupiter': 'http://www.wikidata.org/entity/Q319',\n 'lutetia': 'http://www.wikidata.org/entity/Q107556',\n 'mars': 'http://www.wikidata.org/entity/Q111',\n 'mercury': 'http://www.wikidata.org/entity/Q308',\n 'mimas': 'http://www.wikidata.org/entity/Q15034',\n 'miranda': 'http://www.wikidata.org/entity/Q3352',\n 'moon': 'http://www.wikidata.org/entity/Q405',\n 'oberon': 'http://www.wikidata.org/entity/Q3332',\n 'phobos': 'http://www.wikidata.org/entity/Q7547',\n 'phoebe': 'http://www.wikidata.org/entity/Q17975',\n 'pluto': 'http://www.wikidata.org/entity/Q339',\n 'rhea': 'http://www.wikidata.org/entity/Q15050',\n 'ryugu': 'http://www.wikidata.org/entity/Q1385178',\n 'steins': 'http://www.wikidata.org/entity/Q150249',\n 'tethys': 'http://www.wikidata.org/entity/Q15047',\n 'titan': 'http://www.wikidata.org/entity/Q2565',\n 'titania': 'http://www.wikidata.org/entity/Q3322',\n 'triton': 'http://www.wikidata.org/entity/Q3359',\n 'umbriel': 'http://www.wikidata.org/entity/Q3338',\n 'venus': 'http://www.wikidata.org/entity/Q313',\n 'vesta': 'http://www.wikidata.org/entity/Q3030',\n }",
"def query3() :",
"def _load_bboxes_3d(self, results):\n results[\"gt_bboxes_3d\"] = results[\"ann_info\"][\"gt_bboxes_3d\"]\n results[\"bbox3d_fields\"].append(\"gt_bboxes_3d\")\n return results",
"def search():\n # Get search request from GET requst\n query = str(request.query.q)\n print('Search query: ' + query)\n \n # Log to file\n try:\n with open(\"log.txt\", \"a\") as file:\n file.write(query + \"\\n\")\n except:\n print(\"Error saving log.txt!\")\n \n ## Search in the database\n search = es.search(\n index='webpages',\n doc_type='webpage',\n body={\n 'size': 25,\n \"fields\" : [\"title\", \"url\", \"description\"],\n 'query': {\n \"multi_match\" : {\n \"query\" : query,\n \"fields\" : [\"title^3\", \"url^5\", \"description^2\", \"content\"]\n }\n },\n \"highlight\" : {\n \"fields\" : {\n \"content\" : {\n \"pre_tags\" : [\"<b>\"],\n \"post_tags\" : [\"</b>\"],\n \"order\": \"score\",\n \"index_options\" : \"offsets\",\n \"fragment_size\" : 220,\n \"number_of_fragments\" : 1,\n \"require_field_match\" : \"false\"\n }\n }\n }\n }\n )\n\n ## Work through the results\n # Number of hits\n hits = search['hits']['total']\n\n # No points in continuing if there are no results..\n if hits == 0:\n return {'hits': 0}\n \n # Array containing results\n results = search['hits']['hits']\n\n cleanResults = list()\n \n # The 'results' array contain a lot of \"useless\" data,\n # here we work through it, and strip it down to the minimum\n for result in results:\n url = result['fields']['url']\n title = result['fields']['title']\n\n # If highlighting in the page body is available, set description to the highlighted paragraph\n # If no highlighting available, set the description to the description of the page (from its <meta> tag)\n try:\n description = result['highlight']['content']\n except:\n description = result['fields']['description']\n\n # Add the search result to the 'cleanResults' list\n cleanResults.append({\n 'url': url,\n 'title': title,\n 'description': description\n })\n ## Freebase\n # Try searching freebase for topics related to our query\n try:\n fb = freebase(query)\n except:\n # If topic doesnt exist in freebase, set fb = false\n # In the JavaScript, we can easily check if 'freebase == false'\n fb = False\n \n # Construct response\n response = {\n 'hits': hits,\n 'results': cleanResults,\n 'freebase': fb\n }\n\n return response",
"def test_search_qlp():\n search = search_lightcurve(\"TIC 277554109\", author=\"QLP\", sector=11)\n assert len(search) == 1\n assert search.table[\"author\"][0] == \"QLP\"\n lc = search.download()\n assert type(lc).__name__ == \"TessLightCurve\"\n assert lc.sector == 11\n assert lc.author == \"QLP\"",
"def assets_search(ctx, text, pretty):\n ocean = ctx.obj['ocean']\n response = ocean.search(text, pretty)\n echo(response)",
"def getindexu(self,name,searchby='name'):\n name = name.replace(':','_').lower()\n result = []\n\n for (i,elem) in enumerate(self.lat):\n if fnmatch.fnmatch(elem[searchby],name):\n result.append(i)\n return result",
"def main():\n domain_list = []\n base_url = \"http://localhost:9200/latest-tor/_search?pretty&size=9000&_source=title,domain\"\n keywords_list = ['preteen', 'loli', 'lolita', 'jailbait', 'pthc', 'best cp',\n '\"child porn\"', '\"kid porn\"', '\"child sex\"', '\"cp video\"',\n '\"nude children\"', '\"cp porn\"', '\"free child porn\"', 'kinderporn',\n '\"child rape\"', '\"toddler porn\"', '\"kids videos\"', '\"cp videos\"',\n 'lolilust', '\"pedo porno\"', '\"pedo content\"', 'underage', '\"cp pack\"',\n 'loliporn', 'pedofamily', '\"cp database\"', '\"pedo webcams\"', 'lolitacity']\n '\"xxx child\"', '\"xxx underage\"', '\"young forbidden\"']\n search_terms = []\n for index, term in enumerate(keywords_list):\n search_terms.append(term)\n if len(search_terms) >= 10 or index + 1 == len(keywords_list):\n url = base_url + \"&q=(\" + \" OR \".join(search_terms).replace(\" \", \"%20\") + \")\"\n search(url, domain_list)\n search_terms = []",
"def _getitem3d(self, index):\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n ix = index[0]\n iy = index[1]\n iz = index[2]\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[-1]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n ny = hivects[1,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n if npes > 1:\n nx = comm_world.allreduce(nx, op=mpi.MAX)\n ny = comm_world.allreduce(ny, op=mpi.MAX)\n nz = comm_world.allreduce(nz, op=mpi.MAX)\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iy, slice):\n iystart = max(iy.start or -self.nghosts, -self.nghosts)\n iystop = min(iy.stop or ny + 1 + self.nghosts, ny + self.overlaps[1] + self.nghosts)\n else:\n iystart = iy\n iystop = iy + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n # --- Setup the size of the array to be returned and create it.\n # --- Space is added for multiple components if needed.\n sss = (max(0, ixstop - ixstart),\n max(0, iystop - iystart),\n max(0, izstop - izstart))\n if ncomps > 1 and ic is None:\n sss = tuple(list(sss) + [ncomps])\n resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)\n\n datalist = []\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iy1 = max(iystart, lovects[1,i])\n iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iy1 - iystart, iy2 - iystart),\n slice(iz1 - izstart, iz2 - izstart))\n\n datalist.append((vslice, fields[i][sss]))\n\n if npes == 1:\n all_datalist = [datalist]\n else:\n all_datalist = comm_world.allgather(datalist)\n\n for datalist in all_datalist:\n for vslice, ff in datalist:\n resultglobal[vslice] = ff\n\n # --- Now remove any of the reduced dimensions.\n sss = [slice(None), slice(None), slice(None)]\n if not isinstance(ix, slice):\n sss[0] = 0\n if not isinstance(iy, slice):\n sss[1] = 0\n if not isinstance(iz, slice):\n sss[2] = 0\n\n return resultglobal[tuple(sss)]",
"def quickSearch():\n calDB = db.TinyDB('../calDB.json')\n pars = db.Query()\n recList = calDB.search(pars.key.matches(\"wf\"))\n print len(recList)\n for idx in range(len(recList)):\n key = recList[idx]['key']\n vals = recList[idx]['vals']\n print key\n for ch in vals:\n\n print ch, vals[ch]\n return",
"def search(terms):\n indexdir = data_folder\n try:\n ix = windex.open_dir(indexdir)\n except EmptyIndexError as e:\n print('No Index found! Clone some repos or run index!')\n exit(0)\n\n with ix.searcher() as searcher:\n query = QueryParser(\"body\", schema).parse(' '.join(terms))\n results = searcher.search(query, terms=True)\n results.formatter = TermFormatter()\n #hi = whighlight.Highlighter(fragmenter=PinpointFragmenter)\n results.fragmenter = ContextFragmenter()\n for result in results:\n print('{0:-<40}'.format(term.bold(result['path'])))\n print(term.bold(\"[\" + result['type'] + \"]\") + '--preview:')\n print(result.highlights('body'))\n print('\\n')",
"def is3D(data):\n return data.find(\"x3\") != -1 and data.find(\"y3\") != -1 and data.find(\"z3\") != -1",
"def lookup(index,keyword):\n\tif keyword in index:\n\t\treturn index[keyword]\n\treturn None",
"def search(self, query):",
"def getAllWhereNameIs3(table, name, objectName, orgName):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"SELECT * FROM \" + table + \" WHERE name like'\" + name + \"%' and measuringObjectId like (SELECT measureingObjectId FROM MeasuringObject WHERE name like'\" + objectName + \"' and organisationId like (SELECT organisationId FROM Organisation WHERE name like '\" + orgName + \"' ))\")\n\t\tob = cur.fetchall()\n\t\tif not ob:\n\t\t\treturn \"Den fanns inte\"\n\t\telse:\n\t\t\tobje = ob[0]\n\t\t\treturn obje\n\t\tcon.commit()\n\t\tcon.close()\n\texcept:\n\t\tprint('Could not run function getAllWhereNameIs3 from DbController')",
"def test_word_not_found_in_file(self):\n\n # create indexer object\n indexer = indexing_module.IndexModule()\n\n # index the location (storage/data/test/word_not_found)\n indexer.index(_path_prefix + 'word_not_found')\n\n # search for few words and check that the result is empty\n result = indexer.search(\"hello\")\n print(result)\n self.assertTrue(result == {})\n\n result = indexer.search(\"world\")\n self.assertTrue(result == {})",
"def test_semantic_search(self):\n # A name in the database\n search_string = \"football\"\n # Search For Umut\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string})\n search_result = json.loads(resp.content)\n # Umut must be in the results because basketball and football are semanticly related\n self.assertEqual(\"Umut\",search_result[0]['name'],\"Semantic Search functionality doesn't work.\")",
"def test_3d_lowmem():\n dic, data = ng.bruker.read_lowmem(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n lowmem_write_readback(dic, data)",
"def fetchwikidata(a_wid):\n\n sparql = SPARQLWrapper(\"https://query.wikidata.org/sparql\", 'natural_earth_name_localizer v1.1.1 (github.com/nvkelso/natural-earth-vector)')\n query_template = \"\"\"\n SELECT\n ?e ?i ?r ?population\n ?name_ar\n ?name_bn\n ?name_de\n ?name_el\n ?name_en\n ?name_es\n ?name_fa\n ?name_fr\n ?name_he\n ?name_hi\n ?name_hu\n ?name_id\n ?name_it\n ?name_ja\n ?name_ko\n ?name_nl\n ?name_pl\n ?name_pt\n ?name_ru\n ?name_sv\n ?name_tr\n ?name_uk\n ?name_ur\n ?name_vi\n ?name_zh\n ?name_zh_hans\n ?name_zh_hant\n WHERE {\n {\n SELECT DISTINCT ?e ?i ?r\n WHERE{\n VALUES ?i { wd:Q2102493 wd:Q1781 }\n OPTIONAL{ ?i owl:sameAs ?r. }\n BIND(COALESCE(?r, ?i) AS ?e).\n }\n }\n SERVICE wikibase:label {bd:serviceParam wikibase:language \"en\".}\n OPTIONAL{?e wdt:P1082 ?population .}\n OPTIONAL{?e rdfs:label ?name_ar FILTER((LANG(?name_ar))=\"ar\").}\n OPTIONAL{?e rdfs:label ?name_bn FILTER((LANG(?name_bn))=\"bn\").}\n OPTIONAL{?e rdfs:label ?name_de FILTER((LANG(?name_de))=\"de\").}\n OPTIONAL{?e rdfs:label ?name_el FILTER((LANG(?name_el))=\"el\").}\n OPTIONAL{?e rdfs:label ?name_en FILTER((LANG(?name_en))=\"en\").}\n OPTIONAL{?e rdfs:label ?name_es FILTER((LANG(?name_es))=\"es\").}\n OPTIONAL{?e rdfs:label ?name_fa FILTER((LANG(?name_fa))=\"fa\").}\n OPTIONAL{?e rdfs:label ?name_fr FILTER((LANG(?name_fr))=\"fr\").}\n OPTIONAL{?e rdfs:label ?name_he FILTER((LANG(?name_he))=\"he\").}\n OPTIONAL{?e rdfs:label ?name_hi FILTER((LANG(?name_hi))=\"hi\").}\n OPTIONAL{?e rdfs:label ?name_hu FILTER((LANG(?name_hu))=\"hu\").}\n OPTIONAL{?e rdfs:label ?name_id FILTER((LANG(?name_id))=\"id\").}\n OPTIONAL{?e rdfs:label ?name_it FILTER((LANG(?name_it))=\"it\").}\n OPTIONAL{?e rdfs:label ?name_ja FILTER((LANG(?name_ja))=\"ja\").}\n OPTIONAL{?e rdfs:label ?name_ko FILTER((LANG(?name_ko))=\"ko\").}\n OPTIONAL{?e rdfs:label ?name_nl FILTER((LANG(?name_nl))=\"nl\").}\n OPTIONAL{?e rdfs:label ?name_pl FILTER((LANG(?name_pl))=\"pl\").}\n OPTIONAL{?e rdfs:label ?name_pt FILTER((LANG(?name_pt))=\"pt\").}\n OPTIONAL{?e rdfs:label ?name_ru FILTER((LANG(?name_ru))=\"ru\").}\n OPTIONAL{?e rdfs:label ?name_sv FILTER((LANG(?name_sv))=\"sv\").}\n OPTIONAL{?e rdfs:label ?name_tr FILTER((LANG(?name_tr))=\"tr\").}\n OPTIONAL{?e rdfs:label ?name_uk FILTER((LANG(?name_uk))=\"uk\").}\n OPTIONAL{?e rdfs:label ?name_ur FILTER((LANG(?name_ur))=\"ur\").}\n OPTIONAL{?e rdfs:label ?name_vi FILTER((LANG(?name_vi))=\"vi\").}\n OPTIONAL{?e rdfs:label ?name_zh FILTER((LANG(?name_zh))=\"zh\").}\n OPTIONAL{?e rdfs:label ?name_zh_hans FILTER((LANG(?name_zh_hans))=\"zh-hans\").}\n OPTIONAL{?e rdfs:label ?name_zh_hant FILTER((LANG(?name_zh_hant))=\"zh-hant\").}\n }\n\n \"\"\"\n\n wikidata_sparql_ids = \"\"\n for wid in a_wid:\n wikidata_sparql_ids += \" wd:\"+wid\n\n print(\"fetch: \", wikidata_sparql_ids.split()[1], \"... 
\", wikidata_sparql_ids.split()[-1])\n ne_query = query_template.replace('wd:Q2102493 wd:Q1781', wikidata_sparql_ids)\n\n # compress the Query - removing the extra spaces\n while ' ' in ne_query:\n ne_query = ne_query.replace(' ', ' ')\n\n results = None\n retries = 0\n while results is None and retries < 8:\n try:\n results = None\n sparql.setQuery(ne_query)\n sparql.setTimeout(1000)\n sparql.setReturnFormat(JSON)\n results = sparql.query().convert()\n\n except SPARQLExceptions.EndPointNotFound:\n print('ERRwikidata-SPARQLExceptions-EndPointNotFound: Retrying in 30 seconds.')\n time.sleep(30)\n retries += 1\n continue\n\n except SPARQLExceptions.EndPointInternalError as e:\n print(\"ERRwikidata-SPARQLExceptions-EndPointInternalError: Retrying in 30 seconds.\",e)\n time.sleep(30)\n retries += 1\n continue\n\n except SPARQLExceptions.QueryBadFormed as e:\n print(\"ERRwikidata-SPARQLExceptions-QueryBadFormed : Check! \",e)\n return \"error\"\n\n except TimeoutError as e:\n print(\"ERRwikidata-SPARQLExceptions TimeOut : Retrying in 1 seconds.\",e)\n time.sleep(1)\n retries += 1\n continue\n\n except KeyboardInterrupt:\n # quit\n sys.exit()\n\n except:\n wait = retries*5\n print(\"ERRwikidata: other error. Retrying in \"+str(wait)+\" seconds.\")\n print('error: %s ' % sys.exc_info()[0])\n time.sleep(3)\n retries += 1\n continue\n\n if results is None and retries >= 8:\n print(\"Wikidata request failed ; system stopped! \")\n sys.exit(1)\n\n\n return results"
] | [
"0.5886022",
"0.55750906",
"0.5477457",
"0.51528597",
"0.5148673",
"0.5101667",
"0.50694734",
"0.4972866",
"0.4953324",
"0.49039114",
"0.48821348",
"0.48311734",
"0.4818827",
"0.4813475",
"0.47986737",
"0.47905108",
"0.4765872",
"0.47636506",
"0.47565117",
"0.47055826",
"0.46839812",
"0.4657376",
"0.46313286",
"0.46239114",
"0.46180823",
"0.46159565",
"0.46144128",
"0.46123198",
"0.4604808",
"0.4594408"
] | 0.6636374 | 0 |
Create an embed with the lyrics | def _lyrics_embed(colour, page: Dict[str, Any], data: Dict[str, Any]) -> discord.Embed:
title = [
x.get("value")
for x in data.get("names")
if x.get("language") == LANGUAGE_MAP.get(page["cultureCode"])
]
em = discord.Embed(
title=title[0] if title else data.get("defaultName"),
colour=colour,
)
em.set_thumbnail(url=data.get("thumbUrl") or "")
if data.get("id"):
em.url = f"https://vocadb.net/S/{data['id']}"
em.description = page["value"][:4090] if page.get("value") else "No lyrics found."
if page.get("url"):
em.add_field(
name="Source",
value=f"[{page.get('source') or 'Source'}]({page['url']})",
)
return em | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def embed():",
"async def _create_embed(self, event, info):\n\n e = discord.Embed(url=info.get(\"url\"))\n e.title = \"%s %s!\" % (info.get(\"streamer\"), info.get(\"live_status\"))\n e.add_field(name=\"Stream title\", value=info.get(\"title\"), inline=False)\n e.add_field(name=\"Begin:\", value=event.begin.format(\"HH:mm:ss ZZZ\") + \" (\" + event.begin.humanize() + \")\", inline=False)\n e.add_field(name=\"Duration: \", value=str(event.duration), inline=False)\n #e.add_field(name=\"Link\", value=info.get(\"url\"), inline=False)\n e.set_image(url=info.get(\"thumbnail\") or e.Empty)\n return e",
"async def sayembed(self, ctx, text_channel: typing.Union[discord.TextChannel, str] = None, *, embed_format=None):\n embed_creator_url = \"https://embedbuilder.nadekobot.me/\"\n if isinstance(text_channel, str):\n if isinstance(embed_format, str):\n embed_format = text_channel + embed_format\n text_channel = ctx.channel\n try:\n if not embed_format or not text_channel:\n return await ctx.send(f\"> **This command follows the format from {embed_creator_url}**\")\n else:\n author_name = None\n author_icon_url = None\n embed_footer_text = None\n embed_footer_url = None\n embed_format = json.loads(embed_format)\n embed_image = embed_format.get('image')\n embed_footer = embed_format.get('footer')\n embed_thumbnail = embed_format.get('thumbnail')\n embed_author = embed_format.get('author')\n if embed_author:\n author_name = embed_author.get(\"name\")\n author_icon_url = embed_author.get(\"icon_url\")\n if embed_footer:\n embed_footer_text = embed_footer.get('text')\n embed_footer_url = embed_footer.get('icon_url')\n author_url = embed_format.get('url')\n\n if author_icon_url or author_url:\n embed_format.pop('author')\n if embed_footer_url:\n embed_format.pop('footer')\n if embed_image:\n embed_format.pop('image')\n if embed_thumbnail:\n embed_format.pop('thumbnail')\n\n embed = discord.Embed.from_dict(embed_format)\n\n if embed_image:\n embed.set_image(url=embed_image)\n if embed_footer_url:\n embed.set_footer(text=embed_footer_text, icon_url=embed_footer_url)\n if embed_thumbnail:\n embed.set_thumbnail(url=embed_thumbnail)\n if author_url and author_icon_url:\n embed.set_author(name=author_name, url=author_url, icon_url=author_icon_url)\n elif not author_icon_url and author_url:\n embed.set_author(name=author_name, url=author_url)\n elif not author_url and author_icon_url:\n embed.set_author(name=author_name, icon_url=author_icon_url)\n\n plain_body = embed_format.get('plainText')\n if plain_body:\n return await text_channel.send(plain_body, embed=embed)\n else:\n return await text_channel.send(embed=embed)\n except Exception as e:\n await ctx.send(f\"ERROR - {e}.\\nFollow the format from {embed_creator_url}\")\n log.console(e)",
"def reply_embed(self, message: str):\n embed = discord.Embed(color=discord.Color.blurple())\n embed.title = \"\"\n embed.description = message\n return embed",
"def lyrics_plaintext(song):\n output = \"\"\n\n song = validate_song(song)\n\n output += song.default_arrangement\n output += \"\\n\\n\\n\\n\"\n output += song.composer\n output += \"\\n\"\n output += song.copyright\n output += \"\\n\\n\"\n\n for section, lyrics in song.lyrics.items():\n output += section\n output += \"\\n\"\n output += lyrics\n output += \"\\n\\n\"\n return output",
"def create_artist_new_music_line(spotify_artist_music):\n body = ''\n for item in spotify_artist_music:\n if item['thumbnail']:\n artist_string = '<p><img src=\"{}\" width=\"{}\" height=\"{}\" /> {} released on {}--{}</p>\\n'\n body += artist_string.format(item['thumbnail'][0]['url'], item['thumbnail'][0]['width'],\n item['thumbnail'][0]['height'], item['name'], item['releaseDate'], item['url'])\n return body",
"def quote_to_embed(self,result):\n thedate = datetime.date.fromtimestamp(result[3])\n thechannel = self.bot.get_channel(result[2])\n themember = thechannel.server.get_member(result[1])\n theauthor = themember.name\n if hasattr(themember, \"nick\"):\n if themember.nick is not None:\n theauthor = themember.nick\n embed = discord.Embed(title=\"Quote #{}\".format(result[4]), description=result[0])\n embed.set_author(name=theauthor, icon_url=themember.avatar_url)\n embed.set_footer(text=\"Saved on: {}\".format(thedate.strftime(\"%d %B %y\")))\n return embed",
"def build_embed(self, source_object) -> discord.Embed:\n url, location, first_line = self.get_github_url(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n help_cmd = self.bot.get_command(\"help\")\n description = help_cmd.help\n elif isinstance(source_object, commands.Command):\n description = source_object.short_doc\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, ModuleType):\n title = f\"Extension: {source_object.__name__}.py\"\n description = discord.Embed.Empty\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = discord.Embed(title=title, description=description, colour=0x87CEEB)\n embed.add_field(name=\"Source Code\", value=f\"[Here's the Github link!]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed",
"def add_lyrics(self):\n\n conn = self.conn\n conn.text_factory = str\n c = conn.cursor()\n\n c.execute(\"SELECT songs.id, artist, title, url FROM songs LEFT JOIN lyrics ON songs.id = lyrics.song_id WHERE lyrics.song_id IS NULL\")\n all_songs_to_scrape = c.fetchall()\n for song in all_songs_to_scrape:\n song_id = song[0]\n song_artist = song[1]\n song_title = song[2]\n song_url = song[3]\n print(\"Looking for lyrics for \" + song_title + \" by \" + song_artist)\n try:\n lyrics = pygenius_songs.searchURL(song_url, 'lyrics')\n for lyric in lyrics:\n for line in lyric.split('\\n'):\n c.execute('INSERT INTO lyrics(song_id, line) VALUES (?,?)', (song_id, line))\n conn.commit()\n except Exception as e:\n print(e)\n print song_url\n print(\"Exception caught! ... continuing.\")\n pass",
"def make_twitch_embed(member: discord.Member, response: dict):\n e = discord.Embed(title=\"Playing \" + response[\"stream\"][\"game\"], url=member.game.url,\n description=member.game.name, color=member.color)\n e.set_author(name=member.display_name, url=member.game.url, icon_url=member.avatar_url)\n e.set_thumbnail(url=response[\"stream\"][\"preview\"][\"small\"] + \"?date=\" + datetime.now().ctime().replace(\" \", \"%20\"))\n return e",
"async def _lyrics(self, ctx: commands.Context):\n if not ctx.voice_state.is_playing:\n raise commands.CommandError('Nothing being played at the moment.')\n\n # Get song name listed on youtube\n song_title = ctx.voice_state.current.source.track\n if not song_title:\n return await ctx.send(\"Couldn't find lyrics for this track!\")\n\n song_title = re.sub(\"[(\\[].*?[)\\]]\", \"\", song_title).strip() # Remove parenthesis from song title\n # Get artist name listed on youtube\n artist_name = ctx.voice_state.current.source.artist\n # Instance of GeniusSong class using the Genius API\n genius_song = GeniusSong(song_title, artist_name)\n # Try getting the lyrics using the lyricsgenius library\n lyrics = genius_song.fastlyrics()\n\n # In case of no lyrics found. Use the other (slower) method\n if not lyrics:\n res = genius_song.get_response() # Generate a response using the Genius API to get the songs\n if res:\n # Find the most similar artist comparing the artist on YouTube and Genius\n artist_name = genius_song.return_similar_artist(res)\n # Artist didn't match\n if not artist_name:\n await ctx.send(\"Couldn't find similar artists. The lyrics might not be the expected.\")\n\n # Get the lyrics using the lyricsgenius library with the new artist\n lyrics = genius_song.fastlyrics(artist_name)\n\n else:\n return await ctx.send(\n \"**Error!**\\nThere is a problem with Genius.\\nTry again in a few minutes. \"\n \"\\nYou can also try the command `fastlyrics`.\")\n\n if lyrics:\n # Split lyrics into fields\n fields = genius_song.split_lyrics(lyrics)\n # Create an embed message\n embed = embed_msg(\n title=song_title.capitalize() + \"\\n{}\".format(artist_name),\n description=\"\",\n footer=\"Lyrics provided by Genius.\",\n field_values=fields,\n inline=False\n )\n return await ctx.send(embed=embed)\n\n return await ctx.send(\"Lyrics couldn't be found.\")",
"async def setup_embed(self):\n\n # init\n embed = Embed()\n embed.colour = 0xF54719\n\n # setting up\n if(self.title != None):\n embed.title = self.title\n \n if(self.description != None):\n embed.description = self.description\n \n if(self.colour != None):\n embed.colour = self.colour\n \n if(self.footer != None):\n embed.set_footer(text = self.footer, icon_url = self.client.user.avatar_url)\n \n else:\n embed.set_footer(text = f\"v{Bot_config.version} - {Bot_config.phase} | Credit : DrLarck & DrMegas\", icon_url = self.client.user.avatar_url)\n \n if(self.thumb != None):\n embed.set_thumbnail(url = self.thumb)\n\n embed.set_author(name = self.client.user.name, icon_url = self.client.user.avatar_url)\n \n return(embed)",
"def embed(ctx=None, title=None, description=None, fields=None, customFooter=False, customThumbnail=None, customColor=None, image=None):\n\n e = discord.Embed(title=title, description=description)\n if customColor is None:\n e.color = color()\n else:\n e.color = color(customColor)\n \n if fields != None:\n index = 0\n # Please fix the code below, There's nothing wrong with it, it's just messy and I'm sure that's not the right way to do it.\n for field in fields:\n session = []\n for key, value in field.items():\n session.append(key)\n\n if key == \"n\":\n name = value \n \n if key == \"v\":\n xValue = value \n \n if key == \"inline\":\n inline = value \n \n if not \"inline\" in session:\n inline = False\n \n e.add_field(name=f\"{name}\", value=xValue, inline=inline)\n \n if not customFooter:\n footer(e, ctx)\n \n if image is None:\n try:\n if customThumbnail is None:\n e.set_thumbnail(url=ctx.author.avatar_url)\n else:\n e.set_thumbnail(url=customThumbnail)\n except:\n pass \n else:\n e.set_image(url=image)\n return e",
"async def genius(self, ctx, *args):\n args = argsmachine(args)\n async with ctx.channel.typing():\n if len(args) > 0:\n headers = {'Authorization': 'Bearer ' + token}\n search_url = f'https://api.genius.com/search?q={args}'\n response = requests.get(search_url, headers=headers)\n response = response.json()\n allitems = []\n for item in response['response']['hits']:\n new = item['result']\n newsong = Song(new['full_title'], new['url'], new)\n allitems.append(newsong)\n embed = Embed()\n embed.description = concatenator(allitems)\n await ctx.channel.send('Here are some results of the songs that you wanted. Type in the # of which result you want the lyrics to, or \"no\" to back out!', embed=embed)\n while True:\n try:\n message = await self.bot.wait_for('message', check = check, timeout=30)\n message = message.content.strip()\n if message == 'no':\n break\n else:\n message = int(message)-1\n break\n except asyncio.TimeoutError:\n await ctx.send(\"You didn't reply in time! Enter the #.\")\n continue\n except:\n await ctx.send(f\"Try entering the # again, or enter 'no' to exit the search command.\")\n continue\n\n try:\n chosensong = allitems[message]\n site = requests.get(chosensong.url)\n site = bs4.BeautifulSoup(site.text, features='html.parser')\n chosensong.lyrics = site.find(\"div\", class_=\"lyrics\").get_text()\n \n #Discord supports only 2048 characters in each embed message so this is used to break it up into multiple messages\n messages_needed = math.ceil(len(chosensong.lyrics) / 2048)\n lyricsembed=Embed()\n counter = 1\n currentchar = 0\n nextchar = 2048\n while messages_needed >= counter:\n lyrics = chosensong.lyrics[currentchar:nextchar]\n lyricsembed.description = lyrics\n await ctx.send(f'Here are the lyrics for `{chosensong.title}`, `{counter}`/`{messages_needed}`!', embed=lyricsembed)\n currentchar += 2048\n nextchar += 2048\n counter += 1\n except:\n await ctx.send(f\"Stopping the genius command.\")\n else:\n await ctx.send(f\"Can't really search for lyrics if there are none provided, right? Try again with words, song titles, or artist names.\")",
"def _build_about_embed(self) -> discord.Embed:\n with self.about_aoc_filepath.open(\"r\", encoding=\"utf8\") as f:\n embed_fields = json.load(f)\n\n about_embed = discord.Embed(title=self._base_url, colour=Colours.soft_green, url=self._base_url)\n about_embed.set_author(name=\"Advent of Code\", url=self._base_url)\n for field in embed_fields:\n about_embed.add_field(**field)\n\n about_embed.set_footer(text=f\"Last Updated (UTC): {datetime.utcnow()}\")\n\n return about_embed",
"def lyrics(self):\n return get_lyrics(self.artist, self.title,'')",
"def _embed(slug):\n context = get_factcheck_context();\n context['slug'] = slug\n contents = context['contents']\n annotations = [post for post in contents if post['type'] == 'annotation' and post['published'] == 'yes']\n filtered = [post for post in annotations if post['slug'] == slug]\n filtered = filtered[0]\n context['filtered'] = filtered\n\n index = contents.index(filtered)\n paragraphs = int(filtered.get('prior', 1))\n start = index - paragraphs;\n prior = contents[start:index]\n context['prior'] = prior\n return make_response(render_template('embed.html', **context))",
"async def more(self, ctx: Context) -> None:\r\n e_title: str = MORE_TITLE\r\n e_message: str = \"\\n\".join(self.yt_result.titles)\r\n embed: Embed = Embed(title=e_title, description=e_message, color=int(\"CC181E\", 16))\r\n\r\n await ctx.send(embed=embed)\r\n await ctx.message.delete()",
"def get_embed(self, content: str = None, **kwargs) -> Embed:\n author = kwargs.pop('author', {\n 'name': self.author.display_name,\n 'icon_url': self.author.avatar_url\n })\n color = kwargs.pop('color', self.color)\n\n return Embed(description=content, author=author, color=color, **kwargs)",
"async def vocadb(self, ctx: commands.Context, *, query: str):\n await ctx.trigger_typing()\n data = await self._fetch_data(ctx, query)\n\n if type(data) == str:\n return await ctx.send(data)\n if not data:\n return await ctx.send(\"No results found.\")\n\n await ctx.send(embed=self._info_embed(await ctx.embed_colour(), data))\n # Added a small delay to improve UX for initial embed\n await asyncio.sleep(2.0)\n\n embeds = []\n for i, page in enumerate(data[\"lyrics\"], start=1):\n language = f\"Language: {LANGUAGE_MAP.get(page.get('cultureCode', 'na'))}\"\n emb = self._lyrics_embed(await ctx.embed_colour(), page, data)\n emb.set_footer(text=f\"{language} • Page {i} of {len(data['lyrics'])}\")\n embeds.append(emb)\n\n controls = {\"\\N{CROSS MARK}\": close_menu} if len(embeds) == 1 else DEFAULT_CONTROLS\n await menu(ctx, embeds, controls=controls, timeout=90.0)",
"async def discord(self, ctx):\n embed = discord.Embed(title='Join the discord today!', color=0x5643fd, description=\"This server is where \"\n \"all of \"\n \"NOVA's updates and \"\n \"important \"\n \"announcements will pass \"\n \"through. The creator of \"\n \"this \"\n \"bot, YeetVegetabales#5313, \"\n \"will also be there testing \"\n \"and letting the communtiy \"\n \"in \"\n \"on things first hand!\")\n embed.set_thumbnail(url='https://images-ext-2.discordapp.net/external/AQCEqCF4Yl_PWAfuA-GReZoDify6'\n '--y4hXOJVkqaDHo/%3Fsize%3D1024/https/cdn.discordapp.com/avatars/709922850953494598'\n '/f78ed19924e8c95abc30f406d47670d7.png')\n embed.add_field(name='Server Invite', value='<:news:730866149109137520> '\n '[Join here](https://discord.gg/Uqh9NXY)')\n await ctx.send(embed=embed)",
"async def message(description = None, **kwargs):\n if not kwargs.get(\"color\"):\n kwargs[\"color\"] = discord.Color(0x82b1ff)\n\n return discord.Embed(type = \"rich\",\n description = description,\n **kwargs)",
"async def prepembed(ctx, channel:discord.TextChannel, *, jsonInput):\n jso = json.loads(jsonInput)\n title = jso['title'] if 'title' in jso else \"\"\n desc = jso['description'] if 'description' in jso else \"\"\n titleUrl = jso['titleUrl'] if 'titleUrl' in jso else \"\"\n hexcolor = jso['hexColor'] if 'hexColor' in jso else \"#2E66B6\"\n webcolor = jso['webColor'] if 'webColor' in jso else \"\"\n thumbnailUrl = jso['thumbnailUrl'] if 'thumbnailUrl' in jso else \"\"\n authorName = jso['authorName'] if 'authorName' in jso else \"\"\n authorUrl = jso['authorUrl'] if 'authorUrl' in jso else \"\"\n authorIcon = jso['authorIcon'] if 'authorIcon' in jso else \"\"\n if 'author' in jso:\n authorName = ctx.message.author.name\n authorIcon = ctx.message.author.avatar_url_as(format=\"jpg\")\n fields = jso['fields'] if 'fields' in jso else \"\"\n footerText = jso['footerText'] if 'footerText' in jso else \"\"\n footerUrl = jso['footerUrl'] if 'footerUrl' in jso else \"\"\n imageUrl = jso['imageUrl'] if 'imageUrl' in jso else \"\"\n embed = assemble_embed(\n title=title,\n desc=desc,\n titleUrl=titleUrl,\n hexcolor=hexcolor,\n webcolor=webcolor,\n thumbnailUrl=thumbnailUrl,\n authorName=authorName,\n authorUrl=authorUrl,\n authorIcon=authorIcon,\n fields=fields,\n footerText=footerText,\n footerUrl=footerUrl,\n imageUrl=imageUrl\n )\n await channel.send(embed=embed)",
"def _info_embed(self, colour, data: Dict[str, Any]) -> discord.Embed:\n minutes = data.get(\"lengthSeconds\", 0) // 60\n seconds = data.get(\"lengthSeconds\", 0) % 60\n pub_date = self._parse_date(data.get(\"publishDate\"))\n all_artists = \", \".join(\n f\"[{x.get('name')}](https://vocadb.net/Ar/{x.get('id')}) ({x.get('categories')})\"\n for x in data.get(\"artists\")\n )\n embed = discord.Embed(colour=colour)\n embed.title = f\"{data.get('defaultName')} - {data.get('artistString')}\"\n embed.url = f\"https://vocadb.net/S/{data.get('id')}\"\n embed.set_thumbnail(url=data.get(\"thumbUrl\", \"\"))\n embed.add_field(name=\"Duration\", value=f\"{minutes} minutes, {seconds} seconds\")\n favorites, score = (data.get(\"favoritedTimes\", 0), data.get(\"ratingScore\", 0))\n embed.add_field(name=\"Published On\", value=pub_date)\n embed.add_field(name=\"Statistics\", value=f\"{favorites} favourite(s), {score} total score\")\n embed.add_field(name=\"Artist(s)\", value=all_artists)\n embed.set_footer(text=\"Powered by VocaDB\")\n return embed",
"def ExecuteEmbed(self):\r\n \r\n Embed = DiscordEmbed(title=\"Test Title 123\", \r\n description=\"Test Description 321\",\r\n color=\"eb5e34\") \r\n Embed.set_timestamp()\r\n \r\n self.WEBHOOK.add_embed(Embed)\r\n Execute = self.WEBHOOK.execute()",
"async def action(bot, msg):\n match = match_pattern.match(msg.clean_content)\n if match:\n term = get_term(match.groups()[0])\n if term:\n embed = Embed()\n embed.set_footer(text=term[\"example\"][:max_embed_footer_length])\n await bot.send_message(msg.channel, content=term[\"definition\"], embed=embed)\n else:\n await bot.send_message(msg.channel, \"¯\\\\_(ツ)_/¯\")",
"def embed(self, h, r, t):\n emb_h = self.ent_embeddings(h)\n emb_r = self.rel_embeddings(r)\n emb_t = self.ent_embeddings(t)\n return emb_h, emb_r, emb_t",
"def embed(self, h, r, t):\n emb_h = self.ent_embeddings(h)\n emb_r = self.rel_embeddings(r)\n emb_t = self.ent_embeddings(t)\n return emb_h, emb_r, emb_t",
"def embed(self, h, r, t):\n emb_h = self.ent_embeddings(h)\n emb_r = self.rel_embeddings(r)\n emb_t = self.ent_embeddings(t)\n return emb_h, emb_r, emb_t",
"async def snippet_to_embed(file_contents, file_path, start_line, end_line):\n\n split_file_contents = file_contents.splitlines()\n\n if start_line is None:\n start_line, end_line = 1, len(split_file_contents)\n elif end_line is None:\n start_line = end_line = int(start_line)\n else:\n start_line = int(start_line)\n end_line = int(end_line)\n\n if start_line > end_line:\n start_line, end_line = end_line, start_line\n if start_line > len(split_file_contents) or end_line < 1:\n return ''\n\n start_line = max(1, start_line)\n end_line = min(len(split_file_contents), end_line)\n\n required = '\\n'.join(split_file_contents[start_line - 1:end_line])\n required = textwrap.dedent(required).rstrip().replace('`', '`\\u200b')\n\n language = file_path.split('/')[-1].split('.')[-1]\n if not language.replace('-', '').replace('+', '').replace('_', '').isalnum():\n language = ''\n\n if start_line == end_line:\n ret = f'`{file_path}` line {start_line}\\n'\n else:\n ret = f'`{file_path}` lines {start_line} to {end_line}\\n'\n if len(required) != 0:\n return f'{ret}```{language}\\n{required}```\\n'\n return f'{ret}``` ```\\n'"
] | [
"0.70255935",
"0.63106394",
"0.62139446",
"0.61759347",
"0.60498273",
"0.60261035",
"0.5952487",
"0.59314024",
"0.5826032",
"0.5814283",
"0.57923996",
"0.5775373",
"0.57529444",
"0.5735012",
"0.56990874",
"0.56855226",
"0.56756103",
"0.5667003",
"0.56360406",
"0.5626547",
"0.5611735",
"0.5603666",
"0.560142",
"0.55933",
"0.5592721",
"0.5564208",
"0.55580825",
"0.55580825",
"0.55580825",
"0.5556579"
] | 0.7571321 | 0 |
Fuse conv and bn into one module. | def _fuse_conv_bn(conv, bn):
conv_w = conv.weight
conv_b = conv.bias if conv.bias is not None else torch.zeros_like(
bn.running_mean)
factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
conv.weight = nn.Parameter(conv_w *
factor.reshape([conv.out_channels, 1, 1, 1]))
conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)
return conv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fuse_conv_bn(module):\n last_conv = None\n last_conv_name = None\n\n for name, child in module.named_children():\n if isinstance(child,\n (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)):\n if last_conv is None: # only fuse BN that is after Conv\n continue\n fused_conv = _fuse_conv_bn(last_conv, child)\n module._modules[last_conv_name] = fused_conv\n # To reduce changes, set BN as Identity instead of deleting it.\n module._modules[name] = nn.Identity()\n last_conv = None\n elif isinstance(child, nn.Conv2d):\n last_conv = child\n last_conv_name = name\n else:\n fuse_conv_bn(child)\n return module",
"def __init__(self):\n\n super(ConvModule, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=[1, 2])\n self.conv1_bn = nn.BatchNorm2d(64)\n self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=[1, 2])\n self.conv2_bn = nn.BatchNorm2d(128)\n self.pool1 = nn.MaxPool2d(kernel_size=4, stride=2)\n self.dropout0 = nn.Dropout(p=0.4)\n\n self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=[1, 2])\n self.conv3_bn = nn.BatchNorm2d(256)\n self.conv4 = nn.Conv2d(in_channels=256, out_channels=64, kernel_size=3, stride=[1, 2])\n self.conv4_bn = nn.BatchNorm2d(64)\n self.pool2 = nn.MaxPool2d(kernel_size=4, stride=2)\n #\n # self.conv5 = nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=[1, 2])\n # self.conv5_bn = nn.BatchNorm2d(64)\n # self.pool3 = nn.MaxPool2d(kernel_size=3, stride=[1, 2])",
"def all_conv_ops(self):\n pass",
"def preprocess_module(mod):\n\n def alter_conv(attrs, inputs, tinfos, out_type):\n new_attrs = dict(attrs)\n data_info = tinfos[0]\n weight_info = tinfos[1]\n (desired_data_layout, desired_kernel_layout) = (\"NCHW\", \"OIHW\")\n new_attrs[\"data_layout\"] = desired_data_layout\n new_attrs[\"kernel_layout\"] = desired_kernel_layout\n\n if is_depthwise_conv2d(\n data_info.shape,\n attrs[\"data_layout\"],\n weight_info.shape,\n attrs[\"kernel_layout\"],\n attrs[\"groups\"],\n ):\n dkl = desired_kernel_layout\n new_attrs[\"kernel_layout\"] = dkl[1] + dkl[0] + dkl[2] + dkl[3]\n return relay.nn.conv2d(*inputs, **new_attrs)\n\n with OpAttrContext(\"nn.conv2d\", \"FTVMAlterOpLayout\", alter_conv):\n seq = tvm.transform.Sequential(\n [\n transform.ConvertLayout({\"nn.conv2d\": [\"NCHW\", \"OIHW\"]}),\n transform.ConvertLayout({\"nn.conv2d_transpose\": [\"NCHW\", \"OIHW\"]}),\n transform.AlterOpLayout(),\n transform.FoldConstant(),\n ]\n )\n with tvm.transform.PassContext(opt_level=3):\n preprocessed_mod = seq(mod)\n return preprocessed_mod",
"def split_conv_module(cls, model: tf.keras.Model, layer: tf.keras.layers, rank, svd_lib_ref) \\\n -> (tf.keras.layers.Conv2D, tf.keras.layers.Conv2D):\n\n name = layer.name\n logger.debug('Splitting conv op: %s with rank %d', name, rank)\n split_weights, weight_sizes = [], []\n split_biases, bias_sizes = [], []\n bias_present = False\n\n conv_parameters = layer.get_weights()\n if len(conv_parameters) > 1:\n bias_present = True\n\n _, _, in_channels, out_channels = conv_parameters[0].shape\n data_format_channels = layer.data_format\n padding = layer.padding\n\n # TF weights are in [H,W,I,O] order. We must reshape the split weights to SVD format [O,I,H,W]\n # and then transpose back\n\n conv_a_weight_shape = (rank, in_channels, 1, 1)\n conv_a_weight = np.zeros(conv_a_weight_shape)\n\n split_weights.append(conv_a_weight.flatten().tolist())\n weight_sizes.append(conv_a_weight.size)\n\n conv_b_weight_shape = (out_channels, rank, *layer.kernel_size)\n conv_b_weight = np.zeros(conv_b_weight_shape)\n\n split_weights.append(conv_b_weight.flatten().tolist())\n weight_sizes.append(conv_b_weight.size)\n\n split_weights = svd_lib_ref.SplitLayerWeights(str(name), split_weights, weight_sizes,\n [rank])\n\n if bias_present:\n conv_a_bias = np.zeros(rank)\n split_biases.append(conv_a_bias.flatten().tolist())\n bias_sizes.append(conv_a_bias.size)\n\n conv_b_bias = np.zeros(out_channels)\n split_biases.append(conv_b_bias.flatten().tolist())\n bias_sizes.append(conv_b_bias.size)\n\n split_biases = svd_lib_ref.SplitLayerBiases(str(name), split_biases, bias_sizes,\n [rank])\n\n logger.debug(\"Splitting conv module weight of shape %r into %r and %r\",\n conv_parameters[0].shape, conv_a_weight.shape, conv_b_weight.shape)\n\n conv_a = tf.keras.layers.Conv2D(filters=rank, kernel_size=(1, 1),\n strides=(1, 1), data_format=data_format_channels,\n activation=None, padding=padding,\n name=layer.name + '_a', use_bias=bias_present)\n\n conv_b = tf.keras.layers.Conv2D(filters=out_channels, kernel_size=layer.kernel_size,\n strides=layer.strides,\n name=layer.name + '_b',\n data_format=data_format_channels, padding=padding, use_bias=bias_present)\n\n # Replace the layer in the model\n replace_layer_in_functional_model(model, layer, [conv_a, conv_b])\n\n if bias_present:\n conv_a.set_weights([np.array(split_weights[0], dtype=np.float32).reshape(conv_a_weight_shape).transpose(2, 3, 1, 0),\n np.array(split_biases[0], dtype=np.float32)])\n conv_b.set_weights([np.array(split_weights[1], dtype=np.float32).reshape(conv_b_weight_shape).transpose(2, 3, 1, 0),\n np.array(split_biases[1], dtype=np.float32)])\n else:\n conv_a.set_weights([np.array(split_weights[0], dtype=np.float32).reshape(conv_a_weight_shape).transpose(2, 3, 1, 0)])\n conv_b.set_weights([np.array(split_weights[1], dtype=np.float32).reshape(conv_b_weight_shape).transpose(2, 3, 1, 0)])\n\n return conv_a, conv_b",
"def fuse_model(self):\n\n for m in self.modules():\n if type(m) == QuantizableBasicConv2d:\n m.fuse_model()",
"def forward(self, x): \n pal1_sources = list()\n pal2_sources = list()\n loc_pal1 = list()\n conf_pal1 = list()\n loc_pal2 = list()\n conf_pal2 = list()\n\n # apply vgg up to conv3_3 relu\n for k in range(16):\n x = self.vgg[k](x)\n\n of1 = x\n s = self.L2Normof1(of1)\n pal1_sources.append(s)\n \n # apply vgg up to conv4_3 relu\n for k in range(16, 23):\n x = self.vgg[k](x)\n\n of2 = x\n s = self.L2Normof2(of2)\n pal1_sources.append(s)\n\n # apply vgg up to conv5_3 relu\n for k in range(23, 30):\n x = self.vgg[k](x)\n of3 = x\n s = self.L2Normof3(of3)\n pal1_sources.append(s)\n\n # apply vgg up to fc7\n for k in range(30, len(self.vgg)):\n x = self.vgg[k](x)\n of4 = x\n pal1_sources.append(of4)\n \n # apply extra layers and cache source layer outputs\n for k in range(2):\n x = F.relu(self.extras[k](x), inplace=True)\n of5 = x\n pal1_sources.append(of5)\n for k in range(2, 4):\n x = F.relu(self.extras[k](x), inplace=True)\n of6 = x\n pal1_sources.append(of6)\n\n ## fpn module\n \"\"\"\n lfpn6 = self.fpn_topdown6(of6)\n lfpn5 = self._upsample_product(self.fpn_topdown5(of6), self.fpn_latlayer5(of5))\n lfpn4 = self._upsample_product(self.fpn_topdown4(of5), self.fpn_latlayer4(of4))\n lfpn3 = self._upsample_product(self.fpn_topdown3(of4), self.fpn_latlayer3(of3))\n lfpn2 = self._upsample_product(self.fpn_topdown2(of3), self.fpn_latlayer2(of2))\n lfpn1 = self._upsample_product(self.fpn_topdown1(of2), self.fpn_latlayer1(of1))\n\n\n ef1 = self.fpn_fem3_3(lfpn1)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem4_3(lfpn2)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem5_3(lfpn3)\n ef3 = self.L2Normef3(ef3)\n\n ef4 = self.fpn_fem7(lfpn4)\n ef5 = self.fpn_fem6_2(lfpn5)\n ef6 = self.fpn_fem7_2(lfpn6)\n \"\"\"\n\n conv7 = F.relu(self.fpn_topdown[0](of6), inplace=True)\n x = F.relu(self.fpn_topdown[1](conv7), inplace=True)\n conv6 = F.relu(self._upsample_product(x, self.fpn_latlayer[0](of5)), inplace=True)\n\n x = F.relu(self.fpn_topdown[2](conv6), inplace=True)\n convfc7_2 = F.relu(self._upsample_product(x, self.fpn_latlayer[1](of4)), inplace=True)\n\n x = F.relu(self.fpn_topdown[3](convfc7_2), inplace=True)\n conv5 = F.relu(self._upsample_product(x, self.fpn_latlayer[2](of3)), inplace=True)\n\n x = F.relu(self.fpn_topdown[4](conv5), inplace=True)\n conv4 = F.relu(self._upsample_product(x, self.fpn_latlayer[3](of2)), inplace=True)\n\n x = F.relu(self.fpn_topdown[5](conv4), inplace=True)\n conv3 = F.relu(self._upsample_product(x, self.fpn_latlayer[4](of1)), inplace=True)\n\n ef1 = self.fpn_fem[0](conv3)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem[1](conv4)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem[2](conv5)\n ef3 = self.L2Normef3(ef3)\n ef4 = self.fpn_fem[3](convfc7_2)\n ef5 = self.fpn_fem[4](conv6)\n ef6 = self.fpn_fem[5](conv7)\n\n pal2_sources = (ef1, ef2, ef3, ef4, ef5, ef6)\n\n ## first shot \n for (x, l, c) in zip(pal1_sources, self.loc_pal1, self.conf_pal1):\n loc_pal1.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal1.append(c(x).permute(0, 2, 3, 1).contiguous())\n \n ## second shot\n for (x, l, c) in zip(pal2_sources, self.loc_pal2, self.conf_pal2):\n loc_pal2.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal2.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n # first shot\n loc_pal1 = torch.cat([o.view(o.size(0), -1) for o in loc_pal1], 1)\n conf_pal1 = torch.cat([o.view(o.size(0), -1) for o in conf_pal1], 1)\n \n # second shot\n loc_pal2 = torch.cat([o.view(o.size(0), -1) for o in loc_pal2], 1)\n conf_pal2 = torch.cat([o.view(o.size(0), -1) for o in conf_pal2], 1)\n\n 
if self.phase == 'test':\n # 测试时, 仅使用shot2 的输出\n output = self.detect(\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n self.softmax(conf_pal2.view(conf_pal2.size(0), -1,\n self.num_classes)), # conf preds\n )\n else:\n ## 训练时,使用shot1 和 shot2 的输出\n output = (\n loc_pal1.view(loc_pal1.size(0), -1, 4),\n conf_pal1.view(conf_pal1.size(0), -1, self.num_classes),\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n conf_pal2.view(conf_pal2.size(0), -1, self.num_classes))\n return output",
"def __init__(self, nfeat, nhid, nclass, dropout, alpha):\n super(GCN, self).__init__()\n self.dropout = dropout\n\n self.conv1 = GraphConvolutionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, not_final=True)\n \n self.add_module('conv1', self.conv1)\n\n self.conv2 = GraphConvolutionLayer(nhid, nclass, dropout=dropout, alpha=alpha, not_final=False)",
"def __init__(self):\n super(Backbone, self).__init__()\n\n # input size: (128, 282, 282)\n # Block 1:\n # relu + 4 conv + bn\n self.conv1 = torch.nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=0)\n self.conv2 = torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0)\n self.conv3 = torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0)\n self.conv4 = torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0)\n\n self.bn1 = torch.nn.BatchNorm2d(64)\n self.bn2 = torch.nn.BatchNorm2d(64)\n self.bn3 = torch.nn.BatchNorm2d(64)\n self.bn4 = torch.nn.BatchNorm2d(64)\n\n # Block 2:\n # relu + 6 conv + stride 2 + bn\n self.conv5 = torch.nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=0)\n self.conv6 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=0)\n self.conv7 = torch.nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=0)\n self.conv8 = torch.nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=0)\n self.conv9 = torch.nn.Conv2d(32, 16, kernel_size=3, stride=1, padding=0)\n self.conv10 = torch.nn.Conv2d(16, 8, kernel_size=3, stride=1, padding=0)\n\n self.bn5 = torch.nn.BatchNorm2d(128)\n self.bn6 = torch.nn.BatchNorm2d(128)\n self.bn7 = torch.nn.BatchNorm2d(64)\n self.bn8 = torch.nn.BatchNorm2d(32)\n self.bn9 = torch.nn.BatchNorm2d(16)\n self.bn10 = torch.nn.BatchNorm2d(8)\n\n # Block 3:\n # 2 fully connected with drop out.\n\n self.fc1 = torch.nn.Linear( 8 * 59 * 59, 32)\n self.fc1_bn = torch.nn.BatchNorm1d(32)\n self.fc_out = torch.nn.Linear(32, 3)",
"def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net",
"def forward(self, inputs, **kwargs):\n x = self.ff_module1(inputs)\n x = self.mha_module(x, **kwargs)\n x = self.conv_module(x)\n x = self.ff_module2(x)\n\n return x",
"def conv_bn(in_channels:int, out_channels:int, conv, *args, **kwargs):\n # Convolutionnal layer combined with a batch normalisation\n return nn.Sequential(conv(in_channels, out_channels, *args, **kwargs), nn.BatchNorm2d(out_channels))",
"def convert_network(network, dtype, convert_bn):\n for module in network.modules():\n if not convert_bn and isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:\n continue\n apex.fp16_utils.convert_module(module, dtype)\n\n return network",
"def __conv_block(self, x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):\n\t\teps = 1.1e-5\n\t\tconv_name_base = \"conv\" + str(stage) + \"_\" + str(branch)\n\t\trelu_name_base = \"relu\" + str(stage) + \"_\" + str(branch)\n\n\t\t# 1x1 Convolution (Bottleneck layer)\n\t\tinter_channel = nb_filter * 4 \n\t\tx = BatchNormalization(epsilon=eps, axis=self.concat_axis, name=conv_name_base+\"_x1_bn\")(x)\n\t\tx = Scale(axis=self.concat_axis, name=conv_name_base+\"_x1_scale\")(x)\n\t\tx = Activation(\"relu\", name=relu_name_base+\"_x1\")(x)\n\t\tx = Conv2D(inter_channel, (1, 1), name=conv_name_base+\"_x1\", use_bias=False)(x)\n\n\t\tif dropout_rate:\n\t\t\tx = Dropout(dropout_rate)(x)\n\n\t\t# 3x3 Convolution\n\t\tx = BatchNormalization(epsilon=eps, axis=self.concat_axis, name=conv_name_base+\"_x2_bn\")(x)\n\t\tx = Scale(axis=self.concat_axis, name=conv_name_base+\"_x2_scale\")(x)\n\t\tx = Activation(\"relu\", name=relu_name_base+\"_x2\")(x)\n\t\tx = ZeroPadding2D((1, 1), name=conv_name_base+\"_x2_zeropadding\")(x)\n\t\tx = Conv2D(nb_filter, (3, 3), name=conv_name_base+\"_x2\", use_bias=False)(x)\n\n\t\tif dropout_rate:\n\t\t\tx = Dropout(dropout_rate)(x)\n\n\t\treturn x",
"def _wrap_modules(self, layer: LayerInfo, config: Dict):\n _logger.debug(\"Module detected to compress : %s.\", layer.name)\n assert self.bound_model is not None\n # TODO: merge with _create_scalers after nni v3.0\n if self.sparse_granularity and self.sparse_granularity == 'auto' and self._model_parser:\n if self._model_parser.is_attention(layer.name):\n num_heads = self._model_parser.get_num_heads(layer.name, self.bound_model)\n if num_heads <= 0:\n score_size = None\n else:\n if layer.module.weight.shape[0] % num_heads != 0 or layer.module.weight.shape[1] % num_heads != 0: # type: ignore\n score_size = None\n else:\n score_size = [num_heads, num_heads]\n elif self._model_parser.is_ffn(layer.name, ffn_num=1):\n score_size = [layer.module.weight.shape[0], 1] # type: ignore\n elif self._model_parser.is_ffn(layer.name, ffn_num=2):\n score_size = [1, layer.module.weight.shape[1]] # type: ignore\n else:\n score_size = None\n else:\n score_size = None\n wrapper = PrunerScoredModuleWrapper(layer.module, layer.name, config, score_size)\n assert hasattr(layer.module, 'weight'), \"module %s does not have 'weight' attribute\" % layer.name\n # move newly registered buffers to the same device of weight\n wrapper.to(layer.module.weight.device) # type: ignore\n return wrapper",
"def forward(self, x):\n sources = list()\n tcb_source = list()\n odm_loc = list()\n odm_conf = list()\n if self.phase == 'test':\n feat_sizes = list()\n\n # apply vgg up to conv4_3 relu and conv5_3 relu\n for k in range(self.conv5_3_layer):\n x = self.vgg[k](x)\n if self.size != 512 and self.size != 320 and self.conv3_3_layer - 1 == k:\n s = self.conv3_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n if self.conv4_3_layer - 1 == k:\n s = self.conv4_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n elif self.conv5_3_layer - 1 == k:\n s = self.conv5_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # apply vgg up to fc7\n for k in range(self.conv5_3_layer, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # apply extra layers and cache source layer outputs\n for k in range(len(self.extras)):\n x = self.extras[k](x)\n if self.extra_1_layer - 1 == k:\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n if (self.size == 640 or self.size == 5126) and self.extra_2_layer - 1 == k:\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # calculate TCB features\n p = None\n for k, v in enumerate(sources[::-1]):\n s = v\n for i in range(3):\n s = self.tcb0[(self.step-k)*3 + i](s)\n if k != 0:\n u = p\n u = self.tcb1[self.step-k](u)\n s += u\n for i in range(3):\n s = self.tcb2[(self.step-k)*3 + i](s)\n p = s\n tcb_source.append(s)\n tcb_source.reverse()\n\n # apply ODM to source layers\n for (x, l, c) in zip(tcb_source, self.odm_loc, self.odm_conf):\n odm_loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n odm_conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n odm_loc = torch.cat([o.view(o.size(0), -1) for o in odm_loc], 1)\n odm_conf = torch.cat([o.view(o.size(0), -1) for o in odm_conf], 1)\n\n if self.phase == \"test\":\n output = (\n odm_loc.view(odm_loc.size(0), -1, 4), # odm loc preds\n self.softmax(odm_conf.view(odm_conf.size(0), -1,\n self.num_classes)), # odm conf preds\n feat_sizes\n )\n else:\n output = (\n odm_loc.view(odm_loc.size(0), -1, 4),\n odm_conf.view(odm_conf.size(0), -1, self.num_classes),\n )\n return output",
"def build_modules(self):\n self.backbone = Backbone(\n self.configs['backbone'],\n freeze_backbone=self.configs['freeze_backbone'],\n freeze_batchnorm=True\n )\n\n backbone_channel_sizes = get_backbone_channel_sizes(self.backbone)\n\n self.fpn = FeaturePyramidNetwork(\n backbone_channel_sizes=backbone_channel_sizes,\n min_feature_level=self.configs['min_feature_level'],\n max_feature_level=self.configs['max_feature_level'],\n feature_size=self.configs['pyramid_feature_size']\n )\n\n self.shared_conv_model = SharedConvModel(\n input_feature_size=self.configs['pyramid_feature_size'],\n feature_size=self.configs['shared_conv_feature_size'],\n num_layers=self.configs['shared_conv_num_layers']\n )\n\n if self.configs['shared_conv_num_layers'] > 0:\n shared_conv_output_size = self.configs['shared_conv_feature_size']\n else:\n shared_conv_output_size = self.configs['pyramid_feature_size']\n\n self.ofn = ObjectFinderNetwork(\n input_feature_size=shared_conv_output_size,\n feature_size=self.configs['finder_feature_size'],\n num_layers=self.configs['finder_num_layers']\n )\n\n self.ofn_loss_fn\n\n # self.classification_model = ClassificationModel()\n #\n # self.regression_model = RegressionModel()",
"def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n x = self.pool1(F.relu(self.batch1(self.conv1(x))))\n x = self.pool2(F.relu(self.batch2(self.conv2(x))))\n x = F.relu(self.batch3a(self.conv3a(x)))\n x = self.pool3(F.relu(self.batch3b(self.conv3b(x))))\n x = F.relu(self.batch4a(self.conv4a(x)))\n x = self.pool4(F.relu(self.batch4b(self.conv4b(x))))\n x = F.relu(self.batch5a(self.conv5a(x)))\n x = self.pool5(F.relu(self.batch5b(self.conv5b(x))))\n x = self.avgpool(x)\n x = x.reshape(x.shape[0], -1)\n out = self.fc1(x)\n\n# raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out",
"def __init__(self):\n #conv1\n n = inp_width*inp_height\n #poczatkowe wagi sieci sa ustalane losowo z rozkladu normalnego. Umieszczane sa one na liscie matryc wag\n self.Weights = [np.random.randn(layers[0][1],inp_channels,layers[0][2],layers[0][2])/np.sqrt(n)]\n out_Size = inp_width - layers[0][2] + 1 #zmienna zawiera rozmiar wyjscia danej warstwy\n #inicjalizacja progow \n self.Biases = [initBias*np.ones( layers[0][1] )]\n #przypisanie parametrow warstwie poolingu\n self.poolParams = [(layers[1][1], layers[1][2])]\n out_Size = out_Size/2 \n #conv 2\n n = out_Size*out_Size*layers[0][1]\n self.Weights.append(np.random.randn(layers[2][1],layers[0][1],layers[2][2],layers[2][2])/np.sqrt(n))\n out_Size = out_Size - layers[2][2]+1\n self.Biases.append(initBias*np.ones(layers[2][1]))\n #pool 2\n self.poolParams.append((layers[3][1],layers[3][2]))\n out_Size = out_Size/2 \n #conv 3\n n = out_Size*out_Size*layers[2][1]\n self.Weights.append(np.random.randn(layers[4][1],layers[2][1],out_Size,out_Size)/np.sqrt(n))\n out_Size = 1\n self.Biases.append(initBias*np.ones(layers[4][1]))\n #fully connected 1\n n = layers[4][1]\n self.Weights.append(np.random.randn(layers[5][1],layers[4][1])/np.sqrt(n))\n self.Biases.append(initBias*np.ones(layers[5][1]))\n #fully connected 2\n n = layers[5][1]\n self.Weights.append(np.random.randn(layers[6][1],layers[5][1])/np.sqrt(n))\n self.Biases.append(initBias*np.ones(layers[6][1]))\n\n self.Weights = np.asarray(self.Weights)\n self.Biases = np.asarray(self.Biases)\n \n delta_W = []\n delta_B = []\n for i in range(5):\n delta_W.append(np.zeros(self.Weights[i].shape))\n delta_B.append(np.zeros(self.Biases[i].shape))\n self.delta_W = np.asarray(delta_W)\n self.delta_B = np.asarray(delta_B)",
"def _conv_bn(**conv_params):\n filters = conv_params[\"filters\"]\n kernel_size = conv_params[\"kernel_size\"]\n strides = conv_params.setdefault(\"strides\", (1, 1))\n kernel_initializer = conv_params.setdefault(\"kernel_initializer\", \"he_normal\")\n padding = conv_params.setdefault(\"padding\", \"same\")\n kernel_regularizer = conv_params.setdefault(\"kernel_regularizer\", l2(1.e-4))\n name = conv_params[\"name\"]\n\n def f(input):\n x = Conv2D(filters=filters, kernel_size=kernel_size,\n strides=strides, padding=padding,\n kernel_initializer=kernel_initializer,\n kernel_regularizer=kernel_regularizer,\n name=name + '_conv')(input)\n x = BatchNormalization(axis=CHANNEL_AXIS, name=name + '_bn')(x)\n return x \n return f",
"def dir_conv_block(model, nb_layers, nb_filters, rate):\n\n for _ in range(nb_layers):\n model, rate = dir_conv_layer(model, nb_filters, rate)\n model = MaxPooling1D()(model)\n model = Dropout(0.1)(model)\n return model, rate",
"def arch(self, nn) -> 'final node of the tensor flow graph (y_conv)':\n\n print(self)\n\n # first conv. layer \n # 5x5 filter, 1 input channel, 32 output channels\n W_conv1 = nn.weight_variable([5, 5, 1, 32])\n b_conv1 = nn.bias_variable([32])\n stride1 = 1\n h_conv1 = tf.nn.relu(nn.conv2d(nn.x_image, W_conv1, stride1, 'VALID') + b_conv1) \n # outputs a 24x24x32 image\n \n # first pooling layer (2x2) \n h_pool1 = nn.max_pool_2x2(h_conv1) \n # outputs a 12x12x32 image\n\n # second conv. layer \n # 3x3 filter, 32 input channel, 32 output channels\n W_conv2 = nn.weight_variable([3, 3, 32, 32])\n b_conv2 = nn.bias_variable([32])\n stride2 = 1\n h_conv2 = tf.nn.relu(nn.conv2d(h_pool1, W_conv2, stride2, 'VALID') + b_conv2)\n # outputs a 10x10x32 image\n\n # third conv. layer\n # 3x3 filter, 32 input channel, 32 output channels\n W_conv3 = nn.weight_variable([3, 3, 32, 32])\n b_conv3 = nn.bias_variable([32])\n stride3 = 1\n h_conv3 = tf.nn.relu(nn.conv2d(h_conv2, W_conv3, stride3, 'VALID') + b_conv3)\n # outputs a 8x8x32 image\n\n # reshape (flatten) output\n h_conv3_flat = tf.reshape(h_conv3, [-1, 8*8*32])\n\n # first fully connected layer\n W_fc1 = nn.weight_variable([8 * 8 * 32, 1024])\n b_fc1 = nn.bias_variable([1024])\n h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)\n\n # dropout\n h_fc1_drop = tf.nn.dropout(h_fc1, nn.keep_prob)\n\n # second (final) fully connected layer (softmax)\n W_fc2 = nn.weight_variable([1024, 10])\n b_fc2 = nn.bias_variable([10])\n y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n return y_conv",
"def forward(self, x, vars=None, bn_training=True):\n\n if vars is None:\n vars = self.vars\n\n idx = 0\n bn_idx = 0\n\n for name, param in self.config:\n if name is 'conv2d':\n w, b = vars[idx], vars[idx + 1]\n # remember to keep synchrozied of forward_encoder and forward_decoder!\n x = F.conv2d(x, w, b, stride=param[4], padding=param[5])\n idx += 2\n # print(name, param, '\\tout:', x.shape)\n elif name is 'convt2d':\n w, b = vars[idx], vars[idx + 1]\n # remember to keep synchrozied of forward_encoder and forward_decoder!\n x = F.conv_transpose2d(x, w, b, stride=param[4], padding=param[5])\n idx += 2\n # print(name, param, '\\tout:', x.shape)\n elif name is 'linear':\n w, b = vars[idx], vars[idx + 1]\n o = F.linear(x, w, b)\n idx += 2\n # print('forward:', idx, x.norm().item())\n elif name is 'bn':\n w, b = vars[idx], vars[idx + 1]\n running_mean, running_var = self.vars_bn_mean[bn_idx], self.vars_bn_var[bn_idx]\n x = F.batch_norm(x, running_mean, running_var, weight=w, bias=b, training=bn_training)\n idx += 2\n bn_idx += 1\n\n elif name is 'flatten':\n x = x.reshape(((x.shape)[0], -1))\n elif name is 'reshape':\n # [b, 8] => [b, 2, 2, 2]\n x = x.view(x.size(0), *param)\n elif name is 'relu':\n x = F.relu(x)\n elif name is 'leakyrelu':\n x = F.leaky_relu(x, negative_slope=param[0], inplace=param[1])\n elif name is 'tanh':\n x = F.tanh(x)\n elif name is 'sigmoid':\n x = F.sigmoid(x)\n elif name is 'upsample':\n x = F.upsample_nearest(x, scale_factor=param[0])\n elif name is 'max_pool2d':\n x = F.max_pool2d(x, param[0], param[1], param[2])\n elif name is 'avg_pool2d':\n x = F.avg_pool2d(x, param[0], param[1], param[2])\n\n else:\n raise NotImplementedError\n\n # make sure variable is used properly\n assert idx == len(vars)\n assert bn_idx == len(self.vars_bn_mean)\n\n\n return o",
"def __init__(\n self,\n spatial_dims: int,\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n stride: int,\n image_size: list[int],\n expand_ratio: int,\n se_ratio: float | None,\n id_skip: bool | None = True,\n norm: str | tuple = (\"batch\", {\"eps\": 1e-3, \"momentum\": 0.01}),\n drop_connect_rate: float | None = 0.2,\n ) -> None:\n super().__init__()\n\n # select the type of N-Dimensional layers to use\n # these are based on spatial dims and selected from MONAI factories\n conv_type = Conv[\"conv\", spatial_dims]\n adaptivepool_type = Pool[\"adaptiveavg\", spatial_dims]\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.id_skip = id_skip\n self.stride = stride\n self.expand_ratio = expand_ratio\n self.drop_connect_rate = drop_connect_rate\n\n if (se_ratio is not None) and (0.0 < se_ratio <= 1.0):\n self.has_se = True\n self.se_ratio = se_ratio\n else:\n self.has_se = False\n\n # Expansion phase (Inverted Bottleneck)\n inp = in_channels # number of input channels\n oup = in_channels * expand_ratio # number of output channels\n if self.expand_ratio != 1:\n self._expand_conv = conv_type(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)\n self._expand_conv_padding = _make_same_padder(self._expand_conv, image_size)\n\n self._bn0 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=oup)\n else:\n # need to have the following to fix JIT error:\n # \"Module 'MBConvBlock' has no attribute '_expand_conv'\"\n\n # FIXME: find a better way to bypass JIT error\n self._expand_conv = nn.Identity()\n self._expand_conv_padding = nn.Identity()\n self._bn0 = nn.Identity()\n\n # Depthwise convolution phase\n self._depthwise_conv = conv_type(\n in_channels=oup,\n out_channels=oup,\n groups=oup, # groups makes it depthwise\n kernel_size=kernel_size,\n stride=self.stride,\n bias=False,\n )\n self._depthwise_conv_padding = _make_same_padder(self._depthwise_conv, image_size)\n self._bn1 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=oup)\n image_size = _calculate_output_image_size(image_size, self.stride)\n\n # Squeeze and Excitation layer, if desired\n if self.has_se:\n self._se_adaptpool = adaptivepool_type(1)\n num_squeezed_channels = max(1, int(in_channels * self.se_ratio))\n self._se_reduce = conv_type(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)\n self._se_reduce_padding = _make_same_padder(self._se_reduce, [1] * spatial_dims)\n self._se_expand = conv_type(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)\n self._se_expand_padding = _make_same_padder(self._se_expand, [1] * spatial_dims)\n\n # Pointwise convolution phase\n final_oup = out_channels\n self._project_conv = conv_type(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)\n self._project_conv_padding = _make_same_padder(self._project_conv, image_size)\n self._bn2 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=final_oup)\n\n # swish activation to use - using memory efficient swish by default\n # can be switched to normal swish using self.set_swish() function call\n self._swish = Act[\"memswish\"](inplace=True)",
"def __init__(self):\n torch.nn.Module.__init__(self)\n ######################### Convolution and pooling layers of VGG-16.\n self.features = torchvision.models.vgg16(pretrained=True).features # fine tune?\n self.features = torch.nn.Sequential(*list(self.features.children())\n [:-22]) # Remove pool2 and rest, lack of computational resource\n # No grad for convVGG\n # for param in self.features.parameters():\n # param.requires_grad = False\n\n #################### Channel Grouping Net\n # self.fc1_ = torch.nn.Linear(128, 128*16)#lack of resource\n # self.fc2_ = torch.nn.Linear(128, 128*16)\n # self.fc3_ = torch.nn.Linear(128, 128*16)\n #\n # torch.nn.init.kaiming_normal_(self.fc1_.weight.data, nonlinearity='relu')\n # if self.fc1_.bias is not None:\n # torch.nn.init.constant_(self.fc1_.bias.data, val=0) # fc层的bias进行constant初始化\n # torch.nn.init.kaiming_normal_(self.fc2_.weight.data, nonlinearity='relu')\n # if self.fc2_.bias is not None:\n # torch.nn.init.constant_(self.fc2_.bias.data, val=0) # fc层的bias进行constant初始化\n # torch.nn.init.kaiming_normal_(self.fc3_.weight.data, nonlinearity='relu')\n # if self.fc3_.bias is not None:\n # torch.nn.init.constant_(self.fc3_.bias.data, val=0) # fc层的bias进行constant初始化\n\n self.fc1 = torch.nn.Linear(128*28*28, 128)\n self.fc2 = torch.nn.Linear(128*28*28, 128)\n self.fc3 = torch.nn.Linear(128*28*28, 128)\n\n\n torch.nn.init.kaiming_normal_(self.fc1.weight.data, nonlinearity='relu')\n if self.fc1.bias is not None:\n torch.nn.init.constant_(self.fc1.bias.data, val=0) # fc层的bias进行constant初始化\n torch.nn.init.kaiming_normal_(self.fc2.weight.data, nonlinearity='relu')\n if self.fc2.bias is not None:\n torch.nn.init.constant_(self.fc2.bias.data, val=0) # fc层的bias进行constant初始化\n torch.nn.init.kaiming_normal_(self.fc3.weight.data, nonlinearity='relu')\n if self.fc3.bias is not None:\n torch.nn.init.constant_(self.fc3.bias.data, val=0) # fc层的bias进行constant初始化\n\n self.layerNorm=nn.LayerNorm([224,224])\n\n # global grad for hook\n self.image_reconstruction = None\n self.register_hooks()\n self.GradWeight=1e-1\n\n # ################### STN input N*3*448*448\n # self.localization = [\n # nn.Sequential(\n # nn.MaxPool2d(4,stride=4),#112\n # nn.ReLU(True),\n #\n # nn.Conv2d(3, 32, kernel_size=5,stride=1,padding=2), # 112\n # nn.MaxPool2d(2, stride=2), # 56\n # nn.ReLU(True),\n #\n # nn.Conv2d(32, 48, kernel_size=3,stride=1,padding=1),\n # nn.MaxPool2d(2, stride=2), # 56/2=28\n # nn.ReLU(True),\n #\n # nn.Conv2d(48, 64, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 28/2=14\n # nn.ReLU(True) #output 64*14*14\n # ).cuda(),\n # nn.Sequential(\n # nn.MaxPool2d(4, stride=4), # 112\n # nn.ReLU(True),\n #\n # nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2), # 112\n # nn.MaxPool2d(2, stride=2), # 56\n # nn.ReLU(True),\n #\n # nn.Conv2d(32, 48, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 56/2=28\n # nn.ReLU(True),\n #\n # nn.Conv2d(48, 64, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 28/2=14\n # nn.ReLU(True) # output 64*14*14\n # ).cuda(),\n # nn.Sequential(\n # nn.MaxPool2d(4, stride=4), # 112\n # nn.ReLU(True),\n #\n # nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2), # 112\n # nn.MaxPool2d(2, stride=2), # 56\n # nn.ReLU(True),\n #\n # nn.Conv2d(32, 48, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 56/2=28\n # nn.ReLU(True),\n #\n # nn.Conv2d(48, 64, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 28/2=14\n # nn.ReLU(True) # output 64*14*14\n # 
).cuda()\n # ]\n # # Regressor for the 3 * 2 affine matrix\n # self.fc_loc = [\n # nn.Sequential(\n # nn.Linear(64 * 14 * 14, 32),\n # nn.ReLU(True),\n # nn.Linear(32, 3 * 2)\n # ).cuda(),\n # nn.Sequential(\n # nn.Linear(64 * 14 * 14, 32),\n # nn.ReLU(True),\n # nn.Linear(32, 3 * 2)\n # ).cuda(),\n # nn.Sequential(\n # nn.Linear(64 * 14 * 14, 32),\n # nn.ReLU(True),\n # nn.Linear(32, 3 * 2)\n # ).cuda()\n # ]\n # # Initialize the weights/bias with identity transformation\n # for fc_locx in self.fc_loc:\n # fc_locx[2].weight.data.zero_()\n # fc_locx[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))\n\n ########################Bilinear CNN output 256 channels\n self.bcnnConv_1=torch.nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features.children())\n [:-1]) # Remove pool3 and rest.\n self.bcnnConv_2 = torch.nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features.children())\n [:-1]) # Remove pool3 and rest.\n self.bcnnConv_3 = torch.nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features.children())\n [:-1]) # Remove pool3 and rest.\n #BCNN Linear classifier.\n self.bfc1 = torch.nn.Linear(512*512, 200)\n self.bfc2 = torch.nn.Linear(512*512, 200)\n self.bfc3 = torch.nn.Linear(512*512, 200)\n torch.nn.init.kaiming_normal_(self.bfc1.weight.data) # 何凯明初始化\n if self.bfc1.bias is not None:\n torch.nn.init.constant_(self.bfc1.bias.data, val=0) # fc层的bias进行constant初始化\n torch.nn.init.kaiming_normal_(self.bfc2.weight.data) # 何凯明初始化\n if self.bfc2.bias is not None:\n torch.nn.init.constant_(self.bfc2.bias.data, val=0) # fc层的bias进行constant初始化\n torch.nn.init.kaiming_normal_(self.bfc3.weight.data) # 何凯明初始化\n if self.bfc3.bias is not None:\n torch.nn.init.constant_(self.bfc3.bias.data, val=0) # fc层的bias进行constant初始化\n\n # self.CBP1 = CompactBilinearPooling(512, 512, 50000)\n # self.CBP2 = CompactBilinearPooling(512, 512, 50000)\n # self.CBP3 = CompactBilinearPooling(512, 512, 50000)",
"def __init__(self, options):\r\n nn.Module.__init__(self)\r\n # Convolution and pooling layers of VGG-16.\r\n self.basemodel = torchvision.models.resnet18(pretrained=True)\r\n self.options = options\r\n\r\n #label\r\n self.label_primary = nn.Linear(options['primary_dim'], options['proj_dim'])\r\n self.label_dual = nn.Linear(options['dual_dim'], options['proj_dim'])\r\n\r\n #classifer/regressor\r\n self.fc_primary = nn.Linear(512 + options['proj_dim'], options['primary_dim'])\r\n self.fc_dual = nn.Linear(512 + options['proj_dim'], options['dual_dim'])\r\n\r\n\r\n if self.options['fc'] == True:\r\n # Freeze all previous layers.\r\n for param in self.basemodel.parameters():\r\n param.requires_grad = False\r\n # Initialize the fc layers.\r\n nn.init.kaiming_normal_(self.fc_primary.weight.data)\r\n if self.fc_primary.bias is not None:\r\n nn.init.constant_(self.fc_primary.bias.data, val=0)\r\n\r\n nn.init.kaiming_normal_(self.fc_dual.weight.data)\r\n if self.fc_dual.bias is not None:\r\n nn.init.constant_(self.fc_dual.bias.data, val=0)\r\n\r\n nn.init.kaiming_normal_(self.label_primary.weight.data)\r\n if self.label_primary.bias is not None:\r\n nn.init.constant_(self.label_primary.bias.data, val=0)\r\n\r\n nn.init.kaiming_normal_(self.label_dual.weight.data)\r\n if self.label_dual.bias is not None:\r\n nn.init.constant_(self.label_dual.bias.data, val=0)\r\n\r\n\r\n else:\r\n for param in self.basemodel.conv1.parameters():\r\n param.requires_grad = False\r\n for param in self.basemodel.bn1.parameters():\r\n param.requires_grad = False\r\n for param in self.basemodel.layer1.parameters():\r\n param.requires_grad = False\r\n #for param in self.basemodel.layer2.parameters():\r\n # param.requires_grad = False\r\n #for param in self.basemodel.layer3.parameters():\r\n # param.requires_grad = False\r",
"def __init__(self, in_ch=2048, out_ch=256):\n super(ChannelCompress, self).__init__()\n num_bottleneck = 1000\n add_block = []\n add_block += [nn.Linear(in_ch, num_bottleneck)]\n add_block += [nn.BatchNorm1d(num_bottleneck)]\n add_block += [nn.ReLU()]\n\n add_block += [nn.Linear(num_bottleneck, 500)]\n add_block += [nn.BatchNorm1d(500)]\n add_block += [nn.ReLU()]\n add_block += [nn.Linear(500, out_ch)]\n\n # Extra BN layer, need to be removed\n #add_block += [nn.BatchNorm1d(out_ch)]\n\n add_block = nn.Sequential(*add_block)\n add_block.apply(weights_init_kaiming)\n self.model = add_block",
"def merge_conv_bn(net):\n previous = None\n has_seen_cnn = False\n conv_replace_queue = []\n bn_replace_queue = []\n for s in net.children():\n if has_seen_cnn and isinstance(s, nn.BatchNorm2d):\n conv_replace_queue.append(previous)\n bn_replace_queue += [s]\n if isinstance(s, nn.Conv2d):\n has_seen_cnn = True\n else:\n has_seen_cnn = False\n previous = s\n if len(conv_replace_queue):\n if isinstance(net, nn.Sequential):\n for i, sub in enumerate(net):\n if isinstance(sub, nn.Conv2d) and sub in conv_replace_queue:\n idx = conv_replace_queue.index(sub)\n bn = bn_replace_queue[idx]\n new_conv = fuse(sub, bn)\n net[i] = new_conv\n net[i + 1] = nn.Identity()\n else:\n for n in dir(net):\n sub = getattr(net, n)\n if isinstance(sub, nn.Conv2d) and sub in conv_replace_queue:\n idx = conv_replace_queue.index(sub)\n bn = bn_replace_queue[idx]\n new_conv = fuse(sub, bn)\n setattr(net, n, new_conv)\n for n in dir(net):\n sub = getattr(net, n)\n if isinstance(sub, nn.BatchNorm2d) and sub in bn_replace_queue:\n setattr(net, n, nn.Identity())",
"def _composite_conv(self, input_data, out_channel, name):\n with tf.variable_scope(name):\n bn_1 = self.layer_bn(input_data=input_data, is_training=self._is_training, name='bn_1')\n\n relu_1 = self.relu(input_data=bn_1, name='relu_1')\n\n if self._with_bc:\n conv_1 = self.conv2d(input_data=relu_1, out_channel=out_channel, kernel_size=1,\n padding='SAME', stride=1, use_bias=False, name='conv_1')\n\n bn_2 = self.layer_bn(input_data=conv_1, is_training=self._is_training, name='bn_2')\n relu_2 = self.relu(input_data=bn_2, name='relu_2')\n conv_2 = self.conv2d(input_data=relu_2, out_channel=out_channel, kernel_size=3,\n padding='SAME', stride=1, use_bias=False, name='conv_2')\n\n else:\n conv_2 = self.conv2d(input_data=relu_1, out_channel=out_channel, kernel_size=3,\n padding='SAME', stride=1, use_bias=False, name='conv_2')\n\n return conv_2",
"def HighResolutionModule(inputs, num_branches, num_inchannels, \n num_channels, bottleneck, block_fn, \n num_blocks, training, name, data_format, \n multi_scale_output=True):\n if bottleneck:\n num_outchannels = [c * 4 for c in num_channels]\n else:\n num_outchannels = num_channels\n\n strides = 1\n # compute the output for each branch\n outputs = []\n for i in range(num_branches):\n branch_name = name + '_branch_{}'.format(i)\n with tf.variable_scope(branch_name):\n branch = block_layer(\n inputs=inputs[i], inp_filters=num_inchannels[i],\n filters=num_channels[i], bottleneck=bottleneck,\n block_fn=block_fn, blocks=num_blocks,\n strides=strides, training=training,\n name=branch_name, data_format=data_format)\n\n outputs.append(branch)\n\n if num_branches == 1:\n return outputs\n\n def fusion(inputs, inp_index, out_index):\n \"\"\"Create multi-resolution fusion module\n\n Args:\n inputs: A list of tensor of size [batch, channels, height_in, width_in] \n or [batch, height_in, width_in, channels] depending on data_format.\n inp_index: The indexes of inputs that are fused together.\n out_index: The index of the output, which decides the output resolution.\n \n Return:\n The fused output.\n \"\"\"\n shortcut = inputs[out_index]\n if data_format == 'channel_first':\n _, _, out_h, out_w = shortcut.get_shape().as_list()\n else:\n _, out_h, out_w, _ = shortcut.get_shape().as_list()\n \n for ind in inp_index:\n input_branch = inputs[ind]\n with tf.variable_scope('input_{}_output_{}'.format(ind, out_index)):\n if ind > out_index:\n input_branch = conv2d_fixed_padding(\n inputs=input_branch, filters=num_outchannels[out_index],\n kernel_size=1, strides=1, data_format=data_format)\n input_branch = batch_norm(input_branch, training, data_format)\n input_branch = tf.nn.relu(input_branch)\n if data_format == 'channel_first':\n input_branch = tf.transpose(input_branch, perm=[0, 2, 3, 1])\n input_branch = tf.image.resize(input_branch, (out_h, out_w), \n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n input_branch = tf.transpose(input_branch, perm=[0, 3, 1, 2])\n else:\n input_branch = tf.image.resize(input_branch, (out_h, out_w), \n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n elif ind == out_index:\n continue\n else:\n for k in range(out_index - ind):\n if k == out_index - ind - 1:\n input_branch = conv2d_fixed_padding(\n inputs=input_branch, filters=num_outchannels[out_index],\n kernel_size=3, strides=2, data_format=data_format)\n input_branch = batch_norm(input_branch, training, data_format)\n else:\n input_branch = conv2d_fixed_padding(\n inputs=input_branch, filters=num_outchannels[out_index],\n kernel_size=3, strides=2, data_format=data_format)\n input_branch = batch_norm(input_branch, training, data_format)\n input_branch = tf.nn.relu(input_branch)\n\n shortcut += input_branch\n shortcut = tf.nn.relu(shortcut)\n return shortcut\n \n with tf.variable_scope('ms_fusion_' + name):\n if not multi_scale_output:\n fused_outputs = [fusion(outputs, list(range(num_branches)), 0)]\n else:\n fused_outputs = []\n for i in range(num_branches):\n fused_outputs.append(fusion(outputs, list(range(num_branches)), i))\n return fused_outputs"
] | [
"0.66424954",
"0.6055714",
"0.602726",
"0.59107614",
"0.5825306",
"0.57937133",
"0.57798207",
"0.5775232",
"0.5716006",
"0.56922126",
"0.5688817",
"0.5679025",
"0.566316",
"0.56493765",
"0.56161374",
"0.5604259",
"0.5571019",
"0.5565932",
"0.5558572",
"0.5545696",
"0.5540742",
"0.5529363",
"0.55284345",
"0.5519743",
"0.55171204",
"0.5513041",
"0.5505943",
"0.5500352",
"0.5492472",
"0.54827213"
] | 0.651783 | 1 |
Recursively fuse conv and bn in a module. During inference, the functionality of batch norm layers is turned off; only the per-channel running mean and variance are used, which exposes the chance to fuse each batch norm into the preceding conv layer to save computation and simplify the network structure. | def fuse_conv_bn(module):
last_conv = None
last_conv_name = None
for name, child in module.named_children():
if isinstance(child,
(nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)):
if last_conv is None: # only fuse BN that is after Conv
continue
fused_conv = _fuse_conv_bn(last_conv, child)
module._modules[last_conv_name] = fused_conv
# To reduce changes, set BN as Identity instead of deleting it.
module._modules[name] = nn.Identity()
last_conv = None
elif isinstance(child, nn.Conv2d):
last_conv = child
last_conv_name = name
else:
fuse_conv_bn(child)
return module | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _fuse_conv_bn(conv, bn):\n conv_w = conv.weight\n conv_b = conv.bias if conv.bias is not None else torch.zeros_like(\n bn.running_mean)\n\n factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)\n conv.weight = nn.Parameter(conv_w *\n factor.reshape([conv.out_channels, 1, 1, 1]))\n conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)\n return conv",
"def fuse_model(self):\n\n for m in self.modules():\n if type(m) == QuantizableBasicConv2d:\n m.fuse_model()",
"def _fuse(self):\n with tf.variable_scope('fusion'):\n self.fuse_p_encodes, _ = rnn('bi-lstm', self.match_p_encodes, self.p_length,\n self.hidden_size, layer_num=1)\n if self.use_dropout:\n self.fuse_p_encodes = tf.nn.dropout(self.fuse_p_encodes, self.dropout_keep_prob)",
"def preprocess_module(mod):\n\n def alter_conv(attrs, inputs, tinfos, out_type):\n new_attrs = dict(attrs)\n data_info = tinfos[0]\n weight_info = tinfos[1]\n (desired_data_layout, desired_kernel_layout) = (\"NCHW\", \"OIHW\")\n new_attrs[\"data_layout\"] = desired_data_layout\n new_attrs[\"kernel_layout\"] = desired_kernel_layout\n\n if is_depthwise_conv2d(\n data_info.shape,\n attrs[\"data_layout\"],\n weight_info.shape,\n attrs[\"kernel_layout\"],\n attrs[\"groups\"],\n ):\n dkl = desired_kernel_layout\n new_attrs[\"kernel_layout\"] = dkl[1] + dkl[0] + dkl[2] + dkl[3]\n return relay.nn.conv2d(*inputs, **new_attrs)\n\n with OpAttrContext(\"nn.conv2d\", \"FTVMAlterOpLayout\", alter_conv):\n seq = tvm.transform.Sequential(\n [\n transform.ConvertLayout({\"nn.conv2d\": [\"NCHW\", \"OIHW\"]}),\n transform.ConvertLayout({\"nn.conv2d_transpose\": [\"NCHW\", \"OIHW\"]}),\n transform.AlterOpLayout(),\n transform.FoldConstant(),\n ]\n )\n with tvm.transform.PassContext(opt_level=3):\n preprocessed_mod = seq(mod)\n return preprocessed_mod",
"def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False",
"def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False",
"def full_fusion(self):\n if self.fully_fused:\n return\n\n if not self.partially_fused:\n self.partial_fusion()\n\n if self.use_post_bn:\n eq_kernel, eq_bias = self._fuse_bn_tensor(\n self.rbr_reparam.weight,\n self.rbr_reparam.bias,\n self.post_bn.running_mean,\n self.post_bn.running_var,\n self.post_bn.weight,\n self.post_bn.bias,\n self.post_bn.eps,\n )\n\n self.rbr_reparam.weight.data = eq_kernel\n self.rbr_reparam.bias.data = eq_bias\n\n for para in self.parameters():\n para.detach_()\n\n if hasattr(self, \"post_bn\"):\n self.__delattr__(\"post_bn\")\n\n self.partially_fused = False\n self.fully_fused = True",
"def replace_conv(module: nn.Module):\n for name, mod in module.named_children():\n target_mod = getattr(module, name)\n if type(mod) == torch.nn.Conv2d:\n setattr(module, name, WSConv2d(target_mod.in_channels, target_mod.out_channels, target_mod.kernel_size,\n target_mod.stride, target_mod.padding, target_mod.dilation, target_mod.groups, target_mod.bias))\n \n if type(mod) == torch.nn.BatchNorm2d:\n setattr(module, name, torch.nn.Identity())\n\n for name, mod in module.named_children():\n replace_conv(mod)",
"def inference(image, keep_prob):\r\n '''\r\n print(\"setting up vgg initialized conv layers ...\")\r\n model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)\r\n\r\n mean = model_data['normalization'][0][0][0]\r\n mean_pixel = np.mean(mean, axis=(0, 1))\r\n\r\n weights = np.squeeze(model_data['layers'])\r\n print(\"weights.shape\",weights.shape)\r\n\r\n processed_image = utils.process_image(image, mean_pixel)'''\r\n\r\n with tf.variable_scope(\"inference\"):\r\n pooling_net,conv_final_layer = inference_op(image)\r\n #conv_final_layer = image_net[\"conv5_3\"]\r\n\r\n pool5 = utils.max_pool_2x2(conv_final_layer)\r\n\r\n W6 = utils.weight_variable([7, 7, 512, 4096], name=\"W6\")\r\n b6 = utils.bias_variable([4096], name=\"b6\")\r\n conv6 = utils.conv2d_basic(pool5, W6, b6)\r\n relu6 = tf.nn.relu(conv6, name=\"relu6\")\r\n if FLAGS.debug:\r\n utils.add_activation_summary(relu6)\r\n relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)\r\n\r\n W7 = utils.weight_variable([1, 1, 4096, 4096], name=\"W7\")\r\n b7 = utils.bias_variable([4096], name=\"b7\")\r\n conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)\r\n relu7 = tf.nn.relu(conv7, name=\"relu7\")\r\n if FLAGS.debug:\r\n utils.add_activation_summary(relu7)\r\n relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)\r\n\r\n W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name=\"W8\")\r\n b8 = utils.bias_variable([NUM_OF_CLASSESS], name=\"b8\")\r\n conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)\r\n # annotation_pred1 = tf.argmax(conv8, dimension=3, name=\"prediction1\")\r\n\r\n # now to upscale to actual image size\r\n deconv_shape1 = pooling_net[\"pool4\"].get_shape()\r\n W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name=\"W_t1\")\r\n b_t1 = utils.bias_variable([deconv_shape1[3].value], name=\"b_t1\")\r\n # 对第8层的结果进行反卷积(上采样),通道数也由NUM_OF_CLASSESS变为第4层的通道数\r\n conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(pooling_net[\"pool4\"]))\r\n fuse_1 = tf.add(conv_t1, pooling_net[\"pool4\"], name=\"fuse_1\")\r\n\r\n deconv_shape2 = pooling_net[\"pool3\"].get_shape()\r\n W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name=\"W_t2\")\r\n b_t2 = utils.bias_variable([deconv_shape2[3].value], name=\"b_t2\")\r\n conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(pooling_net[\"pool3\"]))\r\n fuse_2 = tf.add(conv_t2, pooling_net[\"pool3\"], name=\"fuse_2\")\r\n\r\n shape = tf.shape(image)\r\n deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])\r\n W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name=\"W_t3\")\r\n b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name=\"b_t3\")\r\n conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)\r\n\r\n annotation_pred = tf.argmax(conv_t3, dimension=3, name=\"prediction\")\r\n print(\"annotation_pred.shape\",annotation_pred.shape)\r\n print(\"conv_t3\",conv_t3)\r\n print(\"tf.expand_dims(annotation_pred, dim=3)\",tf.expand_dims(annotation_pred, dim=3))\r\n return tf.expand_dims(annotation_pred, dim=3), conv_t3",
"def inference(self, images, inference_name, channel, mean_pixel, keep_prob):\n print(\"setting up resnet101 initialized conv layers ...\")\n #mean_pixel = np.mean(mean, axis=(0, 1))\n\n processed_images = utils.process_image(images, mean_pixel)\n\n processed_images = tf.nn.dropout(processed_images, self.input_keep_prob)\n\n with tf.variable_scope(inference_name):\n \n conv1 = utils_layers.conv2d_layer(processed_images, '1', [7, 7, channel, 64], pool_=3)\n #Resnet\n conv_final_layer, image_net = self.resnet.resnet_op(conv1, if_avg_pool=0)\n\n #dropout\n conv_final_layer = tf.nn.dropout(conv_final_layer, keep_prob)\n W_last = utils.weight_variable([1, 1, 2048, 2], name='W_last')\n b_last = utils.bias_variable([2], name='b8')\n conv_last = utils.conv2d_basic(conv_final_layer, W_last, b_last)\n print('conv_last: ', conv_last.get_shape())\n #Deconv operator\n #1. output_shape=[self.batch_size, 14, 14, 1024]\n conv_t1 = utils_layers.deconv2d_layer(conv_last, 't1', [4,4,1024,2], output_shape=tf.shape(image_net['block3_b22']))\n fuse_1 = tf.add(conv_t1, image_net['block3_b22'], name=\"fuse_1\")\n #2. output_shape=[self.batch_size, 28, 28, 512]\n conv_t2 = utils_layers.deconv2d_layer(fuse_1, 't2', [4,4,512,1024], output_shape=tf.shape(image_net['block2_b3']))\n fuse_2 = tf.add(conv_t2, image_net['block2_b3'], name=\"fuse_2\")\n #3. output_shape = [self.batch_size, 56, 56, 256]\n conv_t3 = utils_layers.deconv2d_layer(fuse_2, 't3', [4,4,256,512], output_shape=tf.shape(image_net['block1_b2']))\n fuse_3 = tf.add(conv_t3, image_net['block1_b2'], name='fuse_3')\n #4. output_shape=[self.batch_size, 224, 224, 2]\n shape = tf.shape(images)\n conv_t4 = utils_layers.deconv2d_layer(fuse_3, 't4', [16, 16, 2, 256], output_shape=[shape[0], shape[1], shape[2], 2], stride=4)\n \n annotation_pred = tf.argmax(conv_t4, dimension=3, name=\"prediction\")\n\n logits = conv_t4\n print('logits shape', logits.shape)\n\n\n return logits",
"def _rescale_module(module):\n for sub in module.modules():\n if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d, nn.Conv2d, nn.ConvTranspose2d)):\n std = sub.weight.std().detach()\n scale = (std / 0.1) ** 0.5\n sub.weight.data /= scale\n if sub.bias is not None:\n sub.bias.data /= scale",
"def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net",
"def _equalize_weights_unfolding_pact(self, bn_dict={}, verbose=False, eps=None):\n\n if not bn_dict:\n bn_dict = get_bn_dict_from_supernodes(self)\n\n module_dict = {}\n for n,m in self.named_modules():\n if (m.__class__.__name__ == \"PACT_Conv2d\" or \\\n m.__class__.__name__ == \"PACT_Conv1d\" or \\\n m.__class__.__name__ == \"PACT_Linear\" or \\\n m.__class__.__name__ == \"BatchNorm2d\" or \\\n m.__class__.__name__ == \"BatchNorm1d\" ):\n module_dict[n] = m\n for n_before in bn_dict.keys():\n n_after = bn_dict[n_before]\n m_before = module_dict[n_before]\n m_after = module_dict[n_after]\n if eps is None:\n eps = m_after.eps\n range_before = weight_range(m_before, 0)\n if verbose:\n logging.info(\"[Equalization by Unfolding] %s: wrange_min=%.5f wrange_max=%.5f\" % (n_before, range_before.min().item(), range_before.max().item()))\n m_before.weight.data[:] = m_before.weight.data[:] / reshape_before(m_before, range_before)\n try:\n m_before.bias.data[:] = m_before.bias.data[:] / range_before\n except AttributeError:\n pass\n m_after.running_mean.data[:] = m_after.running_mean.data[:] / range_before\n m_after.weight.data[:] = m_after.weight.data[:] * reshape_after(m_after, range_before)\n if verbose:\n logging.info(\"[Equalization by Unfolding] %s: wrange_min=%.5f wrange_max=%.5f\" % (n_before, weight_range(m_before, 0).min().item(), weight_range(m_before, 0).max().item()))",
"def conv_bn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param):\n a, conv_cache = conv_forward_fast(x, w, b, conv_param)\n an, sbn_cache = spatial_batchnorm_forward(a, gamma, beta, bn_param)\n out, relu_cache = relu_forward(an)\n cache = (conv_cache, sbn_cache, relu_cache)\n return out, cache",
"def HighResolutionModule(inputs, num_branches, num_inchannels, \n num_channels, bottleneck, block_fn, \n num_blocks, training, name, data_format, \n multi_scale_output=True):\n if bottleneck:\n num_outchannels = [c * 4 for c in num_channels]\n else:\n num_outchannels = num_channels\n\n strides = 1\n # compute the output for each branch\n outputs = []\n for i in range(num_branches):\n branch_name = name + '_branch_{}'.format(i)\n with tf.variable_scope(branch_name):\n branch = block_layer(\n inputs=inputs[i], inp_filters=num_inchannels[i],\n filters=num_channels[i], bottleneck=bottleneck,\n block_fn=block_fn, blocks=num_blocks,\n strides=strides, training=training,\n name=branch_name, data_format=data_format)\n\n outputs.append(branch)\n\n if num_branches == 1:\n return outputs\n\n def fusion(inputs, inp_index, out_index):\n \"\"\"Create multi-resolution fusion module\n\n Args:\n inputs: A list of tensor of size [batch, channels, height_in, width_in] \n or [batch, height_in, width_in, channels] depending on data_format.\n inp_index: The indexes of inputs that are fused together.\n out_index: The index of the output, which decides the output resolution.\n \n Return:\n The fused output.\n \"\"\"\n shortcut = inputs[out_index]\n if data_format == 'channel_first':\n _, _, out_h, out_w = shortcut.get_shape().as_list()\n else:\n _, out_h, out_w, _ = shortcut.get_shape().as_list()\n \n for ind in inp_index:\n input_branch = inputs[ind]\n with tf.variable_scope('input_{}_output_{}'.format(ind, out_index)):\n if ind > out_index:\n input_branch = conv2d_fixed_padding(\n inputs=input_branch, filters=num_outchannels[out_index],\n kernel_size=1, strides=1, data_format=data_format)\n input_branch = batch_norm(input_branch, training, data_format)\n input_branch = tf.nn.relu(input_branch)\n if data_format == 'channel_first':\n input_branch = tf.transpose(input_branch, perm=[0, 2, 3, 1])\n input_branch = tf.image.resize(input_branch, (out_h, out_w), \n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n input_branch = tf.transpose(input_branch, perm=[0, 3, 1, 2])\n else:\n input_branch = tf.image.resize(input_branch, (out_h, out_w), \n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n elif ind == out_index:\n continue\n else:\n for k in range(out_index - ind):\n if k == out_index - ind - 1:\n input_branch = conv2d_fixed_padding(\n inputs=input_branch, filters=num_outchannels[out_index],\n kernel_size=3, strides=2, data_format=data_format)\n input_branch = batch_norm(input_branch, training, data_format)\n else:\n input_branch = conv2d_fixed_padding(\n inputs=input_branch, filters=num_outchannels[out_index],\n kernel_size=3, strides=2, data_format=data_format)\n input_branch = batch_norm(input_branch, training, data_format)\n input_branch = tf.nn.relu(input_branch)\n\n shortcut += input_branch\n shortcut = tf.nn.relu(shortcut)\n return shortcut\n \n with tf.variable_scope('ms_fusion_' + name):\n if not multi_scale_output:\n fused_outputs = [fusion(outputs, list(range(num_branches)), 0)]\n else:\n fused_outputs = []\n for i in range(num_branches):\n fused_outputs.append(fusion(outputs, list(range(num_branches)), i))\n return fused_outputs",
"def inference(image,norm = True,phase_train = True):\n batch_size = image.shape[0]\n r,g,b = tf.split(axis = 3,num_or_size_splits = 3,value = image)\n p_image = tf.concat([r - 123.68,\n g - 116.78,\n b - 103.94],axis = 3)\n with tf.variable_scope('vgg_16'):\n with tf.variable_scope('conv1'):\n conv1_1 = layer.conv_layer('conv1_1',p_image,[3,3,3,64])\n conv1_2 = layer.conv_layer('conv1_2',conv1_1,[3,3,64,64])\n pool1 = layer.pool_layer('pool1',conv1_2)\n with tf.variable_scope('conv2'):\n conv2_1 = layer.conv_layer('conv2_1',pool1,[3,3,64,128])\n conv2_2 = layer.conv_layer('conv2_2',conv2_1,[3,3,128,128])\n pool2 = layer.pool_layer('pool2',conv2_2)\n with tf.variable_scope('conv3'):\n conv3_1 = layer.conv_layer('conv3_1',pool2,[3,3,128,256])\n conv3_2 = layer.conv_layer('conv3_2',conv3_1,[3,3,256,256])\n conv3_3 = layer.conv_layer('conv3_3',conv3_2,[3,3,256,256])\n pool3 = layer.pool_layer('pool3',conv3_3)\n with tf.variable_scope('conv4'):\n conv4_1 = layer.conv_layer('conv4_1',pool3,[3,3,256,512])\n conv4_2 = layer.conv_layer('conv4_2',conv4_1,[3,3,512,512])\n conv4_3 = layer.conv_layer('conv4_3',conv4_2,[3,3,512,512])\n pool4 = layer.pool_layer('pool4',conv4_3)\n with tf.variable_scope('conv5'):\n conv5_1 = layer.conv_layer('conv5_1',pool4,[3,3,512,512])\n conv5_2 = layer.conv_layer('conv5_2',conv5_1,[3,3,512,512])\n conv5_3 = layer.conv_layer('conv5_3',conv5_2,[3,3,512,512])\n pool5 = layer.pool_layer('pool5',conv5_3,ksize = [1,3,3,1],strides = [1,1,1,1])\n with tf.variable_scope('ssd'):\n conv6 = layer.atrous_conv('conv6',pool5,[3,3,512,1024],rate = 6,\n batch_normalization = norm,phase_train = phase_train)\n conv7 = layer.conv_layer('conv7',conv6,[1,1,1024,1024],\n batch_normalization = norm,phase_train = phase_train)\n with tf.variable_scope('conv8'):\n conv8_1 = layer.conv_layer('conv8_1',conv7,[1,1,1024,256],\n batch_normalization = norm,phase_train = phase_train)\n conv8_2 = layer.conv_layer('conv8_2',conv8_1,[3,3,256,512],\n stride = [1,2,2,1],batch_normalization = norm,phase_train = phase_train)\n with tf.variable_scope('conv9'):\n conv9_1 = layer.conv_layer('conv9_1',conv8_2,[1,1,512,128],\n batch_normalization = norm,phase_train = phase_train)\n conv9_2 = layer.conv_layer('conv9_2',conv9_1,[3,3,128,256],\n stride = [1,2,2,1],batch_normalization = norm,phase_train = phase_train)\n with tf.variable_scope('conv10'):\n conv10_1 = layer.conv_layer('conv10_1',conv9_2,[1,1,256,128],\n batch_normalization = norm,phase_train = phase_train)\n conv10_2 = layer.conv_layer('conv10_2',conv10_1,[3,3,128,256],\n padding = 'VALID',batch_normalization = norm,phase_train = phase_train)\n with tf.variable_scope('conv11'):\n conv11_1 = layer.conv_layer('conv11_1',conv10_2,[1,1,256,128],\n batch_normalization = norm,phase_train = phase_train)\n conv11_2 = layer.conv_layer('conv11_2',conv11_1,[3,3,128,256],\n padding = 'VALID',batch_normalization = norm,phase_train = phase_train)#vgg300\n with tf.variable_scope('multibox'):\n\n l2_conv4_3 = layer.l2_normalization('l2_normalization',conv4_3,scaling = True)\n cls4 = layer.conv_layer('cls4',l2_conv4_3,[3,3,512,84],activation = None)\n loc4 = layer.conv_layer('loc4',l2_conv4_3,[3,3,512,16],activation = None)\n\n cls4_reshape = tf.reshape(cls4,[batch_size,-1,21])\n loc4_reshape = tf.reshape(loc4,[batch_size,-1,4])\n\n\n cls7 = layer.conv_layer('cls7',conv7,[3,3,1024,126],activation = None)\n loc7 = layer.conv_layer('loc7',conv7,[3,3,1024,24],activation = None)\n\n cls7_reshape = tf.reshape(cls7,[batch_size,-1,21])\n loc7_reshape = 
tf.reshape(loc7,[batch_size,-1,4])\n\n cls8 = layer.conv_layer('cls8',conv8_2,[3,3,512,126],activation = None)\n loc8 = layer.conv_layer('loc8',conv8_2,[3,3,512,24],activation = None)\n\n cls8_reshape = tf.reshape(cls8,[batch_size,-1,21])\n loc8_reshape = tf.reshape(loc8,[batch_size,-1,4])\n\n cls9 = layer.conv_layer('cls9',conv9_2,[3,3,256,126],activation = None)\n loc9 = layer.conv_layer('loc9',conv9_2,[3,3,256,24],activation = None)\n\n cls9_reshape = tf.reshape(cls9,[batch_size,-1,21])\n loc9_reshape = tf.reshape(loc9,[batch_size,-1,4])\n\n cls10 = layer.conv_layer('cls10',conv10_2,[3,3,256,84],activation = None)\n loc10 = layer.conv_layer('loc10',conv10_2,[3,3,256,16],activation = None)\n\n cls10_reshape = tf.reshape(cls10,[batch_size,-1,21])\n loc10_reshape = tf.reshape(loc10,[batch_size,-1,4])\n\n cls11 = layer.conv_layer('cls11',conv11_2,[1,1,256,84],activation = None)\n loc11 = layer.conv_layer('loc11',conv11_2,[1,1,256,16],activation = None)\n\n cls11_reshape = tf.reshape(cls11,[batch_size,-1,21])\n loc11_reshape = tf.reshape(loc11,[batch_size,-1,4])\n\n cls_logit = tf.concat([\n cls4_reshape,\n cls7_reshape,\n cls8_reshape,\n cls9_reshape,\n cls10_reshape,\n cls11_reshape\n ],1)\n loc_logit = tf.concat([\n loc4_reshape,\n loc7_reshape,\n loc8_reshape,\n loc9_reshape,\n loc10_reshape,\n loc11_reshape\n ],1)\n \n return cls_logit,loc_logit",
"def _FoldFusedBatchNorms(graph):\n for match in _FindFusedBatchNorms(graph):\n scope, sep, _ = match.layer_op.name.rpartition('/')\n # Make sure new ops are added to `graph` and put on the same device as\n # `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope\n # named `scope`. Otherwise, TF creates a unique scope whose name starts with\n # `scope`.\n with graph.as_default(), graph.name_scope(scope + sep), ops.device(\n match.bn_op.device):\n with graph.name_scope(scope + sep + 'BatchNorm_Fold' + sep):\n # new weights = old weights * gamma / sqrt(variance + epsilon)\n # new biases = -mean * gamma / sqrt(variance + epsilon) + beta\n multiplier_tensor = match.gamma_tensor * math_ops.rsqrt(\n match.variance_tensor + match.bn_op.get_attr('epsilon'))\n bias_tensor = math_ops.subtract(\n match.beta_tensor,\n match.mean_tensor * multiplier_tensor,\n name='bias')\n\n # The shape of depthwise weights is different, so we need to reshape the\n # multiplier_tensor to ensure that the scaled_weight_tensor has the\n # expected shape.\n if match.layer_op.type == 'DepthwiseConv2dNative':\n new_shape = [\n match.weight_tensor.get_shape().as_list()[2],\n match.weight_tensor.get_shape().as_list()[3]\n ]\n multiplier_tensor = array_ops.reshape(\n multiplier_tensor, new_shape, name='scale_reshape')\n\n # TODO(suharshs): This naming of the following ops needs to carefully\n # follow the naming expected by quantize.py. Generalize the quantize code\n # to not require these delicate naming conventions.\n scaled_weight_tensor = math_ops.multiply(\n match.weight_tensor, multiplier_tensor, name='mul_fold')\n\n new_layer_tensor = _CloneWithNewOperands(\n match.layer_op, match.input_tensor, scaled_weight_tensor)\n\n bias_add_tensor = math_ops.add(\n new_layer_tensor, bias_tensor, name='add_fold')\n\n nodes_modified_count = graph_editor.reroute_ts(bias_add_tensor,\n match.output_tensor)\n if nodes_modified_count != 1:\n raise ValueError(\n 'Unexpected inputs to op: %s' % match.output_tensor.name)",
"def down_optimized_block(x, out_channels, name, act=tf.nn.relu):\n with tf.variable_scope(name):\n x_0 = x\n x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='sn_conv1')\n x = act(x)\n x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, name='sn_conv2')\n x = dsample(x)\n x_0 = dsample(x_0)\n x_0 = ops.snconv2d(x_0, out_channels, 1, 1, 1, 1, name='sn_conv3')\n return x + x_0",
"def __init__(self):\n\n super(ConvModule, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=[1, 2])\n self.conv1_bn = nn.BatchNorm2d(64)\n self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=[1, 2])\n self.conv2_bn = nn.BatchNorm2d(128)\n self.pool1 = nn.MaxPool2d(kernel_size=4, stride=2)\n self.dropout0 = nn.Dropout(p=0.4)\n\n self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=[1, 2])\n self.conv3_bn = nn.BatchNorm2d(256)\n self.conv4 = nn.Conv2d(in_channels=256, out_channels=64, kernel_size=3, stride=[1, 2])\n self.conv4_bn = nn.BatchNorm2d(64)\n self.pool2 = nn.MaxPool2d(kernel_size=4, stride=2)\n #\n # self.conv5 = nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=[1, 2])\n # self.conv5_bn = nn.BatchNorm2d(64)\n # self.pool3 = nn.MaxPool2d(kernel_size=3, stride=[1, 2])",
"def train(self, mode=True):\n super(RCRNN, self).train(mode)\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if self.freeze_bn:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if self.freeze_bn:\n m.weight.requires_grad = False\n m.bias.requires_grad = False",
"def conv_block(input_tensor, kernel_size, filters, stage, block_id, strides=(2, 2), data_format='channels_last', train_bn=True):\n\n filter1, filter2, filter3 = filters\n conv_name_base = 'res' + str(stage) + block_id + '_branch'\n bn_name_base = 'bn' + str(stage) + block_id + '_branch'\n\n x = tl.layers.Conv2d(input_tensor, filter1, (1, 1), strides=strides, padding='VALID', data_format=data_format,\n W_init=W_init, b_init=b_init, name=conv_name_base + '2a')\n x = tl.layers.BatchNormLayer(x, act=tf.nn.relu, is_train=train_bn, name=bn_name_base + '2a')\n x = tl.layers.Conv2d(x, filter2, (kernel_size, kernel_size), padding='SAME', data_format=data_format,\n W_init=W_init, b_init=b_init, name=conv_name_base + '2b')\n x = tl.layers.BatchNormLayer(x, act=tf.nn.relu, is_train=train_bn, name=bn_name_base + '2b')\n\n x = tl.layers.Conv2d(x, filter3, (1, 1), padding='VALID', data_format=data_format,\n W_init=W_init, b_init=b_init, name=conv_name_base + '2c')\n x = tl.layers.BatchNormLayer(x, act=tf.nn.relu, is_train=train_bn, name=bn_name_base + '2c')\n\n # add shortcut path changes\n shortcut = tl.layers.Conv2d(input_tensor, filter3, (1, 1), strides=strides, padding='VALID', data_format=data_format,\n W_init=W_init, b_init=b_init, name=conv_name_base + '1')\n shortcut = tl.layers.BatchNormLayer(shortcut, act=tf.nn.relu, is_train=train_bn, name=bn_name_base + '1')\n\n x = tl.layers.ElementwiseLayer([x, shortcut], combine_fn=tf.add, act=tf.nn.relu, name=\"add\")\n return x",
"def __init__(\n self,\n spatial_dims: int,\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n stride: int,\n image_size: list[int],\n expand_ratio: int,\n se_ratio: float | None,\n id_skip: bool | None = True,\n norm: str | tuple = (\"batch\", {\"eps\": 1e-3, \"momentum\": 0.01}),\n drop_connect_rate: float | None = 0.2,\n ) -> None:\n super().__init__()\n\n # select the type of N-Dimensional layers to use\n # these are based on spatial dims and selected from MONAI factories\n conv_type = Conv[\"conv\", spatial_dims]\n adaptivepool_type = Pool[\"adaptiveavg\", spatial_dims]\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.id_skip = id_skip\n self.stride = stride\n self.expand_ratio = expand_ratio\n self.drop_connect_rate = drop_connect_rate\n\n if (se_ratio is not None) and (0.0 < se_ratio <= 1.0):\n self.has_se = True\n self.se_ratio = se_ratio\n else:\n self.has_se = False\n\n # Expansion phase (Inverted Bottleneck)\n inp = in_channels # number of input channels\n oup = in_channels * expand_ratio # number of output channels\n if self.expand_ratio != 1:\n self._expand_conv = conv_type(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)\n self._expand_conv_padding = _make_same_padder(self._expand_conv, image_size)\n\n self._bn0 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=oup)\n else:\n # need to have the following to fix JIT error:\n # \"Module 'MBConvBlock' has no attribute '_expand_conv'\"\n\n # FIXME: find a better way to bypass JIT error\n self._expand_conv = nn.Identity()\n self._expand_conv_padding = nn.Identity()\n self._bn0 = nn.Identity()\n\n # Depthwise convolution phase\n self._depthwise_conv = conv_type(\n in_channels=oup,\n out_channels=oup,\n groups=oup, # groups makes it depthwise\n kernel_size=kernel_size,\n stride=self.stride,\n bias=False,\n )\n self._depthwise_conv_padding = _make_same_padder(self._depthwise_conv, image_size)\n self._bn1 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=oup)\n image_size = _calculate_output_image_size(image_size, self.stride)\n\n # Squeeze and Excitation layer, if desired\n if self.has_se:\n self._se_adaptpool = adaptivepool_type(1)\n num_squeezed_channels = max(1, int(in_channels * self.se_ratio))\n self._se_reduce = conv_type(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)\n self._se_reduce_padding = _make_same_padder(self._se_reduce, [1] * spatial_dims)\n self._se_expand = conv_type(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)\n self._se_expand_padding = _make_same_padder(self._se_expand, [1] * spatial_dims)\n\n # Pointwise convolution phase\n final_oup = out_channels\n self._project_conv = conv_type(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)\n self._project_conv_padding = _make_same_padder(self._project_conv, image_size)\n self._bn2 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=final_oup)\n\n # swish activation to use - using memory efficient swish by default\n # can be switched to normal swish using self.set_swish() function call\n self._swish = Act[\"memswish\"](inplace=True)",
"def fc(input, output, reuse=False, norm=None, activation=tf.nn.relu, dropout=0.7, is_training=True, name='fc'):\n with tf.variable_scope(name, reuse=reuse):\n x = slim.fully_connected(input, output, activation_fn=activation, normalizer_fn=norm, reuse=reuse)\n x = tf.nn.dropout(x, dropout)\n return x",
"def _fuse_back(self):\n with tf.variable_scope('fusion'):\n #self.fuse_p_encodes, _ = rnn('lstm', self.match_p_encodes, self.p_length,\n # self.hidden_size, layer_num=1)\n self.fuse_p_encodes = tc.layers.fully_connected(self.match_p_encodes, self.hidden_size)\n if self.use_dropout:\n self.fuse_p_encodes = tf.nn.dropout(self.fuse_p_encodes, self.dropout_keep_prob)",
"def merge_conv_bn(net):\n previous = None\n has_seen_cnn = False\n conv_replace_queue = []\n bn_replace_queue = []\n for s in net.children():\n if has_seen_cnn and isinstance(s, nn.BatchNorm2d):\n conv_replace_queue.append(previous)\n bn_replace_queue += [s]\n if isinstance(s, nn.Conv2d):\n has_seen_cnn = True\n else:\n has_seen_cnn = False\n previous = s\n if len(conv_replace_queue):\n if isinstance(net, nn.Sequential):\n for i, sub in enumerate(net):\n if isinstance(sub, nn.Conv2d) and sub in conv_replace_queue:\n idx = conv_replace_queue.index(sub)\n bn = bn_replace_queue[idx]\n new_conv = fuse(sub, bn)\n net[i] = new_conv\n net[i + 1] = nn.Identity()\n else:\n for n in dir(net):\n sub = getattr(net, n)\n if isinstance(sub, nn.Conv2d) and sub in conv_replace_queue:\n idx = conv_replace_queue.index(sub)\n bn = bn_replace_queue[idx]\n new_conv = fuse(sub, bn)\n setattr(net, n, new_conv)\n for n in dir(net):\n sub = getattr(net, n)\n if isinstance(sub, nn.BatchNorm2d) and sub in bn_replace_queue:\n setattr(net, n, nn.Identity())",
"def conv_block(inputs, out_channels, name='conv', training=False, block_idx=0):\n with tf.variable_scope(name):\n conv = tf.keras.layers.Conv2D(\n filters=out_channels,\n kernel_size=3,\n padding='same')(inputs)\n conv = bn[block_idx](conv, training=training)\n conv = tf.nn.relu(conv)\n out = tf.contrib.layers.max_pool2d(conv, 2)\n return out",
"def u_net_bn(x, is_train=False, reuse=False, pad='SAME', n_out=3):\n _, nx, ny, nz = x.shape\n print(\" * Input: size of image: (%d %d %d)\" % (nx, ny, nz))\n w_init = tf.truncated_normal_initializer(stddev=0.01)\n b_init = tf.constant_initializer(value=0.0)\n decay = 0.9\n gamma_init=tf.random_normal_initializer(1., 0.02)\n lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)\n with tf.variable_scope(\"u_net_bn\", reuse=reuse):\n inputs = InputLayer(x, name='in')\n\n conv1 = Conv2d(inputs, 64, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=b_init, name='conv1')\n conv2 = Conv2d(conv1, 128, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv2')\n conv2 = BatchNormLayer(conv2, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn2')\n\n conv3 = Conv2d(conv2, 256, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv3')\n conv3 = BatchNormLayer(conv3, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn3')\n\n conv4 = Conv2d(conv3, 512, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv4')\n conv4 = BatchNormLayer(conv4, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn4')\n\n conv5 = Conv2d(conv4, 512, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv5')\n conv5 = BatchNormLayer(conv5, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn5')\n\n conv6 = Conv2d(conv5, 512, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv6')\n conv6 = BatchNormLayer(conv6, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn6')\n\n conv7 = Conv2d(conv6, 512, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv7')\n conv7 = BatchNormLayer(conv7, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn7')\n\n conv8 = Conv2d(conv7, 512, (4, 4), (2, 2), act=lrelu, padding=pad, W_init=w_init, b_init=b_init, name='conv8')\n print(\" * After conv: %s\" % conv8.outputs)\n\n up7 = DeConv2d(conv8, 512, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv7')\n up7 = BatchNormLayer(up7, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn7')\n\n # print(up6.outputs)\n up6 = ConcatLayer([up7, conv7], concat_dim=3, name='concat6')\n up6 = DeConv2d(up6, 1024, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv6')\n up6 = BatchNormLayer(up6, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn6')\n # print(up6.outputs)\n\n up5 = ConcatLayer([up6, conv6], concat_dim=3, name='concat5')\n up5 = DeConv2d(up5, 1024, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv5')\n up5 = BatchNormLayer(up5, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn5')\n # print(up5.outputs)\n\n up4 = ConcatLayer([up5, conv5] ,concat_dim=3, name='concat4')\n up4 = DeConv2d(up4, 1024, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv4')\n up4 = BatchNormLayer(up4, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn4')\n\n up3 = ConcatLayer([up4, conv4] ,concat_dim=3, name='concat3')\n up3 = DeConv2d(up3, 256, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv3')\n up3 = BatchNormLayer(up3, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, 
name='dbn3')\n\n up2 = ConcatLayer([up3, conv3] ,concat_dim=3, name='concat2')\n up2 = DeConv2d(up2, 128, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv2')\n up2 = BatchNormLayer(up2, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn2')\n\n up1 = ConcatLayer([up2, conv2] ,concat_dim=3, name='concat1')\n up1 = DeConv2d(up1, 64, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv1')\n up1 = BatchNormLayer(up1, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn1')\n\n up0 = ConcatLayer([up1, conv1] ,concat_dim=3, name='concat0')\n up0 = DeConv2d(up0, 64, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv0')\n up0 = BatchNormLayer(up0, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn0')\n # print(up0.outputs)\n\n out = Conv2d(up0, n_out, (1, 1), act=tf.nn.sigmoid, name='out')\n\n print(\" * Output: %s\" % out.outputs)\n\n return out",
"def __call__(self, **kwargs):\n segname = 'block_{}_expand_relu'\n blocks = [13, 6, 3, 1]\n skips = [self._backbone.get_layer(segname.format(i)) for i in blocks]\n backbone_out = self._backbone.get_layer('block_16_project')\n\n p5 = self._fpn_block(backbone_out.output, skips[0].output)\n p4 = self._fpn_block(p5, skips[1].output)\n p3 = self._fpn_block(p4, skips[2].output)\n p2 = self._fpn_block(p3, skips[3].output)\n\n s5 = self._conv_block(p5, 128)\n s4 = self._conv_block(p4, 128)\n s3 = self._conv_block(p3, 128)\n s2 = self._conv_block(p2, 128)\n\n s5 = tf.keras.layers.UpSampling2D(\n size=(8, 8),\n interpolation='nearest'\n )(s5)\n\n s4 = tf.keras.layers.UpSampling2D(\n size=(4, 4),\n interpolation='nearest'\n )(s4)\n\n s3 = tf.keras.layers.UpSampling2D(\n size=(2, 2),\n interpolation='nearest'\n )(s3)\n\n concat = [s5, s4, s3, s2]\n x = tf.keras.layers.Concatenate()(concat)\n x = tf.keras.layers.Conv2D(\n 64,\n kernel_size=3,\n padding='same',\n kernel_initializer='he_uniform'\n )(x)\n\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Activation('relu')(x)\n x = tf.keras.layers.UpSampling2D((2, 2))(x)\n\n x = tf.keras.layers.Conv2D(\n 1,\n kernel_size=3,\n padding='same',\n kernel_initializer='he_uniform'\n )(x)\n\n out = tf.keras.layers.Activation('sigmoid')(x)\n model = tf.keras.models.Model(\n inputs=self._backbone.input,\n outputs=out\n )\n\n return model",
"def global_discriminator(images,\n # is_training,\n reuse=None):\n # batch_size = images.get_shape().as_list()[0]\n conv_layers = []\n # bn_layers = []\n with tf.variable_scope('global_discriminator', reuse=reuse):\n conv1 = Conv2dLayer(images, [3, 3, 3, 64], stride=2, name='conv1')\n # bn1_layer = BatchNormLayer(conv1.output, is_training, name='bn1')\n bn1 = tf.nn.leaky_relu(conv1.output)\n conv_layers.append(conv1)\n # bn_layers.append(bn1_layer)\n\n conv2 = Conv2dLayer(bn1, [3, 3, 64, 128], stride=2, name='conv2')\n # bn2_layer = BatchNormLayer(conv2.output, is_training, name='bn2')\n bn2 = tf.nn.leaky_relu(conv2.output)\n conv_layers.append(conv2)\n # bn_layers.append(bn2_layer)\n\n conv3 = Conv2dLayer(bn2, [3, 3, 128, 256], stride=2, name='conv3')\n # bn3_layer = BatchNormLayer(conv3.output, is_training, name='bn3')\n bn3 = tf.nn.leaky_relu(conv3.output)\n conv_layers.append(conv3)\n # bn_layers.append(bn3_layer)\n\n conv4 = Conv2dLayer(bn3, [3, 3, 256, 512], stride=2, name='conv4')\n # bn4_layer = BatchNormLayer(conv4.output, is_training, name='bn4')\n bn4 = tf.nn.leaky_relu(conv4.output)\n conv_layers.append(conv4)\n # bn_layers.append(bn4_layer)\n\n conv5 = Conv2dLayer(bn4, [3, 3, 512, 512], stride=2, name='conv5')\n # bn5_layer = BatchNormLayer(conv5.output, is_training, name='bn5')\n bn5 = tf.nn.leaky_relu(conv5.output)\n conv_layers.append(conv5)\n # bn_layers.append(bn5_layer)\n\n conv6 = Conv2dLayer(bn5, [3, 3, 512, 512], stride=2, name='conv6')\n # bn6_layer = BatchNormLayer(conv6.output, is_training, name='bn6')\n bn6 = tf.nn.leaky_relu(conv6.output)\n conv_layers.append(conv6)\n # bn_layers.append(bn6_layer)\n\n fc7 = FCLayer(bn6, 1, name='fc7')\n conv_layers.append(fc7)\n\n print('Print the global discriminator network constructure:')\n for conv_layer in conv_layers:\n tf.add_to_collection('global_dis_params_conv', conv_layer.w)\n tf.add_to_collection('global_dis_params_conv', conv_layer.b)\n tf.add_to_collection('weight_decay_global_dis', tf.nn.l2_loss(conv_layer.w))\n print('conv_{} shape:{}'.format(conv_layers.index(conv_layer) + 1, conv_layer.output_shape))\n\n # for bn_layer in bn_layers:\n # tf.add_to_collection('global_dis_params_bn', bn_layer.scale)\n # tf.add_to_collection('global_dis_params_bn', bn_layer.beta)\n\n return fc7.output[:, 0]",
"def convolutional_block(X, f, filters, stage, block, s = 2):\n \n # Defines name basis.\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieves Filters.\n F1, F2, F3 = filters\n \n # Saves the input value.\n X_shortcut = X\n\n\n ##### MAIN PATH #####\n # First component of main path \n X = Conv2D(F1, (1, 1), strides = (s,s), padding = 'valid',name = conv_name_base + '2a', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n\n # Second component of main path.\n X = Conv2D(F2, (f, f), strides = (1,1), padding = 'same',name = conv_name_base + '2b', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path.\n X = Conv2D(F3, (1, 1), strides = (1,1), padding = 'valid',name = conv_name_base + '2c', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n \n ##### SHORTCUT PATH ####\n X_shortcut= Conv2D(F3, (1, 1), strides = (s,s), padding = 'valid',name = conv_name_base + '1', kernel_initializer = glorot_uniform())(X_shortcut)\n X_shortcut = BatchNormalization(axis = 3, name = bn_name_base + '1')(X_shortcut)\n \n # Final step: Adds shortcut value to main path, and pass it through a RELU activation.\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n\n return X"
] | [
"0.695171",
"0.6805263",
"0.6269793",
"0.62578046",
"0.6063939",
"0.6063939",
"0.60563296",
"0.6027096",
"0.60044354",
"0.5954461",
"0.5929739",
"0.5882636",
"0.58481455",
"0.5835012",
"0.5830163",
"0.57766056",
"0.57479167",
"0.5734447",
"0.57262725",
"0.5723586",
"0.5721918",
"0.5712495",
"0.57062346",
"0.5702138",
"0.56999576",
"0.56987315",
"0.568548",
"0.56841516",
"0.56804836",
"0.56791735"
] | 0.7612941 | 0 |
Updates this configuration object from a dictionary. | def update_from_dict(self, dct):
if not dct:
return
all_props = self.__class__.CONFIG_PROPERTIES
for key, value in six.iteritems(dct):
attr_config = all_props.get(key)
if attr_config:
setattr(self, key, value)
else:
self.update_default_from_dict(key, value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self, config_dict):\r\n self._update(config_dict, allow_new_keys=True)",
"def update(self, config_dict):\n self._update(config_dict, allow_new_keys=True)",
"def update_from_dict(self, dictionary):\n for key in dictionary:\n setattr(self, key, dictionary[key])\n return self.to_dict()",
"def updateFromDict(self, data):\n for key, value in data.items():\n setattr(self, key, value)",
"def _update(self, config_dict, allow_new_keys=True):\n if not config_dict:\n return\n\n for k, v in six.iteritems(config_dict):\n if k not in self.__dict__.keys():\n if allow_new_keys:\n self.__setattr__(k, v)\n else:\n raise KeyError('Key `{}` does not exist for overriding. '.format(k))\n else:\n if isinstance(v, dict):\n self.__dict__[k]._update(v, allow_new_keys)\n else:\n self.__dict__[k] = copy.deepcopy(v)",
"def update_config(self, update_dict):\n self.config = recursive_merge_dicts(self.config, update_dict)",
"def _update(self, config_dict, allow_new_keys=True):\r\n if not config_dict:\r\n return\r\n\r\n for k, v in six.iteritems(config_dict):\r\n if k not in self.__dict__:\r\n if allow_new_keys:\r\n self.__setattr__(k, v)\r\n else:\r\n raise KeyError('Key `{}` does not exist for overriding. '.format(k))\r\n else:\r\n if isinstance(self.__dict__[k], Config) and isinstance(v, dict):\r\n self.__dict__[k]._update(v, allow_new_keys)\r\n elif isinstance(self.__dict__[k], Config) and isinstance(v, Config):\r\n self.__dict__[k]._update(v.as_dict(), allow_new_keys)\r\n else:\r\n self.__setattr__(k, v)",
"def config(self, config_dict):\r\n self._cfg.config = config_dict",
"def update_config(cls, **kwargs):\n for key, val in kwargs.items():\n setattr(cls, key, val)",
"def load_from_dict(self, dict_):\n for key, value in six.iteritems(dict_):\n setattr(self, util.as_attr(key), value)\n self._check_against_schema()",
"def update(self, **kwargs):\n for k, v in kwargs.items():\n if k not in VALID_CONFIG_KEYS:\n cprint(\"war\", f\"'{k}' is not a valid key, skipping...\")\n continue\n\n if v:\n v = self._validate_option(k, v)\n self.data[k] = v",
"def update(self, config):\n if not isinstance(config, dict):\n raise ValueError(\"Argument `config` should be dictionary\")\n self.__data.update(config)",
"def update(self, other: Mapping[str, Any]) -> None:\n self._config.update(self._flatten_dict(other))",
"def from_dict(self, dictionary: dict):\n raise NotImplementedError()",
"def from_dict(cls, dict_obj):\n config = cls()\n for k, v in dict_obj.items():\n setattr(config, k, v)\n return config",
"def override(self, config_dict_or_str):\n if isinstance(config_dict_or_str, str):\n config_dict = self.parse_from_str(config_dict_or_str)\n elif isinstance(config_dict_or_str, dict):\n config_dict = config_dict_or_str\n else:\n raise ValueError('Unknown value type: {}'.format(config_dict_or_str))\n\n self._update(config_dict, allow_new_keys=False)",
"def _update_loose (self, dict):\n self.__dict__.update(dict)",
"def load_from_dict(self, dict_):\n policies = dict_.get('policies', None)\n super(Config, self).load_from_dict(\n {k: v for k, v in six.iteritems(dict_) if k != 'policies'})\n if policies is not None:\n self.policies = policies",
"def update_from_dict(self, data: dict) -> \"Device\":\n if \"info\" in data and data[\"info\"]:\n self.info = Info.from_dict(data[\"info\"])\n\n if \"locations\" in data and data[\"locations\"]:\n locations = [Location.from_dict(location) for location in data[\"locations\"]]\n self.locations = locations\n\n return self",
"def read_dict(self, dictionary, **kwds):\n self._dict.update(dictionary)",
"def _update_config_dict(self, config_fpath: str, config_dict: Optional[dict[str, Any]] = None) -> dict[str, Any]:\n if config_dict is None:\n to_update = {}\n else:\n to_update = deepcopy(config_dict)\n with open(config_fpath, 'rb') as f:\n to_update.update(tomli.load(f))\n return to_update",
"def load(self):\n try:\n _config_file = open(self.config, 'r+')\n data = json.loads(_config_file.read())\n except (ValueError, IOError):\n data = {}\n\n self.update(data)",
"def _deep_update_config(config, updates):\n for key, value in updates.iteritems():\n if isinstance(value, collections.Mapping):\n config[key] = DexNet._deep_update_config(config.get(key, {}), value)\n else:\n config[key] = value\n return config",
"def from_dict(self, dict_=None):\n for key in dict_:\n if hasattr(self, key):\n setattr(self, key, dict_[key])",
"def from_dict(d):\n c = ConfigParser()\n for section in d.keys():\n c.add_section(section)\n for option, value in d[section].iteritems():\n c.set(section, option, value)\n return c",
"def set_attr_from_dict(self, dictionary):\n for key in dictionary:\n self.__setattr__(key, dictionary.get(key))",
"def from_dict(self, dict_=None):\n for key, value in dict_.items():\n if hasattr(self, key):\n setattr(self, key, value)",
"def update_config(self, kv: dict):\n self._configs.update(kv)\n self._save()",
"def from_dict(cls, d):\n s = cls()\n s.update_from_dict(d)\n return s",
"def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)"
] | [
"0.7539606",
"0.7526402",
"0.73409635",
"0.6924898",
"0.6826344",
"0.68139464",
"0.67985266",
"0.6760424",
"0.6756888",
"0.66938514",
"0.65406144",
"0.6525585",
"0.6479391",
"0.64561343",
"0.6436355",
"0.6408912",
"0.6408051",
"0.63989556",
"0.63800776",
"0.63728046",
"0.6356945",
"0.6315604",
"0.6314238",
"0.6305533",
"0.63013333",
"0.62936425",
"0.627742",
"0.62591344",
"0.62468433",
"0.6230012"
] | 0.7746895 | 0 |
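The row above pairs the `update_from_dict` docstring with an implementation that sets declared keys via `setattr` and routes unknown keys to `update_default_from_dict`. A minimal, self-contained sketch of that dispatch pattern follows; the class name, the shape of `CONFIG_PROPERTIES`, and the fallback storage are assumptions for illustration, not the library's real API.

```python
class SketchConfig(object):
    # Hypothetical stand-in for CONFIG_PROPERTIES: declared attribute names only.
    CONFIG_PROPERTIES = {"name", "command"}

    def __init__(self):
        self.name = ""
        self.command = ""
        self.extra = {}  # assumed fallback store for undeclared keys

    def update_default_from_dict(self, key, value):
        # Fallback for keys that are not declared properties.
        self.extra[key] = value

    def update_from_dict(self, dct):
        # Declared keys become attributes; everything else goes to the fallback.
        if not dct:
            return
        for key, value in dct.items():
            if key in self.CONFIG_PROPERTIES:
                setattr(self, key, value)
            else:
                self.update_default_from_dict(key, value)


cfg = SketchConfig()
cfg.update_from_dict({"name": "web", "command": "run", "label": "demo"})
assert cfg.name == "web" and cfg.extra == {"label": "demo"}
```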
Merges list-based attributes into one list including unique elements from both lists. When ``lists_only`` is set to ``False``, updates dictionaries and overwrites single-value attributes. The resulting configuration is 'clean', i.e. input values converted and validated. If the conversion is not possible, a ``ValueError`` is raised. | def merge(self, values, lists_only=False):
if isinstance(values, self.__class__):
self.merge_from_obj(values, lists_only=lists_only)
elif isinstance(values, dict):
self.merge_from_dict(values, lists_only=lists_only)
else:
raise ValueError("{0} or dictionary expected; found '{1}'.".format(self.__class__.__name__,
type(values).__name__)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_from_obj(self, obj, lists_only=False):\n self.clean()\n obj.clean()\n obj_config = obj._config\n all_props = self.__class__.CONFIG_PROPERTIES\n for key, value in six.iteritems(obj_config):\n attr_config = all_props[key]\n attr_type, default, __, merge_func = attr_config[:4]\n if (merge_func is not False and value != default and\n (not lists_only or (attr_type and issubclass(attr_type, list)))):\n self._merge_value(attr_type, merge_func, key, value)",
"def canonicalize_attr_map(attr_map):\n for attr, val in attr_map.iteritems():\n if not isinstance(val, list):\n attr_map[attr] = [val] \n return attr_map",
"def do_list_merge(li1, li2=None, attr=None, unique_fn=None, set_fn=set):\n if not li1 and not li2:\n return []\n elif li2 and not li1:\n li1, li2 = li2, li1\n\n new_list = li1[:]\n\n if li2 is None:\n pass\n\n elif attr is None and unique_fn is None:\n new_list.extend(li2)\n\n else:\n if attr is not None:\n if isinstance(attr, basestring):\n def unique_fn(d):\n return d[attr]\n\n if unique_fn is not None:\n unique_fn = GlobalFns(unique_fn)\n\n comparables_1 = {unique_fn(el): idx for idx, el in enumerate(li1)}\n if len(set_fn(comparables_1)) < len(comparables_1):\n raise ValueError(\"li1 is not unique wrt. unique_fn\")\n\n comparables_2 = [unique_fn(el) for el in li2]\n if len(set_fn(comparables_2)) < len(comparables_2):\n raise ValueError(\"li2 is not unique wrt. unique_fn\")\n\n for idx2, cmp_2 in enumerate(comparables_2):\n el2 = li2[idx2]\n if cmp_2 in comparables_1:\n idx1 = comparables_1[cmp_2]\n new_list[idx1] = el2\n else:\n new_list.append(el2)\n\n return new_list",
"def values_list(self, *fields, **kwargs):\r\n flat = kwargs.pop('flat', False)\r\n if kwargs:\r\n raise TypeError('Unexpected keyword arguments to values_list: %s'\r\n % (kwargs.keys(),))\r\n if flat and len(fields) > 1:\r\n raise TypeError(\"'flat' is not valid when values_list is called with more than one field.\")\r\n clone = self.only(fields)\r\n clone._values_list = True\r\n clone._flat_values_list = flat\r\n return clone",
"def merge_from_dict(self, dct, lists_only=False):\n if not dct:\n return\n self.clean()\n all_props = self.__class__.CONFIG_PROPERTIES\n for key, value in six.iteritems(dct):\n attr_config = all_props.get(key)\n if attr_config:\n attr_type, default, input_func, merge_func = attr_config[:4]\n if (merge_func is not False and value != default and\n (not lists_only or (attr_type and issubclass(attr_type, list)))):\n if input_func:\n value = input_func(value)\n self._merge_value(attr_type, merge_func, key, value)\n else:\n self.merge_default_from_dict(key, value, lists_only=lists_only)",
"def hydrate_list(self, values):\n assert isinstance(values, list)\n for i, value in enumerate(values):\n if isinstance(value, (list, dict, Structure)):\n values[i] = self.hydrate_object(value)\n return values",
"def _flatten_lists(\n data: Union[Dict[str, Any], List[Any], Any]\n ) -> Union[Dict[str, Any], Any]:\n if not isinstance(data, dict):\n return data\n copy_data = cast(Dict[str, Any], data.copy())\n for attr, val in copy_data.items():\n if isinstance(val, list):\n if len(cast(List[Any], val)) == 1:\n # pull single values out of lists\n data[attr] = _flatten_lists(cast(Any, val[0]))\n else:\n data[attr] = [_flatten_lists(v) for v in cast(List[Any], val)]\n elif isinstance(val, dict):\n data[attr] = _flatten_lists(cast(Dict[str, Any], val))\n return data",
"def merge_default_from_dict(self, key, value, lists_only=False):\n pass",
"def merge_attrs(self):\n for aid in self.attrs:\n new_val = self.attrs[aid]\n if aid in self.attributes:\n if ('value' in self.attributes[aid] and\n self.attributes[aid]['value'] != new_val):\n pass\n # print \"Updating attribute %s[%s] %s -> %s\" % (\n # self.name, aid, self.attributes[aid]['value'], new_val)\n else:\n # print \"** Warning: non-declaired attribute %s['%s'] set to:\\n'%s'\" % (\n # self.name, aid, new_val)\n self.remember_custom_attribute(self.name, aid, new_val)\n self.attributes[aid] = {}\n self.attributes[aid]['nv'] = new_val",
"def _restore_mutable_attr(args_list, compile_args):\n new_compile_args = ()\n for idx, arg in enumerate(args_list):\n if hasattr(arg, \"__ms_mutable__\") and getattr(arg, \"__ms_mutable__\") and \\\n not (hasattr(arg, \"const_arg\") and getattr(arg, \"const_arg\")):\n if hasattr(arg, \"__ms_dynamic_len__\"):\n new_compile_args += (mutable(compile_args[idx], getattr(arg, \"__ms_dynamic_len__\")),)\n else:\n new_compile_args += (mutable(compile_args[idx], False),)\n else:\n new_compile_args += (compile_args[idx],)\n return new_compile_args",
"def merge_attribute_defs(self, dest, source, changes = {}):\n # print \"in merge_attribute_defs, dest =\"\n # pp.pprint(dest)\n # print \"source =\"\n # pp.pprint(source)\n for aid in source.keys():\n if aid not in dest.keys():\n # copy attribute, then check for append\n dest[aid] = copy.deepcopy(source[aid])\n if 'value' in dest[aid]:\n if type(dest[aid]['value']) is str and dest[aid]['value'][0]=='+':\n dest[aid]['value'] = dest[aid]['value'].lstrip('+')\n changes[aid] = dest[aid]['value']\n continue \n if 'value' not in dest[aid]:\n if 'value' in source[aid]:\n dest[aid]['value'] = source[aid]['value']\n if (type(dest[aid]['value']) is str and dest[aid]['value'][0] == '+'):\n dest[aid]['value'] = dest[aid]['value'].lstrip('+') \n changes[aid] = dest[aid]['value']\n continue\n else:\n print (\"** Error, merging attribute '%s' but value not specified in source\"\n \" or destination\") % aid\n traceback.print_stack()\n sys.exit(1) \n else:\n if 'value' in source[aid]: \n # value given in both source and destination\n self.append_or_replace(dest[aid], source[aid], 'value', \"attribute %s\" % aid)\n changes[aid] = dest[aid]['value'] # save changed value\n else:\n print (\"** Warning, node at:\\n%s\\nmerging attribute '%s'\" \n \" but value to merge not specified.\") % (self.full_path, aid)\n print \"source attributes:\"\n pp.pprint(source)\n print \"dest attributes:\"\n pp.pprint(dest)",
"def normalize_attributions(self, att_list, positive=False, normalizer='MinMaxScaler'):\n all_values = np.concatenate(att_list)\n all_values = all_values[all_values > 0] if positive else all_values\n\n if normalizer == 'QuantileTransformer':\n normalizer = sklearn.preprocessing.QuantileTransformer()\n elif normalizer == 'MaxAbsScaler':\n normalizer = sklearn.preprocessing.MaxAbsScaler()\n else:\n normalizer = sklearn.preprocessing.MinMaxScaler()\n normalizer.fit(all_values.reshape(-1, 1))\n \n new_att = []\n for att in att_list:\n normed_nodes = normalizer.transform(att.reshape(-1, 1)).ravel()\n new_att.append(normed_nodes)\n return new_att",
"def merge_edge_props(attrs: dict, additional_attrs: dict):\n result = attrs\n for (key, value) in additional_attrs.items():\n if key not in ['in', 'out']:\n if type(additional_attrs[key]) is list:\n if key not in result:\n result[key] = []\n result[key].extend(additional_attrs[key])\n result[key] = list(set(result[key])) # silly solution to find unique elements\n else:\n result[key] = value\n return result",
"def normalize_set(self, items, **kwargs):\n values = set()\n for item in ensure_list(items):\n values.update(self.normalize(item, **kwargs))\n return list(values)",
"def clean_attributes(self):\n attrs = {}\n\n # Only fetch the fields we need.\n for a in self.attributes.only('name', 'value', 'attribute').iterator():\n if a.attribute.multi:\n if a.name not in attrs:\n attrs[a.name] = []\n attrs[a.name].append(a.value)\n else:\n attrs[a.name] = a.value\n self._attributes_cache = attrs # Cache the attributes\n\n return attrs",
"def _clean_simple_type_list(value_list: list[Any]) -> list[Any]:\n for i in range(len(value_list)):\n if isinstance(value_list[i], str):\n lower_case_value = value_list[i].lower()\n if lower_case_value == \"true\":\n value_list[i] = True\n if lower_case_value == \"false\":\n value_list[i] = False\n return value_list",
"def merge_schema(self, schema):\n super(BooleanAttributeSchema, self).merge_schema(schema)\n self.values.update(schema.values)",
"def sanitize_values(values: dict):\n for (key, value) in values.items():\n if isinstance(value, list):\n values.update({key: value[0]})",
"def _attributesFromRow(self, attributeList):\n for setAttribute, setValue in attributeList:\n setColumn = self.__attrmap__[setAttribute]\n if setColumn.model.type.name == \"timestamp\" and setValue is not None:\n setValue = parseSQLTimestamp(setValue)\n setattr(self, setAttribute, setValue)",
"def _update_argument_lists(self, argument_lists, idl_types):\n result = []\n for argument_list in argument_lists:\n for idl_type in idl_types:\n new_argument_list = list(argument_list)\n if idl_type is not None:\n new_argument_list.append(idl_type)\n result.append(new_argument_list)\n return result",
"def merge_contextual(self, other):\n # TODO: This is currently dependent on our data model? Make more robust to schema changes\n # Currently we assume all lists at Compound level, with 1 further potential nested level of lists\n for k in self.keys():\n # print('key: %s' % k)\n for item in self[k]:\n # print('item: %s' % item)\n for other_item in other.get(k, []):\n # Skip text properties (don't merge names, labels, roles)\n if isinstance(other_item, six.text_type):\n continue\n for otherk in other_item.keys():\n if isinstance(other_item[otherk], list):\n if len(other_item[otherk]) > 0 and len(item[otherk]) > 0:\n other_nested_item = other_item[otherk][0]\n for othernestedk in other_nested_item.keys():\n for nested_item in item[otherk]:\n if not nested_item[othernestedk]:\n nested_item[othernestedk] = other_nested_item[othernestedk]\n elif not item[otherk]:\n item[otherk] = other_item[otherk]\n log.debug('Result: %s' % self.serialize())\n return self",
"def _merge_attributes(self, workout):\n keys = self.__table__.columns.keys()\n for key in keys:\n if key in [\"id\",\n \"external_id\",\n \"is_duplicate_with\",\n \"manual_check_required_with\",\n ]:\n continue\n elif getattr(self, key) == None:\n # copy attribute if empty; else keep existing \n setattr(self, key, getattr(workout, key))",
"def verify_list_attr(self, indata, attributes_list):\n length = sum(map(len, indata.keys()))\n size = sum(map(len, attributes_list))\n\n self.log.info(\"==Verifying list_attr output:\")\n self.log.info(\" set_attr names: %s\", list(indata.keys()))\n self.log.info(\" set_attr size: %s\", length)\n self.log.info(\" list_attr names: %s\", attributes_list)\n self.log.info(\" list_attr size: %s\", size)\n\n if length != size:\n self.fail(\n \"FAIL: Size does not match for Names in list attr, Expected \"\n \"len={} and received len={}\".format(length, size))\n # verify the Attributes names in list_attr retrieve\n for key in indata.keys():\n if key.decode() not in attributes_list:\n self.fail(\n \"FAIL: Name does not match after list attr, Expected \"\n \"buf={} and received buf={}\".format(key, attributes_list))",
"def merge(self, new_attributes):\n for k, v in new_attributes.items():\n setattr(self, k, v)",
"def convert_old_style_list(list_):\n if not isinstance(list_, (tuple, list)) or len(list_) != 2:\n return list_, False\n first_item, second_item = list_\n if second_item == []:\n return [first_item], True\n try:\n # see if second item is iterable\n iter(second_item)\n except TypeError:\n return list_, False\n old_style_list = True\n new_second_item = []\n for sublist in second_item:\n item, old_style_list = convert_old_style_list(sublist)\n if not old_style_list:\n break\n new_second_item.extend(item)\n if old_style_list:\n second_item = new_second_item\n return [first_item, second_item], old_style_list",
"def validateListOfSomething(asValues, aoNilValues = tuple([[], None]), fAllowNull = True):\n if asValues in aoNilValues or (not asValues and not fAllowNull):\n return (asValues, None if fAllowNull else 'Mandatory.')\n\n if not isinstance(asValues, list):\n return (asValues, 'Invalid data type (%s).' % (type(asValues),));\n\n asValues = list(asValues); # copy the list.\n if asValues:\n oType = type(asValues[0]);\n for i in range(1, len(asValues)):\n if type(asValues[i]) is not oType: # pylint: disable=unidiomatic-typecheck\n return (asValues, 'Invalid entry data type ([0]=%s vs [%d]=%s).' % (oType, i, type(asValues[i])) );\n\n return (asValues, None);",
"def test_merge_list_traditional(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"A\"] = [\"B\", \"b\"]\n ret = dictupdate.merge_list(copy.deepcopy(self.dict1), {\"A\": \"b\"})\n self.assertEqual(mdict, ret)",
"def merge_properties_lists(*properties_lists: th.PropertiesList) -> th.PropertiesList:\n result = th.PropertiesList()\n for properties_list in properties_lists:\n for name, prop in properties_list.items():\n result.append(prop)\n return result",
"def make_attributes(kwargs: Dict[str, Any]) -> List:\n\n def _make_attribute(name: str, value: any):\n attribute = {'AttributeName': name}\n if isinstance(value, str):\n attribute['Value'] = {ValueTypes.StringValue.name: value}\n elif isinstance(value, bytes):\n attribute['Value'] = {ValueTypes.BinaryValue.name: value}\n elif isinstance(value, bool):\n attribute['Value'] = {ValueTypes.BooleanValue.name: value}\n elif isinstance(value, int):\n attribute['Value'] = {ValueTypes.NumberValue.name: str(value)}\n # int to str is required by cloud directory\n elif isinstance(value, datetime):\n attribute['Value'] = {ValueTypes.DatetimeValue.name: value}\n else:\n raise ValueError()\n return attribute\n\n return [_make_attribute(name, value) for name, value in kwargs.items()]",
"def copyAttributes(self, other, add_nxpars=False):\n import copy\n \n self.setTitle(other.getTitle())\n self.setDataSetType(other.getDataSetType())\n self.setAllAxisLabels(other.getAllAxisLabels())\n self.setAllAxisUnits(other.getAllAxisUnits())\n self.setYLabel(other.getYLabel())\n self.setYUnits(other.getYUnits())\n if len(self.attr_list.keys()) == 0:\n self.attr_list = copy.copy(other.attr_list)\n else:\n self.attr_list.instrument = copy.copy(other.attr_list.instrument)\n self.attr_list.sample = copy.copy(other.attr_list.sample)\n\n if add_nxpars:\n nxpar_keys = [item[0] for item in self.attr_list.iteritems() \\\n if isinstance(item[1], NxParameter)]\n\n for nxpar_key in nxpar_keys:\n self.attr_list[nxpar_key] += other.attr_list[nxpar_key]\n else:\n # Do nothing\n pass\n \n keys_to_get = [other_key for other_key in other.attr_list \\\n if other_key not in self.attr_list]\n \n for key_to_get in keys_to_get:\n self.attr_list[key_to_get] = \\\n copy.copy(other.attr_list[key_to_get])"
] | [
"0.65139383",
"0.5578662",
"0.5572278",
"0.55066985",
"0.5469693",
"0.53935814",
"0.5371809",
"0.53107864",
"0.5277536",
"0.5260815",
"0.5229918",
"0.52003044",
"0.5156324",
"0.5125335",
"0.5069357",
"0.50458443",
"0.5019574",
"0.4998074",
"0.49735352",
"0.49711102",
"0.49676466",
"0.49558175",
"0.49512455",
"0.49511606",
"0.49222663",
"0.4918181",
"0.49161145",
"0.49099743",
"0.4883349",
"0.48806652"
] | 0.6129385 | 1 |
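The `merge` row above describes combining list-based attributes so that only unique elements are kept, with `lists_only` controlling whether dictionaries and single-value attributes are also merged or overwritten. The sketch below approximates those semantics on a plain dict under stated assumptions; it does not reproduce the `merge_from_obj`/`merge_from_dict` machinery referenced by the snippets.

```python
def merge_unique_lists(current, incoming):
    # Keep the current order, then append only elements not seen yet.
    merged = list(current)
    merged.extend(item for item in incoming if item not in merged)
    return merged


def merge_config(config, updates, lists_only=False):
    # List values are always merged; dicts and scalars only when lists_only is False.
    for key, value in updates.items():
        existing = config.get(key)
        if isinstance(existing, list) and isinstance(value, (list, tuple)):
            config[key] = merge_unique_lists(existing, value)
        elif not lists_only:
            if isinstance(existing, dict) and isinstance(value, dict):
                existing.update(value)
            else:
                config[key] = value
    return config


cfg = {"ports": [80], "links": {"db": "db"}, "user": "root"}
merge_config(cfg, {"ports": [80, 443], "user": "www"}, lists_only=True)
assert cfg["ports"] == [80, 443] and cfg["user"] == "root"
merge_config(cfg, {"links": {"cache": "redis"}, "user": "www"})
assert cfg["user"] == "www" and cfg["links"] == {"db": "db", "cache": "redis"}
```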
Creates a copy of the current instance. | def copy(self):
return self.__class__(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy(self):\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result",
"def copy(self):\n return self.__class__(dict(self))",
"def copy(self):\n\t\ttemp = self.__class__()\n\t\ttemp.copy_from(self)\n\t\treturn temp",
"def copy(self):\n return object.__new__(type(self))",
"def copy(self):\n return self.__class__(**vars(self))",
"def copy(self):\n new = self\n return new",
"def __copy__(self):\n return self.copy()",
"def copy (self):\n return self.__class__(self.name, self[:])",
"def __copy__(self, *args, **kwargs):\n return self.copy()",
"def copy (self):\n import copy\n return copy.copy(self)",
"def clone(self):\n return self",
"def copy(self):\n return self.from_builder(self)",
"def copy(self):\n import copy as pcopy\n return pcopy.deepcopy(self)",
"def clone(self) -> Self:\n return clone(self, safe=True)",
"def copy(self):\n try:\n return self.__class__(self, copy=True)\n except TypeError:\n new = self.__class__(copy.deepcopy(self))\n return new",
"def copy(self):\n from copy import deepcopy\n return deepcopy(self)",
"def copy(self):\n return copy(self)",
"def copy(self):\n return copy(self)",
"def clone(self):\n return self.__class__(self.name, *self)",
"def _copy_(self):\n return copy.copy(self)",
"def copy(self):\n\t\treturn pythoncopy.deepcopy(self)",
"def copy(self):\r\n return copy.copy(self)",
"def clone(self):\n from copy import deepcopy\n return deepcopy(self)",
"def copy(self):\n return self.__copy__()",
"def copy(self):\n return self.__copy__()",
"def copy(self):\n return self.__copy__()",
"def copy(self):\n return self.__copy__()",
"def copy_with(self):\n return self.copy()",
"def copy(self):\n return super().copy()",
"def copy(self):\r\n return copy.deepcopy(self)"
] | [
"0.8445658",
"0.83826053",
"0.8367299",
"0.83550745",
"0.8290682",
"0.82799333",
"0.8262882",
"0.823516",
"0.8217232",
"0.8166611",
"0.8156989",
"0.81500196",
"0.8146055",
"0.8143642",
"0.81333435",
"0.81235075",
"0.8090402",
"0.8090402",
"0.80738163",
"0.8067191",
"0.80325407",
"0.8030429",
"0.8027809",
"0.80085367",
"0.80085367",
"0.80085367",
"0.80085367",
"0.8007976",
"0.7994091",
"0.7993332"
] | 0.86091197 | 1 |
Cleans the input values of this configuration object. Fields that have been updated through properties are converted to configuration values that match the format needed by functions using them. For example, for list-like values it means that input of single strings is transformed into a single-entry list. If this conversion fails, a ``ValueError`` is raised. | def clean(self):
all_props = self.__class__.CONFIG_PROPERTIES
for prop_name in self._modified:
attr_config = all_props.get(prop_name)
if attr_config and attr_config.input_func:
self._config[prop_name] = attr_config.input_func(self._config[prop_name])
self._modified.clear() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean(self, value):\n value = self.validate_to_python(value)\n self.run_validators(value)\n return value",
"def clean(self, value):\n value = self.validate_to_python(value)\n self.run_validators(value)\n return value",
"def clean(cls, value):\n return",
"def clean(self, value):\n return [f.clean(v) for v,f in zip(value, self.fields)]",
"def _clean_inputs(self, inputs):\n return inputs",
"def _trans_format(self):\n config_dict = vars(self._config)\n for item, value in config_dict.items():\n if value == 'None':\n config_dict[item] = None\n elif isinstance(value, str) and is_number(value):\n if value.isdigit():\n value = int(value)\n else:\n value = float(value)\n config_dict[item] = value",
"def _clean_attribute_settings(cls, instance, cleaned_input):\n attribute_input_type = cleaned_input.get(\"input_type\") or instance.input_type\n errors = {}\n for field in ATTRIBUTE_PROPERTIES_CONFIGURATION.keys():\n allowed_input_type = ATTRIBUTE_PROPERTIES_CONFIGURATION[field]\n if attribute_input_type not in allowed_input_type and cleaned_input.get(\n field\n ):\n errors[field] = ValidationError(\n f\"Cannot set {field} on a {attribute_input_type} attribute.\",\n code=AttributeErrorCode.INVALID.value,\n )\n if errors:\n raise ValidationError(errors)",
"def clean_fields(self, instance, exclude=None):\n errors = {}\n exclude = exclude or []\n for name, f in self.properties.items():\n raw_value = getattr(instance, name, None)\n is_blank = not bool(raw_value)\n is_nullable = f.null\n is_defaulted = f.column.default or f.column.server_default\n is_required = f.required\n\n is_skippable = is_blank and (is_nullable or is_defaulted or not is_required)\n\n if name in exclude or is_skippable:\n continue\n try:\n setattr(instance, name, f.clean(raw_value, instance))\n except ValidationError as e:\n errors[name] = e.error_list\n if errors:\n raise NestedValidationError(errors)",
"def clean(self):\n if not self.is_input and not self.is_output:\n raise ValidationError(\"TransformationXput with pk={} is neither an input nor an output\".format(self.pk))\n if self.has_structure:\n self.structure.clean()",
"def clean(self, **kwargs):\n super().clean()\n\n # Encode as native values\n if self.is_int():\n self.value = self.as_int()\n\n elif self.is_bool():\n self.value = self.as_bool()\n\n validator = self.__class__.get_setting_validator(self.key, **kwargs)\n\n if validator is not None:\n self.run_validator(validator)\n\n options = self.valid_options()\n\n if options and self.value not in options:\n raise ValidationError(_(\"Chosen value is not a valid option\"))",
"def test_construct_values_raises_on_invalid_normalize(self):\n constructor_fields = ('first', 'second', 'last', )\n values = ['first_value', 'second_value', 'last_value']\n cleaned_data = getattr(self.form, 'cleaned_data', {})\n cleaned_data.update(dict(zip(constructor_fields, values)))\n self.form.cleaned_data = cleaned_data\n normalize = 'not a valid normalize function'\n message = \"The normalize parameter must be a callable or None. \"\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.construct_value_from_values(constructor_fields, normalize=normalize)",
"def autostrip(cls):\r\n fields = [(key, value)\r\n for key, value in cls.base_fields.iteritems()\r\n if isinstance(value, CharField)]\r\n for field_name, field_object in fields:\r\n def get_clean_func(original_clean):\r\n return lambda value: original_clean(value and value.strip())\r\n clean_func = get_clean_func(getattr(field_object, 'clean'))\r\n setattr(field_object, 'clean', clean_func)\r\n return cls",
"def clean_configurations(self):\n cfg_limits = self.spectrograph.valid_configuration_values()\n if cfg_limits is None:\n # No values specified, so we're done\n return\n\n good = np.ones(len(self), dtype=bool)\n for key in cfg_limits.keys():\n # NOTE: For now, check that the configuration values were\n # correctly assigned in the spectrograph class definition.\n # This should probably go somewhere else or just removed.\n assert isinstance(cfg_limits[key], list), \\\n 'CODING ERROR: valid_configuration_values is not correctly defined ' \\\n 'for {0}; values must be a list.'.format(self.spectrograph.__class__.__name__)\n\n # Check that the metadata are valid for this column.\n indx = np.isin(self[key], cfg_limits[key])\n if not np.all(indx):\n msgs.warn('Found frames with invalid {0}.'.format(key))\n good &= indx\n\n if np.all(good):\n # All values good, so we're done\n return\n\n # Alert the user that some of the frames are going to be\n # removed\n msg = 'The following frames have configurations that cannot be reduced by PypeIt' \\\n ' and will be removed from the metadata table (pypeit file):\\n'\n indx = np.where(np.logical_not(good))[0]\n for i in indx:\n msg += ' {0}\\n'.format(self['filename'][i])\n msgs.warn(msg)\n # And remove 'em\n self.table = self.table[good]",
"def clean_config(self, config):\n return config",
"def autostrip(cls):\n fields = [(key, value) for key, value in cls.base_fields.iteritems()\n if isinstance(value, forms.CharField)]\n for field_name, field_object in fields:\n def get_clean_func(original_clean):\n return lambda value: original_clean(value and value.strip())\n clean_func = get_clean_func(getattr(field_object, 'clean'))\n setattr(field_object, 'clean', clean_func)\n return cls",
"def clear_field_values(self):\n\t\tlogging.info(\"Clearing values in the field[] dictionary of the object\")\n\t\tlogging.debug(\"Before = \" + str(self.field))\n\t\tfor key, value in self.fields.items():\n\t\t\tself.field[str(key)] = None\n\t\tlogging.debug(\"After = \" + str(self.field))\n\t\treturn",
"def clean(self):\r\n # clean categories\r\n filter_categories = \\\r\n self.categories[self.category_index: self.category_index+2]\r\n self.categories = [\r\n category for category in filter_categories if category != '']\r\n del self.fields[\"categories\"]\r\n self.fields[\"category\"] = self.categories[0]\r\n\r\n try:\r\n self.fields[\"sub_category\"] = self.categories[1]\r\n except IndexError:\r\n self.fields[\"sub_category\"] = None\r\n\r\n # clean stores\r\n filter_stores = self.stores[:2]\r\n self.stores = [store for store in filter_stores]\r\n del self.fields[\"stores\"]\r\n\r\n for n in range(len(self.stores)):\r\n field_name = \"store_\" + str(n)\r\n self.fields[field_name] = self.stores[n]\r\n\r\n # clean brand\r\n self.brand = self.brands[0]\r\n self.fields[\"brand\"] = self.brand\r\n del self.fields[\"brands\"]\r\n\r\n # clean others fields\r\n self.fields[\"name\"] = self.fields.pop(\"product_name_fr\")\r\n self.fields[\"description\"] = self.fields.pop(\"generic_name\")\r\n self.fields[\"nutri_score\"] = self.fields.pop(\"nutrition_grade_fr\")",
"def clean(self):\n # If JSON was passed in as a string, try to interpret it as JSON\n if isinstance(self.required_arguments, str):\n try:\n self.required_arguments = json.loads(self.required_arguments)\n except json.JSONDecodeError:\n raise ValidationError(\"'%s' is not valid JSON!\"\n % self.required_arguments)\n\n if isinstance(self.required_arguments_default_values, str):\n try:\n self.required_arguments_default_values = json.loads(\n self.required_arguments_default_values)\n except json.JSONDecodeError:\n raise ValidationError(\"'%s' is not valid JSON!\"\n % self.required_arguments_default_values)\n\n # Make sure arguments are valid\n is_valid, reason = task_type_args_are_valid(self)\n\n # Arguments are not valid!\n if not is_valid:\n raise ValidationError(reason)",
"def __unFixValue(cls,obj):\n\n if type(obj) is dict:\n for k in obj.keys():\n obj[k] = cls.__unFixValue(obj[k])\n elif type(obj) is list:\n for i in xrange(0, len(obj)):\n obj[i] = cls.__unFixValue(obj[i])\n elif type(obj) is str:\n try:\n return int(obj)\n except ValueError:\n pass\n return obj",
"def clean(self):\n cleaned_data = super(ManageLearnersForm, self).clean()\n\n # Here we take values from `data` (and not `cleaned_data`) as we need raw values - field clean methods\n # might \"invalidate\" the value and set it to None, while all we care here is if it was provided at all or not\n email_or_username = self.data.get(self.Fields.EMAIL_OR_USERNAME, None)\n bulk_upload_csv = self.files.get(self.Fields.BULK_UPLOAD, None)\n\n if not email_or_username and not bulk_upload_csv:\n raise ValidationError(ValidationMessages.NO_FIELDS_SPECIFIED)\n\n if email_or_username and bulk_upload_csv:\n raise ValidationError(ValidationMessages.BOTH_FIELDS_SPECIFIED)\n\n if email_or_username:\n mode = self.Modes.MODE_SINGULAR\n else:\n mode = self.Modes.MODE_BULK\n\n cleaned_data[self.Fields.MODE] = mode\n cleaned_data[self.Fields.NOTIFY] = self.clean_notify()\n\n self._validate_course()\n self._validate_program()\n\n if self.data.get(self.Fields.PROGRAM, None) and self.data.get(self.Fields.COURSE, None):\n raise ValidationError(ValidationMessages.COURSE_AND_PROGRAM_ERROR)\n\n return cleaned_data",
"def clean_dict(self,dict_to_clean):\n for i in dict_to_clean: \n try:\n float( dict_to_clean[i] ) \n except:\n dict_to_clean[ i ] = \"'%s'\"%( dict_to_clean[i ].replace(\"'\",\"\").replace('\"',\"\") )",
"def clean_value(self, value):\n return value",
"def clean_up_data(self):\n pass",
"def clean(self):\n pass",
"def clean(self):\n\n pass",
"def _transform_inputs(self) -> None:\n self.inputs = None if self.inputs == {} else self.inputs",
"def sanitize(self):\n # return sanitize_model_dict(self._axl_data)\n super().__setattr__('_axl_data', sanitize_model_dict(self._axl_data))\n return self",
"def clean(self, value):\n value = super().clean(value)\n if value in self.empty_values:\n return value\n return value.replace(' ', '')",
"def _clean(self):\n map(self.__delitem__, self.keys())\n self._original = []\n self._columns = {}\n self._modified, self._deleted = {}, {}",
"def sanitize(cls):"
] | [
"0.60418636",
"0.60418636",
"0.59051543",
"0.5863725",
"0.5829741",
"0.5760881",
"0.5721319",
"0.5714525",
"0.5582324",
"0.5558668",
"0.5509648",
"0.5507196",
"0.55028373",
"0.54199827",
"0.54151857",
"0.5408943",
"0.5379859",
"0.53760797",
"0.5363679",
"0.53535485",
"0.53409225",
"0.5322811",
"0.53188175",
"0.53066444",
"0.53057504",
"0.5305389",
"0.5277628",
"0.52697444",
"0.52675843",
"0.5258506"
] | 0.6916164 | 0 |
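The `clean` row above and the `is_clean` row below describe two sides of one contract: values set through properties are tracked as modified, `clean()` applies each attribute's `input_func` (for example turning a single string into a single-entry list), and `is_clean` reports whether unconverted input remains. The toy class below illustrates that contract with hypothetical names; it is not the library's actual implementation.

```python
def _single_entry_list(value):
    # Accept a single string or an existing sequence; anything else is invalid input.
    if isinstance(value, str):
        return [value]
    if isinstance(value, (list, tuple)):
        return list(value)
    raise ValueError("cannot convert {0!r} to a list".format(value))


class TinyConfig(object):
    INPUT_FUNCS = {"volumes": _single_entry_list}  # assumed per-attribute converters

    def __init__(self):
        self._config = {"volumes": []}
        self._modified = set()

    def set(self, key, value):
        # Stand-in for a property setter that records the attribute as modified.
        self._config[key] = value
        self._modified.add(key)

    def clean(self):
        # Convert every modified value with its input_func, then mark the object clean.
        for key in self._modified:
            input_func = self.INPUT_FUNCS.get(key)
            if input_func:
                self._config[key] = input_func(self._config[key])
        self._modified.clear()

    @property
    def is_clean(self):
        return not self._modified


cfg = TinyConfig()
cfg.set("volumes", "/data")   # a single string is accepted as input
assert not cfg.is_clean
cfg.clean()                   # converted to a single-entry list
assert cfg.is_clean and cfg._config["volumes"] == ["/data"]
```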
Whether the current object is 'clean', i.e. has no non-converted input. | def is_clean(self):
return not self._modified | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean(self) -> bool:\n raise NotImplementedError()",
"def __bool__(self):\n return self.isValid()",
"def __bool__(self):\n return self.is_valid",
"def is_valid(self):\n self.clean()\n return not bool(self.errors)",
"def is_raw(self):\n return not self._isReduced",
"def __bool__(self):\r\n return self.valid",
"def __nonzero__(self):\r\n return bool(assert_(self.obj, 'not %r' % self.obj))",
"def __bool__(self):\n\n return not self.is_empty()",
"def __bool__(self):\n return bool(abs(self))",
"def clean(self):\n if not self.is_input and not self.is_output:\n raise ValidationError(\"TransformationXput with pk={} is neither an input nor an output\".format(self.pk))\n if self.has_structure:\n self.structure.clean()",
"def is_raw(self):\n return not self.has_structure",
"def is_valid(self):\n return not self.errors",
"def is_non_inverting(self):\n\n return False",
"def clean(self, *args, **kwargs):\n self.is_cleaned = True",
"def __bool__(self):\n return bool(self.obj)",
"def is_bool(self):\n return False",
"def __bool__(self):\n return not self.undefine",
"def sanity_check(self):\n return True",
"def IsValid(self):\n return False",
"def isgood(self):\n\t\tanswer = True\n\t\t\n\t\tif self.mes_flux <= 0.0:\n\t\t\tanswer = False\n\n\t\treturn answer",
"def valid(self) -> bool:\n pass",
"def __bool__(self):\n return self.__nonzero__()",
"def __nonzero__(self):\n return True",
"def is_valid(self):\n return self._is_valid",
"def isEmpty(self):\n\t\t\n\t\t# if number and real are empty numbers and the decimal flag is false and\n\t\t# there is no exponent\n\t\tif(self.number == \"\" and self.real == \"\" and not self.decimal and self.exponent == None):\n\t\t\t# return true\n\t\t\treturn True\n\t\t# else return false\n\t\treturn False",
"def valid(self):\n pass",
"def is_empty(self):\n #return not self.vulnerable_in and not self.fixed_in\n return not self.fixed_in",
"def bool(self, obj):\n return True",
"def bool(self, obj):\n return True",
"def is_valid(self):\n\n return True"
] | [
"0.6872073",
"0.63761157",
"0.63610554",
"0.6312004",
"0.63031477",
"0.63022435",
"0.6240924",
"0.61812806",
"0.61137867",
"0.60905904",
"0.60698545",
"0.6046526",
"0.60410285",
"0.603819",
"0.6018456",
"0.60071707",
"0.60055494",
"0.5978157",
"0.5974375",
"0.59508747",
"0.5918997",
"0.59178317",
"0.59085953",
"0.58915585",
"0.58854735",
"0.5871527",
"0.5864699",
"0.58603173",
"0.58603173",
"0.5838669"
] | 0.73114955 | 0 |
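A minimal sketch of the bookkeeping behind an is_clean property of this shape; the Record class, its set method, and the save step are hypothetical and only illustrate how a _modified flag is typically raised on writes and cleared on persistence.

class Record:
    """Toy object that tracks whether it has unsaved changes."""

    def __init__(self, **fields):
        self._data = dict(fields)
        self._modified = False  # starts clean: nothing written since construction

    def set(self, key, value):
        self._data[key] = value
        self._modified = True  # any write dirties the object

    def save(self):
        # A real implementation would persist self._data here before resetting the flag.
        self._modified = False

    @property
    def is_clean(self):
        return not self._modified


r = Record(name="example")
assert r.is_clean
r.set("name", "changed")
assert not r.is_clean
r.save()
assert r.is_clean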
Returns the current world size (number of distributed processes). | def world_size() -> int:
return dist.get_world_size() if dist.is_initialized() else 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_world_size() -> int:\n return collective.get_world_size()",
"def get_world_size():\n if not torch.distributed.is_available():\n return 1\n if not torch.distributed.is_initialized():\n return 1\n return torch.distributed.get_world_size()",
"def size():\n return int(os.environ['WORLD_SIZE'])",
"def get_world_size(backend) -> int:\n if backend != 'mpi':\n return int(os.environ.get('WORLD_SIZE', 1))\n else:\n return int(os.environ.get('OMPI_COMM_WORLD_SIZE', 1))",
"def world_size(self):\n return self._wsize",
"def world_size(self):\n if self.data_section is None:\n return None\n attrs = self.data_section.attrs\n if bool(attrs)==False:\n return None\n return attrs.get('world_size', None)",
"def getGlobalSize(self):\n return self._get_global_size( )",
"def get_size(self):\n return get_dir_size(self.run_dir)",
"def GlobalSize(self):\n return _hypre.HypreParVector_GlobalSize(self)",
"def geometry_max_world_size(self):\n wsize = c_float()\n ckresult(_dll.FMOD_System_GetGeometrySettings(self._ptr, byref(wsize)))\n return wsize.value",
"def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n return tmpsize",
"def size(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"size\")",
"def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n self.size.value = tmpsize\n return self.size.value + self.ID.get_size() + self.size.get_size()",
"def domain_size(self):\n all_vars = self.all_variables()\n if not all_vars:\n return 0\n return np.prod([v.size for v in all_vars])",
"def DistributionSize(self):\n return self.distribution_size",
"def size(self) -> int:\n\n return self.sizes.sum()",
"def get_total_memory_size(self):\n return self.drt_manager.get_total_memory_size()",
"def get_size(self) -> int:\n total_size = 0\n for entry in self.__entries:\n total_size += entry.get_size()\n return total_size",
"def dimension(self):\n return np.prod(np.asarray(self.subsystem_dims))",
"def memsize(self):\n return self.xlist(\"get-memsize\")[1][0] * 1024",
"def mem_per_core(self):\n return self.mem_per_node / self.cores_per_node",
"def size(self):\n futures = self.client.map(_call_size, self.vecDask, pure=False)\n sizes = self.client.gather(futures)\n return np.sum(sizes)",
"def size(self):\r\n return sum(pool.size() for pool in self.host_to_pool.values())",
"def psizes(self):\n return self._cache.psizes",
"def size_out(self):\n return self.dimensions",
"def size(self) -> int:\n return self._status['party_size'][0]",
"def get_insternal_size(self):\n return (\n sys.getsizeof(self.theta) +\n sys.getsizeof(self.num_buckets) +\n sys.getsizeof(self.k) +\n sys.getsizeof(self.fp_size) +\n sys.getsizeof(self.max_iter) +\n sys.getsizeof(self.bucket_size)\n )",
"def get_total_distributed(self) -> int:\n return self._total_distributed.get()",
"def get_total_memory_size(self):\n memory = 0\n for i in range(4):\n for j in range(4):\n memory += self.system.operator[i, j].memory\n return memory",
"def voxel_size(self):\n return self.calculation.voxel_size"
] | [
"0.86169875",
"0.8585714",
"0.80531013",
"0.78157955",
"0.7763954",
"0.72711486",
"0.7219287",
"0.69150424",
"0.68911326",
"0.68068975",
"0.671325",
"0.668719",
"0.6650058",
"0.65553606",
"0.6534457",
"0.6533206",
"0.64964646",
"0.6494468",
"0.6457273",
"0.6454149",
"0.6443454",
"0.6441065",
"0.64364606",
"0.64359057",
"0.64275604",
"0.642149",
"0.64067143",
"0.6406349",
"0.6391636",
"0.6380225"
] | 0.86466646 | 0 |
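A hedged usage sketch for a world_size() helper of this kind, assuming the torch.distributed backend used in the entry above; the is_initialized() guard is what lets the same code run both single-process and under a multi-process launcher. The sharding example at the end is an assumption added only for illustration.

import torch.distributed as dist

def world_size() -> int:
    # 1 when no process group exists, so plain single-process runs keep working.
    return dist.get_world_size() if dist.is_initialized() else 1

def rank() -> int:
    return dist.get_rank() if dist.is_initialized() else 0

# Example: round-robin shard of work items, one slice per process.
items = list(range(100))
local_items = items[rank()::world_size()]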
Decorator that only runs the function on the process with rank 0. | def rank_zero_only(fn):
def wrapped(*args, **kwargs):
if rank() == 0:
return fn(*args, **kwargs)
return wrapped | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rank_zero_only(fn):\n\n @wraps(fn)\n def wrapped_fn(self, *args, **kwargs):\n if self.rank == 0:\n fn(self, *args, **kwargs)\n\n return wrapped_fn",
"def _message_when_root(func):\n\n def decorated(*args, **kwargs):\n from armi import MPI_RANK\n\n if MPI_RANK == 0:\n func(*args, **kwargs)\n\n return decorated",
"def custom_process(f: ProcessFunction):\n process_registry_040.add_hidden(f)\n process_registry_100.add_hidden(f)\n return f",
"def non_standard_process(spec: ProcessSpec) -> Callable[[ProcessFunction], ProcessFunction]:\n\n def decorator(f: ProcessFunction) -> ProcessFunction:\n process_registry_040.add_function(f=f, spec=spec.to_dict_040())\n process_registry_100.add_function(f=f, spec=spec.to_dict_100())\n return f\n\n return decorator",
"def call_in_rank_order(fun, comm=None):\n if comm is None:\n comm = PETSc.COMM_WORLD\n\n for rank in range(comm.size):\n if rank == comm.rank:\n fun(rank, comm)\n comm.barrier()",
"def test_rank_zero_none_set(rank_key, rank):\n\n with mock.patch.dict(os.environ, {rank_key: rank}):\n from pytorch_lightning.utilities.distributed import _get_rank, rank_zero_only\n\n rank_zero_only.rank = _get_rank()\n\n @rank_zero_only\n def foo():\n return 1\n\n x = foo()\n assert x is None",
"def call_by_root(f, root=0):\n MPI = is_running_mpi()\n if MPI:\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n if rank == root:\n return f()\n else:\n return f()",
"def mpisync(func, comm=MPI.COMM_WORLD):\n def mpifunc(*args, **kwargs):\n if comm.Get_rank() == 0:\n res = func(*args, **kwargs)\n else:\n res = None\n res = comm.bcast(res, root=0)\n return res\n return mpifunc",
"def return_first(fn):\n def wrapped(*args, **kwargs):\n res = fn(*args, **kwargs)\n return res if _HVD.rank() == 0 else None\n return wrapped",
"def test_rank_zero_only():\n os.environ[\"RANK\"] = \"0\"\n # check that wrapping instance work\n timer = pt_clb.rank_zero_only(pt_clb.Timer())\n assert hasattr(timer, \"timer\")\n\n os.environ[\"RANK\"] = \"1\"\n # check that wrapping class also works\n timer = pt_clb.rank_zero_only(pt_clb.Timer)()\n assert not hasattr(timer, \"timer\")",
"def process(f: ProcessFunction) -> ProcessFunction:\n process_registry_040.add_function(f)\n process_registry_100.add_function(f)\n return f",
"def process():\n pass",
"def init_processes(fn, local_rank, backend='nccl'):\n dist.init_process_group(backend)\n fn(dist.get_rank(), dist.get_world_size(), local_rank)",
"def clusterprocess(func=None, cluster_nodefile=\"$PBS_NODEFILE\", cluster_pin=None, cluster_hint='blocked', cluster_ssh_port=22):\n if func:\n def _call(*args, **kwargs):\n return ClusterProcess(func, *args, **kwargs)\n _call.__name__ = func.__name__\n return _call\n else:\n def wrap_process(func):\n def _call(*args, **kwargs):\n kwargs['cluster_nodefile'] = cluster_nodefile\n kwargs['cluster_pin'] = cluster_pin\n kwargs['cluster_hint'] = cluster_hint\n kwargs['cluster_ssh_port'] = cluster_ssh_port\n return ClusterProcess(func, *args, **kwargs)\n _call.__name__ = func.__name__\n return _call\n return wrap_process",
"def no_arg():\n run_no_arg()",
"def process_fn(func):\n def wrapper(*args, **kwargs):\n process = multiprocessing.Process(target=func, args=args, kwargs=kwargs)\n process.start()\n return process\n return wrapper",
"def apply_only(self, function, worker, *args, **kwargs):\n pass",
"def test_limit_as_runs_as_seperate_process_fork() -> None:\n this_process_id = os.getpid()\n other_process_id = limited_func_with_decorator_fork()\n assert this_process_id != other_process_id",
"def test_limit_as_runs_as_seperate_process_forkserver() -> None:\n this_process_id = os.getpid()\n other_process_id = limited_func_with_decorator_forkserver()\n assert this_process_id != other_process_id",
"def run_job(local_rank, num_proc, func, init_method, backend, cfg):\n # Initialize the process group.\n world_size = num_proc\n rank = num_proc + local_rank\n\n try:\n torch.distributed.init_process_group(\n backend=backend, init_method=init_method, world_size=world_size, rank=rank,\n )\n except Exception as e:\n raise e\n\n torch.cuda.set_device(local_rank)\n func(cfg)",
"def test_rank_zero_known_cluster_envs(env_vars: Mapping[str, str]):\n from pytorch_lightning.utilities.distributed import _get_rank, rank_zero_only\n\n rank_zero_only.rank = _get_rank()\n\n with mock.patch.dict(os.environ, env_vars):\n from pytorch_lightning.utilities.distributed import _get_rank, rank_zero_only\n\n rank_zero_only.rank = _get_rank()\n\n @rank_zero_only\n def foo(): # The return type is optional because on non-zero ranks it will not be called\n return 1\n\n x = foo()\n assert x == 1",
"def rank():\n return 0",
"def parallelizer(func, arg=False):\n if arg:\n func(arg)\n else:\n func()",
"def init_processes(rank, size, fn, backend='gloo'):\r\n os.environ['MASTER_ADDR'] = '127.0.0.1'\r\n os.environ['MASTER_PORT'] = '29500'\r\n dist.init_process_group(backend, rank=rank, world_size=size)\r\n fn(rank, size)",
"def dist_wrap(run_func: FuncT) -> FuncT:\n\n def dist_init(\n local_rank: int,\n num_procs: int,\n *func_args: list[Any],\n **func_kwargs: dict[str, Any],\n ) -> None:\n \"\"\"Initialize torch.distributed and execute the user function.\"\"\"\n os.environ['MASTER_ADDR'] = '127.0.0.1'\n os.environ['MASTER_PORT'] = '29503'\n os.environ['LOCAL_RANK'] = str(local_rank)\n # NOTE: unit tests don't support multi-node so\n # local_rank == global rank\n os.environ['RANK'] = str(local_rank)\n os.environ['WORLD_SIZE'] = str(num_procs)\n\n dist.init_process_group('gloo')\n\n run_func(*func_args, **func_kwargs)\n\n # Keep faster ranks from exiting and breaking process group\n dist.barrier()\n\n def dist_launcher(\n num_procs: int,\n *func_args: list[Any],\n **func_kwargs: dict[str, Any],\n ) -> None:\n \"\"\"Launch processes and gracefully handle failures.\"\"\"\n # Set multiprocessing to use fork because on MacOS/Windows, the\n # default in Python 3.8 and later is \"spawn\" which cannot\n # pickle lambda functions.\n # NOTE: fork does not work with CUDA tensors but that is okay\n # because the test suite does not use CUDA\n ctx = multiprocessing.get_context('fork')\n\n # Spawn all workers on subprocesses.\n processes = []\n for local_rank in range(num_procs):\n p = ctx.Process(\n target=dist_init,\n args=(local_rank, num_procs, *func_args),\n kwargs=func_kwargs,\n )\n p.start()\n processes.append(p)\n\n # Wait for all other processes to complete\n for p in processes:\n p.join(UNIT_WORKER_TIMEOUT)\n\n failed = [\n (rank, p)\n for rank, p in enumerate(processes)\n if p.exitcode != 0\n ]\n for rank, p in failed:\n # If it still hasn't terminated, kill it because it hung.\n if p.exitcode is None:\n p.terminate()\n pytest.fail(f'Worker {rank} hung.', pytrace=False)\n elif p.exitcode < 0:\n pytest.fail(\n f'Worker {rank} killed by signal {-p.exitcode}',\n pytrace=False,\n )\n elif p.exitcode > 0:\n pytest.fail(\n f'Worker {rank} exited with code {p.exitcode}',\n pytrace=False,\n )\n\n def run_func_decorator(\n *func_args: list[Any],\n **func_kwargs: dict[str, Any],\n ) -> Any:\n \"\"\"Entry point for @distributed_test().\"\"\"\n if isinstance(world_size, int):\n dist_launcher(world_size, *func_args, **func_kwargs)\n elif isinstance(world_size, list):\n for procs in world_size:\n dist_launcher(procs, *func_args, **func_kwargs)\n time.sleep(0.5)\n else:\n raise TypeError(\n 'world_size must be an integer or a list of integers.',\n )\n\n return cast(FuncT, run_func_decorator)",
"def fn():",
"def process(self, func=lambda test: test.run()):\n yield self, func(self.bind(context=None))",
"def print_rank_zero(*args, **kwargs) -> None:\n print(*args, **kwargs)",
"def rob(func):\n\ttry:\n\t\treturn func()\n\n\texcept: #pylint: disable=W0702\n\t\treturn",
"def func():\n pass"
] | [
"0.78528255",
"0.66583896",
"0.6142544",
"0.6070626",
"0.6058951",
"0.60186154",
"0.5988285",
"0.5949869",
"0.5930045",
"0.5898354",
"0.5718907",
"0.5713488",
"0.5649635",
"0.55312955",
"0.5502004",
"0.5497844",
"0.547936",
"0.5471152",
"0.539442",
"0.5339954",
"0.53384835",
"0.5316338",
"0.5301795",
"0.52786756",
"0.51880956",
"0.5161157",
"0.51530564",
"0.5144817",
"0.5108465",
"0.51030415"
] | 0.78286487 | 1 |
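A usage sketch for a rank_zero_only decorator of this shape; the rank() helper and the checkpoint function are assumptions, added only to show that the wrapped callable executes on rank 0 and silently returns None everywhere else.

import torch.distributed as dist

def rank() -> int:
    return dist.get_rank() if dist.is_initialized() else 0

def rank_zero_only(fn):
    def wrapped(*args, **kwargs):
        if rank() == 0:
            return fn(*args, **kwargs)
        # other ranks fall through and return None
    return wrapped

@rank_zero_only
def save_checkpoint(path):
    print(f"rank 0 writing {path}")
    return path

result = save_checkpoint("model.pt")  # the path on rank 0, None on every other rank

As written, the wrapper drops the original function's __name__ and docstring; wrapping it with functools.wraps(fn) would preserve that metadata.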
Equivalent to print, but only runs on the process with rank 0. | def print_rank_zero(*args, **kwargs) -> None:
print(*args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def l_print_no_barrier(*args):\n print(comm.rank, ':', end=' ')\n for i in args:\n print(i, end=' ')\n # noinspection PyArgumentList\n print()",
"def print_on_master(self, msg: str, process_group: ProcessGroup = None):\n rank = dist.get_rank(group=process_group)\n if rank == 0:\n print(msg)",
"def r_print(*args):\n if comm.rank == 0:\n print('ROOT:', end=' ')\n for i in args:\n print(i, end=' ')\n # noinspection PyArgumentList\n print()",
"def print_on_node_master(self, msg: str):\n self._assert_local_rank_set()\n if self.local_rank == 0:\n print(msg)",
"def l_print(*args):\n for rank in range(0, comm.size):\n comm.Barrier()\n if rank == comm.rank:\n l_print_no_barrier(*args)\n comm.Barrier()",
"def _print_r0(self, msg: str, restart: bool = False) -> None:\n if restart:\n self._tstart = time.time()\n if self.rank == 0:\n memory_info = xm.get_memory_info(xm.xla_device())\n gb_free = memory_info[\"kb_free\"] / 1024 / 1024\n gb_total = memory_info[\"kb_total\"] / 1024 / 1024\n logging.info(\n f\"{msg} free={gb_free: .4f} GB, total={gb_total: .4f} GB, t={time.time()-self._tstart: .1f}\"\n )",
"def to_print_out(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('print')\n else:\n self.output('print')",
"def print_player_rank_and_points(self):\r\n pass",
"def printh(*args, **kwargs):\n if hvd.rank() == 0:\n print(*args, **kwargs)",
"def _default_vprint_worker(*args, **kwargs):\r\n print(*args, **kwargs)",
"def subrun(self): # noqa: C901\n\n # this would be the first import of MPI\n # resutling in MPI_INIT being called\n from mpi4py import MPI\n self.MPI = MPI\n\n _string = 'from MPI: %s of %s\\n' % (MPI.COMM_WORLD.Get_rank(), MPI.COMM_WORLD.Get_size())\n _string += 'from mpirank: %s of %s\\n' % (self.rank.get(), self.worldsize.get())\n\n self.rank.set(MPI.COMM_WORLD.Get_rank())\n self.worldsize.set(MPI.COMM_WORLD.Get_size())\n\n _string += ('from mpirank: %s of %s\\n' % (self.rank.get(), self.worldsize.get()))\n _string += ('from MPI: %s of %s\\n' % (MPI.COMM_WORLD.Get_rank(), MPI.COMM_WORLD.Get_size()))\n\n self.queue_map_set.wait()\n self.logger = logging.getLogger(__name__)\n logging_format = '%(asctime)s|%(process)s|%(thread)s|' + ('%05d' % self.rank.get()) + '|%(levelname)s|%(name)s|%(message)s'\n logging_datefmt = '%Y-%m-%d %H:%M:%S'\n logging_filename = 'yoda_droid_%05d.log' % self.rank.get()\n for h in logging.root.handlers:\n logging.root.removeHandler(h)\n logging.basicConfig(level=self.loglevel,\n format=logging_format,\n datefmt=logging_datefmt,\n filename=logging_filename)\n\n self.logger.info('string = %s', _string)\n self.logger.info('setup rank %s of %s', self.rank.get(), self.worldsize.get())\n self.logger.info('queue_map: %s', self.queue_map)\n\n # build queues object:\n self.queues = {}\n for key in self.queue_map.keys():\n self.queues[key] = self.queue_list[self.queue_map[key]]\n\n # set forwarding_map\n self.forwarding_map = {}\n if self.rank.get() == 0:\n self.forwarding_map = MessageTypes.forwarding_map[0]\n else:\n self.forwarding_map = MessageTypes.forwarding_map[1]\n\n # set logging level here\n self.logger.setLevel(self.loglevel)\n self.logger.debug('file: %s', __file__)\n self.logger.info('loglevel: %s', self.loglevel)\n self.logger.info('debug_message_char_length: %s', self.debug_message_char_length)\n self.logger.info('default_message_buffer_size: %s', self.default_message_buffer_size)\n self.logger.info('loop_timeout: %s', self.loop_timeout)\n\n self.receiveRequest = None\n\n self.send_requests = []\n\n # set initial state\n self.set_balanced()\n\n # keep track of when a message arrived previously\n # Only perform blocking receives when no message was\n # received during the previous loop\n no_message_on_last_loop = False\n\n self.logger.info('from mpirank: %s of %s' % (self.rank.get(), self.worldsize.get()))\n self.logger.info('from MPI: %s of %s' % (MPI.COMM_WORLD.Get_rank(), MPI.COMM_WORLD.Get_size()))\n\n while not self.exit.is_set():\n self.logger.debug('starting loop, queue empty = %s, state = %s',\n self.queues['MPIService'].empty(), self.get_state())\n\n # check for incoming message\n if no_message_on_last_loop and self.in_mpi_blocking():\n self.logger.info('block on mpi for %s', self.loop_timeout)\n message = self.receive_message(block=True, timeout=self.loop_timeout)\n elif no_message_on_last_loop and self.in_balanced() and self.queues['MPIService'].empty():\n self.logger.info('block on mpi for %s', self.loop_timeout / 2)\n message = self.receive_message(block=True, timeout=self.loop_timeout / 2)\n else:\n self.logger.info('check for mpi message')\n message = self.receive_message()\n\n # if message received forward it on\n if message is not None:\n # record that we received a message this loop\n no_message_on_last_loop = False\n # shorten our message for printing\n if self.logger.getEffectiveLevel() == logging.DEBUG:\n tmpmsg = str(message)\n if len(tmpmsg) > self.debug_message_char_length:\n tmpslice = slice(0, self.debug_message_char_length)\n 
tmpmsg = tmpmsg[tmpslice] + '...'\n self.logger.debug('received mpi message: %s', tmpmsg)\n # forward message\n self.forward_message(message)\n else:\n self.logger.info('no message from MPI')\n\n # check for messages on the queue that need to be sent\n try:\n if no_message_on_last_loop and self.in_queue_blocking():\n self.logger.info('block on queue for %s', self.loop_timeout)\n qmsg = self.queues['MPIService'].get(block=True, timeout=self.loop_timeout)\n elif no_message_on_last_loop and self.in_balanced():\n self.logger.info('block on queue for %s', self.loop_timeout / 2)\n qmsg = self.queues['MPIService'].get(block=True, timeout=self.loop_timeout / 2)\n else:\n self.logger.info('check for queue message')\n qmsg = self.queues['MPIService'].get(block=False)\n\n # record that we received a message this loop\n no_message_on_last_loop = False\n\n # shorten our message for printing\n if self.logger.getEffectiveLevel() == logging.DEBUG:\n tmpmsg = str(qmsg)\n if len(tmpmsg) > self.debug_message_char_length:\n tmpslice = slice(0, self.debug_message_char_length)\n tmpmsg = tmpmsg[tmpslice] + '...'\n self.logger.debug('received queue message: %s', tmpmsg)\n\n # determine if destination rank or tag was set\n if 'destination_rank' in qmsg:\n destination_rank = qmsg['destination_rank']\n else:\n self.logger.error('received message to send, but there is no destination_rank specified')\n continue\n tag = None\n if 'tag' in qmsg:\n tag = qmsg['tag']\n\n # send message\n msgbuff = copy.deepcopy(qmsg)\n self.logger.info('sending msg of size %s bytes and type %s with destination %s', sys.getsizeof(msgbuff), msgbuff['type'], destination_rank)\n if tag is None:\n send_request = MPI.COMM_WORLD.isend(msgbuff, dest=destination_rank)\n else:\n send_request = MPI.COMM_WORLD.isend(msgbuff, dest=destination_rank, tag=tag)\n\n # On Theta I saw strange MPI behavior when waiting for the request\n # from a non-blocking send (isend) which caused upto 20minute waits.\n # This request should only be waiting for MPI to copy my data into\n # its own buffer, but takes too long. This is a stop gap, which just\n # appends the message to a dictionary (after a deepcopy of the original)\n # and then moves on. It doesn't come back to check if it completed.\n # I should eventually make this an 'optional' patch to enable and disable.\n self.send_requests.append({'msg': msgbuff, 'dest': destination_rank, 'tag': tag, 'req': send_request})\n\n # This was the previous code, which properly checks the isend request\n # has completed.\n\n # wait for send to complete\n # self.logger.debug('wait for send to complete')\n # send_request.wait()\n # self.logger.debug('send complete')\n\n except Empty:\n self.logger.debug('no message from message queue')\n\n # record no messages received\n if message is None:\n self.logger.debug('no messages received this loop')\n no_message_on_last_loop = True\n\n self.logger.info('waiting for all ranks to reach this point before exiting')\n self.MPI.COMM_WORLD.Barrier()\n self.logger.info('exiting')",
"def mpi_rank(self):\n return 0",
"def rank():\n return 0",
"def print_out():\n pass",
"def prnt(printstring, silent=False):\n if not silent:\n stdout.write(printstring)",
"def print(self):\n for index in range(0, len(self.heap)):\n print(self.heap[index])\n print()",
"def do_print_net(self, line=''):\n self.fibbing.print_net()",
"def prints(self):\r\n\r\n for i in range(len(self.heap_array)):\r\n print(self.heap_array[i])",
"def vprint(*args, **kwargs):\r\n vprint_worker(*args, **kwargs)",
"def sequential_print_statements():\n pass",
"def print_simulation_sequence(self):\n print('-----------------------------------------------')\n for msg_group in self.msg_group_list:\n msg_group.print()\n print('-----------------------------------------------')",
"def print_processor(print_que):\n print(termcolor.colored(\"!--DO NOT CLOSE--!\", \"red\"))\n print(len(print_que))\n ID_LIMIT = 40\n run = True\n jobs_ran = 0\n while run:\n Q_Jobs = 0\n if len(print_que) > 0:\n if \"10.56.54.162\" in print_que[0]:\n Q_Jobs = print_status(\"10.56.54.162\")\n else:\n Q_Jobs = print_status(\"10.56.54.156\")\n if Q_Jobs >= ID_LIMIT:\n print(\"Printed so Far: \", str(jobs_ran))\n print(\"Waiting For Jobs to Clear Up\")\n # input(\n # \"Please Confirm Printers Will Support 40 More Job IDS before pressing enter: \")\n jobs_ran = 0\n time.sleep(100)\n continue\n if len(print_que) > 0:\n if(\"banner\" not in print_que[0]):\n os.system(print_que[0])\n print((str(print_que[0]).replace(\n \"C:/Windows/System32/lpr.exe -S 10.56.54.\", \"\").replace(\n '-P PS \"C:/S/SO/', \"\").split(\"-J\")[0]))\n print_que.pop(0)\n jobs_ran += 1\n else:\n print(termcolor.colored(\"\\n!--PROCESSING CAUGHT UP--!: \", \"green\"))\n run = False\n jobs_ran += 1",
"def __str__(self):\n return str(self.rank)",
"def print(self):\n self.__print_local(self.dataset, 0)",
"def call_in_rank_order(fun, comm=None):\n if comm is None:\n comm = PETSc.COMM_WORLD\n\n for rank in range(comm.size):\n if rank == comm.rank:\n fun(rank, comm)\n comm.barrier()",
"def my_print(self):\n if self.__size == 0:\n print(\"\")\n return\n [print(\"\") for x in range(0, self.__position[1])]\n for i in range(0, self.__size):\n [print(\" \", end=\"\") for i in range(0, self.__position[0])]\n [print(\"#\", end=\"\") for j in range(0, self.__size)]\n print(\"\")",
"def print(self) -> None:\n\n print(\"Name: {}\".format(self.name))\n print(\"Input Queue: {}\".format(self.input_queue))\n print(\"Output Queue: {}\".format(self.output_queue))\n print(\"Restart Required: {}\".format(str(self.restart_required)))\n print(\"Number of Processes: {}\".format(str(self.num_processes)))\n print(\"Process Job: {}\".format(self.process_job.__name__))\n print(\"Timeout Duration: {}\".format(str(self.timeout_duration)))\n self.print_process_list()",
"def out(*args):\r\n print(*args)",
"def mpi_fork(n, bind_to_core=False):\n if n<=1:\n return \"child\"\n if os.getenv(\"IN_MPI\") is None:\n env = os.environ.copy()\n env.update(\n MKL_NUM_THREADS=\"1\",\n OMP_NUM_THREADS=\"1\",\n IN_MPI=\"1\"\n )\n args = [\"mpirun\", \"-np\", str(n)]\n if bind_to_core:\n args += [\"-bind-to\", \"core\"]\n args += [sys.executable] + sys.argv\n subprocess.check_call(args, env=env)\n return \"parent\"\n else:\n return \"child\"",
"def my_print(self):\n if self.__size is not 0:\n for ite in range(self.__position[1]):\n print()\n for ite in range(self.__size):\n print(\" \" * self.__position[0], end=\"\")\n print(\"#\" * self.size)\n else:\n print()"
] | [
"0.67231727",
"0.671745",
"0.6413891",
"0.63975346",
"0.6320614",
"0.6140222",
"0.59592324",
"0.5801671",
"0.5788699",
"0.5557892",
"0.54468316",
"0.53032565",
"0.5294671",
"0.5267618",
"0.5237459",
"0.52162343",
"0.51678485",
"0.5136221",
"0.5076834",
"0.5038921",
"0.5038565",
"0.50325304",
"0.5014458",
"0.5013467",
"0.5006136",
"0.49986678",
"0.49980763",
"0.49939698",
"0.49739555",
"0.4973071"
] | 0.7382136 | 0 |
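The body shown above prints unconditionally, so the rank restriction presumably comes from a decorator such as the rank_zero_only entry before it; a self-contained variant with an explicit guard could look like the sketch below, where the torch.distributed calls are an assumption.

import torch.distributed as dist

def print_rank_zero(*args, **kwargs) -> None:
    # Emit once per job instead of once per process.
    if not dist.is_initialized() or dist.get_rank() == 0:
        print(*args, **kwargs)

print_rank_zero("loaded", 1000, "samples")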
Identifies uncorrelated samples and updates the arrays of the reduced potential energy and dhdlt, retaining data entries of these samples only. 'sta' and 'fin' are the starting and final snapshot positions to be read; both are arrays of dimension K. | def uncorrelate(sta, fin, do_dhdl=False):
if not P.uncorr_threshold:
if P.software.title()=='Sire':
return dhdlt, nsnapshots, None
return dhdlt, nsnapshots, u_klt
u_kln = numpy.zeros([K,K,max(fin-sta)], numpy.float64) # u_kln[k,m,n] is the reduced potential energy of uncorrelated sample index n from state k evaluated at state m
N_k = numpy.zeros(K, int) # N_k[k] is the number of uncorrelated samples from state k
g = numpy.zeros(K,float) # autocorrelation times for the data
if do_dhdl:
dhdl = numpy.zeros([K,n_components,max(fin-sta)], float) #dhdl is value for dhdl for each component in the file at each time.
print "\n\nNumber of correlated and uncorrelated samples:\n\n%6s %12s %12s %12s\n" % ('State', 'N', 'N_k', 'N/N_k')
UNCORR_OBSERVABLE = {'Gromacs':P.uncorr,'Amber':'dhdl', 'Sire':'dhdl', 'Desmond':'dE', 'Gomc':P.uncorr}[P.software.title()]
if UNCORR_OBSERVABLE == 'dhdl':
# Uncorrelate based on dhdl values at a given lambda.
for k in range(K):
# Sum up over those energy components that are changing.
# if there are repeats, we need to use the lchange[k] from the last repeated state.
lastl = k
for l in range(K):
if numpy.array_equal(lv[k],lv[l]):
lastl = l
dhdl_sum = numpy.sum(dhdlt[k, lchange[lastl], sta[k]:fin[k]], axis=0)
# Determine indices of uncorrelated samples from potential autocorrelation analysis at state k
#NML: Set statistical inefficiency (g) = 1 if vector is all 0
if not numpy.any(dhdl_sum):
#print "WARNING: Found all zeros for Lambda={}\n Setting statistical inefficiency g=1.".format(k)
g[k] = 1
else:
# (alternatively, could use the energy differences -- here, we will use total dhdl).
g[k] = pymbar.timeseries.statisticalInefficiency(dhdl_sum)
indices = sta[k] + numpy.array(pymbar.timeseries.subsampleCorrelatedData(dhdl_sum, g=g[k])) # indices of uncorrelated samples
N_uncorr = len(indices) # number of uncorrelated samples
# Handle case where we end up with too few.
if N_uncorr < P.uncorr_threshold:
if do_dhdl:
print "WARNING: Only %s uncorrelated samples found at lambda number %s; proceeding with analysis using correlated samples..." % (N_uncorr, k)
indices = sta[k] + numpy.arange(len(dhdl_sum))
N = len(indices)
else:
N = N_uncorr
N_k[k] = N # Store the number of uncorrelated samples from state k.
if not (u_klt is None):
for l in range(K):
u_kln[k,l,0:N] = u_klt[k,l,indices]
if do_dhdl:
print "%6s %12s %12s %12.2f" % (k, N_uncorr, N_k[k], g[k])
for n in range(n_components):
dhdl[k,n,0:N] = dhdlt[k,n,indices]
if UNCORR_OBSERVABLE == 'dhdl_all':
# Uncorrelate based on dhdl values at a given lambda.
for k in range(K):
# Sum up over the energy components; notice, that only the relevant data is being used in the third dimension.
dhdl_sum = numpy.sum(dhdlt[k,:,sta[k]:fin[k]], axis=0)
# Determine indices of uncorrelated samples from potential autocorrelation analysis at state k
# (alternatively, could use the energy differences -- here, we will use total dhdl).
g[k] = pymbar.timeseries.statisticalInefficiency(dhdl_sum)
indices = sta[k] + numpy.array(pymbar.timeseries.subsampleCorrelatedData(dhdl_sum, g=g[k])) # indices of uncorrelated samples
N = len(indices) # number of uncorrelated samples
# Handle case where we end up with too few.
if N < P.uncorr_threshold:
if do_dhdl:
print "WARNING: Only %s uncorrelated samples found at lambda number %s; proceeding with analysis using correlated samples..." % (N, k)
indices = sta[k] + numpy.arange(len(dhdl_sum))
N = len(indices)
N_k[k] = N # Store the number of uncorrelated samples from state k.
if not (u_klt is None):
for l in range(K):
u_kln[k,l,0:N] = u_klt[k,l,indices]
if do_dhdl:
print "%6s %12s %12s %12.2f" % (k, fin[k], N_k[k], g[k])
for n in range(n_components):
dhdl[k,n,0:N] = dhdlt[k,n,indices]
if UNCORR_OBSERVABLE == 'dE':
# Uncorrelate based on energy differences between lambdas.
for k in range(K):
# Sum up over the energy components as above using only the relevant data; here we use energy differences
# Determine indices of uncorrelated samples from potential autocorrelation analysis at state k
dE = u_klt[k,k+1,sta[k]:fin[k]] if not k==K-1 else u_klt[k,k-1,sta[k]:fin[k]]
g[k] = pymbar.timeseries.statisticalInefficiency(dE)
indices = sta[k] + numpy.array(pymbar.timeseries.subsampleCorrelatedData(dE, g=g[k])) # indices of uncorrelated samples
N = len(indices) # number of uncorrelated samples
# Handle case where we end up with too few.
if N < P.uncorr_threshold:
print "WARNING: Only %s uncorrelated samples found at lambda number %s; proceeding with analysis using correlated samples..." % (N, k)
indices = sta[k] + numpy.arange(len(dE))
N = len(indices)
N_k[k] = N # Store the number of uncorrelated samples from state k.
if not (u_klt is None):
for l in range(K):
u_kln[k,l,0:N] = u_klt[k,l,indices]
if do_dhdl:
return (dhdl, N_k, u_kln)
return (N_k, u_kln) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_data_stoichometry(fasta, bams, regions, features, samples, fracs, \n maxReads=1000, strands=\"+-\", nn=1):\n # get storage\n k = 2*nn+1\n fi = 0\n sam = pysam.AlignmentFile(bams[0])\n region2data = {}\n sample2idx = {s: i for i, s in enumerate(samples)}; print(sample2idx)\n for ri, (ref, pos, mt) in enumerate(regions, 1):\n sys.stderr.write(\" %s / %s %s:%s \\r\"%(ri, len(regions), ref, pos))\n start, end = pos-1, pos\n # extend start/end by nn and end by dt_shift\n ##this is for RNA, for DNA start start needs to be -dt_shift\n parsers = [bam2data(bam, ref, start-nn if start>=nn else 0, end+2*nn, True, \n nn, features, maxReads) for bam in bams]\n refparser = fasta2bases(fasta, ref, start, end, strands)\n for ((pos, _, strand, refbase, mer), *calls) in zip(refparser, *parsers):\n if strand==\"+\":\n sample2data = [np.hstack(c) for c in calls]\n # get min number of reads\n max_reads = int(min(map(len, sample2data))/3)#; print(ref, pos, mt, max_reads, [s.shape for s in sample2data])\n # first get 2 fully unmodified and 1 fully modified sample - those reads won't be used later on\n data_frac = [sample2data[sample2idx[mt]][max_reads:2*max_reads], # this will be used as 0 sample\n sample2data[sample2idx[mt]][-max_reads:], sample2data[sample2idx[\"wt\"]][-max_reads:], # those two will be training set\n ] \n # the get samples with given fractions of modified reads\n data_frac += [get_data_mix(sample2data[sample2idx[mt]], \n sample2data[sample2idx[\"wt\"]], frac, max_reads) \n for frac in fracs]\n region2data[(ref, pos)] = (mer, data_frac)\n return region2data",
"def normalize_lc(self):\n def normalized_subset(regions, t, flux, err, cads):\n time, norm_flux = np.array([]), np.array([])\n norm_flux_err, cadences = np.array([]), np.array([])\n\n for reg in regions:\n f = flux[reg]\n if np.nanmedian(f) > 0:\n norm_flux = np.append(f/np.nanmedian(f), norm_flux)\n time = np.append(t[reg], time)\n e = err[reg]\n norm_flux_err = np.append(e/np.nanmedian(f), norm_flux_err)\n cadences = np.append(cads[reg], cadences)\n\n return time, norm_flux, norm_flux_err, cadences\n\n\n self.time, self.norm_flux = np.array([]), np.array([])\n self.norm_flux_err = np.array([])\n self.cadences = np.array([])\n\n if self.multi is True:\n for d in self.data:\n q = d.quality == 0\n t = d.time[q]\n if self.do_corr == True:\n f = d.corr_flux[q]\n\n elif self.do_raw == True:\n f = d.raw_flux[q]\n\n elif self.do_psf == True:\n f = d.psf_flux[q]\n\n err = d.flux_err[q]\n\n # Searches for breaks based on differences in time array\n regions = self.find_breaks(time=t)\n sector_t, sector_f, sector_e, sector_c = normalized_subset(regions, t, f, err, np.array(d.ffiindex)[q])\n self.time = np.append(sector_t, self.time)\n self.norm_flux = np.append(sector_f, self.norm_flux)\n self.norm_flux_err = np.append(sector_e, self.norm_flux_err)\n self.cadences = np.append(sector_c, self.cadences)\n else:\n q = self.data.quality == 0\n regions = self.find_breaks(time=self.data.time[q])\n self.regions = regions+0.0\n\n if self.do_corr == True:\n sector_t, sector_f, sector_e, sector_c = normalized_subset(regions, self.data.time[q], self.data.corr_flux[q], self.data.flux_err[q], np.array(self.data.ffiindex)[q])\n\n elif self.do_raw == True:\n sector_t, sector_f, sector_e, sector_c = normalized_subset(regions, self.data.time[q], self.data.raw_flux[q], self.data.flux_err[q], np.array(self.data.ffiindex)[q])\n\n elif self.do_psf == True:\n sector_t, sector_f, sector_e, sector_c = normalized_subset(regions, self.data.time[q], self.data.psf_flux[q], self.data.flux_err[q], np.array(self.data.ffiindex)[q])\n\n self.time = sector_t\n self.norm_flux = sector_f\n self.norm_flux_err = sector_e\n self.cadences = sector_c\n\n\n if len(self.time) > 0:\n self.time, self.norm_flux, self.norm_flux_err = zip(*sorted(zip(self.time, self.norm_flux, self.norm_flux_err)))\n self.time, self.norm_flux, self.norm_flux_err = np.array(self.time), np.array(self.norm_flux), np.array(self.norm_flux_err)\n self.cadences = np.sort(self.cadences)",
"def calc_flux_array(self):\n \n # First determine the associated spectrum\n self.compute_template_spectrum()\n\n # Calculate baseline counts to normalise fluxes we scan over\n # Go from 10**(bin_min)*mean up to 10**(bin_max)*mean in nbins steps\n b = self.setup_b_instance(0,add_ps_mask=True)\n mean = np.sum(b.CTB_masked_compressed[0])/len(b.CTB_masked_compressed[0])\n A_array = mean*10**np.linspace(self.bin_min,self.bin_max,self.nbins)\n\n # Array to get LLs when no profile likelihood run\n norun = np.array([1.0, 1.0, 1.0, 1.0])\n\n # Now setup and compute the arrays\n LL_array = np.array([]) \n A_array_short = np.array([])\n spect_array = np.array([])\n\n for i in range(len(A_array)):\n print \"on i =\",i\n # Calculate LL\n if i == 0:\n b1 = self.setup_b_instance(A_array[i],add_ps_mask=True)\n else:\n for key in b1.fixed_template_dict_nested.keys():\n b1.fixed_template_dict_nested[key] = b1.fixed_template_dict_nested[key]*A_array[i]/A_array[i-1]\n ll_val = b1.ll(norun,4,4)\n # Make triangle\n\n # Append to arrays\n LL_array = np.append(LL_array,ll_val)\n A_array_short = np.append(A_array_short,A_array[i])\n spect_array = self.spectrum*np.array(A_array_short)\n\n # Save output\n np.save(work_dir+'ScanOutput/'+self.tag+'/En_array-'+str(self.flux_array_ebin)+'.npy',self.En_center)\n np.save(work_dir+'ScanOutput/'+self.tag+'/LL_array-'+str(self.flux_array_ebin)+'.npy',LL_array)\n np.save(work_dir+'ScanOutput/'+self.tag+'/Flux_array-'+str(self.flux_array_ebin)+'.npy',spect_array)",
"def load_data_train_test_val(fasta, bams, regions, features, samples, maxReads=1000, strands=\"+-\", nn=1):\n # get storage\n k = 2*nn+1\n fi = 0\n sam = pysam.AlignmentFile(bams[0])\n region2data = {}\n sample2idx = {s: i for i, s in enumerate(samples)}; print(sample2idx)\n for ri, (ref, pos, mt) in enumerate(regions, 1):\n sys.stderr.write(\" %s / %s %s:%s \\r\"%(ri, len(regions), ref, pos))\n start, end = pos-1, pos\n # extend start/end by nn and end by dt_shift\n ##this is for RNA, for DNA start start needs to be -dt_shift\n parsers = [bam2data(bam, ref, start-nn if start>=nn else 0, end+2*nn, True, \n nn, features, maxReads) for bam in bams]\n refparser = fasta2bases(fasta, ref, start, end, strands)\n for ((pos, _, strand, refbase, mer), *calls) in zip(refparser, *parsers):\n if strand==\"+\":\n sample2data = [np.hstack(c) for c in calls]\n # get min number of reads\n max_reads = int(min(map(len, sample2data))/3)#; print(ref, pos, mt, max_reads, [s.shape for s in sample2data])\n # first get 2 fully unmodified and 1 fully modified sample - those reads won't be used later on\n data_frac = [sample2data[sample2idx[mt]][max_reads:2*max_reads], # this will be used as 0 sample\n sample2data[sample2idx[mt]][-max_reads:], sample2data[sample2idx[\"wt\"]][-max_reads:], # those two will be training set\n ] \n # get a bit of every sample\n data_frac += [sd[:max_reads] for sd in sample2data]\n region2data[(ref, pos)] = (mer, data_frac)\n return region2data",
"def update_total_fpmu_dict(self):\n # identical for each long-range connection\n # extract parameters\n deltat = self.dt\n trise = self.tau_r\n tdamp = self.tau_d\n\n tr = deltat/trise\n etr = np.exp(-tr) \n td = deltat/tdamp\n etd = np.exp(-td)\n cst = trise/(tdamp-trise)\n\n # nmda should keep in memory which could not be reset to zerooooooo!!!\n \"\"\"\n no resetting to zero --> go directly to refreshing !!! based on pre-value\n \"\"\"\n for c in self.source_connection_list:\n if (c.conn_type == 'LongRange'):\n self.total_INMDA_dict[c.connection_distribution] = self.total_INMDA_dict[c.connection_distribution] * etd + self.total_HNMDA_dict[c.connection_distribution] * cst\n self.total_HNMDA_dict[c.connection_distribution] = self.total_HNMDA_dict[c.connection_distribution] * etr + c.curr_firing_rate * c.nsyn * c.weights * self.tau_r\n\n print 'Change HNMDA: ', c.curr_firing_rate * c.nsyn ,' \\n'\n print 'Inputlr dict: ', self.total_inputlr_dict[c.connection_distribution]\n\n\n\n # for curr_CD in self.source_connection_list:\n # have already exist\n for c in self.source_connection_list:\n if(c.conn_type == 'ShortRange'):\n self.total_fpmu_dict[c.connection_distribution] = 0.0\n # have already clear up all the short range connections\n for c in self.source_connection_list:\n if(c.conn_type == 'ShortRange'):\n self.total_fpmu_dict[c.connection_distribution] += c.curr_firing_rate * c.nsyn * c.weights\n\n # summation\n self.total_fp_vslave = 0.0\n for key,val in self.total_fpmu_dict.items():\n \n try:\n self.total_fp_vslave += val\n except:\n key.initialize()\n self.total_fp_vslave += val\n # and then, summation of Inmda\n for key,val in self.total_INMDA_dict.items():\n try:\n self.total_fp_vslave += val\n except:\n key.initialize()\n self.total_fp_vslave += val\n # and then divided by gL or multiply tau_m\n self.total_fp_vslave = self.total_fp_vslave * self.tau_m",
"def data_separation(self, n_extrema=2, ind_extrema=[0,-1], verbosity=1):\n\n self.fullspace = []\n self.z_requested = np.array([self.z_requested]).flatten()\n\n ## fullspace: array of all multiindex values in the noiseless case \n for z in np.array(self.z_requested):\n for iind in (self.df_ext.loc[self.data_type,z].index):\n ## values of the redshift are only included, if there is data for multiple redshift values\n if self.multiple_z == False:\n self.fullspace.append(list(iind)[1::2])\n else:\n self.fullspace.append(np.array([z]+list(iind)[1::2]))\n self.fullspace=np.array(self.fullspace)\n\n self.size_fullspace = len(self.fullspace) \n self.ind_fullspace = np.array(range(self.size_fullspace))\n\n ## extremaspace: array of multiindex values of the spectra considered \"extrema\"\n self.ind_extremaspace = self.ind_fullspace[ind_extrema]\n self.extremaspace = self.fullspace[self.ind_extremaspace]\n self.size_extremaspace = len(self.extremaspace)\n\n ## midspace: array of all multiindex values except for the extrema\n self.ind_midspace = np.setdiff1d(self.ind_fullspace, self.ind_extremaspace)\n self.midspace = self.fullspace[self.ind_midspace]\n self.size_midspace = len(self.midspace)\n\n ## print details if verbosity >= level\n too.condprint(\"length of full sample space\", self.size_fullspace, level=2, verbosity=verbosity)\n too.condprint(\"full sample space list\", self.fullspace, level=3, verbosity=verbosity)\n too.condprint(\"length of extrema sample space\", self.size_extremaspace, level=2, verbosity=verbosity)\n too.condprint(\"full sample space list\", self.extremaspace, level=3, verbosity=verbosity)\n\n return None",
"def obtain_data(self):\n ##MODIFY THIS\n #ipdb.set_trace()\n print('obtain_data')\n print(self.enabler)\n print(self.index)\n helper = '>'+str(1+int(self.chann_span.get()))+'Q'\n print('helper='+helper)\n while(self.enabler):\n #print('size'+str(1+int(self.chann_span.get())))\n #print('offset'+str(self.index-self.index_offset))\n A2 = struct.unpack(helper, fpga.read('A2', (1+int(self.chann_span.get()))*8, ((self.index-self.index_offset)*8))) \n #print(A2)\n #print(str(10*np.log10(A2))+'dB')\n self.amp_a2[0] = np.mean(A2)\n self.amp_a2 = np.roll(self.amp_a2, -1)\n B2 = struct.unpack(helper, fpga.read('B2', (1+int(self.chann_span.get()))*8, ((self.index-self.index_offset)*8)))\n self.amp_b2[0] = np.mean(B2)\n self.amp_b2 = np.roll(self.amp_b2, -1)\n AB_re = struct.unpack(helper, fpga.read('AB_re', (1+int(self.chann_span.get()))*8, ((self.index-self.index_offset)*8)))\n self.ab_re[0] = np.mean(AB_re)\n self.ab_re = np.roll(self.ab_re, -1)\n AB_im = struct.unpack(helper, fpga.read('AB_im', (1+int(self.chann_span.get()))*8, ((self.index-self.index_offset)*8)))\n self.ab_im[0] = np.mean(AB_im)\n self.ab_im = np.roll(self.ab_im, -1) \n # print('RE:' + str(self.ab_re)+ '\\t IM:' +str(self.ab_im))\n log_a = 10*np.log10(np.mean(self.amp_a2)+1.0)\n log_b = 10*np.log10(np.mean(self.amp_b2)+1.0) \n ang = np.rad2deg(np.arctan2(np.mean(self.ab_im), np.mean(self.ab_re))) #review the way of avg this... i dont know if its the most correct way to do it...\n self.a2.set(log_a)\n self.b2.set(log_b)\n self.ang.set(ang)\n self.amp_rel.set(log_a-log_b)\n return 1",
"def calculate_items(ret_o,snaps, min_neigh=4, cutoff=1.5, MAXnb=100,\n nbins=2000, nbinsq=50, Pe=10, rho_0=0.60,\n spatial_correlation_flag = True,\n cluster_flag = False, CG_flag = True):\n\n ts = len(snaps)\n \n for t1 in range(0,ts):\n\n snap1 = snaps[t1]\n print(t1,ts)\n # for each snapshot in the dump file data\n\n box=snap1['box']\n ref_coords = snap1['ucoords']\n mus = snap1['mus']\n\n # compute (normalized) mean polarisation\n polarisation = np.linalg.norm(np.mean(mus,axis=0))\n\n p6re = np.mean(snap1['c_psi6[1]'])\n p6im = np.mean(snap1['c_psi6[2]'])\n p6 = np.absolute(np.complex(p6re, p6im))\n\n mux = np.mean(snap1['mux'])\n mux2 = np.mean(np.array(snap1['mux'])**2)\n muy = np.mean(snap1['muy'])\n muy2 = np.mean(np.array(snap1['muy'])**2)\n \n theta_Ns = np.arctan2(snap1['muy'], snap1['mux'])\n theta = np.mean(theta_Ns)\n theta2 = np.mean(theta_Ns**2)\n \n nematic_Ns = (2.*np.cos(theta)**2 - 1.)\n nematic = np.mean(nematic_Ns)\n nematic2 = np.mean(nematic_Ns**2)\n \n # compute time averages\n ret_o['g_cnt'] = ret_o.get('g_cnt',0) + 1\n ret_o['sum_psi6'] = ret_o.get('sum_psi6',0) + p6\n ret_o['sum_psi62'] = ret_o.get('sum_psi62',0) + p6*p6\n ret_o['sum_psi6_re'] = ret_o.get('sum_psi6_re',0) + p6re\n ret_o['sum_psi6_im'] = ret_o.get('sum_psi6_im',0) + p6im\n ret_o['sum_mux'] = ret_o.get('sum_mux',0) + mux\n ret_o['sum_mux2'] = ret_o.get('sum_mux2',0) + mux2\n ret_o['sum_muy'] = ret_o.get('sum_muy',0) + muy\n ret_o['sum_muy2'] = ret_o.get('sum_muy2',0) + muy2\n\n \n ret_o['sum_theta'] = ret_o.get('sum_theta',0) + theta\n ret_o['sum_theta2'] = ret_o.get('sum_theta2',0) + theta2\n\n\n ret_o['sum_nematic'] = ret_o.get('sum_nematic',0) + nematic\n ret_o['sum_nematic2'] = ret_o.get('sum_nematic2',0) + nematic2\n ret_o['polarisation'] = ret_o.get('polarisation',0) + polarisation\n\n \n if spatial_correlation_flag:\n \n tmp_list = spatial_correlations(t1,snap1, ret_o,min_neigh=4,\n cutoff=1.5,MAXnb=100,nbins=2000,\n nbinsq=50,Pe=10, rho_0=0.60)\n\n # distance matrix between particle pairs\n ref_distance, ref_dis_x, ref_dis_y = tmp_list[:3]\n # number of neighbours for all particles\n ref_num_nb, ref_list_nb = tmp_list[3:5]\n \n # correlation functions and structure functions\n g, g6, g6re, g6im, sq = tmp_list[5:10]\n g_ori, g_dp, g_dp_tr, g_pr, s_pr = tmp_list[10:]\n\n\n # compute time averages\n\n g_mat = np.matrix(g)\n g6_mat = np.matrix(g6)\n g6re_mat = np.matrix(g6re)\n g6im_mat = np.matrix(g6im)\n sq_mat = np.array(sq)\n\n ret_o['sum_g'] = ret_o.get('sum_g',0*g_mat)+g_mat\n ret_o['sum_g6'] = ret_o.get('sum_g6',0*g6_mat)+g6_mat\n ret_o['sum_g6re'] = ret_o.get('sum_g6re',0*g6re_mat)+g6re_mat \n ret_o['sum_g6im'] = ret_o.get('sum_g6im',0*g6im_mat)+g6im_mat\n\n ret_o['sum_sq'] = ret_o.get('sum_sq',0*sq_mat)+sq_mat\n\n g_ori_mat = np.array(g_ori)\n g_dp_mat = np.array(g_dp)\n g_dp_tr_mat = np.array(g_dp_tr)\n g_pr_mat = np.array(g_pr)\n pij_rij_mat = s_pr\n\n\n ret_o['sum_g_ori'] = (ret_o.get('sum_g_ori',0*g_ori_mat)\n + g_ori_mat)\n ret_o['sum_g_dp'] = (ret_o.get('sum_g_dp',0*g_dp_mat)\n + g_dp_mat)\n ret_o['sum_g_dp_tr'] = (ret_o.get('sum_g_dp_tr',0*g_dp_tr_mat)\n +g_dp_tr_mat)\n ret_o['sum_g_pr'] = (ret_o.get('sum_g_pr',0*g_pr_mat)\n +g_pr_mat)\n ret_o['sum_pij_rij'] = (ret_o.get('sum_pij_rij',0*pij_rij_mat)\n + pij_rij_mat)\n\n\n\n \n if cluster_flag:\n\n tmp_list = cluster_momenta(t1,snap1,\n min_cluster_size=min_cluster_size,\n CG_flag=CG_flag)\n\n RMS_AngMom,RMS_AngMom2 = tmp_list[:2]\n RMS_LinMom,RMS_LinMom2,cluster_size = tmp_list[2:]\n\n\n # beginning of time averages\n\n 
ret_o['sum_RMS_AngMom'] = (ret_o.get('sum_RMS_AngMom',0)\n + RMS_AngMom)\n ret_o['sum_RMS_AngMom2'] = (ret_o.get('sum_RMS_AngMom2',0)\n + RMS_AngMom2)\n ret_o['sum_RMS_LinMom'] = (ret_o.get('sum_RMS_LinMom',0)\n + RMS_LinMom)\n ret_o['sum_RMS_LinMom2'] = (ret_o.get('sum_RMS_LinMom2',0)\n + RMS_LinMom2)\n ret_o['sum_cluster_size'] = (ret_o.get('sum_cluster_size',0)\n +cluster_size)\n\n\n return ret_o",
"def data(dbfilename = os.path.expanduser('~/python/project/znuc2012.S4.star.el.y.stardb.gz')):\n db = stardb.load(dbfilename) # loads database\n nmass = db.nvalues[0] # finds the number of values\n masses = db.values[0][:nmass] #creates a vector of the initial masses\n isodb = stardb.load(os.path.expanduser('~/python/project/znuc2012.S4.star.deciso.y.stardb.gz'))\n \n massnumber = []\n for x in range(len(isodb.ions)):\n mn = isodb.ions[x].A\n massnumber.append(mn)\n massnumber = np.array(massnumber)\n np.save(os.path.expanduser('~/python/project/filestoload/Massnumber'), massnumber) \n####################### \n# write all energy and mixing values\n\n energyvalues = np.unique(db.fielddata['energy'])\n mixingvalues = np.unique(db.fielddata['mixing'])\n masterremnant = [] # result will be a multidimensional array\n elementdata = []\n isodata = []\n r = len(db.ions) # for loop iteration\n w = len(isodb.ions)\n for energy in energyvalues:\n remmixingarray = [] # reinitialise the next dimension\n elmixingarray = []\n isomixingarray = []\n for mixing in mixingvalues:\n \n \n ii = np.logical_and(np.isclose(db.fielddata['energy'], energy), np.isclose(db.fielddata['mixing'], mixing))\n \n mass = db.fielddata[ii]['remnant']\n remmixingarray.append(mass) # this is an array of remnant masses for one energy and every mixing value\n \n elfill = [] # reinitialise the next dimension again\n isofill = []\n \n \n for m in range(w):\n \n a = isodb.ions[m] #for obtaining the element string\n kk = np.where(isodb.ions==isotope.ion(a)) # finding the indices in db.ions for a particular element\n jj = np.where(ii)\n isotopes = isodb.data[jj, kk][0] # array of abundances for that particular element\n isofill.append(isotopes) # this is an array of element data for every mass for one energy and one mixing value\n\n\n\n\n isomixingarray.append(isofill) \n \n \n masterremnant.append(remmixingarray) # these master arrays have every bit of data under its own energy. so called like elementdata[energy][mixing][elementnumber] gives the element data for every star for a single element.\n \n isodata.append(isomixingarray)\n \n np.save(os.path.expanduser('~/python/project/filestoload/IsoData'), isodata)\n np.save(os.path.expanduser('~/python/project/filestoload/RemnantMasses'), masterremnant)\n np.save(os.path.expanduser('~/python/project/filestoload/Ioninfo'), isodb.ions)\n time = [] \n \n for mass in masses: # for loop will cycle through the masses and grab the lifetime of each star\n s = str(mass) # converts the mass number to a string for file acquiring\n if s.endswith('.0'): # formatting issue, to match the filenames\n s = s[:-2] \n filename = os.path.expanduser('~/python/project/dumps/z{}#presn').format(s)\n # grabs filename corrosponding to this mass\n d = kepdump.load(filename) # loads the kepdump data for this star\n time.append(d.time) \n yr = 365.2425*86400 \n time = np.array(time)/yr\n dataarray = [masses, time]\n\n\n return dataarray",
"def read_statistics(self):\n self.psdata=[]\n self.powerspectra=[]\n self.ds=[]\n self.dsigmasq=[]\n self.dsigma=[]\n self.bsdata=[]\n self.eqbispectra=[]\n self.fNLeq=[]\n\n for sub in range(self.Nsubs):\n self.psdata.append(np.load(self.datadir+self.filebase+\"_\"+str(sub)+\".npy\"))\n self.powerspectra.append(np.trim_zeros(self.psdata[-1][0][1:]))\n self.bsdata.append(np.load(self.datadir+self.fbbispec+\"_\"+str(sub)+\".npy\"))\n self.eqbispectra.append(self.bsdata[-1][0][1:len(self.powerspectra[-1])])\n\n self.ds.append(np.load(self.datadir+\"stat_\"+str(sub)+\".npy\")[0])\n self.dsigmasq.append(np.load(self.datadir+\"stat_\"+str(sub)+\".npy\")[1])\n self.dsigma = np.array([np.sqrt(dsq) for dsq in self.dsigmasq])\n\n self.klist=np.arange(1, len(self.powerspectra[-1]))*(2.*np.pi/self.Lsub)\n # subtract the mean ds\n self.ds = self.ds - np.mean(self.ds)\n self.fNLeq=np.mean(self.eqbispectra, axis=0)\n self.fNLeqsubs=np.mean(self.eqbispectra, axis=1)\n self.fNLeqds=[]\n for i in range(len(self.eqbispectra)):\n self.fNLeqds.append(np.array([self.ds[i]*self.eqbispectra[i][j] for j in range(45)]))",
"def calculate_hysteresis(data,ms,filename):\n\t\n\tfrom numpy import pi, mean\n\tfrom scipy.integrate import cumtrapz\n\timport pandas as pd\n\t\n\tprint('--------------------------')\n\tprint('Evaluation ...')\n\tprint('... calculating hysteresis')\n\t\n\t# prepare results\n\tresult = pd.Series({'Vamp':ms['amp']})\n\tresult['frequency'] = ms['freq']\n\tresult['thickness'] = ms['thickness']\n\tresult['area'] = ms['area']\n\tresult['areaerr'] = ms['areaerr']\n\t\n\t# calculate difference voltage betwen Vset and Vref\n\tdata['Vdiff'] = data.Vset - data.Vref\n\t\n\t#calculate displacement current \n\tdata['I'] = data.Vref / ms['rref']\n\n\t#calc and center electric field from Vset and sample thickness d (+save)\n\tdata['E'] = data.Vdiff / ms['thickness']\n\tE_bias = abs(max(data.E))-abs(min(data.E))\n\tprint('... E_bias: %f MV/m ; %f V'%(E_bias/1e6,E_bias*ms['thickness']))\n\tif ms['correct_Ebias'] == True:\n\t\tprint('... correct Ebias')\n\t\tif E_bias < 0:\n\t\t\tdata['E'] = data.E + abs(E_bias)\n\t\telse:\n\t\t\tdata['E'] = data.E - abs(E_bias)\n\tresult['ebias'] = E_bias\n\t\n\t# correct loss current before removing offset\t\n\tif ms['correct_LossI'] == True:\n\t\tprint('... correct loss current')\n\t\ttry:\n\t\t\tdata['I_Loss'] = data.Vref * 2 * pi * ms['freq'] * ms['cap'] * ms['tand']\n\t\t\tdata['I'] = data.I - data.I_Loss\n\t\t\tprint('... ILoss/IP: %e'%(mean(data.I_Loss)/mean(data.I)))\n\t\texcept ValueError:\n\t\t\tprint('Some values missing! (Capacity, tan d ?)')\n\t\n\t# calc offset current from mean of 1 period\n\tif ms['custom_curr_offs'] == 0:\n\t\t\n\t\t# TEST: get start index from first zero transition of current signal\n\t\t# index_DataFrame = data.iloc[(data['I']-0.0).abs().argsort()[:20]]\t# extract index from nearest values to zero\n\t\t# start_index = index_DataFrame.index.min()\n\t\t\n\t\tstart_index = 50\n\t\t\n\t\tincrement = data.time[1]-data.time[0]\n\t\tsteps = int(1./ ms['freq'] / increment * 2)\n\t\toffset = mean(data.I[start_index:steps+start_index])\n\telse:\n\t\tprint('... auto offset current disabled')\n\t\ttry:\n\t\t\toffset = ms['custom_curr_offs']\n\t\texcept ValueError:\n\t\t\tprint('current offset value missing!')\n\t\t\n\t# remove offset current\n\tdata['I'] = data.I - offset\n\n\t# charge by integrating current\n\tdata['Q'] = cumtrapz(data.I,data.time,initial=0)\n\n\t# polarization\n\tdata['P'] = data.Q / ms['area']\n\n\t# align P around Pmin and Pmax\n\tmaxP = max(data.P)\n\tminP = min(data.P)\n\tPdiff = abs(minP)-abs(maxP)\n\tdata['P'] = data.P + Pdiff/2\n\t\t\n\t# aling P around 0\t\t16 because 8+ und 8-\n\tPNull = mean([max(data.iloc[(data['E']-0).abs().argsort()[:16]].P),min(data.iloc[(data['E']-0).abs().argsort()[:16]].P)])\n\tif PNull < 0:\n\t\tdata['P'] = data.P + abs(PNull)\n\telse:\n\t\tdata['P'] = data.P - abs(PNull)\n\tresult['pnull'] = PNull\n\n\t# calc error of polarization\n\tdata['P_error'] = (ms['vreferr'] / data.Vref + ms['rreferr']/ms['rref'] + ms['areaerr']/ms['area']) * data.P\n\n\t# get EC and PR --> 3 sigma\n\tPR, PR_error = get_PR(data)\n\tresult['PR'], result['PRerr'] = PR, PR_error\n\tresult['EC'] = get_EC(data)\n\t\n\tprint('... PR: (%f +- %f) yC/cm2'%(abs(result['PR'])*100,abs(result['PRerr'])*100))\n#\tprint('... (%.2f)'%(PR_error/PR*100))\n\t#print('Vdiff: %f V'%(data.Vdiff.max()))\n\t\n\treturn data, result",
"def prep_standard(st, segment, data):\n\n from rfpipe import calibration, flagging, util\n\n if not np.any(data):\n return data\n\n # read and apply flags for given ant/time range. 0=bad, 1=good\n if st.prefs.applyonlineflags and st.metadata.datasource in ['vys', 'sdm']:\n flags = flagging.getonlineflags(st, segment)\n data = np.where(flags[None, :, None, None], data, 0j)\n else:\n logger.info('Not applying online flags.')\n\n if not np.any(data):\n return data\n\n if st.prefs.simulated_transient is not None or st.otfcorrections is not None:\n uvw = util.get_uvw_segment(st, segment)\n\n # optionally integrate (downsample)\n if ((st.prefs.read_tdownsample > 1) or (st.prefs.read_fdownsample > 1)):\n data2 = np.zeros(st.datashape, dtype='complex64')\n if st.prefs.read_tdownsample > 1:\n logger.info('Downsampling in time by {0}'\n .format(st.prefs.read_tdownsample))\n for i in range(st.datashape[0]):\n data2[i] = data[\n i*st.prefs.read_tdownsample:(i+1)*st.prefs.read_tdownsample].mean(axis=0)\n if st.prefs.read_fdownsample > 1:\n logger.info('Downsampling in frequency by {0}'\n .format(st.prefs.read_fdownsample))\n for i in range(st.datashape[2]):\n data2[:, :, i, :] = data[:, :, i*st.prefs.read_fdownsample:(i+1)*st.prefs.read_fdownsample].mean(axis=2)\n data = data2\n\n # optionally add transients\n if st.prefs.simulated_transient is not None:\n # for an int type, overload prefs.simulated_transient random mocks\n if isinstance(st.prefs.simulated_transient, int):\n logger.info(\"Filling simulated_transient with {0} random transients\"\n .format(st.prefs.simulated_transient))\n st.prefs.simulated_transient = util.make_transient_params(st, segment=segment,\n ntr=st.prefs.simulated_transient,\n data=data)\n\n assert isinstance(st.prefs.simulated_transient, list), \"Simulated transient must be list of tuples.\"\n\n for params in st.prefs.simulated_transient:\n assert len(params) == 7 or len(params) == 8, (\"Transient requires 7 or 8 parameters: \"\n \"(segment, i0/int, dm/pc/cm3, dt/s, \"\n \"amp/sys, dl/rad, dm/rad) and optionally \"\n \"ampslope/sys\")\n if segment == params[0]:\n if len(params) == 7:\n (mock_segment, i0, dm, dt, amp, l, m) = params\n ampslope = 0\n\n logger.info(\"Adding transient to segment {0} at int {1}, \"\n \"DM {2}, dt {3} with amp {4} and l,m={5},{6}\"\n .format(mock_segment, i0, dm, dt, amp, l, m))\n elif len(params) == 8:\n (mock_segment, i0, dm, dt, amp, l, m, ampslope) = params\n logger.info(\"Adding transient to segment {0} at int {1}, \"\n \" DM {2}, dt {3} with amp {4}-{5} and \"\n \"l,m={6},{7}\"\n .format(mock_segment, i0, dm, dt, amp,\n amp+ampslope, l, m))\n try:\n model = np.require(np.broadcast_to(util.make_transient_data(st, amp, i0, dm, dt, ampslope=ampslope)\n .transpose()[:, None, :, None],\n st.datashape),\n requirements='W')\n except IndexError:\n logger.warning(\"IndexError while adding transient. 
Skipping...\")\n continue\n\n if st.gainfile is not None:\n model = calibration.apply_telcal(st, model, sign=-1)\n util.phase_shift(model, uvw, -l, -m)\n data += model\n\n if st.otfcorrections is not None:\n # shift phasecenters to first phasecenter in segment\n if len(st.otfcorrections[segment]) > 1:\n ints, ra0, dec0 = st.otfcorrections[segment][0] # new phase center for segment\n logger.info(\"Correcting {0} phasecenters to first at RA,Dec = {1},{2}\"\n .format(len(st.otfcorrections[segment])-1, ra0, dec0))\n for ints, ra_deg, dec_deg in st.otfcorrections[segment][1:]:\n l0 = np.radians(ra_deg-ra0)\n m0 = np.radians(dec_deg-dec0)\n util.phase_shift(data, uvw, l0, m0, ints=ints)\n\n return data",
"def fillDetInfo():\n print('here i am')\n # 1. maps of analysis channel to cpd, and pulser monitor channels\n detCH, pMons = {}, {}\n for ds in [0,1,2,3,4,5,6]:\n f = np.load(\"%s/data/ds%d_detChans.npz\" % (os.environ['LATDIR'], ds))\n detCH[ds] = f['arr_0'].item()\n pMons[ds] = f['arr_1'].item()\n\n # 2. maps of HV and TRAP threshold settings are stored in the DB.\n # make them global, and move them to the runSettings file.\n # FORMAT: {ds : {'det' : [(run1,val1),(run2,val2)...]} }\n detHV, detTH = {}, {}\n\n # load all possible values, as in settingsMgr\n detDB = db.TinyDB(\"%s/calDB-v2.json\" % dsi.latSWDir)\n detPars = db.Query()\n cal = dsi.CalInfo()\n for ds in [0,1,2,3,4,5,6]:\n # for ds in [0]:\n print(\"scanning ds\",ds)\n detTH[ds] = {}\n detHV[ds] = {}\n for key in cal.GetKeys(ds):\n mod = -1\n if \"m1\" in key: mod = 1\n if \"m2\" in key: mod = 2\n for cIdx in range(cal.GetIdxs(key)):\n\n # load the DB records\n dbKeyTH = \"trapThr_%s_c%d\" % (key, cIdx)\n dbValTH = dsi.getDBRecord(dbKeyTH,calDB=detDB,pars=detPars)\n\n dbKeyHV = \"hvBias_%s_c%d\" % (key, cIdx)\n dbValHV = dsi.getDBRecord(dbKeyHV,calDB=detDB,pars=detPars)\n\n # debug: print the record\n # for val in sorted(dbValTH):\n # if len(dbValTH[val])>0:\n # print(val, dbValTH[val])\n # return\n\n # fill the first value\n if len(detTH[ds])==0:\n detTH[ds] = dbValTH\n detHV[ds] = dbValHV\n continue\n\n # check for new threshold values.\n for cpd in detTH[ds]:\n nOld, nNew = len(detTH[ds][cpd]), len(dbValTH[cpd])\n\n # detector just came online\n if nOld==0 and nNew>0:\n detTH[ds][cpd] = dbValTH[cpd]\n continue\n # detector still offline\n if nOld==0 and nNew==0:\n continue\n # detector just went offline\n if nOld>0 and nNew==0:\n continue\n\n # check last run/trap pair against each new one\n prevRun, prevTH = detTH[ds][cpd][-1][0], detTH[ds][cpd][-1][1]\n for val in dbValTH[cpd]:\n thisRun, thisTH = val[0], val[1]\n if thisTH != prevTH:\n detTH[ds][cpd].append([thisRun,thisTH])\n prevTH = thisTH\n\n # check for new HV values.\n for cpd in detHV[ds]:\n\n nOld, nNew = len(detHV[ds][cpd]), len(dbValHV[cpd])\n\n # detector just came online\n if nOld==0 and nNew>0:\n detHV[ds][cpd] = dbValHV[cpd]\n continue\n # detector still offline\n if nOld==0 and nNew==0:\n continue\n # detector just went offline\n if nOld>0 and nNew==0:\n continue\n\n # check last run/trap pair against each new one\n prevRun, prevHV = detHV[ds][cpd][-1][0], detHV[ds][cpd][-1][1]\n for val in dbValHV[cpd]:\n thisRun, thisHV = val[0], val[1]\n if thisHV != prevHV:\n print(\"found HV diff. cpd %d prev %dV (run %d) new %dV (run %d)\" % (cpd, prevHV, prevRun, thisHV, thisRun))\n detHV[ds][cpd].append([thisRun,thisHV])\n prevHV = thisHV\n\n # return\n\n # # load the old file and compare\n # # GOAL: improve on this file.\n # # f = np.load(\"%s/data/runSettings.npz\" % dsi.latSWDir)\n # # detHVOld = f['arr_0'].item()\n # # detTHOld = f['arr_1'].item()\n # # detCHOld = f['arr_2'].item()\n # # pMonsOld = f['arr_3'].item()\n #\n # ds = 3\n # print(\"old results, ds\",ds)\n # for cpd in sorted(detTHOld[ds]):\n # if cpd!=\"122\":continue\n # if len(detTHOld[ds][cpd]) > 0:\n # print(cpd, detTHOld[ds][cpd])\n #\n # # for ds in [0,1,2,3,4,5,6]:\n # print(\"thresh results, ds:\",ds)\n # for cpd in sorted(detTH[ds]):\n # # if cpd!=122:continue\n # if len(detTH[ds][cpd]) > 0:\n # print(cpd, detTH[ds][cpd])\n\n\n np.savez(\"%s/data/runSettings-v2.npz\" % dsi.latSWDir,detHV,detTH,detCH,pMons)",
"def HD_input_snfit_data(self):\n\n dico = cPickle.load(open(SUGAR_parameter_pkl))\n self.read_snfit_results()\n self.read_meta()\n Filtre = np.array([True]*len(self.sn_name))\n self.zcmb = []\n self.z_err = []\n for j in range(len(self.sn_name)):\n if self.sn_name[j] in dico.keys() and self.sn_name[j] :\n\n for i in range (len(self.meta_sn_name_list)):\n if self.sn_name[j] == self.meta_sn_name_list[i]:\n \n self.z_err.append(self.meta_zhl_err[i])\n self.zcmb.append(self.meta_zcmb[i])\n if np.abs(self.x1[j] - self.meta_x1[i]) > 0.001:\n print 'problem with %s include in sample but difference between snfit and meta'%(self.sn_name[j])\n else:\n Filtre[j] = False\n\n for p in dico.keys():\n if p not in self.sn_name:\n print p\n \n self.x1 = self.x1[Filtre]\n self.x1_err = self.x1_err[Filtre] \n self.c = self.c[Filtre]\n self.c_err = self.c_err[Filtre]\n self.mb = self.mb[Filtre]\n self.mb_err = self.mb_err[Filtre]\n self.cov_x0_x1 = self.cov_x0_x1[Filtre]\n self.cov_x0_c = self.cov_x0_c[Filtre]\n self.cov_x1_c = self.cov_x1_c[Filtre]\n self.cov_mb_x1 = self.cov_mb_x1[Filtre]\n self.cov_mb_c = self.cov_mb_c[Filtre]\n self.z = self.z[Filtre]\n self.zcmb = np.array(self.zcmb)\n self.z_err = np.array(self.z_err)\n\n self.cov_y = np.zeros((len(self.mb)*3,len(self.mb)*3))\n\n for i in range (len(self.mb)):\n self.cov_y[i*3,i*3] = self.mb_err[i]**2\n self.cov_y[i*3+ 1,i*3+ 1] = self.x1_err[i]**2\n \n self.cov_y[i*3+ 2,i*3+ 2] = self.c_err[i]**2\n self.cov_y[i*3+ 0,i*3+ 1] = self.cov_mb_x1[i]\n self.cov_y[i*3+ 1,i*3+ 0] = self.cov_mb_x1[i]\n self.cov_y[i*3+ 0,i*3+ 2] = self.cov_mb_c[i]\n self.cov_y[i*3+ 2,i*3+ 0] = self.cov_mb_c[i]\n self.cov_y[i*3+ 1,i*3+ 2] = self.cov_x1_c[i] \n self.cov_y[i*3+ 2,i*3+ 1] = self.cov_x1_c[i] \n \n self.salt_parm = np.array([self.mb,self.x1,self.c]).T\n# print len(self.salt_parm), len(self.cov_y), len(self.z), len(self.zcmb)\n# return self.salt_parm, self.cov_y, self.z, self.meta_zcmb, self.meta_zhl_err, self.sn_name, self.meta_idr\n return self.salt_parm, self.cov_y, self.z, self.zcmb, self.z_err",
"def frequencyEstimator(ctd, ladcp, bathy, rho_neutral, strain,\\\n wl_min=100, wl_max=500, full_set=False):\n \n U, V, p_ladcp = oc.loadLADCP(ladcp)\n S, T, p_ctd, lat, lon = oc.loadCTD(ctd)\n \n \n Ek, Ep, Etotal, eta_power,\\\n Upow, Vpow, UVkx, eta_kx,\\\n N2mean, wl_min, wl_max,\\\n dist, depths, U, V, p_ladcp,\\\n Uspec, Vspec, etaSpec =\\\n internal_wave_energy(ctd, ladcp,\\\n rho_neutral,\\\n bathy, strain, wl_min=wl_min, wl_max=wl_max)\n \n eta_power_export = np.vstack(eta_power)\n eta_kx_export = np.vstack(eta_kx)\n Up_export = np.vstack(Upow)\n Vp_export = np.vstack(Vpow)\n UVkx_export = np.vstack(UVkx)\n \n\n np.savetxt('eta_power.csv',eta_power_export)\n np.savetxt('eta_kx.csv',eta_kx_export)\n np.savetxt('Upow.csv',Up_export)\n np.savetxt('Vpow.csv',Vp_export)\n np.savetxt('UVkx.csv',UVkx_export)\n\n\n \n \n # look for wavenumber maxes\n \n \n # Use ratios to solve for internal frequncys\n f = np.nanmean(gsw.f(lat))\n \n omega = f*np.sqrt(Etotal/(Ek-Ep))\n\n m = np.mean((wl_min, wl_max))\n m = (2*np.pi)/m\n kh = (m/np.sqrt(np.abs(N2mean)))*(np.sqrt(omega**2 - f**2))\n mask = kh == 0\n kh[mask]= np.nan\n lambdaH = 1e-3*(2*np.pi)/kh\n \n # get mean spectra\\\n \n eta_mean = []\n for station in eta_power:\n eta_mean.append(np.nanmean(station, axis=0))\n \n eta_mean = np.vstack(eta_mean).T\n \n \n aspect = kh/m \n \n file2save = pd.DataFrame(lambdaH)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('lambdaH.xlsx')\n file2save = pd.DataFrame(kh)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('Kh.xlsx')\n file2save = pd.DataFrame(omega)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('omega.xlsx')\n file2save = pd.DataFrame(aspect)\n file2save.index = np.squeeze(depths)\n file2save.to_excel('aspect.xlsx')\n \n np.savetxt('eta_mean.csv', eta_mean)\n \n \n np.savetxt('kh.csv', kh)\n np.savetxt('lamdah.csv', lambdaH)\n np.savetxt('omega.csv', omega)\n \n if full_set:\n return lambdaH, kh, omega, N2mean,\\\n dist, depths, U, V, p_ladcp,\\\n Uspec, Vspec, etaSpec, aspect\n \n else:\n return lambdaH, kh, omega, N2mean",
"def specpolfinalstokes(infile_list,polcal='polcal.txt',logfile='salt.log',debug=False):\n\n patternlist = open(datadir+'wppaterns.txt','r').readlines()\n patternpairs = dict(); patternstokes = dict()\n for p in patternlist:\n if p.split()[0] == '#': continue\n patternpairs[p.split()[0]]=(len(p.split())-3)/2\n patternstokes[p.split()[0]]=int(p.split()[1])\n wav_l,heff_l,hpa_l,qeff_l = np.loadtxt(datadir+polcal,dtype=float,unpack=True)\n calversion = open(datadir+polcal, 'r').readlines()[1][2:].rstrip()\n\n with logging(logfile, debug) as log:\n \n # organize data using names\n files = len(infile_list)\n allrawlist = []\n for i in range(files):\n object,config,wvplt,count = os.path.basename(infile_list[i]).split('.')[0].rsplit('_',4)\n if (config[0]!='c')|(wvplt[0]!='h')|(not count.isdigit()):\n log.message('File '+infile_list[i]+' is not a raw stokes file.' , with_header=False) \n continue\n allrawlist.append([i,object,config,wvplt,count])\n configlist = sorted(list(set(ele[2] for ele in allrawlist))) # unique configs\n\n # correct raw stokes for track (TBS)\n\n # do one config at a time, since different configs may have different number of wavelengths\n for conf in configlist:\n log.message(\"\\nConfiguration: %s\" % conf, with_header=False) \n rawlist = [entry for entry in allrawlist if entry[2]==conf]\n for col in (4,3,1,2): rawlist = sorted(rawlist,key=operator.itemgetter(col)) \n rawstokes = len(rawlist)\n cols = pyfits.open(infile_list[rawlist[0][0]])['SCI'].data.shape[-1]\n stokes_jsw = np.zeros((rawstokes,2,cols)); \n var_jsw = np.zeros_like(stokes_jsw); bpm_jsw = np.zeros_like(stokes_jsw).astype(int)\n wav_jw = np.zeros((rawstokes,cols))\n comblist = []\n # get data\n for j in range(rawstokes):\n i,object,config,wvplt,count = rawlist[j]\n if j==0:\n lampid = pyfits.getheader(infile_list[i],0)['LAMPID'].strip().upper()\n telpa = float(pyfits.getheader(infile_list[i],0)['TELPA'])\n if lampid==\"NONE\":\n pacaltype = \"Equatorial\"\n hpa_l -= (telpa % 180)\n else:\n pacaltype =\"Instrumental\"\n calinfo = (pacaltype+' '+calversion)\n log.message(' Calibration: '+calinfo, with_header=False) \n \n wppat = pyfits.getheader(infile_list[i],0)['WPPATERN']\n wav0 = pyfits.getheader(infile_list[i],'SCI')['CRVAL1']\n dwav = pyfits.getheader(infile_list[i],'SCI')['CDELT1']\n stokes_jsw[j] = pyfits.open(infile_list[i])['SCI'].data.reshape((2,-1))\n var_jsw[j] = pyfits.open(infile_list[i])['VAR'].data.reshape((2,-1))\n bpm_jsw[j] = pyfits.open(infile_list[i])['BPM'].data.reshape((2,-1))\n wav_jw[j] = np.mgrid[wav0:(wav0+cols*dwav):dwav]\n if int(count)==1:\n comblist.append((j,object,config,wvplt,count,wppat))\n else:\n comblist[-1] = (j,object,config,wvplt,count,wppat)\n\n # combine multiple instances (count > 1)\n combstokes = len(comblist)\n stokes_ksw = np.zeros((combstokes,2,cols)); \n var_ksw = np.zeros_like(stokes_ksw)\n bpm_ksw = np.zeros_like(stokes_ksw).astype(int)\n wav_kw = np.zeros((combstokes,cols))\n chisqstokes_kw = np.zeros_like(wav_kw)\n obslist = []\n obsobject = ''\n obsconfig = ''\n chisqlist = [[]]\n for k in range(combstokes):\n j,object,config,wvplt,count,wppat = comblist[k]\n stokes_ksw[k] = stokes_jsw[j-int(count)+1:j+1].sum(axis=0)\n var_ksw[k] = var_jsw[j-int(count)+1:j+1].sum(axis=0) \n bpm_ksw[k] = (bpm_jsw[j-int(count)+1:j+1].sum(axis=0) > 0).astype(int)\n wav_kw[k] = wav_jw[j]\n\n # compute chisq/dof for multiple instances\n if int(count) > 1:\n combstokes_w = np.zeros(cols)\n bok = (bpm_ksw[k,1] == 0) \n combstokes_w[bok] = 
stokes_ksw[k,1,bok]/stokes_ksw[k,0,bok]\n for jj in range(j-int(count)+1,j+1):\n stokes_w = np.zeros(cols); errstokes_w = np.zeros_like(stokes_w)\n stokes_w[bok] = stokes_jsw[jj,1,bok]/stokes_jsw[jj,0,bok]\n errstokes_w[bok] = np.sqrt(var_jsw[jj,1,bok]/(stokes_jsw[jj,0,bok])**2)\n chisqstokes_kw[k,bok] += ((stokes_w[bok]-combstokes_w[bok])/errstokes_w[bok])**2\n chisqstokes_kw[k] /= int(count)-1\n chisqstokes = chisqstokes_kw[k].sum()/bok.sum()\n chisqlist[-1].append(chisqstokes)\n log.message(\" Chisq/dof Filter Pair %s: %7.2f\" % (wvplt,chisqstokes), with_header=False)\n if ((object != obsobject) | (config != obsconfig)):\n obslist.append([k,object,config,wppat,1])\n chisqlist.append([])\n obsobject = object; obsconfig = config\n else:\n obslist[-1][4] +=1\n \n # for each obs combine stokes, apply efficiency and PA calibration as appropriate for pattern, and save\n obss = len(obslist)\n for obs in range(obss):\n k,object,config,wppat,pairs = obslist[obs]\n obsname = object+\"_\"+config\n log.message(\"\\n Observation: %s\" % obsname, with_header=False)\n# print k,object,config,wppat,pairs\n finstokes = patternstokes[wppat]\n if pairs != patternpairs[wppat]:\n log.message(' Not a complete pattern, skipping observation', with_header=False) \n continue\n stokes_fw = np.zeros((finstokes,cols))\n var_fw = np.zeros_like(stokes_fw)\n ok_fw = bpm_ksw[k:k+pairs,:].sum(axis=0) == 0\n ok_w = ok_fw.all(axis=0)\n bpm_fw = np.repeat((np.logical_not(ok_w))[None,:],finstokes,axis=0)\n stokes_fw[0] = stokes_ksw[k:k+pairs,0].sum(axis=0)/pairs\n var_fw[0] = var_ksw[k:k+pairs,0].sum(axis=0)/pairs**2 \n\n if wppat.count('Linear'):\n var_fw = np.vstack((var_fw,np.zeros(cols))) # add QU covariance\n if wppat=='Linear':\n stokes_fw[1:,ok_w] = stokes_ksw[k:k+2,1,ok_w]*(stokes_fw[0,ok_w]/stokes_ksw[k:k+2,0,ok_w])\n var_fw[1:3,ok_w] = var_ksw[k:k+2,1,ok_w]*(stokes_fw[0,ok_w]/stokes_ksw[k:k+2,0,ok_w])**2\n elif wppat=='Linear-Hi':\n # for Linear-Hi, must go to normalized stokes in order for the pair combination to cancel systematic errors\n nstokes_pw = np.zeros((pairs,cols)); nvar_pw = np.zeros((pairs,cols))\n nstokes_fw = np.zeros((finstokes,cols)); nvar_fw = np.zeros((finstokes+1,cols))\n nstokes_pw[:,ok_w] = stokes_ksw[k:k+pairs,1,ok_w]/stokes_ksw[k:k+pairs,0,ok_w]\n nvar_pw[:,ok_w] = var_ksw[k:k+pairs,1,ok_w]/(stokes_ksw[k:k+pairs,0,ok_w])**2\n if debug: \n np.savetxt(obsname+\"_nstokes.txt\",np.vstack((ok_w.astype(int),nstokes_pw)).T,fmt=\"%3i \"+4*\"%10.6f \")\n np.savetxt(obsname+\"_nvar.txt\",np.vstack((ok_w.astype(int),nvar_pw)).T,fmt=\"%3i \"+4*\"%14.9f \")\n nstokes_fw[1] = 0.5*(nstokes_pw[0] + (nstokes_pw[1]-nstokes_pw[3])/np.sqrt(2.))\n nstokes_fw[2] = 0.5*(nstokes_pw[2] + (nstokes_pw[1]+nstokes_pw[3])/np.sqrt(2.))\n nvar_fw[1] = 0.25*(nvar_pw[0] + (nvar_pw[1]+nvar_pw[3])/2.)\n nvar_fw[2] = 0.25*(nvar_pw[2] + (nvar_pw[1]+nvar_pw[3])/2.)\n nvar_fw[3] = 0.25*((nvar_pw[1] - nvar_pw[3])/2.)\n stokes_fw[1:] = nstokes_fw[1:]*stokes_fw[0]\n var_fw[1:] = nvar_fw[1:]*stokes_fw[0]**2\n chisqq = ((nstokes_pw[0,ok_w] - nstokes_fw[1,ok_w])**2/nvar_fw[1,ok_w]).sum()/ok_w.sum() \n chisqu = ((nstokes_pw[2,ok_w] - nstokes_fw[2,ok_w])**2/nvar_fw[2,ok_w]).sum()/ok_w.sum()\n chisqlist[obs].append(chisqq)\n chisqlist[obs].append(chisqu)\n log.message(\" Chisq/dof Linear-Hi Q,U: %7.2f %7.2f\" % (chisqq,chisqu), with_header=False) \n\n # calculate, print estimated systematic error from chisq mean\n if len(chisqlist[obs]):\n chisqdof = np.array(chisqlist[obs]).mean()\n dofs = float(ok_fw[0].sum())\n chisqdoferr = 
np.sqrt(2./dofs)\n syserr = 0. # estimate systematic error using noncentral chisq distribution\n if (chisqdof - 1.) > 3.*chisqdoferr:\n nvar_fw = np.zeros_like(var_fw)\n nvar_fw[:,ok_fw[0]] = var_fw[:,ok_fw[0]]/stokes_fw[0,ok_fw[0]]**2\n syserr = np.sqrt(dofs*(chisqdof - 1.)/(1./nvar_fw[1,ok_fw[1]]).sum())\n print syserr \n \n log.message((\" Mean chisq/dof: %5.2f Estimated sys %%error: %5.2f\") % \\\n (chisqdof,100.*syserr), with_header=False)\n\n heff_w = interp1d(wav_l,heff_l,kind='cubic')(wav_kw[k])\n par_w = -interp1d(wav_l,hpa_l,kind='cubic')(wav_kw[k])\n c_w = np.cos(2.*np.radians(par_w)); s_w = np.sin(2.*np.radians(par_w))\n stokes_fw[1:] /= heff_w\n var_fw[1:] /= heff_w**2\n stokes_fw[1:] = stokes_fw[1]*c_w - stokes_fw[2]*s_w , \\\n stokes_fw[1]*s_w + stokes_fw[2]*c_w\n var_fw[1:3] = var_fw[1]*c_w**2 + var_fw[2]*s_w**2 , \\\n var_fw[1]*s_w**2 + var_fw[2]*c_w**2\n var_fw[3] = c_w*s_w*(var_fw[1] - var_fw[2]) + (c_w**2-s_w**2)*var_fw[3]\n\n # save final stokes fits file\n infile = infile_list[rawlist[comblist[k][0]][0]]\n hduout = pyfits.open(infile)\n hduout['SCI'].data = stokes_fw.astype('float32').reshape((3,1,-1))\n hduout['SCI'].header.update('CTYPE3','I,Q,U')\n hduout['VAR'].data = var_fw.astype('float32').reshape((4,1,-1))\n hduout['VAR'].header.update('CTYPE3','I,Q,U,QU')\n\n hduout['BPM'].data = bpm_fw.astype('uint8').reshape((3,1,-1))\n hduout['BPM'].header.update('CTYPE3','I,Q,U')\n hduout[0].header.update('POLCAL',calinfo)\n if len(chisqlist[obs]): \n hduout[0].header.update('SYSERR',100.*syserr, \\\n 'estimated % systematic error')\n outfile = object+'_'+config+'_stokes.fits'\n hduout.writeto(outfile,clobber=True,output_verify='warn')\n log.message('\\n '+outfile+' Stokes I,Q,U', with_header=False)\n \n# elif wppat.count('Circular'): TBS \n\n# elif wppat=='All-Stokes': TBS\n\n return",
"def read_data(self, uv, tave=False):\n self.freqs = uv.freq_array[0]\n a1 = uv.ant_1_array[:uv.Nbls]\n a2 = uv.ant_2_array[:uv.Nbls]\n for a in uv.antenna_numbers:\n if not a in a1 and not a in a2:\n if not a in self.dead: self.dead.append(a)\n pid = np.where(uv.polarization_array == pol_lookup[self.pol])[0][0]\n data = uv.data_array[:,0,:,pid].reshape(uv.Ntimes,uv.Nbls,uv.Nfreqs)\n flag = uv.flag_array[:,0,:,pid].reshape(uv.Ntimes,uv.Nbls,uv.Nfreqs)\n ind = np.where(a1!=a2)[0]\n self.mask = output_mask_array(flag[:,ind])\n self.shape_waterfall = (uv.Ntimes, uv.Nfreqs)\n def creat_dict(ii):\n if a1[ii] < 57 or a2[ii] < 57 or a1[ii] == a2[ii]: return # hard coded for MWA Phase II\n if a1[ii] in self.dead or a2[ii] in self.dead: return\n bl = (a1[ii],a2[ii])\n md = np.ma.masked_array(data[:,ii],flag[:,ii])\n diff = md[1:] - md[:-1]\n self.noise[bl] = np.var(diff,axis=0).data/2\n zerofq = np.where(np.sum(np.logical_not(diff.mask),axis=0) < 3)[0]\n md.mask[:,zerofq] = True\n self.data_backup[bl] = {self.pol: np.complex64(md.data)}\n self.flag_backup[bl] = {self.pol: np.copy(md.mask)}\n if tave:\n md = np.mean(md,axis=0,keepdims=True)\n self.data[bl] = {self.pol: np.complex64(md.data)}\n self.flag[bl] = {self.pol: md.mask}\n map(creat_dict, np.arange(uv.Nbls))\n if tave: self.mask= np.product(self.mask, axis=0, keepdims=True).astype(bool)\n mask = np.copy(self.mask)\n if mask.ndim == 2: mask = np.product(self.mask, axis=0).astype(bool)\n self.gains = RedGain(freqs=self.freqs, mask=mask)\n self.gains.get_auto(uv)",
"def main():\n\n\n ## Groups showing similar noise profile\n #grp1 = [ 1, 4, 5, 8, 9 ]\n #grp2 = [ 18, 19, 22, 23, 30, 31 ]\n grp1 = [ 0, 1, 6, 7, 4, 5 ]\n grp2 = [ 12, 13, 16, 17, 18, 19 ]\n #grp3 = [ 18, 19, 22, 23, 26, 27 ]\n with tb.open_file(sys.argv[1], 'r') as dataF:\n\n npm = len(dataF.root.Sensors.DataPMT)#len(dataF.root.RD.pmtrwf[0])\n nevt = len(dataF.root.RD.pmtrwf)\n\n ## Filter definition\n fSample = 40E6\n freqLPF = 100E3\n freqLPFd = 2*freqLPF / fSample\n b, a = signal.butter(1, freqLPFd, 'low', analog=False)\n ##\n fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20,6))\n #fig.tight_layout()\n fig.show()\n wf_len = len(dataF.root.RD.pmtrwf[0][0])\n if len(sys.argv) > 3:\n wf_len = wf_len/2+1 \n elif len(sys.argv) == 3:\n g1_first = np.zeros(wf_len, np.float64)\n g2_first = np.zeros(wf_len, np.float64)\n g3_first = np.zeros(wf_len, np.float64)\n mean_first = np.zeros(wf_len, np.float64)\n ##\n for ievt in range(nevt):\n ## clear the axies\n for ax in axes.flatten():\n ax.cla()\n plt_frq = np.zeros(wf_len, np.float64)\n fwf_mean = np.zeros(wf_len, np.float64)\n wf_mean = np.zeros(wf_len, np.float64) # No filter\n g1_mean = np.zeros(wf_len, np.float64)\n g2_mean = np.zeros(wf_len, np.float64)\n g3_mean = np.zeros(wf_len, np.float64)\n for ipm in range(npm):\n\n sg = getWF(dataF, ipm, ievt)\n sg = sg - np.mean(sg)\n\n sgf = signal.lfilter(b, a, sg)\n ## remove mean again just in case\n sgf = sgf - np.mean(sgf)\n #sgf = sg\n\n pmID = getPMid(dataF, ipm)\n\n if len(sys.argv) == 3:\n axes[0][0].plot(sgf, label='pmt '+str(pmID))\n fwf_mean += sgf/npm\n wf_mean += sg/npm\n if pmID in grp1:\n g1_mean += sgf/len(grp1)\n elif pmID in grp2:\n g2_mean += sgf/len(grp2)\n elif pmID in grp3:\n g3_mean += sgf/len(grp3)\n else:\n ft = np.fft.rfft(sgf)\n freq = np.fft.rfftfreq(len(sgf), d=25E-9)\n if ipm == 0:\n plt_frq = freq\n if sys.argv[2] == 'mag':\n ft_mag = np.absolute(ft)\n axes[0][0].plot(freq, ft_mag, label='pmt '+str(pmID))\n fwf_mean += ft_mag/npm\n if pmID in grp1:\n g1_mean += ft_mag/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_mag/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_mag/len(grp3)\n elif sys.argv[2] == 'phase':\n ft_pha = np.angle(ft)\n axes[0][0].plot(freq, ft_pha, label='pmt '+str(pmID))\n fwf_mean += ft_pha/npm\n if pmID in grp1:\n g1_mean += ft_pha/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_pha/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_pha/len(grp3)\n \n \n ## The axes not set\n if len(sys.argv) == 3:\n axes[0][1].plot(g1_mean)\n axes[0][1].set_title('Group 1 mean waveform')\n axes[1][0].plot(g2_mean)\n axes[1][0].set_title('Group 2 mean waveform')\n axes[1][1].plot(g3_mean)\n axes[1][1].set_title('Group 3 mean waveform')\n axes[2][0].plot(fwf_mean)\n axes[2][0].set_title('Mean waveform')\n if ievt == 0:\n g1_first = g1_mean\n g2_first = g2_mean\n g3_first = g3_mean\n mean_first = fwf_mean\n else:\n axes[0][1].plot(g1_first)\n axes[1][0].plot(g2_first)\n axes[1][1].plot(g3_first)\n axes[2][0].plot(mean_first)\n axes[2][1].plot(wf_mean)\n axes[2][1].set_title('Mean waveform and corrected')\n axes[2][1].plot(wf_mean-fwf_mean)\n axes[2][1].set_xlim(0, 1000)\n else:\n axes[0][0].set_xlim(0,50000)\n axes[0][1].plot(plt_frq, g1_mean)\n axes[0][1].set_title('Group 1 mean '+sys.argv[2])\n axes[0][1].set_xlim(0,50000)\n axes[1][0].plot(plt_frq, g2_mean)\n axes[1][0].set_title('Group 2 mean '+sys.argv[2])\n axes[1][0].set_xlim(0,50000)\n axes[1][1].plot(plt_frq, g3_mean)\n axes[1][1].set_title('Group 3 mean '+sys.argv[2])\n axes[1][1].set_xlim(0,50000)\n 
axes[2][0].plot(plt_frq, fwf_mean)\n axes[2][0].set_title('Mean '+sys.argv[2])\n axes[2][0].set_xlim(0,50000)\n plt.draw()\n #fig.legend(loc=0)\n catcher = input(\"next plot?\")\n if catcher == 'q':\n exit()\n plt.cla()",
"def dfluxes(wavelength, s, line1, line2, lowlow= 25, lowhigh=15, highlow=15, highhigh = 25, \n lmin=0, lmax=0, fmin=0, fmax=0,\n broad1=2.355, broad2=2.355, sus_line1=True, sus_line2=True,\n plot=True, verbose=True, plot_sus = False, fcal = True, \n fit_continuum = True, median_kernel=35, warnings = True ): # Broad is FWHM for Gaussian sigma= 1, \n # Setup wavelength limits\n if lmin == 0 :\n lmin = line1-65. # By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = line2+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((s[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n \n if np.nanmedian(f_spec) == np.nan: print(\" NO HAY DATOS.... todo son NANs!\")\n\n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n\n # We have to find some \"guess numbers\" for the Gaussian\n # Now guess_centre is line\n guess_centre1 = line1\n guess_centre2 = line2 \n guess_centre = (guess_centre1+guess_centre2)/2. \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to guess_centre\n \n\n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n\n if fit_continuum:\n # Linear Fit to continuum \n f_cont_filtered=sig.medfilt(f_cont,np.int(median_kernel))\n try: \n mm,bb = np.polyfit(w_cont, f_cont_filtered, 1)\n except Exception:\n bb = np.nanmedian(f_cont_filtered)\n mm = 0.\n if warnings: \n print(\" WARNING: Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value\") \n continuum = mm*np.array(w_spec)+bb \n c_cont = mm*np.array(w_cont)+bb \n\n else: \n # Median value in each continuum range # NEW 15 Sep 2019\n w_cont_low = []\n f_cont_low = []\n w_cont_low.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n f_cont_low.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n median_w_cont_low = np.nanmedian(w_cont_low)\n median_f_cont_low = np.nanmedian(f_cont_low)\n w_cont_high = []\n f_cont_high = []\n w_cont_high.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont_high.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n median_w_cont_high = np.nanmedian(w_cont_high)\n median_f_cont_high = np.nanmedian(f_cont_high) \n \n b = (median_f_cont_low-median_f_cont_high)/(median_w_cont_low-median_w_cont_high)\n a = median_f_cont_low- b * median_w_cont_low\n \n continuum = a + b*np.array(w_spec)\n c_cont = b*np.array(w_cont)+ a \n \n # rms continuum\n rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)\n\n # Search for index here w_spec(index) closest to line\n min_w = np.abs(np.array(w_spec)-line1)\n mini = np.nanmin(min_w)\n 
guess_peak1 = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n min_w = np.abs(np.array(w_spec)-line2)\n mini = np.nanmin(min_w)\n guess_peak2 = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n\n # Search for beginning/end of emission line, choosing line +-10 \n # 28th Feb 2019: Check central value between low_limit and high_limit\n\n # LOW limit\n low_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre1-15 and w_spec[i] < guess_centre1)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre1-15 and w_spec[i] < guess_centre1)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n\n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1,1,-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii-1]/c_fit[ii-1] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n# if f_fit[ii]/c_fit[ii] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if low_limit == 0: \n sorted_by_flux=np.argsort(fs)\n low_limit = ws[sorted_by_flux[0]]\n \n # HIGH LIMIT \n high_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre2 and w_spec[i] < guess_centre2+15)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre2 and w_spec[i] < guess_centre2+15)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii+1]/c_fit[ii+1] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n# if f_fit[ii]/c_fit[ii] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if high_limit == 0: \n sorted_by_flux=np.argsort(fs)\n high_limit = ws[sorted_by_flux[0]] \n \n # Fit a Gaussian to data - continuum \n p0 = [guess_centre1, guess_peak1, broad1/2.355, guess_centre2, guess_peak2, broad2/2.355] # broad is the Gaussian sigma, 1.0 for emission lines\n try:\n fit, pcov = curve_fit(dgauss, w_spec, f_spec-continuum, p0=p0, maxfev=10000) # If this fails, increase maxfev...\n fit_error = np.sqrt(np.diag(pcov))\n\n\n # New 28th Feb 2019: Check central value between low_limit and high_limit\n # Better: between guess_centre - broad, guess_centre + broad\n # If not, redo fit fixing central value to the peak (it does not work... 
just fix FWHM= (high_limit-low_limit)/2.5 )\n\n if verbose != False: print(\" ----------------------------------------------------------------------------------------\")\n if fit[0] < guess_centre1 - broad1 or fit[0] > guess_centre1 + broad1 or fit[3] < guess_centre2 - broad2 or fit[3] > guess_centre2 + broad2:\n if warnings: \n if fit[0] < guess_centre1 - broad1 or fit[0] > guess_centre1 + broad1: \n print(\" Fitted center wavelength\", fit[0],\"is NOT in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n else:\n print(\" Fitted center wavelength\", fit[0],\"is in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n if fit[3] < guess_centre2 - broad2 or fit[3] > guess_centre2 + broad2: \n print(\" Fitted center wavelength\", fit[3],\"is NOT in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n else:\n print(\" Fitted center wavelength\", fit[3],\"is in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n print(\" Fit failed!\")\n \n fit[0]=guess_centre1\n fit_error[0] = 0.000001\n fit[1]=guess_peak1\n fit_error[1] = 0.000001\n fit[2] = broad1/2.355\n fit_error[2] = 0.000001 \n fit[3]=guess_centre2\n fit_error[3] = 0.000001\n fit[4]=guess_peak2\n fit_error[4] = 0.000001\n fit[5] = broad2/2.355\n fit_error[5] = 0.000001\n else:\n if warnings: print(\" Fitted center wavelength\", fit[0],\"is in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n if warnings: print(\" Fitted center wavelength\", fit[3],\"is in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n \n\n if warnings: \n print(\" Fit parameters = \", fit[0], fit[1], fit[2]) \n print(\" \", fit[3], fit[4], fit[5])\n if fit[2] == broad1/2.355 and warnings == True : \n print(\" WARNING: Fit in\",fit[0],\"failed! Using given centre wavelengths (cw), peaks at (cv) & sigmas=broad/2.355 given.\") # CHECK THIS \n\n gaussian_fit = dgauss(w_spec, fit[0], fit[1], fit[2],fit[3], fit[4], fit[5])\n \n gaussian_1 = gauss(w_spec, fit[0], fit[1], fit[2])\n gaussian_2 = gauss(w_spec, fit[3], fit[4], fit[5])\n \n\n # Estimate rms of the Gaussian fit in range [low_limit, high_limit]\n residuals = f_spec-gaussian_fit-continuum\n rms_fit = np.nansum([ ((residuals[i]**2)/(len(residuals)-2))**0.5 for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n \n # Fluxes, FWHM and Eq. 
Width calculations # CHECK THIS , not well done for dfluxes !!!\n \n gaussian_flux_1 = gauss_flux(fit[1],fit[2])\n gaussian_flux_2 = gauss_flux(fit[4],fit[5]) \n gaussian_flux = gaussian_flux_1+ gaussian_flux_2 \n if warnings: \n print(\" Gaussian flux = \", gaussian_flux_1, \" + \",gaussian_flux_2,\" = \",gaussian_flux)\n print(\" Gaussian ratio = \", gaussian_flux_1/gaussian_flux_2)\n \n error1 = np.abs(gauss_flux(fit[1]+fit_error[1],fit[2]) - gaussian_flux)\n error2 = np.abs(gauss_flux(fit[1],fit[2]+fit_error[2]) - gaussian_flux)\n gaussian_flux_error = 1 / ( 1/error1**2 + 1/error2**2 )**0.5\n \n fwhm=fit[2]*2.355\n fwhm_error = fit_error[2] *2.355\n fwhm_vel = fwhm / fit[0] * C \n fwhm_vel_error = fwhm_error / fit[0] * C \n \n gaussian_ew = gaussian_flux/np.nanmedian(f_cont)\n gaussian_ew_error = gaussian_ew * gaussian_flux_error/gaussian_flux \n \n # Integrated flux\n # IRAF: flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i2) \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n gauss_to_integrated = gaussian_flux/flux * 100.\n \n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n #Plot input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec), \"blue\", lw=2, alpha = 0.7)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim((line1+line2)/2-40,(line1+line2)/2+40)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre1, color='r', linestyle='-', alpha=0.5)\n plt.axvline(x=guess_centre2, color='r', linestyle='-', alpha=0.5)\n\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n plt.axvline(x=fit[3], color='k', linestyle='-', alpha=0.5)\n # Plot Gaussians + cont\n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.5, lw=3) \n plt.plot(w_spec, gaussian_1+continuum, color=\"navy\",linestyle='--', alpha=0.8)\n plt.plot(w_spec, gaussian_2+continuum, color=\"#1f77b4\",linestyle='--', alpha=0.8)\n plt.plot(w_spec, np.array(f_spec)-(gaussian_fit), 'orange', alpha=0.4, linewidth=5) \n\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n plt.title('Double Gaussian Fit') # Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit))\n plt.show()\n plt.close()\n \n # Plot residuals\n# plt.figure(figsize=(10, 1))\n# plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n# 
plt.ylabel(\"RMS\")\n# plt.xlim((line1+line2)/2-40,(line1+line2)/2+40)\n# plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n# plt.axvline(x=fit[3], color='k', linestyle='-', alpha=0.5)\n# plt.plot(w_spec, residuals, 'k')\n# plt.minorticks_on()\n# plt.show()\n# plt.close()\n\n \n # Printing results\n if verbose :\n #print \"\\n> WARNING !!! CAREFUL WITH THE VALUES PROVIDED BELOW, THIS TASK NEEDS TO BE UPDATED!\\n\"\n print(\"\\n> Gauss and continuum fitting + integrated flux calculations:\\n\")\n print(\" rms continuum = %.3e erg/cm/s/A \" % (rms_cont)) \n print(\" Gaussian Fit parameters: x0 = ( %.2f +- %.2f ) A \" % (fit[0], fit_error[0]))\n print(\" y0 = ( %.3f +- %.3f ) 1E-16 erg/cm2/s/A\" % (fit[1]/1E-16, fit_error[1]/1E-16 ))\n print(\" sigma = ( %.3f +- %.3f ) A\" % (fit[2], fit_error[2])) \n print(\" rms fit = %.3e erg/cm2/s/A\" % (rms_fit))\n print(\" Gaussian Flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent)\" % (gaussian_flux/1E-16, gaussian_flux_error/1E-16, gaussian_flux_error/gaussian_flux*100))\n print(\" FWHM = ( %.3f +- %.3f ) A = ( %.1f +- %.1f ) km/s \" % (fwhm, fwhm_error, fwhm_vel, fwhm_vel_error))\n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (-gaussian_ew, gaussian_ew_error)) \n print(\"\\n Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n print(\" Gauss/Integrated = %.2f per cent \" % gauss_to_integrated)\n \n \n # New 22 Jan 2019: sustract Gaussian fit\n index=0\n s_s=np.zeros_like(s)\n sustract_this = np.zeros_like(gaussian_fit)\n if sus_line1:\n sustract_this = sustract_this + gaussian_1\n if sus_line2:\n sustract_this = sustract_this + gaussian_2 \n \n \n for wave in range(len(wavelength)):\n s_s[wave]=s[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-sustract_this[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-sustract_this[index]\n index=index+1\n if plot_sus: \n plt.figure(figsize=(10, 4))\n plt.plot(wavelength,s, \"r\")\n plt.plot(wavelength,s_s, \"c\")\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n plt.show()\n plt.close()\n \n # This gaussian_flux in 3 is gaussian 1 + gaussian 2, given in 15, 16, respectively\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16\n resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, s_s, fit[3], fit[4],fit[5], gaussian_flux_1, gaussian_flux_2 ]\n return resultado \n except Exception:\n if verbose: print(\" Double Gaussian fit failed!\")\n resultado = [0, line1, 0, 0, 0, 0, 0, 0, 0, 0, 0, s, 0, 0, 0, 0, 0 ] # line was identified at lambda=line but Gaussian fit failed\n\n # NOTA: PUEDE DEVOLVER EL FLUJO INTEGRADO AUNQUE FALLE EL AJUSTE GAUSSIANO...\n\n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\") \n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', 
alpha=0.5)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n# plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n# plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n# plt.plot(w_spec, residuals, 'k')\n plt.title(\"No Gaussian fit obtained...\")\n plt.show()\n\n\n return resultado",
"def dataload():\n\t\n\tglobal A, B, fnA, fnB, lPcnA, lPcnB\n\t\n\tdwd = os.getcwd() # Data WD\n\t\t\n\t# First sample A is loaded. This is the \"calibrating\" sample.\n\t# In this case it is the OGLE III LMC small amplitude RGs.\n\t\n\tfnA = '/LMC-CalSample-cleaned_2.fits'\n\tA = Table.read(dwd+fnA)\n\n\t# Then sample B is loaded. For comparison/testing purposes, this is\n\t# again the OGLE III LMC SARGs.\n\t\n\tfnB = '/LMC-CalSample-cleaned_2.fits'\n\tB = Table.read(dwd+fnB)\n\t\n\t\"\"\" Fix tables so only the stars with all three good periods are \n\tconsidered. \"\"\"\n\t\n\tlPcnA = get_logPcn(A)\n\tlPcnB = get_logPcn(B)\n\t\n\tfor cn in lPcnA:\n\t\tA = A[A[cn]>0]\n\tfor cn in lPcnB:\n\t\tB = B[B[cn]>0]",
"def postAnalysis(masterFileName,hashcode):\n import numpy as np\n import mk.library.parser\n numFail=0\n fileFail=[]\n\n fileFLDall = open('allFLD-%s.txt'%hashcode,'w')\n fileFLDmin = open('minFLD-%s.txt'%hashcode,'w')\n\n with open(masterFileName) as FO:\n blocks = FO.read().split('--\\n')[:-1:]\n dat_min_master=[]\n # print 'number of blocks',len(blocks)\n for i in xrange(len(blocks)): ## each block\n eachBlock = blocks[i]\n linesInBlock = eachBlock.split('\\n')[0:-1:]\n # print linesInBlock\n\n ## find the minimum |(E1,E2)|\n min_rad = 2.0\n dat_min = None\n ind_min = None\n data_min_line = None\n matA_FN = None\n matB_FN = None\n ss_FN = None\n for j in xrange(len(linesInBlock)):\n line = linesInBlock[j]\n ind, fn = line.split()\n try:\n data, f, psi0, th, data_line,\\\n matA_FN, matB_FN, ss_FN = read(fn)\n except:\n pass\n else:\n fileFLDall.write('%s'%data_line)\n epsRD, epsTD, psi0, psif, \\\n sigRD,sigTD,sigA,T,dt = data[:9]\n\n if np.isnan(epsRD) or np.isnan(epsTD):\n fileFail.append(fn)\n numFail=numFail+1\n else:\n rad = np.sqrt(epsRD**2+epsTD**2)\n if rad<min_rad:\n dat_min = data[::]\n min_rad = rad\n ind_min = j\n data_min_line = data_line\n\n dat_min_master.append(\n [dat_min,matA_FN,matB_FN,ss_FN])\n\n if type(data_min_line).__name__!='NoneType':\n fileFLDmin.write('%s'%data_min_line)\n\n fileFLDall.close(); fileFLDmin.close()\n\n ## iplot?\n import matplotlib.pyplot as plt\n from mk.library.lib import draw_guide\n fig = plt.figure(figsize=(7,6))\n ax1=fig.add_subplot(221);ax2=fig.add_subplot(222)\n ax3=fig.add_subplot(223);ax4=fig.add_subplot(224)\n dat=np.loadtxt(fileFLDmin.name,dtype='str').T\n dat=dat[:9]\n\n ax1.plot(dat[1],dat[0],'o')\n dat=np.loadtxt(fileFLDall.name,dtype='str').T\n dat=dat[:9]\n ax2.plot(dat[1],dat[0],'o')\n draw_guide(ax1,r_line=[-0.5,0,1,2,2.5],max_r=2)\n draw_guide(ax2,r_line=[-0.5,0,1,2,2.5],max_r=2)\n ax1.set_aspect('equal');ax2.set_aspect('equal')\n\n ##\n for i in xrange(len(dat_min_master)):\n dat_min, matA_FN, matB_FN, ss_FN = dat_min_master[i]\n mk.library.parser.plotMat(matA_FN,ax=ax3,\n color='red',linestyle='-')\n mk.library.parser.plotMat(matB_FN,ax=ax3,\n color='blue',linestyle='--')\n mk.library.parser.plotEtc(ss_FN,ax=ax4)\n fig.savefig('mk_fld_pp_%s.pdf'%hashcode)",
"def warm_region_cal(audio_samples, fs):\n #window the audio\n windowed_samples = timbral_util.window_audio(audio_samples)\n\n # need to define a function for the roughness stimuli, emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 10\n max_bark_band = 40\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n wr_array = np.zeros(240)\n wr_array[min_bark_band:max_bark_band] = x\n\n # need to define a second array emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 80\n max_bark_band = 240\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n hf_array = np.zeros(240)\n hf_array[min_bark_band:max_bark_band] = x\n\n windowed_loud_spec = []\n windowed_rms = []\n\n wr_vals = []\n hf_vals = []\n\n for i in range(windowed_samples.shape[0]):\n samples = windowed_samples[i, :]\n N_entire, N_single = timbral_util.specific_loudness(samples, Pref=100.0, fs=fs, Mod=0)\n\n # append the loudness spec\n windowed_loud_spec.append(N_single)\n windowed_rms.append(np.sqrt(np.mean(samples * samples)))\n\n wr_vals.append(np.sum(wr_array * N_single))\n hf_vals.append(np.sum(hf_array * N_single))\n\n mean_wr = np.mean(wr_vals)\n mean_hf = np.mean(hf_vals)\n weighted_wr = np.average(wr_vals, weights=windowed_rms)\n weighted_hf = np.average(hf_vals, weights=windowed_rms)\n\n return mean_wr, weighted_wr, mean_hf, weighted_hf",
"def process_trace(n_tr, tr, sta, orig_time, cmps, cfg):\n cmp = tr.stats.channel[2:3]\n sta[cmp] = {}\n sta[cmp][\"times\"] = tr.times(reftime=orig_time)\n\n sta[cmp][\"tr_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]), sta[\"lenD\"])\n )\n sta[cmp][\"f1_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]), len(cfg.picking.KURT_WINS),\n sta[\"lenD\"])\n )\n sta[cmp][\"f1_mean\"] = np.zeros(sta[\"lenD\"])\n sta[cmp][\"f3_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]),\n len(cfg.picking.KURT_WINS), sta[\"lenD\"])\n )\n sta[cmp][\"f3_mean_smooth\"] = np.zeros(\n (len(cfg.picking.CF_MEAN_SMOOTH_WIND), sta[\"lenD\"])\n )\n sta[cmp][\"f4_all\"] = np.zeros((len(cfg.picking.CF_MEAN_SMOOTH_WIND),\n sta[\"lenD\"]))\n sta[cmp][\"f1_mean_smooth\"] = np.zeros(sta[\"lenD\"])\n # Get suitable filters (exclude those fully outside Nyquist freq.)\n for phase in [\"P\", \"S\"]:\n if cmp in cmps[phase]:\n sta[\"picks\"][\"poss_obs\"][phase][cmp] = {}\n sta[cmp][\"filtwins_check\"] = [\n filt_win for filt_win in cfg.picking.FILT_WINS[phase]\n if filt_win[0] < sta[\"samplerate\"] / 2\n ]\n if cfg.picking.INTEGRATE_S is True:\n tr.integrate()\n\n for n_filt, filt in enumerate(sta[cmp][\"filtwins_check\"]):\n # Ensure that filter covers sample rate / 2\n if (tr.stats.sampling_rate / 2) <= filt[0]:\n print(\"Skipping this Kurtosis run due to sample rate/2<f\")\n continue\n tr.filter(\"bandpass\", freqmin=filt[0], freqmax=filt[1])\n try:\n sta[cmp][\"tr_results\"][n_filt] = tr.data\n except ValueError: # If input array length is inconsistent\n continue\n # Loop over kurtosis windows\n for n_kurt, kurt_win_s in enumerate(cfg.picking.KURT_WINS):\n f1 = CF_kurtosis(kurt_win_s, tr)\n sta[cmp][\"f1_results\"][n_filt, n_kurt] = f1 # Needed for weights\n f2 = kurt_transform_f2(f1, kurt_win_s, tr)\n f3 = kurt_transform_f3(f2, kurt_win_s, tr)\n\n sta[cmp][\"f3_results\"][n_filt, n_kurt] = f3\n sta[cmp][\"f1_mean\"] = np.nanmean(sta[cmp][\"f1_results\"], axis=0)[0]\n sta[cmp][\"f1_mean_smooth\"] = do_smooth(\n sta[cmp][\"f1_mean\"], cfg.picking.CF_MEAN_SMOOTH_WIND[0],\n tr.stats.sampling_rate\n )\n # ^ Throws up a warning first time due to NaN slices\n # Compute mean CF and final kurtosis transform\n f3_mean = np.nanmean(sta[cmp][\"f3_results\"], axis=0)[0]\n\n for nsm, smooth_wind in enumerate(cfg.picking.CF_MEAN_SMOOTH_WIND):\n sta[cmp][\"f3_mean_smooth\"][nsm] = do_smooth(\n f3_mean, smooth_wind, tr.stats.sampling_rate\n )\n f4 = kurt_transform_f4(sta[cmp][\"f3_mean_smooth\"][nsm],\n np.max(cfg.picking.KURT_WINS), tr)\n sta[cmp][\"f4_all\"][nsm] = f4\n\n # Now pick (avoiding end and beginning of signal)\n # Pick the P-waves\n if cmp in cmps[\"P\"]:\n sta[\"picks\"][\"poss_obs\"][\"P\"][cmp][nsm] = []\n # Find points where Kurt<0 & doesn't look like S-wave\n p_cands = np.argwhere((f4 < 0.0))\n for idx in p_cands.tolist():\n kurt_wgt = np.min(np.where(np.array(\n cfg.picking.KURT2WGHT[\"P\"]\n <= sta[cmp][\"f1_mean_smooth\"][idx])))\n sta[\"picks\"][\"poss_obs\"][\"P\"][cmp][nsm].append([\n orig_time+sta[cmp][\"times\"][idx][0], f4[idx][0],\n tr.stats.channel, kurt_wgt, idx,\n sta[cmp][\"times\"][idx][0]\n ])\n # Pick the S-waves\n if cmp in cmps[\"S\"]:\n sta[\"picks\"][\"poss_obs\"][\"S\"][cmp][nsm] = []\n\n # Find points where Kurt<0 & doesn't look like S-wave\n s_cands = np.argwhere((f4 < 0.0))\n for idx in s_cands.tolist():\n kurt_wgt = np.min(np.where(np.array(cfg.picking.KURT2WGHT[\"S\"]\n <= sta[cmp][\"f1_mean_smooth\"][idx]))\n )\n 
sta[\"picks\"][\"poss_obs\"][\"S\"][cmp][nsm].append([\n orig_time+sta[cmp][\"times\"][idx][0], f4[idx][0],\n tr.stats.channel, kurt_wgt, idx,\n sta[cmp][\"times\"][idx][0]\n ])\n return(sta)",
"def process_event(self, evt):\n det_data = {}\n for det, thisDetDict in zip(self.dets, self.targetVarsXtc):\n try:\n det.getData(evt)\n det.processFuncs()\n thisDetDataDict = getUserData(det)\n img = None\n for key in thisDetDataDict.keys():\n if key=='full_area':\n img = thisDetDataDict[key].astype(float) # needed for detectors whose data are uint16 (Rayonix)\n elif key.find('ROI')>=0:\n img = thisDetDataDict[key].astype(float)\n if img is None:\n print('Problem with getting detector area data.')\n continue\n if 'thresADU' in thisDetDict:\n img[img<thisDetDict['thresADU']] = 0\n elif 'thresRms' in thisDetDict:\n img[img<thisDetDict['thresRms']*det.rms] = 0\n\n det_data[det._name] = img # can onky handle full area ROIFunc for now\n \n # calculate variance (see ... for ref)\n \n# if not (key=='full_area' or key.find('ROI')>=0 or key.find('photon_img')>=0):\n# continue\n# if (key=='full_area' or key.find('ROI')>=0):\n# if 'thresADU' in thisDetDict:\n# thisDetDataDict[key][thisDetDataDict[key]<thisDetDict['thresADU']]=0\n# elif 'thresRms' in thisDetDict:\n# thisDetDataDict[key][thisDetDataDict[key]<thisDetDict['thresRms']*det.rms]=0\n# dArray[ib%bins_per_job]=dArray[ib%bins_per_job]+thisDetDataDict[key]\n# else: #if key.find('photon_img')\n# dIArray[ib%bins_per_job]=dIArray[ib%bins_per_job]+thisDetDataDict[key]\n\n# x = thisDetDataDict[key]\n# oldM = dMArray\n# dMArray = dMArray + (x-dMArray)/(ievt+1)\n# dSArray = dSArray + (x-dMArray)*(x-oldM)\n except Exception as e:\n print('Failed to get data for this event for det {}.\\n{}'.format(det._name, e))\n det_data[det._name] = None\n return det_data",
"def process_station(sta, st_sta, dist, ev_dict, evt, orig_time, phase, cmps,\n cfg):\n sta[\"ncha\"] = len(st_sta)\n sta[\"lenD\"] = int(st_sta[0].stats.npts)\n sta[\"samplerate\"] = st_sta[0].stats.sampling_rate\n sta[\"nwinsamp\"] = int(np.ceil(cfg.picking.KURT_WINS[0]*sta[\"samplerate\"]))\n st_sta.detrend()\n st_sta.detrend(type=\"demean\")\n for n_tr, _ in enumerate(st_sta):\n st_sta[n_tr].stats.distance = dist\n # Set empty arrays for filling during process\n sta[\"filtwins_check\"] = {}\n sta[\"picks\"] = {}\n sta[\"picks\"][\"poss_obs\"] = {}\n sta[\"picks\"][\"poss_obs\"][\"P\"] = {}\n sta[\"picks\"][\"poss_obs\"][\"S\"] = {}\n sta[\"noise_levels\"] = np.zeros(len(st_sta))\n for n_tr, tr in enumerate(st_sta):\n sta[\"noise_levels\"][n_tr] = compute_noise_levels(tr)\n if sta[\"noise_levels\"][n_tr] < cfg.sig_noise.MAX_NOISE_LEVEL:\n sta = process_trace(n_tr, tr, sta, orig_time, cmps, cfg)\n return(sta)",
"def refine_dataset(original_data,settings):\n data = original_data[original_data.sweep_primary_load_temperature >= settings['valid_load_temp_range'][0]]\n data = data[data.sweep_primary_load_temperature <= settings['valid_load_temp_range'][1]]\n data = data[data.f_0_err/data.f_0 < settings['fractional_f_0_err_limit']]\n data = data[data.Q_err/data.Q < settings['fractional_Q_err_limit']]\n data = data[data.Q >= settings['valid_Q_range'][0]]\n data = data[data.Q <= settings['valid_Q_range'][1]]\n if settings['max_package_temp_deviation'] is not None:\n median_temp = np.median(data.sweep_primary_package_temperature)\n temp_deviations = np.abs(data.sweep_primary_package_temperature - median_temp)\n data = data[temp_deviations < settings['max_package_temp_deviation']]\n #data = data.sort([\"f_0\"])\n data['f_0_max'] = np.zeros((data.shape[0],))#data.groupby(\"resonator_index\")[\"f_0\"].transform(lambda x: x.max())\n data['Q_i_max'] = np.zeros((data.shape[0],))\n data['responsivity_Hz_per_K'] = np.zeros((data.shape[0],))\n data['responsivity_err'] = np.zeros((data.shape[0],))\n data['responsivity_offset'] = np.zeros((data.shape[0],))\n for index in np.unique(data.resonator_index):\n group = data[data.resonator_index == index]\n max = group[group.sweep_primary_load_temperature < settings['f_0_max_temp_limit']].f_0.max()\n data.f_0_max[data.resonator_index == index] = max\n max = group[group.sweep_primary_load_temperature < settings['f_0_max_temp_limit']].Q_i.max()\n data.Q_i_max[data.resonator_index == index] = max\n \n data['delta_f_0_Hz'] = (data.f_0-data.f_0_max)*1e6\n data['fractional_delta_f_0'] = data.delta_f_0_Hz/(1e6*data.f_0_max)#(1e6*data.noise_measurement_freq_MHz)\n data['fractional_delta_Q_i'] = data.Q_i/data.Q_i_max - 1\n\n for index in np.unique(data.resonator_index):\n group = data[data.resonator_index == index]\n try:\n (slope,offset),cov = np.polyfit(group.sweep_primary_load_temperature,group.delta_f_0_Hz,1,cov=True)\n print slope\n data.responsivity_Hz_per_K[data.resonator_index == index] = slope\n data.responsivity_offset[data.resonator_index == index] = offset\n data.responsivity_err[data.resonator_index == index] = np.sqrt(cov[1,1])\n except ValueError:\n continue\n except np.linalg.LinAlgError:\n continue\n eigvals_Hz = []\n nets = []\n for eigvals,freq,responsivity in zip(data.pca_eigvals,data.noise_measurement_freq_MHz,data.responsivity_Hz_per_K):\n # Convert eigvals spectra from 1/Hz units to Hz/sqrt(Hz)\n spectrum_Hz = np.sqrt(eigvals)*freq*1e6\n eigvals_Hz.append(spectrum_Hz)\n # Calculate net in muK sqrt(s). In the following, 1e6 is K -> uK factor, and sqrt(2) is 1/sqrt(Hz) -> sqrt(s) factor\n net = (1e6*spectrum_Hz/abs(responsivity))/np.sqrt(2)\n nets.append(net)\n data['pca_eigvals_Hz_per_rootHz'] = eigvals_Hz \n data['net_uK_rootsec'] = nets\n return data",
"def HD_input_sncosmo_data(self, sn_list):\n\n dico = cPickle.load(open(SUGAR_parameter_pkl))\n self.read_sncosmo(path='../sugar_analysis_data/results/res_salt2_SNF_5_nomodelcov.txt')\n self.read_meta()\n self.read_snfit_results()\n Filtre = np.array([True]*len(self.sncosmo_sn_name))\n self.zcmb = []\n self.z_err = []\n for j, sn_name in enumerate(self.sncosmo_sn_name):\n# if self.sncosmo_sn_name[j] in dico.keys():\n#\n# for i in range (len(self.meta_sn_name_list)):\n# if self.sncosmo_sn_name[j] == self.meta_sn_name_list[i]:\n# \n# self.z_err.append(self.meta_zhl_err[i])\n# self.zcmb.append(self.meta_zcmb[i])\n# if np.abs(self.sncosmo_x1[j] - self.x1[i]) > 0.01:\n# i print 'problem with %s include in sample but big difference between sncosmo and snfit'%(self.sncosmo_sn_name[j])\n# else:\n# Filtre[j] = False\n if sn_name in sn_list:\n Filtre[j] = True\n else:\n Filtre[j] = False\n\n for p in dico.keys():\n if p not in self.sncosmo_sn_name:\n print p\n\n self.sncosmo_x1 = self.sncosmo_x1[Filtre]\n self.sncosmo_x1_err = self.sncosmo_x1_err[Filtre] \n self.sncosmo_c = self.sncosmo_c[Filtre]\n self.sncosmo_c_err = self.sncosmo_c_err[Filtre]\n self.sncosmo_mb = self.sncosmo_mb[Filtre]\n self.sncosmo_mb_err = self.sncosmo_mb_err[Filtre]\n self.sncosmo_cov_x1_c = self.sncosmo_cov_x1_c[Filtre]\n self.sncosmo_cov_mb_x1 = self.sncosmo_cov_mb_x1[Filtre]\n self.sncosmo_cov_mb_c = self.sncosmo_cov_mb_c[Filtre]\n self.sncosmo_z = self.sncosmo_z[Filtre]\n self.zcmb = np.array(self.zcmb)\n self.z_err = np.array(self.z_err)\n\n self.sncosmo_cov_y = np.zeros((len(self.sncosmo_mb)*3,len(self.sncosmo_mb)*3))\n \n for i in range (len(self.sncosmo_mb)):\n self.sncosmo_cov_y[i*3,i*3] = self.sncosmo_mb_err[i]**2\n self.sncosmo_cov_y[i*3+ 1,i*3+ 1] = self.sncosmo_x1_err[i]**2\n \n self.sncosmo_cov_y[i*3+ 2,i*3+ 2] = self.sncosmo_c_err[i]**2\n self.sncosmo_cov_y[i*3+ 0,i*3+ 1] = self.sncosmo_cov_mb_x1[i]\n self.sncosmo_cov_y[i*3+ 1,i*3+ 0] = self.sncosmo_cov_mb_x1[i]\n self.sncosmo_cov_y[i*3+ 0,i*3+ 2] = self.sncosmo_cov_mb_c[i]\n self.sncosmo_cov_y[i*3+ 2,i*3+ 0] = self.sncosmo_cov_mb_c[i]\n self.sncosmo_cov_y[i*3+ 1,i*3+ 2] = self.sncosmo_cov_x1_c[i] \n self.sncosmo_cov_y[i*3+ 2,i*3+ 1] = self.sncosmo_cov_x1_c[i] \n \n self.salt_parm = np.array([self.sncosmo_mb,self.sncosmo_x1,self.sncosmo_c]).T\n print len(self.salt_parm), len(self.sncosmo_cov_y), len(self.sncosmo_z), len(self.zcmb)\n return self.salt_parm, self.sncosmo_cov_y, self.sncosmo_z, self.zcmb, self.z_err",
"def dstrf_snapshots(rec, model_list, D=11, out_channel=0, time_step=85, snr_threshold=5):\n t_indexes = np.arange(time_step, rec['stim'].shape[1], time_step)\n dlc = rec['dlc'].as_continuous().T\n log.info(f\"Computing dSTRF at {len(t_indexes)} timepoints, {dlc.shape[1]} DLC channels, t_step={time_step}\")\n if rec.meta['batch'] in [346, 347]:\n dicount=didx.shape[0]\n else:\n dicount=4\n\n dstrf = {}\n mdstrf = np.zeros((len(model_list), dicount, rec['stim'].shape[0], D))\n pc1 = np.zeros((len(model_list), dicount, rec['stim'].shape[0], D))\n pc2 = np.zeros((len(model_list), dicount, rec['stim'].shape[0], D))\n pc_count=3\n pc_mag_all = np.zeros((len(model_list), dicount, pc_count))\n for di in range(dicount):\n dlc1 = dlc.copy()\n dcount=dlc1.shape[1]\n didx_ = adjust_didx(dlc, didx)\n\n for t in t_indexes:\n dlc1[(t-didx_.shape[1]+1):(t+1), :] = didx_[di,:,:dcount]\n log.info(f\"DLC values: {np.round(didx[di,-1,:dcount],3)}\")\n #log.info(f'di={di} Applying HRTF for frozen DLC coordinates')\n #rec2 = rec.copy()\n #rec2['dlc'] = rec2['dlc']._modified_copy(data=dlc1.T)\n #rec2 = free_tools.stim_filt_hrtf(rec2, hrtf_format='az', smooth_win=2,\n # f_min=200, f_max=20000, channels=18)['rec']\n\n for mi, m in enumerate(model_list):\n stim = {'stim': rec['stim'].as_continuous().T, 'dlc': dlc1}\n dstrf[di] = m.dstrf(stim, D=D, out_channels=[out_channel], t_indexes=t_indexes)\n\n d = dstrf[di]['stim'][0, :, :, :]\n\n if snr_threshold is not None:\n d = np.reshape(d, (d.shape[0], d.shape[1] * d.shape[2]))\n md = d.mean(axis=0, keepdims=True)\n e = np.std(d - md, axis=1) / np.std(md)\n if (e > snr_threshold).sum() > 0:\n log.info(f\"Removed {(e > snr_threshold).sum()}/{len(d)} noisy dSTRFs for PCA calculation\")\n\n d = dstrf[di]['stim'][0, (e <= snr_threshold), :, :]\n mdstrf[mi, di, :, :] = d.mean(axis=0)\n pc, pc_mag = dtools.compute_dpcs(d[np.newaxis, :, :, :], pc_count=pc_count)\n pc1[mi, di, :, :] = pc[0, 0, :, :] * pc_mag[0, 0]\n pc2[mi, di, :, :] = pc[0, 1, :, :] * pc_mag[1, 0]\n pc_mag_all[mi, di, :] = pc_mag[:, 0]\n return mdstrf, pc1, pc2, pc_mag_all",
"def fillCoreVariables(self, tr, event, isMC):\n tr.fill('run', event.input.eventAuxiliary().id().run())\n tr.fill('lumi',event.input.eventAuxiliary().id().luminosityBlock())\n tr.fill('evt', event.input.eventAuxiliary().id().event()) \n tr.fill('isData', 0 if isMC else 1)\n\n# triggerResults = self.handles['TriggerResults'].product()\n# for T,TC in self.triggerBitCheckers:\n# tr.fill(\"HLT_\"+T, TC.check(event.object(), triggerResults))\n\n if not isMC:\n tr.fill('intLumi', getattr(self.cfg_comp,'intLumi',1.0))\n\n if isMC:\n ## xsection, if available\n tr.fill('xsec', getattr(self.cfg_comp,'xSection',1.0))\n ## PU weights, check if a PU analyzer actually filled it\n if hasattr(event,\"nPU\"):\n tr.fill(\"nTrueInt\", event.nPU)\n tr.fill(\"puWeight\", event.puWeight)\n else :\n tr.fill(\"nTrueInt\", -1)\n tr.fill(\"puWeight\", 1.0)\n\n tr.fill(\"genWeight\", self.mchandles['GenInfo'].product().weight())\n ## PDF weights\n if hasattr(event,\"pdfWeights\") :\n for (pdf,nvals) in self.pdfWeights:\n if len(event.pdfWeights[pdf]) != nvals:\n raise RuntimeError(\"PDF lenght mismatch for %s, declared %d but the event has %d\" % (pdf,nvals,event.pdfWeights[pdf]))\n if self.scalar:\n for i,w in enumerate(event.pdfWeights[pdf]):\n tr.fill('pdfWeight_%s_%d' % (pdf,i), w)\n else:\n tr.vfill('pdfWeight_%s' % pdf, event.pdfWeights[pdf])",
"def zMCEff1D(ZReco, ZAcc, bins, obs, region='inclusive', sel=''):\n print(\">>> make differential efficiencies for \"+obs+\" in \"+region+\" region\")\n\n if sel != '':\n ZReco = ZReco.query(sel)\n ZAcc = ZAcc.query(sel)\n\n ZEff = [np.zeros(len(bins)-1), np.zeros(len(bins)-1)]\n ZEff_sel = [np.zeros(len(bins)-1), np.zeros(len(bins)-1)]\n ZEff_glo = [np.zeros(len(bins)-1), np.zeros(len(bins)-1)]\n\n MuEff_HLT_Sel = [np.zeros(len(bins) - 1), np.zeros(len(bins) - 1)]\n MuEff_Sel_Glo = [np.zeros(len(bins) - 1), np.zeros(len(bins) - 1)]\n MuEff_Glo_StaOrTrk = [np.zeros(len(bins) - 1), np.zeros(len(bins) - 1)]\n MuEff_Trk_Sta = [np.zeros(len(bins) - 1), np.zeros(len(bins) - 1)]\n MuEff_Sta_Trk = [np.zeros(len(bins) - 1), np.zeros(len(bins) - 1)]\n\n ZMuMuEff = [np.zeros(len(bins) - 1), np.zeros(len(bins) - 1)]\n ZMuMuEff_sel = [np.zeros(len(bins) - 1), np.zeros(len(bins) - 1)]\n ZMuMuEff_glo = [np.zeros(len(bins) - 1), np.zeros(len(bins) - 1)]\n\n for i in range(0, len(bins)-1):\n bin_low = bins[i]\n bin_high = bins[i+1]\n sel = '{0}>{1} & {0}<{2}'.format(obs, bin_low, bin_high)\n\n ZReco_i = ZReco.query(sel)\n\n qHLT = 'muon_Category==5 & antiMuon_Category==5'\n qSel = '(muon_Category==5 & antiMuon_Category==4) | (muon_Category==4 & antiMuon_Category==5)'\n qGlo = '(muon_Category==5 & antiMuon_Category==3) | (muon_Category==3 & antiMuon_Category==5)'\n qSta = '(muon_Category==5 & antiMuon_Category==2) | (muon_Category==2 & antiMuon_Category==5)'\n qTrk = '(muon_Category==5 & antiMuon_Category==1) | (muon_Category==1 & antiMuon_Category==5)'\n\n nHLT = len(ZReco_i.query(qHLT))\n nSel = len(ZReco_i.query(qSel))\n nGlo = len(ZReco_i.query(qGlo))\n nSta = len(ZReco_i.query(qSta))\n nTrk = len(ZReco_i.query(qTrk))\n\n MuEff_HLT_Sel[0][i], MuEff_HLT_Sel[1][i] = muTagAndProbeEff(nHLT, 0, nSel)\n MuEff_Sel_Glo[0][i], MuEff_Sel_Glo[1][i] = muTagAndProbeEff(nHLT, nSel, nGlo)\n MuEff_Glo_StaOrTrk[0][i], MuEff_Glo_StaOrTrk[1][i] = muTagAndProbeEff(nHLT, nSel + nGlo, nSta + nTrk)\n MuEff_Trk_Sta[0][i], MuEff_Trk_Sta[1][i] = muTagAndProbeEff(nHLT, nSel + nGlo + nTrk, nSta)\n MuEff_Sta_Trk[0][i], MuEff_Sta_Trk[1][i] = muTagAndProbeEff(nHLT, nSel + nGlo + nSta, nTrk)\n\n ZMuMuEff[0][i], ZMuMuEff[1][i] = eff_ztomumu_old(nHLT, nSel, nGlo, nSta, nTrk)\n ZMuMuEff_sel[0][i], ZMuMuEff_sel[1][i] = eff_ztomumu_sel(nHLT, nSel, nGlo, nSta, nTrk)\n ZMuMuEff_glo[0][i], ZMuMuEff_glo[1][i] = eff_ztomumu_glo(nHLT, nSel, nGlo, nSta, nTrk)\n\n # full true Z efficiency\n ZAcc_i = ZAcc.query(sel)\n ZReco_i = ZReco.query(sel + '& (muon_Category==5 | antiMuon_Category==5) & muon_Category>=4 & antiMuon_Category>=4')\n\n nReco_i = len(ZReco_i)\n nAcc_i = len(ZAcc_i)\n nBoth_i = len(np.intersect1d(ZReco_i['LepPt'], ZAcc_i['LepPt']))\n\n ZEff[0][i], ZEff[1][i] = zEff(nReco_i, nAcc_i, nBoth_i)\n\n # true Z efficiency at selection level (without requiring trigger)\n ZAcc_i = ZAcc.query(sel)\n ZReco_i = ZReco.query(sel + ' & muon_Category>=4 & antiMuon_Category>=4')\n\n nReco_i = len(ZReco_i)\n nAcc_i = len(ZAcc_i)\n nBoth_i = len(np.intersect1d(ZReco_i['LepPt'], ZAcc_i['LepPt']))\n\n ZEff_sel[0][i], ZEff_sel[1][i] = zEff(nReco_i, nAcc_i, nBoth_i)\n\n # true Z efficiency at gobal muon level (requiring two global muons)\n ZAcc_i = ZAcc.query(sel)\n ZReco_i = ZReco.query(sel + '& muon_Category>=3 & antiMuon_Category>=3 ')\n\n nReco_i = len(ZReco_i)\n nAcc_i = len(ZAcc_i)\n nBoth_i = len(np.intersect1d(ZReco_i['LepPt'], ZAcc_i['LepPt']))\n\n ZEff_glo[0][i], ZEff_glo[1][i] = zEff(nReco_i, nAcc_i, nBoth_i)\n\n x = bins[:-1] + (bins[1:] - 
bins[:-1])/2\n\n def plot_zeff(eff_true, eff_tnp, name):\n plt.clf()\n fig, ax = plt.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [2, 1]})\n fig.subplots_adjust(hspace=0)\n\n ax[0].errorbar(x, eff_true[0], xerr=np.zeros(len(x)), yerr=eff_true[1], fmt='bo', label='true')\n ax[0].errorbar(x, eff_tnp[0], xerr=np.zeros(len(x)), yerr=eff_tnp[1], fmt='ro', label='tnp')\n ax[0].set(xlim=(bins[0], bins[-1]), ylim=(0.75, 1.1))\n ax[0].legend()\n ax[0].text(0.05, 0.9, r'$66\\ \\mathrm{GeV} < \\mathrm{M}_{\\mu\\mu} < 116\\ \\mathrm{GeV}$', transform=ax[0].transAxes)\n ax[0].text(0.05, 0.82, r'$p_\\mathrm{t}(\\mu) > 27\\ \\mathrm{GeV} \\qquad |\\eta(\\mu)| < 2.4$', transform=ax[0].transAxes)\n ax[0].text(0.05, 0.74, region, transform=ax[0].transAxes)\n ax[0].set_ylabel(r'$\\epsilon_\\mathrm{Z}$')\n ax[0].set_xlabel(obs)\n ax[0].set_yticks([0.8, 0.85, 0.9, 0.95, 1.0, 1.05])\n ax[0].set_title(\"Z efficiency at {0} muon level\".format(name))\n\n pulls = eff_tnp[0] - eff_true[0]\n pulls_sig = np.sqrt(eff_tnp[1]**2 + eff_true[1]**2)\n\n ax[1].errorbar(x, pulls, xerr=np.zeros(len(x)), yerr=pulls_sig, fmt='ko')# , label='factorized - true')\n ax[1].plot(x, np.zeros(len(x)), color='gray', linestyle='dashed')\n ax[1].set_ylim(-0.02, 0.04)\n ax[1].set_ylabel(r'$\\epsilon^\\mathrm{tnp}_\\mathrm{Z} - \\epsilon^\\mathrm{true}_\\mathrm{Z}$')\n ax[1].set_xlabel(obs)\n ax[1].set_yticks([-0.01, 0., 0.01, 0.02, 0.03])\n plt.savefig(output+'/ZMuMu_{0}_{1}_{2}_level.png'.format(obs, region, name))\n plt.close()\n\n plot_zeff(ZEff, ZMuMuEff, \"hlt\")\n plot_zeff(ZEff_sel, ZMuMuEff_sel, \"selection\")\n plot_zeff(ZEff_glo, ZMuMuEff_glo, \"global\")"
] | [
"0.5908855",
"0.54569936",
"0.5322768",
"0.5313147",
"0.5301625",
"0.53015983",
"0.5290556",
"0.52887326",
"0.528543",
"0.5255901",
"0.52539086",
"0.52324796",
"0.52108496",
"0.52033883",
"0.5170586",
"0.5168382",
"0.5167442",
"0.51477975",
"0.5144637",
"0.5129123",
"0.511288",
"0.5112267",
"0.51109254",
"0.5101862",
"0.50850797",
"0.5081848",
"0.5067126",
"0.5056362",
"0.5049779",
"0.5047296"
] | 0.68044555 | 0 |