Dataset columns:
  query: string (lengths 9 to 3.4k)
  document: string (lengths 9 to 87.4k)
  metadata: dict
  negatives: list (4 to 101 items)
  negative_scores: list (4 to 101 items)
  document_score: string (lengths 3 to 10)
  document_rank: string (102 distinct values)
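Assuming these columns, the rows below can be read with the Hugging Face `datasets` library. The following is a minimal sketch only: the repository id and split name are placeholders, not the real identifiers of this dataset.

from datasets import load_dataset

# Placeholder repository id and split; substitute the actual dataset location.
ds = load_dataset("your-org/code-retrieval-dataset", split="train")

row = ds[0]
print(row["query"])             # natural-language query (9 to 3.4k chars)
print(row["document"])          # positive code document (9 to 87.4k chars)
print(len(row["negatives"]))    # 4 to 101 hard-negative documents
print(row["negative_scores"])   # scores aligned one-to-one with `negatives`
print(row["document_score"], row["document_rank"])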
The user has a valid JWT but needs to log into this app. Do so here and return the status.
def jwt_login(request, jwt_payload):
    logger.debug("Logging user in via JWT. Is Authenticated? " + str(request.user.is_authenticated))
    request.session['profile'] = jwt_payload
    user = django_auth.authenticate(**jwt_payload)
    if user:
        login(request, user)
    else:
        logger.debug("Could not log user in.")
    return request.user.is_authenticated
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LoginCheck():\n jwt_data = get_jwt()\n if jwt_data['roles'] != 'admin':\n return jsonify(msg=\"Permission denied\"), Status.HTTP_BAD_FORBIDDEN\n\n identity = get_jwt_identity()\n if not identity:\n return jsonify({\"msg\": \"Token invalid\"}), Status.HTTP_BAD_UNAUTHORIZED\n\n data = {\"msg\": \"Loggeed In\"}\n json_response = json.dumps(data)\n return Response(json_response,\n status=Status.HTTP_OK_BASIC,\n mimetype='application/json')", "def _check_auth(self):\n if self.authToken:\n return True\n else:\n msg = \"you need to login\"\n self.raise_error(msg)", "def login():\n print(request.get_json())\n user = request.get_json()['username']\n passwd = request.get_json()['passwd']\n user_check = storage.get_user(User, user)\n if not user:\n return jsonify(message='missing value'), 401\n if not user_check:\n return jsonify(message='error'), 401\n if user == user_check.username and passwd == user_check.passwd:\n token = jwt.encode(\n {\n 'user_id': user_check.id,\n 'exp': datetime.utcnow() + timedelta(minutes=60)\n },\n current_app.config['SECRET_KEY']\n )\n token = token.decode('UTF-8')\n return jsonify(token=token), 200\n if user == user_check.username and passwd != user_check.passwd:\n return jsonify(message='authorization failed'), 403\n return jsonify(message='authorization failed'), 403", "def _validate_token(self):\n if not self.token:\n self.login()\n if not self.token:\n # TODO: create exception for this\n # Access is denied!!\n raise Exception(\"AccessDenied\")", "def check_user_and_login(self) -> Response:\n pass", "def _check_token_data(self, jwt_token_data):\n try:\n self.user = get_user_model().objects.get(pk=jwt_token_data['id'])\n except (TypeError, KeyError):\n return self.render_api_error_response('Not authenticated - Bad authorization header data', status=401)\n except get_user_model().DoesNotExist:\n return self.render_api_error_response('Not authenticated - User not found', status=401)\n self.jwt_token_data = jwt_token_data\n return None", "def login():\r\n if not request.is_json or 'phone_number' not in request.get_json() or 'password' not in request.get_json():\r\n return bad_request('Missing required data.')\r\n return login_user(request)", "def post(self):\n data=api.payload\n if UserModel.check_email_exist(data[\"email\"]):\n if UserModel.validate_password(data[\"email\"],data[\"password\"]):\n # After login successful\n uid=UserModel.get_user_id(data[\"email\"])\n token=create_access_token(identity=uid)\n return {\"token\":token}\n \n else:\n return {\"message\":\"incorrect Login Credentials\"}\n else:\n \n return {\"Message\":\"Incorrect Login credentials\"}", "def login():\n request_data = request.get_json()\n\n if User.authenticate(request_data['username'], request_data['password']):\n expiration = datetime.datetime.now() + datetime.timedelta(minutes=20)\n token = jwt.encode(\n {'exp': expiration},\n app.config['SECRET_KEY'],\n algorithm='HS256'\n ).decode()\n return jsonify({'token': token}), 200\n\n return Response(\n json.dumps({'error': 'Invalid username / password'}),\n 400,\n mimetype='application/json'\n )", "def authcheck():\n user = get_user()\n return jsonify({'current_identity': user.username})", "def login(self):\n if not request.is_json:\n return jsonify({\"msg\": \"Invalid JSON format in request\"}), 400\n\n username = request.json.get('username', None)\n password = request.json.get('password', None)\n\n if not username:\n return jsonify({\"msg\": \"Missing username\"}), 400\n if not password:\n return jsonify({\"msg\": \"Missing password\"}), 
400\n if username != 'admin' or password != '123456':\n return jsonify({\"msg\": \"Invalid username or password\"}), 401\n\n access_token = create_access_token(identity=username, expires_delta=timedelta(minutes=2))\n return jsonify(access_token=access_token), 200", "def post(self):\r\n try:\r\n\r\n data = request.get_json()\r\n user = user_login.find_by_username(data['username'])\r\n if user and safe_str_cmp(user.password, data['password']):\r\n access_token = create_access_token(\r\n identity=user.id, fresh=True)\r\n return {\r\n 'access_token': \"Bearer \" + access_token,\r\n }, 200\r\n return {\"message\": \"Invalid Credentials!\"}, 401\r\n except Exception as e:\r\n return {\"message\": str(e)}", "def login():\n if not request.is_json:\n return jsonify({\"msg\": \"Missing JSON in request\"}), 400\n\n username = request.json.get('username', None)\n password = request.json.get('password', None)\n\n user = get_user_by_username(username)\n\n if not user:\n return make_response(CONST_LOGIN_MSG, 401, {\n 'WWW-Authenticate': f'Basic realm=\"{CONST_REALM_MSG}\"'})\n\n if user.check_password(password):\n if user.is_admin:\n claims = {'is_admin': True}\n else:\n claims = {'is_admin': False}\n\n user.last_seen = dt.utcnow()\n db.session.add(user)\n db.session.commit()\n\n now = datetime.datetime.now(datetime.timezone.utc)\n access_expires = (now + jwt_config.access_expires).timestamp()\n refresh_expires = (now + jwt_config.refresh_expires).timestamp()\n\n result = dict(\n access_token=create_access_token(identity=username,\n user_claims=claims),\n access_expires=access_expires,\n refresh_expires=refresh_expires,\n refresh_token=create_refresh_token(identity=username),\n user=get_user_details(user)\n )\n\n return jsonify(dict(result)), 200\n\n return make_response(\n CONST_LOGIN_MSG,\n 401,\n {'WWW-Authenticate': f'Basic realm=\"{CONST_REALM_MSG}\"'})", "def post(self):\r\n post_data = request.get_json()\r\n try:\r\n user = models.User.query.filter_by(email=post_data.get('email')).first()\r\n if user and flask_bcrypt.check_password_hash(user.password_hash, post_data.get('password')):\r\n if auth_token:\r\n response = {\r\n 'status': 'success',\r\n 'message': 'Succesfully logged in.',\r\n 'data': {\r\n 'user_id': user.user_id\r\n }\r\n }\r\n return make_response(jsonify(response), 200)\r\n else:\r\n response = {\r\n 'status': 'failed',\r\n 'message': 'User/Password pair is incorrect.'\r\n }\r\n return make_response(jsonify(response), 404)\r\n except Exception as e:\r\n print(e) # TODO: log this\r\n response = {\r\n 'status': 'failed',\r\n 'message': 'Try again.'\r\n }\r\n return make_response(jsonify(response), 500)", "def check_user():\n token = request.headers['Authorization'].replace('Bearer ', '')\n return jsonify({\"access_token\": token}), 200", "def validate_login(self, login_submission: JsonDict) -> str:\n token = login_submission.get(\"token\", None)\n if token is None:\n raise LoginError(\n 403, \"Token field for JWT is missing\", errcode=Codes.FORBIDDEN\n )\n\n jwt = JsonWebToken([self.jwt_algorithm])\n claim_options = {}\n if self.jwt_issuer is not None:\n claim_options[\"iss\"] = {\"value\": self.jwt_issuer, \"essential\": True}\n if self.jwt_audiences is not None:\n claim_options[\"aud\"] = {\"values\": self.jwt_audiences, \"essential\": True}\n\n try:\n claims = jwt.decode(\n token,\n key=self.jwt_secret,\n claims_cls=JWTClaims,\n claims_options=claim_options,\n )\n except BadSignatureError:\n # We handle this case separately to provide a better error message\n raise LoginError(\n 
403,\n \"JWT validation failed: Signature verification failed\",\n errcode=Codes.FORBIDDEN,\n )\n except JoseError as e:\n # A JWT error occurred, return some info back to the client.\n raise LoginError(\n 403,\n \"JWT validation failed: %s\" % (str(e),),\n errcode=Codes.FORBIDDEN,\n )\n\n try:\n claims.validate(leeway=120) # allows 2 min of clock skew\n\n # Enforce the old behavior which is rolled out in productive\n # servers: if the JWT contains an 'aud' claim but none is\n # configured, the login attempt will fail\n if claims.get(\"aud\") is not None:\n if self.jwt_audiences is None or len(self.jwt_audiences) == 0:\n raise InvalidClaimError(\"aud\")\n except JoseError as e:\n raise LoginError(\n 403,\n \"JWT validation failed: %s\" % (str(e),),\n errcode=Codes.FORBIDDEN,\n )\n\n user = claims.get(self.jwt_subject_claim, None)\n if user is None:\n raise LoginError(403, \"Invalid JWT\", errcode=Codes.FORBIDDEN)\n\n return UserID(user, self.hs.hostname).to_string()", "def check_auth():", "def auth_token_api():\n data = request.get_json()\n if not data:\n response = jsonify({\n 'success': False,\n 'message': 'Missing request body'\n })\n response.status_code = 422\n return response\n\n # process argument\n login_type = data.get('auth_type')\n email = data.get('email').strip().lower()\n password = data.get('password')\n\n if not login_type or login_type not in ['email']:\n response = jsonify({\n 'success': False,\n 'message': 'Invalid auth_type'\n })\n response.status_code = 422\n return response\n\n # email authentication\n elif login_type == 'email':\n if not email:\n response = jsonify({\n 'success': False,\n 'message': 'Must provide email when auth_type is \"email\"'\n })\n response.status_code = 422\n return response\n user = db.session.query(User).filter(User.email == email, User.deleted == False).one_or_none()\n if not user:\n response = jsonify({\n 'success': False,\n 'message': 'Not Authorized: invalid email'\n })\n response.status_code = 403\n return response\n # check the user's password\n password_valid = check_password_hash(user.password, password)\n if not password_valid:\n response = jsonify({\n 'success': False,\n 'message': 'Not Authorized: invalid password'\n })\n response.status_code = 403\n return response\n\n token = generate_auth_token(user_id=user.user_id)\n response = jsonify({\n 'success': True,\n 'token': token\n })\n response.status_code == '200'\n return response", "def check_auth(cls, Configuration):\n if not Configuration.auth_token:\n cls.authorize(Configuration)", "def login():\n data = request.get_json()\n user = User.authenticate(**data)\n\n if not user:\n return jsonify({ 'message': 'Invalid credentials', 'authenticated': False }), 401\n \n token = jwt.encode(\n {\n 'exp': datetime.now() + timedelta(minutes=90),\n 'iat': datetime.now(),\n 'sub': user.user_id\n },\n current_app.config['SECRET_KEY'],\n algorithm='HS256')\n #print(token)\n user_id = data['user_id']\n user = User.query.get(user_id)\n return jsonify({ 'user': user.to_dict(), 'token': token.decode('UTF-8') }), 200", "def test_auth_code_negative(self, api):\n resp = api.login_user(\"QWERTY\", \"QWERTY\")\n assert resp.status_code == 400", "def _validate_jwt_token(self):\n # force https so that we don't send around tokens unsecurely\n url = 'https://{}/api/token/verify'.format(urlparse(self.base_url).netloc)\n \n # paranoid: check again that we only send the token to https\n if urlparse(url).scheme != \"https\":\n msg = 'This should not happen, please file a bug report.'\n raise Exception(msg)\n\n 
if not self.jwt_access_token:\n raise FDSNUnauthorizedException(\"Unauthorized, authentication \"\n \"required.\", )\n\n # convert to json\n data = json.dumps({\"token\": self.jwt_access_token})\n # encode\n data = bytes(data, \"utf-8\")\n headers = {\"Content-Type\": \"application/json\"}\n html = urllib_request.Request(url, data=data, headers=headers)\n # decode('utf-8')\n try:\n result = urllib_request.urlopen(html).read().decode(\"utf-8\")\n dic = json.loads(result)\n valid = not bool(dic)\n if self.debug:\n print('Valid token : {}'.format(valid))\n return valid\n except urllib_error.HTTPError as e:\n return False", "def post(self):\n try:\n body = request.get_json()\n user = User.objects.get(email=body.get('email'))\n authorized = user.check_password(body.get('password'))\n if not authorized:\n raise UnauthorizedError\n expires = datetime.timedelta(days=7)\n user_details = {\n \"user_id\": str(user.id),\n \"first_name\": user.first_name,\n \"last_name\": user.last_name\n }\n access_token = create_access_token(\n identity=user_details, expires_delta=expires)\n res = make_response({\n \"response\": \"You have logged in successfully.\",\n 'token': access_token,\n 'status': 200\n }, 200)\n set_access_cookies(res, access_token)\n return res\n except (UnauthorizedError, DoesNotExist):\n raise UnauthorizedError\n except Exception as e:\n raise InternalServerError", "def login_to_api(self):\n\n # set the API endpoint and POST the username/password to it\n endpoint = app.config['API']['url'] + 'login'\n response = requests.post(\n endpoint,\n verify = app.config['API']['verify_ssl'],\n json = {\n 'username': self.username,\n 'password': self.password\n }\n )\n\n # if the response is good, return True\n if response.status_code == 200:\n user = response.json()\n self._id = ObjectId(user['_id'])\n self.token = user['access_token']\n return True", "def login():\n if not request.is_json:\n return jsonify(msg.MISSING_JSON), 400\n req = request.get_json()['user']\n\n useremail = req.get('email', None)\n password = req.get('password', None)\n\n if not useremail or not password:\n return jsonify(msg.MISSING_PARAMETER), 400\n\n # Verify password and email\n check = views.UserManagement().check(email=useremail, password=password)\n\n if not check:\n return jsonify(msg.UNREGISTERED), 400\n\n # Identity can be any data that is json serializable\n expires = datetime.timedelta(days=2)\n access_token = create_access_token(identity=useremail, fresh=True, expires_delta=expires)\n return jsonify(access_token=access_token), 200", "def login():\n login_hex = request.json.get(\"authentication\")\n if not login_hex:\n return jsonify({\"code\": \"1\", \"type\": \"user\"})\n\n qr_code_password = app.config[\"QRCODE_PASSWORD\"]\n\n if login_hex != qr_code_password:\n return jsonify({\"code\": \"3\"})\n \n jwt_token = generate_token({\"id\": generate_id()})\n\n return jsonify({\"code\": \"0\", \"token\": jwt_token})", "def login():\n errors = None\n try:\n request_data = json.loads(request.data)\n validate_dict_with_schema(request_data, \"user/register/request\")\n user = User.check_user(\n email=request_data[\"email\"],\n password=request_data[\"password\"]\n )\n if user:\n session[\"user_id\"] = user.id\n return json_response(\n status=200,\n response_data={\n \"success\": True, \"data\": {\"user\": user.serialize()}\n }\n )\n except (TypeError, ValueError):\n errors = [\"Invalid JSON\"]\n except ValidationError as e:\n errors = e.message\n\n if errors:\n return json_response(\n status=400, response_data={\"success\": 
False, \"errors\": errors}\n )\n\n return json_response(\n status=401,\n response_data={\"success\": False, \"errors\": [\"Invalid email/password\"]}\n )", "def test_failing_auth_token_for_invalid_user(self):\n\n response = self.client.post(\n \"/auth/login\",\n data=dict(username='random_test_user', password='random_test_password')\n )\n self.assertEqual(response.status_code, 401)", "def check(self):\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n\n user_session = self.get()\n user = self.get_user()\n\n return user is not None and us.verify_auth_token(user_session.token, config.SESSION_EXPIRES)", "def error_on_unauthorized():\n\n username = get_jwt_identity()\n user = Login.query.filter_by(username=username).first()\n\n if user is None:\n raise APIError(400, \"User {username} does not exist on this server\".format(username=username))\n elif user.role is not Role.admin:\n raise APIError(401, \"Only administrators have access to this page\")", "def test_loggin_required(self):\n response = self.client.get(RESGATE_URL)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def login() -> Any:\n user_dict = UserSchema().load(\n request.json, partial=(\"id\", \"qualifications\") + PERMISSIONS\n )\n username = user_dict[\"username\"]\n password = user_dict[\"password\"]\n\n if is_password_correct(username, password):\n user = fetch_user(username)\n session[\"user_id\"] = user[\"id\"]\n response = make_response(user)\n response.set_cookie(\"is_authenticated\", \"1\")\n return response\n\n raise APIError(reason=\"invalid_user_or_password\", status_code=403)", "def post(self):\n\n args = login_parser.parse_args()\n\n user = User.query.filter_by(email=args['email']).first()\n if user and user.authenticate_password(args['password']):\n access_token = user.generate_token(user.uuid)\n if access_token:\n response = {\n 'message': 'You logged in successfully.',\n 'status': 'Logged in!',\n 'token': access_token.decode()\n }\n return response, 200\n else:\n response = {\n 'message': 'Invalid email or password, Please try again',\n 'status': 'Login Failed'\n }\n return response, 401", "def test_auth_code_positive(self, api):\n self.builder.add_user(api.get_user())\n resp = api.login_user(api.get_user().username, api.get_user().password)\n self.builder.del_user(api.get_user())\n assert resp.status_code == 200", "def api_auth():\n form = request.get_json(force=True)\n userdata = None\n if form['register']:\n userdata = userProvider.register_user(\n form['username'].encode('utf8'),\n form['password'].encode('utf8')\n )\n else:\n userdata = userProvider.load_authenticated_user(\n form['username'].encode('utf8'),\n form['password'].encode('utf8')\n )\n if userdata:\n user = userProvider.userdata_to_user(userdata)\n flask_login.login_user(user)\n return \"true\"\n raise Exception(\"No user loaded\")", "def sign_in_existing_user(self, email, password):\r\n signin_url = \"https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword?key=\" + self.wak\r\n signin_payload = {\"email\": email, \"password\": password, \"returnSecureToken\": True}\r\n signin_request = requests.post(signin_url, data=signin_payload)\r\n sign_up_data = json.loads(signin_request.content.decode())\r\n app = App.get_running_app()\r\n print(signin_request.ok)\r\n print(signin_request.content.decode())\r\n\r\n if signin_request.ok == True:\r\n refresh_token = sign_up_data['refreshToken']\r\n localId = sign_up_data['localId']\r\n idToken = sign_up_data['idToken']\r\n\r\n # Save refreshToken to a file\r\n 
with open(\"refresh_token.txt\", \"w\") as f:\r\n f.write(refresh_token)\r\n\r\n # Save localId to a variable in main app class\r\n # Save idToken to a variable in main app class\r\n app.local_id = localId\r\n app.id_token = idToken\r\n\r\n # Create new key in database from localI\r\n #app.change_screen(\"sandwiches\")\r\n app.on_start()\r\n elif signin_request.ok == False:\r\n error_data = json.loads(signin_request.content.decode())\r\n error_message = error_data[\"error\"]['message']\r\n app.root.ids['login'].ids['login_message'].text = \"EMAIL EXISTS - \" + error_message.replace(\"_\", \" \")", "def login():\n username = request.json.get('username')\n password = request.json.get('password')\n\n if verify_password(username, password):\n token = g.user.generate_auth_token()\n status = \"token generated successfully\"\n else:\n status = \"Invalid username or password\"\n token = None\n\n return {'status': status, 'token': token}", "def status():\n response_object = {\n 'status': 'fail',\n 'msg': 'Provide a valid auth token.'\n }\n current_user = get_jwt_identity()\n user = User.query.filter_by(email=current_user).first()\n response_object['status'] = 'success'\n response_object['msg'] = 'Success.'\n response_object['data'] = user.to_json()\n return response_object, 200", "def login():\n data = request.get_json()\n if 'username' in data and 'password' in data:\n username = data['username']\n password = data['password']\n access_token = authenticate(username, password)\n if access_token is not None:\n print('access token: ' + access_token)\n return jsonify({'access_token': access_token})\n else:\n abort(403)\n else:\n abort(400)", "def login():\n errors = check_login_keys(request)\n if errors:\n return raise_error(400, \"Invalid {} key\".format(', '.join(errors)))\n details = request.get_json()\n email = details['email']\n password = details['password']\n\n user = json.loads(UsersModel().get_email(email))\n if user:\n password_db = user['password']\n if check_password_hash(password_db, password):\n expires = datetime.timedelta(days=365)\n token = create_access_token(identity=email, expires_delta=expires)\n refresh_token = create_refresh_token(identity=email, expires_delta=expires)\n return make_response(jsonify({\n \"status\": \"200\",\n \"message\": \"Successfully logged in!\",\n \"token\": token,\n \"refresh_token\": refresh_token,\n \"user\": user\n }), 200)\n return make_response(jsonify({\n \"status\": \"401\",\n \"message\": \"Invalid Email or Password\"\n }), 401)\n return make_response(jsonify({\n \"status\": \"401\",\n \"message\": \"Invalid Email or Password\"\n }), 401)", "def login():\n req = flask.request.get_json(force=True)\n username = req.get('username', None)\n password = req.get('password', None)\n user = guard.authenticate(username, password)\n ret = {'access_token': guard.encode_jwt_token(user)}\n return ret, 200", "def login_require(request):\n\n if request.method == \"GET\":\n data = request.GET\n else:\n data = request.POST\n user = authenticate(username=data[\"username\"], password=data[\"password\"])\n if user and user.is_active:\n ret = Response(SUCCESS, error_code[SUCCESS])\n else: \n ret = Response(AUTHENTICATION_FAIL, error_code[AUTHENTICATION_FAIL])\n return HttpResponse(ret.serialize(f))\n\n # Generate a token for authentication\n token = token_generator(30)\n try:\n user_token = Token.objects.get(username=data[\"username\"])\n user_token.token = token\n user_token.start_time = datetime.now()\n except: \n user_token = Token(token=token, 
username=data[\"username\"])\n user_token.save()\n ret.set_ret(\"auth_token\", token) \n user = User.objects.get(username=data[\"username\"])\n ret.set_ret(\"data\", UserSerializer(user.appuser).serialize())\n return HttpResponse(ret.serialize(f))", "def test_getting_authentication_token_for_valid_user(self):\n\n response = self.client.post(\n \"/auth/login\",\n data=dict(username=self.test_user, password=self.test_password)\n )\n length = len(self.user_token)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(self.user_token), unicode)\n self.assertGreater(length, 100)", "def test_login(self):\n response = self.client.post(\"/login\", json=self.payload)\n\n self.assertEqual(response.status_code, 200)\n self.assertIsNotNone(response.json.get(\"access_token\"))\n self.assertIsNotNone(response.json.get(\"refresh_token\"))", "def test_jwt_login_with_expired_token(self):\n payload = utils.jwt_payload_handler(self.user)\n payload['exp'] = 1\n token = utils.jwt_encode_handler(payload)\n\n auth = 'Bearer {0}'.format(token)\n\n response = self.client.post(\n '/auth-token/',\n json.dumps(self.data),\n content_type='application/json',\n HTTP_AUTHORIZATION=auth\n )\n\n response_content = json.loads(smart_text(response.content))\n\n decoded_payload = utils.jwt_decode_handler(response_content['token'])\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(decoded_payload['username'], self.username)", "def logged_in(handler):\n\n def check_login(self, *args, **kwargs):\n jwtToken = self.request.headers.get('Authorization', None)\n\n if jwtToken:\n # validate token\n try:\n userToken = jwt.decode(\n jwtToken,\n defaultConfig.config['jwt_secret'],\n algorithms=[defaultConfig.config['jwt_algorithms']],\n issuer=defaultConfig.config['app_name']\n )\n except (jwt.DecodeError, jwt.ExpiredSignatureError), e:\n self.responseJSON('TOKEN_EXPIRED', **{\n 'data': [str(e)]\n })\n return\n\n # check token created time and changed password time, if < -> token invalid\n try:\n myUser = UserModel.get_by_id(userToken['id'])\n assert myUser is not None\n except:\n self.responseJSON('DATA_NOTFOUND')\n return\n\n if myUser.date_change_password != None:\n if Helper.timestampToDatetime(userToken['iat']) < myUser.date_change_password:\n self.responseJSON('TOKEN_INVALID_TIME')\n return\n\n # check user logged out\n if myUser.status == UserModel.STATUS_LOGOUT:\n self.responseJSON('TOKEN_INVALID')\n return\n\n # authorization system\n else:\n self.abort(403)\n\n return handler(self, userToken, *args, **kwargs)\n return check_login", "def post(self):\n data = request.json\n print data.get('email')\n print data.get('password')\n if(login(data)):\n return \"User successfully logged in\", 200\n else:\n return \"Invalid Username/Password\", 400", "def auth_error():\n return unauthorized('Invalid credentials')", "def users_login(self):\n try:\n assert request.is_json\n except AssertionError:\n self.logger.debug(messages.REQUEST_IS_NOT_JSON)\n return messages.ERROR_JSON % messages.REQUEST_IS_NOT_JSON, 400\n content = request.get_json()\n if not LOGIN_MANDATORY_FIELDS.issubset(content.keys()):\n self.logger.debug((messages.MISSING_FIELDS_ERROR % (LOGIN_MANDATORY_FIELDS - set(content.keys()))))\n return messages.ERROR_JSON % (\n messages.MISSING_FIELDS_ERROR % (LOGIN_MANDATORY_FIELDS - set(content.keys()))), 400\n try:\n login_dict = self.auth_server.user_login(email=content[\"email\"], plain_password=content[\"password\"])\n except InvalidCredentialsError:\n 
self.logger.debug(messages.WRONG_CREDENTIALS_MESSAGE)\n return messages.ERROR_JSON % messages.WRONG_CREDENTIALS_MESSAGE, 403\n except UnexistentUserError:\n self.logger.debug(messages.USER_NOT_FOUND_MESSAGE % content[\"email\"])\n return messages.ERROR_JSON % (messages.USER_NOT_FOUND_MESSAGE % content[\"email\"]), 404\n\n if \"notification_token\" in content:\n self.notification_database.set_notification_token(content[\"email\"], content[\"notification_token\"])\n\n return json.dumps(login_dict), 200", "def validate_auth():\n try:\n token = oidc.get_access_token()\n except TypeError:\n # raised when the token isn't accessible to the oidc lib\n raise Unauthorized(\"missing auth token\")\n\n if not oidc.validate_token(token):\n terminate_session()\n raise Unauthorized(\"invalid auth token\")\n return token", "def test_token(self):\n api_response = requests.get(self.api_config.get_api_url() + \"greetings/isloggedin\",\n headers={\"Authorization\": \"Bearer \" + self.API_TOKEN})\n\n if api_response.status_code == 401 or 403:\n return False\n else:\n return True", "def login():\n email = request.get_json().get('email')\n password = request.get_json().get('password')\n\n if email is None or password is None:\n message = {'message': 'ensure that email field or password field is present'}\n return message, status.HTTP_400_BAD_REQUEST\n\n try:\n # Get the user object using their email (unique to every user)\n user = Users.check_user(email)\n profile = user.get_full_names()\n \n # Try to authenticate the found user using their password\n if user and user.verify_password(password):\n # Generate the access token. This will be used as the authorization header\n \n access_token = user.generate_token(user.id)\n print('--', access_token)\n if access_token:\n response = {\n 'message': 'You logged in successfully.',\n 'access_token': access_token.decode(),\n 'user': profile\n }\n return response, status.HTTP_200_OK\n\n # else user does not exist. 
Return error message\n response = {'message': 'Invalid Email or Password, Please Try again'}\n return response, status.HTTP_401_UNAUTHORIZED\n\n except Exception as error:\n # Create a response containing an string error message\n response = {\n 'message': str(error)\n }\n return response, status.HTTP_500_INTERNAL_SERVER_ERROR", "def test_login_valid_user(_db, client, registered_user, new_user):\n rv = client.post(\"/auth/login/\", json=new_user)\n response = rv.get_json()\n\n assert rv.status_code == HTTPStatus.OK\n assert response.get(\"access_token\")", "def login_user_fails(self):\n response = self.client.post(self.login_url,\n self.invalid_user_login_details, format='json')\n return response", "def test_non_registered_user_login(self):\n with self.client:\n response = self.client.post(\n '/api/v1/auth/login',\n data=json.dumps({\n \"password\": \"qwerty@123\",\n \"username\": \"EdwinKyato\"\n }),\n content_type='application/json'\n )\n print(response.data)\n data = json.loads(response.data.decode())\n print(response.status_code)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 400)", "def login_user_successfull(self):\n response = self.client.post(self.login_url,\n self.valid_user_login_details, format='json')\n return response", "def test_login_login_inexisting_user_false(self):\n logins = {\n \"Email\": \"[email protected]\",\n \"Password\": \"pass1234\"\n }\n resp = self.client().post('/api/v1/auth/login', data=logins)\n self.assertEqual(resp.status_code, 400)\n resp = resp.get_json()\n self.assertEqual(resp['error'],\n 'User not found in our database')", "def login():\n req = request.get_json(force=True)\n username = req.get('username', None)\n password = req.get('password', None)\n user = guard.authenticate(username, password)\n ret = {'access_token': guard.encode_jwt_token(user)}\n return ret, 200", "def post(self, request):\n\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n data = ((serializer.validated_data)['auth_token'])\n return BaseApiView.sucess_200(\"User Logged in successfully.\",\n **data)", "def check_authentication():\r\n\r\n #TODO: Reservation based authentication\r\n try:\r\n authenticated_user()\r\n except Exception as e:\r\n return e\r\n\r\n return True", "def test_non_registered_user_login(self):\n response = self.client_app.post(\n '/api/v1/auth/login/',\n data=json.dumps(dict(\n email='[email protected]',\n password='123456'\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['Error'] == 'Email not registered')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 404)", "def login():\n\n\n params = request.get_json()\n username = params.get('username', None)\n password = params.get('password', None)\n\n if not username:\n return jsonify({\"msg\": \"Missing username parameter\"}), Status.HTTP_BAD_REQUEST\n if not password:\n return jsonify({\"msg\": \"Missing password parameter\"}), Status.HTTP_BAD_REQUEST\n\n # TODO Check from DB here\n if (username == 'admin' or username == 'user') and password == 'admin':\n logger.info('Logged in %s', username)\n else:\n return jsonify({\"msg\": \"Bad username or password\"}), Status.HTTP_BAD_UNAUTHORIZED\n # Identity can be any data that is json serializable\n # TODO: rather than passing expiry time here explicitly, decode token on client side. 
But I'm lazy.\n ret = {'jwt': create_jwt(identity=username), 'exp': datetime.utcnow() + current_app.config['JWT_EXPIRES']}\n return jsonify(ret), 200", "def test_login_request_with_an_incorrect_authentication(self):\n user_res = self.ph.create_user(self.test_user_name, self.test_user_password)\n self.assertEqual(user_res.status_code, status.HTTP_201_CREATED)\n res = self.test_client.get(\n url_for('api.featurelistresource', _external=True))\n self.assertTrue(res.status_code == status.HTTP_401_UNAUTHORIZED)", "def login():\n response = jsonify(), 401\n if request.method == 'POST' and request.get_json():\n data = request.get_json()\n valid = validator.user_login(data)\n\n if valid['status']:\n user = ecomap_user.get_user_by_email(data['email'])\n if user and user.verify_password(data['password']):\n login_user(user, remember=True)\n response = jsonify(id=user.uid,\n name=user.first_name,\n surname=user.last_name,\n role=user.role, iat=\"???\",\n token=user.get_auth_token(),\n email=user.email)\n if not user:\n logger.warning('if not user')\n response = jsonify(error='There is no user with given email.',\n logined=0, ), 401\n elif not user.verify_password(data['password']):\n logger.warning('if not user verify')\n response = jsonify(error='Invalid password, try again.',\n logined=0), 401\n else:\n response = Response(json.dumps(valid),\n mimetype='application/json'), 400\n return response", "def log_in(jwt):\n return current_app.library_registry.admin_controller.log_in(jwt)", "def authenticate():\n if request.environ['PATH_INFO'] == \"/notification\":\n user = getUser()\n \n if user is None:\n raise HTTPResponse(body=\"Forbidden\", status=403)\n \n try:\n if authz.login(user):\n logging.info('Login success: %s', user.username)\n return\n except IOError:\n raise HTTPResponse(body=\"Error reading user file\", status=400)\n except Exception as e:\n raise HTTPResponse(body=\"Unexpected error\", status=400)\n \n raise HTTPResponse(body=\"Invalid username or password\", status=401)", "def test_jwt_login_json_bad_creds(self):\n self.data['password'] = 'wrong'\n\n response = self.client.post(\n '/auth-token/',\n json.dumps(self.data),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, 400)", "def post(self):\n data = request.get_json()\n is_verified = actions.verify(data['username'], data['password'])\n if not is_verified:\n abort(404, message='A user with matching credentials does not exist.')\n else:\n token = actions.create_token(data['username'], data['password'])\n token = token.decode('utf-8')\n return{'token': token}, 200\n pass", "def login(self):\r\n user_account = db.find_one({\"cpr_number\": request.form.get(\"CPR\")})\r\n if user_account is not None:\r\n if self.verify_password(user_account[\"password\"], request.form.get(\"password\")):\r\n return self.start_session(user_account)\r\n return jsonify({\"error\": \"Invalid login credentials\"}), 401", "def user_logged_in():\n if not session.get('user_id'):\n return \"nope\", 401\n else:\n return \"yep\", 200", "def auth(request):\n\n if('username' in request.POST and 'password' in request.POST):\n user = authenticate(\n username=request.POST['username'],\n password=request.POST['password'],\n )\n if(user):\n token = Token(user = user)\n token.generateToken()\n token.save()\n data = {}\n data['token'] = token.value\n data['expiry'] = token.expiry\n data['valid'] = token.isValid()\n return JsonResponse(data) \n \n return HttpResponseBadRequest('Username and password must be supplied')", "def post(self, 
request):\n if 'username' in self.request.data or 'password' in self.request.data:\n username = request.data['username']\n if username.find('@') != -1:\n email = request.data['username']\n password = request.data['password']\n user = None\n if User.objects.filter(email=email, is_active=True).exists():\n obj_user = User.objects.filter(email=email,\n is_active=True).last()\n user = authenticate(username=obj_user.username,\n password=password)\n if user:\n login(request, user)\n else:\n return Response({'status': False})\n else:\n username = request.data['username']\n password = request.data['password']\n user = None\n if User.objects.filter(username=username, is_active=True).exists():\n obj_user = User.objects.filter(username=username,\n is_active=True).last()\n user = authenticate(username=obj_user.username,\n password=password)\n if user:\n login(request, user)\n else:\n return Response({'status': False})\n if user:\n # Token Logic here\n person_id = Person.objects.get(user=user).id\n return Response({'status': True, 'person_id': person_id})\n return Response({'status': False})", "def test_non_registered_user_login(self):\n response = self.client.post(\n '/api/v1/auth/login',\n data=json.dumps(dict(\n username='jimmy',\n password='123456'\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 404)", "def _login(self):\n url = self.server_url + '/api/v4/users/login'\n login_data = json.dumps({'login_id': self._user_id,\n 'password': self._user_pass})\n LOG.debug(\"Sending: %s\", login_data)\n response = self._request(self._session.post, url, data=login_data)\n LOG.debug(\"Received: %s\", response.json())\n\n if response.status_code != 200:\n raise RuntimeError(\"Cannot login. Server reported: %s\"\n % response.content)", "def loginAttempt(request):\n\n userid = request.unauthenticated_userid\n\n if userid:\n\n # TODO: Convert USERS to database object instead of local dictionary. 
If necessary.\n\n if userid in USERS:\n user = USERS[userid]\n\n else:\n user = _create_user(userid)\n\n if user.check_token(userid):\n headers = remember(request, userid)\n url = request.route_url('home', _app_url=get_app_url(request))\n return HTTPFound(location=url, headers=headers)\n\n url = request.route_url('failed', _app_url=get_app_url(request))\n return HTTPFound(location=url)", "def check_authentication(self):\n try:\n cookies = os.environ['HTTP_COOKIE'].split('; ')\n except KeyError:\n cookies = []\n for c in cookies:\n prefix = Auth.AUTH_COOKIE_NAME + '='\n if (c.startswith(prefix) and\n self.is_authentication_token(c[len(prefix):])):\n return True\n print 'Status: 403 Forbidden'\n print 'Content-Type: application/json'\n print self.logout_headers()\n print json.JSONEncoder().encode({'error': 'Not authenticated.'})\n sys.exit(1)", "def authenticate():\n return abort(401)", "def test_authenticate_success(app, client, session, models):\n user = models[\"user\"][0]\n response = client.post(\n \"/authenticate/local\",\n data={\"email\": user.email, \"password\": \"hunter2\"},\n )\n assert response.status_code == 200\n raw_jwt_token = json.loads(response.data)[\"jwt\"]\n\n returned_claims = jwt.decode(\n raw_jwt_token, app.config[\"RSA_PUBLIC_KEY\"], app.config[\"ALGORITHM\"],\n )\n del returned_claims[\"exp\"]\n assert user.claims == returned_claims", "def checkLogin():\n if 'access_token' in login_session:\n return True\n else:\n return False", "def test_valid_user_login(client, existing_user):\n payload = {\"username\": existing_user[\"email\"], \"password\": existing_user[\"password\"]}\n response = client.post(\"/auth/jwt/login\", data=payload)\n assert response.status_code == 200", "def test_invalid_user_login(client, existing_user):\n payload = {\"username\": existing_user[\"email\"], \"password\": \"5678\"}\n response = client.post(\"/auth/jwt/login\", data=payload)\n assert response.status_code == 400", "def authenticate():\n # Get JSON data from request\n json = request.get_json()\n\n if 'email' not in json or 'password' not in json:\n raise CustomError(400, message='Must include an email and a password')\n\n # Check email\n user = User.query.filter_by(email=json['email']).first()\n if user is None:\n raise CustomError(401, message='Email or password were not found.')\n\n # Check password\n if not check_password_hash(user.password, json['password']):\n raise CustomError(401, message='Email or password were not found.')\n\n return jsonify({'success': True, 'user': user.to_dict()}), 201", "def test_jwt_login_json_missing_fields(self):\n response = self.client.post(\n '/auth-token/',\n json.dumps({'username': self.username}),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, 400)", "def login_post():\n logger.debug(\"entering function login_post\")\n response = check_user_credentials(request.json)\n logger.debug(\"exiting function login_post\")\n return jsonify(response)", "def auth_user():\n global token\n app.logger.info(\"Microsoft Planner Service running on /auth port as expected\")\n try:\n request_count = 0\n if request_count == 0:\n token = get_tokens_as_app(client_id, user_code_info, tenant_id)\n request_count = 1 \n if 'access_token' in token:\n app.logger.info('Adding access token to cache...')\n add_token_to_cache(client_id, tenant_id, token)\n return_object = (f\"{token['refresh_token']}\")\n return render_template('token.html', return_object=return_object)\n else:\n return_error = (\"Token response did not result in a proper 
response. Athenticate again please.\")\n return render_template('token.html', return_error=return_error)\n except AttributeError or TypeError:\n return_error = ('Authentification failed. Please pull and restart your system and authenticate again.')\n return render_template('token.html', return_error=return_error)\n except adal.AdalError as err:\n return_error = (\"You're logged in with the wrong user. Please log out and authenticate again.\")\n return render_template('token.html', return_error=return_error)", "def token_auth_error():\n logger.debug(\"Token authentication failed.\")\n return unauthorized(\"Invalid credentials.\")", "def login():\n token = request.form.get('idtoken')\n if verify_token(token):\n session['logged_in'] = True\n return '', 204\n else:\n return '', 401", "def __token_is_valid(self):\n\n if not self.__login_token or len(self.__login_token) < 10:\n # Token is not set or totally invalid\n return False\n\n try:\n jwt.decode(self.__login_token, verify = False)\n return True\n except:\n # Most likely the token is expired as `exp` is in the past\n return False", "def login(request):\n if request.method != 'POST':\n return JsonResponse(\n {\"detail\": 'Method {} not allowed.'.format(request.method)}, status=status.HTTP_405_METHOD_NOT_ALLOWED\n )\n username = request.POST['username']\n password = request.POST['password']\n if username == 'admin' and password == 'admin':\n payload = {\n \"username\": username,\n \"exp\": time.time() + 300,\n\n }\n token = encode(payload, JWT_KEY, algorithm=\"HS256\")\n return JsonResponse({\"access_token\": token}, status=status.HTTP_200_OK)\n else:\n return JsonResponse({\"detail\": 'Invalid username or password'}, status=status.HTTP_401_UNAUTHORIZED)", "def test_login_user(self):\n\n self.register_user()\n\n self.assertEqual(self.login_user().status_code, 200)\n self.assertTrue(self.login_user().json[\"data\"][0][\"token\"])", "def login_user(self):\n return self.client.post(self.login_url,\n self.valid_user_login_details, format='json')", "def test_api_can_login_a_user(self):\n self.user.is_active = True\n self.user.is_email_verified = True\n self.user.save()\n self.response = self.client.post(\n \"/api/users/login/\",\n self.login_data,\n format=\"json\")\n self.assertEqual(status.HTTP_200_OK, self.response.status_code)\n self.assertIn('token', self.response.data)", "def post(self):\n\t\tobj = request.get_json()\n\n\t\tif ('username' not in obj) or ('session' not in obj):\n\t\t\treturn {\"status\":\"MISSING_PARAMS\"}\n\t\t\n\t\tstatus = authenticate(obj['username'], obj['session'])\n\t\tif status:\n\t\t\treturn {'status':'AUTH_OK'}\n\t\telse:\n\t\t\treturn {'status':'AUTH_FAIL'}", "def jwt_required(self) -> None:\n if not self._TOKEN:\n raise HTTPException(status_code=401,detail=\"Missing Authorization Header\")\n\n if self.get_raw_jwt()['type'] != 'access':\n raise HTTPException(status_code=422,detail=\"Only access tokens are allowed\")", "def authenticate_user():\n if request.headers['content-type'] == 'application/json':\n print(request)\n data = request.get_json()\n if data:\n username = data['username']\n password = data['password']\n else:\n return Response(status=400) # no JSON to parse\n\n if username is None or password is None:\n return Response(status=400) # missing arguments\n\n if not verify_password(username, password):\n return Response(status=403) # User not authenticated\n\n return jsonify({'username': username, 'success': True}), 201\n else:\n print(\"invalid request type, no json\")\n return Response(status=400) # 
invalid request type", "def check_login(self):\n # read token first\n user_data = self.storage.get_user_data(self.user_id)\n if not \"token\" in user_data:\n sys.exit(\"SEPIA account: No user data found! Please generate a token first (python -m sepia.account --id=[sepia-user-id] --host=[sepia-server-url]).\")\n\n # check token\n token = user_data[\"token\"]\n url = self.host_address + \"/assist/authentication\"\n payload = {\n 'action' : \"check\",\n 'client' : self.client_info,\n 'KEY' : (self.user_id + \";\" + token)\n }\n headers = {\n 'Content-Type': \"application/json\"\n }\n response = requests.request(\"POST\", url, json=payload, headers=headers)\n try:\n res = json.loads(response.text)\n except NameError:\n res = None\n\n if res[\"result\"] and res[\"result\"] == \"success\":\n name = res[\"user_name\"][\"nick\"] or res[\"user_name\"][\"first\"]\n print(\"SEPIA account: Success - Wb \" + name + \", your login token is still valid.\")\n else:\n print(\"SEPIA account: Failed - I think the token is invalid or we got connection problems.\")", "def authenticate_user(data):\n \n try:\n auth_token = data[\"auth_token\"]\n user_token = Token.objects.get(username=data[\"username\"])\n if user_token.token == auth_token:\n return True\n except:\n return False\n return False", "def test_user_login_when_no_data_provided(self):\n\n response= self.client.post('/api/v1/login')\n data = response.get_json()\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data['status'], 400)\n self.assertEqual(data['message'], 'No data has provided! Please put your login credentials')", "def login(request):\n if request.method == 'POST':\n parsed_body = json.loads(request.body)\n username = parsed_body[\"username\"]\n password = parsed_body[\"password\"]\n\n if username is not None and password is not None:\n try:\n user = auth.authenticate(username=username, password=password)\n except Exception as exception:\n print exception\n if user is not None:\n if user.is_active:\n try:\n token, _ = Token.objects.get_or_create(user=user)\n return json_response({\n 'token': token.token,\n 'username': user.username\n })\n except Exception as exception:\n print exception\n else:\n return json_response({\n 'error': 'Invalid User'\n }, status=400)\n else:\n return json_response({\n 'error': 'Invalid Username/Password'\n }, status=400)\n else:\n return json_response({\n 'error': 'Invalid Data'\n }, status=400)\n elif request.method == 'OPTIONS':\n return json_response({})\n else:\n return json_response({\n 'error': 'Invalid Method'\n }, status=405)", "def login():\n data = None\n response = {\n 'status': 400,\n 'error': 'Provide email and password as json.'\n }\n try:\n data = request.get_json()\n except:\n return jsonify(response), 400\n\n if not data:\n return jsonify(response), 400\n\n user_data = {\n 'email': data.get('email'),\n 'password': data.get('password')\n }\n valdiator_result = Validator.validate_user(user_data)\n if isinstance(valdiator_result, dict):\n return jsonify(valdiator_result), valdiator_result['status']\n if isinstance(valdiator_result, bool) and valdiator_result:\n result = politico.login_user(user_data)\n\n response = {}\n if result == 'Invalid credentials':\n # notify the user that there was an error.\n response['status'] = 401\n response['error'] = 'Invalid credentials'\n elif isinstance(result, bytes):\n # return a response notifying the user that they logged in successfully\n response['status'] = 200\n response['data'] = []\n response['data'].append({\n 'message': 'Successfull log 
in',\n 'auth_token': result.decode()\n })\n return make_response(jsonify(response), response['status'])" ]
[ "0.7635263", "0.7127661", "0.6985085", "0.6958775", "0.68365085", "0.6748926", "0.6729194", "0.6707417", "0.6696781", "0.6696681", "0.6647693", "0.66302025", "0.6625759", "0.66238236", "0.6614123", "0.66123486", "0.65989953", "0.659169", "0.6581054", "0.6579671", "0.6572046", "0.6550333", "0.65353113", "0.65177965", "0.65067816", "0.65015644", "0.64907146", "0.6485187", "0.64820784", "0.6462474", "0.64617467", "0.64540434", "0.6450004", "0.6448597", "0.6446054", "0.64388007", "0.6433203", "0.642113", "0.64162374", "0.64061767", "0.6405958", "0.6404273", "0.63951707", "0.6395054", "0.6389132", "0.63861006", "0.63855284", "0.6384013", "0.6378812", "0.63785344", "0.63672984", "0.63669163", "0.63662183", "0.63650274", "0.63527", "0.6351722", "0.63483936", "0.63481987", "0.6340694", "0.6335924", "0.6334696", "0.63335186", "0.63290733", "0.63166624", "0.63080263", "0.63070726", "0.6302119", "0.629745", "0.62907743", "0.6290322", "0.6285082", "0.62828684", "0.62756443", "0.6268001", "0.626199", "0.62571824", "0.6253107", "0.62498766", "0.62426615", "0.6241755", "0.62408584", "0.6238647", "0.62341547", "0.62316906", "0.62301433", "0.62216663", "0.62202907", "0.62189156", "0.6215256", "0.6211439", "0.6211097", "0.6199132", "0.6198972", "0.61919266", "0.6189155", "0.6180634", "0.61784905", "0.61759436", "0.6169617", "0.6169517" ]
0.63848686
47
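The metadata above declares a "triplet" objective over (query, document, negatives). Below is a minimal sketch, assuming the row layout shown in this record, of expanding one row into (query, positive, negative) training triplets; the helper name is illustrative, not part of the dataset.

def build_triplets(row):
    # Pair the query with its positive document and each hard negative in turn.
    triplets = []
    for negative in row["negatives"]:
        triplets.append((row["query"], row["document"], negative))
    return triplets

For this first record, the sketch yields triplets pairing the JWT login query with jwt_login() as the positive document and each of the login/authentication snippets listed in negatives as a negative.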
This will log a user out and redirect them to log in again via the AuthN server.
def logout_redirect(request):
    logout(request)

    # Build the URL
    login_url = furl(login_redirect_url(request, next_url=request.build_absolute_uri()))

    # Check for branding
    if hasattr(settings, 'SCIAUTH_BRANDING'):
        logger.debug('SciAuth branding passed')

        # Encode it and pass it
        branding = base64.urlsafe_b64encode(json.dumps(settings.SCIAUTH_BRANDING).encode('utf-8')).decode('utf-8')
        login_url.query.params.add('branding', branding)

    # Set the URL and purge cookies
    response = redirect(login_url.url)
    response.delete_cookie('DBMI_JWT', domain=dbmi_settings.JWT_COOKIE_DOMAIN)

    logger.debug('Redirecting to: {}'.format(login_url.url))

    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logout():\n logout_user()\n return redirect(url_for('auth.index'))", "def signout():\n session.pop('oauth2_state', None)\n session.pop('oauth2_token', None)\n session.pop('discord_user', None)\n return redirect('/')", "def logout_user(request):\r\n # We do not log here, because we have a handler registered\r\n # to perform logging on successful logouts.\r\n logout(request)\r\n if settings.FEATURES.get('AUTH_USE_CAS'):\r\n target = reverse('cas-logout')\r\n else:\r\n target = '/'\r\n response = redirect(target)\r\n response.delete_cookie(\r\n settings.EDXMKTG_COOKIE_NAME,\r\n path='/', domain=settings.SESSION_COOKIE_DOMAIN,\r\n )\r\n return response", "def signout(self):\n username = cherrypy.session.get('username')\n if username is None:\n pass\n else:\n cherrypy.lib.sessions.expire()\n raise cherrypy.HTTPRedirect('/')", "def logout():\n logout_user()\n return redirect(url_for(\".login\"))", "def logout():\n\n # remove the username from the session if it is there\n out_user = current_user.get_id()\n logout_user()\n logger.info(out_user + ' has been logged out.')\n return redirect(url_for('home'))", "def logOut(self):\n self.client.logout()", "def log_out_user(self):\n flask_login.logout_user()", "def ldap_logout():\n timed_out = request.args.get('timed_out', False)\n logout_user()\n create_auth_event(\n auth_event_type=event_type.USER_FAILED_LOG_IN,\n user_guid=session[\"user_id\"],\n new_value={\n 'success': True,\n 'type': current_app.config['AUTH_TYPE'],\n 'timed_out': timed_out\n }\n )\n session.clear()\n if timed_out:\n flash(\"Your session timed out. Please login again\", category=\"info\")\n return redirect(url_for(\"main.index\"))", "def logout():\n if \"username\" in session.keys():\n del session[\"username\"]\n if not app.config[\"DISABLE_AUTH\"]:\n return redirect(url_for(\"login\") + \"?slo\")\n else:\n return redirect(url_for(\"index\"))", "def logout(self):\n with self.client.post(\"/logout\", catch_response=True) as response:\n for r_hist in response.history:\n if r_hist.status_code > 200 and r_hist.status_code < 400:\n response.success()\n self.user.username = None\n # go to UnauthenticatedTasks\n self.interrupt()", "def logout():\n session.pop('userinfo', None)\n # no more steps necessary, because we don't keep the token around\n if 'target' not in session.keys():\n return redirect(\"/\")\n return redirect(session['target'])", "def logout():\n logout_user()\n flash('You have successfully been logged out.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))", "def logout():\n logout_user()\n flash('You have successfully been logged out.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))", "def logout():\n\n logout_user()\n return redirect(url_for('login'))", "def logout():\n logout_user()\n flash('You have successfully been logged out')\n\n # redirect to login page\n return redirect(url_for('auth.login'))", "def logout_user(request):\n if request.user.is_authenticated:\n print(f\"Logging out user {request.user.username}\")\n logout(request)\n else:\n print(\"No authenticated users found\")\n\n return redirect('index')", "def logout():\n\n logout_user()\n flash('You have successfully been logged out.')\n\n return redirect(url_for('auth.login'))", "def logout():\n logout_user()\n return redirect(\"/\")", "def logout():\n\n logout_user()\n return redirect('/')", "def logout():\n logout_user()\n return redirect(url_for('main.index'))", "def logout_user():\n\n session.clear()\n\n return redirect(\"/\")", "def log_out():\n if 
'name' in session:\n PLAN.logout_user(session['name'])\n session.pop('name', None)\n return redirect(url_for('log_in'))\n return redirect(url_for('log_in'))", "def logout():\n logout_user()\n return redirect(url_for('default.home'))", "def log_out(request):\n logout(request)\n return redirect('user_login')", "def logout(self):\r\n # should redirect\r\n check_for_get_code(self, 302, reverse('logout'))", "def logout():\n user = g.user\n do_logout(user)\n\n flash(\"You have successfully logged out.\", 'success')\n return redirect(\"/login\")", "def logout():\n session['user_id'] = None\n session['user_email'] = None\n return redirect(url_for('main'))", "def logout():\n logout_user()\n return redirect(url_for('home'))", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def logout():\n response.cookies['curr_user_id'] = -1\n response.cookies['curr_user_id']['expires'] = -10\n response.cookies['curr_user_id']['path'] = '/'\n redirect(URL('default', 'index'))", "def logout():\n logout_user()\n flash(\"Successfully signed out\", category='info')\n return redirect(url_for('url.index'))", "def gdisconnect():\n try:\n access_token = login_session['credentials']\n except KeyError:\n flash('Failed to get access token')\n return redirect(url_for('home'))\n print(\"User's name was {}.\".format(login_session['name']))\n if access_token is None:\n print('Access Token is None')\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n del login_session['credentials']\n del login_session['user_id']\n del login_session['name']\n del login_session['email']\n print('Successfully logged out.')\n flash('Successfully logged out.')\n return redirect(url_for('home'))", "def logout():\n logout_user()\n return redirect(url_for('index'))", "def logout():\n logout_user()\n return redirect(url_for('index'))", "def signout():\r\n logout_user()\r\n flash(gettext('You are now signed out'), 'success')\r\n return redirect(url_for('home.home'))", "def log_out(request):\n\n if request.user.is_authenticated:\n logout(request)\n\n return redirect(\"/\")", "def logout():\n do_logout()\n return redirect('/login')", "def local_logout(timed_out=False):\n logout_user()\n create_auth_event(\n auth_event_type=event_type.USER_FAILED_LOG_IN,\n user_guid=session[\"user_id\"],\n new_value={\n 'success': True,\n 'type': current_app.config['AUTH_TYPE']\n }\n )\n session.clear()\n if timed_out:\n flash(\"Your session timed out. 
Please login again\", category=\"info\")\n return redirect(url_for(\"main.index\"))", "def logout():\n\n session.pop(\"leader_logged_in\", False)\n session.pop(\"leader_id\", None)\n session.pop(\"leader_email\", None)\n\n return redirect(f\"{BASEPATH}/login\")", "def logout():\r\n logout_user()\r\n flash('You were logged out.')\r\n return redirect(url_for('index'))", "def logout():\n flash('You were logged out')\n session.pop('user_id', None)\n return redirect(url_for('leaderboard'))", "def logout():\n logout_user()\n flash('Successfully logged out.')\n return redirect(request.referrer)", "def logout():\n timeout = request.args.get(\"timeout\", False)\n forced_logout = request.args.get(\"forced_logout\", False)\n\n if current_app.config[\"USE_LDAP\"]:\n return redirect(\n url_for(\"auth.ldap_logout\", timeout=timeout, forced_logout=forced_logout)\n )\n\n elif current_app.config[\"USE_SAML\"]:\n return redirect(\n url_for(\n \"auth.saml\", slo=\"true\", timeout=timeout, forced_logout=forced_logout\n )\n )\n\n elif current_app.config[\"USE_LOCAL_AUTH\"]:\n logout_user()\n session.clear()\n if timeout:\n flash(\"Your session timed out. Please login again\", category=\"info\")\n return redirect(url_for(\"main.index\"))\n\n return abort(404)", "def signout(self):\r\n return self.app.get('/account/signout', follow_redirects=True)", "def logout_user():\n pass", "def logout_user(request):\n\tlogout(request)\n\treturn HttpResponseRedirect('/')", "def sign_out():\n next_url = request.args.get('next')\n session.pop(\"user\")\n flash(\"Sign Out Successful\", \"success\")\n return redirect(next_url or url_for('index'))", "def logout(self):\n self.change_user(self.username, None, None)", "def logout():\n logout_user()\n flash('You have successfully been logged out.')\n # redirect to the login page\n return redirect(url_for('view.login'))", "def user_logout(request):\r\n logout(request)\r\n return redirect('accounts:login')", "def logout():\n flash(\"You have been logged out\")\n session.pop(\"user\")\n return redirect(url_for(\"login\"))", "def logout():\n session.pop('user', None)\n return redirect(url_for('index'))", "def logout():\n session.pop('user', None)\n return redirect(url_for('index'))", "def sign_out():\n\n session.clear()\n response = make_response(redirect('/'))\n response.delete_cookie(\"logged-in\")\n return response", "def log_out():\n\n del session[\"user_id\"]\n # print session[\"user_id\"]\n flash('You were successfully logged out')\n return render_template('homepage.html')\n\n #Additional reference for log in/log out can be found in project tracker project", "def logout_user():\n\n # Delete session data to log out\n del session[\"user_id\"]\n flash(\"Successfully logged out!\")\n\n return redirect(\"/\")", "def logout(self):\n user = self.get_user()\n if user:\n with atomic(self.conf['auth.dbfile']) as cursor:\n logout_user(cursor, user.username)\n request.user = self.tpls['user'] = None\n response.set_cookie(self.conf['auth.cookie_key'], '',\n secret=self.conf['auth.cookie_secret'], path='/')", "def logout(self):\r\n session.clear()\r\n return redirect(\"/user/login\")", "def signout():\n logout_user()\n flash('Logged Out Successfully')\n return redirect(url_for('home.welcome'))", "def sign_out():\n session.clear()\n return redirect(url_for('index'))", "def auth_logout(request):\n logout(request)\n return HttpResponseRedirect( reverse('startpage') )", "def logout():\n # Log user out if they are authenticated\n if current_user.is_authenticated:\n logout_user()\n # Redirect to index 
page\n flash(\"Successfully logged out.\", category=\"success\")\n # Redirect back to index\n return redirect(url_for('main.index'))", "def logout_user():\n\n print \"Logging out.\"\n session.clear()\n flash(\"You are now logged out.\")\n\n return redirect('/')", "def logout():\n \n # using the method from the flask module\n logout_user()\n return redirect(url_for('home'))", "def log_out():\n session.pop('logged_in', None)\n flash('You were logged out.')\n\n return redirect(url_for('blog.show_posts'))", "def logout():\n flash(u'Zostałeś wylogowany')\n session.pop('user_id', None)\n return redirect(url_for('index'))", "def logout():\n\tsession.pop(\"username\", None)\n\treturn redirect(url_for(\"default\"))", "def logout():", "def logout():\n session.pop(\"user\")\n return redirect(url_for(\"home\"))", "def logout_view(request):\n if request.user.is_authenticated:\n logout(request)\n callback_url = \"https://login.cern.ch/adfs/ls/?wa=wsignout1.0&ReturnUrl=\"\n callback_url += \"http%3A//\"\n callback_url += request.META[\"HTTP_HOST\"]\n callback_url += reverse(\"certhelper:logout_status\")\n return HttpResponseRedirect(callback_url)\n return HttpResponseRedirect(\"/\")", "def user_logout():\n\n session.pop('logged_in', None)\n flash('You are now logged out')\n\n return redirect('/')", "def logout():\n # remove user from session cookies\n flash(\"You have been logged out\")\n session.pop(\"user\")\n return redirect(url_for(\"login\"))", "def logout():\n session.pop(\"username\")\n\n return redirect(\"/\")", "def sign_out(self):\n self.auth.log_out(self._user)\n self._user = None\n print(\"Signed out successfully\")\n return self.logging_page()", "def get_sign_out():\n log_out_url = get_survey_config().account_service_log_out_url\n\n # Check for GET as we don't want to log out for HEAD requests\n if request.method == \"GET\":\n logout_user()\n\n return redirect(log_out_url)", "def logout():\n return logout_user()", "def logout():\n flash('You were logged out')\n session.pop('user_id', None)\n return redirect(url_for('public_timeline'))", "def logout():\n flash(_('You were logged out'))\n session.pop('user_id', None)\n return redirect(url_for('index'))\n #return redirect(url_for('public_timeline'))", "def logout():\n # Remove session data, this will log the user out\n session.pop('loggedin', None)\n session.pop('userid', None)\n session.pop('username', None)\n # Redirect to login page\n return redirect(url_for('site.login'))", "def logout():\n\n do_logout()\n flash('successfully logged out')\n return redirect(\"/\")", "def logout_redirect():\n login_session.clear()\n flash('You have logged out')\n return redirect(url_for('show_homepage'))", "def logout() -> Response:\n if \"zeus_token\" in session:\n session.pop(\"zeus_token\", None)\n logout_user()\n return redirect(url_for(\"general_bp.home\"))", "def logout():\n login()", "def logout():\n # Remove credentials key and user id from session\n session_helper = SessionHelper(session)\n session_helper.delete_credentials_from_session()\n session_helper.delete_user_from_session()\n return redirect(url_for('homepage.home_page_route'))", "def logout_user():\n session.pop('username')\n return redirect('/login')", "def account_logout(request):\n logout(request)\n return redirect('/')", "def logout(user_id):\n if CURRENT_USER_KEY not in session or session[CURRENT_USER_KEY] != user_id:\n raise Unauthorized()\n do_logout()\n return redirect('/')", "def logoutuser(request):\n logout(request)\n return redirect('login')", "def logout():\n if \"username\" in 
session:\n session.pop(\"username\", None)\n flash(\"You have been logged out.\")\n return redirect(url_for(\"index\"))", "def logout():\n\n session.pop(\"username\")\n return redirect(\"/login\")", "def logoutUser(request):\n logout(request)\n return redirect('login')", "def logout():\n flash('You were logged out')\n session.pop('username', None)\n return redirect(url_for('welcome_page'))", "def logout():\n session.pop('username', None)\n return redirect('/')", "def logout():\n user = current_user\n user.authenticated = False\n db.session.add(user)\n db.session.commit()\n logout_user()\n return redirect(url_for('index'))", "def logout():\n user = current_user\n user.authenticated = False\n db.session.add(user)\n db.session.commit()\n logout_user()\n return redirect(url_for('index'))", "def logout():\n # clear user data from session and flag as logged out\n for x in ['provider', 'state', 'user']:\n if x in flask.session:\n del flask.session[x]\n flask.session['logged_in'] = False\n\n flash('logout successful', 'info')\n return redirect(request.referrer or url_for('catalog.index'))", "def logout():\r\n form = LoginForm()\r\n user = current_user\r\n user.authenticated = False\r\n db.session.add(user)\r\n db.session.commit()\r\n logout_user()\r\n return redirect(url_for('hello'))", "def log_out(self):\n self.__is_logged_in = False", "def logout():\n session.pop('microsoft_token', None)\n session.pop('state', None)\n return redirect(url_for('index'))" ]
[ "0.74786663", "0.7398715", "0.7309837", "0.7262442", "0.7236717", "0.72231877", "0.72231495", "0.7213908", "0.72030735", "0.7196016", "0.71688986", "0.71640676", "0.7163589", "0.7163589", "0.7159972", "0.71333206", "0.71177167", "0.7101278", "0.709512", "0.7080758", "0.70622957", "0.70619804", "0.70518124", "0.7048902", "0.7047533", "0.70436454", "0.7042506", "0.7041001", "0.70382464", "0.703031", "0.703031", "0.7026971", "0.70203054", "0.7017529", "0.70174754", "0.70174754", "0.7016223", "0.701253", "0.70117897", "0.70097095", "0.70071757", "0.7005287", "0.70049345", "0.6988702", "0.6987223", "0.6974969", "0.69597787", "0.6958145", "0.6952998", "0.6950597", "0.6950349", "0.6944666", "0.69402254", "0.69374025", "0.69374025", "0.69349843", "0.69334227", "0.69254476", "0.6918375", "0.6907361", "0.68931466", "0.6885426", "0.68829924", "0.68812174", "0.68797654", "0.68771106", "0.68763465", "0.68741906", "0.6872497", "0.68723005", "0.68612707", "0.68534946", "0.68507034", "0.6850427", "0.6848891", "0.6842743", "0.68359977", "0.6830859", "0.682779", "0.6825041", "0.6820646", "0.6814114", "0.6813266", "0.68078786", "0.6799622", "0.6793648", "0.67917746", "0.67888343", "0.67846024", "0.6780611", "0.677589", "0.6765902", "0.6764193", "0.6763408", "0.6760628", "0.675786", "0.675786", "0.67439824", "0.6741452", "0.6737437", "0.6734151" ]
0.0
-1
Author overloads `error` method for scanning and parsing. I will define separate methods.
def scan_error(self, line: int, message: str): self.report(line, "", message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error(self, error):\n pass", "def error(self, *args, **kwargs):", "def error(self):\n ...", "def _error(self, token, msg):\n self._interpreter.parse_error(token, msg)\n return ParseError()", "def error(self, message):\r\n self._construct_partial_parser().error(message)", "def call_error():\r\n print(\"Error in input format.\")\r\n sys.exit()", "def error(self, message=None, show_help=True):", "def error(self):\n pass", "def error_handler(num, err):\n print(\"Error in input {}\".format(num))\n err = err.decode()\n raise Exception(err)", "def error(self, msg, *args, **kwargs):\n pass", "def __parse_error(self, text):\n m = self.__size_expr.match(text)\n if m is not None:\n self.errcode = b\"\"\n self.errmsg = self.__read_block(int(m.group(1)) + 2)\n return\n\n m = self.__error_expr.match(text)\n if m is None:\n raise Error(\"Bad error message\")\n if m.group(1) is not None:\n self.errcode = m.group(1).strip(b\"()\")\n else:\n self.errcode = b\"\"\n self.errmsg = m.group(2).strip(b'\"')", "def error():\n return None", "def _process_error(self, result):\n self.error = result\n if result['errorCode'] == 901:\n raise Exceptions.APIKeyInvalid\n elif result['errorCode'] == 902:\n raise Exceptions.APISecretInvalid\n elif result['errorCode'] == 903:\n raise Exceptions.InvalidRequestToken\n elif result['errorCode'] == 904:\n raise Exceptions.RequestTokenExpired\n elif result['errorCode'] == 905:\n raise Exceptions.InvalidAccessToken\n elif result['errorCode'] == 906:\n raise Exceptions.TokenExpired(self.access.expire)\n elif result['errorCode'] == 907:\n raise Exceptions.ParameterMissing\n elif result['errorCode'] == 908:\n raise Exceptions.ParameterNotFormatted\n elif result['errorCode'] == 909:\n raise Exceptions.FeatureNotSupported\n elif result['errorCode'] == 910:\n raise Exceptions.EndPointNotSupported\n else:\n raise Exceptions.UnknownJsonError(result)", "def __call__(self, *args, **kwargs):\r\n return self.error(*args, **kwargs)", "def check_errors(self) -> None:", "def __init__(self, error_search=\"error\"):\n self.error_search = error_search", "def error(self):\n raise NotImplementedError(\"subclasses need to override this method\")", "def _parse_error(self, err):\r\n self.logger.debug(err)\r\n stack = err.get(\"stack\", [])\r\n if not err[\"message\"].startswith(\"parse error:\"):\r\n err[\"message\"] = \"error: \" + err[\"message\"]\r\n errmsg = \"Octave evaluation error:\\n%s\" % err[\"message\"]\r\n\r\n if not isinstance(stack, StructArray):\r\n return errmsg\r\n\r\n errmsg += \"\\nerror: called from:\"\r\n for item in stack[:-1]:\r\n errmsg += \"\\n %(name)s at line %(line)d\" % item\r\n try: # noqa\r\n errmsg += \", column %(column)d\" % item\r\n except Exception: # noqa\r\n pass\r\n return errmsg", "def error(self, msg, elem):\n if elem is not None:\n msg += \" (line %d)\" % elem.sourceline\n if self.ignore_errors:\n return self.warn(msg, elem)\n raise ParserException(msg)", "def handle_err(self):\n pass", "def not_found(error):\n pass", "def error(self, message, token=None):\n raise ParseException(\n message,\n self.filename,\n line=self._line,\n line_number=self._line_number,\n token=token)", "def _error(msg):\n\n error(None, msg)", "def grabError(self, error): #$NON-NLS-1$\r", "def parse_error(self, message, exc_cls=VisualizerParseError):\n raise exc_cls(\"Error parsing %s '%s' (%s:%i): %s\" % \n (self.tag, self.ref, self.filename, self.lineno, message))", "def _parse_error(self, error):\n error = str(error)\n # Nvidia\n # 0(7): error C1008: undefined variable \"MV\"\n m = 
re.match(r'(\\d+)\\((\\d+)\\)\\s*:\\s(.*)', error)\n if m:\n return int(m.group(2)), m.group(3)\n # ATI / Intel\n # ERROR: 0:131: '{' : syntax error parse error\n m = re.match(r'ERROR:\\s(\\d+):(\\d+):\\s(.*)', error)\n if m:\n return int(m.group(2)), m.group(3)\n # Nouveau\n # 0:28(16): error: syntax error, unexpected ')', expecting '('\n m = re.match(r'(\\d+):(\\d+)\\((\\d+)\\):\\s(.*)', error)\n if m:\n return int(m.group(2)), m.group(4)\n # Other ...\n return None, error", "def errors(self):\n raise NotImplementedError", "def error(self, *args, **kwargs):\n if len(args) == 3:\n print(f\"ERROR: {args[1]}\")\n else:\n print(f\"ERROR: {args[0]}\")", "def ERR(self):", "def error(self, message: str) -> None:\n lines = message.split('\\n')\n linum = 0\n formatted_message = ''\n for line in lines:\n if linum == 0:\n formatted_message = 'Error: ' + line\n else:\n formatted_message += '\\n ' + line\n linum += 1\n\n self.print_usage(sys.stderr)\n\n # Format errors with style_warning()\n formatted_message = ansi.style_warning(formatted_message)\n self.exit(2, '{}\\n\\n'.format(formatted_message))", "def unexpected_error(self, exception):", "def check_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def _display_syntax_error(self, errorid):\n\n # For total error count\n self.syntax_errors_list.append(errorid)\n\n if errorid == \"start\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected START.\")\n elif errorid == self.scanner.END_ID:\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected END.\")\n elif errorid == self.scanner.DEVICES_ID:\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected DEVICES.\")\n elif errorid == self.scanner.CONNECTIONS_ID:\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected CONNECTIONS.\")\n elif errorid == self.scanner.MONITORS_ID:\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected MONITORS.\")\n\n elif errorid == \"devicename\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Invalid device name.\")\n elif errorid == \"devicetype\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Invalid device type.\")\n elif errorid == \"parameter\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Invalid parameter type.\")\n elif errorid == \"semicoloncomma\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected a semicolon or a comma.\")\n elif errorid == \"number\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Invalid input number.\")\n elif errorid == \"doutput\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Only DTypes can specify an output. 
\\\n Either an invalid DType output or should not have an output.\")\n elif errorid == \"arrowperiod\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected either an arrow or a DType output\")\n\n elif errorid == \"semicolon\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected a semicolon.\")\n elif errorid == \"equal\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected an equal sign.\")\n elif errorid == \"comma\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected a comma.\")\n elif errorid == \"period\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected a period.\")\n elif errorid == \"arrow\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Expected an arrow ->.\")\n elif errorid == \"input\":\n self.scanner.print_error(self.symbol, self.symbol)\n print(\"Inputs must either start with I or be \\\n DATA, CLK, SET, CLEAR.\")\n\n return None", "def error(*args, noContext: bool=True, showLineNumber: bool=True, **kwargs)->None:\n pass", "def error(self, error_msg):\n print(\"ERROR DETECTED\")\n print(error_msg)", "def parse_error(self, kind=ParseError, args=None): # type: () -> None\n line, col = self._to_linecol(self._idx)\n\n if args:\n return kind(line, col, *args)\n else:\n return kind(line, col)", "def error_check(self, message):\n matches = ERROR_SYNTAX.match(message)\n if matches:\n error_code = int(matches.group(1))\n error_message = matches.group(2)\n return error_code, error_message\n return None", "def indicate_error(self):\n pass", "def error(msg):\n return ErrorRule(msg)", "def process_error(self, id, code, error):\n raise NotImplementedError('process_error not implemented in BaseService')", "def error_check(command):\r\n\r\n # TODO\r", "def _ps_error(e):\n\n error(None, str(e))", "def error(self, message):\n print message", "def error(self, handler):\n pass", "def error_analyze(\n self,\n data_dir: Path,\n processed_data_dir: Path,\n result_dir: Path,\n output_report_dir: Path,\n ) -> NoReturn:\n pass", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def parsed_error_msg(self):\r\n return self.error_msg", "def request_validation_error(error):\n return bad_request(error)", "def request_validation_error(error):\n return bad_request(error)", "def request_validation_error(error):\n return bad_request(error)", "def parse_error(self, error: Union[str, Exception],\n elem: Optional[ElementType] = None,\n validation: Optional[str] = None) -> None:\n if validation is not None:\n check_validation_mode(validation)\n else:\n validation = self.validation\n\n if validation == 'skip':\n return\n elif elem is None:\n elem = self.elem\n elif not is_etree_element(elem):\n msg = \"the argument 'elem' must be an Element instance, not {!r}.\"\n raise XMLSchemaTypeError(msg.format(elem))\n\n if isinstance(error, XMLSchemaParseError):\n error.validator = self\n error.namespaces = getattr(self, 'namespaces', None)\n error.elem = elem\n error.source = getattr(self, 'source', None)\n elif isinstance(error, Exception):\n message = str(error).strip()\n if message[0] in '\\'\"' and message[0] == message[-1]:\n message = message.strip('\\'\"')\n error = XMLSchemaParseError(self, message, elem)\n elif isinstance(error, str):\n error = XMLSchemaParseError(self, error, elem)\n else:\n msg = \"'error' argument must be an exception 
or a string, not {!r}.\"\n raise XMLSchemaTypeError(msg.format(error))\n\n if validation == 'lax':\n self.errors.append(error)\n else:\n raise error", "def _error_check(self, command_response):\n error_list = command_response.find(\"./clierror\")\n command_obj = command_response.find(\"./input\")\n if error_list is not None:\n command = command_obj.text if command_obj is not None else \"Unknown command\"\n msg = etree.tostring(error_list).decode()\n raise NXAPICommandError(command, msg)", "def parser_error(msg):\n global MESSAGES\n if CURRENT_ROW != None:\n msg = \"row \"+str(CURRENT_ROW)+\": \"+msg\n msg += \"<br/>\\n&nbsp;&nbsp;&nbsp;starting with: \"\n for col in range(5):\n val = cellval(CURRENT_ROW, col)\n if val == None:\n val = \"\"\n msg += val+\" | \"\n MESSAGES.append(\"ERROR: \"+msg)", "def parse_error (self, error_str):\r\n\t\t# Regex out the error and channel indices from the string\r\n\t\tob = re.match(ERROR_FORMAT, error_str)\r\n\t\t\r\n\t\t# If error_str doesn't match an error, return None\r\n\t\tif ob is None:\r\n\t\t\treturn None\r\n\t\t\r\n\t\t# Extract the two matched groups (i.e. the error and channel indices)\r\n\t\terrno,chno = ob.groups()\r\n\t\terrno = int(errno)\r\n\t\tchno = int(chno)\r\n\t\t\r\n\t\t# Get the error description; if none is defined, mark as unrecognised\r\n\t\terrdesc = self.error_desc_dict.get(errno, 'Unrecognised error code.').format(ch=chno)\r\n\t\t\r\n\t\treturn {'type':'err', 'id':errno, 'ch':chno, 'desc':errdesc, 'raw':error_str}", "def error ( self , message , *args , **kwargs ) :\n return self.logger.error ( message , *args , **kwargs )", "def error(self, message):\n raise ArgumentParseError(message)", "def error(message: str) -> None:\n print(f\"ERROR: {message}\")", "def error(self, msg):\n\n self(msg, ERROR)", "def log_error(self, fmt, *args):\r\n pass\r\n # log_error\r", "def handle_exception(e):\n print(e)\n return error()", "def ErrorString(self): # real signature unknown; restored from __doc__\n pass", "def error(err):\n\n return str(err) + '\\n'", "def error(msg):\n\n raise Exception(msg)", "def error(self, message):\n self.exit(2, f\"Input error: {message}\\n\")", "def error(self, error_ID, stopping_symbols, symbol_IDs=[]):\n # Increment Error Counter\n self.error_count += 1\n\n # Consider Syntax Errors\n if error_ID == self.NO_END:\n msg = \"'END' keyword required at end of file\"\n option = False\n elif error_ID == self.NO_CURLY_DEVICE:\n msg = \"Expected '{' after 'DEVICES'\"\n option = True\n elif error_ID == self.NEED_DEVICE_KEYWORD:\n msg = \"'DEVICES' keyword required\"\n option = False\n elif error_ID == self.NO_CURLY_CONNECT:\n msg = \"Expected '{' after 'CONNECT'\"\n option = True\n elif error_ID == self.NEED_CONNECT_KEYWORD:\n msg = \"'CONNECT' keyword required\"\n option = False\n elif error_ID == self.NO_CURLY_MONITOR:\n msg = \"Expected '{' after 'MONITOR'\"\n option = True\n elif error_ID == self.NEED_MONITOR_KEYWORD:\n msg = \"'MONITOR' keyword required\"\n option = False\n elif error_ID == self.INTEGER:\n msg = \"Needs to be a positive integer\"\n option = False\n elif error_ID == self.NEED_QUALIFIER:\n msg = (\"Expected a parameter: 'initial',\"\n \"'inputs', 'sequence' or 'period'\")\n option = False\n elif error_ID == self.NEED_PARAM:\n msg = \"Comma has to followed by parameter specification\"\n option = False\n elif error_ID == self.NO_DEVICE_SEMICOLON:\n msg = \"Device definition needs to end in ';'\"\n option = True\n elif error_ID == self.NO_DEVICE_COLON:\n msg = \"Device name has to be followed by ':'\"\n 
option = True\n elif error_ID == self.DEVICE_NAME:\n msg = \"Valid Device name required\"\n option = False\n elif error_ID == self.LOGIC_GATE:\n msg = \"Valid Logic gate required e.g. 'AND'\"\n option = False\n elif error_ID == self.OUTPUT_PIN:\n msg = \"Output pin has to be 'Q' or 'QBAR'\"\n option = False\n elif error_ID == self.NO_CONNECT_SEMICOLON:\n msg = \"Connection has to be terminated by ';'\"\n option = True\n elif error_ID == self.INPUT_PIN:\n msg = \"Valid input pin required\"\n option = False\n elif error_ID == self.PERIOD_INPUT_PIN:\n msg = \"'.' required to specify input pin\"\n option = True\n elif error_ID == self.NAME_INPUT:\n msg = \"Name string of input device required\"\n option = True\n elif error_ID == self.ASSIGNMENT:\n msg = \"'=' Assignment operator requried\"\n option = True\n elif error_ID == self.NAME_STRING:\n msg = \"Valid string name required\"\n option = False\n elif error_ID == self.NO_MONITOR_SEMICOLON:\n msg = \"Monitor point has to be terminated by ';'\"\n option = True\n elif error_ID == self.MISSING_RIGHT_CURLY:\n msg = \"Missing '}'\"\n option = True\n elif error_ID == self.SIGGEN_QUALIFIER:\n msg = \"SIGGEN signal values can only be '0' or '1'\"\n option = False\n\n # Consider Semantic Errors\n # DEVICES\n elif error_ID == self.devices.DEVICE_PRESENT:\n msg = \"Device Name already used\"\n option = False\n self.symbol = self.old_symbol\n elif error_ID == self.devices.NO_QUALIFIER:\n msg = \"Device qualifier required\"\n option = True\n elif error_ID == self.devices.INVALID_QUALIFIER:\n msg = \"Valid device qualifier requried\"\n option = True\n elif error_ID == self.devices.QUALIFIER_PRESENT:\n msg = \"Qualifier already present\"\n option = True\n elif error_ID == self.devices.BAD_DEVICE:\n msg = \"Invalid device declared\"\n option = True\n elif error_ID == self.devices.EXCESS_QUALIFIER:\n msg = \"Too many qualifiers\"\n option = True\n\n # CONNECTIONS\n elif error_ID == self.network.DEVICE_ABSENT:\n msg = \"Device is not declared\"\n option = False\n self.symbol = self.old_symbol\n elif error_ID == self.network.INPUT_CONNECTED:\n msg = \"Input is already in a connection\"\n option = True\n elif error_ID == self.network.INPUT_TO_INPUT:\n msg = \"Both ports are inputs\"\n option = True\n elif error_ID == self.network.PORT_ABSENT:\n msg = \"Port is absent\"\n option = True\n elif error_ID == self.network.OUTPUT_TO_OUTPUT:\n msg = \"Both ports are outputs\"\n option = True\n\n # MONITORING\n elif error_ID == self.monitors.NOT_OUTPUT:\n msg = \"Cannot monitor a point that is not an output\"\n option = True\n elif error_ID == self.monitors.MONITOR_PRESENT:\n msg = \"This point is already being monitored\"\n option = True\n\n # Floating input pins\n elif error_ID == self.FLOATING_INPUT_PIN:\n msg = \"All input pins haven't been connected\"\n option = True\n\n else:\n msg = \"ERROR\"\n option = True\n\n # Display error message\n print(msg)\n\n # Display Error position and get error object\n this_err = self.scanner.print_location(self.symbol, option)\n this_err.msg = msg\n\n # Append the error object to scanner's list of errors\n self.scanner.error_list.append(this_err)\n\n # Return to recovery point for syntax errors\n\n # Define error IDs where punctuation stopping to not be moved on from\n dont_move_err_IDS = [self.INTEGER, self.NEED_PARAM, self.LOGIC_GATE,\n self.NEED_QUALIFIER, self.SIGGEN_QUALIFIER,\n self.devices.EXCESS_QUALIFIER]\n\n # Define a move_on Boolean state\n move_on = True\n\n if(self.symbol.type == self.scanner.KEYWORD):\n if 
(self.symbol.id in symbol_IDs):\n move_on = False\n else:\n move_on = True\n else:\n move_on = self.symbol.type not in stopping_symbols and \\\n self.symbol.type != self.scanner.EOF\n if ((not move_on) and (\n self.symbol.type != self.scanner.NAME and\n self.symbol.type != self.scanner.RIGHT_CURLY)):\n # Move on once more after terminating punctuation\n # Only move on for certain error types\n if error_ID not in dont_move_err_IDS:\n self.symbol = self.scanner.get_symbol()\n\n while (move_on):\n self.symbol = self.scanner.get_symbol()\n\n # Update move_on Boolean state\n if(self.symbol.type == self.scanner.KEYWORD):\n if (self.symbol.id in symbol_IDs):\n move_on = False\n else:\n move_on = True\n else:\n move_on = self.symbol.type not in stopping_symbols and \\\n self.symbol.type != self.scanner.EOF\n if ((not move_on) and self.symbol.type != self.scanner.NAME):\n # get next symbol once more after terminating punctuation\n # Only for certain error types\n if error_ID not in dont_move_err_IDS:\n self.symbol = self.scanner.get_symbol()", "def error(pigpio_error):\n for e in _errors:\n if e[0] == pigpio_error:\n return e[1]\n return \"unknown error ({})\".format(pigpio_error)", "def error(message):\n print str(message)", "def errors(self) -> List[Error]:", "def handle_error(self, error):\n html = error.response.content\n raise SystemExit(\"API Error:\\n %s\" %\n \"\\n \".join(html.itertext()))", "def check_get_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def error(str):\n\n Utils.send('error', str)", "def _handle_error(self, err: ctypes.c_char_p, method: str) -> Exception:\n if err:\n string = ctypes.string_at(err).decode(\"utf-8\")\n self._free_error(err)\n return RuntimeError(string)\n else:\n return RuntimeError(f\"Unknown error in {method}. 
\")", "def test_error(cls, err, data):\n do_error_test(cls, err, data)", "def handle_error(self, api, command):\n return self.handle_log(api, command, level=logging.ERROR)", "def test():\n\ttry:\n\t\tprint \"Raising ParseErrror.\"\n\t\traise ParseError\n\texcept ParseError:\n\t\tprint \"Caught ParseError.\"", "def test_parse_results_error():\n error_result = [{\"error\": \"test\"}]\n assert [{\"title\": \"Error\",\n \"subtitle\": \"test\",\n \"valid\": False}] == parse_results(error_result)", "def handle_request_parsing_error(err):\n\n code, msg = getattr(err, 'status_code', 400), getattr(err, 'messages', 'Invalid Request')\n abort(code, msg)", "def raxmlInputErrorHandling(self):\n try:\n # input alignment for raxml\n self.raxmlOperations.inputFilename = self.checkEntryPopulated(self.inputFileEntry, errorTitle='Missing Alignment', errorMessage='Please select an alignment.')\n self.raxmlOperations.windowSize = self.checkEntryInRange(self.windowSizeEntry, min=0, inclusive=False, errorTitle='Invalid Window Size', errorMessage='Window size needs to be a positive integer.')\n self.raxmlOperations.windowOffset = self.checkEntryInRange(self.windowOffsetEntry, min=0, inclusive=False, errorTitle='Invalid Window Offset', errorMessage='Window offset needs to be a positive integer.')\n self.raxmlOperations.outGroup = self.outgroupComboBox.currentText()\n self.raxmlOperations.model = self.modelComboBox.currentText()\n self.raxmlOperations.isCustomRaxmlCommand = self.checkBoxCustomRaxml.isChecked()\n self.raxmlOperations.bootstrap = self.checkboxBootstrap.isChecked()\n self.raxmlOperations.rooted = self.checkboxRooted.isChecked()\n self.rooted = self.checkboxRooted.isChecked()\n\n # if user is generating Top Topologies or scatter plot or donut plor or circle graph run error handling on top topologies entry\n if self.checkboxAllTrees.isChecked() or self.checkboxScatterPlot.isChecked() or self.checkboxDonutPlot.isChecked():\n self.checkEntryPopulated(self.numberOfTopTopologiesEntry, errorTitle='Number of Top Topologies Field is Blank', errorMessage='Please enter a number of top topologies.')\n self.topTopologies = self.checkEntryInRange(self.numberOfTopTopologiesEntry, min=0, max=16, inclusive=False, errorTitle='Invalid Number of Top Topologies', errorMessage='Please enter an integer between 0 and 15.')\n\n # bootstrap error handling\n self.raxmlOperations.numBootstraps = 0\n if self.checkboxBootstrap.isChecked():\n self.confidenceLevel = self.checkEntryInRange(self.confidenceLevelEntry, min=0, max=100, errorTitle='Invalid Confidence Level', errorMessage='Please enter an integer between 0 and 100.')\n self.raxmlOperations.numBootstraps = self.checkEntryInRange(self.numberOfBootstrapsEntry, min=2, errorTitle='Invalid Number of Bootstraps', errorMessage='Please enter an integer greater than 1.')\n\n # if using custom rax -- make sure that the user doesn't use the -s or -n flags\n if self.checkBoxCustomRaxml.isChecked():\n self.raxmlOperations.customRaxmlCommand = self.checkEntryPopulated(self.customRaxmlCommandEntry, errorTitle='No RAxML Command', errorMessage='Please enter a custom raxml command or uncheck the box.')\n if re.search('([\\-][n])|([\\-][s])', self.customRaxmlCommandEntry.text()):\n raise ValueError, ('Invalid RAxML Command', 'Please do not specify the -s or -n flags.', 'the -s and -n flags will be handled internally based on the alignment you input.')\n\n # species tree error handling\n if self.speciesTreeEntry.text() != \"\" and self.newickFileEntry.text() != \"\":\n raise ValueError, 
('Multiple Species Trees', 'You have both selected a species tree file and entered a species tree. Please only do one.', 'Both the \"Species Tree File and \"Enter Species Tree\" fields are populated. Please only use one.')\n\n # if the user selects either statistic plot -- open the inputted newick and read it into memory as a string on a single line\n if self.checkboxRobinsonFoulds.isChecked() or self.checkboxPGTST.isChecked():\n if self.newickFileEntry.text() != \"\":\n self.newickFileName = self.checkEntryPopulated(self.newickFileEntry, errorTitle='Missing Species Tree', errorMessage='Please select a species tree.', errorDescription='Please select a species tree.')\n with open(self.newickFileEntry.text(), 'r') as f:\n self.speciesTree = f.read().replace('\\n', '')\n else:\n self.speciesTree = self.checkEntryPopulated(self.speciesTreeEntry, errorTitle='Missing Species Tree', errorMessage='Please select a species tree.', errorDescription='Please select a species tree.')\n\n\n except ValueError, (ErrorTitle, ErrorMessage, ErrorDescription):\n self.message(str(ErrorTitle), str(ErrorMessage), str(ErrorDescription))\n return False\n\n return True", "def error_at(element: Element) -> Callable[[str], TerminalXMLParseError]:\n\n def error(message: str) -> TerminalXMLParseError:\n return TerminalXMLParseError(element.file, element.opening_line, message)\n\n return error", "def error(cls, message):\n print('[ERROR] {0}'.format(message))", "def error(self, e):\n return \"{}: {} ({})\".format(e.__class__.__name__, e.__doc__, e.message)", "def handle_invalid_arguments(e):\n errors = e.message\n return generic_errors(errors, code=400)", "def handle_validation_error(self, error, bundle_errors):\n \n error_str = six.text_type(error)\n error_msg = self.help.format(error_msg=error_str) if self.help else error_str\n msg = {self.name: error_msg}\n\n if bundle_errors:\n return error, msg\n flask_restful.abort(400, message=msg)", "def error_mess():\n print(\"Sorry, I didn't understand that.\")", "def _display_semantic_error(self, errorid):\n\n # For total error count\n self.semantic_errors_list.append(errorid)\n\n if errorid == self.devices.NO_ERROR:\n self.semantic_errors_list.pop()\n elif errorid == self.devices.INVALID_QUALIFIER:\n self.scanner.print_error(\n self.symbol, self.device_paramvalue)\n print(\"This device cannot have this parameter.\")\n elif errorid == self.devices.NO_QUALIFIER:\n self.scanner.print_error(self.symbol, self.device_kind)\n print(\"This device needs a parameter.\")\n elif errorid == self.devices.BAD_DEVICE:\n self.scanner.print_error(self.symbol, self.device_name)\n print(\"Invalid device provided.\")\n elif errorid == self.devices.QUALIFIER_PRESENT:\n self.scanner.print_error(self.symbol, self.device_param)\n print(\"This device should not have a parameter.\")\n elif errorid == self.devices.DEVICE_PRESENT:\n self.scanner.print_error(self.symbol, self.device_name)\n print(\"This device already exists.\")\n elif errorid == self.devices.INVALID_SIGGEN:\n self.scanner.print_error(\n self.symbol, self.device_paramvalue)\n print(\"Only binary waveforms can be specified.\")\n\n elif errorid == self.network.NO_ERROR:\n self.semantic_errors_list.pop()\n elif errorid == self.network.INPUT_TO_INPUT:\n self.scanner.print_error(\n self.symbol, self.connection_second_port)\n print(\"Cannot connect an input to an input.\")\n elif errorid == self.network.OUTPUT_TO_OUTPUT:\n self.scanner.print_error(\n self.symbol, self.connection_second_port)\n print(\"Cannot connect an output to an output.\")\n 
elif errorid == self.network.INPUT_CONNECTED:\n self.scanner.print_error(\n self.symbol, self.connection_second_port)\n print(\"This port is already in a connection.\")\n elif errorid == self.network.PORT_ABSENT:\n self.scanner.print_error(\n self.symbol, self.connection_second_port)\n print(\"This is not a valid port.\")\n elif errorid == self.network.DEVICE_ABSENT:\n if self.duplicate_error_checker == 0:\n # Error is in connections list\n self.scanner.print_error(\n self.symbol,\n self.connection_first_device,\n self.connection_second_device)\n print(\"One or both of these devices do not exist.\")\n else:\n # Error is in monitors list\n self.scanner.print_error(\n self.symbol, self.monitor_device)\n print(\"Device does not exist.\")\n\n elif errorid == \"network\":\n print(\"Not all inputs in the network are connected.\")\n\n elif errorid == self.monitors.NO_ERROR:\n self.semantic_errors_list.pop()\n elif errorid == self.monitors.NOT_OUTPUT:\n self.scanner.print_error(\n self.symbol, self.monitor_device)\n print(\"Not a valid output.\")\n elif errorid == self.monitors.MONITOR_PRESENT:\n self.scanner.print_error(\n self.symbol, self.monitor_device)\n print(\"This output is already being monitored.\")\n\n return None", "def error(msg=\"Invalid query\", code=400):\n\tjson = {'error': msg}\n\t#return jsonify(json), code\n\tabort(make_response(jsonify(json), code))", "def identify_result_error(self, record):\n return [\"error\"]", "async def convert_error(ctx, error):\n print(error)\n if isinstance(error, commands.UserInputError):\n await ctx.send(\"Invalid input.\")\n else:\n await ctx.send(\"Oops, something bad happened..\")", "def errors(self):\n return self.args[1]", "def __parametersCheck_error(self, RxCSObject, error, strTestName):\n self._iNTests = self._iNTests + 1\n strTestIndex = 'Test #%d: ' % (self._iNTests)\n\n # Correct output is wanted\n if isinstance(error, str):\n try:\n RxCSObject.parametersCheck()\n except:\n print(strTestIndex + strTestName + (self.iMaxCols - len(strTestName) - len(strTestIndex)) * '.' + 'Test failed!')\n else:\n print(strTestIndex + strTestName + (self.iMaxCols - len(strTestName) - len(strTestIndex)) * '.' + 'Test ok!')\n\n # Error is wanted\n else:\n try:\n RxCSObject.parametersCheck()\n except error:\n print(strTestIndex + strTestName + (self.iMaxCols - len(strTestName) - len(strTestIndex)) * '.' + 'Test ok!')\n except:\n print(strTestIndex + strTestName + (self.iMaxCols - len(strTestName) - len(strTestIndex)) * '.' + 'Test failed!')\n else:\n print(strTestIndex + strTestName + (self.iMaxCols - len(strTestName) - len(strTestIndex)) * '.' 
+ 'Test failed!')", "def parse_error(self):\n\n # Check the table_parse_error flag\n return self.__table_parse_error", "def _error(self, *args, **kwargs):\n print(\"[{}]\".format(self.type), *args, file=sys.stderr, **kwargs)\n sys.exit(1)", "def error(self):\n return self['error']", "def doomed_parser(line):\n raise exceptions.LineParseException('Error occurred')", "def _check_errors(self, json_loaded):\n\n content = json_loaded\n try:\n m = content[u'error'][u'message']\n c = content[u'error'][u'code']\n out= \"API Error code: {}\\nError message: {}\".format(c, m)\n raise InvalidQueryException(self.name, out)\n except KeyError:\n pass", "def error(self, message):\n return self.log(\"ERROR\", message)", "def error(*args, **kwargs): # pragma: nocover\n kwargs['file'] = sys.stderr\n print(\"\\n\\tERROR:\", *args, **kwargs)\n if args and args[0].startswith(\"[Errno 2] No such file or directory\"):\n print(\"\\t(Did you forget to include an __init__.py?)\")\n sys.exit(1)", "def error(self) -> Optional[pulumi.Input['ErrorArgs']]:\n return pulumi.get(self, \"error\")" ]
[ "0.72092247", "0.69668496", "0.68345785", "0.67888355", "0.6770503", "0.670923", "0.66309696", "0.661976", "0.65300286", "0.6507165", "0.6377651", "0.6323891", "0.6323672", "0.6305191", "0.6301589", "0.62937343", "0.62933636", "0.6288595", "0.62223047", "0.62183744", "0.6195213", "0.61639893", "0.6153825", "0.6146719", "0.6122413", "0.6115478", "0.60950226", "0.60945684", "0.60933435", "0.60869247", "0.6081102", "0.6046628", "0.60167027", "0.60044074", "0.6003217", "0.6001906", "0.5991041", "0.5975912", "0.5968007", "0.59625226", "0.59495425", "0.59383714", "0.59095407", "0.58984894", "0.5897785", "0.5883165", "0.5883165", "0.5883165", "0.5880533", "0.5863315", "0.5863315", "0.5863315", "0.5845788", "0.58207756", "0.5809516", "0.5805111", "0.57989204", "0.57853603", "0.5782892", "0.57783544", "0.57615834", "0.5761152", "0.5759364", "0.5747094", "0.574572", "0.5743771", "0.574122", "0.5739554", "0.5733681", "0.573108", "0.5721361", "0.5719687", "0.5710198", "0.5704124", "0.5686544", "0.5678209", "0.5670203", "0.56664014", "0.56617683", "0.56477994", "0.5645713", "0.5645098", "0.56419456", "0.56404394", "0.5626867", "0.56239957", "0.5620241", "0.56108403", "0.5604963", "0.56015974", "0.56004745", "0.5600095", "0.55976963", "0.5593496", "0.55811214", "0.5570889", "0.5568258", "0.5567782", "0.5559368", "0.55555457" ]
0.6191369
21
report a runtime error
def runtime_error(self, error: 'LoxRuntimeError'): output = f'{error.get_message()}{os.linesep}[line {error.token.line}]' print(output, file=sys.stderr) self.had_runtime_error = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error():\r\n raise RuntimeError('admin ticket generator at your service')", "def reportError(self):\n self.Q['err'].put(sys.exc_info()[:2])", "def serious_error(self, e):\n pass", "def error(self):\n pass", "def unexpected_error(self, exception):", "def error(error):\n print(\"Error\", error)\n erlport.erlang.cast(this.erlang_pid, (erlport.erlterms.Atom(b'python_error'), str(error)))", "def error(self):\n ...", "def _error(self, *args, **kwargs):\n print(\"[{}]\".format(self.type), *args, file=sys.stderr, **kwargs)\n sys.exit(1)", "def report_unexpected_exception(self, *args, **kwargs):\n pass", "def getCompilerError():", "def error(self, error):\n pass", "def error_occured(self) -> None:\r\n \r\n warnings.warn(\r\n '''An Error has occured when processing this photo!\r\n The plants are too emerged in some places to analyze.''',\r\n RuntimeWarning)", "def error(msg):\n print 'ERROR: %s' % msg\n sys.exit(1)", "def call_error():\r\n print(\"Error in input format.\")\r\n sys.exit()", "def _RaiseFatal(cls, sub, subargs, errorcode, *args):\n ScriptForge.InvokeSimpleScript('ScriptForge.SF_Utils._EnterFunction', sub, subargs)\n cls.RaiseFatal(errorcode, *args)\n raise RuntimeError(\"The execution of the method '\" + sub.split('.')[-1] + \"' failed. Execution stops.\")", "def raise_error(Err):\n raise Err()", "def handle_error():\n print \"An error occurred. Trace:\\n\"\n traceback.print_exc()", "def error(msg):\n\n raise Exception(msg)", "def test_init_err_size(self):\n with self.assertRaises(InitializationException):\n pyint = Interpreter(size=INVALID_LOOP)", "def raise_(err):\n raise err", "def provoke_and_handle_TypeError():\n try:\n print(\"loetungdusohn\" + 3)\n except TypeError as te:\n print(f\"Sorry! {te}\")", "def indicate_error(self):\n pass", "def error(msg: str) -> None:\n print('ERROR: {msg}'.format(msg=msg))\n sys.exit()", "def ReportError(text):\n raise IOError(text)", "def error(self, message, location):\n raise CompilerError(message, loc=location)", "def errFunc(runType):\n logger.error('Execution type not recognized! 
{}'.format(runType))\n raise InvalidExecutionType('{} is not a valid command'.format(runType))", "def _check(error: int) -> None:\n if error < 0:\n raise RuntimeError(ffi.string(lib.TCOD_get_error()).decode())", "def test_does_not_crash(self):\n py_function(6)", "def error(ctx, flow):\n ctx.log(\"error\")", "def error(self, msg, *args, **kwargs):\n pass", "def error(self):\n raise NotImplementedError(\"subclasses need to override this method\")", "def error(self, error_msg):\n print(\"ERROR DETECTED\")\n print(error_msg)", "def error_impresion(self):\n self._info(\"error_impresion\")", "def error(self, *args, **kwargs):", "def pytest_internalerror(self, excrepr, excinfo):\n for line in str(excrepr).split(\"\\n\"):\n sys.stderr.write(\"INTERNALERROR> {}\\n\".format(line))\n sys.stderr.flush()\n tb = _postmortem_traceback(excinfo)\n post_mortem(tb, excinfo)", "def error_throw(self,stage):\n if self.is_table_info == False:\n print(\"please enter table info by table_info()\")\n sys.exit(0)\n if stage == 'rank':\n if self.import_method == 'none':\n self.error_output_import()\n elif stage == 'output':\n if self.import_method == 'none':\n self.error_output_import()\n else: \n if self.rank_method == 'none':\n self.error_output_rank()", "def _handle_error(self, err: ctypes.c_char_p, method: str) -> Exception:\n if err:\n string = ctypes.string_at(err).decode(\"utf-8\")\n self._free_error(err)\n return RuntimeError(string)\n else:\n return RuntimeError(f\"Unknown error in {method}. \")", "def error():\n return None", "def error(message):\n print message\n sys.exit(2)", "def error(self, message):\n sys.stderr.write('error: %s\\n' % message)\n self.print_help()\n sys.exit(2)", "def error(msg):\n print(msg, file=sys.stderr)\n sys.exit()", "def _clean_onerror(func, path, excinfo):\n print(\"%s encountered error when processing %s: %s\" % (func, path, excinfo))", "def error(self, tag, message, exc_info=False):\n \n self.log(logging.error,tag, message, exc_info)", "def send_rpc_error(req, rpcreq, e):", "def fatal(self, *args, **kwargs):", "def _err(self, *args):\n logger.error(*args)\n exit(1)", "def print_unable_to_run(exc: \"CalledProcessError\"):\n _print(str(exc), level=MessageLevel.QUIET)", "def finalize_error():\n print('')\n exit(-1)", "def _error_handling(self,e,func):\n print(self.type, \" sufferred exception in \" , func , \":\" , e)", "def error(self, message):\n ErrorExit('error: {}\\n'.format(message), 2)", "def _error(msg):\n\n error(None, msg)", "def error(error_no):\n print('--] Encountered unrecoverable ERROR [%s] ... 
leaving' % error_no)\n write_termination_message(error_no)\n sys.exit(0)", "def die(self, msg=None):\r\n raise Exception(msg)", "def error(s):\n sys.stderr.write(\"%s: %s\\n\" % (NAME, s))\n sys.exit(1)", "def unexpectedException(self):", "def prnt_error():\n print \"Error!\\n\"\n return False", "def _ps_error(e):\n\n error(None, str(e))", "def check(self, runtime):", "def handle_err(self):\n pass", "def error(self, message):\n print message", "def error(self, message=None, show_help=True):", "def syntaxError (self, s) :\r\n report = self.generateReport() + s\r\n raise Exception, report", "def auditportallocfail(self) :\n\t\ttry :\n\t\t\treturn self._auditportallocfail\n\t\texcept Exception as e:\n\t\t\traise e", "def testSimpleErrorsTimeCompletion(self):\n sim = Simulation()\n with self.assertRaises(RuntimeError):\n sim.run_simple(30, 11, \"output\", 0.1, 2, 10)", "def test_gcc_crash(self):\r\n self.validate((1, 10, 213, 129), (46, 10, 212, 1), 'valid',\r\n verify_grad=False)", "def error_analyze(\n self,\n data_dir: Path,\n processed_data_dir: Path,\n result_dir: Path,\n output_report_dir: Path,\n ) -> NoReturn:\n pass", "def crash(self, *args, error=\"OGameError\", exit=True):\n print(\"{}:\".format(error), *args)\n if exit: sys.exit()", "def on_error(self, exception):\n traceback.print_exc()", "def main() -> None:\n try:\n run()\n except errors.BaseError as e:\n sys.stderr.write(f'{str(e)}\\n')\n sys.exit(e.code)", "def die(msg):\n errorPrint(msg)\n sys.exit(1)", "def main_log_error() -> None:\n\n try:\n main()\n except Exception:\n error(\"Unhandled exception: {}\".format(traceback.format_exc()))\n raise", "def handle_execution_exception(self, ex):\n if self.config.raise_errors:\n raise(ex)\n warning(str(ex))", "def _raise_performing_request_error(self, *args, **kwargs):", "def error_handler(num, err):\n print(\"Error in input {}\".format(num))\n err = err.decode()\n raise Exception(err)", "def printerror():\n print(traceback.format_exc())", "def error(*args, noContext: bool=True, showLineNumber: bool=True, **kwargs)->None:\n pass", "def sendError():\n exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()\n\n self.sendData((\n RPC_ERROR,\n request_id,\n (exceptionType.__name__,\n exceptionValue.args[0] if len(exceptionValue.args) == 1 else \"\",\n \"\".join(traceback.format_tb(exceptionTraceback)))\n ))", "def hearError(self, errcode, *args):\n print \"recieved error:\",errcode", "def user_exception(self, frame, exc_info):\n pass", "def printError(s):\r\n sys.stderr.write(\"ERROR: %s\\n\" % s)\r\n sys.exit(-1)", "def _check(self,err):\r\n if err < 0:\r\n buf_size = 128\r\n buf = create_string_buffer('\\000' * buf_size)\r\n self.nidaq.DAQmxGetErrorString(err,byref(buf),buf_size)\r\n raise RuntimeError('NI-DAQ call failed with error %d: %s'%(err,repr(buf.value)))", "def test_class_errored(self, cls, exception):", "def main():\n cause_a_bunch_of_exceptions_to_happen()", "def error(message, code=None):\n print_error(message)\n sys.exit(code or 1)", "def failed(self):\n\t\tpass", "def hdf_internal_error(self,reason):\n\t\tmsg = \"\"\"poretools internal error in file '%s':\n%s\nPlease report this error (with the offending file) to:\n https://github.com/arq5x/poretools/issues\"\"\" % (self.filename, reason)\n\t\tsys.exit(msg)", "def failure_callback(self):\n error_filename = self.run_dir / \"eplusout.err\"\n if error_filename.exists():\n with open(error_filename, \"r\") as stderr:\n stderr_r = stderr.read()\n self.exception = EnergyPlusProcessError(\n cmd=self.cmd, 
stderr=stderr_r, idf=self.idf\n )\n self.cleanup_callback()", "def test_type_error(self):\n self._error_test(TypeError)", "def ERR(self):", "def handle_exception(e):\n print(e)\n return error()", "def _handle_exec_exception(self, err):\r\n\r\n # Log the error if we are debugging\r\n msg = 'Error occurred while evaluating CustomResponse'\r\n log.warning(msg, exc_info=True)\r\n\r\n # Notify student with a student input error\r\n _, _, traceback_obj = sys.exc_info()\r\n raise ResponseError(err.message, traceback_obj)", "def error(self, msg):\n vim.command('call pymode#error(\"%s\")' % str(msg))", "def error_received(self, exc):\n print('Error received:', exc)", "def error(err):\n print(\"ERROR: \" + err)\n avrprog.end()\n while True:\n pass", "def display_error(message, raise_exception = True):\r\n print \"Error:\", message\r\n print\r\n if raise_exception:\r\n raise ExternalCommandFailed\r\n else:\r\n sys.exit(1)", "def error(str):\n\n Utils.send('error', str)", "def show_crash(self):\n print(\"Crash! Oh noes!\")", "def auditmemallocfail(self) :\n\t\ttry :\n\t\t\treturn self._auditmemallocfail\n\t\texcept Exception as e:\n\t\t\traise e", "def die(errmsg):\n eprint(errmsg)\n exit(1)", "def error(self, msg, details = \"\" ):\n\n if details is not None:\n msg += \"\\n\\n\" + details\n\n if not self.is_subprocess:\n self.parser.error(msg)\n else:\n raise Exception(msg)" ]
[ "0.68166107", "0.679111", "0.6713061", "0.64715713", "0.6364697", "0.63401216", "0.6333252", "0.6314151", "0.62917364", "0.6244229", "0.61968243", "0.6178209", "0.6150258", "0.61317104", "0.6127837", "0.6092866", "0.6092197", "0.60902953", "0.60429615", "0.6041189", "0.6018859", "0.60057044", "0.599984", "0.5996369", "0.5976035", "0.5958245", "0.5944744", "0.59354067", "0.59213483", "0.5920147", "0.5915534", "0.59089214", "0.59009016", "0.5900536", "0.58976316", "0.5896494", "0.58801347", "0.58731365", "0.587261", "0.58553237", "0.5835079", "0.5823998", "0.580842", "0.58035344", "0.5801847", "0.5796916", "0.5793619", "0.57926047", "0.579016", "0.5782162", "0.57704425", "0.5763978", "0.5757293", "0.57457715", "0.5721485", "0.57178986", "0.57167643", "0.5712497", "0.571241", "0.5709447", "0.569799", "0.56883794", "0.56870717", "0.5683866", "0.5675616", "0.5668541", "0.5660277", "0.56569093", "0.56470793", "0.56440663", "0.56338227", "0.5629172", "0.56276107", "0.562111", "0.5617635", "0.5612981", "0.5595026", "0.5584026", "0.55814064", "0.5577149", "0.5569806", "0.5559112", "0.5558173", "0.5557346", "0.5553066", "0.5551661", "0.55487823", "0.5542507", "0.5538177", "0.55329657", "0.5532514", "0.55324477", "0.5528462", "0.5526696", "0.5526495", "0.55251706", "0.55245286", "0.55108035", "0.5510741", "0.54997855" ]
0.7311913
0
report a nonruntime error
def report(self, line: int, where: str, message: str): output = f'[line {line}] Error{where}: {message}' print(output, file=sys.stderr) self.had_error = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unexpected_error(self, exception):", "def report_unexpected_exception(self, *args, **kwargs):\n pass", "def reportError(self):\n self.Q['err'].put(sys.exc_info()[:2])", "def error(self):\n pass", "def unexpectedException(self):", "def serious_error(self, e):\n pass", "def error(self):\n ...", "def failed(self):\n\t\tpass", "def error():\r\n raise RuntimeError('admin ticket generator at your service')", "def raise_(err):\n raise err", "def indicate_error(self):\n pass", "def fatal(self, *args, **kwargs):", "def error():\n return None", "def ERR(self):", "def _raise_performing_request_error(self, *args, **kwargs):", "def error_impresion(self):\n self._info(\"error_impresion\")", "def error(self):\n raise NotImplementedError(\"subclasses need to override this method\")", "def check_errors(self) -> None:", "def raise_error(Err):\n raise Err()", "def handle_err(self):\n pass", "def error(self, error):\n pass", "def error_occured(self) -> None:\r\n \r\n warnings.warn(\r\n '''An Error has occured when processing this photo!\r\n The plants are too emerged in some places to analyze.''',\r\n RuntimeWarning)", "def _check(error: int) -> None:\n if error < 0:\n raise RuntimeError(ffi.string(lib.TCOD_get_error()).decode())", "def rescue(self, instance):\n pass", "def throw(self):\n pass", "def ReportError(text):\n raise IOError(text)", "def _RaiseFatal(cls, sub, subargs, errorcode, *args):\n ScriptForge.InvokeSimpleScript('ScriptForge.SF_Utils._EnterFunction', sub, subargs)\n cls.RaiseFatal(errorcode, *args)\n raise RuntimeError(\"The execution of the method '\" + sub.split('.')[-1] + \"' failed. Execution stops.\")", "def error(self, *args, **kwargs):", "def user_exception(self, frame, exc_info):\n pass", "def runtime_error(self, error: 'LoxRuntimeError'):\n output = f'{error.get_message()}{os.linesep}[line {error.token.line}]'\n print(output, file=sys.stderr)\n self.had_runtime_error = False", "def test_does_not_crash(self):\n py_function(6)", "def _check(self,err):\r\n if err < 0:\r\n buf_size = 128\r\n buf = create_string_buffer('\\000' * buf_size)\r\n self.nidaq.DAQmxGetErrorString(err,byref(buf),buf_size)\r\n raise RuntimeError('NI-DAQ call failed with error %d: %s'%(err,repr(buf.value)))", "def raise_fail(*args, **kwargs):\n raise Exception(\"oops\")", "def failure(self, result):\r\n raise NotImplementedError", "def on_failure(self, exc: BaseException) -> None:", "def invalid(self):\n pass", "def test_class_errored(self, cls, exception):", "def test_fails(self):\n raise FoolishError(\"I am a broken test\")", "def handle_execution_exception(self, ex):\n if self.config.raise_errors:\n raise(ex)\n warning(str(ex))", "def assertion_errored(self, func, exception):", "def fail(self, msg=None):\n raise Exception, msg", "def prnt_error():\n print \"Error!\\n\"\n return False", "def indicate_failure(self):\n pass", "def provoke_and_handle_TypeError():\n try:\n print(\"loetungdusohn\" + 3)\n except TypeError as te:\n print(f\"Sorry! 
{te}\")", "def error_throw(self,stage):\n if self.is_table_info == False:\n print(\"please enter table info by table_info()\")\n sys.exit(0)\n if stage == 'rank':\n if self.import_method == 'none':\n self.error_output_import()\n elif stage == 'output':\n if self.import_method == 'none':\n self.error_output_import()\n else: \n if self.rank_method == 'none':\n self.error_output_rank()", "def auditportallocfail(self) :\n\t\ttry :\n\t\t\treturn self._auditportallocfail\n\t\texcept Exception as e:\n\t\t\traise e", "def handle_expt(self):\r\n self._perform_on_error_handling()", "def _check_exc(self):\n if self._exc is not None:\n raise self._exc", "def raise_for_failure(self) -> None:\n if not self.is_success():\n raise exc.ExecutionError(self)", "def syntaxError (self, s) :\r\n report = self.generateReport() + s\r\n raise Exception, report", "def assertion_failed(self, func, exception):", "def getCompilerError():", "def scan_error(self, line: int, message: str):\n self.report(line, \"\", message)", "def exception(self, e):\n pass", "def check_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def instrument_fail(self, req, where):\n\n if where in req[\"file_details\"][\"backend_filename\"]:\n raise Exception(\"Instrumented Failure: %s\" % where)", "def call_error():\r\n print(\"Error in input format.\")\r\n sys.exit()", "def _raise_if_invalid(self):\n if self._stack_result == -1 and self._recm_data == -1:\n error_message = 'Worker result for request ID {} does not exist yet'.format(\n self.external_request_id)\n logger.exception(error_message)\n raise SARBRequestInvalidException(error_message)", "def _fail(raise_):\n if raise_:\n raise _UnexpectedForm()\n return None", "def __call__(self):\r\n raise self", "def __call__(self):\r\n raise self", "def handle_execution_exception(self, ex):\n raise(ex)", "def error(*args, noContext: bool=True, showLineNumber: bool=True, **kwargs)->None:\n pass", "def error(self, msg, *args, **kwargs):\n pass", "def exception(self):\n raise Exception(\"Exception test\")", "def _clean_onerror(func, path, excinfo):\n print(\"%s encountered error when processing %s: %s\" % (func, path, excinfo))", "def _notYetImplemented(self, val=None):\n raise VimbaException(-1001)", "def error(self, message=None, show_help=True):", "def raiseNonRecoverableError(msg):\n error(msg)\n raise NonRecoverableError(msg)", "def error(ctx, flow):\n ctx.log(\"error\")", "def execute_failure(self, *args, **kwargs):\n return 1, \"\", None", "def errFunc(runType):\n logger.error('Execution type not recognized! 
{}'.format(runType))\n raise InvalidExecutionType('{} is not a valid command'.format(runType))", "def _check_error(return_value):\n if return_value < 0:\n raise IOError(pm.lib.Pm_GetErrorText(return_value))", "def _error_handling(self,e,func):\n print(self.type, \" sufferred exception in \" , func , \":\" , e)", "def error_analyze(\n self,\n data_dir: Path,\n processed_data_dir: Path,\n result_dir: Path,\n output_report_dir: Path,\n ) -> NoReturn:\n pass", "def bad(self):\n raise NotImplementedError", "def bad(self):\n raise NotImplementedError", "def auditmemallocfail(self) :\n\t\ttry :\n\t\t\treturn self._auditmemallocfail\n\t\texcept Exception as e:\n\t\t\traise e", "def experiment3():\n raise FAKE_ERROR", "def test_value_error(self):\n self._error_test(ValueError)", "def _handle_general_error(self, calculation):\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report('Calculation failed for a reason that can not be resolved automatically')\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_SOMETHING_WENT_WRONG)", "def error(msg):\n\n raise Exception(msg)", "def test():\n\ttry:\n\t\tprint \"Raising ParseErrror.\"\n\t\traise ParseError\n\texcept ParseError:\n\t\tprint \"Caught ParseError.\"", "async def rejected(error: Exception) -> Any:\n raise error", "def failure(self, cb: CircuitBreaker, exc: BaseException) -> None:", "def send_rpc_error(req, rpcreq, e):", "def test_runtime_errors(self, graph_entry_class):\n graph_entry_class.return_value.state = \"Pending\"\n graph_entry_class.return_value.path = \"foo/app1\"\n graph_entry_class.return_value.execute.return_value = (0, ['Failure'], False)\n\n graph = ApplyGraph('plan', self.graph, self.post_graph, \"foo\")\n\n self.assertRaises(RuntimeError, graph.execute_graph())\n self.assertRaises(RuntimeError, graph.execute_post_graph())", "def die(self):\n pass", "def main():\n cause_a_bunch_of_exceptions_to_happen()", "def test_init_err_size(self):\n with self.assertRaises(InitializationException):\n pyint = Interpreter(size=INVALID_LOOP)", "def test_info_fail(self):\n path = \"non_existing_audio.wav\"\n with self.assertRaisesRegex(RuntimeError, path):\n self._info(path)", "def flask_force_error():\n raise Exception('forced 500 error')", "def throw(self, type, value=None, traceback=None):\n pass", "def _error(self, *args, **kwargs):\n print(\"[{}]\".format(self.type), *args, file=sys.stderr, **kwargs)\n sys.exit(1)", "def _raise_value_error(is_gt, tracker, seq):\n if is_gt:\n raise TrackEvalException(\n 'GT data for sequence %s cannot be converted to the right format. Is data corrupted?' % seq)\n else:\n raise TrackEvalException(\n 'Tracking data from tracker %s, sequence %s cannot be converted to the right format. '\n 'Is data corrupted?' % (tracker, seq))", "def exception(self, *args, **kwargs):", "def pytest_internalerror(self, excrepr, excinfo):\n for line in str(excrepr).split(\"\\n\"):\n sys.stderr.write(\"INTERNALERROR> {}\\n\".format(line))\n sys.stderr.flush()\n tb = _postmortem_traceback(excinfo)\n post_mortem(tb, excinfo)", "def _err(self, *args):\n logger.error(*args)\n exit(1)", "def test_notrunerror(self, MetricClass):\n m = MetricClass()\n with pytest.raises(NotRunError):\n RandomTrader(seed=42).evaluate(m)", "def test_broken_error_descriptor(self):\r\n with self.assertRaises(TestException):\r\n module = self.descriptor._xmodule", "def _ps_error(e):\n\n error(None, str(e))" ]
[ "0.72789407", "0.7216216", "0.7035961", "0.7010877", "0.69478387", "0.6807928", "0.68002", "0.6673932", "0.6667403", "0.6664058", "0.6661008", "0.66161615", "0.6548039", "0.6492653", "0.6476202", "0.64534765", "0.64285237", "0.64251786", "0.6421437", "0.64072937", "0.63590854", "0.63338715", "0.63032943", "0.6299622", "0.62933505", "0.6281427", "0.62542033", "0.62520003", "0.62325585", "0.6232205", "0.620463", "0.62027127", "0.6200367", "0.6192628", "0.61876065", "0.61577374", "0.61556983", "0.6127782", "0.61126465", "0.6110671", "0.6088474", "0.608345", "0.60818344", "0.60773003", "0.6064965", "0.6051943", "0.6034683", "0.60337627", "0.60229194", "0.60162073", "0.60156643", "0.6001804", "0.5995605", "0.59933895", "0.5984276", "0.5979349", "0.5978349", "0.59711915", "0.5961815", "0.59515375", "0.59515375", "0.5942605", "0.59384066", "0.5936895", "0.5932078", "0.59286743", "0.59253234", "0.5923634", "0.5912061", "0.5908171", "0.5890272", "0.5885423", "0.5880884", "0.58761954", "0.58749586", "0.58749443", "0.58749443", "0.58734757", "0.58661866", "0.5864159", "0.5862098", "0.58557546", "0.5848743", "0.58409894", "0.5835444", "0.5833152", "0.5832697", "0.5824352", "0.5822414", "0.58217347", "0.5813827", "0.57994753", "0.5799458", "0.57956654", "0.5792785", "0.57917947", "0.5789696", "0.578431", "0.5781774", "0.57794696", "0.5773292" ]
0.0
-1
Use an explicit connect/quit here, as other tests use the context manager.
async def test_plain_smtp_connect(preset_client):
    await preset_client.connect()
    assert preset_client.is_connected

    await preset_client.quit()
    assert not preset_client.is_connected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_connection():\r\n try:\r\n connect()\r\n except:\r\n pass\r\n print ('Unable to connect.')\r\n else:\r\n main()", "def quit(self):\n \n if 'driver' in self.__dict__:\n self.driver.quit()\n if 'session' in self.__dict__:\n self.session.close()\n if 'conn' in self.__dict__:\n self.conn.close()", "def test_context_manager(self):\n with database.Database() as db:\n pass\n self.assertTrue(self.mocked_connection.close.call_count is 1)", "def test_close_after_handshake(self) -> None:\n self.start_dummy_server()\n\n sock = socket.create_connection((self.host, self.port))\n with SSLTransport(\n sock, self.client_context, server_hostname=\"localhost\"\n ) as ssock:\n ssock.close()\n with pytest.raises(OSError):\n ssock.send(b\"blaaargh\")", "def test_control(self, ping_fixture):\n engine = ping_fixture\n\n with expect_raises_message(\n exc.DBAPIError, \"unhandled disconnect situation\"\n ):\n engine.connect()", "def test_terminate_run(self):\n pass", "def tearDown(self):\n self.conn.close()", "def test_disconnect(self):\n\n\t\twith EchoServer(bind_addr = ('', 12122)):\n\t\t\tyield reactor.schedule()\n\t\t\tclient = tcp.TCPConnection(remote_addr = ('localhost', 12122))\n\t\t\tyield client.connect()\n\n\t\t\tclient.close()", "def test_closed(self):\n server, client = loopback()\n server.sock_shutdown(2)\n with pytest.raises(SysCallError) as err:\n server.sendall(b\"hello, world\")\n if platform == \"win32\":\n assert err.value.args[0] == ESHUTDOWN\n else:\n assert err.value.args[0] == EPIPE", "def testBasicQuit(self):\n self.client_connect()\n self.client_send(\"get keyNotThere\\r\\n\")\n self.mock_recv('get keyNotThere\\r\\n')\n self.mock_send('END\\r\\n')\n self.client_recv(\"END\\r\\n\")\n\n self.client_send(\"quit\\r\\n\")\n self.assertTrue(self.mock_quiet())", "def test_event_handler_can_upgrade_disconnect(self, ping_fixture):\n\n engine = ping_fixture\n\n @event.listens_for(engine, \"handle_error\")\n def setup_disconnect(ctx):\n assert ctx.is_pre_ping\n ctx.is_disconnect = True\n\n conn = engine.connect()\n # no error\n conn.close()", "def test_Connector_close_kills_thread() -> None:\n # open and close Connector object\n connector = Connector()\n # verify background thread exists\n assert connector._thread\n connector.close()\n # check that connector thread is no longer running\n assert connector._thread.is_alive() is False", "async def test_connect_and_disconnect(database_url):\n database = Database(database_url)\n\n assert not database.is_connected\n await database.connect()\n assert database.is_connected\n await database.disconnect()\n assert not database.is_connected", "def test_connect(connection, events, writer, schedule, flush):\n schedule(connection.connect())\n flush()\n assert connection.connected\n assert not writer.closed\n assert events.triggered(\"CLIENT_CONNECT\")", "def test_reconnect(self):\n\n # make a connection\n\n conn = self.db.connect()\n\n # connection works\n\n conn.execute(select(1))\n\n # create a second connection within the pool, which we'll ensure\n # also goes away\n\n conn2 = self.db.connect()\n conn2.close()\n\n # two connections opened total now\n\n assert len(self.dbapi.connections) == 2\n\n # set it to fail\n\n self.dbapi.shutdown()\n\n # force windows monotonic timer to definitely increment\n time.sleep(0.5)\n\n # close on DBAPI connection occurs here, as it is detected\n # as invalid.\n assert_raises(tsa.exc.DBAPIError, conn.execute, select(1))\n\n # assert was invalidated\n\n assert not conn.closed\n assert conn.invalidated\n\n # close 
shouldn't break\n\n conn.close()\n\n # ensure one connection closed...\n eq_(\n [c.close.mock_calls for c in self.dbapi.connections],\n [[call()], []],\n )\n\n conn = self.db.connect()\n\n eq_(\n [c.close.mock_calls for c in self.dbapi.connections],\n [[call()], [call()], []],\n )\n\n conn.execute(select(1))\n conn.close()\n\n eq_(\n [c.close.mock_calls for c in self.dbapi.connections],\n [[call()], [call()], []],\n )", "def testConnectionMade(self):\n d = self._getClientConnection()\n d.addCallback(lambda _: self.client.disconnect())\n return d", "def test_close(self):\n db = database.Database()\n db.close()\n self.assertTrue(self.mocked_connection.close.call_count is 1)", "def tearDown(self):\n self.loop.close()", "def tearDown(self):\n self.loop.close()", "def tearDown(self):\n self.loop.close()", "def tearDown(self):\n self.loop.close()", "def test_start_closed_socket(self) -> None:\n sock = socket.socket(socket.AF_INET)\n context = ssl.create_default_context()\n sock.close()\n with pytest.raises(OSError):\n SSLTransport(sock, context)", "def test_connection(self) -> None:\n test_connection_fn = get_test_connection_fn(self.service_connection)\n test_connection_fn(self.engine)", "def test_connection(self):\n self._bind_to_service()", "def test_connect(self):\n db = Database.TestDB(self.mktemp())\n self.assertFalse(db.initialized)\n yield db.open()\n self.assertTrue(db.initialized)\n db.close()", "def test_disconnect_closed(self):\n self.sock.close()\n self.inverter.sock.close()\n self.inverter.sock_file.close()\n self.inverter.disconnect() # Should not raise exception", "def test_connect(rgd):\n assert rgd.connected is True", "def test_shutdown_closed(self):\n server, client = loopback()\n server.sock_shutdown(2)\n with pytest.raises(SysCallError) as exc:\n server.shutdown()\n if platform == \"win32\":\n assert exc.value.args[0] == ESHUTDOWN\n else:\n assert exc.value.args[0] == EPIPE", "def test_004_connect(self):\n HEADING()\n self.db.connect()\n\n result = True\n assert result", "def setUp(self):\n\t\tself.conn = Client([\"127.0.0.1:11211\"], debug = 1)", "def test_downgrade_control(self, ping_fixture_all_errs_disconnect):\n\n engine = ping_fixture_all_errs_disconnect\n\n conn = engine.connect()\n conn.close()", "def teardown_module(module):\n cur.close()\n conn.close()", "async def test_connect_ConnectorLoopError() -> None:\n current_loop = asyncio.get_running_loop()\n connector = Connector(loop=current_loop)\n # try to connect using current thread's loop, should raise error\n pytest.raises(\n ConnectorLoopError,\n connector.connect,\n \"my-project:my-region:my-instance\",\n \"pg8000\",\n )", "def test_connection_duplication():", "def pytest_unconfigure() -> None: # pragma: no cover\n if PROC.exitcode is None:\n assert PROC.pid is not None # not sure if this can happen (mypy error); if it does, be explicit\n os.kill(PROC.pid, signal.SIGINT)\n PROC.join(5)\n if PROC.exitcode is None:\n PROC.kill()\n PROC.join()\n print(\"\\nServer app terminated, logs in logs/server.log\")", "def tearDown(self) -> None:\n self.inverter.disconnect()\n self.sock.close()", "def connection_teardown():\n test_str = make_random(100)\n server = start_server()\n client = start_client()\n\n # First write some data at both ends.\n write_to(client, test_str)\n write_to(server, test_str)\n time.sleep(TEST_TIMEOUT)\n\n # Write EOFs on both sides.\n write_to(client, '\\x1a')\n write_to(server, '\\x1a')\n client.stdin.close()\n server.stdin.close()\n time.sleep(TEST_TIMEOUT)\n\n return (\n DEBUG_TEARDOWN in 
read_debug_messages_from(client) and\n DEBUG_TEARDOWN in read_debug_messages_from(server)\n )", "def test_finish_connection(tchannel_pair):\n server, client = tchannel_pair\n client.ping()\n client._connection._connection.close()\n\n def _handle(data, connection):\n pass\n server.handle_calls(_handle)", "def tearDown(self):\n self.brow.quit()", "def test_correct_connection(self, mock_execute, mock_parse):\n mock_parse.return_value = ARGS_INPUT\n result = CONNECTION_KILL\n mock_execute.return_value = result\n actual_result = connection_to_server_to_kill_iperf()\n self.assertIs(actual_result.exit_code, 0)\n self.assertIs(actual_result.error, '')\n self.assertIs(actual_result.exit_code_execution, '')\n self.assertIs(actual_result.output, 0)", "def tearDown(self):\n self.connection.close()\n ENGINE.clear()", "def test_event_handler_didnt_downgrade_disconnect(\n self, ping_fixture_all_errs_disconnect\n ):\n engine = ping_fixture_all_errs_disconnect\n\n @event.listens_for(engine, \"handle_error\")\n def setup_disconnect(ctx):\n assert ctx.is_pre_ping\n assert ctx.is_disconnect\n\n conn = engine.connect()\n conn.close()", "def test_run_without_message(connection, events, loop):\n loop.run_until_complete(connection.run())\n assert events.triggered(\"CLIENT_CONNECT\")\n assert events.triggered(\"CLIENT_DISCONNECT\")", "def test_close():\n while True:\n yield", "def test_connect(server):\n assert server", "def test_close_cnxn():\n\n cnxn = connect()\n cursor = cnxn.cursor()\n\n cursor.execute(\"drop table if exists t1\")\n cursor.execute(\"create table t1(id integer, s varchar(20))\")\n cursor.execute(\"insert into t1 values (?,?)\", 1, 'test')\n cursor.execute(\"select * from t1\")\n\n cnxn.close()\n\n # Now that the connection is closed, we expect an exception. 
(If the code attempts to use\n # the HSTMT, we'll get an access violation instead.)\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute(\"select * from t1\")", "def test_connect_sockets_test_proxy(self):\n print('\\n--------------------\\nRunning test_connect_sockets_test_proxy')\n\n t1 = threading.Thread(target=self.run_game, args=(0,))\n users_result = []\n t2 = threading.Thread(target=self.connect_sockets, args=(users_result,))\n\n t2.start()\n #wait for clients to exit before server can exit\n t1.start()\n t2.join()\n t1.join()\n self.assertTrue(all(users_result))\n\n print('closing server')\n #self.server.close()", "def test_disconnect_with_rollback(self):\n\n mock_connector = MagicMock()\n database = Database()\n database.connect(connector_impl=mock_connector)\n\n # rollback\n database.disconnect(False)\n\n connection = mock_connector.connect()\n self.assertFalse(connection.commit.called)\n self.assertTrue(connection.close.called)", "async def test_timeout_disconnect():\n communicator = WebsocketCommunicator(ErrorWebsocketApp(), \"/testws/\")\n # Test connection\n connected, subprotocol = await communicator.connect()\n assert connected\n assert subprotocol is None\n # Test sending text (will error internally)\n await communicator.send_to(text_data=\"hello\")\n with pytest.raises(asyncio.TimeoutError):\n await communicator.receive_from()\n # Close out\n await communicator.disconnect()", "def test_main(mock_timesleep,mock_network,mock_machine_pin):\n with pytest.raises(InterruptedError):\n AppSwitch.main()", "def testClose(t, env):\n c = env.c1\n c.init_connection()\n fh, stateid = c.create_confirm(t.code)\n ops = c.use_obj(fh)\n ops += [c.close_op(c.get_seqid(t.code), stateid)]\n _replay(c, ops)", "def perform_teardown():\n global credentials, connection, channel\n connection.close()", "def test_connection_fail(context_fixture):\n with pytest.raises(SystemExit):\n context_fixture('RequestException')", "def test_close_connection(self):\n self.conn2 = SolrConnection(SOLR_HTTP)\n self.conn2.conn.request(\"GET\", SOLR_PATH)\n self.conn2.close()\n\n # Closing the Solr connection should close the underlying\n # HTTPConnection's socket.\n self.assertEquals(self.conn2.conn.sock, None, \"Connection not closed\")", "def test_shutdown_wrong_args(self):\n connection = Connection(Context(SSLv23_METHOD), None)\n with pytest.raises(TypeError):\n connection.set_shutdown(None)", "def test_connect_error():\n queue = RedisQueue('localhost')\n assert not queue.is_connected\n with pytest.raises(ConnectionError):\n queue.put('test')\n with pytest.raises(ConnectionError):\n queue.get()", "async def test_libp2pclientconnection_connect_disconnect(self):\n assert self.connection.is_connected is False\n try:\n await self.connection_node.connect()\n await self.connection.connect()\n assert self.connection.is_connected is True\n\n await self.connection.disconnect()\n assert self.connection.is_connected is False\n except Exception:\n raise\n finally:\n await self.connection_node.disconnect()", "def test_teardown(self):\n with pytest.raises(NotImplementedError):\n self.handler.teardown()", "def test_unix_client_system_connection(core_session, agent_enrolled_unix_system_with_users, proxy_start_stop):\n\n \"\"\"\n Testrail Link:\n https://testrail.centrify.com/index.php?/cases/view/1293084\n https://testrail.centrify.com/index.php?/cases/view/1293085\n https://testrail.centrify.com/index.php?/cases/view/1293086\n \"\"\"\n\n # verfiy the test is run with single thread.\n assert 'PYTEST_XDIST_WORKER_COUNT' 
not in os.environ, \\\n f'This test cannot be run with multiple threads due to starting and stopping connectors'\n\n enrolledsystems = agent_enrolled_unix_system_with_users\n session = enrolledsystems[0][\"Session\"]\n resourceid = enrolledsystems[0][\"ResourceId\"]\n proxyid = enrolledsystems[0][\"ProxyId\"]\n proxycontrol = proxy_start_stop\n\n logger.info(\"stop the agent\")\n ssh_manager.ssh_stop_agent(session)\n logger.info(\"start the connector\")\n proxycontrol(proxyid, True)\n\n logger.info(\"Testing connection to the computer, Connector is ready\")\n result, success = ResourceManager.get_system_health(core_session, resourceid)\n assert success and result == 'OK', f\"Unable to verify system is reachable {result} {success}\"\n\n # stop Conector , Should fail\n logger.info(\"Stopping the connector\")\n proxycontrol(proxyid, False)\n logger.info(\"Testing connection to the system\")\n result, success = ResourceManager.get_system_health(core_session, resourceid)\n assert success and result != 'OK', f\"cerify system is reachable {result} {success}\"\n\n # Start agent\n logger.info(\"Starting the agent\")\n ssh_manager.ssh_start_agent(session, True)\n logger.info(\"Testing connection to the computer, agent is available.\")\n result, success = ResourceManager.get_system_health(core_session, resourceid)\n assert success and result == 'OK', f\"Unable to verify system is reachable {result} {success}\"\n\n # verify account again, both connector and agent are running \n proxycontrol(proxyid, True)\n logger.info(\"Testing connection to the computer, both agent and connector are available\")\n result, success = ResourceManager.get_system_health(core_session, resourceid)\n assert success and result == 'OK', f\"Unable to verify system is reachable {result} {success}\"", "def runTest(self):\n unittest.main()\n ChoreTest.clean_up()", "async def test_context_manager_disconnect_handling(preset_server, event_loop):\n preset_client = SMTP(\n hostname=preset_server.hostname, port=preset_server.port, loop=event_loop\n )\n\n async with preset_client:\n assert preset_client.is_connected\n\n preset_server.responses.append(b\"250 noop\")\n preset_server.drop_connection_event.set()\n\n try:\n await preset_client.noop()\n except SMTPServerDisconnected:\n pass\n\n assert not preset_client.is_connected", "def test_connect_traced():\n with override_config(\"psycopg\", {\"trace_connect\": True}):\n conn = psycopg2.connect(**POSTGRES_CONFIG)\n assert conn", "def tearDown(self):\n self.client_socket.shutdown(socket.SHUT_RDWR)\n self.client_socket.close()", "def test_disconnect_before_connect(connection, events, schedule, flush):\n schedule(connection.disconnect())\n flush()\n assert not connection.connected\n assert not events.triggered(\"CLIENT_CONNECT\")\n assert not events.triggered(\"CLIENT_DISCONNECT\")", "def test_shutdown(self):\n server = self._server(None)\n server.bio_shutdown()\n with pytest.raises(Error) as err:\n server.recv(1024)\n # We don't want WantReadError or ZeroReturnError or anything - it's a\n # handshake failure.\n assert type(err.value) in [Error, SysCallError]", "def test_rmq_es_connector_connections():\n rmq_es = RmqEs()\n rmq_es.connections(False)", "def test_connect_traced():\n with override_config(\"psycopg\", {\"trace_connect\": True}):\n conn = psycopg.connect(**POSTGRES_CONFIG)\n assert conn", "def nepc_connect(local, dbug):\n if dbug:\n print(\"opening database connection\")\n cnx, cursor = nepc.connect(local, dbug, test=True)\n yield [cnx, cursor]\n if dbug:\n print(\"closing 
database connection\")\n cursor.close()\n cnx.close()", "def test_already_connected(connection, events, writer, schedule, flush):\n schedule(connection.connect(), connection.connect())\n flush()\n assert not writer.closed\n assert events.triggered(\"CLIENT_CONNECT\")", "def test_client(client):\n\n response = client.get('/hello')\n assert response.data == b'Hello World'\n assert response.status == '200 OK'\n assert db_wrapper.database.is_closed()", "def test_connect_success():\n\n t = Thread(target=setup_socket)\n t.start()\n\n data_sender = DataSender('127.0.0.1', 12345)\n server_response = data_sender.notify('test')\n\n assert server_response == 'ok'\n\n data_sender.close()\n t.join()", "async def test_create_async_connector() -> None:\n connector = await create_async_connector()\n assert connector._loop == asyncio.get_running_loop()\n await connector.close_async()", "def test_connectFailure(self):\n db = Database.TestDB(self.mktemp())\n # Make _db_init fail\n db._db_init = lambda: 1 / 0\n self.assertFalse(db.initialized)\n try:\n yield db.open()\n except:\n pass\n self.assertFalse(db.initialized)\n self.assertEquals(db.pool, None)", "def tearDown(self):\n self.session.close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.conn.close()\n if exc_val:\n raise", "def test_start_sameconnector_twice_with_noreconnecting_on_failure(self):\n\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.reconnectOnConnectionFailure = False\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n startRet = yield self.start(localConfig.id)\n\n self.assertEqual(True, startRet)\n\n yield self.stopall()\n\n # Give a grace time for stopping\n yield waitFor(0.2)", "def test_disconnect_with_commit(self):\n\n mock_connector = MagicMock()\n database = Database()\n database.connect(connector_impl=mock_connector)\n\n # commit\n database.disconnect(True)\n\n connection = mock_connector.connect()\n self.assertTrue(connection.commit.called)\n self.assertTrue(connection.close.called)", "def teardown_zodb(**kw):\n app.db.close()\n mlog.getChild(\"teardown_zodb\").info(\"ZODB connection closed\")", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.session.close()", "def setUp(self) -> None:\n local_sock, remote_sock = socketpair()\n local_sock.settimeout(1.0)\n remote_sock.settimeout(1.0)\n self.inverter = KeepAliveInverter(local_sock, None, keep_alive=0.01)\n self.sock = remote_sock", "def test_connection(self):\n r = main.List.connection()\n self.assertTrue(r.ping(), \"Connection failed.\")", "def tearDown(self):\n if self.__channel:\n self.__channel.close()\n return", "def test_event_handler_didnt_upgrade_disconnect(self, ping_fixture):\n engine = ping_fixture\n\n @event.listens_for(engine, \"handle_error\")\n def setup_disconnect(ctx):\n assert not ctx.is_disconnect\n\n with expect_raises_message(\n exc.DBAPIError, \"unhandled disconnect situation\"\n ):\n engine.connect()", "def tearDown(self) -> None:\n self.inverter.sock.close()\n self.sock.close()", "async def test_disconnect(self):\n await self.webhook_connection.connect()\n assert self.webhook_connection.is_connected is True\n\n await self.webhook_connection.disconnect()\n assert self.webhook_connection.is_connected is False", "async def _teardown(self, commit: bool = None):", "def test_terminate_session(get_interface_params):\n from sail_on_client.protocol.localinterface import LocalInterface\n\n config_directory, config_name = get_interface_params\n local_interface = 
LocalInterface(config_name, config_directory)\n session_id = _initialize_session(local_interface, \"OND\")\n local_interface.terminate_session(session_id)", "def setUp(self):\n self.c = Client(host=\"localhost\")", "def test_connectEvent(self):\n reactor = self.buildReactor()\n\n self.listen(reactor, ServerFactory.forProtocol(Protocol))\n connected = []\n\n class CheckConnection(Protocol):\n def connectionMade(self):\n connected.append(self)\n reactor.stop()\n\n clientFactory = Stop(reactor)\n clientFactory.protocol = CheckConnection\n\n needsRunningReactor(reactor, lambda: self.connect(reactor, clientFactory))\n\n reactor.run()\n\n self.assertTrue(connected)", "def __exit__(self, exc_type, exc_val, exc_tb):\n\n self.quit()", "def test_connect_default():\n conn = psycopg2.connect(**POSTGRES_CONFIG)\n assert conn", "def test_readersender_context():\n from .context import readersender\n\n with readersender.ReaderSender() as rs:\n assert rs.connected\n rs.disconnect()\n assert not rs.connected", "def test_disconnect(writer, patch_connection, events, connection,\n schedule, flush):\n schedule(connection.connect(), connection.disconnect())\n flush()\n assert not connection.connected\n assert writer.closed\n assert connection.writer is None\n assert events.triggered(\"CLIENT_CONNECT\")\n assert events.triggered(\"CLIENT_DISCONNECT\")", "def teardown_request(exception):\r\n try:\r\n g.conn.close()\r\n except Exception as e:\r\n pass", "def teardown_request(exception):\r\n try:\r\n g.conn.close()\r\n except Exception as e:\r\n pass", "def teardown_request(exception):\r\n try:\r\n g.conn.close()\r\n except Exception as e:\r\n pass", "def teardown(self, rc):\n pass", "def test_cleanup_procs(self):\n self.skip_teardown = True\n pool = self.get_pool()\n container = self.get_container(pool)\n dfuse = get_dfuse(self, self.hostlist_clients)\n start_dfuse(self, dfuse, pool=pool, container=container)", "def test_connect_opens_connection(self):\n\n mock_connector = MagicMock()\n database = Database()\n\n database.connect(connector_impl=mock_connector)\n\n self.assertTrue(mock_connector.connect.called)", "def setUp(self):\n self.transport = StringTransport()\n self.protocol = IRCClient()\n self.protocol.performLogin = False\n self.protocol.makeConnection(self.transport)\n\n # Sanity check - we don't want anything to have happened at this\n # point, since we're not in a test yet.\n self.assertEqualBufferValue(self.transport.value(), \"\")\n\n self.addCleanup(self.transport.loseConnection)\n self.addCleanup(self.protocol.connectionLost, None)" ]
[ "0.6924016", "0.65419316", "0.6448723", "0.6423238", "0.6363327", "0.6345118", "0.62429655", "0.62380254", "0.62205863", "0.6215696", "0.61930496", "0.6161508", "0.61469656", "0.614523", "0.6138414", "0.6116715", "0.60803974", "0.60480607", "0.60480607", "0.60480607", "0.60480607", "0.6015845", "0.6007281", "0.60056853", "0.6003292", "0.6002817", "0.5965771", "0.59640235", "0.59549075", "0.59275067", "0.59269077", "0.5923149", "0.5908869", "0.5902237", "0.58966607", "0.58812195", "0.587675", "0.5872926", "0.5857819", "0.5850618", "0.5838395", "0.5825644", "0.58221287", "0.5819573", "0.5814062", "0.580479", "0.5804746", "0.58018386", "0.5795067", "0.5787593", "0.5783285", "0.5773072", "0.57664853", "0.5763714", "0.5759466", "0.5755282", "0.5749674", "0.57487077", "0.5740028", "0.5731622", "0.572999", "0.5727307", "0.57261217", "0.5725337", "0.5718234", "0.57147634", "0.5713143", "0.57107913", "0.5710184", "0.5709271", "0.5706856", "0.5702482", "0.5701584", "0.56983775", "0.56948", "0.5685763", "0.56762016", "0.56510603", "0.56497145", "0.5643815", "0.563893", "0.5633116", "0.5631576", "0.56292653", "0.5606013", "0.56027526", "0.56018615", "0.5597217", "0.55949634", "0.559188", "0.55862266", "0.5584917", "0.5578642", "0.5576146", "0.5576146", "0.5576146", "0.5570932", "0.55687445", "0.55670947", "0.55650246" ]
0.60078937
22
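The record above pairs an explicit connect()/quit() test with negatives that exercise the context-manager form (async with). For contrast, a minimal sketch of that counterpart, assuming an aiosmtplib-style SMTP client, a preset_server fixture exposing hostname and port, and pytest-asyncio driving the coroutine; the test name, fixture, and decorator are illustrative, not taken from the dataset:

import pytest
from aiosmtplib import SMTP


@pytest.mark.asyncio
async def test_smtp_context_manager(preset_server):
    # Entering the context manager performs the connect; leaving it sends QUIT.
    client = SMTP(hostname=preset_server.hostname, port=preset_server.port)
    async with client:
        assert client.is_connected
    assert not client.is_connected

This is the implicit connect/disconnect path that the explicit connect()/quit() test above deliberately avoids.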
Note: whether SMTPTimeoutError or SMTPConnectError is raised here depends on processing time.
async def test_connect_error_with_no_server(event_loop):
    client = SMTP(hostname="127.0.0.1", port=65534, loop=event_loop)

    with pytest.raises(SMTPConnectError):
        await client.connect(timeout=0.1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _connect_smtp(self):\n smtp = None\n try:\n smtp = smtplib.SMTP(self.servername, timeout = self.timeout)\n except smtplib.SMTPException as err:\n log.critical('smtp service at {} is not currently available'.format(self.servername))\n log.critical(err)\n except Exception as err:\n log.critical('smtp other error {} is not currently available'.format(self.servername))\n log.critical(err)\n \n if self.auth is not None:\n try:\n smtp.login(self.auth[0], self.auth[1])\n except smtplib.SMTPException as err:\n log.warn('smtp service login error for {}'.format(self.servername))\n log.warn(err)\n return smtp", "async def test_timeout_error_with_no_server(event_loop):\n client = SMTP(hostname=\"127.0.0.1\", port=65534, loop=event_loop)\n\n with pytest.raises(SMTPTimeoutError):\n await client.connect(timeout=0.000000001)", "def Check_SMTP(name, my_ip):\n\n if nslookup(name)[0] != 0:\n add_info (name, SMTP_SERVER, \"cannot resolve SMTP server\")\n return 1\n if ping_machine(name) != 0:\n add_info(name, SMTP_SERVER, \"cannot ping SMTP server\")\n return 2\n\n status, err = tryconnect(name, SMTP_PORT)\n if status == 1 or status == 2:\n add_info(name, SMTP_SERVER, err)\n if status == 1:\n # if we time'd out, things can still be OK (say reverse DNS problems)\n # so return only an error if no timeout\n return 3\n\n stat, out = port_talker.TCPTalk(name, SMTP_PORT,\n 60, # timeout (>30sec for messed up servers)\n \"HELO \" + my_ip + \"\\r\\nQUIT\\r\\n\",\n None, # terminator\n 1024, # max len\n 1) # use external resolver\n\n # expected answer:\n #220 'mail.forobozz.com' ESMTP\n #250 mail.frobozz.com Hello grue.frobozz.com [192.168.0.21], pleased to meet ya\n #221 mail.frobozz.com closing connection\n\n # Each line can be repeated several times, so we check that all codes appear\n # and that no other codes appear\n codes = map(lambda x: x[:4], string.split(out, '\\n'))\n valid_codes = ('220 ', '250 ', '221 ', '')\n try:\n for code in codes:\n assert(code in valid_codes)\n for valid in valid_codes:\n assert(valid in codes)\n except:\n # If we wanted, we could check whether reverse DNS lookup is not working.\n # This would be the most likely explanation\n add_info(name, SMTP_SERVER, \"cannot HELO SMTP server\")\n return 4\n add_info(name, SMTP_SERVER, \"OK\")\n return 0", "def send_mail_when_failed(self, body):\r\n pass", "def EmailError(error_messages, to_address, from_address, smtp_server, subject):\n error_string = '\\n'.join(error_messages)\n if( None not in [error_string, to_address, from_address, smtp_server] ):\n email_message = MIMEMultipart('alternative')\n email_message['Subject'] = subject\n email_message['From'] = from_address\n email_message['To'] = to_address\n\n html_message = ''\n for error in error_messages:\n html_message = '%s<br/><h4>%s</h4><p>%s</p>' % (html_message, \n error.split('\\n')[0], error.lstrip('%s\\n' % \\\n error.split('\\n')[0]).replace('\\n','<br/>'))\n html_message = '<html><head></head><body>%s</body></html>' % html_message\n\n email_message.attach(MIMEText(error_string, 'plain'))\n email_message.attach(MIMEText(html_message, 'html'))\n\n smtp_handle = None\n try:\n smtp_handle = smtplib.SMTP(smtp_server)\n except socket.gaierror:\n print '%s is an invalid smtp server.' % smtp_server\n except smtplib.SMTPConnectError:\n print 'Failed to connect to %s.' % smtp_server\n if( smtp_handle is not None ):\n try:\n smtp_handle.sendmail(from_address,[to_address],\n email_message.as_string())\n except smtplib.SMTPRecipientsRefused:\n print '%s is an invalid email address.' 
% to_address\n smtp_handle.quit()", "def test_failed_email(self):\n self.assertEqual(send_email(\"testtestcom\", \"test\", \"test\"), 'There was an error sending')", "def test_smtp(self):\n self._endpointServerTest(\"smtp\", protocols.SMTPFactory)", "def connect(smtp_url: str, timeout: Optional[float] = None) -> smtplib.SMTP:\n return smtplib.SMTP(smtp_url, timeout=timeout)", "def SendTimeout(self) -> int:", "def SendTimeout(self) -> int:", "async def test_expn_error(\n smtp_client: SMTP, smtpd_server: asyncio.AbstractServer\n) -> None:\n async with smtp_client:\n with pytest.raises(SMTPResponseException):\n await smtp_client.expn(\"a-list\")", "def test_conn_err_retry(self, retry, get_conn):\r\n get_conn.return_value.open.side_effect = SMTPConnectError(424, \"Bad Connection\")\r\n\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'myself',\r\n 'subject': 'test subject for myself',\r\n 'message': 'test message for myself'\r\n }\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n self.assertTrue(retry.called)\r\n (__, kwargs) = retry.call_args\r\n exc = kwargs['exc']\r\n self.assertIsInstance(exc, SMTPConnectError)", "def test_endpointSMTP(self):\n self._endpointTest(\"smtp\")", "def postprocess():\n if ERRORS:\n address = '[email protected]'\n body = '\\n\\n'.join( ERRORS )\n msg = create_message( body, address )\n send_mail( msg, address )", "def send_mail_raise_smtp(messages):\n raise SMTPRecipientsRefused(recipients=messages[0].recipients())", "def send_error_email(receiver_email, subject, body):\n\n sender_email = \"[email protected]\"\n\n with open(CWD(\"mailcreds.txt\"), \"r\") as file:\n password = file.read()\n\n # Create a multipart message and set headers\n message = MIMEMultipart()\n message[\"From\"] = sender_email\n message[\"To\"] = receiver_email\n message[\"Subject\"] = subject\n\n # Add body to email\n message.attach(MIMEText(body, \"plain\"))\n\n text = message.as_string()\n\n # Log in to server using secure context and send email\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(\"smtp.gmail.com\", 465, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(sender_email, receiver_email, text)", "def connection_timedout(self):\n\n try:\n message = (\n f\"Connection to {self.channel} timed out, was the channel\"\n \" spelt correctly and is port 6667 open?\\n\"\n )\n self.send_to_outputfield(message)\n except Exception as e:\n logging.error(str(e))\n logging.exception(\"Exception : \")\n self.close()", "def test_data_err_retry(self, retry, get_conn):\r\n get_conn.return_value.send_messages.side_effect = SMTPDataError(455, \"Throttling: Sending rate exceeded\")\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'myself',\r\n 'subject': 'test subject for myself',\r\n 'message': 'test message for myself'\r\n }\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n # Test that we retry upon hitting a 4xx error\r\n self.assertTrue(retry.called)\r\n (__, kwargs) = retry.call_args\r\n exc = kwargs['exc']\r\n self.assertIsInstance(exc, SMTPDataError)", "def send_failed(self, message, exc=None):\n with self.app.peers_lock:\n self.declare_no_connection(self.app.peers[message.to])\n return None", "def __init__(self, smtp_server, smtp_user, smtp_password,\n smtp_port=25, is_with_tls=False):\n self.smtp_server = smtp_server\n 
self.smtp_port = smtp_port\n self.smtp_user = smtp_user\n self.smtp_password = smtp_password\n self.is_with_tls = is_with_tls", "def send_email(self, text):\n msg_text = MIMEText(text)\n msg_text['Subject'] = '[WebSite Watchdog] Failure'\n msg_text['From'] = self.from_email\n msg_text['To'] = self.to_email\n \n s = smtplib.SMTP(self.smtp_server)\n s.sendmail(self.from_email, [self.to_email], msg_text.as_string())\n s.quit()", "def send(self):\n answers = dns.resolver.query(self.domain, 'MX')\n try:\n for answer in answers:\n ex = answer.exchange.to_text()\n server = smtplib.SMTP(ex)\n server.set_debuglevel(self.verbose)\n server.sendmail(self.sender, [self.recipient], self.message.as_string())\n server.quit()\n except OSError as e:\n if e.errno is errno.ENETUNREACH:\n print('Looks like port 25 is blocked')\n raise e", "def _setup_smtp_server(self):\n\n # Init; Attempt to use external first\n target = 'external'\n\n # ============================================================\n # Attempt (1): External mail server\n # ============================================================\n\n if target == 'external':\n # Assume it's a machine external to company network.\n # We will use an external email account that requires a login.\n\n # msg = f'_setup_smtp_server(): Attempting to launch session as external machine...'\n # fancy_print(msg, fg=COMMUNICATOR_MSG_COLOR, bold=True)\n\n self.host = EXTERNAL_HOST\n self.port = EXTERNAL_PORT\n self.sender_address = EXTERNAL_USER_NAME\n self.sender_pwd = EXTERNAL_USER_PWD\n\n try:\n sess = smtplib.SMTP(host=self.host, port=self.port)\n sess.starttls()\n sess.login(self.sender_address, self.sender_pwd)\n return sess\n except:\n target = 'internal'\n\n # ============================================================\n # Attempt (2): Company internal mail server\n # ============================================================\n\n if target == 'internal':\n # Assume machine is internal to company network.\n # Current user should already be authenticated.\n\n # msg = f'_setup_smtp_server(): Attempting to launch session as internal Cooper machine...'\n # fancy_print(msg, fg=COMMUNICATOR_MSG_COLOR, bold=True)\n\n self.host = INTERNAL_HOST\n self.port = INTERNAL_PORT\n self.sender_address = INTERNAL_USER_NAME\n self.sender_pwd = INTERNAL_USER_PWD\n\n try:\n sess = smtplib.SMTP(self.host)\n return sess\n except:\n msg = f'COMMUNICATOR WARNING: Could not establish SMTP connection. 
Check configuration.'\n fancy_print(msg, fg=COMMUNICATOR_WARN_COLOR)\n\n msg = f'Could not establish SMTP connection'\n raise ConnectionError(msg)", "def test_send_email_with_incomplete_payload(app, session, email_msg):\n # TEST\n with pytest.raises(QueueException) as excinfo:\n worker.send_email(email_msg, None)\n\n assert 'Unsuccessful sending email' in str(excinfo)", "def tcp_error(self, flow: mitmproxy.tcp.TCPFlow):", "async def connect_async(smtp_url,\n timeout: Optional[float] = None) -> aiosmtplib.SMTP:\n return await aiosmtplib.SMTP(smtp_url, timeout=timeout)", "def check_smtp_server_connection(self):\n try:\n connected = True\n\n while not self.config:\n time.sleep(1)\n\n # Create SMTP server and handshake\n server = smtplib.SMTP(self.config.smtp_host + ':' + self.config.smtp_port)\n server.connect(self.config.smtp_host + ':' + self.config.smtp_port)\n\n self.logger.info(MODULE_NAME + '::check_smtp_server_connection::Successfully '\n 'connected to the configured SMTP server and port at: ' + self.config.smtp_host + ':' + self.config.smtp_port)\n\n server.quit()\n\n return connected\n\n except Exception as e:\n self.logger.error(MODULE_NAME + '::check_smtp_server_connection()::The following '\n 'unhandled exception occurred: ' + e.message)\n connected = False\n return connected", "def _test_retry_after_unlimited_retry_error(self, exception):\r\n num_emails = 8\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = 0\r\n expected_succeeds = num_emails\r\n # Note that because celery in eager mode will call retries synchronously,\r\n # each retry will increase the stack depth. It turns out that there is a\r\n # maximum depth at which a RuntimeError is raised (\"maximum recursion depth\r\n # exceeded\"). 
The maximum recursion depth is 90, so\r\n # num_emails * expected_retries < 90.\r\n expected_retries = 10\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # Cycle through N throttling errors followed by a success.\r\n get_conn.return_value.send_messages.side_effect = cycle(\r\n chain(repeat(exception, expected_retries), [None])\r\n )\r\n self._test_run_with_task(\r\n send_bulk_course_email,\r\n 'emailed',\r\n num_emails,\r\n expected_succeeds,\r\n failed=expected_fails,\r\n retried_nomax=(expected_retries * num_emails)\r\n )", "def raise_timeout_error(api_url, headers, timeout, proxies):\n raise requests.exceptions.Timeout", "def raise_timeout_error(api_url, headers, timeout, proxies):\n raise requests.exceptions.Timeout", "def test_email():\n recipients = configs[\"email_to\"].split(\", \")\n email_body = test_email_content()\n if configs[\"smtp_ssl\"] == 1:\n server = smtplib.SMTP_SSL(configs[\"smtp_server\"])\n elif configs[\"smtp_tls\"] == 1:\n server = smtplib.SMTP(configs[\"smtp_server\"])\n server.starttls()\n else:\n server = smtplib.SMTP(configs[\"smtp_server\"])\n\n if configs[\"smtp_authentication\"] == 1:\n server.login(configs[\"username\"], configs[\"password\"])\n\n server.sendmail(configs[\"email_from\"], recipients, email_body)\n server.quit()", "def connect(self, hostname, timeout=5):\n try:\n socket.gethostbyname(hostname)\n server = _smtp.SMTP(timeout=timeout)\n code, resp = server.connect(hostname)\n if code == 220:\n return server\n except:\n pass\n return None", "def test_data_err_fail(self, retry, result, get_conn):\r\n # have every fourth email fail due to blacklisting:\r\n get_conn.return_value.send_messages.side_effect = cycle([SMTPDataError(554, \"Email address is blacklisted\"),\r\n None, None, None])\r\n students = [UserFactory() for _ in xrange(settings.BULK_EMAIL_EMAILS_PER_TASK)]\r\n for student in students:\r\n CourseEnrollmentFactory.create(user=student, course_id=self.course.id)\r\n\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'all',\r\n 'subject': 'test subject for all',\r\n 'message': 'test message for all'\r\n }\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n # We shouldn't retry when hitting a 5xx error\r\n self.assertFalse(retry.called)\r\n # Test that after the rejected email, the rest still successfully send\r\n ((_entry_id, _current_task_id, subtask_status), _kwargs) = result.call_args\r\n self.assertEquals(subtask_status.skipped, 0)\r\n expected_fails = int((settings.BULK_EMAIL_EMAILS_PER_TASK + 3) / 4.0)\r\n self.assertEquals(subtask_status.failed, expected_fails)\r\n self.assertEquals(subtask_status.succeeded, settings.BULK_EMAIL_EMAILS_PER_TASK - expected_fails)", "def _test_email_address_failures(self, exception):\r\n # Select number of emails to fit into a single subtask.\r\n num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = int((num_emails + 3) / 4.0)\r\n expected_succeeds = num_emails - expected_fails\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # have every fourth email fail due to some address failure:\r\n get_conn.return_value.send_messages.side_effect = cycle([exception, None, None, None])\r\n self._test_run_with_task(send_bulk_course_email, 'emailed', num_emails, expected_succeeds, failed=expected_fails)", "def test_disconn_err_retry(self, retry, 
get_conn):\r\n get_conn.return_value.open.side_effect = SMTPServerDisconnected(425, \"Disconnecting\")\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'myself',\r\n 'subject': 'test subject for myself',\r\n 'message': 'test message for myself'\r\n }\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n self.assertTrue(retry.called)\r\n (__, kwargs) = retry.call_args\r\n exc = kwargs['exc']\r\n self.assertIsInstance(exc, SMTPServerDisconnected)", "def retry_if_resetpeer_or_timeout(exception):\n return not ((not isinstance(exception, requests_exceptions.ConnectionError)\n and not isinstance(exception, requests_exceptions.ConnectTimeout))\n and not isinstance(exception, BadStatusLine or exception.errno == errno.ECONNRESET))", "def _test_retry_after_limited_retry_error(self, exception):\r\n # If we want the batch to succeed, we need to send fewer emails\r\n # than the max retries, so that the max is not triggered.\r\n num_emails = settings.BULK_EMAIL_MAX_RETRIES\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = 0\r\n expected_succeeds = num_emails\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # Have every other mail attempt fail due to disconnection.\r\n get_conn.return_value.send_messages.side_effect = cycle([exception, None])\r\n self._test_run_with_task(\r\n send_bulk_course_email,\r\n 'emailed',\r\n num_emails,\r\n expected_succeeds,\r\n failed=expected_fails,\r\n retried_withmax=num_emails\r\n )", "def _test_max_retry_limit_causes_failure(self, exception):\r\n # Doesn't really matter how many recipients, since we expect\r\n # to fail on the first.\r\n num_emails = 10\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = num_emails\r\n expected_succeeds = 0\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # always fail to connect, triggering repeated retries until limit is hit:\r\n get_conn.return_value.send_messages.side_effect = cycle([exception])\r\n with patch('bulk_email.tasks.update_subtask_status', my_update_subtask_status):\r\n self._test_run_with_task(\r\n send_bulk_course_email,\r\n 'emailed',\r\n num_emails,\r\n expected_succeeds,\r\n failed=expected_fails,\r\n retried_withmax=(settings.BULK_EMAIL_MAX_RETRIES + 1)\r\n )", "def mail_error( error, log_file=None, verbose = False, get_config=lambda: {} ):\n return mail_log( error, log_file, True, verbose, get_config=get_config )", "def sendEmails(\n receiverName,\n retainedCompany,\n companyName,\n emailList,\n senderName,\n senderEmail,\n emailPassword,\n senderTitle,\n senderCompany,\n senderCompanyHomePage,\n senderPhone,\n port=465,\n returnHTML = True \n ):\n\n for emailToTry in emailList: \n # change back the next line after testing\n time.sleep(np.random.uniform(5,15)) # I introduced this because I was being rate limited, and I want to see if this will help avoid that - it seems to help\n print(f'trying {emailToTry}')\n message = MIMEMultipart('alternative')\n message['Subject'] = f'Engineering Positions at {companyName}' # change this back when ready to send actual emails\n message['From'] = senderEmail\n message['To'] = emailToTry # note that this only affects the headers - it does not affect to whom the message gets sent to\n\n [text, html] = emailTextHTML(receiverName, retainedCompany, companyName, senderName, senderTitle, senderCompany, 
senderEmail, senderCompanyHomePage, senderPhone, returnHTML=returnHTML)\n\n\n part1 = MIMEText(text, 'plain')\n part2 = MIMEText(html, 'html')\n\n message.attach(part1)\n message.attach(part2)\n\n # create a secure SSL context\n context = ssl.create_default_context()\n\n # now loop over each email message and extract what we need:\n with smtplib.SMTP_SSL('smtp.gmail.com', port, context=context) as server:\n # Using with smtplib.SMTP_SSL() as server: makes sure that the connection is automatically closed at the end of the indented code block. If port is zero, or not specified, .SMTP_SSL() will use the standard port for SMTP over SSL (port 465).\n server.login(senderEmail, emailPassword)\n server.sendmail(senderEmail, emailToTry, message.as_string())\n # the above line is how we actually change whom the message is sent to", "def send_email(self, user, subject, body, num_attempts=MAX_ATTEMPTS):\r\n fromaddr = u'[email protected]'\r\n toaddr = u'{} <{}>'.format(user.profile.name, user.email)\r\n msg = EmailMessage(subject, body, fromaddr, (toaddr,))\r\n msg.content_subtype = \"html\"\r\n\r\n i = 1\r\n while i <= num_attempts:\r\n try:\r\n msg.send()\r\n return True # Happy path!\r\n except SINGLE_EMAIL_FAILURE_ERRORS:\r\n # Something unrecoverable is wrong about the email acct we're sending to\r\n log.exception(\r\n u\"LinkedIn: Email send failed for user {}, email {}\"\r\n .format(user.username, user.email)\r\n )\r\n return False\r\n except LIMITED_RETRY_ERRORS:\r\n # Something went wrong (probably an intermittent connection error),\r\n # but maybe if we beat our heads against the wall enough times,\r\n # we can crack our way through. Thwack! Thwack! Thwack!\r\n # Give up after num_attempts though (for loop exits), let's not\r\n # get carried away.\r\n log.exception(\r\n u\"LinkedIn: Email send for user {}, email {}, encountered error, attempt #{}\"\r\n .format(user.username, user.email, i)\r\n )\r\n i += 1\r\n continue\r\n except INFINITE_RETRY_ERRORS:\r\n # Dude, it will *totally* work if I just... sleep... a little...\r\n # Things like max send rate exceeded. The smart thing would be\r\n # to do exponential backoff. 
The lazy thing to do would be just\r\n # sleep some arbitrary amount and trust that it'll probably work.\r\n # GUESS WHAT WE'RE DOING BOYS AND GIRLS!?!\r\n log.exception(\"LinkedIn: temporary error encountered, retrying\")\r\n time.sleep(1)\r\n\r\n # If we hit here, we went through all our attempts without success\r\n return False", "def send(message: Message, smtp_url: str,\n timeout: Optional[float] = None) -> None:\n with smtplib.SMTP(smtp_url, timeout=timeout) as smtp:\n smtp.send_message(message.as_mime())", "def send_email(error_msd, filename, config):\n msg = MIMEMultipart()\n msg['From'] = config['FROM']\n msg['To'] = config['TO']\n password = config['PASSWORD']\n msg['Subject'] = \"Server is down {}\".format(sys.argv[1])\n body = \"Error found in log: \\n\" + '\\n'.join(error_msd) + \"\\n Check logfile: \" + filename\n msg.attach(MIMEText(body, 'html'))\n\n server = smtplib.SMTP(config['SMTP_SERVER'], config['PORT'])\n server.starttls()\n server.login(msg['From'], password)\n try:\n server.sendmail(msg['From'],msg['To'], msg.as_string())\n server.quit()\n return True\n except socket.error as e:\n log.debug('email failed: Exception {}'.format(e))\n raise", "def __init__(self, smtp_server='localhost', smtp_port=None,\n smtp_ssl=False, smtp_user=None, smtp_password=None):\n self._text_body = None\n self._html_body = None\n self._subject = \"\"\n self._reply_to = None\n\n self._smtp_server = smtp_server\n self._smtp_port = smtp_port\n self._smtp_ssl = smtp_ssl\n self._smtp_user = smtp_user\n self._smtp_password = smtp_password\n\n self._re_email = re.compile(\"^([\\\\w \\\\._]+\\\\<[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\\\>|[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?)$\")\n self.clear_recipients()\n self.clear_attachments()", "def setup_smtp():\n print(\"INFO: Setting up SMTP variables...\")\n conf = ConfigParser()\n conf.read(os.path.join(os.path.abspath(\n os.path.dirname(__file__)), '.', 'smtp.conf'))\n smtp = {\n \"server\": conf.get('smtp', 'smtp_server'),\n \"login\": conf.get('smtp', 'smtp_login'),\n \"password\": conf.get('smtp', 'smtp_password'),\n \"port\": conf.get('smtp', 'smtp_port'),\n \"sender\": conf.get('smtp', 'smtp_sender'),\n }\n return smtp", "def raise_timeout_error_upload(api_url, headers, data, timeout, proxies):\n raise requests.exceptions.Timeout", "def send_error(self, conn, msg):\n # dst ip becomes src ip to return the message\n\n # src ip becomes this ip\n\n # type becomes \"no route\"\n\n # msg is empty\n\n # send from port incoming...current dst ip?\n\n # TODO\n\n return", "def test_send_error_to_admin(self, process_mock, send_smtp_mock):\n # arrange an mock error during processing\n process_mock.side_effect = RuntimeError('mock error')\n\n with name_of_file_containing('contents') as filename:\n call_command('process_email', email_file=filename)\n\n self.assertTrue(send_smtp_mock.called)\n (msg,) = send_smtp_mock.call_args.args\n self.assertEqual(msg['to'], '[email protected]', 'Admins should be emailed on error')\n self.assertIn('error', msg['subject'].lower(), 'Error email subject should indicate error')\n self.assertTrue(msg.is_multipart(), 'Error email should have attachments')\n parts = msg.get_payload()\n self.assertEqual(len(parts), 3, 'Error email should contain message, traceback, and original message')\n content = parts[0].get_payload()\n traceback = 
parts[1].get_payload()\n original = parts[2].get_payload(decode=True).decode() # convert octet-stream to string\n self.assertIn('RuntimeError', content, 'Error type should be included in error email')\n self.assertIn('mock.py', content, 'File where error occurred should be included in error email')\n self.assertIn('traceback', traceback.lower(), 'Traceback should be attached to error email')\n self.assertEqual(original, 'contents', 'Original message should be attached to error email')", "def _test_immediate_failure(self, exception):\r\n # Doesn't really matter how many recipients, since we expect\r\n # to fail on the first.\r\n num_emails = 10\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = num_emails\r\n expected_succeeds = 0\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # always fail to connect, triggering repeated retries until limit is hit:\r\n get_conn.return_value.send_messages.side_effect = cycle([exception])\r\n self._test_run_with_task(\r\n send_bulk_course_email,\r\n 'emailed',\r\n num_emails,\r\n expected_succeeds,\r\n failed=expected_fails,\r\n )", "def __init__(self, host, user, password, port=25):\n self.host = host\n self.port = port\n self.user = user\n self.password = password\n\n self.smtp = smtplib.SMTP()", "def connection_failed(self, connection, error):\n assert False", "def _send(self,msg):\n attempts = 3\n while attempts > 0:\n self.sock.sendto(msg, self.ip_port)\n ready = select.select([self.sock], [], [], self.timeout)\n if ready[0]:\n data, ip_port = self.sock.recvfrom(60)\n if ip_port != self.ip_port: continue\n return decode(data)\n attempts -= 1\n print(\"Retrying send\")\n return None", "def _readsmtpserver(self):\n # FIXME too much duplicated code in these _readXYZ() methods\n try: \n self.smtpserver = self.conf.get(\"report.email.smtp_server\")\n except:\n # we use print so this messages goes to the stdout\n msg = \"configuration variable 'smtp_server' is not defined. 
Plugin Email cannot be created\"\n self.log.error(msg)\n raise PluginConfigurationFailure(msg)", "def test_compose_email_somebad(self):\n pass", "def mail():\n mail_server = 'localhost'\n mail_port = 1025\n CustomSMTPServer((mail_server, mail_port), None)\n asyncore.loop()", "def ConnectToSmtpServer():\n payload = ['ehlo [email protected]\\r\\n']\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((FLAGS.host, FLAGS.smtp_port))\n SendPayload(s, payload)\n logging.info('Connected to SMTP server.')\n return s", "def send_queued_mail():\r\n now = datetime.datetime.now(g.tz)\r\n if not c.site:\r\n c.site = Default\r\n\r\n clear = False\r\n session = smtplib.SMTP(g.smtp_server)\r\n # convienence funciton for sending the mail to the singly-defined session and\r\n # marking the mail as read.\r\n def sendmail(email):\r\n try:\r\n session.sendmail(email.fr_addr, email.to_addr,\r\n email.to_MIMEText().as_string())\r\n email.set_sent(rejected = False)\r\n # exception happens only for local recipient that doesn't exist\r\n except (smtplib.SMTPRecipientsRefused, smtplib.SMTPSenderRefused):\r\n # handle error and print, but don't stall the rest of the queue\r\n print \"Handled error sending mail (traceback to follow)\"\r\n traceback.print_exc(file = sys.stdout)\r\n email.set_sent(rejected = True)\r\n\r\n\r\n try:\r\n for email in Email.get_unsent(now):\r\n clear = True\r\n\r\n should_queue = email.should_queue()\r\n # check only on sharing that the mail is invalid\r\n if email.kind == Email.Kind.SHARE and should_queue:\r\n email.body = Share(username = email.from_name(),\r\n msg_hash = email.msg_hash,\r\n link = email.thing,\r\n body = email.body).render(style = \"email\")\r\n email.subject = _(\"[reddit] %(user)s has shared a link with you\") % \\\r\n {\"user\": email.from_name()}\r\n sendmail(email)\r\n elif email.kind == Email.Kind.OPTOUT:\r\n email.body = Mail_Opt(msg_hash = email.msg_hash,\r\n leave = True).render(style = \"email\")\r\n email.subject = _(\"[reddit] email removal notice\")\r\n sendmail(email)\r\n\r\n elif email.kind == Email.Kind.OPTIN:\r\n email.body = Mail_Opt(msg_hash = email.msg_hash,\r\n leave = False).render(style = \"email\")\r\n email.subject = _(\"[reddit] email addition notice\")\r\n sendmail(email)\r\n\r\n elif email.kind in (Email.Kind.FEEDBACK, Email.Kind.ADVERTISE):\r\n if email.kind == Email.Kind.FEEDBACK:\r\n email.subject = \"[feedback] feedback from '%s'\" % \\\r\n email.from_name()\r\n else:\r\n email.subject = \"[ad_inq] feedback from '%s'\" % \\\r\n email.from_name()\r\n sendmail(email)\r\n # handle failure\r\n else:\r\n email.set_sent(rejected = True)\r\n\r\n finally:\r\n session.quit()\r\n\r\n # clear is true if anything was found and processed above\r\n if clear:\r\n Email.handler.clear_queue(now)", "def __init__(self, auth=False):\n self.smtp = smtplib.SMTP(host=EMAIL_HOST, port=EMIAL_HOST_PORT)\n self.smtp.ehlo()\n if auth:\n self.smtp.login(EMAIL, EMAIL_PASSWORD)", "def raise_connection_error(api_url, headers, timeout, proxies):\n raise requests.exceptions.ConnectionError", "def raise_connection_error(api_url, headers, timeout, proxies):\n raise requests.exceptions.ConnectionError", "def SocketError(self) -> SocketError:", "async def test_disconnected_server_raises_on_starttls(preset_client):\n await preset_client.connect()\n preset_client.server.responses.append(\n b\"\\n\".join([b\"250-localhost, hello\", b\"250-SIZE 100000\", b\"250 STARTTLS\"])\n )\n await preset_client.ehlo()\n\n preset_client.server.responses.append(b\"220 begin TLS 
pls\")\n preset_client.server.drop_connection_event.set()\n\n with pytest.raises(SMTPServerDisconnected):\n await preset_client.starttls(validate_certs=False)\n\n # Verify that the connection was closed\n assert not preset_client._connect_lock.locked()\n assert preset_client.protocol is None\n assert preset_client.transport is None", "def error_received(self, exc: Exception) -> None:\n _log.exception('UDP operation failed')", "def mail(server, from_address, from_pass, address_list, msg, port = 25):\n\n smtp_mail = smtplib.SMTP(server,port)\n smtp_mail.starttls()\n smtp_mail.login(from_address, from_pass)\n smtp_mail.sendmail(from_address, address_list, msg) \n smtp_mail.quit()", "def send_email(self, message, mail_server_id=None, smtp_server=None, smtp_port=None,\n smtp_user=None, smtp_password=None, smtp_encryption=None, smtp_debug=False,\n smtp_session=None):\n # Use the default bounce address **only if** no Return-Path was\n # provided by caller. Caller may be using Variable Envelope Return\n # Path (VERP) to detect no-longer valid email addresses.\n if smtp_user:\n _logger.error(\"smpt session --------------------\")\n _logger.error(smtp_user)\n smtp_from = smtp_user\n else:\n smtp_from = message['Return-Path'] or self._get_default_bounce_address() or message['From']\n assert smtp_from, \"The Return-Path or From header is required for any outbound email\"\n\n # The email's \"Envelope From\" (Return-Path), and all recipient addresses must only contain ASCII characters.\n from_rfc2822 = extract_rfc2822_addresses(smtp_from)\n assert from_rfc2822, (\"Malformed 'Return-Path' or 'From' address: %r - \"\n \"It should contain one valid plain ASCII email\") % smtp_from\n # use last extracted email, to support rarities like 'Support@MyComp <[email protected]>'\n smtp_from = from_rfc2822[-1]\n email_to = message['To']\n email_cc = message['Cc']\n email_bcc = message['Bcc']\n del message['Bcc']\n\n smtp_to_list = [\n address\n for base in [email_to, email_cc, email_bcc]\n for address in extract_rfc2822_addresses(base)\n if address\n ]\n assert smtp_to_list, self.NO_VALID_RECIPIENT\n\n x_forge_to = message['X-Forge-To']\n if x_forge_to:\n # `To:` header forged, e.g. 
for posting on mail.channels, to avoid confusion\n del message['X-Forge-To']\n del message['To'] # avoid multiple To: headers!\n message['To'] = x_forge_to\n\n # Do not actually send emails in testing mode!\n if getattr(threading.currentThread(), 'testing', False) or self.env.registry.in_test_mode():\n _test_logger.info(\"skip sending email in test mode\")\n return message['Message-Id']\n\n try:\n message_id = message['Message-Id']\n smtp = smtp_session\n smtp = smtp or self.connect(\n smtp_server, smtp_port, smtp_user, smtp_password,\n smtp_encryption, smtp_debug, mail_server_id=mail_server_id)\n smtp.sendmail(smtp_from, smtp_to_list, message.as_string())\n # do not quit() a pre-established smtp_session\n if not smtp_session:\n smtp.quit()\n except smtplib.SMTPServerDisconnected:\n raise\n except Exception as e:\n params = (ustr(smtp_server), e.__class__.__name__, ustr(e))\n msg = _(\"Mail delivery failed via SMTP server '%s'.\\n%s: %s\") % params\n _logger.info(msg)\n raise MailDeliveryException(_(\"Mail Delivery Failed\"), msg)\n return message_id", "def _send_smtp(message, subject, to, to_name, sender, sender_name):\n host = app.config.get('MAIL_HOST')\n\n if not host:\n raise MailFailure('SMTP Server Not Configured')\n\n try:\n server = smtplib.SMTP(host)\n except (smtplib.SMTPConnectError, socket.error) as ex:\n app.logger.error('Unable to send mail: %s', str(ex))\n raise MailFailure('Error connecting to SMTP server.')\n\n msg = text.MIMEText(message)\n msg['Subject'] = subject\n msg['To'] = email.utils.formataddr((to_name, to))\n msg['From'] = email.utils.formataddr((sender_name, sender))\n\n try:\n if app.debug:\n server.set_debuglevel(True)\n server.sendmail(sender, [to], msg.as_string())\n except (smtplib.SMTPException, socket.error) as ex:\n app.logger.error('Unable to send mail: %s', str(ex))\n raise MailFailure('Error sending mail to SMTP server.')\n finally:\n try:\n server.quit()\n except smtplib.SMTPException:\n pass", "def send_error(self, conn, msg):\n print(\"ERROR PLACEHOLDER\")\n\n return", "def send_email(to, subject, content): \n \n #Create the message\n msg = MIMEMultipart('alternative')\n msg = addheader(msg, 'Subject', subject)\n msg[\"Subject\"] = subject\n msg[\"From\"] = EMAIL_SENDER\n msg[\"To\"] = listToStr(to)\n content = MIMEText(content.encode('utf-8'), \"html\")\n msg.attach(content);\n \n try:\n smtpObj = SMTP(GMAIL_SMTP, GMAIL_SMTP_PORT)\n #Identify yourself to GMAIL ESMTP server.\n smtpObj.ehlo()\n #Put SMTP connection in TLS mode and call ehlo again.\n smtpObj.starttls()\n smtpObj.ehlo()\n #Login to service\n smtpObj.login(user=EMAIL_SENDER, password=PASSWORD)\n #Send email\n print msg.as_string()\n smtpObj.sendmail(EMAIL_SENDER, to, msg.as_string())\n #close connection and session.\n smtpObj.quit();\n except SMTPException as error:\n print \"Error: unable to send email : {err}\".format(err=error)", "def test_bademail(mock_smtp):\n \n msg_values = {'sender': '[email protected]',\n 'receiver': '[email protected]',\n 'subject': 'Test Message Subject',\n 'body': 'This is a Test.'}\n \n rejected_recipients = ['[email protected]', '[email protected]']\n\n msg = Message(**msg_values)\n \n mock_smtp.return_value.sendmail.return_value = rejected_recipients\n \n with pytest.raises(BadEmailRecipient) as err:\n msg.send()\n \n expected_message = (\"Message '{}' could not be sent to one or more \"\n \"recipients.\").format(msg_values['subject'])\n assert expected_message == err.value.message\n assert Counter(err.value.rejected) == Counter(rejected_recipients)", 
"def _login(self):\n self._smtp = smtplib.SMTP(host=self._config.host,\n port=self._config.port)\n # send 'hello' to SMTP server\n self._smtp.ehlo()\n # start TLS encryption\n self._smtp.starttls()\n self._smtp.login(self._config.sender_email, self._config.password)\n self._connected = True", "def get_smtpd_stats(logvalues):\n possible_status = ['timeout', 'lost', 'warning:', 'NOQUEUE:', 'connect',\n 'disconnect']\n message_id_or_status = logvalues[5]\n smtp_code = int()\n reason_code = str()\n smtp_status = str()\n mail_to = str()\n mail_from = str()\n smtp_client = str()\n\n if message_id_or_status in possible_status:\n print \"Found DECISION: {}\".format(message_id_or_status)\n if 'NOQUEUE' in message_id_or_status:\n smtp_code = logvalues[6]\n smtp_client = logvalues[9]\n reason_code = logvalues[11]\n if smtp_code == '451' and reason_code == '4.7.1':\n # Recipient address rejected: Intentional policy rejection,\n # please try again later (GREYLISTED)\n mail_to = logvalues[12]\n mail_from = logvalues[34]\n smtp_status = 'Greylisted'\n\n elif smtp_code == '450' and reason_code == '4.1.8':\n # Sender Address Rejected, Sender's Domain not found\n smtp_client = extract_connecting_ip(logvalues[5])\n mail_from = extract_email_address(logvalues[8])\n mail_to = extract_email_address(logvalues[16])\n smtp_status = \"Invalid Sender Domain\"\n\n elif smtp_code == '550' and reason_code == '5.1.0':\n # Sender Address Rejected, user unknown in virtual mailbox\n # table. In this scenario, someone or some thing is attempting\n # to use the local SMTP server as an open relay, but sneakily.\n # They are usually using the local domain as the sending domain\n # and then some other domain, say yahoo, google, hotmail as the\n # recipient\n smtp_client = extract_connecting_ip(logvalues[9])\n mail_from = extract_email_address(logvalues[22])\n mail_to = extract_email_address(logvalues[23])\n smtp_status = \"Invalid Sender\"\n\n elif smtp_code == '550' and reason_code == '5.1.1':\n # Recipient Address Rejected, user unknown in virtual mailbox\n # table. This could be a mis-configuration of the local SMTP\n # mailbox setup - the user specified in the \"To:\" field doesnt\n # exist and the message is rejected. 
This can also happen when\n # the sending address is Null\n smtp_client = extract_connecting_ip(logvalues[9])\n mail_from = extract_email_address(logvalues[22])\n mail_to = extract_email_address(logvalues[23])\n smtp_status = \"Invalid Recipient\"\n\n elif smtp_code == '554':\n # Sender access DENIED, sent from a dynamic IP range\n smtp_client = extract_connecting_ip(logvalues[9])\n mail_from = extract_email_address(logvalues[41])\n mail_to = extract_email_address(logvalues[42])\n smtp_status = \"Bad sending server\"\n\n elif 'connect' == message_id_or_status:\n smtp_client = extract_connecting_ip(logvalues[7])\n\n elif 'disconnect' in message_id_or_status:\n smtp_client = extract_connecting_ip(logvalues[7])\n\n print \"Rejection information:\\\n SMTP Client: {} \\\n MAIL FROM: {} \\\n MAIL TO: {}\".format(smtp_client, mail_from, mail_to)\n print \" SMTP Codes: {} - {}: {}\".format(smtp_code,\n reason_code,\n smtp_status)\n else:\n print \"Found Message ID: {}\".format(message_id_or_status)\n print logvalues", "def secure_connection(HOST,PORT,PROTOCOL=\"smtp\",TLSSTRENGTH=\"tls1_2\"):\n\n print(\"Connecting to host: {} on Port Number {} using an IMPLICITY SECURE Connection \\r\\n\".format(HOST,PORT))\n\n context = create_tls_context(TLSSTRENGTH)\n secure_client = context.wrap_socket(socket.socket(socket.AF_INET),server_hostname=HOST)\n secure_client.settimeout(SOCKET_TIMEOUT)\n\n try:\n secure_client.connect((HOST,PORT))\n # SMTP NEEDS A EHLO MESSAGE BEFORE OTHER COMMANDS\n # IMAP AND POP DO NOT\n data = secure_client.recv(1024)\n if PROTOCOL==\"smtp\":\n secure_client.send(SMTP_EHLO)\n data = secure_client.recv(1024)\n #print('SMTP EHLO RESPONSE: ', repr(data))\n print_cipher_certificate(secure_client)\n decide_protocol_handler(secure_client,PROTOCOL)\n\n except Exception as e:\n print(\"Connection Could Not Be Established \\r\\n\")\n print(e)", "def on_connection_error(self):\n log.error(\"Stream connection has errored or timed out\")", "def processTimeLeft(sendMail, verbose, proxyInfo, time, mail):\n if proxyInfo:\n if verbose:\n print('Proxy information: {}'.format(proxyInfo))\n timeLeft = []\n for line in proxyInfo:\n if line.find('timeleft') > -1:\n dateReg = re.compile('\\d{1,3}[:/]\\d{2}[:/]\\d{2}')\n timeLeft = dateReg.findall(line)[0]\n timeLeft = timeLeft.split(':')[0]\n continue\n else:\n msg = \"No valid proxy found in %s. \" % HOST\n msg += \"Please create one.\\n\"\n\n if verbose:\n print(msg)\n print(\"Send mail: {}\".format(sendMail))\n\n if sendMail:\n if verbose:\n print(\"Sending mail notification\")\n sendMailNotification(mail, msg)\n sys.exit(4)\n\n ### // build message\n if int(time) >= int(timeLeft):\n msg = \"\\nProxy file in %s is about to expire. 
\" % HOST\n msg += \"Please renew it.\\n\"\n msg += \"Hours left: %i\\n\" % int(timeLeft)\n if int(timeLeft) == 0:\n msg = \"Proxy file in %s HAS expired.\" % HOST\n msg += \"Please renew it.\\n\"\n\n if verbose:\n print(msg)\n print(\"Send mail: {}\".format(sendMail))\n\n ### // Sends an email\n if sendMail:\n if verbose:\n print(\"Sending mail notification\")\n sendMailNotification(mail, msg, proxyInfo, verbose)", "def raise_connection_error_upload(api_url, headers, data, timeout, proxies):\n raise requests.exceptions.ConnectionError", "def mail_log( log, log_file=None, is_error = False, verbose = False, get_config=lambda: {} ):\n tries = 3\n log_text = \"\"\n while tries:\n try:\n if log != None:\n msg = MIMEText(log)\n elif log_file != None:\n log_text = re.sub(\"^smtp_.*$|^ssh_.*$\",\"\",log_file.read(),flags=re.M)\n msg = MIMEText(log_text[:2*pow(2,20)])\n else:\n return 0\n\n if is_error:\n if verbose:\n print(\"E-mailing log file with errors\", file=sys.stderr)\n msg['Subject'] = \"bkp error: %s \"%(platform.node())\n msg['From'] = get_config()[\"error_email\"]\n msg['To'] = get_config()[\"error_email\"]\n else:\n if verbose:\n print(\"E-mailing log file with no errors\", file=sys.stderr)\n msg['Subject'] = \"bkp complete: %s\"%(platform.node())\n msg['From'] = get_config()[\"log_email\"]\n msg['To'] = get_config()[\"log_email\"]\n\n msg['Date'] = datetime.datetime.now().strftime( \"%m/%d/%Y %H:%M\" )\n send_email(msg)\n return 0\n except:\n time.sleep(tries*10.0)\n tries = tries - 1\n if not tries:\n if is_error:\n print(\"Error couldn't send via e-mail\", file=sys.stderr)\n else:\n print(\"Success couldn't send via e-mail\", file=sys.stderr)\n if log:\n print(log, file=sys.stderr)\n if log_text:\n print(log_text, file=sys.stderr)\n print(traceback.format_exc(), file=sys.stderr)\n raise", "def failed(self, message, reason=None):\n failed_mail.send(\n sender=self.__class__,\n message=message,\n reason=reason\n )", "def _handle_error(self, path, reqs, headers, get=True):\n call = requests.get if get else requests.post\n resp = None\n dump = json.dumps(reqs)\n wait = self.config.start_reconnect_wait\n while resp is None:\n if wait > self.config.max_reconnect_wait:\n raise Exception(\"To many reconnect attempts\")\n time.sleep(wait)\n try:\n resp = call(path, dump, headers=headers)\n except requests.exceptions.ConnectionError:\n resp = None\n wait *= 2\n return resp", "def test_timeout_exceeded():\n connection = FakeBaseConnection(session_timeout=10)\n start = time.time() - 11\n try:\n connection._timeout_exceeded(start)\n except NetmikoTimeoutException as exc:\n assert isinstance(exc, NetmikoTimeoutException)\n return\n\n assert False", "def _retry_occurred(self):", "def sendErrorMessage(msg): #@NoSelf", "def smtp_server(self):\n s = smtplib.SMTP(self.smtp_addr, self.smtp_port)\n s.starttls()\n s.login(self.src_addr, self.src_pass)\n try:\n yield s\n except:\n raise\n finally:\n s.quit()", "def timeoutConnection(self):\n self._cancelCommands(defer.TimeoutError(\"Connection timeout\"))\n self.transport.loseConnection()", "def check_timeout(self, transport, earlier_time, interval, error_msg):\n now = datetime.datetime.now()\n secs = int((now - earlier_time).total_seconds())\n if secs >= interval:\n self.connection_lost(transport, f'{error_msg}: {secs} seconds')", "def send_mail(Email_id,OTP):\r\n try : \r\n s = smtplib.SMTP('smtp.gmail.com', 587) \r\n s.ehlo()\r\n # start TLS for security \r\n s.starttls() \r\n # Authentication \r\n s.login(mail_id,mail_Password) \r\n message = 
str(OTP)\r\n # sending the mail \r\n s.sendmail(mail_id, Email_id, message) \r\n # terminating the session \r\n s.quit() \r\n msg=\"Mail has been sent to Registered mail id.\"\r\n except :\r\n msg=\"UserName and Password not accepted kindly provide correct UserName and Password.\"\r\n return msg", "def catch_telegram_errors(func):\n\n def result_func(*args, **kwargs):\n while True:\n try:\n return func(*args, **kwargs)\n # Bot was blocked by the user\n except telegram.error.Unauthorized:\n log.debug(f\"Unauthorized to call {func.__name__}(), skipping.\")\n break\n # Telegram API didn't answer in time\n except telegram.error.TimedOut:\n log.warning(f\"Timed out while calling {func.__name__}(),\"\n f\" retrying in {cfg['Telegram']['timed_out_pause']} secs...\")\n time.sleep(cfg.telegram[\"timed_out_pause\"])\n # Telegram is not reachable\n except telegram.error.NetworkError as error:\n log.error(f\"Network error while calling {func.__name__}(),\"\n f\" retrying in {cfg.telegram['error_pause']} secs...\\n\"\n f\"Full error: {error.message}\")\n time.sleep(cfg.telegram[\"error_pause\"])\n # Unknown error\n except telegram.error.TelegramError as error:\n if error.message.lower() in [\"bad gateway\", \"invalid server response\"]:\n log.warning(f\"Bad Gateway while calling {func.__name__}(),\"\n f\" retrying in {cfg.telegram['error_pause']} secs...\")\n time.sleep(cfg.telegram[\"error_pause\"])\n elif error.message.lower() == \"timed out\":\n log.warning(f\"Timed out while calling {func.__name__}(),\"\n f\" retrying in {cfg.telegram['timed_out_pause']} secs...\")\n time.sleep(cfg.telegram[\"timed_out_pause\"])\n else:\n log.error(f\"Telegram error while calling {func.__name__}(),\"\n f\" retrying in {cfg.telegram['error_pause']} secs...\\n\"\n f\"Full error: {error.message}\")\n traceback.print_exception(*sys.exc_info())\n time.sleep(cfg.telegram[\"error_pause\"])\n\n return result_func", "def test_connect_timeout(self):\n rave = RWSConnection('https://innovate.mdsol.com')\n with mock.patch(\"requests.sessions.Session.get\") as mock_get:\n mock_get.side_effect = requests.exceptions.ConnectTimeout()\n with self.assertRaises(RWSException) as exc:\n rave.send_request(rws_requests.ClinicalStudiesRequest(), verify=False,retries=0)\n self.assertEqual('Server Connection Timeout', str(exc.exception))", "def __init__(self, error_msg):\n super(RequestTimeoutException, self).__init__(error_msg)", "def process_error_mail(recipient, sender, journal_id):\n\n if recipient in ['', None]:\n #: Simpley Error Mail!\n #: TODO: error marking...\n return True\n\n try:\n param = return_path_from_address(recipient)\n assert param['message_id'] != \"\"\n assert param['domain'] != \"\"\n\n try:\n #: Jourmal mail object\n journal_msg = Journal.objects.get(id=journal_id).mailobject()\n error_address = journal_msg.get('X-Failed-Recipients')\n except:\n pass\n\n try:\n #: Find message\n msg = Publish.objects.get(\n id=int(param['message_id']),\n publish__site__domain=param['domain'])\n\n # X-Failed-Recipients SHOULD be checked ?\n assert(\n error_address is None or\n error_address == msg.member.address)\n\n #: increment bounce number\n #: this mailbox will be disabled sometimes later.\n msg.member.bounces = msg.member.bounces + 1\n msg.member.save()\n\n #:\n return True\n\n except:\n pass\n\n except exceptions.AttributeError:\n # May be normal address..\n # Other handler will be called.\n return False\n\n return False", "def retry_if_connection_error(exception):\r\n # return True\r\n return isinstance(exception, HttpError)", 
"def send(message):\n mail_server = smtplib.SMTP('localhost')\n mail_server.send_message(message)\n mail_server.quit()", "def send_email(message):\n mail_server = smtplib.SMTP('localhost')\n mail_server.send_message(message)\n mail_server.quit()", "def _handle_error(error):\n try:\n #Abandon any active timeout triggers\n if timeoutCall.active():\n timeoutCall.cancel()\n #Unexpected error on HTTPS POST, we may want to move to the next node.\n self.log.error(\"Error on HTTPS POST : {cls!r} : {err!r}. {node!r}\",cls=error.type.__name__,err=error.getErrorMessage(),node = self.nodes[self.node_index])\n self._next_node()\n except Exception as ex:\n self.log.failure(\"Error in _handle_error {err!r}. {node!r}\",err=str(ex),node = self.nodes[self.node_index])\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n ##If something went wrong, the HTTPS POST isn't active anymore.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()", "def TestSendRecvMessageTimeoutRaisesCommsError(self):\n self.txrx.timeout = 0.2 # short timeout so we don't hold up testing too much\n\n byte_array_message = bytes(\"\\x01\\x01\\x01\\x01\\x01\\x01\", encoding=DATA_ENCODING)\n txmsg = TxMessage(byte_array_message, num_response_msg=1, expect_eom=True)\n with self.assertRaises(PercivalCommsError):\n self.txrx.send_recv_message(txmsg)\n\n # Receive the bytes from our test socket\n msg = self.connection.recv(6)\n # Verify the bytes are the same as those sent\n self.assertEqual(msg, byte_array_message)", "def decide_to_retry(error):\n return True", "def _logout(self):\n if self.connected:\n try:\n self._smtp.quit()\n except smtplib.SMTPServerDisconnected:\n pass\n self._connected = False", "def parse_bro_smtp(smtp_path, target_dir, prefix='smtp'):\n\n # the current message we're parsing in the case of multiple emails coming in over the same connection\n smtp_message_index = 0 \n\n with open(smtp_path, 'r', errors='ignore') as fp:\n source_ipv4 = None\n source_port = None\n envelope_from = None\n envelope_to = []\n\n # state flag for when the data isn't quite right (see below)\n _bypass_read = False\n\n # the first line of the file has the source IP address of the smtp connection\n # in the following format: 172.16.139.143:38668/tcp\n\n line = fp.readline()\n m = REGEX_BRO_SMTP_SOURCE_IPV4.match(line)\n\n if not m:\n logging.error(f\"unable to parse soure address from {smtp_path} ({line.strip()})\")\n event_time = saq.LOCAL_TIMEZONE.localize(datetime.datetime.fromtimestamp(os.path.getmtime(smtp_path)))\n # in this case we skip the first readline() call since we've already read it\n _bypass_read = True\n else:\n source_ipv4 = m.group(1)\n source_port = m.group(2)\n\n logging.debug(f\"got source ipv4 {source_ipv4} port {source_port} for {smtp_path}\")\n\n # the second line is the time (in epoch UTC) that bro received the file\n line = fp.readline()\n event_time = datetime.datetime.utcfromtimestamp(int(line.strip()))\n logging.debug(f\"got event time {event_time} for {smtp_path}\")\n\n STATE_SMTP = 1\n STATE_DATA = 2\n\n state = STATE_SMTP\n rfc822_path = None\n rfc822_fp = None\n\n def _reset_state():\n nonlocal rfc822_fp, source_ipv4, source_port, envelope_from, envelope_to, state\n rfc822_fp = None\n #source_ipv4 = None\n #source_port = None\n envelope_from = None\n envelope_to = []\n state = STATE_SMTP\n\n def _finalize():\n # called when we detect the end of an SMTP stream OR the 
end of the file (data)\n nonlocal rfc822_fp, source_ipv4, source_port, envelope_from, envelope_to, state\n rfc822_fp.close()\n logging.info(\"finished parsing {} from {}\".format(rfc822_path, smtp_path))\n result = RFC822Email(\n source_ipv4=source_ipv4,\n source_port=source_port,\n envelope_from=envelope_from,\n envelope_to=envelope_to,\n received=event_time,\n file_path=rfc822_path)\n _reset_state()\n return result\n\n # smtp is pretty much line oriented\n while True:\n\n # if we read the first line and it wasn't what we expected\n # then we skip reading it here since we already have it\n if _bypass_read:\n _bypass_read = False\n else:\n line = fp.readline()\n\n if line == '':\n break\n\n if state == STATE_SMTP:\n m = REGEX_BRO_SMTP_MAIL_FROM.match(line)\n if m:\n envelope_from = m.group(1)\n logging.debug(\"got envelope_from {} for {}\".format(envelope_from, smtp_path))\n continue\n\n m = REGEX_BRO_SMTP_RCPT_TO.match(line)\n if m:\n envelope_to.append(m.group(1))\n logging.debug(\"got envelope_to {} for {}\".format(envelope_to, smtp_path))\n continue\n\n m = REGEX_BRO_SMTP_DATA.match(line)\n if m or (not line.startswith('<') and not line.startswith('>')):\n state = STATE_DATA\n rfc822_path = os.path.join(target_dir, f'{prefix}.{smtp_message_index}.email.rfc822')\n smtp_message_index += 1\n rfc822_fp = open(rfc822_path, 'w')\n logging.debug(\"created {} for {}\".format(rfc822_path, smtp_path))\n continue\n\n m = REGEX_BRO_SMTP_RSET.match(line)\n if m:\n logging.debug(f\"detected RSET for {smtp_path}\")\n _reset_state()\n continue\n\n # any other command we skip\n logging.debug(f\"skipping SMTP command {line.strip()}\")\n continue\n\n # otherwise we're reading DATA and looking for the end of that\n if line.strip() == ('> . .'):\n yield _finalize()\n continue\n\n rfc822_fp.write(line)\n continue\n\n # did the file end while we were reading SMTP data?\n if state == STATE_DATA:\n yield _finalize()", "def ReceiveTimeout(self) -> int:", "def ReceiveTimeout(self) -> int:", "def _submit_for_retry(entry_id, email_id, to_list, global_email_context, current_exception, subtask_status, skip_retry_max=False):\r\n task_id = subtask_status.task_id\r\n log.info(\"Task %s: Successfully sent to %s users; failed to send to %s users (and skipped %s users)\",\r\n task_id, subtask_status.succeeded, subtask_status.failed, subtask_status.skipped)\r\n\r\n # Calculate time until we retry this task (in seconds):\r\n # The value for max_retries is increased by the number of times an \"infinite-retry\" exception\r\n # has been retried. We want the regular retries to trigger max-retry checking, but not these\r\n # special retries. 
So we count them separately.\r\n max_retries = _get_current_task().max_retries + subtask_status.retried_nomax\r\n base_delay = _get_current_task().default_retry_delay\r\n if skip_retry_max:\r\n # once we reach five retries, don't increase the countdown further.\r\n retry_index = min(subtask_status.retried_nomax, 5)\r\n exception_type = 'sending-rate'\r\n # if we have a cap, after all, apply it now:\r\n if hasattr(settings, 'BULK_EMAIL_INFINITE_RETRY_CAP'):\r\n retry_cap = settings.BULK_EMAIL_INFINITE_RETRY_CAP + subtask_status.retried_withmax\r\n max_retries = min(max_retries, retry_cap)\r\n else:\r\n retry_index = subtask_status.retried_withmax\r\n exception_type = 'transient'\r\n\r\n # Skew the new countdown value by a random factor, so that not all\r\n # retries are deferred by the same amount.\r\n countdown = ((2 ** retry_index) * base_delay) * random.uniform(.75, 1.25)\r\n\r\n log.warning('Task %s: email with id %d not delivered due to %s error %s, retrying send to %d recipients in %s seconds (with max_retry=%s)',\r\n task_id, email_id, exception_type, current_exception, len(to_list), countdown, max_retries)\r\n\r\n # we make sure that we update the InstructorTask with the current subtask status\r\n # *before* actually calling retry(), to be sure that there is no race\r\n # condition between this update and the update made by the retried task.\r\n update_subtask_status(entry_id, task_id, subtask_status)\r\n\r\n # Now attempt the retry. If it succeeds, it returns a RetryTaskError that\r\n # needs to be returned back to Celery. If it fails, we return the existing\r\n # exception.\r\n try:\r\n send_course_email.retry(\r\n args=[\r\n entry_id,\r\n email_id,\r\n to_list,\r\n global_email_context,\r\n subtask_status.to_dict(),\r\n ],\r\n exc=current_exception,\r\n countdown=countdown,\r\n max_retries=max_retries,\r\n throw=True,\r\n )\r\n except RetryTaskError as retry_error:\r\n # If the retry call is successful, update with the current progress:\r\n log.exception(u'Task %s: email with id %d caused send_course_email task to retry.',\r\n task_id, email_id)\r\n return subtask_status, retry_error\r\n except Exception as retry_exc:\r\n # If there are no more retries, because the maximum has been reached,\r\n # we expect the original exception to be raised. We catch it here\r\n # (and put it in retry_exc just in case it's different, but it shouldn't be),\r\n # and update status as if it were any other failure. That means that\r\n # the recipients still in the to_list are counted as failures.\r\n log.exception(u'Task %s: email with id %d caused send_course_email task to fail to retry. To list: %s',\r\n task_id, email_id, [i['email'] for i in to_list])\r\n num_failed = len(to_list)\r\n subtask_status.increment(subtask_status, failed=num_failed, state=FAILURE)\r\n return subtask_status, retry_exc" ]
[ "0.67359835", "0.6397967", "0.6189997", "0.6074653", "0.60194963", "0.58749896", "0.58332354", "0.58151186", "0.58065194", "0.58065194", "0.57931924", "0.57492477", "0.5701048", "0.5651594", "0.56239885", "0.5590371", "0.5548968", "0.54814523", "0.545041", "0.5434488", "0.5432996", "0.54319906", "0.5427672", "0.5393198", "0.53886634", "0.5386925", "0.52865934", "0.5262578", "0.5209261", "0.5209261", "0.52065873", "0.5199914", "0.5197225", "0.51867986", "0.51865697", "0.51748097", "0.51708853", "0.516997", "0.516557", "0.5161641", "0.5153028", "0.51524675", "0.51451564", "0.51387787", "0.51289845", "0.51274496", "0.51197165", "0.5111481", "0.5087659", "0.5083974", "0.5082628", "0.50689685", "0.5061563", "0.50612724", "0.505994", "0.5059147", "0.5058944", "0.5050654", "0.5025109", "0.5025109", "0.50215644", "0.50091225", "0.5002319", "0.50016975", "0.49853706", "0.49780822", "0.4976904", "0.4974854", "0.4971627", "0.4961066", "0.49569902", "0.4955531", "0.49512345", "0.4949361", "0.49410206", "0.49359876", "0.49354786", "0.4933081", "0.4930154", "0.49270853", "0.49216604", "0.49204564", "0.4912089", "0.49062055", "0.4898298", "0.48971263", "0.48883092", "0.4883038", "0.48771402", "0.48712635", "0.48692167", "0.48639473", "0.48486683", "0.48481482", "0.48406997", "0.48368847", "0.48333982", "0.48288998", "0.48288998", "0.48253483" ]
0.59302324
5
Note: SMTPTimeoutError vs SMTPConnectError here depends on processing time.
async def test_timeout_error_with_no_server(event_loop):
    client = SMTP(hostname="127.0.0.1", port=65534, loop=event_loop)

    with pytest.raises(SMTPTimeoutError):
        await client.connect(timeout=0.000000001)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _connect_smtp(self):\n smtp = None\n try:\n smtp = smtplib.SMTP(self.servername, timeout = self.timeout)\n except smtplib.SMTPException as err:\n log.critical('smtp service at {} is not currently available'.format(self.servername))\n log.critical(err)\n except Exception as err:\n log.critical('smtp other error {} is not currently available'.format(self.servername))\n log.critical(err)\n \n if self.auth is not None:\n try:\n smtp.login(self.auth[0], self.auth[1])\n except smtplib.SMTPException as err:\n log.warn('smtp service login error for {}'.format(self.servername))\n log.warn(err)\n return smtp", "def Check_SMTP(name, my_ip):\n\n if nslookup(name)[0] != 0:\n add_info (name, SMTP_SERVER, \"cannot resolve SMTP server\")\n return 1\n if ping_machine(name) != 0:\n add_info(name, SMTP_SERVER, \"cannot ping SMTP server\")\n return 2\n\n status, err = tryconnect(name, SMTP_PORT)\n if status == 1 or status == 2:\n add_info(name, SMTP_SERVER, err)\n if status == 1:\n # if we time'd out, things can still be OK (say reverse DNS problems)\n # so return only an error if no timeout\n return 3\n\n stat, out = port_talker.TCPTalk(name, SMTP_PORT,\n 60, # timeout (>30sec for messed up servers)\n \"HELO \" + my_ip + \"\\r\\nQUIT\\r\\n\",\n None, # terminator\n 1024, # max len\n 1) # use external resolver\n\n # expected answer:\n #220 'mail.forobozz.com' ESMTP\n #250 mail.frobozz.com Hello grue.frobozz.com [192.168.0.21], pleased to meet ya\n #221 mail.frobozz.com closing connection\n\n # Each line can be repeated several times, so we check that all codes appear\n # and that no other codes appear\n codes = map(lambda x: x[:4], string.split(out, '\\n'))\n valid_codes = ('220 ', '250 ', '221 ', '')\n try:\n for code in codes:\n assert(code in valid_codes)\n for valid in valid_codes:\n assert(valid in codes)\n except:\n # If we wanted, we could check whether reverse DNS lookup is not working.\n # This would be the most likely explanation\n add_info(name, SMTP_SERVER, \"cannot HELO SMTP server\")\n return 4\n add_info(name, SMTP_SERVER, \"OK\")\n return 0", "def send_mail_when_failed(self, body):\r\n pass", "def EmailError(error_messages, to_address, from_address, smtp_server, subject):\n error_string = '\\n'.join(error_messages)\n if( None not in [error_string, to_address, from_address, smtp_server] ):\n email_message = MIMEMultipart('alternative')\n email_message['Subject'] = subject\n email_message['From'] = from_address\n email_message['To'] = to_address\n\n html_message = ''\n for error in error_messages:\n html_message = '%s<br/><h4>%s</h4><p>%s</p>' % (html_message, \n error.split('\\n')[0], error.lstrip('%s\\n' % \\\n error.split('\\n')[0]).replace('\\n','<br/>'))\n html_message = '<html><head></head><body>%s</body></html>' % html_message\n\n email_message.attach(MIMEText(error_string, 'plain'))\n email_message.attach(MIMEText(html_message, 'html'))\n\n smtp_handle = None\n try:\n smtp_handle = smtplib.SMTP(smtp_server)\n except socket.gaierror:\n print '%s is an invalid smtp server.' % smtp_server\n except smtplib.SMTPConnectError:\n print 'Failed to connect to %s.' % smtp_server\n if( smtp_handle is not None ):\n try:\n smtp_handle.sendmail(from_address,[to_address],\n email_message.as_string())\n except smtplib.SMTPRecipientsRefused:\n print '%s is an invalid email address.' 
% to_address\n smtp_handle.quit()", "async def test_connect_error_with_no_server(event_loop):\n client = SMTP(hostname=\"127.0.0.1\", port=65534, loop=event_loop)\n\n with pytest.raises(SMTPConnectError):\n await client.connect(timeout=0.1)", "def test_failed_email(self):\n self.assertEqual(send_email(\"testtestcom\", \"test\", \"test\"), 'There was an error sending')", "def test_smtp(self):\n self._endpointServerTest(\"smtp\", protocols.SMTPFactory)", "def connect(smtp_url: str, timeout: Optional[float] = None) -> smtplib.SMTP:\n return smtplib.SMTP(smtp_url, timeout=timeout)", "def SendTimeout(self) -> int:", "def SendTimeout(self) -> int:", "async def test_expn_error(\n smtp_client: SMTP, smtpd_server: asyncio.AbstractServer\n) -> None:\n async with smtp_client:\n with pytest.raises(SMTPResponseException):\n await smtp_client.expn(\"a-list\")", "def test_conn_err_retry(self, retry, get_conn):\r\n get_conn.return_value.open.side_effect = SMTPConnectError(424, \"Bad Connection\")\r\n\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'myself',\r\n 'subject': 'test subject for myself',\r\n 'message': 'test message for myself'\r\n }\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n self.assertTrue(retry.called)\r\n (__, kwargs) = retry.call_args\r\n exc = kwargs['exc']\r\n self.assertIsInstance(exc, SMTPConnectError)", "def test_endpointSMTP(self):\n self._endpointTest(\"smtp\")", "def postprocess():\n if ERRORS:\n address = '[email protected]'\n body = '\\n\\n'.join( ERRORS )\n msg = create_message( body, address )\n send_mail( msg, address )", "def send_mail_raise_smtp(messages):\n raise SMTPRecipientsRefused(recipients=messages[0].recipients())", "def send_error_email(receiver_email, subject, body):\n\n sender_email = \"[email protected]\"\n\n with open(CWD(\"mailcreds.txt\"), \"r\") as file:\n password = file.read()\n\n # Create a multipart message and set headers\n message = MIMEMultipart()\n message[\"From\"] = sender_email\n message[\"To\"] = receiver_email\n message[\"Subject\"] = subject\n\n # Add body to email\n message.attach(MIMEText(body, \"plain\"))\n\n text = message.as_string()\n\n # Log in to server using secure context and send email\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(\"smtp.gmail.com\", 465, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(sender_email, receiver_email, text)", "def connection_timedout(self):\n\n try:\n message = (\n f\"Connection to {self.channel} timed out, was the channel\"\n \" spelt correctly and is port 6667 open?\\n\"\n )\n self.send_to_outputfield(message)\n except Exception as e:\n logging.error(str(e))\n logging.exception(\"Exception : \")\n self.close()", "def test_data_err_retry(self, retry, get_conn):\r\n get_conn.return_value.send_messages.side_effect = SMTPDataError(455, \"Throttling: Sending rate exceeded\")\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'myself',\r\n 'subject': 'test subject for myself',\r\n 'message': 'test message for myself'\r\n }\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n # Test that we retry upon hitting a 4xx error\r\n self.assertTrue(retry.called)\r\n (__, kwargs) = retry.call_args\r\n exc = kwargs['exc']\r\n self.assertIsInstance(exc, SMTPDataError)", "def send_failed(self, message, exc=None):\n with self.app.peers_lock:\n 
self.declare_no_connection(self.app.peers[message.to])\n return None", "def __init__(self, smtp_server, smtp_user, smtp_password,\n smtp_port=25, is_with_tls=False):\n self.smtp_server = smtp_server\n self.smtp_port = smtp_port\n self.smtp_user = smtp_user\n self.smtp_password = smtp_password\n self.is_with_tls = is_with_tls", "def send_email(self, text):\n msg_text = MIMEText(text)\n msg_text['Subject'] = '[WebSite Watchdog] Failure'\n msg_text['From'] = self.from_email\n msg_text['To'] = self.to_email\n \n s = smtplib.SMTP(self.smtp_server)\n s.sendmail(self.from_email, [self.to_email], msg_text.as_string())\n s.quit()", "def send(self):\n answers = dns.resolver.query(self.domain, 'MX')\n try:\n for answer in answers:\n ex = answer.exchange.to_text()\n server = smtplib.SMTP(ex)\n server.set_debuglevel(self.verbose)\n server.sendmail(self.sender, [self.recipient], self.message.as_string())\n server.quit()\n except OSError as e:\n if e.errno is errno.ENETUNREACH:\n print('Looks like port 25 is blocked')\n raise e", "def _setup_smtp_server(self):\n\n # Init; Attempt to use external first\n target = 'external'\n\n # ============================================================\n # Attempt (1): External mail server\n # ============================================================\n\n if target == 'external':\n # Assume it's a machine external to company network.\n # We will use an external email account that requires a login.\n\n # msg = f'_setup_smtp_server(): Attempting to launch session as external machine...'\n # fancy_print(msg, fg=COMMUNICATOR_MSG_COLOR, bold=True)\n\n self.host = EXTERNAL_HOST\n self.port = EXTERNAL_PORT\n self.sender_address = EXTERNAL_USER_NAME\n self.sender_pwd = EXTERNAL_USER_PWD\n\n try:\n sess = smtplib.SMTP(host=self.host, port=self.port)\n sess.starttls()\n sess.login(self.sender_address, self.sender_pwd)\n return sess\n except:\n target = 'internal'\n\n # ============================================================\n # Attempt (2): Company internal mail server\n # ============================================================\n\n if target == 'internal':\n # Assume machine is internal to company network.\n # Current user should already be authenticated.\n\n # msg = f'_setup_smtp_server(): Attempting to launch session as internal Cooper machine...'\n # fancy_print(msg, fg=COMMUNICATOR_MSG_COLOR, bold=True)\n\n self.host = INTERNAL_HOST\n self.port = INTERNAL_PORT\n self.sender_address = INTERNAL_USER_NAME\n self.sender_pwd = INTERNAL_USER_PWD\n\n try:\n sess = smtplib.SMTP(self.host)\n return sess\n except:\n msg = f'COMMUNICATOR WARNING: Could not establish SMTP connection. 
Check configuration.'\n fancy_print(msg, fg=COMMUNICATOR_WARN_COLOR)\n\n msg = f'Could not establish SMTP connection'\n raise ConnectionError(msg)", "def test_send_email_with_incomplete_payload(app, session, email_msg):\n # TEST\n with pytest.raises(QueueException) as excinfo:\n worker.send_email(email_msg, None)\n\n assert 'Unsuccessful sending email' in str(excinfo)", "def tcp_error(self, flow: mitmproxy.tcp.TCPFlow):", "async def connect_async(smtp_url,\n timeout: Optional[float] = None) -> aiosmtplib.SMTP:\n return await aiosmtplib.SMTP(smtp_url, timeout=timeout)", "def check_smtp_server_connection(self):\n try:\n connected = True\n\n while not self.config:\n time.sleep(1)\n\n # Create SMTP server and handshake\n server = smtplib.SMTP(self.config.smtp_host + ':' + self.config.smtp_port)\n server.connect(self.config.smtp_host + ':' + self.config.smtp_port)\n\n self.logger.info(MODULE_NAME + '::check_smtp_server_connection::Successfully '\n 'connected to the configured SMTP server and port at: ' + self.config.smtp_host + ':' + self.config.smtp_port)\n\n server.quit()\n\n return connected\n\n except Exception as e:\n self.logger.error(MODULE_NAME + '::check_smtp_server_connection()::The following '\n 'unhandled exception occurred: ' + e.message)\n connected = False\n return connected", "def _test_retry_after_unlimited_retry_error(self, exception):\r\n num_emails = 8\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = 0\r\n expected_succeeds = num_emails\r\n # Note that because celery in eager mode will call retries synchronously,\r\n # each retry will increase the stack depth. It turns out that there is a\r\n # maximum depth at which a RuntimeError is raised (\"maximum recursion depth\r\n # exceeded\"). 
The maximum recursion depth is 90, so\r\n # num_emails * expected_retries < 90.\r\n expected_retries = 10\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # Cycle through N throttling errors followed by a success.\r\n get_conn.return_value.send_messages.side_effect = cycle(\r\n chain(repeat(exception, expected_retries), [None])\r\n )\r\n self._test_run_with_task(\r\n send_bulk_course_email,\r\n 'emailed',\r\n num_emails,\r\n expected_succeeds,\r\n failed=expected_fails,\r\n retried_nomax=(expected_retries * num_emails)\r\n )", "def raise_timeout_error(api_url, headers, timeout, proxies):\n raise requests.exceptions.Timeout", "def raise_timeout_error(api_url, headers, timeout, proxies):\n raise requests.exceptions.Timeout", "def test_email():\n recipients = configs[\"email_to\"].split(\", \")\n email_body = test_email_content()\n if configs[\"smtp_ssl\"] == 1:\n server = smtplib.SMTP_SSL(configs[\"smtp_server\"])\n elif configs[\"smtp_tls\"] == 1:\n server = smtplib.SMTP(configs[\"smtp_server\"])\n server.starttls()\n else:\n server = smtplib.SMTP(configs[\"smtp_server\"])\n\n if configs[\"smtp_authentication\"] == 1:\n server.login(configs[\"username\"], configs[\"password\"])\n\n server.sendmail(configs[\"email_from\"], recipients, email_body)\n server.quit()", "def connect(self, hostname, timeout=5):\n try:\n socket.gethostbyname(hostname)\n server = _smtp.SMTP(timeout=timeout)\n code, resp = server.connect(hostname)\n if code == 220:\n return server\n except:\n pass\n return None", "def test_data_err_fail(self, retry, result, get_conn):\r\n # have every fourth email fail due to blacklisting:\r\n get_conn.return_value.send_messages.side_effect = cycle([SMTPDataError(554, \"Email address is blacklisted\"),\r\n None, None, None])\r\n students = [UserFactory() for _ in xrange(settings.BULK_EMAIL_EMAILS_PER_TASK)]\r\n for student in students:\r\n CourseEnrollmentFactory.create(user=student, course_id=self.course.id)\r\n\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'all',\r\n 'subject': 'test subject for all',\r\n 'message': 'test message for all'\r\n }\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n # We shouldn't retry when hitting a 5xx error\r\n self.assertFalse(retry.called)\r\n # Test that after the rejected email, the rest still successfully send\r\n ((_entry_id, _current_task_id, subtask_status), _kwargs) = result.call_args\r\n self.assertEquals(subtask_status.skipped, 0)\r\n expected_fails = int((settings.BULK_EMAIL_EMAILS_PER_TASK + 3) / 4.0)\r\n self.assertEquals(subtask_status.failed, expected_fails)\r\n self.assertEquals(subtask_status.succeeded, settings.BULK_EMAIL_EMAILS_PER_TASK - expected_fails)", "def _test_email_address_failures(self, exception):\r\n # Select number of emails to fit into a single subtask.\r\n num_emails = settings.BULK_EMAIL_EMAILS_PER_TASK\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = int((num_emails + 3) / 4.0)\r\n expected_succeeds = num_emails - expected_fails\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # have every fourth email fail due to some address failure:\r\n get_conn.return_value.send_messages.side_effect = cycle([exception, None, None, None])\r\n self._test_run_with_task(send_bulk_course_email, 'emailed', num_emails, expected_succeeds, failed=expected_fails)", "def test_disconn_err_retry(self, retry, 
get_conn):\r\n get_conn.return_value.open.side_effect = SMTPServerDisconnected(425, \"Disconnecting\")\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'myself',\r\n 'subject': 'test subject for myself',\r\n 'message': 'test message for myself'\r\n }\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n self.assertTrue(retry.called)\r\n (__, kwargs) = retry.call_args\r\n exc = kwargs['exc']\r\n self.assertIsInstance(exc, SMTPServerDisconnected)", "def retry_if_resetpeer_or_timeout(exception):\n return not ((not isinstance(exception, requests_exceptions.ConnectionError)\n and not isinstance(exception, requests_exceptions.ConnectTimeout))\n and not isinstance(exception, BadStatusLine or exception.errno == errno.ECONNRESET))", "def _test_retry_after_limited_retry_error(self, exception):\r\n # If we want the batch to succeed, we need to send fewer emails\r\n # than the max retries, so that the max is not triggered.\r\n num_emails = settings.BULK_EMAIL_MAX_RETRIES\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = 0\r\n expected_succeeds = num_emails\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # Have every other mail attempt fail due to disconnection.\r\n get_conn.return_value.send_messages.side_effect = cycle([exception, None])\r\n self._test_run_with_task(\r\n send_bulk_course_email,\r\n 'emailed',\r\n num_emails,\r\n expected_succeeds,\r\n failed=expected_fails,\r\n retried_withmax=num_emails\r\n )", "def _test_max_retry_limit_causes_failure(self, exception):\r\n # Doesn't really matter how many recipients, since we expect\r\n # to fail on the first.\r\n num_emails = 10\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = num_emails\r\n expected_succeeds = 0\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # always fail to connect, triggering repeated retries until limit is hit:\r\n get_conn.return_value.send_messages.side_effect = cycle([exception])\r\n with patch('bulk_email.tasks.update_subtask_status', my_update_subtask_status):\r\n self._test_run_with_task(\r\n send_bulk_course_email,\r\n 'emailed',\r\n num_emails,\r\n expected_succeeds,\r\n failed=expected_fails,\r\n retried_withmax=(settings.BULK_EMAIL_MAX_RETRIES + 1)\r\n )", "def mail_error( error, log_file=None, verbose = False, get_config=lambda: {} ):\n return mail_log( error, log_file, True, verbose, get_config=get_config )", "def sendEmails(\n receiverName,\n retainedCompany,\n companyName,\n emailList,\n senderName,\n senderEmail,\n emailPassword,\n senderTitle,\n senderCompany,\n senderCompanyHomePage,\n senderPhone,\n port=465,\n returnHTML = True \n ):\n\n for emailToTry in emailList: \n # change back the next line after testing\n time.sleep(np.random.uniform(5,15)) # I introduced this because I was being rate limited, and I want to see if this will help avoid that - it seems to help\n print(f'trying {emailToTry}')\n message = MIMEMultipart('alternative')\n message['Subject'] = f'Engineering Positions at {companyName}' # change this back when ready to send actual emails\n message['From'] = senderEmail\n message['To'] = emailToTry # note that this only affects the headers - it does not affect to whom the message gets sent to\n\n [text, html] = emailTextHTML(receiverName, retainedCompany, companyName, senderName, senderTitle, senderCompany, 
senderEmail, senderCompanyHomePage, senderPhone, returnHTML=returnHTML)\n\n\n part1 = MIMEText(text, 'plain')\n part2 = MIMEText(html, 'html')\n\n message.attach(part1)\n message.attach(part2)\n\n # create a secure SSL context\n context = ssl.create_default_context()\n\n # now loop over each email message and extract what we need:\n with smtplib.SMTP_SSL('smtp.gmail.com', port, context=context) as server:\n # Using with smtplib.SMTP_SSL() as server: makes sure that the connection is automatically closed at the end of the indented code block. If port is zero, or not specified, .SMTP_SSL() will use the standard port for SMTP over SSL (port 465).\n server.login(senderEmail, emailPassword)\n server.sendmail(senderEmail, emailToTry, message.as_string())\n # the above line is how we actually change whom the message is sent to", "def send_email(self, user, subject, body, num_attempts=MAX_ATTEMPTS):\r\n fromaddr = u'[email protected]'\r\n toaddr = u'{} <{}>'.format(user.profile.name, user.email)\r\n msg = EmailMessage(subject, body, fromaddr, (toaddr,))\r\n msg.content_subtype = \"html\"\r\n\r\n i = 1\r\n while i <= num_attempts:\r\n try:\r\n msg.send()\r\n return True # Happy path!\r\n except SINGLE_EMAIL_FAILURE_ERRORS:\r\n # Something unrecoverable is wrong about the email acct we're sending to\r\n log.exception(\r\n u\"LinkedIn: Email send failed for user {}, email {}\"\r\n .format(user.username, user.email)\r\n )\r\n return False\r\n except LIMITED_RETRY_ERRORS:\r\n # Something went wrong (probably an intermittent connection error),\r\n # but maybe if we beat our heads against the wall enough times,\r\n # we can crack our way through. Thwack! Thwack! Thwack!\r\n # Give up after num_attempts though (for loop exits), let's not\r\n # get carried away.\r\n log.exception(\r\n u\"LinkedIn: Email send for user {}, email {}, encountered error, attempt #{}\"\r\n .format(user.username, user.email, i)\r\n )\r\n i += 1\r\n continue\r\n except INFINITE_RETRY_ERRORS:\r\n # Dude, it will *totally* work if I just... sleep... a little...\r\n # Things like max send rate exceeded. The smart thing would be\r\n # to do exponential backoff. 
The lazy thing to do would be just\r\n # sleep some arbitrary amount and trust that it'll probably work.\r\n # GUESS WHAT WE'RE DOING BOYS AND GIRLS!?!\r\n log.exception(\"LinkedIn: temporary error encountered, retrying\")\r\n time.sleep(1)\r\n\r\n # If we hit here, we went through all our attempts without success\r\n return False", "def send(message: Message, smtp_url: str,\n timeout: Optional[float] = None) -> None:\n with smtplib.SMTP(smtp_url, timeout=timeout) as smtp:\n smtp.send_message(message.as_mime())", "def send_email(error_msd, filename, config):\n msg = MIMEMultipart()\n msg['From'] = config['FROM']\n msg['To'] = config['TO']\n password = config['PASSWORD']\n msg['Subject'] = \"Server is down {}\".format(sys.argv[1])\n body = \"Error found in log: \\n\" + '\\n'.join(error_msd) + \"\\n Check logfile: \" + filename\n msg.attach(MIMEText(body, 'html'))\n\n server = smtplib.SMTP(config['SMTP_SERVER'], config['PORT'])\n server.starttls()\n server.login(msg['From'], password)\n try:\n server.sendmail(msg['From'],msg['To'], msg.as_string())\n server.quit()\n return True\n except socket.error as e:\n log.debug('email failed: Exception {}'.format(e))\n raise", "def __init__(self, smtp_server='localhost', smtp_port=None,\n smtp_ssl=False, smtp_user=None, smtp_password=None):\n self._text_body = None\n self._html_body = None\n self._subject = \"\"\n self._reply_to = None\n\n self._smtp_server = smtp_server\n self._smtp_port = smtp_port\n self._smtp_ssl = smtp_ssl\n self._smtp_user = smtp_user\n self._smtp_password = smtp_password\n\n self._re_email = re.compile(\"^([\\\\w \\\\._]+\\\\<[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\\\>|[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?)$\")\n self.clear_recipients()\n self.clear_attachments()", "def setup_smtp():\n print(\"INFO: Setting up SMTP variables...\")\n conf = ConfigParser()\n conf.read(os.path.join(os.path.abspath(\n os.path.dirname(__file__)), '.', 'smtp.conf'))\n smtp = {\n \"server\": conf.get('smtp', 'smtp_server'),\n \"login\": conf.get('smtp', 'smtp_login'),\n \"password\": conf.get('smtp', 'smtp_password'),\n \"port\": conf.get('smtp', 'smtp_port'),\n \"sender\": conf.get('smtp', 'smtp_sender'),\n }\n return smtp", "def raise_timeout_error_upload(api_url, headers, data, timeout, proxies):\n raise requests.exceptions.Timeout", "def send_error(self, conn, msg):\n # dst ip becomes src ip to return the message\n\n # src ip becomes this ip\n\n # type becomes \"no route\"\n\n # msg is empty\n\n # send from port incoming...current dst ip?\n\n # TODO\n\n return", "def test_send_error_to_admin(self, process_mock, send_smtp_mock):\n # arrange an mock error during processing\n process_mock.side_effect = RuntimeError('mock error')\n\n with name_of_file_containing('contents') as filename:\n call_command('process_email', email_file=filename)\n\n self.assertTrue(send_smtp_mock.called)\n (msg,) = send_smtp_mock.call_args.args\n self.assertEqual(msg['to'], '[email protected]', 'Admins should be emailed on error')\n self.assertIn('error', msg['subject'].lower(), 'Error email subject should indicate error')\n self.assertTrue(msg.is_multipart(), 'Error email should have attachments')\n parts = msg.get_payload()\n self.assertEqual(len(parts), 3, 'Error email should contain message, traceback, and original message')\n content = parts[0].get_payload()\n traceback = 
parts[1].get_payload()\n original = parts[2].get_payload(decode=True).decode() # convert octet-stream to string\n self.assertIn('RuntimeError', content, 'Error type should be included in error email')\n self.assertIn('mock.py', content, 'File where error occurred should be included in error email')\n self.assertIn('traceback', traceback.lower(), 'Traceback should be attached to error email')\n self.assertEqual(original, 'contents', 'Original message should be attached to error email')", "def _test_immediate_failure(self, exception):\r\n # Doesn't really matter how many recipients, since we expect\r\n # to fail on the first.\r\n num_emails = 10\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = num_emails\r\n expected_succeeds = 0\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # always fail to connect, triggering repeated retries until limit is hit:\r\n get_conn.return_value.send_messages.side_effect = cycle([exception])\r\n self._test_run_with_task(\r\n send_bulk_course_email,\r\n 'emailed',\r\n num_emails,\r\n expected_succeeds,\r\n failed=expected_fails,\r\n )", "def __init__(self, host, user, password, port=25):\n self.host = host\n self.port = port\n self.user = user\n self.password = password\n\n self.smtp = smtplib.SMTP()", "def connection_failed(self, connection, error):\n assert False", "def _send(self,msg):\n attempts = 3\n while attempts > 0:\n self.sock.sendto(msg, self.ip_port)\n ready = select.select([self.sock], [], [], self.timeout)\n if ready[0]:\n data, ip_port = self.sock.recvfrom(60)\n if ip_port != self.ip_port: continue\n return decode(data)\n attempts -= 1\n print(\"Retrying send\")\n return None", "def _readsmtpserver(self):\n # FIXME too much duplicated code in these _readXYZ() methods\n try: \n self.smtpserver = self.conf.get(\"report.email.smtp_server\")\n except:\n # we use print so this messages goes to the stdout\n msg = \"configuration variable 'smtp_server' is not defined. 
Plugin Email cannot be created\"\n self.log.error(msg)\n raise PluginConfigurationFailure(msg)", "def test_compose_email_somebad(self):\n pass", "def mail():\n mail_server = 'localhost'\n mail_port = 1025\n CustomSMTPServer((mail_server, mail_port), None)\n asyncore.loop()", "def ConnectToSmtpServer():\n payload = ['ehlo [email protected]\\r\\n']\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((FLAGS.host, FLAGS.smtp_port))\n SendPayload(s, payload)\n logging.info('Connected to SMTP server.')\n return s", "def send_queued_mail():\r\n now = datetime.datetime.now(g.tz)\r\n if not c.site:\r\n c.site = Default\r\n\r\n clear = False\r\n session = smtplib.SMTP(g.smtp_server)\r\n # convienence funciton for sending the mail to the singly-defined session and\r\n # marking the mail as read.\r\n def sendmail(email):\r\n try:\r\n session.sendmail(email.fr_addr, email.to_addr,\r\n email.to_MIMEText().as_string())\r\n email.set_sent(rejected = False)\r\n # exception happens only for local recipient that doesn't exist\r\n except (smtplib.SMTPRecipientsRefused, smtplib.SMTPSenderRefused):\r\n # handle error and print, but don't stall the rest of the queue\r\n print \"Handled error sending mail (traceback to follow)\"\r\n traceback.print_exc(file = sys.stdout)\r\n email.set_sent(rejected = True)\r\n\r\n\r\n try:\r\n for email in Email.get_unsent(now):\r\n clear = True\r\n\r\n should_queue = email.should_queue()\r\n # check only on sharing that the mail is invalid\r\n if email.kind == Email.Kind.SHARE and should_queue:\r\n email.body = Share(username = email.from_name(),\r\n msg_hash = email.msg_hash,\r\n link = email.thing,\r\n body = email.body).render(style = \"email\")\r\n email.subject = _(\"[reddit] %(user)s has shared a link with you\") % \\\r\n {\"user\": email.from_name()}\r\n sendmail(email)\r\n elif email.kind == Email.Kind.OPTOUT:\r\n email.body = Mail_Opt(msg_hash = email.msg_hash,\r\n leave = True).render(style = \"email\")\r\n email.subject = _(\"[reddit] email removal notice\")\r\n sendmail(email)\r\n\r\n elif email.kind == Email.Kind.OPTIN:\r\n email.body = Mail_Opt(msg_hash = email.msg_hash,\r\n leave = False).render(style = \"email\")\r\n email.subject = _(\"[reddit] email addition notice\")\r\n sendmail(email)\r\n\r\n elif email.kind in (Email.Kind.FEEDBACK, Email.Kind.ADVERTISE):\r\n if email.kind == Email.Kind.FEEDBACK:\r\n email.subject = \"[feedback] feedback from '%s'\" % \\\r\n email.from_name()\r\n else:\r\n email.subject = \"[ad_inq] feedback from '%s'\" % \\\r\n email.from_name()\r\n sendmail(email)\r\n # handle failure\r\n else:\r\n email.set_sent(rejected = True)\r\n\r\n finally:\r\n session.quit()\r\n\r\n # clear is true if anything was found and processed above\r\n if clear:\r\n Email.handler.clear_queue(now)", "def __init__(self, auth=False):\n self.smtp = smtplib.SMTP(host=EMAIL_HOST, port=EMIAL_HOST_PORT)\n self.smtp.ehlo()\n if auth:\n self.smtp.login(EMAIL, EMAIL_PASSWORD)", "def raise_connection_error(api_url, headers, timeout, proxies):\n raise requests.exceptions.ConnectionError", "def raise_connection_error(api_url, headers, timeout, proxies):\n raise requests.exceptions.ConnectionError", "def SocketError(self) -> SocketError:", "async def test_disconnected_server_raises_on_starttls(preset_client):\n await preset_client.connect()\n preset_client.server.responses.append(\n b\"\\n\".join([b\"250-localhost, hello\", b\"250-SIZE 100000\", b\"250 STARTTLS\"])\n )\n await preset_client.ehlo()\n\n preset_client.server.responses.append(b\"220 begin TLS 
pls\")\n preset_client.server.drop_connection_event.set()\n\n with pytest.raises(SMTPServerDisconnected):\n await preset_client.starttls(validate_certs=False)\n\n # Verify that the connection was closed\n assert not preset_client._connect_lock.locked()\n assert preset_client.protocol is None\n assert preset_client.transport is None", "def error_received(self, exc: Exception) -> None:\n _log.exception('UDP operation failed')", "def mail(server, from_address, from_pass, address_list, msg, port = 25):\n\n smtp_mail = smtplib.SMTP(server,port)\n smtp_mail.starttls()\n smtp_mail.login(from_address, from_pass)\n smtp_mail.sendmail(from_address, address_list, msg) \n smtp_mail.quit()", "def send_email(self, message, mail_server_id=None, smtp_server=None, smtp_port=None,\n smtp_user=None, smtp_password=None, smtp_encryption=None, smtp_debug=False,\n smtp_session=None):\n # Use the default bounce address **only if** no Return-Path was\n # provided by caller. Caller may be using Variable Envelope Return\n # Path (VERP) to detect no-longer valid email addresses.\n if smtp_user:\n _logger.error(\"smpt session --------------------\")\n _logger.error(smtp_user)\n smtp_from = smtp_user\n else:\n smtp_from = message['Return-Path'] or self._get_default_bounce_address() or message['From']\n assert smtp_from, \"The Return-Path or From header is required for any outbound email\"\n\n # The email's \"Envelope From\" (Return-Path), and all recipient addresses must only contain ASCII characters.\n from_rfc2822 = extract_rfc2822_addresses(smtp_from)\n assert from_rfc2822, (\"Malformed 'Return-Path' or 'From' address: %r - \"\n \"It should contain one valid plain ASCII email\") % smtp_from\n # use last extracted email, to support rarities like 'Support@MyComp <[email protected]>'\n smtp_from = from_rfc2822[-1]\n email_to = message['To']\n email_cc = message['Cc']\n email_bcc = message['Bcc']\n del message['Bcc']\n\n smtp_to_list = [\n address\n for base in [email_to, email_cc, email_bcc]\n for address in extract_rfc2822_addresses(base)\n if address\n ]\n assert smtp_to_list, self.NO_VALID_RECIPIENT\n\n x_forge_to = message['X-Forge-To']\n if x_forge_to:\n # `To:` header forged, e.g. 
for posting on mail.channels, to avoid confusion\n del message['X-Forge-To']\n del message['To'] # avoid multiple To: headers!\n message['To'] = x_forge_to\n\n # Do not actually send emails in testing mode!\n if getattr(threading.currentThread(), 'testing', False) or self.env.registry.in_test_mode():\n _test_logger.info(\"skip sending email in test mode\")\n return message['Message-Id']\n\n try:\n message_id = message['Message-Id']\n smtp = smtp_session\n smtp = smtp or self.connect(\n smtp_server, smtp_port, smtp_user, smtp_password,\n smtp_encryption, smtp_debug, mail_server_id=mail_server_id)\n smtp.sendmail(smtp_from, smtp_to_list, message.as_string())\n # do not quit() a pre-established smtp_session\n if not smtp_session:\n smtp.quit()\n except smtplib.SMTPServerDisconnected:\n raise\n except Exception as e:\n params = (ustr(smtp_server), e.__class__.__name__, ustr(e))\n msg = _(\"Mail delivery failed via SMTP server '%s'.\\n%s: %s\") % params\n _logger.info(msg)\n raise MailDeliveryException(_(\"Mail Delivery Failed\"), msg)\n return message_id", "def _send_smtp(message, subject, to, to_name, sender, sender_name):\n host = app.config.get('MAIL_HOST')\n\n if not host:\n raise MailFailure('SMTP Server Not Configured')\n\n try:\n server = smtplib.SMTP(host)\n except (smtplib.SMTPConnectError, socket.error) as ex:\n app.logger.error('Unable to send mail: %s', str(ex))\n raise MailFailure('Error connecting to SMTP server.')\n\n msg = text.MIMEText(message)\n msg['Subject'] = subject\n msg['To'] = email.utils.formataddr((to_name, to))\n msg['From'] = email.utils.formataddr((sender_name, sender))\n\n try:\n if app.debug:\n server.set_debuglevel(True)\n server.sendmail(sender, [to], msg.as_string())\n except (smtplib.SMTPException, socket.error) as ex:\n app.logger.error('Unable to send mail: %s', str(ex))\n raise MailFailure('Error sending mail to SMTP server.')\n finally:\n try:\n server.quit()\n except smtplib.SMTPException:\n pass", "def send_error(self, conn, msg):\n print(\"ERROR PLACEHOLDER\")\n\n return", "def send_email(to, subject, content): \n \n #Create the message\n msg = MIMEMultipart('alternative')\n msg = addheader(msg, 'Subject', subject)\n msg[\"Subject\"] = subject\n msg[\"From\"] = EMAIL_SENDER\n msg[\"To\"] = listToStr(to)\n content = MIMEText(content.encode('utf-8'), \"html\")\n msg.attach(content);\n \n try:\n smtpObj = SMTP(GMAIL_SMTP, GMAIL_SMTP_PORT)\n #Identify yourself to GMAIL ESMTP server.\n smtpObj.ehlo()\n #Put SMTP connection in TLS mode and call ehlo again.\n smtpObj.starttls()\n smtpObj.ehlo()\n #Login to service\n smtpObj.login(user=EMAIL_SENDER, password=PASSWORD)\n #Send email\n print msg.as_string()\n smtpObj.sendmail(EMAIL_SENDER, to, msg.as_string())\n #close connection and session.\n smtpObj.quit();\n except SMTPException as error:\n print \"Error: unable to send email : {err}\".format(err=error)", "def test_bademail(mock_smtp):\n \n msg_values = {'sender': '[email protected]',\n 'receiver': '[email protected]',\n 'subject': 'Test Message Subject',\n 'body': 'This is a Test.'}\n \n rejected_recipients = ['[email protected]', '[email protected]']\n\n msg = Message(**msg_values)\n \n mock_smtp.return_value.sendmail.return_value = rejected_recipients\n \n with pytest.raises(BadEmailRecipient) as err:\n msg.send()\n \n expected_message = (\"Message '{}' could not be sent to one or more \"\n \"recipients.\").format(msg_values['subject'])\n assert expected_message == err.value.message\n assert Counter(err.value.rejected) == Counter(rejected_recipients)", 
"def _login(self):\n self._smtp = smtplib.SMTP(host=self._config.host,\n port=self._config.port)\n # send 'hello' to SMTP server\n self._smtp.ehlo()\n # start TLS encryption\n self._smtp.starttls()\n self._smtp.login(self._config.sender_email, self._config.password)\n self._connected = True", "def get_smtpd_stats(logvalues):\n possible_status = ['timeout', 'lost', 'warning:', 'NOQUEUE:', 'connect',\n 'disconnect']\n message_id_or_status = logvalues[5]\n smtp_code = int()\n reason_code = str()\n smtp_status = str()\n mail_to = str()\n mail_from = str()\n smtp_client = str()\n\n if message_id_or_status in possible_status:\n print \"Found DECISION: {}\".format(message_id_or_status)\n if 'NOQUEUE' in message_id_or_status:\n smtp_code = logvalues[6]\n smtp_client = logvalues[9]\n reason_code = logvalues[11]\n if smtp_code == '451' and reason_code == '4.7.1':\n # Recipient address rejected: Intentional policy rejection,\n # please try again later (GREYLISTED)\n mail_to = logvalues[12]\n mail_from = logvalues[34]\n smtp_status = 'Greylisted'\n\n elif smtp_code == '450' and reason_code == '4.1.8':\n # Sender Address Rejected, Sender's Domain not found\n smtp_client = extract_connecting_ip(logvalues[5])\n mail_from = extract_email_address(logvalues[8])\n mail_to = extract_email_address(logvalues[16])\n smtp_status = \"Invalid Sender Domain\"\n\n elif smtp_code == '550' and reason_code == '5.1.0':\n # Sender Address Rejected, user unknown in virtual mailbox\n # table. In this scenario, someone or some thing is attempting\n # to use the local SMTP server as an open relay, but sneakily.\n # They are usually using the local domain as the sending domain\n # and then some other domain, say yahoo, google, hotmail as the\n # recipient\n smtp_client = extract_connecting_ip(logvalues[9])\n mail_from = extract_email_address(logvalues[22])\n mail_to = extract_email_address(logvalues[23])\n smtp_status = \"Invalid Sender\"\n\n elif smtp_code == '550' and reason_code == '5.1.1':\n # Recipient Address Rejected, user unknown in virtual mailbox\n # table. This could be a mis-configuration of the local SMTP\n # mailbox setup - the user specified in the \"To:\" field doesnt\n # exist and the message is rejected. 
This can also happen when\n # the sending address is Null\n smtp_client = extract_connecting_ip(logvalues[9])\n mail_from = extract_email_address(logvalues[22])\n mail_to = extract_email_address(logvalues[23])\n smtp_status = \"Invalid Recipient\"\n\n elif smtp_code == '554':\n # Sender access DENIED, sent from a dynamic IP range\n smtp_client = extract_connecting_ip(logvalues[9])\n mail_from = extract_email_address(logvalues[41])\n mail_to = extract_email_address(logvalues[42])\n smtp_status = \"Bad sending server\"\n\n elif 'connect' == message_id_or_status:\n smtp_client = extract_connecting_ip(logvalues[7])\n\n elif 'disconnect' in message_id_or_status:\n smtp_client = extract_connecting_ip(logvalues[7])\n\n print \"Rejection information:\\\n SMTP Client: {} \\\n MAIL FROM: {} \\\n MAIL TO: {}\".format(smtp_client, mail_from, mail_to)\n print \" SMTP Codes: {} - {}: {}\".format(smtp_code,\n reason_code,\n smtp_status)\n else:\n print \"Found Message ID: {}\".format(message_id_or_status)\n print logvalues", "def secure_connection(HOST,PORT,PROTOCOL=\"smtp\",TLSSTRENGTH=\"tls1_2\"):\n\n print(\"Connecting to host: {} on Port Number {} using an IMPLICITY SECURE Connection \\r\\n\".format(HOST,PORT))\n\n context = create_tls_context(TLSSTRENGTH)\n secure_client = context.wrap_socket(socket.socket(socket.AF_INET),server_hostname=HOST)\n secure_client.settimeout(SOCKET_TIMEOUT)\n\n try:\n secure_client.connect((HOST,PORT))\n # SMTP NEEDS A EHLO MESSAGE BEFORE OTHER COMMANDS\n # IMAP AND POP DO NOT\n data = secure_client.recv(1024)\n if PROTOCOL==\"smtp\":\n secure_client.send(SMTP_EHLO)\n data = secure_client.recv(1024)\n #print('SMTP EHLO RESPONSE: ', repr(data))\n print_cipher_certificate(secure_client)\n decide_protocol_handler(secure_client,PROTOCOL)\n\n except Exception as e:\n print(\"Connection Could Not Be Established \\r\\n\")\n print(e)", "def on_connection_error(self):\n log.error(\"Stream connection has errored or timed out\")", "def processTimeLeft(sendMail, verbose, proxyInfo, time, mail):\n if proxyInfo:\n if verbose:\n print('Proxy information: {}'.format(proxyInfo))\n timeLeft = []\n for line in proxyInfo:\n if line.find('timeleft') > -1:\n dateReg = re.compile('\\d{1,3}[:/]\\d{2}[:/]\\d{2}')\n timeLeft = dateReg.findall(line)[0]\n timeLeft = timeLeft.split(':')[0]\n continue\n else:\n msg = \"No valid proxy found in %s. \" % HOST\n msg += \"Please create one.\\n\"\n\n if verbose:\n print(msg)\n print(\"Send mail: {}\".format(sendMail))\n\n if sendMail:\n if verbose:\n print(\"Sending mail notification\")\n sendMailNotification(mail, msg)\n sys.exit(4)\n\n ### // build message\n if int(time) >= int(timeLeft):\n msg = \"\\nProxy file in %s is about to expire. 
\" % HOST\n msg += \"Please renew it.\\n\"\n msg += \"Hours left: %i\\n\" % int(timeLeft)\n if int(timeLeft) == 0:\n msg = \"Proxy file in %s HAS expired.\" % HOST\n msg += \"Please renew it.\\n\"\n\n if verbose:\n print(msg)\n print(\"Send mail: {}\".format(sendMail))\n\n ### // Sends an email\n if sendMail:\n if verbose:\n print(\"Sending mail notification\")\n sendMailNotification(mail, msg, proxyInfo, verbose)", "def raise_connection_error_upload(api_url, headers, data, timeout, proxies):\n raise requests.exceptions.ConnectionError", "def mail_log( log, log_file=None, is_error = False, verbose = False, get_config=lambda: {} ):\n tries = 3\n log_text = \"\"\n while tries:\n try:\n if log != None:\n msg = MIMEText(log)\n elif log_file != None:\n log_text = re.sub(\"^smtp_.*$|^ssh_.*$\",\"\",log_file.read(),flags=re.M)\n msg = MIMEText(log_text[:2*pow(2,20)])\n else:\n return 0\n\n if is_error:\n if verbose:\n print(\"E-mailing log file with errors\", file=sys.stderr)\n msg['Subject'] = \"bkp error: %s \"%(platform.node())\n msg['From'] = get_config()[\"error_email\"]\n msg['To'] = get_config()[\"error_email\"]\n else:\n if verbose:\n print(\"E-mailing log file with no errors\", file=sys.stderr)\n msg['Subject'] = \"bkp complete: %s\"%(platform.node())\n msg['From'] = get_config()[\"log_email\"]\n msg['To'] = get_config()[\"log_email\"]\n\n msg['Date'] = datetime.datetime.now().strftime( \"%m/%d/%Y %H:%M\" )\n send_email(msg)\n return 0\n except:\n time.sleep(tries*10.0)\n tries = tries - 1\n if not tries:\n if is_error:\n print(\"Error couldn't send via e-mail\", file=sys.stderr)\n else:\n print(\"Success couldn't send via e-mail\", file=sys.stderr)\n if log:\n print(log, file=sys.stderr)\n if log_text:\n print(log_text, file=sys.stderr)\n print(traceback.format_exc(), file=sys.stderr)\n raise", "def failed(self, message, reason=None):\n failed_mail.send(\n sender=self.__class__,\n message=message,\n reason=reason\n )", "def _handle_error(self, path, reqs, headers, get=True):\n call = requests.get if get else requests.post\n resp = None\n dump = json.dumps(reqs)\n wait = self.config.start_reconnect_wait\n while resp is None:\n if wait > self.config.max_reconnect_wait:\n raise Exception(\"To many reconnect attempts\")\n time.sleep(wait)\n try:\n resp = call(path, dump, headers=headers)\n except requests.exceptions.ConnectionError:\n resp = None\n wait *= 2\n return resp", "def test_timeout_exceeded():\n connection = FakeBaseConnection(session_timeout=10)\n start = time.time() - 11\n try:\n connection._timeout_exceeded(start)\n except NetmikoTimeoutException as exc:\n assert isinstance(exc, NetmikoTimeoutException)\n return\n\n assert False", "def _retry_occurred(self):", "def sendErrorMessage(msg): #@NoSelf", "def smtp_server(self):\n s = smtplib.SMTP(self.smtp_addr, self.smtp_port)\n s.starttls()\n s.login(self.src_addr, self.src_pass)\n try:\n yield s\n except:\n raise\n finally:\n s.quit()", "def timeoutConnection(self):\n self._cancelCommands(defer.TimeoutError(\"Connection timeout\"))\n self.transport.loseConnection()", "def check_timeout(self, transport, earlier_time, interval, error_msg):\n now = datetime.datetime.now()\n secs = int((now - earlier_time).total_seconds())\n if secs >= interval:\n self.connection_lost(transport, f'{error_msg}: {secs} seconds')", "def send_mail(Email_id,OTP):\r\n try : \r\n s = smtplib.SMTP('smtp.gmail.com', 587) \r\n s.ehlo()\r\n # start TLS for security \r\n s.starttls() \r\n # Authentication \r\n s.login(mail_id,mail_Password) \r\n message = 
str(OTP)\r\n # sending the mail \r\n s.sendmail(mail_id, Email_id, message) \r\n # terminating the session \r\n s.quit() \r\n msg=\"Mail has been sent to Registered mail id.\"\r\n except :\r\n msg=\"UserName and Password not accepted kindly provide correct UserName and Password.\"\r\n return msg", "def catch_telegram_errors(func):\n\n def result_func(*args, **kwargs):\n while True:\n try:\n return func(*args, **kwargs)\n # Bot was blocked by the user\n except telegram.error.Unauthorized:\n log.debug(f\"Unauthorized to call {func.__name__}(), skipping.\")\n break\n # Telegram API didn't answer in time\n except telegram.error.TimedOut:\n log.warning(f\"Timed out while calling {func.__name__}(),\"\n f\" retrying in {cfg['Telegram']['timed_out_pause']} secs...\")\n time.sleep(cfg.telegram[\"timed_out_pause\"])\n # Telegram is not reachable\n except telegram.error.NetworkError as error:\n log.error(f\"Network error while calling {func.__name__}(),\"\n f\" retrying in {cfg.telegram['error_pause']} secs...\\n\"\n f\"Full error: {error.message}\")\n time.sleep(cfg.telegram[\"error_pause\"])\n # Unknown error\n except telegram.error.TelegramError as error:\n if error.message.lower() in [\"bad gateway\", \"invalid server response\"]:\n log.warning(f\"Bad Gateway while calling {func.__name__}(),\"\n f\" retrying in {cfg.telegram['error_pause']} secs...\")\n time.sleep(cfg.telegram[\"error_pause\"])\n elif error.message.lower() == \"timed out\":\n log.warning(f\"Timed out while calling {func.__name__}(),\"\n f\" retrying in {cfg.telegram['timed_out_pause']} secs...\")\n time.sleep(cfg.telegram[\"timed_out_pause\"])\n else:\n log.error(f\"Telegram error while calling {func.__name__}(),\"\n f\" retrying in {cfg.telegram['error_pause']} secs...\\n\"\n f\"Full error: {error.message}\")\n traceback.print_exception(*sys.exc_info())\n time.sleep(cfg.telegram[\"error_pause\"])\n\n return result_func", "def test_connect_timeout(self):\n rave = RWSConnection('https://innovate.mdsol.com')\n with mock.patch(\"requests.sessions.Session.get\") as mock_get:\n mock_get.side_effect = requests.exceptions.ConnectTimeout()\n with self.assertRaises(RWSException) as exc:\n rave.send_request(rws_requests.ClinicalStudiesRequest(), verify=False,retries=0)\n self.assertEqual('Server Connection Timeout', str(exc.exception))", "def __init__(self, error_msg):\n super(RequestTimeoutException, self).__init__(error_msg)", "def process_error_mail(recipient, sender, journal_id):\n\n if recipient in ['', None]:\n #: Simpley Error Mail!\n #: TODO: error marking...\n return True\n\n try:\n param = return_path_from_address(recipient)\n assert param['message_id'] != \"\"\n assert param['domain'] != \"\"\n\n try:\n #: Jourmal mail object\n journal_msg = Journal.objects.get(id=journal_id).mailobject()\n error_address = journal_msg.get('X-Failed-Recipients')\n except:\n pass\n\n try:\n #: Find message\n msg = Publish.objects.get(\n id=int(param['message_id']),\n publish__site__domain=param['domain'])\n\n # X-Failed-Recipients SHOULD be checked ?\n assert(\n error_address is None or\n error_address == msg.member.address)\n\n #: increment bounce number\n #: this mailbox will be disabled sometimes later.\n msg.member.bounces = msg.member.bounces + 1\n msg.member.save()\n\n #:\n return True\n\n except:\n pass\n\n except exceptions.AttributeError:\n # May be normal address..\n # Other handler will be called.\n return False\n\n return False", "def retry_if_connection_error(exception):\r\n # return True\r\n return isinstance(exception, HttpError)", 
"def send(message):\n mail_server = smtplib.SMTP('localhost')\n mail_server.send_message(message)\n mail_server.quit()", "def send_email(message):\n mail_server = smtplib.SMTP('localhost')\n mail_server.send_message(message)\n mail_server.quit()", "def _handle_error(error):\n try:\n #Abandon any active timeout triggers\n if timeoutCall.active():\n timeoutCall.cancel()\n #Unexpected error on HTTPS POST, we may want to move to the next node.\n self.log.error(\"Error on HTTPS POST : {cls!r} : {err!r}. {node!r}\",cls=error.type.__name__,err=error.getErrorMessage(),node = self.nodes[self.node_index])\n self._next_node()\n except Exception as ex:\n self.log.failure(\"Error in _handle_error {err!r}. {node!r}\",err=str(ex),node = self.nodes[self.node_index])\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n ##If something went wrong, the HTTPS POST isn't active anymore.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()", "def TestSendRecvMessageTimeoutRaisesCommsError(self):\n self.txrx.timeout = 0.2 # short timeout so we don't hold up testing too much\n\n byte_array_message = bytes(\"\\x01\\x01\\x01\\x01\\x01\\x01\", encoding=DATA_ENCODING)\n txmsg = TxMessage(byte_array_message, num_response_msg=1, expect_eom=True)\n with self.assertRaises(PercivalCommsError):\n self.txrx.send_recv_message(txmsg)\n\n # Receive the bytes from our test socket\n msg = self.connection.recv(6)\n # Verify the bytes are the same as those sent\n self.assertEqual(msg, byte_array_message)", "def decide_to_retry(error):\n return True", "def _logout(self):\n if self.connected:\n try:\n self._smtp.quit()\n except smtplib.SMTPServerDisconnected:\n pass\n self._connected = False", "def parse_bro_smtp(smtp_path, target_dir, prefix='smtp'):\n\n # the current message we're parsing in the case of multiple emails coming in over the same connection\n smtp_message_index = 0 \n\n with open(smtp_path, 'r', errors='ignore') as fp:\n source_ipv4 = None\n source_port = None\n envelope_from = None\n envelope_to = []\n\n # state flag for when the data isn't quite right (see below)\n _bypass_read = False\n\n # the first line of the file has the source IP address of the smtp connection\n # in the following format: 172.16.139.143:38668/tcp\n\n line = fp.readline()\n m = REGEX_BRO_SMTP_SOURCE_IPV4.match(line)\n\n if not m:\n logging.error(f\"unable to parse soure address from {smtp_path} ({line.strip()})\")\n event_time = saq.LOCAL_TIMEZONE.localize(datetime.datetime.fromtimestamp(os.path.getmtime(smtp_path)))\n # in this case we skip the first readline() call since we've already read it\n _bypass_read = True\n else:\n source_ipv4 = m.group(1)\n source_port = m.group(2)\n\n logging.debug(f\"got source ipv4 {source_ipv4} port {source_port} for {smtp_path}\")\n\n # the second line is the time (in epoch UTC) that bro received the file\n line = fp.readline()\n event_time = datetime.datetime.utcfromtimestamp(int(line.strip()))\n logging.debug(f\"got event time {event_time} for {smtp_path}\")\n\n STATE_SMTP = 1\n STATE_DATA = 2\n\n state = STATE_SMTP\n rfc822_path = None\n rfc822_fp = None\n\n def _reset_state():\n nonlocal rfc822_fp, source_ipv4, source_port, envelope_from, envelope_to, state\n rfc822_fp = None\n #source_ipv4 = None\n #source_port = None\n envelope_from = None\n envelope_to = []\n state = STATE_SMTP\n\n def _finalize():\n # called when we detect the end of an SMTP stream OR the 
end of the file (data)\n nonlocal rfc822_fp, source_ipv4, source_port, envelope_from, envelope_to, state\n rfc822_fp.close()\n logging.info(\"finished parsing {} from {}\".format(rfc822_path, smtp_path))\n result = RFC822Email(\n source_ipv4=source_ipv4,\n source_port=source_port,\n envelope_from=envelope_from,\n envelope_to=envelope_to,\n received=event_time,\n file_path=rfc822_path)\n _reset_state()\n return result\n\n # smtp is pretty much line oriented\n while True:\n\n # if we read the first line and it wasn't what we expected\n # then we skip reading it here since we already have it\n if _bypass_read:\n _bypass_read = False\n else:\n line = fp.readline()\n\n if line == '':\n break\n\n if state == STATE_SMTP:\n m = REGEX_BRO_SMTP_MAIL_FROM.match(line)\n if m:\n envelope_from = m.group(1)\n logging.debug(\"got envelope_from {} for {}\".format(envelope_from, smtp_path))\n continue\n\n m = REGEX_BRO_SMTP_RCPT_TO.match(line)\n if m:\n envelope_to.append(m.group(1))\n logging.debug(\"got envelope_to {} for {}\".format(envelope_to, smtp_path))\n continue\n\n m = REGEX_BRO_SMTP_DATA.match(line)\n if m or (not line.startswith('<') and not line.startswith('>')):\n state = STATE_DATA\n rfc822_path = os.path.join(target_dir, f'{prefix}.{smtp_message_index}.email.rfc822')\n smtp_message_index += 1\n rfc822_fp = open(rfc822_path, 'w')\n logging.debug(\"created {} for {}\".format(rfc822_path, smtp_path))\n continue\n\n m = REGEX_BRO_SMTP_RSET.match(line)\n if m:\n logging.debug(f\"detected RSET for {smtp_path}\")\n _reset_state()\n continue\n\n # any other command we skip\n logging.debug(f\"skipping SMTP command {line.strip()}\")\n continue\n\n # otherwise we're reading DATA and looking for the end of that\n if line.strip() == ('> . .'):\n yield _finalize()\n continue\n\n rfc822_fp.write(line)\n continue\n\n # did the file end while we were reading SMTP data?\n if state == STATE_DATA:\n yield _finalize()", "def ReceiveTimeout(self) -> int:", "def ReceiveTimeout(self) -> int:", "def _submit_for_retry(entry_id, email_id, to_list, global_email_context, current_exception, subtask_status, skip_retry_max=False):\r\n task_id = subtask_status.task_id\r\n log.info(\"Task %s: Successfully sent to %s users; failed to send to %s users (and skipped %s users)\",\r\n task_id, subtask_status.succeeded, subtask_status.failed, subtask_status.skipped)\r\n\r\n # Calculate time until we retry this task (in seconds):\r\n # The value for max_retries is increased by the number of times an \"infinite-retry\" exception\r\n # has been retried. We want the regular retries to trigger max-retry checking, but not these\r\n # special retries. 
So we count them separately.\r\n max_retries = _get_current_task().max_retries + subtask_status.retried_nomax\r\n base_delay = _get_current_task().default_retry_delay\r\n if skip_retry_max:\r\n # once we reach five retries, don't increase the countdown further.\r\n retry_index = min(subtask_status.retried_nomax, 5)\r\n exception_type = 'sending-rate'\r\n # if we have a cap, after all, apply it now:\r\n if hasattr(settings, 'BULK_EMAIL_INFINITE_RETRY_CAP'):\r\n retry_cap = settings.BULK_EMAIL_INFINITE_RETRY_CAP + subtask_status.retried_withmax\r\n max_retries = min(max_retries, retry_cap)\r\n else:\r\n retry_index = subtask_status.retried_withmax\r\n exception_type = 'transient'\r\n\r\n # Skew the new countdown value by a random factor, so that not all\r\n # retries are deferred by the same amount.\r\n countdown = ((2 ** retry_index) * base_delay) * random.uniform(.75, 1.25)\r\n\r\n log.warning('Task %s: email with id %d not delivered due to %s error %s, retrying send to %d recipients in %s seconds (with max_retry=%s)',\r\n task_id, email_id, exception_type, current_exception, len(to_list), countdown, max_retries)\r\n\r\n # we make sure that we update the InstructorTask with the current subtask status\r\n # *before* actually calling retry(), to be sure that there is no race\r\n # condition between this update and the update made by the retried task.\r\n update_subtask_status(entry_id, task_id, subtask_status)\r\n\r\n # Now attempt the retry. If it succeeds, it returns a RetryTaskError that\r\n # needs to be returned back to Celery. If it fails, we return the existing\r\n # exception.\r\n try:\r\n send_course_email.retry(\r\n args=[\r\n entry_id,\r\n email_id,\r\n to_list,\r\n global_email_context,\r\n subtask_status.to_dict(),\r\n ],\r\n exc=current_exception,\r\n countdown=countdown,\r\n max_retries=max_retries,\r\n throw=True,\r\n )\r\n except RetryTaskError as retry_error:\r\n # If the retry call is successful, update with the current progress:\r\n log.exception(u'Task %s: email with id %d caused send_course_email task to retry.',\r\n task_id, email_id)\r\n return subtask_status, retry_error\r\n except Exception as retry_exc:\r\n # If there are no more retries, because the maximum has been reached,\r\n # we expect the original exception to be raised. We catch it here\r\n # (and put it in retry_exc just in case it's different, but it shouldn't be),\r\n # and update status as if it were any other failure. That means that\r\n # the recipients still in the to_list are counted as failures.\r\n log.exception(u'Task %s: email with id %d caused send_course_email task to fail to retry. To list: %s',\r\n task_id, email_id, [i['email'] for i in to_list])\r\n num_failed = len(to_list)\r\n subtask_status.increment(subtask_status, failed=num_failed, state=FAILURE)\r\n return subtask_status, retry_exc" ]
[ "0.67359835", "0.6189997", "0.6074653", "0.60194963", "0.59302324", "0.58749896", "0.58332354", "0.58151186", "0.58065194", "0.58065194", "0.57931924", "0.57492477", "0.5701048", "0.5651594", "0.56239885", "0.5590371", "0.5548968", "0.54814523", "0.545041", "0.5434488", "0.5432996", "0.54319906", "0.5427672", "0.5393198", "0.53886634", "0.5386925", "0.52865934", "0.5262578", "0.5209261", "0.5209261", "0.52065873", "0.5199914", "0.5197225", "0.51867986", "0.51865697", "0.51748097", "0.51708853", "0.516997", "0.516557", "0.5161641", "0.5153028", "0.51524675", "0.51451564", "0.51387787", "0.51289845", "0.51274496", "0.51197165", "0.5111481", "0.5087659", "0.5083974", "0.5082628", "0.50689685", "0.5061563", "0.50612724", "0.505994", "0.5059147", "0.5058944", "0.5050654", "0.5025109", "0.5025109", "0.50215644", "0.50091225", "0.5002319", "0.50016975", "0.49853706", "0.49780822", "0.4976904", "0.4974854", "0.4971627", "0.4961066", "0.49569902", "0.4955531", "0.49512345", "0.4949361", "0.49410206", "0.49359876", "0.49354786", "0.4933081", "0.4930154", "0.49270853", "0.49216604", "0.49204564", "0.4912089", "0.49062055", "0.4898298", "0.48971263", "0.48883092", "0.4883038", "0.48771402", "0.48712635", "0.48692167", "0.48639473", "0.48486683", "0.48481482", "0.48406997", "0.48368847", "0.48333982", "0.48288998", "0.48288998", "0.48253483" ]
0.6397967
1
The `data` command is a special case: it accesses the protocol directly, rather than using `execute_command`.
async def test_disconnected_server_raises_on_data_read(preset_client): await preset_client.connect() preset_client.server.responses.append(b"250 Hello there") await preset_client.ehlo() preset_client.server.responses.append(b"250 ok") await preset_client.mail("[email protected]") preset_client.server.responses.append(b"250 ok") await preset_client.rcpt("[email protected]") preset_client.server.responses.append(b"354 lets go") preset_client.server.drop_connection_after_request = b"A MESSAGE\r\n.\r\n" with pytest.raises(SMTPServerDisconnected): await preset_client.data("A MESSAGE") # Verify that the connection was closed assert not preset_client._connect_lock.locked() assert preset_client.protocol is None assert preset_client.transport is None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command(self, inst_data: int, buf: bytes, /) -> None:", "def execute(self, data, options):\n raise NotImplementedError()", "def cmd(self, data, enable):\n pass", "def call_and_feed(cmd, data):\n p = Popen(cmd, shell=True, stdin=PIPE)\n p.stdin.write(data)\n p.stdin.close()\n return p.wait()", "def run(self, data: PipeLineDataObject) -> PipeLineDataObject:\n raise NotImplementedError", "def execute(self, string, data=None):\n raise NotImplementedError", "def executeRemoteCommand(self, data):\n return self.session.request('diag/command/', 'POST',\n self.getXML(data, 'remoteCommand'))", "def send_command(self, data):\n try:\n self.write(data)\n reply = self.read_line()\n \n if reply == \"{}\":\n pass\n else:\n print \"send_command: received bad reply %s\" % (reply)\n sys.exit(1)\n except Exception:\n raise", "def command(dev, code, data='', verbose=False):\n communicate(dev, a2b_hex('A' + code) + data.encode('ascii'), a2b_hex('B' + code), verbose=verbose)", "def get_data(self):\n self._send_command(self._adapter.get_data())", "def post_execute(self, data):\n return data", "def execute_command(command, data={}):\n if data:\n command = command % data\n print \"Executing %s\" % command\n return subprocess.call(command, shell=True)", "async def data_received(self, data):\n prefix, command, args = parse_raw_irc_command(data)\n await self.command_handler.run(command, prefix, *args)", "def sendCommand(self, data):\n #make sure data has an even number of elements\n if(len(data) % 2 == 1):\n data.append(0)\n\n #Initiate message as an empty list\n message = []\n\n #Fill message by combining two bytes in one register\n for i in range(0, len(data)/2):\n message.append((data[2*i] << 8) + data[2*i+1])\n\n #To do!: Implement try/except\n with self.lock:\n self.client.write_registers(0, message)", "def send(self, cmd, data):\n return self.client_provider.send(cmd, data)", "def on_data(self, session, byte_data):\n try:\n str_data = to_str(byte_data).strip()\n\n for cmd in str_data.split(\";\"):\n args = [\n val for val in [\n val.strip() for val in cmd.split(self._seps)\n ] if val]\n\n if not args:\n continue\n\n if not self.on_command(session, args):\n return False\n\n self.put_prompt(session)\n return True\n\n except Exception as exc: # pylint: disable=broad-except\n LOGGER.error(traceback.format_exc())\n self.reply_text(session, \"NG:Error occurred (%s)\" % str(exc))\n return False", "def OnCommand(self, data, onResult=LogAck):\r\n\t\td = self.perspective.callRemote(\"Command\", data)\r\n\t\td.addCallback(onResult)\r\n\t\td.addErrback(self.OnError)", "def sendData (self, data) :\n\n assert len(data) <= 255\n \n return self.sendCommand(\"CMD_IN_DATA_STDIN\", data).addCallback(self._sendData_result)", "def _send_command(self, data):\n # make sure data has an even number of elements\n if(len(data) % 2 == 1):\n data.append(0)\n\n # Initiate message as an empty list\n message = []\n\n # Fill message by combining two bytes in one register\n for i in range(int(len(data)/2)):\n message.append((data[2*i] << 8) + data[2*i+1])\n\n # To do!: Implement try/except\n self.client.write_registers(0x03E8, message, unit=0x0009)", "def execute(self, data, options):\n return escape_if_needed(self.fetch(data), options)", "def _spi_cmd(command, data=None):\n spi_cs.write_digital(CS_ACTIVE)\n spi_dc.write_digital(0)\n spi.write(bytearray([command]))\n if data is not None:\n spi_dc.write_digital(1)\n spi.write(bytearray(data))\n spi_cs.write_digital(CS_INACTIVE)", "def data(self, msg):\n self.putcmd(\"data\")\n 
(code,repl)=self.getreply()\n if self.debuglevel >0 : print>>sys.stderr, \"data:\", (code,repl)\n if code != 354:\n raise SMTPDataError(code,repl)\n else:\n q = quotedata(msg)\n if q[-2:] != CRLF:\n q = q + CRLF\n q = q + \".\" + CRLF\n\n # begin modified send code\n chunk_size = 10240\n bytes_sent = 0\n\n while bytes_sent != len(q):\n chunk = q[bytes_sent:bytes_sent+chunk_size]\n self.send(chunk)\n bytes_sent += len(chunk)\n if hasattr(self, \"callback\"):\n self.callback(bytes_sent, len(q))\n # end modified send code\n\n (code, msg) = self.getreply()\n if self.debuglevel > 0 : print>>sys.stderr, \"data:\", (code,msg)\n return (code,msg)", "def on_data(self, session, byte_data):\n pass", "def data_to(self, data, name):\r\n return self.interpreter(name)(data)", "async def _async_send_command(self, data_cmd):\n device_id = self._device_id\n if not device_id:\n return\n if not data_cmd:\n return\n\n api_device = f\"{API_DEVICES}/{device_id}\"\n api_command = f\"{api_device}/commands\"\n\n async with self._session.post(\n api_command,\n headers=_headers(self._api_key),\n data=data_cmd,\n raise_for_status=True,\n ) as resp:\n await resp.json()\n\n await self._device_refresh()", "def data(self, data):\n try:\n self.put_nowait(data)\n except Full:\n pass", "def __init__(self, command: str, data, checksum: bool):\r\n self.command = command\r\n \"\"\"Command to perform\"\"\"\r\n self.checksum = checksum\r\n \"\"\"Checksum protocol mode.\"\"\"\r\n self.data = data\r\n \"\"\"Data for PWM (or SET for closed loop) command.\"\"\"", "def data(self, data):\n self.__data = data", "def get_cmdb_data(device_type):\n pass", "def got_data(self, data):\n if self.get_current_state() == SBE37ProtocolState.DIRECT_ACCESS:\n # direct access mode\n if len(data) > 0:\n mi_logger.debug(\"SBE37Protocol._got_data(): <\" + data + \">\") \n if self._driver_event:\n self._driver_event(DriverAsyncEvent.DIRECT_ACCESS, data)\n # TODO: what about logging this as an event?\n return\n \n if len(data)>0:\n # Call the superclass to update line and prompt buffers.\n CommandResponseInstrumentProtocol.got_data(self, data)\n \n # If in streaming mode, process the buffer for samples to publish.\n cur_state = self.get_current_state()\n if cur_state == SBE37ProtocolState.AUTOSAMPLE:\n if SBE37_NEWLINE in self._linebuf:\n lines = self._linebuf.split(SBE37_NEWLINE)\n self._linebuf = lines[-1]\n for line in lines:\n self._extract_sample(line)", "def data_received(self, data):\n print('S> data received ['+str(len(data))+']: '+str(data))\n self.deserializer.append(data)\n if self.deserializer.ready():\n msg = self.deserializer.deserialize()\n status = TSDBStatus.OK # until proven otherwise.\n response = TSDBOp_Return(status, None) # until proven otherwise.\n try:\n op = TSDBOp.from_json(msg)\n except TypeError as e:\n print(e)\n response = TSDBOp_Return(TSDBStatus.INVALID_OPERATION, None)\n if status is TSDBStatus.OK:\n if isinstance(op, TSDBOp_InsertTS):\n response = self._insert_ts(op)\n elif isinstance(op, TSDBOp_UpsertMeta):\n response = self._upsert_meta(op)\n elif isinstance(op, TSDBOp_Select):\n response = self._select(op)\n elif isinstance(op, TSDBOp_AugmentedSelect):\n response = self._augmented_select(op)\n elif isinstance(op, TSDBOp_AddTrigger):\n response = self._add_trigger(op)\n elif isinstance(op, TSDBOp_RemoveTrigger):\n response = self._remove_trigger(op)\n elif isinstance(op, TSDBOp_DeleteTS):\n print('running delete')\n response = self._delete_ts(op)\n else:\n response = TSDBOp_Return(TSDBStatus.UNKNOWN_ERROR,\n 
op['op'])\n\n self.conn.write(serialize(response.to_json()))\n # print(\"close\")\n self.conn.close()", "def execute(self, devices, command_bytes):", "def data(self, data):\n self._data = data", "def data(self, data):\n self._data = data", "def command_data(self, arg):\n if not self.__rcpt:\n raise errors.BadSequence('Error: need RCPT command')\n if arg:\n raise errors.BadArguments('DATA')\n self.__state = self.DATA\n self.write('354 End data with <CR><LF>.<CR><LF>',\n read_until_delimiter='{0}.{0}'.format(CRLF))", "def _send_data(self):\n pass", "def send_data(self):\n data = self.datastore.use(self.data_name)\n if data is None:\n self.dbg(\"sockets_warning\", \"Data is none for {}\", [self.data_name])\n encoded_data = json.dumps(data).encode()\n self.conn.sendall(encoded_data)\n self.dbg(\"sockets_verbose\", \"Data sent\")", "def sendData(self, program_counter, data_type, data):\n self.stringReceived(program_counter, data_type, data)", "def _send_command(self, command, data=None):\n self._spi_write(_SPI_COMMAND, [command])\n if data is not None:\n self._send_data(data)", "def __init__(self, command=None, data_length=0, data=[]):\n if command is not None:\n self.command = command\n self.data_length = data_length\n self.data = data\n self.encode()\n else:\n self.message_length = 0\n self.command = 0\n self.data_length = 0\n self.data = []\n self.string = \"\"", "def handle_datachan(bot, event):\n event.reply(event.chan.data.tojson())", "def send_data(self, data: dict):\n pass", "def execute(self, data, options):\n return escape_if_needed(\n self.case.execute(self.token.execute(data, {'escape': False})), options)", "def __commandparser(self, data):\n # zum bearbeiten einen String daraus machen\n cmdstr = data.decode('utf-8')\n self.log.debug(\"cmd: %s\" % cmdstr)\n # json parsen und dictonary Objekt daraus machen\n cmd = json.loads(cmdstr)\n #\n # ist es ein GET Kommando?\n #\n if 'get' in cmd:\n self.log.debug(\"get cmd recognized...\")\n return self.__get_cmd_parse(cmd['get'])\n elif 'set' in cmd:\n self.log.debug(\"set cmd recognized...\")\n return self.__set_cmd_parse(cmd['set'])\n elif 'delete' in cmd:\n self.log.debug(\"DELETE cmd recognized...\")\n return self.__delete_cmd_parse(cmd['delete'])\n else:\n self.log.warning(\"unknown command recived! 
Data: <{}>\".format(cmdstr))\n return json.dumps({'error': 'unknown command or not implemented yet'}).encode(encoding='utf-8')\n # ENDE __commandparser", "def _send_command_to_entity_server(self, command, data=''):\n\t\tself._client_message_lock.acquire()\n\t\treply = self._entity_server_connection.send_message(command + ':' + str(data))\n\t\tself._client_message_lock.release()\n\t\treturn reply", "def send(self, data):", "def package_data(self, data):\n pass", "def read_vsys_data_direct(command):\n global _vs_vsys\n if _vs_vsys is None:\n try:\n _vs_vsys = VsysFrontend(_VSYS_FRONTEND_TARGET)\n _vs_vsys.open()\n except VsysException as err:\n collectd.error('Failed to setup VsysFrontend: %s' % err)\n return {}\n\n try:\n raw_data = _vs_vsys.sendrecv(command)\n except VsysException as err:\n collectd.error('Failed to receive message: %s' % err)\n _vs_vsys.close()\n _vs_vsys = None # Will be re-opened on next call.\n return {}\n\n try:\n data = json.loads(raw_data)\n except ValueError as err:\n collectd.error('Failed to load json from raw data: -%s-' % raw_data)\n collectd.error(str(err))\n return {}\n\n return data", "def transfer_data(self):\n pass", "def exec_command(self, cmd, in_data=None, sudoable=True):\n super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)\n\n self._display.vvv(u\"EXEC {0}\".format(cmd), host=self._host)\n\n cmd_args_list = shlex.split(to_native(cmd, errors='surrogate_or_strict'))\n\n if getattr(self._shell, \"_IS_WINDOWS\", False):\n # Become method 'runas' is done in the wrapper that is executed,\n # need to disable sudoable so the bare_run is not waiting for a\n # prompt that will not occur\n sudoable = False\n\n # Generate powershell commands\n cmd_args_list = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)\n\n # TODO(odyssey4me):\n # Implement buffering much like the other connection plugins\n # Implement 'env' for the environment settings\n # Implement 'input-data' for whatever it might be useful for\n request_exec = {\n 'execute': 'guest-exec',\n 'arguments': {\n 'path': cmd_args_list[0],\n 'capture-output': True,\n 'arg': cmd_args_list[1:]\n }\n }\n request_exec_json = json.dumps(request_exec)\n\n display.vvv(u\"GA send: {0}\".format(request_exec_json), host=self._host)\n\n # TODO(odyssey4me):\n # Add timeout parameter\n result_exec = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_exec_json, 5, 0))\n\n display.vvv(u\"GA return: {0}\".format(result_exec), host=self._host)\n\n request_status = {\n 'execute': 'guest-exec-status',\n 'arguments': {\n 'pid': result_exec['return']['pid']\n }\n }\n request_status_json = json.dumps(request_status)\n\n display.vvv(u\"GA send: {0}\".format(request_status_json), host=self._host)\n\n # TODO(odyssey4me):\n # Work out a better way to wait until the command has exited\n result_status = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_status_json, 5, 0))\n\n display.vvv(u\"GA return: {0}\".format(result_status), host=self._host)\n\n while not result_status['return']['exited']:\n result_status = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_status_json, 5, 0))\n\n display.vvv(u\"GA return: {0}\".format(result_status), host=self._host)\n\n if result_status['return'].get('out-data'):\n stdout = base64.b64decode(result_status['return']['out-data'])\n else:\n stdout = b''\n\n if result_status['return'].get('err-data'):\n stderr = base64.b64decode(result_status['return']['err-data'])\n else:\n stderr = b''\n\n # Decode xml from 
windows\n if getattr(self._shell, \"_IS_WINDOWS\", False) and stdout.startswith(b\"#< CLIXML\"):\n stdout = _parse_clixml(stdout)\n\n display.vvv(u\"GA stdout: {0}\".format(to_text(stdout)), host=self._host)\n display.vvv(u\"GA stderr: {0}\".format(to_text(stderr)), host=self._host)\n\n return result_status['return']['exitcode'], stdout, stderr", "def send(self, data):\n pass", "def data_received(self, data):\n # self.debug(\"received data=%r\", binascii.hexlify(data))\n self.dispatcher.add_data(data)", "def data(self, data):\n if len(data) > 41:\n self._data = b\"\"\n else:\n self._data = data\n return self._data", "def save_data(self):\n # Command to get the download data\n pass", "def send(self, data: bytes):", "def getData(self,cmd):\n self.ser.write(cmd.encode()+END.encode())\n out = self.ser.readline()\n\n if(out == \"\"):\n raise IOError(\"communication failed\")\n return out", "def _run_internal(self, cmd, buf=None, via='set'):\n log.debug(' Command: {}'.format(str(cmd)))\n retval = None\n if str(via).lower().strip() == 'set':\n if type(buf) is list:\n retval = self.ds9.set(cmd, *buf)\n else:\n retval = self.ds9.set(cmd, buf)\n elif str(via).lower().strip() == 'get':\n # workaround for DS9 seg fault with empty frame\n # and wcs requests\n if 'wcs' in cmd and not self._loaded_data():\n log.debug(' No loaded data; skipping frame')\n retval = ''\n else:\n try:\n retval = self.ds9.get(cmd)\n except TypeError as err:\n log.error('Error in pyds9')\n log.debug(err)\n else:\n log.error('Unknown ds9 interaction command: ' + str(via))\n return retval", "def __call__(self,data):\n\n log.debug('got data: %s' % (len(data)))\n\n # if we don't have args yet, these must be them\n if not self.args:\n self.parse_args(data)\n\n else:\n # we've already got args, must\n # be a message\n self.handle_send(data)", "def data(self, data):\n\n self._data = data", "def data(self, data):\n\n self._data = data", "def data(self, data):\n\n self._data = data", "def data(self, data):\n\n self._data = data", "def data(self, data):\n\n self._data = data", "def data(self, data):\n\n self._data = data", "def from_server(self, data):\r\n pass", "def run(self, data):\n\t\t# no processing here\n\t\treturn data", "def setData(self,data):\n self.data = struct.pack(\"!Q\",data)", "def setData(self,data):\n self.data = struct.pack(\"!Q\",data)", "def cb_exec_cmd(data, remaining_calls):\n # Process the entered command.\n data = list(data)\n del data[0]\n data = \"\".join(data)\n # s/foo/bar command.\n if data.startswith(\"s/\"):\n cmd = data\n parsed_cmd = next(csv.reader(StringIO(cmd), delimiter=\"/\",\n escapechar=\"\\\\\"))\n pattern = re.escape(parsed_cmd[1])\n repl = parsed_cmd[2]\n repl = re.sub(r\"([^\\\\])&\", r\"\\1\" + pattern, repl)\n flag = None\n if len(parsed_cmd) == 4:\n flag = parsed_cmd[3]\n count = 1\n if flag == \"g\":\n count = 0\n buf = weechat.current_buffer()\n input_line = weechat.buffer_get_string(buf, \"input\")\n input_line = re.sub(pattern, repl, input_line, count)\n weechat.buffer_set(buf, \"input\", input_line)\n # Shell command.\n elif data.startswith(\"!\"):\n weechat.command(\"\", \"/exec -buffer shell %s\" % data[1:])\n # Commands like `:22`. 
This should start cursor mode (``/cursor``) and take\n # us to the relevant line.\n # TODO: look into possible replacement key bindings for: ← ↑ → ↓ Q m q.\n elif data.isdigit():\n line_number = int(data)\n hdata_window = weechat.hdata_get(\"window\")\n window = weechat.current_window()\n x = weechat.hdata_integer(hdata_window, window, \"win_chat_x\")\n y = (weechat.hdata_integer(hdata_window, window, \"win_chat_y\") +\n (line_number - 1))\n weechat.command(\"\", \"/cursor go {},{}\".format(x, y))\n # Check againt defined commands.\n else:\n data = data.split(\" \", 1)\n cmd = data[0]\n args = \"\"\n if len(data) == 2:\n args = data[1]\n if cmd in VI_COMMANDS:\n weechat.command(\"\", \"%s %s\" % (VI_COMMANDS[cmd], args))\n # No vi commands defined, run the command as a WeeChat command.\n else:\n weechat.command(\"\", \"/{} {}\".format(cmd, args))\n return weechat.WEECHAT_RC_OK", "def send_serial_command(data):\n print(data)\n serial_command = data\n SERIAL_PARENT.send(serial_command)\n OUTGOING.append(serial_command)", "def dataReceived(self, data):\n print \"received:\", data", "def execute_command(self, command):\n raise NotImplementedError", "def handle_dataevent(bot, event):\n event.reply(event.tojson())", "def COM(cmd,data): #Status: WIP\r\n #Desc CMD Target Address\r\n if cmd == 'U': #get update U\r\n parseLocal(data)\r\n if cmd == 'T':\r\n setETM(data)\r\n# rpc(addr,getLocals,addr, lTime, lSped, lLoca, lStat)\r\n elif cmd == 'M': #merge M\r\n setETM(data)\r\n merge()\r\n elif cmd == 'E': #help E multicasted\r\n setStatus(data)\r\n emergency()\r\n elif cmd == 'D':\r\n getDest()", "def get_data(command):\n command = subprocess.run(\n command,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n encoding='utf-8',\n )\n\n # if command not succesfully executed, stop script\n if command.returncode != 0:\n print(f'ERROR:~ {command.stderr}')\n return False\n\n if not command.stdout:\n print(f'ERROR:~ Command output [{command}] is empty')\n return command.stdout", "def data(msg=None, *args, **kwargs):\n log(DATA, msg, *args, **kwargs)", "def update(self, data: bytes):\n self.send(data)", "def dataReceived(self, data: bytes):\n\n if self.output:\n self.output.write(data) # redirect the message to the server", "def setData(self,data):\n self.data = struct.pack(\"!I\",data)", "def pass_data(self, data):\n self.data = data\n self.load_input_fields()", "def request(self, command_code, data):\n name, request_func, response_func = afpcommands.commands[command_code]\n return request_func(data)", "def setData(self, data):\n self.data = struct.pack(\"!I\",data)", "def handle(self, data):\n pass", "def _run_dummy_command(self, *args, **kwargs):\r\n out = StringIO()\r\n DummyCommand().execute(*args, stdout=out, **kwargs)\r\n out.seek(0)\r\n return json.loads(out.read())", "def handle_databot(bot, event):\n event.reply(bot.tojson())", "async def commanddata(self, ctx: vbu.Context):\n\n # Get info from database\n async with self.bot.database() as db:\n command_data = await db(\"SELECT * FROM command_counter\")\n\n # Make sure we have data\n if not command_data:\n return await ctx.send(\"No command data was found in the database.\")\n\n # Set up the command list\n sorted_commands_singlelist = []\n commands_list = {} # List of strings \"**command name**: command count\"\n total_count = 0 # To count the total number of commands\n for command in command_data:\n count = command['count']\n total_count += count\n for command in command_data:\n count = command['count']\n 
commands_list[f\"**{command['command_name']}**: {count} times `({(count / total_count) * 100:.2f}%)`\\n\"] = count\n #commands_list.append({count: f\"**{command['command_name']}**: {count} times `({(count / total_count) * 100}%)`\\n\"})\n\n sorted_commands = sorted(commands_list.items(), key=lambda x: x[1], reverse=True)\n for i in sorted_commands:\n sorted_commands_singlelist.append(i[0])\n # Paginate\n\n # Set up the paginator formatter\n def formatter(menu, items):\n # Create the embed\n commands_embed = vbu.Embed(title = \"Command Data (times run)\")\n # Add the total count footer\n commands_embed.set_footer(text=f\"Total: {total_count}\")\n # Add the command list to the emebd\n commands_embed.description = \"\\n\".join(items)\n\n # Return the embed\n return commands_embed\n\n # Begin paginating\n pagin = vbu.Paginator(sorted_commands_singlelist, formatter=formatter, per_page=10)\n await pagin.start(ctx)", "async def send_api_cmd(self, command: str) -> dict or None:\n try:\n # open reader and writer streams to the api\n reader, writer = await asyncio.open_connection(self.ip, self.api_port)\n\n # write the command\n api_command = json.dumps({\"command\": command}).encode('utf-8')\n\n # send the command\n writer.write(api_command)\n await writer.drain()\n\n data = b\"\"\n\n # loop to receive all the data\n while True:\n d = await reader.read(4096)\n if not d:\n break\n data += d\n\n # load the data into a dict\n data = json.loads(data.decode('utf-8')[:-1])\n\n # close the connection\n writer.close()\n await writer.wait_closed()\n\n # return the data\n return data\n except:\n # if an error happens, return none\n return None", "def do_command(command):\n send_command(command)\n response = get_response()\n print(\"Rcvd: <<< \\n\" + response)\n return response", "def dataReceived(self, data):", "def __call__(self, data: bytearray):\n for name in self._argo:\n value = getattr(self._argv, name, None)\n if value is self.PendingUpdate:\n raise RuntimeError(F'Attempting to resolve {name} while an update for this argument is in flight')\n if value and pending(value):\n self._args[name] = self.PendingUpdate\n self._args[name] = manifest(value, data)\n self._store(_guid=id(data))\n return data", "def data_received(self, data):\n pass", "def setData(self,data):\n self.data = struct.pack(\"!d\",data)", "def data(self, int_role=None, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__\r\n pass", "def test_command(self):\n out = io.StringIO()\n management.call_command('import_data', stdout=out)\n self.assertIn(\"Successfully imported\", out.getvalue())", "async def process_command(self, data):\n result = None\n\n command = data[40:60]\n if command == COMMAND_CODE['update_user_list']:\n result = await update_user_list(data)\n elif command == COMMAND_CODE['invalid_credentials']:\n result = await sent_invalid_credentials()\n elif command == COMMAND_CODE['valid_credentials']:\n result = await successfully_authenticated()\n elif command == COMMAND_CODE['server_shutdown']:\n result = await server_shutdown()\n return result", "def _run_command(self, command, parameters=()):\n\t\tconn = sqlite3.connect(self.filename)\n\t\tcursor = conn.execute(\"PRAGMA synchronous=OFF;\")\n\t\tcursor = conn.execute(command, parameters)\n\t\tdata = tuple(i for i in cursor)\n\t\tconn.commit()\n\t\tconn.close()\n\n\t\treturn data", "def test_sendCommand(self):\n self.p.sendCommand(\"CMD\", (\"param1\", \"param2\"))\n self.check(\"CMD param1 param2\\r\\n\")", "def send_data(self, data):\r\n try:\r\n 
self.sock.sendto(data, self.addr)\r\n except Exception:\r\n print(\"Cant't send a package\")", "def on_data(self, data):\n if data is not None:\n # Send the data to the parent process\n logging.debug('Received raw data : ' + str(data))\n self.mp_queue.put(data)", "def stdin_read(self, data):\n self.write_master(data)", "def dataReceived(self, data):\n try:\n try:\n if not hasattr(self, \"_data\"):\n self._data = data\n else:\n self._data += data\n\n self._checkDataLength(self._data)\n\n git_command = self._data[4:]\n if git_command[:15] == b\"git-upload-pack\":\n self._buildResponseAndSend(git_command.decode(\"utf-8\"))\n else:\n raise ProtocolError()\n\n except GitCommandLengthMismatch:\n pass\n\n except ProtocolError:\n self.transport.loseConnection()\n return" ]
[ "0.7060531", "0.6570072", "0.6510051", "0.63995844", "0.6374746", "0.6351639", "0.63384765", "0.62793285", "0.6275475", "0.62704265", "0.61805314", "0.61763734", "0.6167132", "0.61131483", "0.60688186", "0.60665303", "0.6035471", "0.60299027", "0.5990287", "0.5981012", "0.59661806", "0.59530485", "0.5949353", "0.59401345", "0.593415", "0.59253323", "0.5917382", "0.58955085", "0.5879562", "0.5877454", "0.587461", "0.5865461", "0.5819105", "0.5819105", "0.57802105", "0.5777682", "0.5770051", "0.57677275", "0.576469", "0.57470113", "0.5736588", "0.5723546", "0.5715587", "0.57099956", "0.5688357", "0.56641316", "0.5662441", "0.5658962", "0.5658304", "0.5657546", "0.5642665", "0.5637435", "0.5634655", "0.56261337", "0.5612717", "0.5612147", "0.560539", "0.55997705", "0.55974203", "0.55974203", "0.55974203", "0.55974203", "0.55974203", "0.55974203", "0.5596276", "0.5580344", "0.55766064", "0.55766064", "0.55695534", "0.5558039", "0.5550664", "0.5549074", "0.5536979", "0.5533347", "0.55319077", "0.5531465", "0.5529315", "0.5526017", "0.55112636", "0.55089283", "0.55062234", "0.55036736", "0.5501537", "0.549785", "0.5491912", "0.5491808", "0.5491328", "0.54836065", "0.548342", "0.54802626", "0.54789156", "0.5468699", "0.54686075", "0.54644847", "0.5448205", "0.54427886", "0.54350585", "0.54328275", "0.5431288", "0.54308164", "0.5430505" ]
0.0
-1
The `data` command is a special case: it accesses the protocol directly, rather than using `execute_command`.
async def test_disconnected_server_raises_on_data_write(preset_client): await preset_client.connect() preset_client.server.responses.append(b"250 Hello there") await preset_client.ehlo() preset_client.server.responses.append(b"250 ok") await preset_client.mail("[email protected]") preset_client.server.responses.append(b"250 ok") await preset_client.rcpt("[email protected]") preset_client.server.responses.append(b"354 lets go") preset_client.server.drop_connection_after_request = b"A MESS" with pytest.raises(SMTPServerDisconnected): await preset_client.data("A MESSAGE") # Verify that the connection was closed assert not preset_client._connect_lock.locked() assert preset_client.protocol is None assert preset_client.transport is None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command(self, inst_data: int, buf: bytes, /) -> None:", "def cmd(self, data, enable):\n pass", "def command(dev, code, data='', verbose=False):\n communicate(dev, a2b_hex('A' + code) + data.encode('ascii'), a2b_hex('B' + code), verbose=verbose)", "def execute(self, data, options):\n raise NotImplementedError()", "def send_command(self, data):\n try:\n self.write(data)\n reply = self.read_line()\n \n if reply == \"{}\":\n pass\n else:\n print \"send_command: received bad reply %s\" % (reply)\n sys.exit(1)\n except Exception:\n raise", "def run(self, data: PipeLineDataObject) -> PipeLineDataObject:\n raise NotImplementedError", "def executeRemoteCommand(self, data):\n return self.session.request('diag/command/', 'POST',\n self.getXML(data, 'remoteCommand'))", "def execute(self, string, data=None):\n raise NotImplementedError", "def call_and_feed(cmd, data):\n p = Popen(cmd, shell=True, stdin=PIPE)\n p.stdin.write(data)\n p.stdin.close()\n return p.wait()", "def sendCommand(self, data):\n #make sure data has an even number of elements\n if(len(data) % 2 == 1):\n data.append(0)\n\n #Initiate message as an empty list\n message = []\n\n #Fill message by combining two bytes in one register\n for i in range(0, len(data)/2):\n message.append((data[2*i] << 8) + data[2*i+1])\n\n #To do!: Implement try/except\n with self.lock:\n self.client.write_registers(0, message)", "async def data_received(self, data):\n prefix, command, args = parse_raw_irc_command(data)\n await self.command_handler.run(command, prefix, *args)", "def on_data(self, session, byte_data):\n try:\n str_data = to_str(byte_data).strip()\n\n for cmd in str_data.split(\";\"):\n args = [\n val for val in [\n val.strip() for val in cmd.split(self._seps)\n ] if val]\n\n if not args:\n continue\n\n if not self.on_command(session, args):\n return False\n\n self.put_prompt(session)\n return True\n\n except Exception as exc: # pylint: disable=broad-except\n LOGGER.error(traceback.format_exc())\n self.reply_text(session, \"NG:Error occurred (%s)\" % str(exc))\n return False", "def _send_command(self, data):\n # make sure data has an even number of elements\n if(len(data) % 2 == 1):\n data.append(0)\n\n # Initiate message as an empty list\n message = []\n\n # Fill message by combining two bytes in one register\n for i in range(int(len(data)/2)):\n message.append((data[2*i] << 8) + data[2*i+1])\n\n # To do!: Implement try/except\n self.client.write_registers(0x03E8, message, unit=0x0009)", "def send(self, cmd, data):\n return self.client_provider.send(cmd, data)", "def __init__(self, command: str, data, checksum: bool):\r\n self.command = command\r\n \"\"\"Command to perform\"\"\"\r\n self.checksum = checksum\r\n \"\"\"Checksum protocol mode.\"\"\"\r\n self.data = data\r\n \"\"\"Data for PWM (or SET for closed loop) command.\"\"\"", "def get_data(self):\n self._send_command(self._adapter.get_data())", "def on_data(self, session, byte_data):\n pass", "def data(self, msg):\n self.putcmd(\"data\")\n (code,repl)=self.getreply()\n if self.debuglevel >0 : print>>sys.stderr, \"data:\", (code,repl)\n if code != 354:\n raise SMTPDataError(code,repl)\n else:\n q = quotedata(msg)\n if q[-2:] != CRLF:\n q = q + CRLF\n q = q + \".\" + CRLF\n\n # begin modified send code\n chunk_size = 10240\n bytes_sent = 0\n\n while bytes_sent != len(q):\n chunk = q[bytes_sent:bytes_sent+chunk_size]\n self.send(chunk)\n bytes_sent += len(chunk)\n if hasattr(self, \"callback\"):\n self.callback(bytes_sent, len(q))\n # end modified send code\n\n (code, msg) = 
self.getreply()\n if self.debuglevel > 0 : print>>sys.stderr, \"data:\", (code,msg)\n return (code,msg)", "def OnCommand(self, data, onResult=LogAck):\r\n\t\td = self.perspective.callRemote(\"Command\", data)\r\n\t\td.addCallback(onResult)\r\n\t\td.addErrback(self.OnError)", "async def _async_send_command(self, data_cmd):\n device_id = self._device_id\n if not device_id:\n return\n if not data_cmd:\n return\n\n api_device = f\"{API_DEVICES}/{device_id}\"\n api_command = f\"{api_device}/commands\"\n\n async with self._session.post(\n api_command,\n headers=_headers(self._api_key),\n data=data_cmd,\n raise_for_status=True,\n ) as resp:\n await resp.json()\n\n await self._device_refresh()", "def _spi_cmd(command, data=None):\n spi_cs.write_digital(CS_ACTIVE)\n spi_dc.write_digital(0)\n spi.write(bytearray([command]))\n if data is not None:\n spi_dc.write_digital(1)\n spi.write(bytearray(data))\n spi_cs.write_digital(CS_INACTIVE)", "def data_to(self, data, name):\r\n return self.interpreter(name)(data)", "def get_cmdb_data(device_type):\n pass", "def execute(self, devices, command_bytes):", "def data_received(self, data):\n print('S> data received ['+str(len(data))+']: '+str(data))\n self.deserializer.append(data)\n if self.deserializer.ready():\n msg = self.deserializer.deserialize()\n status = TSDBStatus.OK # until proven otherwise.\n response = TSDBOp_Return(status, None) # until proven otherwise.\n try:\n op = TSDBOp.from_json(msg)\n except TypeError as e:\n print(e)\n response = TSDBOp_Return(TSDBStatus.INVALID_OPERATION, None)\n if status is TSDBStatus.OK:\n if isinstance(op, TSDBOp_InsertTS):\n response = self._insert_ts(op)\n elif isinstance(op, TSDBOp_UpsertMeta):\n response = self._upsert_meta(op)\n elif isinstance(op, TSDBOp_Select):\n response = self._select(op)\n elif isinstance(op, TSDBOp_AugmentedSelect):\n response = self._augmented_select(op)\n elif isinstance(op, TSDBOp_AddTrigger):\n response = self._add_trigger(op)\n elif isinstance(op, TSDBOp_RemoveTrigger):\n response = self._remove_trigger(op)\n elif isinstance(op, TSDBOp_DeleteTS):\n print('running delete')\n response = self._delete_ts(op)\n else:\n response = TSDBOp_Return(TSDBStatus.UNKNOWN_ERROR,\n op['op'])\n\n self.conn.write(serialize(response.to_json()))\n # print(\"close\")\n self.conn.close()", "def __init__(self, command=None, data_length=0, data=[]):\n if command is not None:\n self.command = command\n self.data_length = data_length\n self.data = data\n self.encode()\n else:\n self.message_length = 0\n self.command = 0\n self.data_length = 0\n self.data = []\n self.string = \"\"", "def execute_command(command, data={}):\n if data:\n command = command % data\n print \"Executing %s\" % command\n return subprocess.call(command, shell=True)", "def post_execute(self, data):\n return data", "def sendData(self, program_counter, data_type, data):\n self.stringReceived(program_counter, data_type, data)", "def got_data(self, data):\n if self.get_current_state() == SBE37ProtocolState.DIRECT_ACCESS:\n # direct access mode\n if len(data) > 0:\n mi_logger.debug(\"SBE37Protocol._got_data(): <\" + data + \">\") \n if self._driver_event:\n self._driver_event(DriverAsyncEvent.DIRECT_ACCESS, data)\n # TODO: what about logging this as an event?\n return\n \n if len(data)>0:\n # Call the superclass to update line and prompt buffers.\n CommandResponseInstrumentProtocol.got_data(self, data)\n \n # If in streaming mode, process the buffer for samples to publish.\n cur_state = self.get_current_state()\n if cur_state == 
SBE37ProtocolState.AUTOSAMPLE:\n if SBE37_NEWLINE in self._linebuf:\n lines = self._linebuf.split(SBE37_NEWLINE)\n self._linebuf = lines[-1]\n for line in lines:\n self._extract_sample(line)", "def sendData (self, data) :\n\n assert len(data) <= 255\n \n return self.sendCommand(\"CMD_IN_DATA_STDIN\", data).addCallback(self._sendData_result)", "def send(self, data):", "def __commandparser(self, data):\n # zum bearbeiten einen String daraus machen\n cmdstr = data.decode('utf-8')\n self.log.debug(\"cmd: %s\" % cmdstr)\n # json parsen und dictonary Objekt daraus machen\n cmd = json.loads(cmdstr)\n #\n # ist es ein GET Kommando?\n #\n if 'get' in cmd:\n self.log.debug(\"get cmd recognized...\")\n return self.__get_cmd_parse(cmd['get'])\n elif 'set' in cmd:\n self.log.debug(\"set cmd recognized...\")\n return self.__set_cmd_parse(cmd['set'])\n elif 'delete' in cmd:\n self.log.debug(\"DELETE cmd recognized...\")\n return self.__delete_cmd_parse(cmd['delete'])\n else:\n self.log.warning(\"unknown command recived! Data: <{}>\".format(cmdstr))\n return json.dumps({'error': 'unknown command or not implemented yet'}).encode(encoding='utf-8')\n # ENDE __commandparser", "def send(self, data: bytes):", "def _send_command(self, command, data=None):\n self._spi_write(_SPI_COMMAND, [command])\n if data is not None:\n self._send_data(data)", "def send(self, data):\n pass", "def COM(cmd,data): #Status: WIP\r\n #Desc CMD Target Address\r\n if cmd == 'U': #get update U\r\n parseLocal(data)\r\n if cmd == 'T':\r\n setETM(data)\r\n# rpc(addr,getLocals,addr, lTime, lSped, lLoca, lStat)\r\n elif cmd == 'M': #merge M\r\n setETM(data)\r\n merge()\r\n elif cmd == 'E': #help E multicasted\r\n setStatus(data)\r\n emergency()\r\n elif cmd == 'D':\r\n getDest()", "def data_received(self, data):\n # self.debug(\"received data=%r\", binascii.hexlify(data))\n self.dispatcher.add_data(data)", "def send_data(self):\n data = self.datastore.use(self.data_name)\n if data is None:\n self.dbg(\"sockets_warning\", \"Data is none for {}\", [self.data_name])\n encoded_data = json.dumps(data).encode()\n self.conn.sendall(encoded_data)\n self.dbg(\"sockets_verbose\", \"Data sent\")", "def command_data(self, arg):\n if not self.__rcpt:\n raise errors.BadSequence('Error: need RCPT command')\n if arg:\n raise errors.BadArguments('DATA')\n self.__state = self.DATA\n self.write('354 End data with <CR><LF>.<CR><LF>',\n read_until_delimiter='{0}.{0}'.format(CRLF))", "def execute(self, data, options):\n return escape_if_needed(self.fetch(data), options)", "def from_server(self, data):\r\n pass", "def handle_datachan(bot, event):\n event.reply(event.chan.data.tojson())", "def _send_data(self):\n pass", "def data(self, data):\n self.__data = data", "def _run_internal(self, cmd, buf=None, via='set'):\n log.debug(' Command: {}'.format(str(cmd)))\n retval = None\n if str(via).lower().strip() == 'set':\n if type(buf) is list:\n retval = self.ds9.set(cmd, *buf)\n else:\n retval = self.ds9.set(cmd, buf)\n elif str(via).lower().strip() == 'get':\n # workaround for DS9 seg fault with empty frame\n # and wcs requests\n if 'wcs' in cmd and not self._loaded_data():\n log.debug(' No loaded data; skipping frame')\n retval = ''\n else:\n try:\n retval = self.ds9.get(cmd)\n except TypeError as err:\n log.error('Error in pyds9')\n log.debug(err)\n else:\n log.error('Unknown ds9 interaction command: ' + str(via))\n return retval", "def getData(self,cmd):\n self.ser.write(cmd.encode()+END.encode())\n out = self.ser.readline()\n\n if(out == \"\"):\n raise 
IOError(\"communication failed\")\n return out", "def dataReceived(self, data):", "def send_data(self, data: dict):\n pass", "def _send_command_to_entity_server(self, command, data=''):\n\t\tself._client_message_lock.acquire()\n\t\treply = self._entity_server_connection.send_message(command + ':' + str(data))\n\t\tself._client_message_lock.release()\n\t\treturn reply", "def send_serial_command(data):\n print(data)\n serial_command = data\n SERIAL_PARENT.send(serial_command)\n OUTGOING.append(serial_command)", "def __call__(self,data):\n\n log.debug('got data: %s' % (len(data)))\n\n # if we don't have args yet, these must be them\n if not self.args:\n self.parse_args(data)\n\n else:\n # we've already got args, must\n # be a message\n self.handle_send(data)", "def data(self, data):\n try:\n self.put_nowait(data)\n except Full:\n pass", "def dataReceived(self, data):\n print \"received:\", data", "def data(self, data):\n self._data = data", "def data(self, data):\n self._data = data", "def send_data(self, data):\r\n try:\r\n self.sock.sendto(data, self.addr)\r\n except Exception:\r\n print(\"Cant't send a package\")", "def receive(self, data):\n raise NotImplementedError", "def receive(self, data):\n raise NotImplementedError", "def test_sendCommand(self):\n self.p.sendCommand(\"CMD\", (\"param1\", \"param2\"))\n self.check(\"CMD param1 param2\\r\\n\")", "def setData(self,data):\n self.data = struct.pack(\"!Q\",data)", "def setData(self,data):\n self.data = struct.pack(\"!Q\",data)", "def data(self, data):\n if len(data) > 41:\n self._data = b\"\"\n else:\n self._data = data\n return self._data", "async def send_api_cmd(self, command: str) -> dict or None:\n try:\n # open reader and writer streams to the api\n reader, writer = await asyncio.open_connection(self.ip, self.api_port)\n\n # write the command\n api_command = json.dumps({\"command\": command}).encode('utf-8')\n\n # send the command\n writer.write(api_command)\n await writer.drain()\n\n data = b\"\"\n\n # loop to receive all the data\n while True:\n d = await reader.read(4096)\n if not d:\n break\n data += d\n\n # load the data into a dict\n data = json.loads(data.decode('utf-8')[:-1])\n\n # close the connection\n writer.close()\n await writer.wait_closed()\n\n # return the data\n return data\n except:\n # if an error happens, return none\n return None", "def update(self, data: bytes):\n self.send(data)", "def dataReceived(self, data: bytes):\n\n if self.output:\n self.output.write(data) # redirect the message to the server", "def recvCommand(self):\n return", "def data_received(self, data):\n pass", "def dataReceived(self, data):\n try:\n try:\n if not hasattr(self, \"_data\"):\n self._data = data\n else:\n self._data += data\n\n self._checkDataLength(self._data)\n\n git_command = self._data[4:]\n if git_command[:15] == b\"git-upload-pack\":\n self._buildResponseAndSend(git_command.decode(\"utf-8\"))\n else:\n raise ProtocolError()\n\n except GitCommandLengthMismatch:\n pass\n\n except ProtocolError:\n self.transport.loseConnection()\n return", "def __send(self, cmd_val, data):\n # Proof the input\n if cmd_val not in command.values():\n raise ValueError(\"{}: the provided command value {} is not valid.\".format(self.sensor_name, cmd_val))\n if not isinstance(data, bytearray):\n raise TypeError(\"{}: command data must be of type byte array.\".format(self.sensor_name))\n\n # Initialise the command bytes array\n bytes_to_send = bytearray()\n bytes_to_send.append(self.__SerialStart)\n bytes_to_send.append(self.__SendByte)\n 
bytes_to_send.append(cmd_val)\n\n # Add data and set zero to the remainder\n for i in range(0, 12):\n if i < len(data):\n bytes_to_send.append(data[i])\n else:\n bytes_to_send.append(0)\n\n # Last two bytes before the checksum is the CommandTerminator\n # TODO : rename command terminator to sensor ID\n bytes_to_send.append(self.__CommandTerminator)\n bytes_to_send.append(self.__CommandTerminator)\n\n # Calculate and append the checksum\n checksum = self.__checksum_make(bytes_to_send)\n bytes_to_send.append(checksum % 256)\n\n # Append the terminator for serial message\n bytes_to_send.append(self.__SerialEnd)\n\n self.logger.info(\"{}: sending {} {} command with {} message.\".format(self.sensor_name, command_mode.keys()[command_mode.values().index(bytes_to_send[3])], command.keys()[command.values().index(cmd_val)], \":\".join(\"%02x\" % b for b in bytes_to_send)))\n\n if len(bytes_to_send) != self.__CommandLength:\n raise IOError(\"{}: sent {} bytes, expected {}.\".format(self.sensor_name, len(bytes_to_send), self.__CommandLength))\n\n # Send the command\n written_bytes = self.device.write(bytes_to_send)\n self.device.flush()\n\n if written_bytes != len(bytes_to_send):\n raise IOError(\"{}: not all bytes written.\".format(self.sensor_name))\n\n # Check the received values\n received = self.__response(cmd_val)\n\n if len(received) != self.__ResponseLength:\n raise IOError(\"{}: received {} bytes, expected {}.\".format(self.sensor_name, len(received), self.__ResponseLength))\n\n if len(received) == 0:\n raise IOError(\"{}: sensor is not responding.\".format(self.sensor_name))\n\n # When no command or command is request command,\n # second byte has to be ReceiveByte\n if (cmd_val is None or cmd_val == command[\"Request\"]) and received[1] != self.__ReceiveByte:\n raise ValueError(\"{}: expected to receive value {:#X} on a value request. 
Received: \\\"{}\\\".\".format(self.sensor_name, self.__ReceiveByte, received[1]))\n\n # Check, if the response is response of the command, except request command\n if cmd_val != command[\"Request\"]:\n if received[2] != cmd_val:\n raise ValueError(\"{}: sensor response does not belong to the command sent before.\".format(self.sensor_name))\n else:\n return received[3: -2]\n else:\n return received", "def exec_command(self, cmd, in_data=None, sudoable=True):\n super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)\n\n self._display.vvv(u\"EXEC {0}\".format(cmd), host=self._host)\n\n cmd_args_list = shlex.split(to_native(cmd, errors='surrogate_or_strict'))\n\n if getattr(self._shell, \"_IS_WINDOWS\", False):\n # Become method 'runas' is done in the wrapper that is executed,\n # need to disable sudoable so the bare_run is not waiting for a\n # prompt that will not occur\n sudoable = False\n\n # Generate powershell commands\n cmd_args_list = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)\n\n # TODO(odyssey4me):\n # Implement buffering much like the other connection plugins\n # Implement 'env' for the environment settings\n # Implement 'input-data' for whatever it might be useful for\n request_exec = {\n 'execute': 'guest-exec',\n 'arguments': {\n 'path': cmd_args_list[0],\n 'capture-output': True,\n 'arg': cmd_args_list[1:]\n }\n }\n request_exec_json = json.dumps(request_exec)\n\n display.vvv(u\"GA send: {0}\".format(request_exec_json), host=self._host)\n\n # TODO(odyssey4me):\n # Add timeout parameter\n result_exec = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_exec_json, 5, 0))\n\n display.vvv(u\"GA return: {0}\".format(result_exec), host=self._host)\n\n request_status = {\n 'execute': 'guest-exec-status',\n 'arguments': {\n 'pid': result_exec['return']['pid']\n }\n }\n request_status_json = json.dumps(request_status)\n\n display.vvv(u\"GA send: {0}\".format(request_status_json), host=self._host)\n\n # TODO(odyssey4me):\n # Work out a better way to wait until the command has exited\n result_status = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_status_json, 5, 0))\n\n display.vvv(u\"GA return: {0}\".format(result_status), host=self._host)\n\n while not result_status['return']['exited']:\n result_status = json.loads(libvirt_qemu.qemuAgentCommand(self.domain, request_status_json, 5, 0))\n\n display.vvv(u\"GA return: {0}\".format(result_status), host=self._host)\n\n if result_status['return'].get('out-data'):\n stdout = base64.b64decode(result_status['return']['out-data'])\n else:\n stdout = b''\n\n if result_status['return'].get('err-data'):\n stderr = base64.b64decode(result_status['return']['err-data'])\n else:\n stderr = b''\n\n # Decode xml from windows\n if getattr(self._shell, \"_IS_WINDOWS\", False) and stdout.startswith(b\"#< CLIXML\"):\n stdout = _parse_clixml(stdout)\n\n display.vvv(u\"GA stdout: {0}\".format(to_text(stdout)), host=self._host)\n display.vvv(u\"GA stderr: {0}\".format(to_text(stderr)), host=self._host)\n\n return result_status['return']['exitcode'], stdout, stderr", "def execute(self, data, options):\n return escape_if_needed(\n self.case.execute(self.token.execute(data, {'escape': False})), options)", "def test_data(clear, data):\r\n cmd = ShdlcCmdGetErrorState(clear=clear)\r\n assert type(cmd.data) is bytes\r\n assert cmd.data == data", "def allDataReceivedForProtocol(self, protocol, data):\n protocol.dataReceived(data)\n protocol.connectionLost(None)", "def 
cb_exec_cmd(data, remaining_calls):\n # Process the entered command.\n data = list(data)\n del data[0]\n data = \"\".join(data)\n # s/foo/bar command.\n if data.startswith(\"s/\"):\n cmd = data\n parsed_cmd = next(csv.reader(StringIO(cmd), delimiter=\"/\",\n escapechar=\"\\\\\"))\n pattern = re.escape(parsed_cmd[1])\n repl = parsed_cmd[2]\n repl = re.sub(r\"([^\\\\])&\", r\"\\1\" + pattern, repl)\n flag = None\n if len(parsed_cmd) == 4:\n flag = parsed_cmd[3]\n count = 1\n if flag == \"g\":\n count = 0\n buf = weechat.current_buffer()\n input_line = weechat.buffer_get_string(buf, \"input\")\n input_line = re.sub(pattern, repl, input_line, count)\n weechat.buffer_set(buf, \"input\", input_line)\n # Shell command.\n elif data.startswith(\"!\"):\n weechat.command(\"\", \"/exec -buffer shell %s\" % data[1:])\n # Commands like `:22`. This should start cursor mode (``/cursor``) and take\n # us to the relevant line.\n # TODO: look into possible replacement key bindings for: ← ↑ → ↓ Q m q.\n elif data.isdigit():\n line_number = int(data)\n hdata_window = weechat.hdata_get(\"window\")\n window = weechat.current_window()\n x = weechat.hdata_integer(hdata_window, window, \"win_chat_x\")\n y = (weechat.hdata_integer(hdata_window, window, \"win_chat_y\") +\n (line_number - 1))\n weechat.command(\"\", \"/cursor go {},{}\".format(x, y))\n # Check againt defined commands.\n else:\n data = data.split(\" \", 1)\n cmd = data[0]\n args = \"\"\n if len(data) == 2:\n args = data[1]\n if cmd in VI_COMMANDS:\n weechat.command(\"\", \"%s %s\" % (VI_COMMANDS[cmd], args))\n # No vi commands defined, run the command as a WeeChat command.\n else:\n weechat.command(\"\", \"/{} {}\".format(cmd, args))\n return weechat.WEECHAT_RC_OK", "def do_command(command):\n send_command(command)\n response = get_response()\n print(\"Rcvd: <<< \\n\" + response)\n return response", "def read_vsys_data_direct(command):\n global _vs_vsys\n if _vs_vsys is None:\n try:\n _vs_vsys = VsysFrontend(_VSYS_FRONTEND_TARGET)\n _vs_vsys.open()\n except VsysException as err:\n collectd.error('Failed to setup VsysFrontend: %s' % err)\n return {}\n\n try:\n raw_data = _vs_vsys.sendrecv(command)\n except VsysException as err:\n collectd.error('Failed to receive message: %s' % err)\n _vs_vsys.close()\n _vs_vsys = None # Will be re-opened on next call.\n return {}\n\n try:\n data = json.loads(raw_data)\n except ValueError as err:\n collectd.error('Failed to load json from raw data: -%s-' % raw_data)\n collectd.error(str(err))\n return {}\n\n return data", "def request(self, command_code, data):\n name, request_func, response_func = afpcommands.commands[command_code]\n return request_func(data)", "def transfer_data(self):\n pass", "def setData(self,data):\n self.data = struct.pack(\"!I\",data)", "def setData(self, data):\n self.data = struct.pack(\"!I\",data)", "def execute_command(self, command):\n raise NotImplementedError", "def sendData(self, program_counter, data_type, data):\n pc_size = len(program_counter)\n data_size = len(data)\n fmt = \"!HHB%dI%ds\" % (pc_size, data_size)\n t = (pc_size, data_size, data_type) + program_counter + (data,)\n packet = struct.pack(fmt, *t)\n self.sendString(packet)\n self.sent_packets += 1\n self.sent_bytes += len(packet)", "def sendDataMessage(iTag, clsName, msgID, msg): #@NoSelf", "def package_data(self, data):\n pass", "def send_command(self, command):\n send_message = \"\"\n for i in command:\n send_message += chr(i)\n #send_message += bytes(i)\n\n for data in send_message:\n self.pymata.transport.write(data)", 
"def send(self, data: bytes) -> int:\n ...", "def setData(self,data):\n self.data = struct.pack(\"!d\",data)", "def transmit_command(command, socket, guit):\n if command == \"get_map_update\":\n send_data(socket, \"SEND_MAP\")\n ack = receive_data(socket)\n robot_map_data = json.loads(receive_data(socket)) # [[robot_x, robot_y], [map..]\n guit.receive_command([\"update_map\", robot_map_data[0], robot_map_data[1]])\n elif command == \"sync_mode\":\n send_data(socket, \"SYNC_MODE\") #0 is autonomous, 1 is manual\n ack = receive_data(socket)\n current_mode_integer = receive_data(socket)\n guit.receive_command([\"update_mode\", current_mode_integer]) \n elif len(command) > 3 and command[:4] == \"key_\": #Fulhack that will save us many rows.\n send_data(socket, \"KEY_EVENT\")\n ack = receive_data(socket)\n send_data(socket, command[4:])\n elif len(command) > 4 and command [:5] == \"mode_\":\n send_data(socket, \"TOGGLE_MODE\")\n ack = receive_data(socket)\n send_data(socket, command[5:])\n elif command == \"get_motor_data\":\n send_data(socket, \"FORWARD_MOTOR_INFO\")\n ack = receive_data(socket)\n motor_data = json.loads(receive_data(socket))\n dir_mod_left = 1 if motor_data[\"LEFT_SIDE_DIRECTION\"] else -1\n dir_mod_right = 1 if motor_data[\"RIGHT_SIDE_DIRECTION\"] else -1\n speed_left = motor_data[\"LEFT_SIDE_SPEED\"]*dir_mod_left\n speed_right = motor_data[\"RIGHT_SIDE_SPEED\"]*dir_mod_right\n guit.receive_command([\"set_motors\", speed_left, speed_right])\n guit.receive_command([\"set_servo\", motor_data[\"SERVO_ANGLE\"]])\n elif command == \"get_sensor_data\":\n send_data(socket, \"FORWARD_SENSOR_INFO\")\n ack = receive_data(socket)\n sensor_data = json.loads(receive_data(socket))\n guit.receive_command([\"set_sensors\", sensor_data])", "def run_command(self, command):\n # Put the command in a nice byte-encoded variable\n full_command = command.encode('ascii') + b'\\n'\n # Write out the command to telnet\n self.tn.write(full_command)\n # Get the command output, decode it, and split out the junk\n command_output = self.tn.read_until(b'> ').decode('ascii').split('\\r\\n')[:-1]\n # Raise command error if VLC does not recognize the command.\n if command_output != []:\n command_error = re.match(r\"Error in.*\", command_output[0])\n if re.match(\"Unknown command `.*'\\. 
Type `help' for help\\.\", command_output[0]):\n raise CommandError(\"Unkown Command\")\n elif command_error:\n raise LuaError(command_error.group())\n # Return the split output of the command\n return command_output", "def received_command(self, cmd: Data, source: tuple, destination: tuple) -> bool:\n raise NotImplemented", "def runSerial(self, op, data):\n return self.testMode(self._runSerial, op, data)", "def send_data(data_string):\n if connection_type == USE_I2C:\n cmd = \"\"\n cmd += chr( SSD1306_ADDRESS )\n cmd += chr( SELECT_DATA_BYTE )\n cmd += data_string\n i2cWrite(cmd, 10, False)\n else:\n print \"Not implemented for that connection type yet.\"", "def command(self,addr,cmd):\n if isinstance(cmd,int):\n cmd = struct.pack('!L',cmd)\n res = self._send(bytes([0xef,0xfe,0x05,addr<<1])+cmd)\n if res:\n self.wrcache[addr] = cmd", "def handle_dataevent(bot, event):\n event.reply(event.tojson())", "def handle(self, data):\n pass", "def __init__(self, header: CmdHeader, raw_data: bytes) -> None:\n super().__init__(header, raw_data)\n self.status, self.length, *self.values = unpack_from(f'<{self.header.params_count}I', raw_data)\n self.data = raw_data[8:8 + self.length] if self.length > 0 else b''", "async def data_received(self, data: bytes):\n logging.info('received: %s' % data.decode())", "def push_data(self, data):\n self.incoming.write(data)", "def send_bytes(self, data: bytes) -> None:", "def simulate_reply(self, data):\n self._data = data[:]" ]
[ "0.71024805", "0.65037835", "0.63819116", "0.636673", "0.63638586", "0.63270706", "0.62600714", "0.62551844", "0.62551564", "0.62109536", "0.6167672", "0.6122832", "0.61040425", "0.60816944", "0.60466945", "0.60187083", "0.60072166", "0.59970105", "0.5980538", "0.59644943", "0.5964164", "0.59414417", "0.59411055", "0.59286344", "0.5925875", "0.5925522", "0.59239167", "0.59222513", "0.59150076", "0.5910096", "0.58773255", "0.58544576", "0.58501744", "0.5844513", "0.5787662", "0.5783724", "0.5773337", "0.57609767", "0.5733386", "0.5730797", "0.57191914", "0.5712854", "0.57128197", "0.56946146", "0.569181", "0.5672716", "0.56704575", "0.5654422", "0.5654387", "0.56531864", "0.5652927", "0.5639178", "0.56376594", "0.56210494", "0.56179523", "0.56179523", "0.56178993", "0.56150734", "0.56150734", "0.5601096", "0.55909675", "0.55909675", "0.5590585", "0.5588175", "0.55864775", "0.5583373", "0.55766815", "0.55724096", "0.5567413", "0.5563956", "0.55584955", "0.5555762", "0.554817", "0.5541742", "0.5537285", "0.55345523", "0.5532884", "0.55248463", "0.55227786", "0.5519075", "0.5515678", "0.55142945", "0.55133784", "0.551157", "0.54916984", "0.5475282", "0.5470563", "0.54588777", "0.54583", "0.5443958", "0.5443337", "0.5437554", "0.5426533", "0.5422843", "0.5421818", "0.5418454", "0.54148567", "0.540987", "0.5401496", "0.5398286", "0.53918105" ]
0.0
-1
The `starttls` command is a special case: it accesses the protocol directly, rather than using `execute_command`.
async def test_disconnected_server_raises_on_starttls(preset_client): await preset_client.connect() preset_client.server.responses.append( b"\n".join([b"250-localhost, hello", b"250-SIZE 100000", b"250 STARTTLS"]) ) await preset_client.ehlo() preset_client.server.responses.append(b"220 begin TLS pls") preset_client.server.drop_connection_event.set() with pytest.raises(SMTPServerDisconnected): await preset_client.starttls(validate_certs=False) # Verify that the connection was closed assert not preset_client._connect_lock.locked() assert preset_client.protocol is None assert preset_client.transport is None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __starttls(self, keyfile=None, certfile=None):\n if not self.has_tls_support():\n raise Error(\"STARTTLS not supported by the server\")\n code, data = self.__send_command(\"STARTTLS\")\n if code != \"OK\":\n return False\n try:\n nsock = ssl.wrap_socket(self.sock, keyfile, certfile)\n except ssl.SSLError as e:\n raise Error(\"SSL error: %s\" % str(e))\n self.sock = nsock\n self.__capabilities = {}\n self.__get_capabilities()\n return True", "def starttls(self,\r\n username=None, password=None,\r\n certChain=None, privateKey=None,\r\n checker=None,\r\n settings=None):\r\n (resp, reply) = self.docmd(\"STARTTLS\")\r\n if resp == 220:\r\n helper = ClientHelper(\r\n username, password, \r\n certChain, privateKey,\r\n checker,\r\n settings)\r\n conn = TLSConnection(self.sock)\r\n helper._handshake(conn)\r\n self.sock = conn\r\n self.file = conn.makefile('rb')\r\n return (resp, reply)", "def starttls(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"starttls\")", "def start_tls(self, start_tls):\n\n self._start_tls = start_tls", "def func_starttls(self, data):\n check = bytes(data).decode().encode('ascii', 'ignore').decode().lower().rstrip()\n if check == 'starttls':\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get(check))\n return True", "def starttls(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"starttls\")", "def starttls(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"starttls\")", "def test_startTLS(self):\n disconnected = self.startTLSAndAssertSession()\n self.connected.addCallback(self._cbStopClient)\n self.connected.addErrback(self._ebGeneral)\n return disconnected", "def test_tls_in_tls_tunnel(self) -> None:\n self.start_destination_server()\n self.start_proxy_server()\n\n sock = socket.create_connection(\n (self.proxy_server.host, self.proxy_server.port)\n )\n with self.client_context.wrap_socket(\n sock, server_hostname=\"localhost\"\n ) as proxy_sock:\n with SSLTransport(\n proxy_sock, self.client_context, server_hostname=\"localhost\"\n ) as destination_sock:\n assert destination_sock.version() is not None\n destination_sock.send(sample_request())\n response = consume_socket(destination_sock)\n validate_response(response)", "def starttls(socket, success=None, failure=None, io=None, **options):\n\n ## Default Options\n\n options.setdefault('do_handshake_on_connect', False)\n options.setdefault('ssl_version', ssl.PROTOCOL_TLSv1)\n\n ## Handlers\n\n def done():\n \"\"\"Handshake finished successfully.\"\"\"\n\n io.remove_handler(wrapped.fileno())\n success and success(wrapped)\n\n def error():\n \"\"\"The handshake failed.\"\"\"\n\n if failure:\n return failure(wrapped)\n ## By default, just close the socket.\n io.remove_handler(wrapped.fileno())\n wrapped.close()\n\n def handshake(fd, events):\n \"\"\"Handler for SSL handshake negotiation. 
See Python docs for\n ssl.do_handshake().\"\"\"\n\n if events & io.ERROR:\n error()\n return\n\n try:\n new_state = io.ERROR\n wrapped.do_handshake()\n return done()\n except ssl.SSLError as exc:\n if exc.args[0] == ssl.SSL_ERROR_WANT_READ:\n new_state |= io.READ\n elif exc.args[0] == ssl.SSL_ERROR_WANT_WRITE:\n new_state |= io.WRITE\n else:\n raise\n\n if new_state != state[0]:\n state[0] = new_state\n io.update_handler(fd, new_state)\n\n ## set up handshake state; use a list as a mutable cell.\n io = io or ioloop.IOLoop.instance()\n state = [io.ERROR]\n\n ## Wrap the socket; swap out handlers.\n io.remove_handler(socket.fileno())\n wrapped = SSLSocket(socket, **options)\n wrapped.setblocking(0)\n io.add_handler(wrapped.fileno(), handshake, state[0])\n\n ## Begin the handshake.\n handshake(wrapped.fileno(), 0)\n return wrapped", "def test_doubleSTARTTLS(self):\n\n class DoubleSTARTTLSClient(SimpleClient):\n\n def startTLS(self):\n if not self.startedTLS:\n return SimpleClient.startTLS(self)\n\n return self.sendCommand(imap4.Command(b\"STARTTLS\"))\n\n self.client = DoubleSTARTTLSClient(self.connected,\n contextFactory=self.clientCTX)\n\n disconnected = self.startTLSAndAssertSession()\n\n self.connected.addCallback(strip(self.client.startTLS))\n self.connected.addErrback(self.assertClientFailureMessage, b\"TLS already negotiated\")\n\n self.connected.addCallback(self._cbStopClient)\n self.connected.addErrback(self._ebGeneral)\n\n return disconnected", "def do(self):\n call_command('activate-ssl')", "def tlslite_connect(host, username, password, port = DEFAULT_SERVER_PORT):\n return factory.tlslite_connect(host, port, username, password, SlaveService)", "def has_tls_support(self):\n return \"STARTTLS\" in self.__capabilities", "def startTLSAndAssertSession(self):\n success = []\n self.connected.addCallback(strip(self.client.startTLS))\n def checkSecure(ignored):\n self.assertTrue(\n interfaces.ISSLTransport.providedBy(self.client.transport))\n self.connected.addCallback(checkSecure)\n self.connected.addCallback(success.append)\n\n d = self.loopback()\n d.addCallback(lambda x : self.assertTrue(success))\n return defer.gatherResults([d, self.connected])", "def test_get_tls(matrix):\n matrix.charm_config[\"enable-tls\"] = True\n result = matrix.get_tls()\n assert result is True\n matrix.charm_config[\"enable-tls\"] = False\n result = matrix.get_tls()\n assert result is False", "def _config_tls(self):\n pass", "def ssl_connect(host, port = DEFAULT_SERVER_SSL_PORT, keyfile = None, \n certfile = None, ca_certs = None, ssl_version = None):\n return factory.ssl_connect(host, port, keyfile = keyfile, certfile = certfile,\n ssl_version = ssl_version, ca_certs = ca_certs, service = SlaveService)", "def _set_tls_capabilities(self, caps):\n if self.settings.get(\"ssl-mode\") == SSLMode.DISABLED:\n return\n\n if self.stream.is_socket():\n if self.settings.get(\"ssl-mode\"):\n _LOGGER.warning(\"SSL not required when using Unix socket.\")\n return\n\n if \"tls\" not in caps:\n self.close_connection()\n raise OperationalError(\"SSL not enabled at server\")\n\n is_ol7 = False\n if platform.system() == \"Linux\":\n distname, version, _ = linux_distribution()\n try:\n is_ol7 = \"Oracle Linux\" in distname and version.split(\".\")[0] == \"7\"\n except IndexError:\n is_ol7 = False\n\n if sys.version_info < (2, 7, 9) and not is_ol7:\n self.close_connection()\n raise RuntimeError(\n \"The support for SSL is not available for this Python version\"\n )\n\n self.protocol.set_capabilities(tls=True)\n 
self.stream.set_ssl(\n self.settings.get(\"tls-versions\", None),\n self.settings.get(\"ssl-mode\", SSLMode.REQUIRED),\n self.settings.get(\"ssl-ca\"),\n self.settings.get(\"ssl-crl\"),\n self.settings.get(\"ssl-cert\"),\n self.settings.get(\"ssl-key\"),\n self.settings.get(\"tls-ciphersuites\"),\n )\n if \"attributes\" in self.settings:\n conn_attrs = self.settings[\"attributes\"]\n self.protocol.set_capabilities(session_connect_attrs=conn_attrs)", "def ClientApp(stream):\n\n def secured():\n print 'client secured!'\n write('QUIT')\n\n def read(line):\n line = line.strip()\n print 'server said: %r.' % line\n if line == \"YOU SAID: 'hello'\":\n write('STARTTLS')\n elif line == 'PROCEED':\n stream.starttls(secured)\n elif line == 'GOODBYE':\n stream.close()\n else:\n wait()\n\n def write(data):\n stream.write('%s\\n' % data)\n wait()\n\n def wait():\n stream.read_until('\\n', read)\n\n ## Begin\n print 'starting client: %r' % stream.socket.fileno()\n write('hello')", "def force_communicate_tls(rest: ClusterManager) -> bool:\n settings, err = rest.get_security_settings()\n _exit_if_errors(err)\n\n # The cluster isn't using 'strict' cluster encryption, we shouldn't need to force enable TLS\n if 'clusterEncryptionLevel' not in settings or settings['clusterEncryptionLevel'] != 'strict':\n return False\n\n # The user might not have used a 'https://' scheme prefix, so communicating to other nodes via the secure ports may\n # lead to interesting/surprising errors; let them know beforehand.\n _warning(\"sub-command requires multi-node communication via TLS enabled ports, '--cacert' or \"\n \"'--no-ssl-verify' may need to be supplied\")\n\n return True", "def test_startTLSWithExistingChallengers(self):\n self.server.challengers = {b\"LOGIN\": imap4.LOGINCredentials,\n b\"PLAIN\": imap4.PLAINCredentials}\n\n @defer.inlineCallbacks\n def assertLOGINandPLAIN():\n capabilities = yield self.client.getCapabilities()\n self.assertIn(b\"AUTH\", capabilities)\n self.assertIn(b\"LOGIN\", capabilities[b\"AUTH\"])\n self.assertIn(b\"PLAIN\", capabilities[b\"AUTH\"])\n\n self.connected.addCallback(strip(assertLOGINandPLAIN))\n\n disconnected = self.startTLSAndAssertSession()\n\n self.connected.addCallback(strip(assertLOGINandPLAIN))\n\n self.connected.addCallback(self._cbStopClient)\n self.connected.addErrback(self._ebGeneral)\n\n return disconnected", "def test_loginBeforeSTARTTLS(self):\n # Prevent the client from issuing STARTTLS.\n self.client.startTLS = lambda: defer.succeed(\n ([], 'OK Begin TLS negotiation now')\n )\n self.connected.addCallback(\n lambda _: self.client.login(b\"wrong\", b\"time\"),\n )\n\n self.connected.addErrback(\n self.assertClientFailureMessage,\n b\"LOGIN is disabled before STARTTLS\",\n )\n\n self.connected.addCallback(self._cbStopClient)\n self.connected.addErrback(self._ebGeneral)\n\n return defer.gatherResults([self.loopback(), self.connected])", "def credential_server_spawn( old_exit_status ):\n \n setproctitle.setproctitle( \"syndicate-credential-server\" )\n \n private_key = syndicate_storage.read_private_key( CONFIG.SYNDICATE_PRIVATE_KEY )\n if private_key is None:\n # exit code 255 will be ignored...\n logger.error(\"Cannot load private key. 
Exiting...\")\n sys.exit(255)\n \n logger.info(\"Starting Syndicate Observer credential server on port %s\" % CONFIG.SYNDICATE_HTTP_PORT)\n \n srv = CredentialServer( private_key.exportKey(), CONFIG.SYNDICATE_OPENCLOUD_SECRET, ('', CONFIG.SYNDICATE_HTTP_PORT), CredentialServerHandler)\n srv.serve_forever()", "def send_command(command, timeout_time = set_err_codes.tcs_coms_timeout):\n\t\n\ttry:\n\t\t#Send the command to the TCS\t\n\t\toutput = subprocess.run(['ssh','wasp@tcs', command],\n\t\t\t\tcapture_output=True, timeout=timeout_time)\n\texcept subprocess.TimeoutExpired:\n\t\tlogger.critical('Failed to contact TCS')\n\telse:\n\t\tresponse = output.stdout\n\t\n\t#get rid of repeated command\n\tresponse = response.decode('utf-8')\n\tlogger.info('FROM TCS: '+response)\n\treturn response", "def test_ssl_adapters(\n http_request_timeout,\n tls_http_server, adapter_type,\n tls_certificate,\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n tls_ca_certificate_pem_path,\n):\n interface, _host, port = _get_conn_data(ANY_INTERFACE_IPV4)\n tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)\n tls_adapter = tls_adapter_cls(\n tls_certificate_chain_pem_path, tls_certificate_private_key_pem_path,\n )\n if adapter_type == 'pyopenssl':\n tls_adapter.context = tls_adapter.get_context()\n\n tls_certificate.configure_cert(tls_adapter.context)\n\n tlshttpserver = tls_http_server((interface, port), tls_adapter)\n\n # testclient = get_server_client(tlshttpserver)\n # testclient.get('/')\n\n interface, _host, port = _get_conn_data(\n tlshttpserver.bind_addr,\n )\n\n resp = requests.get(\n 'https://{host!s}:{port!s}/'.format(host=interface, port=port),\n timeout=http_request_timeout,\n verify=tls_ca_certificate_pem_path,\n )\n\n assert resp.status_code == 200\n assert resp.text == 'Hello world!'", "def test_enable_tls_and_plaintext(cassandra_service, dcos_ca_bundle):\n update_service_transport_encryption(\n cassandra_service, enabled=True, allow_plaintext=True)\n verify_client_can_write_read_and_delete(dcos_ca_bundle)", "def test_tls_client_auth( # noqa: C901, WPS213 # FIXME\n # FIXME: remove twisted logic, separate tests\n http_request_timeout,\n mocker,\n tls_http_server, adapter_type,\n ca,\n tls_certificate,\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n tls_ca_certificate_pem_path,\n is_trusted_cert, tls_client_identity,\n tls_verify_mode,\n):\n test_cert_rejection = (\n tls_verify_mode != ssl.CERT_NONE\n and not is_trusted_cert\n )\n interface, _host, port = _get_conn_data(ANY_INTERFACE_IPV4)\n\n client_cert_root_ca = ca if is_trusted_cert else trustme.CA()\n with mocker.mock_module.patch(\n 'idna.core.ulabel',\n return_value=ntob(tls_client_identity),\n ):\n client_cert = client_cert_root_ca.issue_cert(\n ntou(tls_client_identity),\n )\n del client_cert_root_ca\n\n with client_cert.private_key_and_cert_chain_pem.tempfile() as cl_pem:\n tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)\n tls_adapter = tls_adapter_cls(\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n )\n if adapter_type == 'pyopenssl':\n tls_adapter.context = tls_adapter.get_context()\n tls_adapter.context.set_verify(\n _stdlib_to_openssl_verify[tls_verify_mode],\n lambda conn, cert, errno, depth, preverify_ok: preverify_ok,\n )\n else:\n tls_adapter.context.verify_mode = tls_verify_mode\n\n ca.configure_trust(tls_adapter.context)\n tls_certificate.configure_cert(tls_adapter.context)\n\n tlshttpserver = tls_http_server((interface, port), 
tls_adapter)\n\n interface, _host, port = _get_conn_data(tlshttpserver.bind_addr)\n\n make_https_request = functools.partial(\n requests.get,\n 'https://{host!s}:{port!s}/'.format(host=interface, port=port),\n\n # Don't wait for the first byte forever:\n timeout=http_request_timeout,\n\n # Server TLS certificate verification:\n verify=tls_ca_certificate_pem_path,\n\n # Client TLS certificate verification:\n cert=cl_pem,\n )\n\n if not test_cert_rejection:\n resp = make_https_request()\n is_req_successful = resp.status_code == 200\n if (\n not is_req_successful\n and IS_PYOPENSSL_SSL_VERSION_1_0\n and adapter_type == 'builtin'\n and tls_verify_mode == ssl.CERT_REQUIRED\n and tls_client_identity == 'localhost'\n and is_trusted_cert\n ):\n pytest.xfail(\n 'OpenSSL 1.0 has problems with verifying client certs',\n )\n assert is_req_successful\n assert resp.text == 'Hello world!'\n resp.close()\n return\n\n # xfail some flaky tests\n # https://github.com/cherrypy/cheroot/issues/237\n issue_237 = (\n IS_MACOS\n and adapter_type == 'builtin'\n and tls_verify_mode != ssl.CERT_NONE\n )\n if issue_237:\n pytest.xfail('Test sometimes fails')\n\n expected_ssl_errors = requests.exceptions.SSLError,\n if IS_WINDOWS or IS_GITHUB_ACTIONS_WORKFLOW:\n expected_ssl_errors += requests.exceptions.ConnectionError,\n with pytest.raises(expected_ssl_errors) as ssl_err:\n make_https_request().close()\n\n try:\n err_text = ssl_err.value.args[0].reason.args[0].args[0]\n except AttributeError:\n if IS_WINDOWS or IS_GITHUB_ACTIONS_WORKFLOW:\n err_text = str(ssl_err.value)\n else:\n raise\n\n if isinstance(err_text, int):\n err_text = str(ssl_err.value)\n\n expected_substrings = (\n 'sslv3 alert bad certificate' if IS_LIBRESSL_BACKEND\n else 'tlsv1 alert unknown ca',\n )\n if IS_MACOS and IS_PYPY and adapter_type == 'pyopenssl':\n expected_substrings = ('tlsv1 alert unknown ca',)\n if (\n tls_verify_mode in (\n ssl.CERT_REQUIRED,\n ssl.CERT_OPTIONAL,\n )\n and not is_trusted_cert\n and tls_client_identity == 'localhost'\n ):\n expected_substrings += (\n 'bad handshake: '\n \"SysCallError(10054, 'WSAECONNRESET')\",\n \"('Connection aborted.', \"\n 'OSError(\"(10054, \\'WSAECONNRESET\\')\"))',\n \"('Connection aborted.', \"\n 'OSError(\"(10054, \\'WSAECONNRESET\\')\",))',\n \"('Connection aborted.', \"\n 'error(\"(10054, \\'WSAECONNRESET\\')\",))',\n \"('Connection aborted.', \"\n 'ConnectionResetError(10054, '\n \"'An existing connection was forcibly closed \"\n \"by the remote host', None, 10054, None))\",\n \"('Connection aborted.', \"\n 'error(10054, '\n \"'An existing connection was forcibly closed \"\n \"by the remote host'))\",\n ) if IS_WINDOWS else (\n \"('Connection aborted.', \"\n 'OSError(\"(104, \\'ECONNRESET\\')\"))',\n \"('Connection aborted.', \"\n 'OSError(\"(104, \\'ECONNRESET\\')\",))',\n \"('Connection aborted.', \"\n 'error(\"(104, \\'ECONNRESET\\')\",))',\n \"('Connection aborted.', \"\n \"ConnectionResetError(104, 'Connection reset by peer'))\",\n \"('Connection aborted.', \"\n \"error(104, 'Connection reset by peer'))\",\n ) if (\n IS_GITHUB_ACTIONS_WORKFLOW\n and IS_LINUX\n ) else (\n \"('Connection aborted.', \"\n \"BrokenPipeError(32, 'Broken pipe'))\",\n )\n\n if PY310_PLUS:\n # FIXME: Figure out what's happening and correct the problem\n expected_substrings += (\n 'SSLError(SSLEOFError(8, '\n \"'EOF occurred in violation of protocol (_ssl.c:\",\n )\n if IS_GITHUB_ACTIONS_WORKFLOW and IS_WINDOWS and PY310_PLUS:\n expected_substrings += (\n \"('Connection aborted.', \"\n 'RemoteDisconnected('\n 
\"'Remote end closed connection without response'))\",\n )\n\n assert any(e in err_text for e in expected_substrings)", "def pasv_cmd(self):\n print_debug(\"Executing PASV\")\n command = \"PASV\\r\\n\"\n msg_rec = self.send_and_log(self.s, command)\n print_debug(msg_rec)\n # PASV creates a new connection from client to server.\n sock = new_socket()\n pasv_ip, pasv_port = self.parse_pasv_resp(msg_rec)\n self.pasv_connection(sock, pasv_ip, pasv_port)\n return msg_rec, sock", "def ServerApp(stream, certfile, keyfile):\n\n def secured():\n print 'server secured!'\n wait()\n\n def read(line):\n line = line.strip()\n print 'client said: %r.' % line\n\n if line == 'QUIT':\n write('GOODBYE')\n elif line == 'STARTTLS':\n stream.write('PROCEED\\n')\n stream.starttls(\n secured,\n server_side=True,\n certfile=certfile,\n keyfile=keyfile\n )\n else:\n write('YOU SAID: %r' % line)\n\n def write(data):\n stream.write('%s\\n' % data)\n wait()\n\n def wait():\n stream.read_until('\\n', read)\n\n ## Begin\n print 'starting server: %r' % stream.socket.fileno()\n wait()", "def tls(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"tls\")", "def proc_exec_async(cmd):\n\n envoy.connect(cmd)\n return None", "def secure_connection(HOST,PORT,PROTOCOL=\"smtp\",TLSSTRENGTH=\"tls1_2\"):\n\n print(\"Connecting to host: {} on Port Number {} using an IMPLICITY SECURE Connection \\r\\n\".format(HOST,PORT))\n\n context = create_tls_context(TLSSTRENGTH)\n secure_client = context.wrap_socket(socket.socket(socket.AF_INET),server_hostname=HOST)\n secure_client.settimeout(SOCKET_TIMEOUT)\n\n try:\n secure_client.connect((HOST,PORT))\n # SMTP NEEDS A EHLO MESSAGE BEFORE OTHER COMMANDS\n # IMAP AND POP DO NOT\n data = secure_client.recv(1024)\n if PROTOCOL==\"smtp\":\n secure_client.send(SMTP_EHLO)\n data = secure_client.recv(1024)\n #print('SMTP EHLO RESPONSE: ', repr(data))\n print_cipher_certificate(secure_client)\n decide_protocol_handler(secure_client,PROTOCOL)\n\n except Exception as e:\n print(\"Connection Could Not Be Established \\r\\n\")\n print(e)", "def setServerHandshakeOp(self, **args):\r\n handshaker = self.tlsConnection.handshakeServerAsync(**args)\r\n self.setHandshakeOp(handshaker)", "def test_tls_in_tls_recv_into_sendall(self) -> None:\n self.start_destination_server()\n self.start_proxy_server()\n\n sock = socket.create_connection(\n (self.proxy_server.host, self.proxy_server.port)\n )\n with self.client_context.wrap_socket(\n sock, server_hostname=\"localhost\"\n ) as proxy_sock:\n with SSLTransport(\n proxy_sock, self.client_context, server_hostname=\"localhost\"\n ) as destination_sock:\n destination_sock.sendall(sample_request())\n response = bytearray(65536)\n destination_sock.recv_into(response)\n str_response = response.decode(\"utf-8\").rstrip(\"\\x00\")\n validate_response(str_response, binary=False)", "async def _connect(self, subsystem=None, exec_command=None):\n ip, port, user, passwd = await self.dest_info()\n self._extra_info[\"peer\"] = PeerInfo(ip, port)\n\n if self._devinfo.proxy_required(ip):\n host = self.service.get_http_proxy_url(ip)\n elif self._devinfo.should_nat(ip):\n host = await self._devinfo.translate_address(ip)\n else:\n host = ip\n\n self.logger.info(\"Connecting to: %s: %d\", host, port)\n\n # known_hosts is set to None to disable the host verifications. 
Without\n # this the connection setup fails for some devices\n conn, _ = await asyncssh.create_connection(\n self._client_factory,\n host=host,\n port=port,\n username=user,\n password=passwd,\n client_keys=None,\n known_hosts=None,\n )\n\n chan, cmd_stream = await self._conn.create_session(\n lambda: CommandStream(self, self._loop),\n encoding=None,\n term_type=self.TERM_TYPE,\n subsystem=subsystem,\n command=exec_command,\n )\n self._chan = chan\n return cmd_stream", "def start(self):\n self.protocol.makeConnection(self.transport)", "def start(self):\n self.protocol.makeConnection(self.transport)", "def make_tls_http_server(bind_addr, ssl_adapter, request):\n httpserver = HTTPServer(\n bind_addr=bind_addr,\n gateway=HelloWorldGateway,\n )\n # httpserver.gateway = HelloWorldGateway\n httpserver.ssl_adapter = ssl_adapter\n\n threading.Thread(target=httpserver.safe_start).start()\n\n while not httpserver.ready:\n time.sleep(0.1)\n\n request.addfinalizer(httpserver.stop)\n\n return httpserver", "def testEstablishWebRTCSshTunnel(self):\n fake_ip_addr = \"1.1.1.1\"\n fake_rsa_key_file = \"/tmp/rsa_file\"\n ssh_user = \"fake_user\"\n self.Patch(utils, \"ReleasePort\")\n self.Patch(utils, \"_ExecuteCommand\")\n self.Patch(subprocess, \"check_call\", return_value=True)\n extra_args_ssh_tunnel = \"-o command='shell %s %h' -o command1='ls -la'\"\n utils.EstablishWebRTCSshTunnel(\n ip_addr=fake_ip_addr, rsa_key_file=fake_rsa_key_file,\n ssh_user=ssh_user, extra_args_ssh_tunnel=None)\n args_list = [\"-i\", \"/tmp/rsa_file\",\n \"-o\", \"UserKnownHostsFile=/dev/null\",\n \"-o\", \"StrictHostKeyChecking=no\",\n \"-L\", \"8443:127.0.0.1:8443\",\n \"-L\", \"15550:127.0.0.1:15550\",\n \"-L\", \"15551:127.0.0.1:15551\",\n \"-N\", \"-f\", \"-l\", \"fake_user\", \"1.1.1.1\"]\n first_call_args = utils._ExecuteCommand.call_args_list[0][0]\n self.assertEqual(first_call_args[1], args_list)\n\n extra_args_ssh_tunnel = \"-o command='shell %s %h'\"\n utils.EstablishWebRTCSshTunnel(\n ip_addr=fake_ip_addr, rsa_key_file=fake_rsa_key_file,\n ssh_user=ssh_user, extra_args_ssh_tunnel=extra_args_ssh_tunnel)\n args_list_with_extra_args = [\"-i\", \"/tmp/rsa_file\",\n \"-o\", \"UserKnownHostsFile=/dev/null\",\n \"-o\", \"StrictHostKeyChecking=no\",\n \"-L\", \"8443:127.0.0.1:8443\",\n \"-L\", \"15550:127.0.0.1:15550\",\n \"-L\", \"15551:127.0.0.1:15551\",\n \"-N\", \"-f\", \"-l\", \"fake_user\", \"1.1.1.1\",\n \"-o\", \"command=shell %s %h\"]\n first_call_args = utils._ExecuteCommand.call_args_list[1][0]\n self.assertEqual(first_call_args[1], args_list_with_extra_args)", "def test_non_ssl_ports_after_enabling_tls(self):\n self.enable_tls_encryption_cli_on_nodes(nodes=[self.cluster.master])\n CbServer.use_https = True\n rest = RestConnection(self.cluster.master)\n for non_ssl_request in self.sample_urls_map.keys():\n api = non_ssl_request % self.cluster.master.ip\n try:\n rest._http_request(api=api, timeout=10)\n except Exception as _:\n ssl_request = self.sample_urls_map[non_ssl_request]\n api = ssl_request % self.cluster.master.ip\n status, content, response = rest._http_request(api=api, timeout=10)\n if not status:\n self.fail(\"{0} failed\".format(api))\n else:\n self.log.error(\"{0} worked\".format(api))\n\n self.disable_n2n_encryption_cli_on_nodes(nodes=[self.cluster.master])\n CbServer.use_https = False\n rest = RestConnection(self.cluster.master)\n for non_ssl_request in self.sample_urls_map.keys():\n api = non_ssl_request % self.cluster.master.ip\n status, content, response = rest._http_request(api=api, 
timeout=10)\n if not status:\n self.fail(\"{0} api failed with content {1}\".format(api, content))", "def do_CONNECT(self):\n self.log.debug('do_CONNECT called')\n pre_ssl_request = ssl_proxy_log.Request(self.path, 'CONNECT', self.headers,\n self.log, self.http_redirect_table, self.ssl_redirect_table)\n\n spoof = self.SSLSpoofCheck(self.path)\n\n if not spoof:\n request_log = self.BypassSSL(pre_ssl_request, self.connection)\n\n if not request_log:\n return\n\n if self.interactive:\n self.Log(request_log)\n\n return\n\n ssl_response = self.SpoofSSL(pre_ssl_request, self.connection)\n self.log.debug('do_CONNECT: Host to connect to: %s' % self.path)\n\n # Now that the Client thinks they are talking to the server, redo the\n # request processing as if we are the target server.\n self.raw_requestline = self.rfile.readline()\n if not self.raw_requestline:\n self.close_connection = 1\n return False\n\n if not self.parse_request(): # An error code has been sent, just exit\n return False\n\n mname = 'do_' + self.command\n\n if not hasattr(self, mname):\n self.send_error(501, \"Unsupported method (%r)\" % self.command)\n return False\n\n method = getattr(self, mname)\n\n # Build a new path for an HTTPS operation, and call the correct method\n target_host = pre_ssl_request.GetTargetHost()\n \n\n _parts = self.path.split('/', 3)\n if len(_parts) > 3:\n _path = _parts[3]\n else:\n _path = ''\n \n \n self.path = 'https://%s:%s/%s' % (target_host[0], target_host[1], _path)\n print('URL: %s' % self.path)\n\n if not hasattr(self, mname):\n self.send_error(501, \"Unsupported method (%r)\" % self.command)\n return\n\n self.log.debug('do_CONNECT: New SSL path: %s' % self.path)\n\n method = getattr(self, mname)\n method()", "async def handshake(self):\n if not self.wrapped:\n await self._communicate(self.tls.do_handshake)\n self.client_random, self.master_secret = get_ssl_master_key(self.tls)\n\n self.wrapped = True", "def start_secure_mqtt_server(run_event):\n print('START SECURE MQTT SERVER')\n cmd = ['mosquitto', '-v', '-c', '/etc/mosquitto/mosquitto-ssl.conf']\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=False)\n while run_event.is_set():\n time.sleep(1)\n process.terminate()\n process.wait()", "def connect(self):\n # Standard implementation from HTTPSConnection, which is not\n # designed for extension, unfortunately\n sock = socket.create_connection((self.host, self.port),\n self.timeout, self.source_address)\n if getattr(self, '_tunnel_host', None):\n self.sock = sock\n self._tunnel()\n\n # This is the only difference; default wrap_socket uses SSLv23\n self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1_2)", "def run_client(host, port, cafile):\n loop = asyncio.get_event_loop()\n client = ChatClient()\n\n if cafile:\n print('Encrpyted')\n print(cafile)\n purpose = ssl.Purpose.SERVER_AUTH\n context = ssl.create_default_context(purpose, cafile=cafile)\n coro = loop.create_connection(lambda: client, host, port, ssl=context, server_hostname='localhost')\n loop.run_until_complete(coro)\n asyncio.async(handle_user_input(loop, client))\n\n else:\n coro = loop.create_connection(lambda: client, host, port)\n loop.run_until_complete(coro)\n asyncio.async(handle_user_input(loop, client))\n\n try:\n loop.run_forever()\n finally:\n loop.close()", "def connect(\n self, login, password, authz_id=b\"\", starttls=False,\n authmech=None):\n try:\n self.sock = socket.create_connection((self.srvaddr, self.srvport))\n self.sock.settimeout(Client.read_timeout)\n 
except socket.error as msg:\n raise Error(\"Connection to server failed: %s\" % str(msg))\n\n if not self.__get_capabilities():\n raise Error(\"Failed to read capabilities from server\")\n if starttls and not self.__starttls():\n return False\n if self.__authenticate(login, password, authz_id, authmech):\n return True\n return False", "def ssl_connect(host, port, keyfile=None, certfile=None, ca_certs=None,\n cert_reqs=None, ssl_version=None, ciphers=None,\n service=VoidService, config={}, ipv6=False, keepalive=False, verify_mode=None):\n ssl_kwargs = {\"server_side\": False}\n if keyfile is not None:\n ssl_kwargs[\"keyfile\"] = keyfile\n if certfile is not None:\n ssl_kwargs[\"certfile\"] = certfile\n if verify_mode is not None:\n ssl_kwargs[\"cert_reqs\"] = verify_mode\n else:\n ssl_kwargs[\"cert_reqs\"] = ssl.CERT_NONE\n if ca_certs is not None:\n ssl_kwargs[\"ca_certs\"] = ca_certs\n ssl_kwargs[\"cert_reqs\"] = ssl.CERT_REQUIRED\n if cert_reqs is not None:\n ssl_kwargs[\"cert_reqs\"] = cert_reqs\n elif cert_reqs != ssl.CERT_NONE:\n ssl_kwargs[\"check_hostname\"] = False\n if ssl_version is not None:\n ssl_kwargs[\"ssl_version\"] = ssl_version\n if ciphers is not None:\n ssl_kwargs[\"ciphers\"] = ciphers\n s = SocketStream.ssl_connect(host, port, ssl_kwargs, ipv6=ipv6, keepalive=keepalive)\n return connect_stream(s, service, config)", "def tls_http_server(request):\n return functools.partial(make_tls_http_server, request=request)", "async def _open_connection_https(self, location):\n sock = await connect_tcp(\n location[0],\n location[1],\n ssl_context=self.ssl_context,\n local_host=self.source_address,\n tls=True,\n tls_standard_compatible=False,\n )\n sock._active = True\n return sock", "def test_tls_in_tls_makefile_rw_text(self) -> None:\n self.start_destination_server()\n self.start_proxy_server()\n\n sock = socket.create_connection(\n (self.proxy_server.host, self.proxy_server.port)\n )\n with self.client_context.wrap_socket(\n sock, server_hostname=\"localhost\"\n ) as proxy_sock:\n with SSLTransport(\n proxy_sock, self.client_context, server_hostname=\"localhost\"\n ) as destination_sock:\n read = destination_sock.makefile(\"r\", encoding=\"utf-8\")\n write = destination_sock.makefile(\"w\", encoding=\"utf-8\")\n\n write.write(sample_request(binary=False)) # type: ignore[arg-type, call-overload]\n write.flush()\n\n response = read.read()\n assert isinstance(response, str)\n if \"\\r\" not in response:\n # Carriage return will be removed when reading as a file on\n # some platforms. 
We add it before the comparison.\n assert isinstance(response, str)\n response = response.replace(\"\\n\", \"\\r\\n\")\n validate_response(response, binary=False)", "def test_http_over_https_error(\n http_request_timeout,\n tls_http_server, adapter_type,\n ca, ip_addr,\n tls_certificate,\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n):\n # disable some flaky tests\n # https://github.com/cherrypy/cheroot/issues/225\n issue_225 = (\n IS_MACOS\n and adapter_type == 'builtin'\n )\n if issue_225:\n pytest.xfail('Test fails in Travis-CI')\n\n tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)\n tls_adapter = tls_adapter_cls(\n tls_certificate_chain_pem_path, tls_certificate_private_key_pem_path,\n )\n if adapter_type == 'pyopenssl':\n tls_adapter.context = tls_adapter.get_context()\n\n tls_certificate.configure_cert(tls_adapter.context)\n\n interface, _host, port = _get_conn_data(ip_addr)\n tlshttpserver = tls_http_server((interface, port), tls_adapter)\n\n interface, _host, port = _get_conn_data(\n tlshttpserver.bind_addr,\n )\n\n fqdn = interface\n if ip_addr is ANY_INTERFACE_IPV6:\n fqdn = '[{fqdn}]'.format(**locals())\n\n expect_fallback_response_over_plain_http = (\n (\n adapter_type == 'pyopenssl'\n )\n )\n if expect_fallback_response_over_plain_http:\n resp = requests.get(\n 'http://{host!s}:{port!s}/'.format(host=fqdn, port=port),\n timeout=http_request_timeout,\n )\n assert resp.status_code == 400\n assert resp.text == (\n 'The client sent a plain HTTP request, '\n 'but this server only speaks HTTPS on this port.'\n )\n return\n\n with pytest.raises(requests.exceptions.ConnectionError) as ssl_err:\n requests.get( # FIXME: make stdlib ssl behave like PyOpenSSL\n 'http://{host!s}:{port!s}/'.format(host=fqdn, port=port),\n timeout=http_request_timeout,\n )\n\n if IS_LINUX:\n expected_error_code, expected_error_text = (\n 104, 'Connection reset by peer',\n )\n if IS_MACOS:\n expected_error_code, expected_error_text = (\n 54, 'Connection reset by peer',\n )\n if IS_WINDOWS:\n expected_error_code, expected_error_text = (\n 10054,\n 'An existing connection was forcibly closed by the remote host',\n )\n\n underlying_error = ssl_err.value.args[0].args[-1]\n err_text = str(underlying_error)\n assert underlying_error.errno == expected_error_code, (\n 'The underlying error is {underlying_error!r}'.\n format(**locals())\n )\n assert expected_error_text in err_text", "def validate_tls_min_version(self, node=None, version=\"1.2\", expect=\"fail\"):\n if node is None:\n node = self.cluster.master\n cmd = self.curl_path + \" -v --tlsv\" + version + \" --tls-max \" + version + \\\n \" -u \" + node.rest_username + \":\" + node.rest_password + \\\n \" https://\" + node.ip + \":18091/pools/ -k\"\n shell = RemoteMachineShellConnection(node)\n o, e = shell.execute_command(cmd)\n if expect == \"fail\":\n if len(o) != 0:\n shell.disconnect()\n self.fail(\"Command worked when it should have failed\")\n else:\n if len(o) == 0 or \"pools\" not in o[0]:\n shell.disconnect()\n self.fail(\"Command failed when it should have worked\")\n shell.disconnect()", "async def protocol_factory(cot_url):\n reader = None\n writer = None\n scheme = cot_url.scheme.lower()\n\n if scheme in [\"tcp\"]:\n host, port = pytak.parse_cot_url(cot_url)\n reader, writer = await asyncio.open_connection(host, port)\n elif scheme in [\"tls\", \"ssl\"]:\n host, port = pytak.parse_cot_url(cot_url)\n\n client_cert = os.getenv(\"PYTAK_TLS_CLIENT_CERT\")\n client_key = os.getenv(\"PYTAK_TLS_CLIENT_KEY\")\n 
client_cafile = os.getenv(\"PYTAK_TLS_CLIENT_CAFILE\")\n client_ciphers = os.getenv(\n \"PYTAK_TLS_CLIENT_CIPHERS\", pytak.DEFAULT_FIPS_CIPHERS)\n\n dont_check_hostname = bool(os.getenv(\"PYTAK_TLS_DONT_CHECK_HOSTNAME\"))\n dont_verify = bool(os.getenv(\"PYTAK_TLS_DONT_VERIFY\"))\n\n # SSL Context setup:\n ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)\n ssl_ctx.options |= ssl.OP_NO_TLSv1\n ssl_ctx.options |= ssl.OP_NO_TLSv1_1\n ssl_ctx.set_ciphers(client_ciphers)\n ssl_ctx.check_hostname = True\n ssl_ctx.verify_mode = ssl.VerifyMode.CERT_REQUIRED\n\n if client_key:\n ssl_ctx.load_cert_chain(client_cert, keyfile=client_key)\n else:\n ssl_ctx.load_cert_chain(client_cert)\n\n if client_cafile:\n ssl_ctx.load_verify_locations(cafile=client_cafile)\n\n # Default to verifying cert:\n if dont_verify:\n print(\n \"pytak TLS Certificate Verification DISABLED by Environment.\")\n print(\"pytak TLS Hostname Check DISABLED by Environment.\")\n ssl_ctx.check_hostname = False\n ssl_ctx.verify_mode = ssl.CERT_NONE\n\n # Default to checking hostnames:\n if dont_check_hostname:\n print(\"pytak TLS Hostname Check DISABLED by Environment.\")\n ssl_ctx.check_hostname = False\n\n\n reader, writer = await asyncio.open_connection(host, port, ssl=ssl_ctx)\n elif scheme in [\"udp\"]:\n writer = await pytak.udp_client(cot_url)\n else:\n raise Exception(\n \"Please specify a protocol in your CoT Destination URL, \"\n \"for example: tcp:xxx:9876, tls:xxx:1234, udp:xxx:9999, etc.\")\n\n return reader, writer", "def command(\n cmd,\n hostname=None,\n username=None,\n key_filename=None,\n timeout=None,\n connection_timeout=None,\n port=22,\n background=False,\n) -> Union[None, SSHCommandResult]:\n if hostname is None:\n raise ValueError(\"Can not start SSH client. The 'hostname' argument is missing.\")\n if timeout is None:\n timeout = COMMAND_TIMEOUT\n if connection_timeout is None:\n connection_timeout = CONNECTION_TIMEOUT\n if background:\n with get_channel(\n hostname=hostname, username=username, key_filename=key_filename, timeout=timeout, port=port\n ) as channel:\n channel.exec_command(cmd)\n else:\n with get_connection(\n hostname=hostname, username=username, key_filename=key_filename, timeout=connection_timeout, port=port\n ) as connection:\n return execute_command(cmd, connection, timeout, connection_timeout)", "def setHost(host, port, ssl=0):", "def insecure_tls(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"insecure_tls\")", "def configureSSL(domainName,dry=False):\n \n #enable ssl mod\n execute(subprocess.call,[\"a2enmod\",\"ssl\"],dry=dry)\n restartApache(dry=dry)\n \n #create input string for openssl command\n inputStr='CA\\nNova Scotia\\nHalifax\\nCompute Canada\\nACENET\\n'+domainName+'\\[email protected]\\n'\n \n #create ssl cert\n #Note that dry is fixed to be False, creating the cert doesn't really cause a problem except \n #it might overwrite an existing cert, and if it isn't actually executed the following steps will not be able to execute\n p=execute(subprocess.Popen,[\"openssl\",\"req\",\"-x509\",\"-nodes\"\n ,\"-days\",\"3650\"\n ,\"-newkey\",\"rsa:2048\"\n ,\"-keyout\",\"/etc/ssl/private/server.key\"\n ,\"-out\",\"/etc/ssl/certs/server.crt\"]\n ,stdout=subprocess.PIPE,stdin=subprocess.PIPE,stderr=subprocess.STDOUT,dry=dry)\n \n #have to handle dry runs in a special way as this command (dry or not) \n #depends on p not being None\n if not dry:\n output=execute(p.communicate,input=inputStr.encode('utf-8'),dry=dry)[0]\n else:\n print(\"p.communicate(input=\"+inputStr+\")\")\n \n #Set 
correct ownership and permission of key\n execute(subprocess.call,[\"sudo\",\"chown\",\"root:ssl-cert\",\"/etc/ssl/private/server.key\"],dry=dry)\n execute(subprocess.call,[\"sudo\",\"chmod\",\"640\",\"/etc/ssl/private/server.key\"],dry=dry)\n \n #comment out any previous settings\n execute(commentOutLineMatching,\".*SSLCertificateFile.*\",\"/etc/apache2/sites-available/default-ssl.conf\",dry=dry)#not matching\n execute(commentOutLineMatching,\".*SSLCertificateKeyFile.*\",\"/etc/apache2/sites-available/default-ssl.conf\",dry=dry)#not matching\n execute(commentOutLineMatching,\".*SSLCertificateChainFile.*\",\"/etc/apache2/sites-available/default-ssl.conf\",dry=dry)#not matching\n \n #add settings before for improved security </VirtualHost>\n execute(replaceStrInFileRe,\"</VirtualHost>\"\n ,\"\\tSSLCertificateFile /etc/ssl/certs/server.crt\\n\"\n +\"\\t\\tSSLCertificateKeyFile /etc/ssl/private/server.key\\n\"\n +\"\\t\\tSSLCertificateChainFile /etc/ssl/certs/server.crt\\n\"\n +\"\\t\\tServerName \"+domainName+\"\\n\"\n +\"\\t\\tServerAlias www.\"+domainName+\"\\n\"\n +\"\\t\\tSSLProtocol all -SSLv2 -SSLv3\\n\"\n +\"\\t\\tSSLCipherSuite HIGH:MEDIUM:!aNULL:!MD5:!SEED:!IDEA:!RC4\\n\"\n +\"\\t\\tSSLHonorCipherOrder on\\n\"\n +\"\\t</VirtualHost>\",\"/etc/apache2/sites-available/default-ssl.conf\",dry=dry)\n \n #add redirect to https\n execute(replaceStrInFileRe,\"</VirtualHost>\"\n ,\"\\tRedirect permanent / https://\"+domainName+\"/\\n</VirtualHost>\\n\"\n ,\"/etc/apache2/sites-available/000-default.conf\",dry=dry)\n \n #enable ssl on our virtual host\n execute(subprocess.call,[\"a2ensite\",\"default-ssl.conf\"])\n execute(subprocess.call,[\"service\",\"apache2\",\"restart\"])", "async def test_plain_smtp_connect(preset_client):\n await preset_client.connect()\n assert preset_client.is_connected\n\n await preset_client.quit()\n assert not preset_client.is_connected", "def smtplib_connector(hostname, port, username=None, password=None, use_ssl=False):\n\n def connect():\n import smtplib\n\n ctor = smtplib.SMTP_SSL if use_ssl else smtplib.SMTP\n conn = ctor(hostname, port)\n if use_ssl:\n import ssl\n\n context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)\n conn.ehlo()\n conn.starttls(context=context)\n conn.ehlo()\n if username or password:\n conn.login(username, password)\n return conn\n\n return connect", "def startup():\n\n # Earlier versions of traffic_ctl do not support\n # \"server start\", so we prefer traffic_line here.\n if _TRAFFICLINE:\n cmd = _traffic_line(\"-U\")\n else:\n cmd = _traffic_ctl(\"server\", \"start\")\n\n _subprocess(cmd)\n return _statuscmd()", "def tls_enabled(self):\n has_cert = getattr(self.args, 'ssl_certfile', None) is not None\n has_key = getattr(self.args, 'ssl_keyfile', None) is not None\n tls = getattr(self.args, 'tls', False)\n return tls or self.is_sandbox or (has_cert and has_key)", "def run(ctx, user_cmd):\n connecter = ScalingoInterface(ctx.obj)\n connecter.run(user_cmd)", "def __init__(self, tls_1_2=None, tls_1_1=None, tls_1_0=None, ssl_3_0=None):\n self.tls_1_2 = tls_1_2\n self.tls_1_1 = tls_1_1\n self.tls_1_0 = tls_1_0\n self.ssl_3_0 = ssl_3_0", "def open_https(self, url, data=None, ssl_context=None):\n # type: (AnyStr, Optional[bytes], Optional[SSL.Context]) -> addinfourl\n if ssl_context is not None and isinstance(ssl_context, SSL.Context):\n self.ctx = ssl_context\n else:\n self.ctx = SSL.Context()\n user_passwd = None\n if isinstance(url, six.string_types):\n try: # python 2\n # http://pydoc.org/2.5.1/urllib.html\n host, selector = splithost(url)\n if 
host:\n user_passwd, host = splituser(host)\n host = unquote(host)\n realhost = host\n except NameError: # python 3 has no splithost\n # https://docs.python.org/3/library/urllib.parse.html\n parsed = urlparse(url)\n host = parsed.hostname\n if parsed.port:\n host += \":{0}\".format(parsed.port)\n user_passwd = parsed.username\n if parsed.password:\n user_passwd += \":{0}\".format(parsed.password)\n selector = parsed.path\n else:\n host, selector = url\n urltype, rest = splittype(selector)\n url = rest\n user_passwd = None\n if urltype.lower() != 'http':\n realhost = None\n else:\n try: # python 2\n realhost, rest = splithost(rest)\n if realhost:\n user_passwd, realhost = splituser(realhost)\n if user_passwd:\n selector = \"%s://%s%s\" % (urltype, realhost, rest)\n except NameError: # python 3 has no splithost\n parsed = urlparse(rest)\n host = parsed.hostname\n if parsed.port:\n host += \":{0}\".format(parsed.port)\n user_passwd = parsed.username\n if parsed.password:\n user_passwd += \":{0}\".format(parsed.password)\n # print(\"proxy via http:\", host, selector)\n if not host:\n raise IOError('http error', 'no host given')\n if user_passwd:\n import base64\n auth = base64.encodestring(user_passwd).strip()\n else:\n auth = None\n # Start here!\n h = httpslib.HTTPSConnection(host=host, ssl_context=self.ctx)\n # h.set_debuglevel(1)\n # Stop here!\n if data is not None:\n h.putrequest('POST', selector)\n h.putheader('Content-type', 'application/x-www-form-urlencoded')\n h.putheader('Content-length', '%d' % len(data))\n else:\n h.putrequest('GET', selector)\n if auth:\n h.putheader('Authorization', 'Basic %s' % auth)\n for args in self.addheaders:\n h.putheader(*args) # for python3 - used to use apply\n h.endheaders()\n if data is not None:\n h.send(data + '\\r\\n')\n # Here again!\n resp = h.getresponse()\n fp = resp.fp\n return addinfourl(fp, resp.msg, \"https:\" + url)\n # Stop again.", "def test_apache_https_request_status_and_security(host):\n args = \"\"\"url=https://127.0.0.1/server-status\n follow_redirects=none validate_certs=no\"\"\"\n\n request = host.ansible(\"uri\", args, check=False)\n assert request[\"status\"] == 200\n assert request[\"server\"] == \"Apache\"\n hsts = \"max-age=63072000; includeSubDomains\"\n assert request[\"strict_transport_security\"] == hsts", "def ssh_command (user, host, password, command):\n ssh_newkey = 'Are you sure you want to continue connecting (yes/no)?'\n child = pexpect.spawn('ssh -l %s %s %s'%(user, host, command))\n i = child.expect([ssh_newkey, PASSWORD, pexpect.TIMEOUT])\n if i == 0: # First Time access - send yes to connect.\n child.sendline ('yes')\n child.expect (PASSWORD)\n i = child.expect([PASSWORD,pexpect.TIMEOUT])\n if i == 0: # prompted for password\n child.sendline(password)\n elif i == 1: # Got Timeout\n print 'ERROR!'\n print 'SSH could not login. Here is what SSH said:'\n print child.before, child.after\n print str(child)\n return None\n if i == 1: # Asked for Password - provide it.\n child.sendline(password)\n elif i == 2:\n print 'ERROR!'\n print 'SSH could not login. 
Here is what SSH said:'\n print child.before, child.after\n print str(child)\n return None\n return child", "def connect(self):\r\n try:\r\n self.host_win_ip = \"http://\" + self.host_ip + \":5985/wsman\"\r\n self.conn = Protocol(\r\n endpoint=self.host_win_ip,\r\n transport=\"ntlm\",\r\n username=self.usr,\r\n password=self.pwd,\r\n server_cert_validation=\"ignore\")\r\n logger.warn(\"Connecting Windows ...\")\r\n self.shell_id = self.conn.open_shell()\r\n logger.warn(self.shell_id)\r\n logger.warn('Connected to Windows.')\r\n except Exception as error:\r\n msg_exception_error = \"Exception raised: %s \" % error\r\n raise(msg_exception_error)", "def startSSL(self, ssl_options={}):\n if self.ssl_enabled:\n raise RuntimeError(\"startSSL() called on SSL-enabled %r.\" % self)\n\n if self._closed:\n raise RuntimeError(\"startSSL() called on closed %r.\" % self)\n\n if ssl_options.setdefault(\"server_side\", True) is not True:\n raise ValueError(\"SSL option 'server_side' must be True.\")\n\n if ssl_options.setdefault(\"do_handshake_on_connect\", False) is not False:\n raise ValueError(\"SSL option 'do_handshake_on_connect' must be False.\")\n\n self.ssl_enabled = True\n self._ssl_options = ssl_options\n\n return self", "def negotiate_tls(self, tcp_sock, peer_ipaddr):\n try:\n # Note that SNI is mandatory for HTTP/2, so you *must* pass the\n # server_hostname argument.\n if self.client_side == False:\n self.tls_conn = self.ctx.wrap_socket(tcp_sock, server_side=True)\n else:\n self.tls_conn = self.ctx.wrap_socket(tcp_sock, server_hostname=peer_ipaddr)\n except Exception as e:\n #print(\"Fail to create tls connection1!! : client_side=%s, Err=%s\" % (self.client_side, e))\n return None\n\t\n # Always prefer the result from ALPN to that from NPN.\n # You can only check what protocol was negotiated once the handshake is\n # complete.\n try:\n negotiated_protocol = self.tls_conn.selected_alpn_protocol()\n if negotiated_protocol is None:\n negotiated_protocol = self.tls_conn.selected_npn_protocol()\n\n if negotiated_protocol != \"h2\":\n print(\"Err. negotiated_protocol=%s\" % (negotiated_protocol))\n raise RuntimeError(\"Didn't negotiate HTTP/2!\")\n except Exception as e:\n print(\"Fail to create tls connection2!! 
: %s\" % (e))\n return None\n \n #print(\"%s\" % self.ctx.client_random())\n #print(\"%s\" % ssl.Connection.client_random())\n #print(self.tls_conn._sslobj.SSL_get_client_random())\n #print(self.ctx.SSL_get_client_random())\n\n return self.tls_conn", "def cli_run(host_ip:str, linux_user:str, linux_password:str, cmd:str)->dict:\n try:\n c = Connection(linux_user + \"@\" + host_ip, connect_kwargs={'password':linux_password})\n return c.run(cmd, warn=True)\n except Exception as e:\n return {\"Error\": str(e)}", "def soap_client(self):\n context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\n context.verify_mode = ssl.CERT_NONE\n return pyVim.connect.SmartConnect(\n host=self.host,\n user=self.settings.username,\n pwd=self.settings.password,\n port=443,\n sslContext=context,\n )", "def check_connect():\n arg_parser = resilient.ArgumentParser(resilient.get_config_file())\n host = arg_parser.getopt(\"resilient\", \"host\")\n #\n # Use Openssl first\n #\n print(\"-------------------------------------\")\n print(\"Using openssl to connect to resilient\")\n print(\"-------------------------------------\")\n command = \"openssl s_client -connect {}:443\".format(host)\n user = arg_parser.getopt(\"resilient\", \"email\")\n password = arg_parser.getopt(\"resilient\", \"password\")\n process = subprocess.Popen(\"/bin/bash\", stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n out, err = process.communicate(command)\n cafile = arg_parser.getopt(\"resilient\", \"cafile\")\n verify = True\n if cafile is not None and cafile == \"false\":\n verify = False\n print(out)\n if err is not None:\n print(err)\n\n print(\"---------------------------------------------\")\n print(\"Using python requests to connect to resilient\")\n print(\"---------------------------------------------\")\n\n rest_url = \"https://{}:443/rest/session\".format(host)\n data = '{\"email\": \"' + user + '\",\"password\":\"' + password + '\", \"interactive\": true}'\n try:\n header = {\"Content-Type\": \"application/json\"}\n resp = requests.post(rest_url,\n data=data,\n headers=header,\n verify=verify)\n print(\"\\tResponse: \" + str(resp))\n\n except Exception as e:\n print(\"\\tConnection failed!!\")\n print(\"\\t\" + str(e))", "def _login(self):\n self._smtp = smtplib.SMTP(host=self._config.host,\n port=self._config.port)\n # send 'hello' to SMTP server\n self._smtp.ehlo()\n # start TLS encryption\n self._smtp.starttls()\n self._smtp.login(self._config.sender_email, self._config.password)\n self._connected = True", "def command(cmd: str) -> str:\n with MCRcon(SERVERADDRESS, PASSWORD) as mcr:\n response = mcr.command(cmd)\n return response", "def do_command(command):\n send_command(command)\n # time.sleep(0.1) # may be required on slow machines\n response = get_response()\n print(\"Rcvd: <<< \" + response)\n return response", "def enable_end_to_end_tls(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_end_to_end_tls\")", "def test_disable_tls(cassandra_service):\n update_service_transport_encryption(\n cassandra_service, enabled=False, allow_plaintext=False)\n verify_client_can_write_read_and_delete()", "def server_cmd(command):\n\tp = Popen([COMMAND_SCRIPT, command], stderr=PIPE)\n\tret = p.wait()\n\tif ret:\n\t\tout, err = p.communicate()\n\t\traise OSError(command, ret, err.read().strip())\n\treturn", "def cli(ctx, url, login, password, no_verify, ssl_certificate, debug,\n timeout, no_pager, pager, color, output_format):\n\n pagerize = False\n if no_pager:\n warnings.warn(\n \"--no-pager (or environment variable 
DECAPOD_NO_PAGER) \"\n \"is deprecated. This is default behavior now. If you want \"\n \"pager support, please use --pager option.\",\n PendingDeprecationWarning\n )\n if pager:\n pagerize = True\n\n if ssl_certificate:\n ssl_certificate.close()\n ssl_certificate = ssl_certificate.name\n\n ctx.obj = {\n \"url\": url,\n \"login\": login,\n \"password\": password,\n \"debug\": debug,\n \"timeout\": timeout,\n \"format\": output_format,\n \"pager\": pagerize,\n \"color\": color,\n \"client\": decapodlib.Client(\n url, login, password,\n timeout=timeout, verify=not no_verify,\n certificate_file=ssl_certificate\n )\n }\n utils.configure_logging(debug)\n\n ctx.call_on_close(ctx.obj[\"client\"].logout)", "async def connect(\n address,\n device_id,\n local_key,\n protocol_version,\n enable_debug,\n listener=None,\n port=6668,\n timeout=5,\n):\n loop = asyncio.get_running_loop()\n on_connected = loop.create_future()\n _, protocol = await loop.create_connection(\n lambda: TuyaProtocol(\n device_id,\n local_key,\n protocol_version,\n enable_debug,\n on_connected,\n listener or EmptyListener(),\n ),\n address,\n port,\n )\n\n await asyncio.wait_for(on_connected, timeout=timeout)\n return protocol", "def send_telnet_command(command):\n\n tn = telnetlib.Telnet(stb_parameters.STB_IP)\n tn.read_until(bytes(\"login: \", 'UTF-8'))\n tn.write(bytes(stb_parameters.STB_USER_NAME + \"\\n\", 'UTF-8'))\n tn.write(bytes(command + \"\\n\", 'UTF-8'))\n tn.write(bytes(\"exit\\n\", 'UTF-8'))\n result = tn.read_all().decode('ascii')\n return result", "def ConnectSSL(self):\n with open(self.DEFAULT_CLIENT_KEY_FILE, 'rb') as f:\n private_key = f.read()\n with open(self.DEFAULT_CLIENT_CHAIN_FILE, 'rb') as f:\n certificate_chain = f.read()\n with open(self.DEFAULT_ROOT_CERT_FILE, 'rb') as f:\n root_ca = f.read()\n credentials = grpc.ssl_channel_credentials(root_certificates=root_ca, private_key=private_key, certificate_chain=certificate_chain)\n self.channel = grpc.secure_channel(self.address, credentials)\n self._setup()", "def ssl_wrap_socket(socket, ssl_options, server_hostname=..., **kwargs):\n ...", "def ssl(self, cainfo=None, verify=True, cert=None, key=None):\n if cainfo:\n self.curl.setopt(pycurl.CAINFO, cainfo)\n\n if verify == False:\n self.curl.setopt(pycurl.SSL_VERIFYPEER, 0)\n self.curl.setopt(pycurl.SSL_VERIFYHOST, 0)\n else:\n self.curl.setopt(pycurl.SSL_VERIFYPEER, 1)\n self.curl.setopt(pycurl.SSL_VERIFYHOST, 2)\n if cert:\n #self.curl.setopt(pycurl.SSLCERTTYPE, \"DER\")\n self.curl.setopt(pycurl.SSLCERT, cert)\n if key:\n self.curl.setopt(pycurl.SSLKEY, key)", "def ssh_cmd(ctx):\n pass", "def run(self, verbose=True):\n if verbose:\n scheme = \"https\" if self.config.ssl_enabled else \"http\"\n print(\"[mathenjeu] running on '{}' and {}\".format(\n scheme, self.config.bind))\n self._run(self.config)", "def send_telnet_command(self, cmd):\n data = bytes(cmd)\n self.send_to_client(data)", "def command():\n server = get_server()\n port = get_port()\n \n click.echo(f'{server.get(\"hostname\")}:{port} -> localhost:{port}')\n click.echo('CTRL+C for quit')\n bash('ssh -N -L {port}:localhost:{port} -i {ssh_key_path} {username}@{hostname}'.format(\n ssh_key_path=server.get('ssh_key_path'),\n username=server.get('username'),\n hostname=server.get('hostname'),\n port=port\n ))", "def test_connect_ex(self):\n port = socket_any_family()\n port.bind((\"\", 0))\n port.listen(3)\n\n clientSSL = Connection(Context(SSLv23_METHOD), socket(port.family))\n clientSSL.setblocking(False)\n result = 
clientSSL.connect_ex(port.getsockname())\n expected = (EINPROGRESS, EWOULDBLOCK)\n assert result in expected", "def do_command(command):\n send_command(command)\n response = get_response()\n print(\"Rcvd: <<< \\n\" + response)\n return response", "def __init__(self, sslenabled, authenticator):\n Command.__init__(self, sslenabled, 'localhost', '/')\n # save the ssl status for the various reinits done for each API call supported\n self.sslenabled = sslenabled\n self.authenticator = authenticator\n self.auth = authenticator\n self.log = logging.getLogger(__name__)", "def _client_cmd(self, cmd):\n logging.info('Client cmd: [%s]', cmd)\n return self._client.run(cmd)", "def _connect(self):\n\n # Get the timeout\n m_timeout = OMPv4.TIMEOUT\n if self.__timeout:\n m_timeout = self.__timeout\n\n # Connect to the server\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(m_timeout)\n try:\n sock.connect((self.__host, int(self.__port)))\n except socket.error, e:\n raise ServerError(str(e))\n self.socket = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLSv1)\n\n # Authenticate to the server\n self._authenticate(self.__username, self.__password)", "def test_connect(self):\n port = socket_any_family()\n port.bind((\"\", 0))\n port.listen(3)\n\n clientSSL = Connection(Context(SSLv23_METHOD), socket(port.family))\n clientSSL.connect((loopback_address(port), port.getsockname()[1]))\n # XXX An assertion? Or something?", "def _Run(args, holder, ssl_certificate_ref):\n client = holder.client\n\n certificate_type, self_managed, managed = _ParseCertificateArguments(\n client, args)\n\n if ssl_certificates_utils.IsRegionalSslCertificatesRef(ssl_certificate_ref):\n request = client.messages.ComputeRegionSslCertificatesInsertRequest(\n sslCertificate=client.messages.SslCertificate(\n type=certificate_type,\n name=ssl_certificate_ref.Name(),\n selfManaged=self_managed,\n managed=managed,\n description=args.description),\n region=ssl_certificate_ref.region,\n project=ssl_certificate_ref.project)\n else:\n request = client.messages.ComputeSslCertificatesInsertRequest(\n sslCertificate=client.messages.SslCertificate(\n type=certificate_type,\n name=ssl_certificate_ref.Name(),\n selfManaged=self_managed,\n managed=managed,\n description=args.description),\n project=ssl_certificate_ref.project)\n\n if ssl_certificates_utils.IsRegionalSslCertificatesRef(ssl_certificate_ref):\n collection = client.apitools_client.regionSslCertificates\n else:\n collection = client.apitools_client.sslCertificates\n\n return client.MakeRequests([(collection, 'Insert', request)])", "def test_servername(self):\n args = []\n\n def servername(conn):\n args.append((conn, conn.get_servername()))\n\n context = Context(SSLv23_METHOD)\n context.set_tlsext_servername_callback(servername)\n\n # Necessary to actually accept the connection\n context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))\n context.use_certificate(\n load_certificate(FILETYPE_PEM, server_cert_pem)\n )\n\n # Do a little connection to trigger the logic\n server = Connection(context, None)\n server.set_accept_state()\n\n client = Connection(Context(SSLv23_METHOD), None)\n client.set_connect_state()\n client.set_tlsext_host_name(b\"foo1.example.com\")\n\n interact_in_memory(server, client)\n\n assert args == [(server, b\"foo1.example.com\")]", "def start(self):\n if self._use_ssl:\n try:\n ca_file = CONF.ssl_ca_file\n cert_file = CONF.ssl_cert_file\n key_file = CONF.ssl_key_file\n\n if cert_file and not os.path.exists(cert_file):\n raise 
RuntimeError(\n _(\"Unable to find cert_file : %s\") % cert_file)\n\n if ca_file and not os.path.exists(ca_file):\n raise RuntimeError(\n _(\"Unable to find ca_file : %s\") % ca_file)\n\n if key_file and not os.path.exists(key_file):\n raise RuntimeError(\n _(\"Unable to find key_file : %s\") % key_file)\n\n if self._use_ssl and (not cert_file or not key_file):\n raise RuntimeError(\n _(\"When running server in SSL mode, you must \"\n \"specify both a cert_file and key_file \"\n \"option value in your configuration file\"))\n ssl_kwargs = {\n 'server_side': True,\n 'certfile': cert_file,\n 'keyfile': key_file,\n 'cert_reqs': ssl.CERT_NONE,\n }\n\n if CONF.ssl_ca_file:\n ssl_kwargs['ca_certs'] = ca_file\n ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED\n\n self._socket = eventlet.wrap_ssl(self._socket,\n **ssl_kwargs)\n\n self._socket.setsockopt(socket.SOL_SOCKET,\n socket.SO_REUSEADDR, 1)\n # sockets can hang around forever without keepalive\n self._socket.setsockopt(socket.SOL_SOCKET,\n socket.SO_KEEPALIVE, 1)\n\n # This option isn't available in the OS X version of eventlet\n if hasattr(socket, 'TCP_KEEPIDLE'):\n self._socket.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPIDLE,\n CONF.tcp_keepidle)\n\n except Exception:\n with excutils.save_and_reraise_exception():\n LOG.error(_(\"Failed to start %(name)s on %(host)s\"\n \":%(port)s with SSL support\") % self.__dict__)\n\n wsgi_kwargs = {\n 'func': eventlet.wsgi.server,\n 'sock': self._socket,\n 'site': self.app,\n 'protocol': self._protocol,\n 'custom_pool': self._pool,\n 'log': self._wsgi_logger,\n 'log_format': CONF.wsgi_log_format\n }\n\n if self._max_url_len:\n wsgi_kwargs['url_length_limit'] = self._max_url_len\n\n self._server = eventlet.spawn(**wsgi_kwargs)", "def test_set_tlsext_host_name_wrong_args(self):\n conn = Connection(Context(SSLv23_METHOD), None)\n with pytest.raises(TypeError):\n conn.set_tlsext_host_name(object())\n with pytest.raises(TypeError):\n conn.set_tlsext_host_name(b\"with\\0null\")\n\n with pytest.raises(TypeError):\n conn.set_tlsext_host_name(b\"example.com\".decode(\"ascii\"))", "def _context(use_tls=False):\n if use_tls is False:\n return None\n config = Config()\n ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n ctx.load_cert_chain(config.tls_cert, config.tls_key)\n ctx.options |= ssl.OP_NO_SSLv2\n ctx.options |= ssl.OP_NO_SSLv3\n ctx.options |= ssl.OP_NO_COMPRESSION\n ctx.options |= ssl.OP_CIPHER_SERVER_PREFERENCE\n if not config.args.less_secure:\n ctx.options |= ssl.OP_SINGLE_DH_USE\n ctx.options |= ssl.OP_SINGLE_ECDH_USE\n ctx.set_ciphers(\":\".join(ciphers))\n if config.tls_dhparams:\n ctx.load_dh_params(config.tls_dhparams)\n return ctx" ]
[ "0.7027462", "0.6803335", "0.6757425", "0.6352501", "0.6308599", "0.6119223", "0.6119223", "0.59911376", "0.5971303", "0.5914122", "0.57055014", "0.5575204", "0.55595917", "0.5529245", "0.55291855", "0.53638923", "0.5362069", "0.5353421", "0.5348377", "0.5279511", "0.52661175", "0.52056634", "0.51731616", "0.51717174", "0.5161183", "0.51469237", "0.513447", "0.51236284", "0.5115684", "0.5115617", "0.5112418", "0.5085156", "0.50656277", "0.5046893", "0.5042193", "0.5037967", "0.50289285", "0.50289285", "0.50171787", "0.5010129", "0.50047094", "0.49783933", "0.49623427", "0.49381122", "0.49347863", "0.4930836", "0.48873124", "0.48718902", "0.48502728", "0.4840419", "0.4838458", "0.48146993", "0.48054683", "0.4790862", "0.47885424", "0.47861642", "0.478567", "0.4781011", "0.47753203", "0.47726473", "0.47726032", "0.47676533", "0.47625443", "0.47621", "0.47511715", "0.4741809", "0.4740527", "0.47351462", "0.47347552", "0.47309944", "0.47240022", "0.47154614", "0.469466", "0.4690495", "0.46845144", "0.46843457", "0.46825936", "0.46798614", "0.4676954", "0.46746883", "0.46730694", "0.46697628", "0.46649724", "0.4658379", "0.46557277", "0.46499842", "0.46488312", "0.4646397", "0.46398008", "0.46355045", "0.46354946", "0.46333987", "0.46263042", "0.46235624", "0.4623276", "0.4622064", "0.46199027", "0.46145678", "0.45822206", "0.45801562" ]
0.47385627
67
Exceptions can be raised, but the context manager should handle disconnection.
async def test_context_manager_disconnect_handling(preset_server, event_loop):
    preset_client = SMTP(
        hostname=preset_server.hostname, port=preset_server.port, loop=event_loop
    )

    async with preset_client:
        assert preset_client.is_connected

        preset_server.responses.append(b"250 noop")
        preset_server.drop_connection_event.set()

        try:
            await preset_client.noop()
        except SMTPServerDisconnected:
            pass

    assert not preset_client.is_connected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connection_lost(self, exc):\n pass", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.conn.close()\n if exc_val:\n raise", "async def __aexit__(self, exc_type, exc_value, traceback):\n\n # Close the connection\n await self.disconnect()", "def connection_closed(self, exc):\n _logger.info(\"Connection lost: %s\", str(exc))\n super().close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.socket.close()", "def handle_connection_lost(self, exc: Optional[Exception]) -> None:", "def connection_lost(self, exc):\n if self._closing:\n return\n self._closing = True\n\n # inform yielding readers about closed connection\n if exc is None:\n logger.info(\"Connection closed for %s\", self)\n self.reader.feed_eof()\n else:\n logger.info(\"Connection lost for %s: %s\", self, exc)\n self.reader.set_exception(exc)\n\n # cancel protocol tasks, namely on-connect negotiations\n for task in self._tasks:\n task.cancel()\n\n # close transport (may already be closed), set _waiter_closed and\n # cancel Future _waiter_connected.\n self._transport.close()\n self._waiter_connected.cancel()\n if self.shell is None and self._waiter_closed is not None:\n # raise deprecation warning, _waiter_closed should not be used!\n self._waiter_closed.set_result(weakref.proxy(self))\n\n # break circular references.\n self._transport = None", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.db_connection.close()\n if exc_val:\n raise Exception(exc_val)", "def teardown_request(exception):\r\n try:\r\n g.conn.close()\r\n except Exception as e:\r\n pass", "def teardown_request(exception):\r\n try:\r\n g.conn.close()\r\n except Exception as e:\r\n pass", "def teardown_request(exception):\r\n try:\r\n g.conn.close()\r\n except Exception as e:\r\n pass", "def __exit__(self, exc_type, exc_value, traceback):\n return self.close()", "def __exit__(self, exc_type, exc_value, tb):\n if exc_type is not None:\n traceback.print_exception(exc_type, exc_value, tb)\n # return False # uncomment to pass exception through\n\n return True \n\n self.db = None\n self.client.close()\n\n return True", "def __exit__(self, exc_type, exc_value, exc_traceback):\n\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()", "async def __aexit__(\n self, exc_type: Exception, exc_value: str, traceback: TracebackType\n ) -> None:\n await self.disconnect()", "def teardown_request(exception):\n try:\n g.conn.close()\n except Exception:\n print exception", "def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):\n self.close()", "def teardown_request(exception):\n try:\n g.conn.close()\n except Exception as e:\n pass", "def teardown_request(exception):\n try:\n g.conn.close()\n except Exception as e:\n pass", "def teardown_request(exception):\n try:\n g.conn.close()\n except Exception as e:\n pass", "def teardown_request(exception):\n try:\n g.conn.close()\n except Exception as e:\n pass", "def teardown_request(exception):\n try:\n g.conn.close()\n except Exception as e:\n pass", "def teardown_request(exception):\n try:\n g.conn.close()\n except Exception as e:\n pass", "def close_connection(self) -> None:\n self.batch.__exit__(*sys.exc_info())", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.session.close()", "def connection_lost(self, transport, exc):\n if transport not in 
self.clients:\n return\n reason = str(exc) if exc else 'Connection reset by peer'\n self.client_close(transport, reason)", "def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):\r\n self.close()", "def __exit__(self, exception_type, exception, traceback):\n self.close()", "def __exit__(self, exception_type, exception, traceback):\n self.close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n return False # any exception is raised by the with statement.", "def connection_lost(self, exc):\n if not self._closing:\n self._closing = True\n self.log.info('{about}{reason}'.format(\n about=self.__str__(),\n reason=': {}'.format(exc) if exc is not None else ''))\n self.waiter_connected.cancel()\n self.waiter_closed.set_result(self)", "def connection_lost(self, exc) -> None:\n # We can assert these because this should never get called\n # before self.connection_made which sets these:\n assert self.transport is not None\n assert self.task is not None\n logging.info(\"Server: Connection lost.\")\n self.task.cancel()\n self.transport.close()", "def on_exception(self):\n pass", "def __exit__(self, exc_type, exc_value, traceback): \n self.shutdown()", "def connection_lost(self, exc):\n super().connection_lost(exc)\n\n if self.session is not None:\n # Kill the session.\n self.session.close(SessionCloseErrorCode.SESSION_DIED)\n self.session = None\n\n self.client = None", "def __exit__(self, exc_type, exc_val, exc_tb):\n pass", "def __exit__(self, exc_type, exc_val, exc_tb):\r\n pass", "def __exit__(self):\n return self.connection.__exit__", "def connection_lost(self, exc):\n self.node.notify(Task(TaskType.CONN_LOST, exc))", "def __exit__(self, *_exc):\r\n self.release()", "def connection_lost(self, exc):\n logger.info('The server closed the connection')\n self.loop.stop()", "def connection_closed(self) -> bool:", "def abortConnection():\n pass", "def __exit__(self, type, value, traceback):\n self.transport.close()", "def __exit__(self, exc_type, exc_value, traceback):\r\n pass", "def __exit__(self, exc_type, exc_value, traceback):\r\n pass", "def __exit__(self, exc_type, exc_value, traceback):\r\n pass", "def connectionLost(reason):", "def __exit__(self, exc_type, exc_value, traceback):\n pass", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.__descriptor.close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.__descriptor.close()", "def __exit__(self, *excinfo):\n pass", "def __exit__(self, exc_type, exc_val, exc_tb):\n self._unlock()\n # Let all exceptions through by not returning True.", "async def __aexit__(self, exc_type, exc, traceback) -> None:\n\n await self.close()", "async def __aexit__(self, exc_type, exc, traceback) -> None:\n\n await self.close()", "def __exit__(self, *args):\n self.close()\n # propagate exception\n return False", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.release()", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n return False", "def rescue(self, instance):\n pass", "async def __aexit__(self, exc_type, exc_val, exc_tb):\n pass", "def __exit__(self, exc_type, exc_value, exc_tb):\n self.disconnect()\n self.is_authenticated = False\n self._next_id = 0", "def on_connection_closed(self):", "def __exit__(self, exc_type, exc_val, exc_tb) -> None: # type: ignore\n self.shutdown()", "def whenException(self, channel, call):", "async def __aexit__(self, exc_type, exc_val, exc_tb):\n await self._close()", "def teardown(self, 
exception):", "def test_event_handler_didnt_downgrade_disconnect(\n self, ping_fixture_all_errs_disconnect\n ):\n engine = ping_fixture_all_errs_disconnect\n\n @event.listens_for(engine, \"handle_error\")\n def setup_disconnect(ctx):\n assert ctx.is_pre_ping\n assert ctx.is_disconnect\n\n conn = engine.connect()\n conn.close()", "def __exit__(self, exc_type, exc_value, traceback):\n if self._close_on_exit:\n self.close()", "def onCorruptConnection(self):\n self.log.critical(\"Connection is corrupt!!! Shutting down...\")\n self.connectionCorrupted = True\n self.cancelOutboundTransactions(SMPPClientConnectionCorruptedError())\n self.shutdown()", "async def connection_lost(self):\n logging.info('connection dropped')", "def __exit__(self, exc_type, exc_value, exc_trace):\n try:\n self.conn.commit()\n self.cursor.close()\n self.conn.close()\n except Exception as error:\n print(f\"DBCM::__exit__::{error}\")", "def __exit__(self, unused_type, unused_value, unused_traceback):\n self.close()", "def unexpectedException(self):", "def connection_lost(self, exc):\n self._log.warn(\"Serial Connection Lost\")\n self.transport = None\n self.onReady(False)", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.queue.channel.rpc(self._basic_cancel)\n self.queue.consuming = False", "def connectionLost(self,reason):\n pass", "def __exit__(self, exc_type, exc_value, traceback):\n self._reader.__exit__(exc_type, exc_value, traceback)", "def __exit__(self, exc_type, exc_val, exc_tb) -> None:\n for obj in self.__dict__.values():\n if hasattr(obj, 'close'):\n obj.close()", "def __exit__(self, exc_type, exc_value, traceback) -> bool:\n self.close()\n return False", "def _interrupt(self, threadId, connection):\n try:\n connection.close()\n except pymysql.Error:\n pass", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n try:\n self._store.close()\n except AttributeError:\n pass", "def __enter__(self):\n logger.verbose(\"Establishing connection to {0}:{1}...\"\n .format(self.server, self.port))\n try:\n return super(SmarterConnection, self).__enter__()\n except vim.fault.HostConnectFault as e:\n if not re.search(\"certificate verify failed\", e.msg):\n raise e\n # Self-signed certificates are pretty common for ESXi servers\n logger.warning(e.msg)\n self.UI.confirm_or_die(\"SSL certificate for {0} is self-signed or \"\n \"otherwise not recognized as valid. 
\"\n \"Accept certificate anyway?\"\n .format(self.server))\n _create_unverified_context = ssl._create_unverified_context\n ssl._create_default_https_context = _create_unverified_context\n return super(SmarterConnection, self).__enter__()\n except requests.exceptions.ConnectionError as e:\n # ConnectionError can wrap another internal error; let's unwrap it\n # so COT can log it more cleanly\n outer_e = e\n inner_message = None\n while e.errno is None:\n inner_e = None\n if hasattr(outer_e, 'reason'):\n inner_e = outer_e.reason\n else:\n for arg in outer_e.args:\n if isinstance(arg, Exception):\n inner_e = arg\n break\n if inner_e is None:\n break\n if hasattr(inner_e, 'strerror'):\n inner_message = inner_e.strerror\n elif hasattr(inner_e, 'message'):\n inner_message = inner_e.message\n else:\n inner_message = inner_e.args[0]\n logger.debug(\"\\nInner exception: {0}\".format(inner_e))\n if hasattr(inner_e, 'errno') and inner_e.errno is not None:\n e.errno = inner_e.errno\n break\n outer_e = inner_e\n if e.strerror is None:\n e.strerror = (\"Error connecting to {0}:{1}: {2}\"\n .format(self.server, self.port, inner_message))\n raise", "def connection_lost(self, exc: Optional[Exception]) -> None:\n self.listener.handle_connection_lost(exc)", "def test_disconnect_closed(self):\n self.sock.close()\n self.inverter.sock.close()\n self.inverter.sock_file.close()\n self.inverter.disconnect() # Should not raise exception", "def connection_lost(self, exc):\n super().connection_lost(exc)\n\n if self.session is not None:\n # Free up the allocated ID.\n self.server.session_id_allocator.free(self.session.id)\n\n # Kill the session.\n self.session.close(SessionCloseErrorCode.SESSION_DIED)\n self.session = None\n\n self.server = None", "def __exit__(self, *exc):\n return self._lock.__exit__(*exc)", "def __exit__(self, exc_type, exc_value, traceback):\n return None", "def teardown_request(exception):\n if hasattr(g, 'db'):\n g.db.close()", "def teardown_request(exception):\n if hasattr(g, 'db'):\n g.db.close()", "def teardown_request(exception):\n if hasattr(g, 'db'):\n g.db.close()", "def handle_expt(self):\r\n self._perform_on_error_handling()", "def teardown_request(exception):\n if hasattr(g, 'db'):\n g.db.close()", "def on_connection_open_error(self, _unused_connection, err):\n # LOGGER.error('Connection open failed: %s', err)\n self.reconnect()", "def close(self):\n return exolib.py_exclos(self.exoid)", "async def __aexit__(self, exc_type: Type[Exception], *_) -> None:\n # If the 'with' block raises an exception, the batch will not be\n # sent to the server.\n if self._transaction and exc_type is not None:\n return\n\n await self.close()", "def connection_lost(self, exc):\n if self._stream.done:\n # Stream is done, no need to panic\n pass\n else:\n self._logger.debug('[%s] Connection lost!', self._sock_id, exc_info=exc)" ]
[ "0.721711", "0.7171181", "0.7018697", "0.7018545", "0.67196554", "0.6688977", "0.66760486", "0.6672632", "0.666469", "0.666469", "0.666469", "0.66525275", "0.66458035", "0.6639069", "0.6637687", "0.6637687", "0.6637687", "0.6637687", "0.6625044", "0.661997", "0.6600019", "0.6578358", "0.6578358", "0.6578358", "0.6578358", "0.6578358", "0.6578358", "0.65736705", "0.6571731", "0.65570873", "0.6552524", "0.65087044", "0.65087044", "0.6506358", "0.6497435", "0.6484557", "0.64237607", "0.64124084", "0.63754684", "0.6369494", "0.63530415", "0.6349731", "0.6348355", "0.6340643", "0.63403445", "0.6333328", "0.6317427", "0.6313547", "0.6309827", "0.628214", "0.628214", "0.628214", "0.6269751", "0.6269668", "0.6247899", "0.6247899", "0.62372464", "0.6235564", "0.62331116", "0.62331116", "0.62208414", "0.6213509", "0.61991054", "0.6194335", "0.61867386", "0.6185311", "0.6183314", "0.61815745", "0.6158591", "0.6141188", "0.61289096", "0.61207086", "0.60804623", "0.6074135", "0.60645026", "0.6064381", "0.6054949", "0.6051196", "0.6042671", "0.6037959", "0.6008174", "0.5998562", "0.59943473", "0.5991628", "0.5985594", "0.5978455", "0.59779054", "0.59745413", "0.5966181", "0.5962667", "0.59555566", "0.594808", "0.594789", "0.594789", "0.594789", "0.59463215", "0.5943321", "0.5933953", "0.59276956", "0.5923503", "0.5917813" ]
0.0
-1
Renders its contents to a string using the current context, allowing you to process template variables embedded in things like model content, django-flatblocks, etc.
def render_inline(parser, token):
    nodelist = parser.parse(('end_render_inline',))
    parser.delete_first_token()
    return RenderInlineNode(nodelist)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_string(self, template: str, **vars) -> str:", "def render(self, template: str, **vars) -> str:", "def _render_context(self, template, block, **context):\n return u''.join(block(template.new_context(context)))", "def get_rendered_text(self, context):\n missing = set()\n for required in utils.get_variable_names_from_template(self):\n if required not in context:\n missing.add(required)\n if missing:\n raise MissingContext(missing)\n tmpl = utils.PyratempTemplate(self.text)\n context = context.copy()\n context[\"locale\"] = self.language.iso_code\n return tmpl.render(context)", "def html(self, **kwargs):\n # get bas context_data\n context_data = self.get_context_data(**kwargs)\n # setup the context object for it\n context = loader.Context(context_data)\n # render the template_source_body with current set of context\n body = loader.render_to_string(self.template_source_body, context)\n # add the rendered body to the underlying wrapper template\n context_data = self.get_context_data(body=body)\n # rerender it\n context = loader.Context(context_data)\n return self.template.render(context)", "def _render(self) -> str:\n html = self._template.render(self._transient_context)\n self._transient_context = None\n return html", "def render(self, context=None, **kwargs):\n # store the given context\n global_context = {}\n # store the result\n result = []\n # group the given context or kwargs\n if context:\n global_context.update(context)\n elif kwargs:\n global_context.update(kwargs)\n\n # this function to output from context\n # to the rendered template\n def write(*args):\n result.extend([str(arg) for arg in args])\n\n def fmt_write(fmt, *args):\n result.append(fmt % args)\n\n # add write and fmt_write into global_context\n global_context['write'] = write\n global_context['fmt_write'] = fmt_write\n # run the code\n for is_code, token in self.tokens:\n if is_code:\n exec(token, global_context)\n else:\n result.append(token)\n return ''.join(result)", "def render( context, *args, **kwargs ):", "def render(self):\n return render_to_string(\n self.template_name, self.get_context_data(), request=self.request\n )", "def render_template(self, string, context=None):\n context = context or {}\n context = Context(context)\n return Template(string).render(context)", "def render(self, value, context=None):\n template = value.template\n if template:\n return render_to_string(template, {'self': value})\n else:\n return self.render_basic(value)", "def render(template, context):\n if not template:\n return None\n\n text = \"\"\n filename = \"templates/\" + template\n with open(filename) as f:\n text = f.read()\n # First compile template into extended base template.\n is_child = re.search(extend_search, text.splitlines()[0])\n if is_child:\n base_filename = \"templates/\" + is_child.group(2)\n with open(base_filename) as base:\n text = extend_template(base.read(), text)\n # Run conditional checks\n has_conditions = re.search(if_search, text)\n if has_conditions:\n text = render_conditionals(text, context)\n # Replace any variables passed to the render function.\n for replace in context.replaces.keys():\n arg_search = re.compile(\"{{ \" + replace + \" }}\")\n text = re.sub(arg_search, context.replaces[replace], text)\n return text", "def render(*content, **context):\n return u''.join((e for c in content for e in render_content(c, **context)))", "def render_template_string(source, **context):\n ctx = stack.top\n template = Template(source, lookup=_lookup(ctx.app))\n return _render(template, context, 
ctx.app)", "def render(self):\n\n template_string = parseValues(self.data)[1]\n context = self.context\n\n # Run the prebuild plugins, we can't use the standard method here because\n # plugins can chain-modify the context and data.\n for plugin in self.site._plugins:\n if hasattr(plugin, 'preBuildPage'):\n context, data = plugin.preBuildPage(self.site, self, context, data)\n\n if self.site.two_phase:\n initial_string = Template(template_string).render(context)\n return Template(initial_string).render(context)\n\n\n return Template(template_string).render(context)", "def renderedUnicode(self, context):\n\n content = self.renderAsTemplate(context)\n templateName = self.getMetadata(u'template')\n if templateName:\n log(u'rendering template ' + templateName)\n template = templateFiles[templateName]\n context['content'] = content\n content = template.renderedUnicode(context)\n return content", "def render_template(self, context=None):\n if context is None:\n context = self.get_template_context()\n return self.get_template_object().render(context)", "def render_func(raw_str: str) -> str:\n try:\n rendered_str = raw_str.format(**live_context)\n except KeyError as err:\n raise SQLTemplaterError(\n \"Failure in Python templating: {}. Have you configured your \"\n \"variables? https://docs.sqlfluff.com/en/stable/\"\n \"configuration.html#templating-configuration\".format(err)\n )\n return rendered_str", "def render(self, context):\n #Turn our resolvers into actual values:\n try:\n object_obj = self.object_name_resolver.resolve(context)\n except AttributeError: #Happens if a string was passed in as the object name. Try to rescue this and treat as a var:\n object_obj = context.get(self.object_name_resolver, None)\n method_name = self.method_name_resolver.resolve(context) or str(self.method_name_resolver) #Can resolve as variable, but will also resolve as a string. Put in \"inverted commas\" to force string resolution\n if not object_obj or not method_name:\n raise TemplateSyntaxError(\"{{%% callmethod object_name.method_name %%}} cannot make sense of the resolved values for object_name.method_name '{object_name}.{method_name}'\".format(object_name=self.object_name_resolver, method_name=self.method_name_resolver))\n #Resolve the args\n args = []\n for arg_resolver in self.args_resolvers:\n arg = arg_resolver.resolve(context)\n args.append(arg)\n #Resolve the kwargs\n kwargs = {}\n for k_raw, v_resolver in self.kwargs_resolvers.items():\n k = smart_text(k_raw,'ascii')\n v = v_resolver.resolve(context)\n kwargs[k]=v\n \n #Now try to call the method on the object\n try:\n output = getattr(object_obj, method_name)(*args, **kwargs)\n except Exception as e: #Fail silently, but tell the console:\n print(\"\\033[91m{err_type} from {{%% callmethod <{obj_name}>.{method_name}() %%}}: {err_msg}\\033[0m\".format(err_type=e.__class__.__name__, obj_name=object_obj, method_name=method_name, err_msg=e))\n output = \"\"\n \n #Set to context variable if a context variable:\n if self.asvar:\n context[self.asvar] = output #NB: context is a dict, which is mutable :-)\n return \"\"\n return output #Otherwise return output (i.e. 
render this string into the page) ", "def render(self, _template, context=None):\n variables = {}\n if context:\n variables.update(context)\n rv = self.jinja2.render_template(_template, **variables)\n self.response.write(rv)", "def render_str(self, template, **params):\n tmp = JINJA_ENV.get_template(template)\n return tmp.render(params)", "def render_str(self, template, **params):\n return render_str(template, **params)", "def render(self, template: str, **vars) -> str:\n vars.setdefault('ctx', self._ctx)\n return self._renderer.render(template, **vars)", "def render(self, template, context):\n try:\n template = self.environment.from_string(template)\n except TemplateSyntaxError as e:\n raise TemplateError(e)\n try:\n return template.render(**context)\n except (UndefinedError, TypeError) as e:\n raise TemplateError(e)", "def render_string(self, template_name, **kwargs):\n raise NotImplementedError()", "def render(self, context=None):\n # Make the complete context we'll use.\n render_context = dict(self.context)\n if context:\n render_context.update(context)\n return self._render_function(render_context, self._do_dots)", "def render(self):\n\n context = {\n 'model': self,\n 'hidden_fields': self.hidden_fields,\n 'css_prefix': self.css_prefix,\n }\n rendered = loader.render_to_string(self.template_path,\n dictionary=context)\n return rendered", "def render_string(self, source: str, **vars) -> str:\n vars.setdefault('ctx', self._ctx)\n return self._renderer.render_string(source, **vars)", "def render(self, context_instance):\r\n # collapse context_instance to a single dictionary for mako\r\n context_dictionary = {}\r\n\r\n # In various testing contexts, there might not be a current request context.\r\n if edxmako.middleware.requestcontext is not None:\r\n for d in edxmako.middleware.requestcontext:\r\n context_dictionary.update(d)\r\n for d in context_instance:\r\n context_dictionary.update(d)\r\n context_dictionary['settings'] = settings\r\n context_dictionary['EDX_ROOT_URL'] = settings.EDX_ROOT_URL\r\n context_dictionary['django_context'] = context_instance\r\n context_dictionary['marketing_link'] = marketing_link\r\n\r\n return super(Template, self).render_unicode(**context_dictionary)", "def render_to_string(filename, context=None, request=None):\n context = {} if context is None else context\n template = env.get_template(filename)\n context['request'] = request\n rendered = template.render(**context)\n return rendered", "def content(self):\n return self.template.render(weblogsnippet=self.weblogsnippet, pathto=pathto)", "def render(self, value, context=None):\n if self.raw_html is not None:\n return format_html(self.raw_html)\n else:\n return ''", "def render_template(content, **context_args):\n template = Template(\"{% load font_awesome %}\" + content)\n return template.render(Context(context_args))", "def render(self, name, value, attrs=None):\n context = self.get_context(name, value, attrs)\n return loader.render_to_string(\n self.template_name,\n context\n )", "def render(self, context):\n if not self.phrase:\n return ''\n t = context.template.engine.get_template('philosophy_phrases/phrase_popup.html')\n return t.render(template.Context({'phrase': self.phrase}))", "def render(template, variables={}):\r\n\treturn prettify( parse(template).render(dict(variables.items())) )", "def render_string(self, template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)", "def renderstr_from_template(self, template, args=None):\n renderedtext = 
template.render_string(args)\n return renderedtext", "def render(self):\n self._render_text = self.content.replace('\\n', '<br>')\n return TemplateFile.jinja_render_str(\"post.html\", p=self)", "def render(self):\n\n self._render_text = self.content.replace('\\n', '<br>')\n return render_str('post_template.html', post=self, user=self.user)", "def _repr_html_(self) -> str:\n output_html = self.template_base.render(context=self.context)\n return output_html", "def content():\n try:\n url = request.args.get('url')\n if not url:\n raise Exception('Expected url parameter')\n return render(cached_content(url=url), template='content.jinja2')\n except Exception, e:\n traceback.print_exc()\n return render({'url': request.url, 'error': str(e)},\n template='error.jinja2')", "def render(request, *args, **kw):", "def render_str(self, template_name, **params):\n template = jinja_env.get_template(template_name)\n return template.render(params)", "def render_to_string(self, template_name, template_values=None):\n if not template_values:\n template_values = {}\n\n # add xsrf token to the context\n template_values['_xsrf'] = self._xsrf_token\n\n # add any functions/constants defined in config to the context\n for k, v in self.app.config['jinja2']['globals'].items():\n try:\n template_values[k]\n except KeyError:\n template_values[k] = v\n\n # add common request-specific items to the context\n template_values['request'] = self.request\n template_values['session'] = self.session\n template_values['user'] = users.get_current_user()\n\n # render and return template as string\n t = self.jinja2.get_template(template_name)\n return t.render(template_values)", "def email_render(self, template_path, context):\n body = render_to_string(template_path, context)\n\n return body", "def message(self):\n if callable(self.template_name):\n template_name = self.template_name()\n else:\n template_name = self.template_name\n return loader.render_to_string(template_name,\n self.get_context())", "def render(self):\n context = {'groups': self._groups}\n\n return loader.render_to_string(self._template_path, dictionary=context)", "def render_string(self, source, **kwargs):\n template = self._jinja_env.from_string(source)\n return self._render(template, **kwargs)", "def __str__(self):\n t = Template(\n \"\"\"\n <h4>$title</h4>\n $imgs\n $footnotes\n <hr/>\"\"\")\n # Return result.\n return t.substitute({\n \"title\": self.title,\n \"imgs\": self.render_images(),\n \"footnotes\": self.render_footnotes()\n })", "def preview(self) -> str:\n context = self.get_context()\n message = render_to_string(self.template_name, context, using=\"django\")\n return message", "def render(self, source: str, context: dict):\n\n # Creating new class which will be used as a template context.\n context_class = type('RenderContext', (Context,), {})\n\n # All callable objects in context.\n helpers = {}\n\n for key, value in context.items():\n\n # Install each callable object as a context class property.\n if callable(value):\n setattr(context_class, 'helper_' + key, Helper(value))\n helpers[key] = value\n\n # Helper function is run only when context dict has it name as a key.\n # Use template context class to create dict.\n render_context = context_class(context)\n\n result = pystache.render(source, render_context)\n\n return result", "def render(self):\n master = Template(self.master_file.read_text())\n content = Template(self.content_template.read_text())\n\n # Render content\n d = {\n \"citekey\": self.citekey,\n \"author\": self.author,\n \"ts\": 
self.ts_iso,\n \"ts_day\": self.ts_day,\n \"title\": self.fieldValues[\"title\"],\n \"creator\": self.fieldValues[\"author\"],\n \"date\": self.fieldValues[\"issued\"],\n \"doi\": self.fieldValues[\"DOI\"],\n \"type\": self.fieldValues[\"type\"],\n }\n rendered = master.render(d) + \"\\n\\n\" + content.render()\n\n return rendered", "def render_in_context(context, template, local_context=None):\n\n if context is None:\n context = Context()\n\n if not hasattr(template, \"render\"): # Quacks like a template?\n try:\n engine = context.template.engine\n except AttributeError:\n engine = Engine.get_default()\n\n if isinstance(template, (list, tuple)):\n template = engine.select_template(template)\n else:\n template = engine.get_template(template)\n\n with context.push(local_context):\n return template.render(context)", "def get(self, request):\n return render(request, self.template, self.context)", "def render(self):\n\n context = {\n 'record': self,\n 'hidden_fields': self.hidden_fields,\n 'css_prefix': self.css_prefix,\n }\n\n meta = getattr(self, 'Meta', None)\n context['survey_name'] = getattr(meta, 'survey_name', '')\n\n rendered = loader.render_to_string(self.template_path,\n dictionary=context)\n return rendered", "def render(self, activity, context, typename=None):\n if not isinstance(context, dict):\n raise ContextTypeException('context must be dict. it should not Context or RequestContext')\n template_names = self.get_template_names(activity, typename)\n template = select_template(template_names)\n context = self.prepare_context(activity, context,\n typename=typename)\n return template.render(context)", "def render(filename, context):\n\ttemplate = parser.Template(open(TEMPLATES_DIR + '/' + filename).read())\n\treturn template.eval(context)", "def _render_content(template_name, workflow: Workflow):\n content = render_template(\n dag_name=workflow.dag_name,\n template_name=template_name,\n relations=workflow.task_group_relations,\n task_groups=list(workflow.task_groups.values()),\n )\n return content", "def template_string(template, **kwargs):\n\n temp = Template(template)\n return temp.render(**kwargs)", "def render(self):\n self._render_text = self.content.replace('\\n', '<br>') # deal with new line\n return render_str(\"post.html\", p = self)", "def render_html(self):\n return self.template.render(content=self.content, **self.styles)", "def render(self, tmpl_name, context_env):\n return self.tmpl._render(tmpl_name, context_env)", "def render_string(\n self, source: str, *args: dict, **kwargs: typing.Any\n ) -> str:\n template = self._environment.from_string(source=source)\n return template.render(*args, **kwargs)", "def render(self):\n self.increase_view_count()\n return render_to_string(self.template.template_file, {'advert':self})", "def render_template(template: str, context: dict) -> str:\n if template is None:\n return \"\"\n return Template(template).render(Context(context))", "def render(self, _template, **context):\n context['_request'] = self.request\n self.response.write(self.jinja2.render_template(_template, **context))", "def render_text(self, context, result):\n\t\tcontext.response.text = result\n\t\treturn True", "def render(self, template, values=None, **options):\n body = []\n self.compile(template, options)(self, body.append, values or {})\n return u''.join(body).encode('utf8')", "def test_basic_usage(self):\n t = Template('{% load djblets_utils %}'\n '{% include_as_string template_name %}')\n\n self.assertEqual(\n t.render(Context({\n 'template_name': 
'testing/foo.html',\n 'foo': 1,\n 'bar': 2,\n })),\n \"'1 2\\\\\\n'\")", "def render_template(\n template_name: str = \"index.html\", context: t.Dict[str, str] = {}\n):\n html_str: str\n with open(template_name, \"r\") as f:\n html_str = f.read()\n html_str = html_str.format(**context)\n return html_str\n # return f\"<h1>Hello {path=}</h1>\\n{template_name=}\"", "def render(self, context):\n engine = Renderer()\n engine.environment.filters['format_date'] = format_date\n engine.environment.filters['format_datetime'] = format_datetime\n result = engine.render(self.template, **context)\n response = HttpResponse(\n content_type='application/vnd.oasis.opendocument.text; charset=UTF-8'\n )\n response['Content-Disposition'] = 'inline; filename=' + self.filename\n with tempfile.NamedTemporaryFile() as output:\n output.write(result)\n output.flush()\n output = open(output.name, 'rb')\n response.write(output.read())\n return response", "def _render(format_string, message_body, context):\r\n # If we wanted to support substitution, we'd call:\r\n # format_string = format_string.replace(COURSE_EMAIL_MESSAGE_BODY_TAG, message_body)\r\n result = format_string.format(**context)\r\n # Note that the body tag in the template will now have been\r\n # \"formatted\", so we need to do the same to the tag being\r\n # searched for.\r\n message_body_tag = COURSE_EMAIL_MESSAGE_BODY_TAG.format()\r\n result = result.replace(message_body_tag, message_body, 1)\r\n\r\n # finally, return the result, after wrapping long lines and without converting to an encoded byte array.\r\n return wrap_message(result)", "def post(self, request, *args, **kwargs):\n return render(request, self.template_name, self.get_context_data(**kwargs))", "def render_to_string(template):\n from django.template import Context, Template\n from django.template.loader import render_to_string\n \n final_fqfn = find_template(template)\n# for path in get_template_dirs():\n# fqfn = os.path.abspath(os.path.join(path, template))\n# if os.path.isfile(fqfn):\n# print>>sys.stderr, 'Using template: %s' % (fqfn,)\n# final_fqfn = fqfn\n# break\n# else:\n# print>>sys.stderr, 'Template not found: %s' % (fqfn,)\n assert final_fqfn, 'Template not found in any of:\\n%s' % ('\\n'.join(paths),)\n \n #content = render_to_string('template.txt', dict(env=env))\n template_content = open(final_fqfn, 'r').read()\n t = Template(template_content)\n c = Context(env)\n rendered_content = t.render(c)\n rendered_content = rendered_content.replace('&quot;', '\"')\n return rendered_content", "def render(self, template, **kw):\n self.write(self.render_str(template, **kw))", "def render(self, template, **kw):\n self.write(self.render_str(template, **kw))", "def render(self, template, **kw):\n self.write(self.render_str(template, **kw))", "def render(self, template, **kw):\n self.write(self.render_string(template, **kw))", "def render_str(template, **params):\n t = env.jinja_env.get_template(template)\n return t.render(params)", "def render(self, variables):\n variables = variables or {}\n try:\n rendered = render_template(self.template, variables)\n except TemplateError as exc:\n _LOGGER.error(\n \"Failed to render variables for %s.%s: %s\",\n self.action_type,\n self.action_id,\n exc,\n )\n raise\n return rendered", "def _render_str(self, template, ** params):\n\n for key in params:\n if(isinstance(params[key], str)):\n params[key] = params[key].decode('utf-8')\n if(isinstance(params[key], dict)):\n for sub_key in params[key]:\n if(isinstance(params[key][sub_key], str)):\n 
params[key][sub_key] = params[key][sub_key].decode('utf-8')\n t = constants.JINJA_ENV.get_template(template)\n return t.render(params)", "def django_template_include(file_name, mako_context):\r\n\r\n dictionary = dict(mako_context)\r\n return loader.render_to_string(file_name, dictionary=dictionary)", "def render( request, etype, value, tb ):", "def render( *args, **kwargs ):", "def get(self, request, *args, **kwargs):\n return render(request, self.template_name)", "def get(self, request, *args, **kwargs):\n return render(request, self.template_name)", "def get(self, request, *args, **kwargs):\n return render(request, self.template_name)", "def get(self, request, *args, **kwargs):\n return render(request, self.template_name)", "def get(self, request, *args, **kwargs):\n return render(request, self.template_name)", "def render(self, *html, **opt):\n context = self.context\n # add request, response to context implicitly.\n context['request'] = self.request\n context['response'] = self.response\n if 'context' in opt:\n context.update(opt['context'])\n opt['context'] = context.dicts[0]\n cnt = self.get_controller()\n cnt.render(*html, **opt)", "def render_to_response(template, context, request, *args, **kwargs):\n from django.shortcuts import render_to_response as rtr\n from django.template import RequestContext\n return rtr(template, context, context_instance=RequestContext(request), *args, **kwargs)", "def render_to_string(filename, context=None, environment=None):\n \n if context is None:\n context = {}\n \n if environment is None:\n environment = get_env()\n \n return environment.get_template(filename).render(\n context_to_dict(context))", "def render(self, context):\n try:\n obj = Variable(self.obj).resolve(context)\n except VariableDoesNotExist:\n return \"\"\n \n rel = {}\n related_models = obj._meta.get_all_related_objects()\n related_models.extend(obj._meta.get_all_related_many_to_many_objects())\n \n for related in related_models:\n # If model is specified to be excluded, just move on to the \n # next related model.\n if related.name in EXCLUDED_MODELS:\n continue\n \n # Get the app and model\n app, model = related.name.split(\":\")\n \n # Build the kwargs for the queryset that will be shown\n kwgs = {'%s__pk' % related.field.name: obj.pk}\n \n # Retreive the queryset, limiting the number of item\n # that will be returned\n qs = related.model.objects.filter(**kwgs)\n \n # If the queryset is empty, just move on \n # to the next related model.\n if not qs:\n continue\n \n # Add a display_name, items, related field name and the admin\n # url for the model changelist.\n try:\n rel[related.name] = {\n 'display_name': \"%s %s\" % (app, model),\n 'items': qs[:ITEM_LIMIT],\n 'related_field_name': related.field.name,\n 'url': reverse(\"admin:%s_%s_changelist\" % (app, model))\n }\n except NoReverseMatch:\n # This error will occur naturally for models that have no\n # admin interface specified.\n pass\n \n # Set the return variable to the dictionary.\n context[self.varname] = rel\n return \"\"", "def render_plaintext(self, plaintext, context):\r\n return CourseEmailTemplate._render(self.plain_template, plaintext, context)", "def render_text(self, text, *args, **kwargs):\n global TEXT_TEMPLATE\n self.render(TEXT_TEMPLATE, text=text, *args, **kwargs)", "def render(self, tmpl_file, context):\n template = Template(tmpl_file.read_text(), keep_trailing_newline=True)\n return template.render(context)", "def html(template, **data):\n tmpl = template_loader.load(template)\n context = {}\n 
context_setup.dispatch(context)\n context.update(data)\n stream = tmpl.generate(**context)\n return stream", "def render_content(content, **context):\n if content is None:\n yield u''\n elif isinstance(content, basestring):\n yield content\n elif callable(content):\n for e in render_content(content(**context), **context):\n yield e\n elif isinstance(content, collections.Iterable):\n for e in render_iterable(content, **context):\n yield e\n else:\n yield unicode(content)", "def _unprocessed_render(node: RenderTreeNode, context: RenderContext) -> str:\n return node.content", "def get(self, request, *args, **kwargs):\n context = self.get_context_data(request)\n return render(\n request,\n self.template_name,\n context\n )" ]
[ "0.74136245", "0.73672175", "0.72275555", "0.7093623", "0.7001947", "0.6995675", "0.6968364", "0.69323826", "0.6897053", "0.67660546", "0.6755247", "0.6604185", "0.6604105", "0.6596961", "0.6593976", "0.65834063", "0.6507833", "0.64963186", "0.6459822", "0.64542526", "0.64530545", "0.6447075", "0.64418304", "0.64344984", "0.641178", "0.638764", "0.6366497", "0.63561153", "0.635346", "0.6341837", "0.6330324", "0.6328545", "0.6294346", "0.628498", "0.6281569", "0.6280879", "0.6276333", "0.6254043", "0.624839", "0.62330365", "0.62314844", "0.6227522", "0.62269676", "0.6186922", "0.6180478", "0.61680174", "0.61492485", "0.6138599", "0.6130511", "0.61124414", "0.61123663", "0.6106875", "0.61065006", "0.6103936", "0.6098553", "0.6094587", "0.6084835", "0.60781217", "0.60694057", "0.6053014", "0.604118", "0.6023783", "0.60209215", "0.6014", "0.6005715", "0.60023457", "0.5999631", "0.5987841", "0.5975521", "0.5974507", "0.5974317", "0.59569645", "0.5955158", "0.5954991", "0.5943142", "0.5938602", "0.5938602", "0.5938602", "0.5916857", "0.5897733", "0.58908015", "0.5882238", "0.5874139", "0.58689183", "0.5865198", "0.585227", "0.585227", "0.585227", "0.585227", "0.585227", "0.5829112", "0.5828762", "0.58239985", "0.58192295", "0.5818185", "0.5816332", "0.5812288", "0.58065385", "0.5797473", "0.5788162", "0.5778221" ]
0.0
-1
Trivial helper for the common case where you have a dictionary and want one value
def get_key(dict, key): return dict.get(key, None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dict_first(my_dict: Dict) -> Any:\n return list(my_dict.values())[0]", "def _single_getitem(self, key):\n try:\n return self._dict[key]\n except KeyError:\n return self.default", "def safely_get_value(dct: Mapping, key: Any,\n default: Union[T, None] = None\n ) -> Union[Any, T]:\n if key in dct:\n return dct[key]\n else:\n return default", "def getValue(dictionary, key, value):\n if not key in dictionary.keys():\n return value\n else:\n return dictionary[key]", "def _proper_type_return(val):\n if len(val) == 0:\n return None\n elif len(val) == 1:\n return list(val.values())[0]\n else:\n return val", "def dict_item(dictionary, key):\n try:\n return dictionary.get(key, None)\n except AttributeError:\n # fail silently if something other than a dict is passed\n return None", "def first_entity_value(entities, entity):\n if entity not in entities:\n return None\n val = entities[entity][0]['value']\n if not val:\n return None\n return val['value'] if isinstance(val, dict) else val", "def get_value(dct, key):\n return dct.get(key)", "def get_item(dictionary, key):\n return dictionary.get(key)", "def get_item(dictionary, key):\n return dictionary.get(key)", "def get_item(dictionary, key):\n return dictionary.get(key)", "def get_item(dictionary, key):\n return dictionary.get(key)", "def dict_value(dict_object, dictkey):\n \n try:\n val = dict_object[dictkey]\n except: # Exception as ex:\n val = ''\n return val", "def getitem(value, key):\n try:\n return value[key]\n except Exception:\n return \"\"", "def _dictfetchone(self):\n data = self._dictfetchall()\n if data:\n return data[0]\n return {}", "def keyvalue(dict, key):\n try:\n return dict[key]\n except KeyError:\n return ''", "def get_value(self) -> Dict[str, any]:", "def get_random_value_from_dict(d: dict):\n return d[get_random_key_from_dict(d)]", "def find_value(dic, key):\n return dic[key]", "def get(self, key: str, default=None) -> Any:\n try:\n return self[key][0]\n except KeyError:\n return default", "def getfirst(self, key, default=None):\n \n values = self.getlist(key)\n return values[0] if values else default", "def get_value(self, query_dict, k): \n if k in query_dict:\n return query_dict[k]\n return ''", "def get_key(dictionary: dict, *args) -> Union[str, bool, dict]:\n data = reduce(lambda c, k: c.get(k, {}), args, dictionary)\n if data == {}:\n return \"\"\n return data", "def get(dict, var, default=None):\n\n if dict.has_key(var):\n return dict[var]\n else:\n return default", "def getRetKey(dictionary):\n retKey = \"\"\n try:\n if dictionary:\n retKey = dictionary.values()[0].keys()[0]\n except TypeError:\n logging.debug(\"type error\")\n\n return retKey", "def get_item(obj, key):\n val = None\n if obj and type(obj) == dict:\n val = obj.get(key)\n elif obj and hasattr(obj, key):\n val = getattr(obj, key)\n val = val or ''\n return val", "def _get(obj, name):\n try:\n # try to get value using dict's __getitem__ descriptor first\n return dict.__getitem__(obj, name)\n except TypeError:\n # if it's a dict, then preserve the TypeError\n if isinstance(obj, dict):\n raise\n # otherwise try one last time, relying on __getitem__ if any\n return obj[name]", "def GFID(myDict):\n return (myDict[next(iter(myDict))])", "def get_value(self, listOfDicts, key):\n for val in listOfDicts:\n if key in val:\n return val[key]", "def __getitem__(self, v):\r\n return self.unif.get(v, (v, None))[0]", "def value_from_dict_array(dict_array, key):\n for element in dict_array:\n if element[\"name\"] == key:\n return element[\"value\"]\n return \"\"", 
"def _get_default(ddict, key, default):\n if ddict is None or key not in ddict or ddict[key] is None:\n return default\n return ddict[key]", "def get_key_by_value(needvalue, mydict):\n return [key for key, value in mydict.iteritems() if value == needvalue][0]", "def value(\n self, key: _K = 0, default: t.Optional[object] = None\n ) -> t.Any:\n try:\n index = self.index(key)\n except (IndexError, KeyError):\n return default\n else:\n return self[index]", "def my(d,k):\n try:\n return d[k]\n except KeyError:\n return CONFIG_DEFAULTS[k]", "def get_from_dict(d, k):\n try:\n return reduce(dict.get, k, d)\n except TypeError:\n # Value not found.\n return None", "def get_value(key, dic, default_dic):\n\n v = dic.get(key)\n\n if v is None:\n if key in default_dic:\n v = default_dic.get(key)\n else:\n print_log_msg(\n 'ERROR', 'get_param', 'key not in default_dic', key\n )\n\n return v", "def _dig_first(*pairs: Tuple[Mapping[str, Setting], str], ignore_empty: bool = False) -> Setting:\n if not pairs:\n raise ValueError(\"pairs cannot be empty\")\n\n for dict_like, key in pairs:\n if key in dict_like:\n value = dict_like[key]\n\n if ignore_empty and value == \"\":\n continue\n\n return value\n\n last_key = pairs[-1][1]\n raise KeyError(last_key)", "def get_value(key, obj, default=missing):\n if isinstance(key, int):\n return _get_value_for_key(key, obj, default)\n return _get_value_for_keys(key.split('.'), obj, default)", "def dictionary_value_grabber(self, value, dic):\r\n self.coder=\"Used to grab a value in a dictionary\"\r\n for v in dic.values():\r\n if v==value:\r\n return value\r\n else:\r\n pass", "def get_element(d, path): # type: (Dict, Tuple) -> Any\n if len(path) == 0:\n raise ValueError('Path length cant be 0')\n elif len(path) == 1:\n return d.get(path[0])\n elif d.get(path[0]):\n return DictUtil.get_element(d[path[0]], path[1:])\n return None", "def get_from_first(key: Any, *getters: Sequence, default: Any = None) -> Any:\n for item in getters:\n if item and (\n isinstance(item, dict) and key in item\n or isinstance(key, numbers.Integral) and hasattr(item, \"__len__\") and 0 <= int(key) < len(item)\n ):\n return item[key]\n return default", "def get(self, key, default=None):\n try:\n val = self[key]\n except KeyError:\n return default\n if val == []:\n return default\n return val", "def _get_value(match_entry: Dict, path0: str) -> any:\n if path0 is None:\n current_el = match_entry\n else:\n path = path0.split('/')\n current_el = match_entry\n for p in path:\n if current_el is None:\n break\n current_el = current_el.get(p)\n return current_el", "def get_dict_attrib(in_dict, key, default=None):\n\n try:\n return in_dict[key]\n except KeyError:\n return default", "def get(key):\n return current().values[key]", "def get_value_from_object(obj, key):\n if is_dict(obj):\n return obj.get(key)\n return getattr(obj, key, None)", "def force_dict(obj):\n if type(obj) == dict:\n return obj\n if type(obj) == list and len(obj) > 0 and type(obj[0]) == dict:\n return obj[0]\n return None", "def get(dd, kk, default=0):\n if kk in dd.keys():\n return dd[kk]\n else:\n return default", "def getValue(variable):\n if(dic.__contains__(variable)):\n return dic[variable]\n else:\n print(\"Variable : \"+str(variable) + \" ERROR KEY NOT IN DIC\")", "def get_value(value, key, client):\n if client is None:\n return value.__dict__[key]\n elif \"glance\" in str(client):\n return value[key]\n elif \"cinder\" in str(client):\n return value.__dict__[key]\n elif \"nova\" in str(client):\n return value.__dict__[key]", 
"def extract(dictionary: Any, key: Any) -> Union[Any, None]:\n if dictionary is None or not isinstance(dictionary, dict):\n return None\n return dictionary.get(key)", "def returner_base(self, key, dict):\n try:\n value = dict[key]\n except KeyError:\n value = dict[key.lower()]\n return value", "def uniform_get(sequence, index, default=None):\n\n if isinstance(sequence, abc.Mapping):\n return sequence.get(index, default)\n else:\n return sequence[index] if index < len(sequence) else default", "def get(self, key, default=None):", "def lookup(my_dict, my_key, default_value=None):\n if my_key in my_dict:\n return my_dict[my_key]\n else:\n return default_value", "def search_value(d, key, default=None):\n stack = [iter(d.items())]\n while stack:\n for k, v in stack[-1]:\n if isinstance(v, dict):\n stack.append(iter(v.items()))\n break\n elif k == key:\n return v\n else:\n stack.pop()\n return default", "def get(self, key: Any, default: Optional[Any] = None) -> Any:\n try:\n return self[key]\n except (KeyError, ValueError, IndexError):\n return default", "def get(self, key: str, default: t.Optional[object] = None) -> t.Any:\n try:\n index = self.__keys.index(str(key))\n except ValueError:\n return default\n if 0 <= index < len(self):\n return self._super_getitem_single(index)\n else:\n return default", "def get_value(self, key):\n pass", "def get_in(d, ks, default=None):\n *ks_, last = ks\n d_ = d\n\n for k in ks_:\n if type(d_) != dict or k not in d_:\n return default\n d_ = d_[k]\n\n if type(d_) == dict:\n return d_.get(last, default)\n\n return default", "def valuecall(key, atom_dict):\n if key not in atom_dict:\n return 0\n else:\n return atom_dict[key]", "def get_pydantic_error_value(data: dict, loc: tuple):\n try:\n obj = data\n for item in loc:\n obj = obj[item]\n except KeyError:\n return None\n else:\n return obj", "def _access_dict(self, d, key):\n try:\n # try to get access to the value by using the key\n value = d[key]\n return value\n except:\n # fail to access the value from the key\n # namely, the feature does not exist in the \n # feature dictionary of a specific apartment\n return None", "def get_value(obj, name):\n if isinstance(obj, dict):\n return obj.get(name)\n\n return getattr(obj, name, obj)", "def value(self, key):\n item = self.default(key)\n return self.__getSafeValue(key, item)", "def dict_key_from_item(dictionary, value):\n\n # iterate dictionary entries\n # if value equals parameter value, return key\n\n for item in dictionary.items():\n if item[1] == value:\n return item[0]", "def _get_value(o):\n return value(o, exception=False)", "def map_value(field):\n\n if is_map(field):\n return field\n return None", "def get_first_key_from_dict(ad_dict):\n\n try:\n key = list(ad_dict.keys())[0]\n\n return key\n except IndexError as exc:\n print(\"Cannot find key \" + str(exc), file=sys.stderr)\n return None", "def get(self, key: Union[Any, int]) -> Union[Any, Sequence[Any]]:\n try:\n return[key]\n except KeyError:\n return self.default_factory", "def get(aMap, key, default=None):\n\ti, k, v = get_slot(aMap, key, default)\n\treturn v", "def get(self, item, default=NO_DEFAULT):\n try:\n return util.annotate(self.dict_[item])\n except KeyError:\n if default is NO_DEFAULT:\n raise\n return default", "def __getitem__(self, key):\n return self.__values.__getitem__(key)", "def at_key(a_dict, key):\n\treturn a_dict[key]", "def __getitem__(self, key):\n return self.keyvaluepair_set.get(key=key).value", "def get(self, key: K) -> Optional[V]:\n return self.mget([key])[0]", "def 
get(aMap, key, default=None):\n\ti, k, v = get_slot(aMap, key, default=default)\n\treturn v", "def get(self, item, default=None):\n return self.as_dict().get(item, default)", "def first(data, key):\n for i in data:\n if key(i):\n return i\n return None", "def getValue(self, *args, **kwargs):\n if self._restriction_type == \"dict_key\":\n value = kwargs.pop(\"mapped\", False)\n if value:\n return self._restriction_arg[self.__str__()][\"value\"]\n return self", "def getValue(self,value):\n if value in self.header.keys():\n return self.header[value]\n if value in self.subintinfo.keys():\n return self.subintinfo[value][-1]\n if self.params is None:\n return None\n return self.params.get(value) #will return None if non-existent", "def get(self, key, default=None):\n return self.metadata_dict.get(key, default)", "def meta_value(request_object, dictkey):\n \n try:\n val = request_object.META[dictkey]\n except: # Exception as ex:\n val = ''\n return val", "def _strict_get_value(item: JsonObject, *keys: str) -> Any:\n try:\n val = item\n for key in keys:\n if isinstance(val, dict):\n val = val[key]\n else:\n raise KeyError(f\"Access path {keys} leads to a non-dict object.\")\n except KeyError:\n raise KeyError(f\"Key '{keys}' does not exist in all items. Try 'strict=False'.\")\n else:\n return val", "def _get_tagged_value(self, key):\n return self._tagged_values_dict[key]", "def get(aMap, key, default=None):\n\t#assigns variables to the same values we received from the get_slot function\n\t#index of the slot, the key and the value it found.\n\ti, k, v = get_slot(aMap, key, default=default)\n\t#but all we care about is the value, so that's all we return\n\t#this is _basically_ the same thing as get_slot, but since most people\n\t#only care about the value from this kind of function, that's all we return\n\treturn v", "def get_value(o, object_name, value_name, value_type):\n try:\n return o[value_name]\n except KeyError:\n l.warning(\"The field {} doesn't exists in {}: {}\".format(value_name, object_name, ujson.dumps(o)))\n return value_type()", "def get_value(self, key: str) -> Any:\r\n if key is None:\r\n return self.data\r\n try:\r\n return self.data[key]\r\n except KeyError:\r\n return None", "def __getitem__(self, key: Union[Tuple[str, T], str]) -> Union[str, T]:\n if isinstance(key, tuple):\n return self.get(key[0], default=key[1])\n else:\n return self.get(key)", "def getValue(name, default=None):", "def return_xml_dict_entry_value(self,dict_entry, dict_entry_position):\n\tvalue = self.subtitle_dict[dict_entry][dict_entry_position]\n\treturn value[0]", "def first(self, key):\n # Look through the JSON cache\n for name, value in self._json_cache:\n if key == name:\n return value\n\n # Exhaustion\n else:\n return None", "def get(self, key, default=None):\n try:\n return self.__getitem__(key)\n except ValueError:\n if default is not None:\n return default\n else:\n raise", "def _single_getitem(self, key):\n return getattr(self._cpp_obj, self._getter)(key)", "def get_dict_as_tuple(d):\n for k, v in six.iteritems(d):\n return k, v\n return None", "def __getitem__(self, value):\n return self.d.get(value, 0)", "def when(x: Any, d: Dict) -> Dict:\n return d if x else {}", "def get_value(self, key):\n try:\n return self.map[key]\n except KeyError:\n raise KeyError('key is not in map')", "def key_from_val(dictionary: dict, value) -> str:\r\n\r\n val = None\r\n for key, val in dictionary.items():\r\n if val == value:\r\n return key\r\n raise KeyError(f\"No key found for value {value}\")" ]
[ "0.7501027", "0.7214503", "0.7051891", "0.70475656", "0.7027528", "0.69873375", "0.6776066", "0.67013735", "0.66397923", "0.66397923", "0.66397923", "0.66397923", "0.66311496", "0.6604605", "0.65671986", "0.6557712", "0.6530084", "0.6483239", "0.6474412", "0.642642", "0.6372216", "0.6334062", "0.6325784", "0.63173753", "0.6282186", "0.6244082", "0.6199477", "0.6194648", "0.6184051", "0.6152735", "0.6146838", "0.6142019", "0.61303526", "0.61183834", "0.6108903", "0.60987586", "0.6098744", "0.60982627", "0.60942626", "0.6082064", "0.60761243", "0.6047169", "0.5999741", "0.5990274", "0.5983537", "0.5965642", "0.59640104", "0.59617424", "0.59584934", "0.5952708", "0.5949682", "0.5948234", "0.5939451", "0.59249985", "0.5917285", "0.5914935", "0.5906421", "0.5892998", "0.5892371", "0.58923477", "0.587462", "0.5868827", "0.5864639", "0.58571506", "0.5856276", "0.5837785", "0.5831505", "0.582825", "0.5827689", "0.5827486", "0.5811905", "0.5811743", "0.57914054", "0.57866424", "0.5783976", "0.57755035", "0.5771935", "0.5770509", "0.5767907", "0.5759672", "0.57583255", "0.5749722", "0.57487077", "0.5746333", "0.5733267", "0.57295656", "0.57216924", "0.57200617", "0.5717408", "0.5716544", "0.5708967", "0.5705261", "0.5705255", "0.57013303", "0.56979334", "0.5696266", "0.56908005", "0.5680685", "0.56787074", "0.5677667" ]
0.5995878
43
Initializer for the Symmetric Key Registration Client
def __init__(self, mqtt_state_based_provider): super(SymmetricKeyProvisioningDeviceClient, self).__init__(mqtt_state_based_provider) self._polling_machine = PollingMachine(mqtt_state_based_provider)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.key = b'FSMF73R873YM187R'\n self.signer = AES.new(self.key, AES.MODE_EAX)\n self.verifier = AES.new(self.key, AES.MODE_EAX, nonce=self.signer.nonce)", "def _init_keys(self):\n\n basic_constraints = crypto.X509Extension('basicConstraints'.encode('ascii'), True,\n 'CA:TRUE, pathlen:0'.encode('ascii'))\n serial = self._get_serial()\n pkey = self._create_pkey(self.commonname, serial)\n self._create_cert(pkey, self.commonname, serial, [basic_constraints], expire=30*365)", "def __init__(self, server, key):\n self.server = server\n self.key = key", "def create_key ():", "def load_registration_key(self):\n key = self.getfilehttps(self.epo_url + \"reqseckey.bin\")\n reqseckey_p = int(key[2:130].hex(),16)\n reqseckey_q = int(key[132:152].hex(),16)\n reqseckey_g = int(key[154:282].hex(),16)\n reqseckey_pub = int(key[284:412].hex(),16)\n reqseckey_priv = int(key[415:435].hex(),16)\n dsa_key = (reqseckey_pub, reqseckey_g, reqseckey_p, reqseckey_q, reqseckey_priv)\n self.regkey = dsa_key", "def __init__(self, key, initial_prng):\n self.cipher = key\n self.prng = initial_prng\n self.nonce = None", "def __init__(self, key_id: str, user: str, password: str):\n\n self.key_id = key_id\n self.user = user\n self.password = password\n self.con_strategy = \"unknown\"\n self.session = requests.Session()\n self.session.auth = (user, password)\n self.__fields = None\n if self.key_id == \"localhost\":\n self.local_ip_list = \"127.0.0.1\"\n self.local_ip = \"127.0.0.1\"\n self.port = \"52199\"\n self.con_strategy = \"local\"", "def __init__(self):\n try:\n context = ssl.create_default_context(\n purpose=ssl.Purpose.CLIENT_AUTH)\n context.options |= ssl.OP_NO_SSLv2\n context.options |= ssl.OP_NO_SSLv3\n context.options |= ssl.OP_NO_TLSv1\n context.options |= ssl.OP_NO_TLSv1_1\n context.options |= ssl.OP_NO_COMPRESSION\n context.verify_mode = ssl.CERT_REQUIRED\n # TODO do not use static configuration parameters\n context.load_verify_locations(cafile='/sbin/rpcsd/root.cert.pem')\n context.load_cert_chain(certfile='/sbin/rpcsd/gaps.pem')\n context.set_ciphers('AES128-SHA256')\n RPCS.context = context\n except FileNotFoundError:\n # If we can't set up TLS context, log error and exit\n LOG.error(\"Could not setup TLS context: certificate file(s) \"\n \"not present in the correct directory\")\n exit(1)", "def __init__( self, masterKey ):\n\n assert len(masterKey) == const.MASTER_KEY_SIZE\n\n checkKeys()\n\n # The random name is used to recognize previously issued tickets.\n #self.keyName = mycrypto.weak_random(NAME_LENGTH)\n\n # Initialization vector for AES-CBC.\n self.IV = mycrypto.strong_random(IV_LENGTH)\n\n # The server's actual (encrypted) protocol state.\n self.state = ProtocolState(masterKey)\n\n # AES and HMAC key to protect the ticket.\n self.symmTicketKey = AESKey\n self.hmacTicketKey = HMACKey", "def __init__(self, uid, key, initial_prng):\n self.uid = uid\n self.key = key\n Crypto1.__init__(self, key, initial_prng)", "def __init__(__self__,\n resource_name: str,\n args: CryptoKeyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self):\n self._keypair = RSA.generate(2048)\n self.public_key = self._keypair.publickey().exportKey()", "def generate_key(self):\n\n self.key = Fernet.generate_key()\n self.cryptor = Fernet(self.key)", "def __init__(self, key: bytearray):\n self.__key = key\n self.__KSA(bytearray([i for i in range(256)]))", "def __init__(self):\n self.__client = Client(verify_ssl_cert=True)\n self.__headers = {'Content-Type': 'application/json'}\n 
self.login()", "def __init__(self):\n self.charm_config = hookenv.config()\n self.kv = unitdata.kv()\n if not self.synapse_signing_key_file:\n self.synapse_signing_key_file = \"{}/{}.signing.key\".format(\n self.synapse_conf_dir, self.get_server_name()\n )", "def __init__(self):\n publicKeyFileName = \"serverPublicKey\"\n privateKeyFileName = \"serverPrivateKey.pem\"\n try:\n f = open(privateKeyFileName, 'rb')\n self.keys = RSA.importKey(f.read())\n except:\n self.keys = RSA.generate(1024)\n self.publickey = self.keys.publickey()\n # export public and private keys\n privHandle = open(privateKeyFileName, 'wb')\n privHandle.write(self.keys.exportKey('PEM'))\n privHandle.close()\n \n pubHandle = open(publicKeyFileName, 'wb')\n pubHandle.write(self.keys.publickey().exportKey())\n pubHandle.close()\n self.publickey = self.keys.publickey()", "def init_connection(session):\n user = session.user\n sock = session.sock\n rnd = b64encode(get_random_bytes(config.SECURE_CHANNEL_KEY_SIZE_BYTES))\n common.send_msg(sock, {\n kk.typ: kk.init_conn,\n kk.user: user,\n kk.nonce: rnd.decode()\n })\n resp = common.recv_message(sock)\n if resp[kk.typ] != kk.init_key:\n print('Big bad happen.')\n exit(1)\n if messaging.common.check_msg_sig(session, resp, extra=rnd) != True:\n print('Invalid server signature while initiating connection!')\n exit(2)\n key = messaging.common.pkc_decrypt(\n b64decode(resp[kk.key]), session.encryption_key)\n session.symkey = key", "def __init__(self):\n self._api_key = os.environ.get('IDCF_DNS_API_KEY')\n self._secret_key = os.environ.get('IDCF_DNS_SECRET_KEY')", "def __init__(__self__, *,\n identity_client_id: Optional[str] = None,\n key_name: Optional[str] = None,\n key_vault_uri: Optional[str] = None,\n key_version: Optional[str] = None):\n if identity_client_id is not None:\n pulumi.set(__self__, \"identity_client_id\", identity_client_id)\n if key_name is not None:\n pulumi.set(__self__, \"key_name\", key_name)\n if key_vault_uri is not None:\n pulumi.set(__self__, \"key_vault_uri\", key_vault_uri)\n if key_version is not None:\n pulumi.set(__self__, \"key_version\", key_version)", "def __init__(self):\n self.public_key = None\n self._private_key = None", "def generate_symmetric_key():\n return Fernet.generate_key()", "def test_create_key():\n\n assert symmetric.create_key() != \"\"", "def __init__(self, key):\r\n self._key = key\r\n self._authenticated = Deferred()", "def __init__(self, session=None, key=None, salt=b\"\", variables_to_sign=None):\n super().__init__() # Yes, I know that this currently doesn't do anything.\n self.session = session\n self.key = key or Session.SECRET\n self.salt = salt\n self.variables_to_sign = variables_to_sign or []\n assert \"_signature\" not in self.variables_to_sign", "def __init__(self,\n master_secret=None,\n private_key=None,\n private_key_file=None,\n public_key=None,\n public_key_file=None,\n algorithm='HS256',\n expiration_delta=datetime.timedelta(hours=6),\n leeway=0,\n verify_expiration=True,\n issuer=None,\n auth_header_prefix='JWT',\n userid_claim='sub'\n ):\n self.master_secret = master_secret\n self.private_key = private_key\n self.private_key_file = private_key_file\n self.public_key = public_key\n self.public_key_file = public_key_file\n self.algorithm = algorithm\n self.expiration_delta = expiration_delta\n self.leeway = leeway\n self.verify_expiration = verify_expiration\n self.issuer = issuer\n self.auth_header_prefix = auth_header_prefix\n self.userid_claim = userid_claim", "def __init__(self, pubkey=None):", "def 
provider_init(self) -> str:\n return self.context.get(\"/ckks/provider/init\", None,\n \"CKKS:: failed init crypto\"\n )", "def initialize(self):\r\n if self.key_gen is None:\r\n self.key = random_string(self.key_len)\r\n else:\r\n self.key = self.key_gen()\r\n return self.key", "def __init__(self, username, password):\n self.username = username\n self.password = password\n self.privkey = None\n\n # sets self.privkey\n self.__set_or_create_key_if_not_exist()", "def __init__(self, key = None):\n self.key = key\n self.response_format = 'json'\n \n if self.key is None:\n raise NoAPIKeyException('Warning: Missing API Key. Please visit ' + API_SIGNUP_PAGE + ' to register for a key.')", "def __init__(self, cust_key):\n\n # Call the base class constructor to pass in the base URL\n super().__init__(base_url=\"https://s-platform.api.opendns.com/1.0\")\n\n # Store the API key for use as a query parameters later\n self.auth_params = {\"customerKey\": cust_key}", "def __init__(self, basekey=\"\"):\n self.basekey = basekey", "def initialize_client():\n logging.info('Initializing Sendgrid provider')\n sendgrid_authentication, sendgrid_username = get_provider_credentials('sendgrid') \n sendgrid_provider = SendGridProvider(sendgrid_authentication, sendgrid_username)\n\n logging.info('Initializing Mailgun provider')\n mailgun_authentication, mailgun_domain = get_provider_credentials('mailgun')\n mailgun_provider = MailGunProvider(mailgun_authentication, mailgun_domain)\n\n logging.info('Registering providers')\n client.register_provider(sendgrid_provider, 10)\n client.register_provider(mailgun_provider, 20)", "def init(x_in):\n global public_keys, secret_keys, x\n x = func.get_bits(x_in)\n\n public_keys, secret_keys = [], []\n\n elgamal.init_g_p_q()\n for i in range(3):\n create_keys(i)", "def __init__(self, negotiate_flags, exported_session_key, source=\"client\"):\n self.negotiate_flags = negotiate_flags\n self.exported_session_key = exported_session_key\n self.outgoing_seq_num = 0\n self.incoming_seq_num = 0\n self._source = source\n self._client_sealing_key = compkeys.get_seal_key(self.negotiate_flags, exported_session_key,\n SignSealConstants.CLIENT_SEALING)\n self._server_sealing_key = compkeys.get_seal_key(self.negotiate_flags, exported_session_key,\n SignSealConstants.SERVER_SEALING)\n\n self.outgoing_handle = None\n self.incoming_handle = None\n self.reset_rc4_state(True)\n self.reset_rc4_state(False)\n\n if source == \"client\":\n self.outgoing_signing_key = compkeys.get_sign_key(exported_session_key, SignSealConstants.CLIENT_SIGNING)\n self.incoming_signing_key = compkeys.get_sign_key(exported_session_key, SignSealConstants.SERVER_SIGNING)\n elif source == \"server\":\n self.outgoing_signing_key = compkeys.get_sign_key(exported_session_key, SignSealConstants.SERVER_SIGNING)\n self.incoming_signing_key = compkeys.get_sign_key(exported_session_key, SignSealConstants.CLIENT_SIGNING)\n else:\n raise ValueError(\"Invalid source parameter %s, must be client \"\n \"or server\" % source)", "def __init__(self, p=P_NIST, g=G_NIST):\n dh_a = self._random_int(p)\n dh_A = gmp.powmod(g, dh_a, p)\n\n self._dh_p = p\n self._dh_g = g\n self._secret_key = dh_a\n self._public_key = dh_A", "def sendInit(self, connID, key):\r\n assert len(connID) == 16\r\n assert len(key) == 16\r\n\r\n self.sendString(connID + key)", "def staticstatic(self, private_key, public_key):", "def __init__(self, key, data):\n hmac_result = hmac.new(key, data, hashlib.sha512).digest()\n\n self.private_key = PrivateKey(hmac_result[0:32])\n 
self.chain_code = hmac_result[32:]", "def __init__(self):\n super(TLS12AuthenticationSuite, self).__init__()\n self._protocol = ssl.PROTOCOL_TLSv1_2\n self._ciphers = ':'.join((\n 'AES128-SHA256',\n 'AES256-SHA256',\n 'DH-DSS-AES256-SHA256',\n 'DH-DSS-AES128-SHA256',\n 'DH-RSA-AES128-SHA256',\n 'DHE-DSS-AES128-SHA256',\n 'DHE-RSA-AES128-SHA256',\n 'DH-DSS-AES256-SHA256',\n 'DH-RSA-AES256-SHA256',\n 'DHE-DSS-AES256-SHA256',\n 'DHE-RSA-AES256-SHA256',\n 'ECDH-ECDSA-AES128-SHA256',\n 'ECDH-ECDSA-AES256-SHA256',\n 'ECDHE-ECDSA-AES128-SHA256',\n 'ECDHE-ECDSA-AES256-SHA384',\n 'ECDH-RSA-AES128-SHA256',\n 'ECDH-RSA-AES256-SHA384',\n 'ECDHE-RSA-AES128-SHA256',\n 'ECDHE-RSA-AES256-SHA384',\n 'ECDHE-ECDSA-AES128-GCM-SHA256',\n 'ECDHE-ECDSA-AES256-GCM-SHA384',\n 'ECDHE-ECDSA-AES128-SHA256',\n 'ECDHE-ECDSA-AES256-SHA384',\n ))", "def generate_keys(self):\n\n # TODO: Store keys encrypted\n rsa1 = RsaPrivateKey.Generate()\n self.sign_private = str(rsa1)\n self.sign_public = str(rsa1.public_key)\n\n rsa2 = RsaPrivateKey.Generate()\n self.crypt_private = str(rsa2)\n self.crypt_public = str(rsa2.public_key)", "def __init__(self, key, secret, token=None, token_secret=None, subdomain=None):\n\n self.key = key\n self.secret = secret\n self.token = token\n self.token_secret = token_secret\n self.subdomain = subdomain or self.DOMAIN", "def __init__(self, sk=None, n=None, h=None):\r\n if sk:\r\n self.n = sk.n\r\n self.h = sk.h\r\n elif n and h:\r\n self.n = n\r\n self.h = h\r\n else:\r\n raise Exception(\"Public Key construction failed: insufficient/wrong arguments\")\r\n\r\n self.signature_bound = Params[self.n][\"sig_bound\"]\r\n self.sig_bytelen = Params[self.n][\"sig_bytelen\"]", "def __init__(self, accesskey):\n self.accesskey = accesskey\n self.UpdateFromServer()", "def private_key(self):", "def __init__(self):\n # Initialize key variables\n self.reserved = '_SYSTEM_RESERVED_'\n self.config = configuration.Config()", "def __init__(self, private_key):\n self._sk = ed25519.Ed25519PrivateKey.from_private_bytes(private_key.bytes)", "def setup(self):\n self.build_serverkeyhash()\n self.build_agent_pubkey()\n self.load_registration_key()", "def __init__(self, key_info):\n if (key_info.type != client_pb2.KeyInfo.ECDSA):\n raise error.UnsupportedAlgorithmError(\n \"Expected ECDSA key, but got key type %d\" % key_info.type)\n\n # Will raise a PemError on invalid encoding\n self.__der, _ = pem.from_pem(key_info.pem_key, self.__READ_MARKERS)\n try:\n self.__key = ecdsa.VerifyingKey.from_der(self.__der)\n except ecdsa.der.UnexpectedDER as e:\n raise error.EncodingError(e)", "def create_keys(self):\n crypto_tool = CryptoTools()\n # creating RSA keys for the signer user\n public_key, private_key = crypto_tool.create_key_with_entropy()\n self.priv_key = crypto_tool.get_pem_format(private_key).decode(\"utf-8\")\n self.pub_key = crypto_tool.get_pem_format(public_key).decode(\"utf-8\")", "def __init__(__self__, *,\n client_id: pulumi.Input[str],\n secret: pulumi.Input[str]):\n pulumi.set(__self__, \"client_id\", client_id)\n pulumi.set(__self__, \"secret\", secret)", "def __init__(self):\n #print (\"Object created\")\n self.apikey='acc_4fc1a435b3188b5'\n self.secret = 'f49c4be14a048d5de7e7f6c564b52022'\n self.fileToIdMap = {}", "def generate_keystream(self):", "def __init__(self, key):\n self.key = key\n self.BLOCK_SIZE = 16", "def __init__(self, key):\n self.bs = 16\n self.key = hashlib.sha256(key.encode()).digest()", "def __init__(self, auth_key, auth_secret):\n\n self._auth_key = auth_key\n self._auth_secret = 
auth_secret", "def __init__(self, key_path, pass_path, password = '',\n encoding = 'utf-8'):\n self.encoding = encoding\n self.key_path = key_path\n self.pass_path = pass_path\n self.password = password", "def __init__(self, key=None):\n\n self.key = key\n self.cryptor = None\n self.file_ext_targets = ['txt']", "def __init__(self, **options):\n\n super().__init__(**options)\n\n self._private_key = None\n self._public_key = None\n\n self._load_keys(**options)", "def test_init(self):\n DummyCryptographicObject()", "def genKey(self, otherKey):\n self.sharedSecret = self.genSecret(self.privateKey, otherKey)\n #print(\"Shared secret:\")\n #print(self.sharedSecret)\n s = hashlib.sha256()\n s.update(bytes(str(self.sharedSecret).encode()))\n self.key = s.digest()", "def create_server_certs_enc():\n global server_keystore, config\n\n same_enc_sign_cert = config[\"config\"][\"same_enc_sign_cert\"]\n if same_enc_sign_cert:\n dn = \"/CN=server certificate RSA\"\n else:\n dn = \"/CN=server certificate encryption RSA\"\n key_pair_rsa = create_csr(dn)\n server_keystore[\"key\"] = key_pair_rsa[\"key\"]\n san = [f'URI.1 = {uuid.uuid4().urn}']\n server_keystore[\"crt\"] = sign_csr(key_pair_rsa[\"pub\"], dn, san)", "def __init__(self, globalKey, publicKey, resourceName, **rest):\n super(SshKey, self).__init__({\n \"globalKey\": globalKey,\n \"publicKey\": publicKey,\n \"resourceName\": resourceName,\n }, **rest)", "def __init__(self, config, **kwargs):\n validate_config(config, signer=kwargs.get('signer'))\n if 'signer' in kwargs:\n signer = kwargs['signer']\n else:\n signer = Signer(\n tenancy=config[\"tenancy\"],\n user=config[\"user\"],\n fingerprint=config[\"fingerprint\"],\n private_key_file_location=config.get(\"key_file\"),\n pass_phrase=get_config_value_or_default(config, \"pass_phrase\"),\n private_key_content=config.get(\"key_content\")\n )\n\n base_client_init_kwargs = {\n 'regional_client': True,\n 'service_endpoint': kwargs.get('service_endpoint'),\n 'timeout': kwargs.get('timeout'),\n 'base_path': '/20160918',\n 'skip_deserialization': kwargs.get('skip_deserialization', False)\n }\n self.base_client = BaseClient(\"identity\", config, signer, identity_type_mapping, **base_client_init_kwargs)\n self.retry_strategy = kwargs.get('retry_strategy')", "def genKeys():\r\n (pub, priv) = rsa.newkeys(256)\r\n context = {\r\n 'pub': pub,\r\n 'priv': priv\r\n }\r\n return context", "def gen_heat_client(self):\n\n print \"\\t* Generating heat client\"\n # request a new auth token from keystone\n keystone = ksclient.Client(auth_url=self.auth_url,\n username=self.username,\n password=self.password,\n tenant_name=self.tenant_name,\n region_name=self.region_name)\n auth_token = keystone.auth_token\n heat_url = 'http://%s:8004/v1/%s' % (self.ip, self.tenant_id)\n\n # instantiate client\n self.heatclient = hClient('1', endpoint=heat_url, token=auth_token)", "def init():\n\n return \"Welcome to SIX SIGMA, this api is only available to SIX SIGMA developers\"", "def __init__(self):\n\n self.host=\"localhost\"\n \"\"\"\n :annotation = \"localhost\":\n defaults to \"localhost\". 
At this time MumbleClient is ipv4 only\n \"\"\"\n\n self.port=64738\n self.nickname=\"MumblePythonBot\"\n self.SSLOptions=CertificateOptions()\n self.password=None", "def __init__(self, user, password, device):\n self.user = user\n self.password = password\n self.device = device\n self.session = requests.session()\n\n # Hard-coded auth seed\n seed = \"oZ7QE6LcLJp6fiWzdqZc\"\n\n # Get auth tokens\n sha1p = sha.new(user.lower() + password + seed)\n sha1p = sha1p.hexdigest()\n\n auth_server = \"vera-us-oem-autha11.mios.com\"\n\n url = \"https://%s/autha/auth/username/%s?SHA1Password=%s&PK_Oem=1\" % \\\n (auth_server, user.lower(), sha1p)\n\n response = self.session.get(url).json()\n\n self.server_account = response[\"Server_Account\"]\n self.auth_token = response[\"Identity\"]\n self.auth_sig = response[\"IdentitySignature\"]\n\n # Get account number\n account_info = json.loads(base64.b64decode(self.auth_token))\n pk_account = account_info[\"PK_Account\"]\n sys.stderr.write(\"Account number: %s\\n\" % pk_account)\n\n # Get session token for server account\n session_token = self.get_session_token(self.server_account)\n\n # Get devices\n headers = { \"MMSSession\": session_token }\n url = \"https://%s/account/account/account/%s/devices\" % \\\n (self.server_account, str(pk_account))\n devices = self.session.get(url, headers=headers).json()\n\n # Work out server device\n server_device = None\n for i in devices[\"Devices\"]:\n if i[\"PK_Device\"] == device:\n server_device = i[\"Server_Device\"]\n if server_device == None:\n raise RuntimeError, \"Device %s not known.\\n\" % device\n \n sys.stderr.write(\"Server device: %s\\n\" % server_device)\n\n # Get session token on server_device\n session_token = self.get_session_token(server_device)\n\n # Get server_relay\n headers = { \"MMSSession\": session_token }\n\n url = \"https://\" + server_device + \"/device/device/device/\" + \\\n str(device)\n\n relay_info = self.session.get(url, headers=headers).json()\n\n self.relay = relay_info[\"Server_Relay\"]\n\n sys.stderr.write(\"Server relay: %s\\n\" % self.relay)\n\n # Get session token on server_relay\n self.session_token = self.get_session_token(self.relay)\n\n Vera.__init__(self)\n\n sys.stderr.write(\"Connected to remote device.\\n\")", "def __init__(self, alg, key):\n self.alg = alg\n self.key = key", "def __init__(self, assoc_handle, signed, invalidate_handle=None):\n self.assoc_handle = assoc_handle\n self.signed = signed\n self.invalidate_handle = invalidate_handle\n self.namespace = OPENID2_NS", "def __init__(__self__,\n resource_name: str,\n args: ServerCertificateArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self) -> None:\n self._public_id = 'daf1fbca87e94c9db377c98570e32ece'\n self._secret_id = '1a674398d1bb44859ccaa4488df1aaa9'\n self._redirect_uri = 'https://pass-post.netlify.app'", "def __init__ (self, key = None, password = None):\n self.KEY = key or 'testapi'\n self.PASSWORD = password or 'testpass'\n self.URL = 'https://api.internet.bs/'\n # assume we use test credential if none were provided\n if not key or not password:\n self.URL = 'https://testapi.internet.bs'", "def init_pki():\n global server_keystore\n\n if pki_is_persistent:\n if not Path(pki_dir).is_dir():\n create_pki()\n else:\n print(f'Do nothing, {pki_dir} already exists')\n else:\n if Path(pki_dir).is_dir():\n shutil.rmtree(pki_dir)\n create_pki()\n with open(server_key_files[\"ca\"]) as crt:\n server_keystore[\"ca\"] = crt.read()\n crt.close()", "def newKeyGenerate():\n generate()\n 
return '', 204", "def __init__(self, \r\n x509Fingerprint=None,\r\n checkResumedSession=False):\r\n\r\n self.x509Fingerprint = x509Fingerprint\r\n self.checkResumedSession = checkResumedSession", "def authenticate(self):\n # Receive public key from server\n message = self.receive()\n # Initialize RSA with public key of server\n self.secret.init_rsa(public_key=message)\n # Initialize AES\n self.secret.init_aes()\n # Encrypt AES key & nonce\n payload = self.secret.encrypt_rsa(self.secret.export_aes_key())\n # Send encrypted AES key & nonce pair to server\n self.send(payload)\n self.secret.ready = True", "def __init__(self, rsa_key):\r\n if isinstance(rsa_key, tuple):\r\n self.keypair = Crypto.PublicKey.RSA.construct(rsa_key)\r\n else:\r\n self._InitFromString(rsa_key)", "def __init__(\n self,\n key: Optional[OpenSSL.crypto.PKey] = None,\n cert: Optional[OpenSSL.crypto.X509] = None,\n ):\n self.key = key if key else new_RSA()\n self.cert = cert if cert else new_X509()\n\n # Creates CA.\n self.cert.set_pubkey(self.key)\n self.cert.add_extensions(\n [\n OpenSSL.crypto.X509Extension(b\"basicConstraints\", True, b\"CA:TRUE, pathlen:0\"),\n OpenSSL.crypto.X509Extension(b\"keyUsage\", True, b\"keyCertSign, cRLSign\"),\n OpenSSL.crypto.X509Extension(b\"subjectKeyIdentifier\", False, b\"hash\", subject=self.cert),\n ],\n )\n self.cert.sign(self.key, \"sha256\")", "def __init__(self, proxy_only = False):\n self.key_file = None\n self.cert_file = None\n self.ca_path = None\n self.key_pass = None\n\n path = os.getenv(\"X509_CERT_DIR\", None)\n if path and os.path.exists(path):\n self.ca_path = path\n\n if not self.ca_path:\n path = \"/etc/grid-security/certificates\"\n if os.path.exists(path):\n self.ca_path = path\n\n path = os.getenv(\"X509_USER_PROXY\", None)\n if path and os.path.exists(path):\n self.key_file = self.cert_file = path\n\n if not self.key_file:\n path = os.getenv(\"X509_USER_KEY\", None)\n if path and os.path.exists(path):\n self.key_file = path\n\n if not self.cert_file:\n path = os.getenv(\"X509_USER_CERT\", None)\n if path and os.path.exists(path):\n self.cert_file = path\n\n if not self.key_file:\n path = os.getenv(\"HOME\") + \"/.globus/userkey.pem\"\n if os.path.exists(path):\n self.key_file = path\n\n if not self.cert_file:\n path = os.getenv(\"HOME\") + \"/.globus/usercert.pem\"\n if os.path.exists(path):\n self.cert_file = path\n\n if not self.ca_path or not os.path.exists(self.ca_path):\n raise RuntimeError(\"no certificate directory found\")\n\n if not self.key_file or not os.path.exists(self.key_file):\n raise RuntimeError(\"no certificate private key file found\")\n\n if not self.cert_file or not os.path.exists(self.cert_file):\n raise RuntimeError(\"no certificate public key file found\")\n\n if not proxy_only and self.key_file != self.cert_file:\n self.key_pass = getpass(\"Password for %s: \" % self.key_file)", "def do_server(wrapping_key_public):\n secret = os.urandom(32)\n logging.info(f'secret: {hexlify(secret)}')\n\n ref_path = 'server-secret-for-reference.bin'\n logging.debug(f'creating {ref_path}')\n with open(ref_path, 'wb') as f:\n f.write(secret)\n\n # generate IV\n iv = os.urandom(12)\n logging.debug(f'iv: {hexlify(iv)}')\n\n # generate 256-bit AES encryption key\n ephemeral_key = os.urandom(32)\n logging.debug(f'ephemeral_key: {hexlify(ephemeral_key)}')\n\n # xor_mask = os.urandom(32)\n xor_mask = b'\\x00' * 32\n logging.debug(f'xor_mask: {hexlify(xor_mask)}')\n\n # xor with mask to get transportKey\n transport_key = bytes([ephemeral_key[i] ^ xor_mask[i] for i 
in range(32)])\n logging.debug(f'transport_key: {hexlify(transport_key)}')\n\n logging.debug(f'wrapping the transport key with the public RSA wrapping key')\n encrypted_transport_key = wrap(wrapping_key_public, transport_key)\n\n logging.debug(f'encrypting the secure secret with the AES ephermeral key')\n encrypted_secret, tag = encrypt(ephemeral_key, iv, secret)\n\n logging.debug(f'encrypted_secret: {hexlify(encrypted_secret)}')\n logging.debug(f'tag: {hexlify(tag)}')\n\n authorizationList = AuthorizationList()\n\n key_description = KeyDescription()\n key_description['keyFormat'] = KM_KEY_FORMAT_RAW\n key_description['keyParams'] = authorizationList\n\n secure_key_wrapper = SecureKeyWrapper()\n secure_key_wrapper['version'] = 0\n secure_key_wrapper['encryptedTransportKey'] = encrypted_transport_key\n secure_key_wrapper['initializationVector'] = iv\n secure_key_wrapper['keyDescription'] = key_description\n secure_key_wrapper['encryptedKey'] = encrypted_secret\n secure_key_wrapper['tag'] = tag\n\n encoded_secure_key_wrapper = encode_secure_key_wrapper(secure_key_wrapper)\n\n return encoded_secure_key_wrapper, xor_mask", "def __init__(__self__, *,\n kms_encryption_config: pulumi.Input['FhirDatastoreKmsEncryptionConfigArgs']):\n pulumi.set(__self__, \"kms_encryption_config\", kms_encryption_config)", "def __init__(self):\n self.host = CONF.zvm.zvm_xcat_server\n self.port = 443\n self.conn = HTTPSClientAuthConnection(self.host, self.port,\n CONF.zvm.zvm_xcat_ca_file,\n timeout=CONF.zvm.zvm_xcat_connection_timeout)", "def __init__(self, host = '', port = IMAP4_TLS_PORT,\r\n username=None, password=None,\r\n certChain=None, privateKey=None,\r\n checker=None,\r\n settings=None):\r\n\r\n ClientHelper.__init__(self,\r\n username, password,\r\n certChain, privateKey,\r\n checker,\r\n settings)\r\n\r\n IMAP4.__init__(self, host, port)", "def __init__(__self__, *,\n certificates: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n secrets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n storage: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if certificates is not None:\n pulumi.set(__self__, \"certificates\", certificates)\n if keys is not None:\n pulumi.set(__self__, \"keys\", keys)\n if secrets is not None:\n pulumi.set(__self__, \"secrets\", secrets)\n if storage is not None:\n pulumi.set(__self__, \"storage\", storage)", "def __init__(self, host, access_key, secret_key):\n self._host = host\n self._access_key = access_key\n self._secret_key = secret_key", "def __init__(self, host, access_key, secret_key):\n self._host = host\n self._access_key = access_key\n self._secret_key = secret_key", "def __init__(self, host, access_key, secret_key):\n self._host = host\n self._access_key = access_key\n self._secret_key = secret_key", "def __init__(self):\n _hypre.HypreIdentity_swiginit(self, _hypre.new_HypreIdentity())", "def authenticator():", "def __init__(self, generator=2, group=17, keyLength=540):\n min_keyLength = 180\n\n default_generator = 2\n valid_generators = [2, 3, 5, 7]\n\n # Sanity check fors generator and keyLength\n if(generator not in valid_generators):\n print(\"Error: Invalid generator. Using default.\")\n self.generator = default_generator\n else:\n self.generator = generator\n\n if(keyLength < min_keyLength):\n print(\"Error: keyLength is too small. 
Setting to minimum.\")\n self.keyLength = min_keyLength\n else:\n self.keyLength = keyLength\n\n self.prime = self.getPrime(group)\n\n self.privateKey = self.genPrivateKey(keyLength)\n self.publicKey = self.genPublicKey()", "def __init__(self):\n self._key = ''", "def get_symetric_key():\n\treturn os.urandom(32)", "def connect_syndicate( username=CONFIG.SYNDICATE_OPENCLOUD_USER, password=CONFIG.SYNDICATE_OPENCLOUD_PASSWORD, user_pkey_pem=CONFIG.SYNDICATE_OPENCLOUD_PKEY ):\n debug = True \n if hasattr(CONFIG, \"DEBUG\"):\n debug = CONFIG.DEBUG\n \n client = syntool.Client( username, CONFIG.SYNDICATE_SMI_URL,\n password=password,\n user_pkey_pem=user_pkey_pem,\n debug=debug )\n\n return client", "def __init__(self, key=None):\n self._key = key or os.environ['HERE_API_KEY']", "def __init__(self, output_key: bytes, input_key: bytes) -> None:\n super().__init__(output_key, input_key)\n self.send_seqno = randrange(0x100000000, 0x1FFFFFFFF)", "def __init__(\n self, dev_id, local_key, protocol_version, enable_debug, on_connected, listener\n ):\n super().__init__()\n self.loop = asyncio.get_running_loop()\n self.set_logger(_LOGGER, dev_id, enable_debug)\n self.id = dev_id\n self.local_key = local_key.encode(\"latin1\")\n self.real_local_key = self.local_key\n self.dev_type = \"type_0a\"\n self.dps_to_request = {}\n\n if protocol_version:\n self.set_version(float(protocol_version))\n else:\n # make sure we call our set_version() and not a subclass since some of\n # them (such as BulbDevice) make connections when called\n TuyaProtocol.set_version(self, 3.1)\n\n self.cipher = AESCipher(self.local_key)\n self.seqno = 1\n self.transport = None\n self.listener = weakref.ref(listener)\n self.dispatcher = self._setup_dispatcher(enable_debug)\n self.on_connected = on_connected\n self.heartbeater = None\n self.dps_cache = {}\n self.local_nonce = b\"0123456789abcdef\" # not-so-random random key\n self.remote_nonce = b\"\"", "def __init__(__self__, *,\n key_ring_id: pulumi.Input[str],\n crypto_key_backend: Optional[pulumi.Input[str]] = None,\n crypto_key_id: Optional[pulumi.Input[str]] = None,\n destroy_scheduled_duration: Optional[pulumi.Input[str]] = None,\n import_only: Optional[pulumi.Input[bool]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n next_rotation_time: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n purpose: Optional[pulumi.Input['CryptoKeyPurpose']] = None,\n rotation_period: Optional[pulumi.Input[str]] = None,\n skip_initial_version_creation: Optional[pulumi.Input[bool]] = None,\n version_template: Optional[pulumi.Input['CryptoKeyVersionTemplateArgs']] = None):\n pulumi.set(__self__, \"key_ring_id\", key_ring_id)\n if crypto_key_backend is not None:\n pulumi.set(__self__, \"crypto_key_backend\", crypto_key_backend)\n if crypto_key_id is not None:\n pulumi.set(__self__, \"crypto_key_id\", crypto_key_id)\n if destroy_scheduled_duration is not None:\n pulumi.set(__self__, \"destroy_scheduled_duration\", destroy_scheduled_duration)\n if import_only is not None:\n pulumi.set(__self__, \"import_only\", import_only)\n if labels is not None:\n pulumi.set(__self__, \"labels\", labels)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if next_rotation_time is not None:\n pulumi.set(__self__, \"next_rotation_time\", next_rotation_time)\n if project is not None:\n pulumi.set(__self__, \"project\", project)\n if purpose is not None:\n pulumi.set(__self__, 
\"purpose\", purpose)\n if rotation_period is not None:\n pulumi.set(__self__, \"rotation_period\", rotation_period)\n if skip_initial_version_creation is not None:\n pulumi.set(__self__, \"skip_initial_version_creation\", skip_initial_version_creation)\n if version_template is not None:\n pulumi.set(__self__, \"version_template\", version_template)", "def __init__(__self__, *,\n primary_auth_key_hash: pulumi.Input[str],\n secondary_auth_key_hash: pulumi.Input[str]):\n pulumi.set(__self__, \"primary_auth_key_hash\", primary_auth_key_hash)\n pulumi.set(__self__, \"secondary_auth_key_hash\", secondary_auth_key_hash)" ]
[ "0.64860773", "0.6398895", "0.63749385", "0.62866133", "0.6212949", "0.6190224", "0.6162016", "0.61214364", "0.60898226", "0.60414624", "0.603934", "0.6026967", "0.6023296", "0.59877056", "0.59869146", "0.5985004", "0.5983398", "0.5977729", "0.5918868", "0.59038883", "0.58940583", "0.5825841", "0.5823577", "0.5813439", "0.5795364", "0.57895494", "0.5787684", "0.5768145", "0.5754658", "0.57424176", "0.57163054", "0.5711905", "0.5695762", "0.5692303", "0.5685414", "0.56764346", "0.56739795", "0.5672449", "0.56708765", "0.566442", "0.56630856", "0.566282", "0.56495667", "0.56418777", "0.5637171", "0.5631176", "0.56239814", "0.5623946", "0.5621603", "0.55998015", "0.55971056", "0.55877936", "0.55763656", "0.55709815", "0.55626935", "0.55527556", "0.5551439", "0.5550686", "0.5550031", "0.55495536", "0.5547617", "0.5535765", "0.5535372", "0.55277324", "0.55201304", "0.5518081", "0.5509284", "0.54916906", "0.5490949", "0.5489942", "0.548704", "0.54849637", "0.5482252", "0.54753613", "0.54746056", "0.54705495", "0.54683876", "0.5457509", "0.5451969", "0.54456896", "0.5442803", "0.5439905", "0.5429785", "0.5429316", "0.54263777", "0.5420488", "0.5419246", "0.5417694", "0.5417694", "0.5417694", "0.5406909", "0.54022014", "0.53957504", "0.5387877", "0.53869", "0.5382071", "0.53789544", "0.5377701", "0.5374501", "0.53630614", "0.53625095" ]
0.0
-1
Register the device with the provisioning service. This is a synchronous call, meaning that this function will not return until the registration process has completed successfully or the attempt has resulted in a failure. Before returning the client will also disconnect from the Hub. If a registration attempt is made while a previous registration is in progress it may throw an error.
def register(self): logger.info("Registering with Hub...") register_complete = Event() def on_register_complete(result=None, error=None): # This could be a failed/successful registration result from the HUB # or a error from polling machine. Response should be given appropriately if result is not None: if result.status == "assigned": logger.info("Successfully registered with Hub") else: # There be other statuses logger.error("Failed registering with Hub") if error is not None: # This can only happen when the polling machine runs into error logger.info(error) register_complete.set() self._polling_machine.register(callback=on_register_complete) register_complete.wait()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_device():\n payload = request.get_json()\n return _register_device(payload)", "def RegisterDeviceAndSendResponse(self, msg, username):\n device_id = self.GetUniqueParam('deviceid')\n if not device_id:\n return (400, 'Missing device identifier')\n\n token_info = self.server.RegisterDevice(\n device_id, msg.machine_id, msg.type, username)\n\n # Send back the reply.\n response = dm.DeviceManagementResponse()\n response.register_response.device_management_token = (\n token_info['device_token'])\n response.register_response.machine_name = token_info['machine_name']\n response.register_response.enrollment_type = token_info['enrollment_mode']\n\n return (200, response)", "def register_device(self, expiry: int) -> str:\n # pylint: disable=c0103\n sr = self._id_scope + \"%2Fregistrations%2F\" + self._device_id\n sig_no_encode = DeviceRegistration.compute_derived_symmetric_key(self._key, sr + \"\\n\" + str(expiry))\n sig_encoded = parse.quote(sig_no_encode, \"~()*!.'\")\n auth_string = \"SharedAccessSignature sr=\" + sr + \"&sig=\" + sig_encoded + \"&se=\" + str(expiry) + \"&skn=registration\"\n\n headers = {\n \"content-type\": \"application/json; charset=utf-8\",\n \"user-agent\": \"iot-central-client/1.0\",\n \"Accept\": \"*/*\",\n }\n\n if auth_string is not None:\n headers[\"authorization\"] = auth_string\n\n body = {\"registrationId\": self._device_id}\n\n uri = \"https://%s/%s/registrations/%s/register?api-version=%s\" % (\n self._dps_endpoint,\n self._id_scope,\n self._device_id,\n self._dps_api_version,\n )\n target = parse.urlparse(uri)\n\n self._logger.info(\"Connecting...\")\n self._logger.info(\"URL: \" + target.geturl())\n self._logger.info(\"body: \" + json.dumps(body))\n print(\"headers: \" + json.dumps(headers))\n\n response = self.__run_put_request_with_retry(target.geturl(), body, headers)\n\n data = None\n try:\n data = response.json()\n except Exception as e:\n err = \"ERROR: non JSON is received from \" + self._dps_endpoint + \" => \" + str(response) + \" .. message : \" + str(e)\n self._logger.error(err)\n raise DeviceRegistrationError(err)\n\n if \"errorCode\" in data:\n err = \"DPS => \" + str(data)\n self._logger.error(err)\n raise DeviceRegistrationError(err)\n\n time.sleep(1)\n return self._loop_assign(data[\"operationId\"], headers)", "def register(self):\n if self.hub.is_connected:\n if self._private_key is not None:\n raise SAMPClientError(\"Client already registered\")\n\n result = self.hub.register(self.hub.lockfile[\"samp.secret\"])\n\n if result[\"samp.self-id\"] == \"\":\n raise SAMPClientError(\n \"Registration failed - samp.self-id was not set by the hub.\"\n )\n\n if result[\"samp.private-key\"] == \"\":\n raise SAMPClientError(\n \"Registration failed - samp.private-key was not set by the hub.\"\n )\n\n self._public_id = result[\"samp.self-id\"]\n self._private_key = result[\"samp.private-key\"]\n self._hub_id = result[\"samp.hub-id\"]\n\n if self._callable:\n self._set_xmlrpc_callback()\n self._declare_subscriptions()\n\n if self._metadata != {}:\n self.declare_metadata()\n\n self._is_registered = True\n\n else:\n raise SAMPClientError(\n \"Unable to register to the SAMP Hub. 
Hub proxy not connected.\"\n )", "def registerDevice(self):\n\t\tr = req.post(\"http://localhost:9090/devices?id={}&sensors={}_{}&board={}\".format(\n\t\t\tBOARD_ID,\n\t\t\tSENSOR1,\n\t\t\tSENSOR2,\n\t\t\tBOARD\n\t\t))\n\t\tprint (\"[{}] Device Registered on Room Catalog\".format(\n\t\t\tint(time.time()),\n\t\t))", "def register():\n user_gender = request.form['user_gender']\n user_age = request.form['user_age']\n sensors = {\n 'accelerometer': request.form.get('accelerometer', False),\n 'ambient_temperature': request.form.get('ambient_temperature', False),\n 'gravity': request.form.get('gravity', False),\n 'gyroscope': request.form.get('gyroscope', False),\n 'light': request.form.get('light', False),\n 'linear_accelerometer': request.form.get('linear_accelerometer', False),\n 'magnetic_field': request.form.get('magnetic_field', False),\n 'orientation': request.form.get('orientation', False),\n 'pressure': request.form.get('pressure', False),\n 'proximity': request.form.get('proximity', False),\n 'relative_humidity': request.form.get('relative_humidity', False),\n 'rotation_vector': request.form.get('rotation_vector', False),\n 'temperature': request.form.get('temperature', False)\n }\n\n device, token = create_subject(user_gender, user_age, sensors)\n\n response = jsonify(status=\"Register Success\", message=\"Your device has been registered.\",\n device=device, token=token)\n\n response.status_code = 201\n\n return response", "async def _perform_register(self):\n data = {\"username\": self.user, \"password\": self.password}\n return await self._perform_request(\"register\", data, lambda r: r.text())", "def ProcessRegister(self, msg):\n policy = self.server.GetPolicies()\n # Check the auth token and device ID.\n auth = self.CheckGoogleLogin()\n if not auth:\n return (403, 'No authorization')\n\n if ('managed_users' not in policy):\n return (500, 'Error in config - no managed users')\n username = self.server.ResolveUser(auth)\n if ('*' not in policy['managed_users'] and\n username not in policy['managed_users']):\n return (403, 'Unmanaged')\n\n return self.RegisterDeviceAndSendResponse(msg, username)", "def RegisterDevice(self, device_id, machine_id, type, username):\n dmtoken_chars = []\n while len(dmtoken_chars) < 32:\n dmtoken_chars.append(random.choice('0123456789abcdef'))\n dmtoken = ''.join(dmtoken_chars)\n allowed_policy_types = {\n dm.DeviceRegisterRequest.BROWSER: [\n 'google/chrome/user',\n 'google/chrome/extension'\n ],\n dm.DeviceRegisterRequest.USER: [\n 'google/chromeos/user',\n 'google/chrome/extension'\n ],\n dm.DeviceRegisterRequest.DEVICE: [\n 'google/chromeos/device',\n 'google/chromeos/publicaccount',\n 'google/chrome/extension',\n 'google/chromeos/signinextension'\n ],\n dm.DeviceRegisterRequest.ANDROID_BROWSER: [\n 'google/android/user'\n ],\n dm.DeviceRegisterRequest.TT: ['google/chromeos/user',\n 'google/chrome/user'],\n }\n if machine_id in KIOSK_MACHINE_IDS:\n enrollment_mode = dm.DeviceRegisterResponse.RETAIL\n else:\n enrollment_mode = dm.DeviceRegisterResponse.ENTERPRISE\n self._registered_tokens[dmtoken] = {\n 'device_id': device_id,\n 'device_token': dmtoken,\n 'allowed_policy_types': allowed_policy_types[type],\n 'machine_name': 'chromeos-' + machine_id,\n 'machine_id': machine_id,\n 'enrollment_mode': enrollment_mode,\n 'username': username,\n }\n self.WriteClientState()\n return self._registered_tokens[dmtoken]", "def register_device(project_id, credentials, device_model_id, device_id):\n base_url = '/'.join([DEVICE_API_URL, 'projects', project_id, 
'devices'])\n device_url = '/'.join([base_url, device_id])\n session = google.auth.transport.requests.AuthorizedSession(credentials)\n r = session.get(device_url)\n print(device_url, r.status_code)\n if r.status_code == 404:\n print('Registering....', end='', flush=True)\n r = session.post(base_url, data=json.dumps({\n 'id': device_id,\n 'model_id': device_model_id,\n 'client_type': 'SDK_LIBRARY'\n }))\n if r.status_code != 200:\n raise Exception('failed to register device: ' + r.text)\n print('\\rDevice registered.')", "def registerDevice(device, device_config):\n raise NotImplementedError(\"All inherited classes of DeviceRegisterer must implement registerDevice.\")", "def register_to_core(self):\n self.channel.basic_publish(exchange='', routing_key='peripheral_register', body=json.dumps({self.name: api}))", "def test_register_device(self):\n resp = self.client.post(reverse('ios-notifications-device-create'),\n {'token': self.device_token,\n 'service': self.service.id})\n\n self.assertEqual(resp.status_code, 201)\n self.assertTrue(isinstance(resp, JSONResponse))\n content = resp.content\n device_json = json.loads(content)\n self.assertEqual(device_json.get('model'), 'ios_notifications.device')", "def register_user(self):\n response = self.client.post(self.register_url, self.register_data, format='json')\n return response", "def register(self, voice=False):\n payload = {\"type\": \"register\", \"username\": self.username, \"voice\": voice}\n self._send_command(payload)", "def register(self, dbus_path, uuid, codec, capabilities):\n self._media_proxy.proxy.RegisterEndpoint(\n dbus_path,\n {\n \"UUID\": uuid,\n \"Codec\": Byte(codec),\n \"Capabilities\": Array(capabilities, signature=\"y\")\n })", "def register(self, device_token, alias=None, tags=None, badge=None):\n url = DEVICE_TOKEN_URL + device_token\n payload = {}\n if alias is not None:\n payload['alias'] = alias\n if tags is not None:\n payload['tags'] = tags\n if badge is not None:\n payload['badge'] = badge\n if payload:\n body = json.dumps(payload)\n content_type = 'application/json'\n else:\n body = ''\n content_type = None\n\n status, response = self._request('PUT', body, url, content_type)\n if not status in (200, 201):\n raise AirshipFailure(status, response)\n return status == 201", "def RegisterProduct(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_device_registration(self):\n sensor = self._get_dummy_sensor()\n responses = []\n sensor.set_response_callback(func=lambda response: responses.append(response))\n sensor.register()\n time.sleep(wait_seconds)\n for response in responses:\n print(json.loads(response.content.decode()))\n assert len(responses) > 0\n assert json.loads(responses[0].content.decode())['module_name'] == 'test_get_sensor'\n sensor.stopped.set()", "def attempt_to_register(self, message: Message):\n\t\tlogger.info(\"Attempting to register client.\")\n\n\t\tsuccessful_parse = re.match(r'\\/regi (.{1,30})', message.body)\n\n\t\tif successful_parse and self.validate_name(successful_parse.group(1)):\n\t\t\tlogger.info(\"Client successfully registered.\")\n\t\t\tself.registry.register(successful_parse.group(1), message.sender)\n\t\telse:\n\t\t\tlogger.info(\"Client not registered\") # Ignore the message", "def connect(self, register = False, on_success=None, on_fail=None):\n JabberClient.connect(self, register)\n if register:\n s = self.stream\n s.registration_callback = 
self.process_registration_form\n s.registration_error_callback = on_fail\n s.registration_success_callback = lambda: (self.disconnect(), on_success())", "def register():\n response = jsonify(msg='unauthorized'), 400\n if request.method == 'POST' and request.get_json():\n data = request.get_json()\n valid = validator.user_registration(data)\n\n if valid['status']:\n ecomap_user.register(data['first_name'],\n data['last_name'],\n data['email'],\n data['password'])\n msg = 'added %s %s' % (data['first_name'],\n data['last_name'])\n response = jsonify({'status_message': msg}), 201\n else:\n response = Response(json.dumps(valid),\n mimetype='application/json'), 400\n return response", "def ProcessCertBasedRegister(self, signed_msg):\n # Unwrap the request\n try:\n req = self.UnwrapCertificateBasedDeviceRegistrationData(\n signed_msg.signed_request)\n except (IOError):\n return(400, 'Invalid request')\n\n # TODO(drcrash): Check the certificate itself.\n if req.certificate_type != dm.CertificateBasedDeviceRegistrationData.\\\n ENTERPRISE_ENROLLMENT_CERTIFICATE:\n return(403, 'Invalid certificate type for registration')\n\n register_req = req.device_register_request\n username = None\n\n if (register_req.flavor == dm.DeviceRegisterRequest.\n FLAVOR_ENROLLMENT_ATTESTATION_USB_ENROLLMENT):\n enrollment_token = self.CheckEnrollmentToken()\n policy = self.server.GetPolicies()\n if not enrollment_token:\n return (401, 'Missing enrollment token.')\n\n if ((not policy['token_enrollment']) or\n (not policy['token_enrollment']['token']) or\n (not policy['token_enrollment']['username'])):\n return (500, 'Error in config - no token-based enrollment')\n if policy['token_enrollment']['token'] != enrollment_token:\n return (403, 'Invalid enrollment token')\n username = policy['token_enrollment']['username']\n\n return self.RegisterDeviceAndSendResponse(register_req, username)", "def registration(self):\n response = self.app.get(\"/registration\")\n self.assertTrue(response.status_code, 200)\"\"\"\"\"\"", "def async_register_device(self, device):\n self._entities.append(device)\n\n @asyncio.coroutine\n def async_shutdown(event):\n \"\"\"Stop ffmpeg process.\"\"\"\n yield from device.async_stop_ffmpeg()\n\n self.hass.bus.async_listen_once(\n EVENT_HOMEASSISTANT_STOP, async_shutdown)\n\n # start on startup\n if device.initial_state:\n @asyncio.coroutine\n def async_start(event):\n \"\"\"Start ffmpeg process.\"\"\"\n yield from device.async_start_ffmpeg()\n yield from device.async_update_ha_state()\n\n self.hass.bus.async_listen_once(\n EVENT_HOMEASSISTANT_START, async_start)", "def register():\n result = register_helper(User)\n return jsonify(result[0]), result[1]", "def users_register(self):\n content = request.form\n if not USERS_REGISTER_MANDATORY_FIELDS.issubset(content.keys()):\n self.logger.debug((messages.MISSING_FIELDS_ERROR % (USERS_REGISTER_MANDATORY_FIELDS - set(content.keys()))))\n return messages.ERROR_JSON % (\n messages.MISSING_FIELDS_ERROR % (USERS_REGISTER_MANDATORY_FIELDS - set(content.keys()))), 400\n photo = None\n if 'photo' in request.files:\n photo = Photo.from_bytes(request.files['photo'].stream)\n try:\n self.auth_server.user_register(email=content[\"email\"], fullname=content[\"fullname\"],\n phone_number=content[\"phone_number\"], photo=photo,\n plain_password=content[\"password\"])\n except UserAlreadyRegisteredError:\n self.logger.debug(messages.USER_ALREADY_REGISTERED_MESSAGE % content[\"email\"])\n return messages.ERROR_JSON % messages.USER_ALREADY_REGISTERED_MESSAGE % 
content[\"email\"], 400\n except InvalidRegisterFieldError as e:\n self.logger.debug(str(e))\n return messages.ERROR_JSON % str(e), 400\n return messages.SUCCESS_JSON, 200", "async def attempt_register(username, password, email, expect=201):\n\n response = await database_sync_to_async(UserCreateView.as_view())(factory.post('/register', {\"username\": username, \"password\": password, \"email\": email}))\n assert response.status_code == expect", "def registerServiceAsync(self, uri, identity=None, correlationId=None,\n options=None):\n if correlationId is None:\n correlationId = CorrelationId()\n if options is None:\n options = ServiceRegistrationOptions()\n\n _ExceptionUtil.raiseOnError(\n internals.blpapi_ProviderSession_registerServiceAsync(\n self.__handle,\n uri,\n get_handle(identity),\n get_handle(correlationId),\n get_handle(options)\n ))\n\n return correlationId", "def post(self):\n data = request.json\n\n register(data)\n return \"User Successfully Registered\", 200", "def _async_register(service, notifier):\n\n proc = multiprocessing.Process(\n name='Async Registration {}'.format(service.iden),\n target=_register, args=(service, notifier))\n proc.start()", "def register(self):\n self.logger.info(\"Registering agent %s\", \"/registry/\" + self._configuration[\"identification\"][\"uuid\"])\n self._coordination.update(\"/registry/\" + self._configuration[\"identification\"][\"uuid\"], self._configuration[\"identification\"])", "def register(self, model: CreationType) -> CreationType:\n path = self._get_path()\n try:\n data = self.session.post_resource(path, model.dump())\n data = data[self._individual_key] if self._individual_key else data\n self._check_experimental(data)\n return self.build(data)\n except NonRetryableException as e:\n raise ModuleRegistrationFailedException(model.__class__.__name__, e)", "def register_client(self, authorization, request_body):\n # type (Mapping[str, str], str) -> Response\n if not authorization:\n raise OIDCFederationError(\"Missing Authorization header in registration request.\")\n if not authorization.startswith(\"pop \"):\n raise OIDCFederationError(\"Wrong Authentication scheme in registration request.\")\n\n registration_request = FederationRegistrationRequest(**json.loads(request_body))\n registration_request.rm_blanks()\n\n try:\n registration_request.verify()\n except MessageException as e:\n raise OIDCFederationError(\"Error in client registration request: {}.\".format(str(e)))\n\n client_software_statement, client_signing_key = self._verify_signature_chain(\n registration_request[\"software_statements\"],\n registration_request[\"signing_key\"])\n\n request_signature = authorization[len(\"pop \"):]\n try:\n SignedHttpRequest(client_signing_key).verify(request_signature, body=request_body)\n except ValidationError as e:\n raise OIDCFederationError(\"Could not verify signature of client registration request.\")\n\n provider_software_statement = self._find_software_statement_for_federation(\n client_software_statement.jwt.headers[\"kid\"])\n matched_preferences = self.registration_verification.verify(\n self.provider.capabilities.to_dict(), provider_software_statement.msg,\n registration_request.to_dict(), client_software_statement.msg)\n\n # recreate client registration request only with the common capabilities\n registration_request.update(matched_preferences)\n result = self.provider.client_registration_setup(registration_request)\n if isinstance(result, Response):\n return result\n\n registration_response = 
FederationRegistrationResponse(**result.to_dict())\n registration_response[\n \"provider_software_statement\"] = provider_software_statement.jwt.pack()\n return Created(registration_response.to_json(), content=\"application/json\")", "def register_sensor(name):\n message = \"REGISTER:\" + name + '\\n'\n sock.sendall(message)\n return", "def put(self, request, registration_id):\n Device.objects.update_or_create(user=request.user,\n registration_id=registration_id)\n return Response(status=rest_framework.status.HTTP_201_CREATED)", "def registered_device(self, registered_device):\n\n self._registered_device = registered_device", "def registered_device(self, registered_device):\n\n self._registered_device = registered_device", "def registered_device(self, registered_device):\n\n self._registered_device = registered_device", "def register(self):\n raise NotImplementedError(\"Should have implemented this\")", "def register():\n register_form = RegisterForm() # We're only getting stuff from JSON now\n if not register_form.validate():\n return jsonify({\n \"errors\": register_form.errors.items(),\n \"success\": False,\n \"user\": None,\n \"sent_json\": request.json\n })\n\n user = User.create(username=request.json['username'], password=request.json['password'])\n\n g.user = user\n\n return jsonify({\n \"errors\": [],\n \"success\": True,\n \"user\": g.user.username,\n \"sent_json\": request.json\n })", "def register(client):\n rv = client.register('user1', 'default')\n assert 'You were successfully registered ' \\\n 'and can login now' in rv.data\n rv = client.register('user1', 'default')\n assert 'The username is already taken' in rv.data\n rv = client.register('', 'default')\n assert 'You have to enter a username' in rv.data\n rv = client.register('meh', '')\n assert 'You have to enter a password' in rv.data\n rv = client.register('meh', 'x', 'y')\n assert 'The two passwords do not match' in rv.data\n rv = client.register('meh', 'foo', email='broken')\n assert 'You have to enter a valid email address' in rv.data", "def _register(self):\n self._log(self.botlog, 'Registering as %s' % self.nickname)\n self._send('USER %s B C :%s' % (self.ident, self.realname))\n self._send('NICK %s' % self.nickname)", "def register():\n data = None\n response = {\n 'status': 400,\n 'error': 'Provide: firstname, lastname, email, othername, phone_number, and password as json.'\n }\n try:\n data = request.get_json()\n except:\n return jsonify(response), 400\n\n if not data:\n return jsonify(response), 400\n\n user_data = {\n 'firstname': data.get('firstname'),\n 'lastname': data.get('lastname'),\n 'email': data.get('email'),\n 'password': data.get('password'),\n 'othername': data.get('othername'),\n 'phone_number': data.get('phone_number'),\n 'is_admin': data.get('is_admin'),\n 'is_politician': data.get('is_politician')\n }\n valdiator_result = Validator.validate_user(user_data)\n if isinstance(valdiator_result, dict):\n return jsonify(valdiator_result), valdiator_result['status']\n if isinstance(valdiator_result, bool) and valdiator_result:\n result = politico.register_user(user_data)\n\n response = {}\n if result == 'User added':\n # return a response notifying the user that they registered successfully\n response['status'] = 201\n response['data'] = []\n response['data'].append({\n 'message': 'User registered successfully'\n })\n elif result == 'Other name taken':\n # return a response notifying the user that othername is taken\n response['status'] = 409\n response['error'] = 'The othername you chose is taken'\n 
elif result == 'User already exists':\n # notify the user that an account with the same email is already registered\n response['status'] = 409\n response['error'] = 'User already exists'\n return make_response(jsonify(response), response['status'])", "def do_dbus_register(self, connection, object_path):\n logger.debug('::dbus_register')\n Gio.Application.do_dbus_register(self, connection, object_path)\n failure = False\n try:\n connection.connect('closed', lambda i: self.quit())\n self._dbus_id = connection.register_object(\n object_path,\n DeskChangerDaemonDBusInterface.interfaces[0],\n self._handle_dbus_call,\n self._handle_dbus_get,\n self._handle_dbus_set\n )\n except TypeError:\n # TODO - Handle this failure correctly.\n failure = True\n except GLib.Error as e:\n logger.debug(e.args)\n finally:\n if self._dbus_id is None or self._dbus_id == 0:\n logger.critical('failed to register DBus name %s', object_path)\n if failure:\n logger.error('possibly unsupported version of glib')\n return False\n\n logger.info('successfully registered DBus name %s', object_path)\n return True", "def _register(self):\n logger.debug(\"Registering HFP profile on DBus...\")\n\n self._profile = Profile(\n system_bus=self._system_bus,\n dbus_path=HFP_DBUS_PROFILE_ENDPOINT)\n self._profile.on_connect = self._profile_on_connect\n self._profile.on_disconnect = self._profile_on_disconnect\n self._profile.on_release = self._profile_on_release\n\n self._profilemgr_proxy.proxy.RegisterProfile(\n HFP_DBUS_PROFILE_ENDPOINT,\n \"hfp-hf\",\n {\n \"Name\": \"Hands-Free\",\n \"Version\": UInt16(0x0107),\n \"Features\": UInt16(HF_SUPPORTED_FEATURES),\n \"RequireAuthentication\": True,\n \"RequireAuthorization\": False,\n })\n logger.debug(\"Registered HFP profile on DBus.\")", "def test_register(self):\n users = User.objects.filter(username='test')\n self.assertTrue(len(users) == 0)\n\n username = \"test3\"\n data = {'username': username, 'password': \"123test\", 'email': '[email protected]',\n 'newsletter': 'false', 'research': 'true', 'device': self.device}\n\n response = self.requestRegistration(data)\n\n self.assertTrue('client_id' in response.data)\n self.assertTrue(not 'password' in response.data)\n\n users = User.objects.filter(username=username)\n self.assertTrue(len(users) == 1)\n user = users[0]\n profile = user.user_profile\n self.assertTrue(profile.research)\n self.assertFalse(profile.newsletter)\n\n phone = Device.objects.get(user=user)\n\n self.assertTrue(phone.uuid == self.uuid)\n self.assertTrue(phone.cordova == self.device['cordova'])", "def register_server():\n (code, message) = rest_api.register_server(request)\n if (code == 200):\n return message\n else:\n abort(code)", "def create_device(self, app_name='FooBar', device_type='Raspberry Pi 2'):\n\n app = self.resin.models.application.create(app_name, device_type)\n return app, self.resin.models.device.register(app['id'], self.resin.models.device.generate_uuid())", "def register():\n schema = UserRegistrationSchema()\n\n data = schema.loads(request.data)\n if data.errors:\n return jsonify(data.errors), 400\n\n # Confirming that a duplicate user doesn't exist\n duplicates_q = f\"g.V().hasLabel('{User.LABEL}')\" + \\\n f\".or(has('username', '{data.data['username']}'),\" + \\\n f\"has('email', '{data.data['email']}'))\"\n duplicates = client.submit(duplicates_q).all().result()\n if duplicates:\n return jsonify_response({\"error\": \"User already exists!\"}, 400)\n\n data.data[\"password\"] = generate_password_hash(\n 
data.data[\"password\"]).decode(\"utf-8\")\n\n # Creating the User and it's primary account + account admin edge\n user = User.create(**data.data)\n account = Account.create(title=f\"myaccount@{user.username}\")\n holds_edge = UserHoldsAccount.create(user=user.id, account=account.id,\n relationType=\"primary\")\n admin_edge = UserIsAccountAdmin.create(user=user.id, account=account.id)\n\n response = {\n \"user\": json.loads(schema.dumps(user).data),\n \"token\": create_access_token(identity=user)\n }\n\n return jsonify_response(response, 201)", "def register(self):\r\n if self.fields_not_empty(request, [\"first_name\", \"last_name\", \"age\", \"CPR\", \"email\", \"phone_number\", \"password\", \"confirm_password\"]):\r\n return jsonify({\"error\": \"Some fields are empty\"}), 400\r\n user = self.create_user_object(request)\r\n if request.form.get(\"password\") != request.form.get(\"confirm_password\"):\r\n return jsonify({\"error\": \"Passwords did not match\"}), 400\r\n db.insert_one(user)\r\n return self.start_session(user)", "async def register(\n username: str = Form(...),\n password: str = Form(...),\n email: EmailStr = Form(...),\n first_name: str = Form(...),\n last_name: str = Form(None)\n):\n user = await crud.user.get_by_email(email=email)\n if user:\n raise HTTPException(\n status_code=HTTP_409_CONFLICT,\n detail=\"The user with this email already exists in the system\",\n )\n user = await crud.user.get_by_username(username=username)\n if user:\n raise HTTPException(\n status_code=HTTP_409_CONFLICT,\n detail=\"Username already taken\",\n )\n user = UserCreate(username=username,\n password=password,\n email=email,\n first_name=first_name,\n last_name=last_name,\n is_email_verified=False\n )\n user_id = await crud.user.create(user)\n register_token = create_register_token(data={\"email\": user.email})\n if send_verify_account_email(\n email=user.email, username=user.username, first_name=user.first_name, token=register_token\n ):\n return {\"msg\": \"New account email sent, check your inbox to verify your account\"}\n else:\n await crud.user.remove(user_id)\n raise HTTPException(\n status_code=HTTP_500_INTERNAL_SERVER_ERROR,\n detail=\"Error while trying to send email, please try again\",\n )", "async def register(hass, token, symbol, pin):\n keystore = await Keystore.create(device_model=\"Home Assistant\")\n account = await Account.register(keystore, token, symbol, pin)\n return {\"account\": account, \"keystore\": keystore}", "def put(self):\n dev = self.request.get('device')\n reg = self.request.get('registry')\n uploaded_file = self.request.POST.get('data')\n data = uploaded_file.file.read()\n\n self.response.headers['Content-Type'] = 'text/plain'\n if (not dev) and len(dev)==0:\n self.response.write('parameter device not found')\n elif (not reg) and len(reg)==0:\n self.response.write('parameter registry not found')\n elif (not data) and len(data)==0:\n self.response.write('invalid or no key file found')\n else:\n # Get user account\n ds = Datastore()\n user = ds.get_registry(reg)\n if len(user) == 0:\n self.response.write(\"Registry does not exist\")\n else:\n region = get_region_from_user(user)\n\n # Add Device on IOT Core\n iot = IOT()\n success, message = iot.create_device(dev, reg, data, region)\n if success:\n self.response.write('Device Added')\n else:\n self.response.write(message)", "def create_device(self, device_dict):\n devices = {'devices': [device_dict]}\n url = '{}/iot/devices'.format(self.url)\n return self.post(url, data=json.dumps(devices), 
headers=self.headers)", "def register(self, secret):\n return self._samp_hub.register(secret)", "def post(self):\n request_error = self.__validatePackageRegisterRequest(request)\n if request_error:\n return jsonify(error_message=request_error), 400\n login = get_jwt_identity()\n self.__registerPackageFromRequest(login, request)\n return \"Created\", 201", "def eventRegister(self, eventId=None):\n\n\t\tmessage = {}\n\n\t\tmessage[\"msg_type\"] = \"request\"\n\t\tmessage[\"command\"] = \"event_register\"\n\t\tmessage[\"event_item\"] = { \"id\" : \"34ee2cf2\" }\n\n\t\tregistration_info = {}\n\t\tregistration_info[\"first_name\"] = \"Patrick\"\n\t\tregistration_info[\"last_name\"] = \"Farrell\"\n\t\tregistration_info[\"email\"] = \"[email protected]\"\n\n\t\tmessage[\"registration_info\"] = registration_info\n\n\t\tresponse = self.sendMessage( message )\n\n\t\tprint response", "def register(self, region=None, payload=None):\n return self._put_response_body([], payload=payload)", "def register(self):\n raise NotImplementedError()", "def register(self):\n raise NotImplementedError", "def register(self):\n raise NotImplementedError", "def flask_create_device():\n try:\n # retrieve the authorization token\n token = retrieve_auth_token(request)\n\n params = {\n 'count': request.args.get('count', '1'),\n 'verbose': request.args.get('verbose', 'false'),\n 'content_type': request.headers.get('Content-Type'),\n 'data': request.data\n }\n\n result = DeviceHandler.create_device(params, token)\n devices = result.get('devices')\n deviceId = devices[0].get('id')\n LOGGER.info(f' Creating a new device with id {deviceId}.')\n return make_response(jsonify(result), 200)\n except HTTPRequestError as e:\n LOGGER.error(f' {e.message} - {e.error_code}.')\n if isinstance(e.message, dict):\n return make_response(jsonify(e.message), e.error_code)\n\n return format_response(e.error_code, e.message)", "def register(self, username, email, password, confirmPassword):\n\t\turl = \"https://habitica.com/api/v4/user/auth/local/register\"\n\t\tpayload = {\"username\": username, \"email\": email, \"password\": password, \"confirmPassword\": confirmPassword}\n\t\treturn(postUrl(url, self.credentials, payload))", "def test_register_uuid_only(self):\n username = \"testuuid\"\n data = {'username': username, 'password': \"123test\", 'email': '[email protected]',\n 'newsletter': \"True\", 'research': \"True\", 'uuid': '49b44243-a240-49bb-8076-1dee1782e1fa'}\n\n response = self.requestRegistration(data, viewname='v1_api_register')\n\n self.assertTrue(response.status_code == status.HTTP_201_CREATED)\n self.assertTrue('client_id' in response.data)\n\n users = User.objects.filter(username=username)\n self.assertTrue(len(users) == 1)\n user = users[0]\n profile = user.user_profile\n phone = Device.objects.get(user=user)\n\n self.assertTrue(phone.uuid == self.uuid)", "def doRegisterAgent(\n registrar_ip: str,\n registrar_port: str,\n agent_id: str,\n ek_tpm: bytes,\n ekcert: Optional[Union[bytes, str]],\n aik_tpm: bytes,\n mtls_cert: Optional[bytes] = None,\n contact_ip: Optional[str] = None,\n contact_port: Optional[str] = None,\n) -> Optional[str]:\n\n data: Dict[str, Any] = {\n \"ekcert\": ekcert,\n \"aik_tpm\": aik_tpm,\n }\n if ekcert is None or ekcert == \"emulator\":\n data[\"ek_tpm\"] = ek_tpm\n\n if mtls_cert is not None:\n data[\"mtls_cert\"] = mtls_cert\n else:\n data[\"mtls_cert\"] = \"disabled\"\n logger.error(\"Most actions require the agent to have mTLS enabled, but no cert was provided!\")\n if contact_ip is not None:\n 
data[\"ip\"] = contact_ip\n if contact_port is not None:\n data[\"port\"] = contact_port\n\n response = None\n try:\n # The agent accesses the registrar without mTLS, meaning without client\n # certificate\n # TODO the registrar could be accessed using TLS, but without client\n # certificate verification. Currently it is accessed without TLS at all\n client = RequestsClient(f\"{registrar_ip}:{registrar_port}\", False)\n response = client.post(f\"/v{api_version}/agents/{agent_id}\", data=json.dumps(data))\n response_body = response.json()\n\n if response.status_code != 200:\n logger.error(\"Error: unexpected http response code from Registrar Server: %s\", response.status_code)\n keylime_logging.log_http_response(logger, logging.ERROR, response_body)\n return None\n\n logger.info(\"Agent registration requested for %s\", agent_id)\n\n if \"results\" not in response_body:\n logger.critical(\"Error: unexpected http response body from Registrar Server: %s\", response.status_code)\n return None\n\n if \"blob\" not in response_body[\"results\"]:\n logger.critical(\"Error: did not receive blob from Registrar Server: %s\", response.status_code)\n return None\n\n return str(response_body[\"results\"][\"blob\"])\n except Exception as e:\n if response and response.status_code == 503:\n logger.error(\"Agent cannot establish connection to registrar at %s:%s\", registrar_ip, registrar_port)\n sys.exit()\n else:\n logger.exception(e)\n\n return None", "def register_platform(self, address, serverkey=None, display_name=None):\n _log.info('Attempting registration of vcp at address: '\n '{} display_name: {}, serverkey: {}'.format(address,\n display_name,\n serverkey))\n parsed = urlparse(address)\n if parsed.scheme not in ('tcp', 'ipc'):\n raise ValueError(\n 'Only ipc and tpc addresses can be used in the '\n 'register_platform method.')\n try:\n connection = self._build_connection(address, serverkey)\n except gevent.Timeout:\n _log.error(\"Initial building of connection not found\")\n raise\n\n try:\n if connection is None:\n raise ValueError(\"Connection was not able to be found\")\n manager_key = connection.call('get_manager_key')\n except gevent.Timeout:\n _log.error(\"Couldn't retrieve managment key from platform\")\n raise\n\n try:\n if manager_key is not None:\n if manager_key == self.core.publickey:\n _log.debug('Platform is already managed and connected.')\n return\n else:\n _log.warn(\n 'Platform is registered with a different vc key.'\n 'This could be expected.')\n\n if parsed.scheme == 'tcp':\n self.core.publickey\n _log.debug(\n 'TCP calling manage. my serverkey: {}, my publickey: {}'.format(\n self._serverkey, self.core.publickey))\n pk = connection.call(\n 'manage', self._external_addresses[0], self._serverkey,\n self.core.publickey)\n else:\n pk = connection.call('manage', self.core.address)\n except gevent.Timeout:\n _log.error('RPC call to manage did not return in a timely manner.')\n raise\n # If we were successful in calling manage then we can add it to\n # our list of managed platforms.\n if pk is not None and len(pk) == 43:\n try:\n address_uuid = self._address_to_uuid.get(address)\n time_now = format_timestamp(get_aware_utc_now())\n\n if address_uuid is not None:\n _log.debug('Attempting to get instance id to reconfigure '\n 'the agent on the remote instance.')\n current_uuid = connection.call('get_instance_uuid')\n\n if current_uuid != address_uuid:\n _log.debug('Reconfiguring with new uuid. 
{}'.format(\n address_uuid\n ))\n connection.call('reconfigure',\n **{'instance-uuid': address_uuid})\n if self._registered_platforms.get(address_uuid) is None:\n self._registered_platforms[address_uuid] = dict(\n address=address, serverkey=serverkey,\n display_name=display_name,\n registered_time_utc=time_now,\n instance_uuid=address_uuid\n )\n else:\n address_uuid = str(uuid.uuid4())\n _log.debug(\"New platform with uuid: {}\".format(\n address_uuid))\n connection.call('reconfigure',\n **{'instance-uuid': address_uuid})\n self._address_to_uuid[address] = address_uuid\n if display_name is None:\n display_name = address\n self._registered_platforms[address_uuid] = dict(\n address=address, serverkey=serverkey,\n display_name=display_name,\n registered_time_utc=time_now,\n instance_uuid=address_uuid\n )\n self._platform_connections[address_uuid] = connection\n self._registered_platforms.sync()\n except gevent.Timeout:\n _log.error(\n 'Call to reconfigure did not return in a timely manner.')\n raise", "async def register(cls, websocket): \n\t\tawait websocket.send(json.dumps({\n\t\t\t's': 1,\n\t\t\t'c': 'CONNECT_START'\n\t\t}))\n\n\t\tuser = utilities.is_json(await websocket.recv())\n\t\tif not user:\n\t\t\traise exceptions.ClientError('INVALID_JSON')\n\n\t\tif user.get('d'):\n\t\t\tusername = user['d'].get('name')\n\t\t\tif not utilities.validate_string(username):\n\t\t\t\tusername = utilities.random_string(16)\n\t\telse:\n\t\t\tusername = utilities.random_string(16)\n\n\t\treturn cls(websocket, group=None, name=username)", "def register_user_device(username: str, password: str, mac_address: str, email: Optional[str] = None) -> \\\n Union[str, Token]:\n ret = register_user(username, password, email)\n if isinstance(ret, str):\n return ret\n else:\n user_id = ret\n token, device_id = _add_update_device(user_id, mac_address)\n client_logger_security().info(f\"Successfully added new device: user_id={user_id}, device_id={device_id}\")\n _set_user_authenticated(user_id, device_id)\n return token", "def post(self):\n reg = self.request.get('registry')\n region_name = self.request.get('region')\n if reg and len(reg) > 0 and reg.isalnum() and validate_region(region_name):\n region = get_region_id(region_name)\n # Create Registry on IOT Core\n iot = IOT()\n success, message = iot.create_registry(region,reg)\n if success:\n # Add registry to Datastore\n ds = Datastore()\n status = ds.add_registry(reg, region_name)\n self.response.headers['Content-Type'] = 'text/plain'\n if status:\n self.response.write('Registry Added')\n else:\n self.response.write('Registry already exists')\n else:\n self.response.write(message)\n else:\n self.response.write('invalid parameters: ' + reg + \" \" + region_name )", "def register(self):\n try:\n sha = sha1(self.email).hexdigest()\n except TypeError:\n raise SleekException(\"Could not register user.\", 401)\n\n if not redis.sadd(\"sl:account:ids\", sha):\n raise SleekException(\"Could not register new user.\", 401)\n self.save(register=True)", "def register_with_existing_node():\n #print('********************')\n print(request.get_json())\n node_address = request.get_json()[\"node_address\"]\n if not node_address:\n return \"Invalid data\", 400\n\n data = {\"node_address\": request.host_url}\n headers = {'Content-Type': \"application/json\"}\n\n # Make a request to register with remote node and obtain information\n response = requests.post(node_address + \"/register_node\",\n data=json.dumps(data), headers=headers)\n\n if response.status_code == 200:\n global 
blockchain\n global peers\n # update chain and the peers\n chain_dump = response.json()['chain']\n blockchain = create_chain_from_dump(chain_dump)\n peers.update(response.json()['peers'])\n return \"Registration successful\", 200\n else:\n # if something goes wrong, pass it on to the API response\n #print(response.content)\n #print(response.status_code)\n return response.content, response.status_code", "def register():\n # Validate and deserialize input\n json_data = request.get_json()\n if not json_data:\n return CustomResponse(\n message=Constant.response.NO_INPUT_DATA\n ).response()\n\n user = UserService(data=json_data).create_user()\n return CustomResponse(data=user).response()", "def register(self, method=\"POST\", fullname=\"John Doe\", name=\"johndoe\",\r\n password=\"p4ssw0rd\", password2=None, email=None):\r\n if password2 is None:\r\n password2 = password\r\n if email is None:\r\n email = name + '@example.com'\r\n if method == \"POST\":\r\n return self.app.post('/account/register',\r\n data={\r\n 'fullname': fullname,\r\n 'name': name,\r\n 'email_addr': email,\r\n 'password': password,\r\n 'confirm': password2},\r\n follow_redirects=True)\r\n else:\r\n return self.app.get('/account/register', follow_redirects=True)", "def register(session: Session, email: str, password: str, given_name: str, last_name: str,\n phone: str) -> RegisterResult:\n # Validate the data integrity of the parameters\n if email is None:\n return RegisterResult.BAD_USERNAME\n if password is None or not passwordService.validate(password):\n return RegisterResult.BAD_PASSWORD\n\n # Check to see if the user already exists\n existing_user = session.query(User) \\\n .filter(User.email == email) \\\n .first()\n if existing_user is not None:\n return RegisterResult.USERNAME_ALREADY_REGISTERED\n\n # Everything seems fine, so we go ahead and create the user & the linked account.\n password_hash = passwordService.hash(password)\n new_user = User(role=UserType.VOLUNTEER, password=password_hash, first_name=given_name, last_name=last_name,\n mobile_number=phone, email=email, preferred_hours={}, experience_years=0, possibleRoles=[\"Basic\"],\n qualifications=[],\n availabilities={\"Friday\": [], \"Monday\": [], \"Sunday\": [], \"Tuesday\": [], \"Saturday\": [],\n \"Thursday\": [], \"Wednesday\": []})\n session.add(new_user)\n session.flush()\n return RegisterResult.SUCCESS", "async def _register_hardware_platform(\n hass: HomeAssistant, integration_domain: str, platform: HardwareProtocol\n) -> None:\n if integration_domain == DOMAIN:\n return\n if not hasattr(platform, \"async_info\"):\n raise HomeAssistantError(f\"Invalid hardware platform {platform}\")\n hass.data[DOMAIN][\"hardware_platform\"][integration_domain] = platform", "def register():\n data = request.get_json()\n username = data[\"username\"]\n password = data[\"password\"]\n client_data = data[\"client_data\"]\n if register_user(username, password, client_data):\n return \"1\"\n else:\n return \"0\"", "def cmd_register(self, app_path=None):\n if app_path:\n app_path = os.path.abspath(app_path)\n else:\n app_path = os.getcwd()\n app_name = os.path.basename(app_path)\n rc = self.socket_command('register %s %s' % (app_name, app_path))\n return rc", "def async_register_host_in_dev_reg(\n entry_id: str,\n dev_reg: dr.DeviceRegistry,\n) -> None:\n params = DeviceInfo(\n identifiers={(DOMAIN, \"host\")},\n manufacturer=\"Home Assistant\",\n model=SupervisorEntityModel.HOST,\n name=\"Home Assistant Host\",\n entry_type=dr.DeviceEntryType.SERVICE,\n )\n 
dev_reg.async_get_or_create(config_entry_id=entry_id, **params)", "def on_register(self, response):\n print('You have been registered!')\n self.on_auth(response)", "def register_probe(request):\n serial_number = request.data['sn']\n\n try:\n probe = Probe.objects.get(sn=serial_number)\n except Probe.DoesNotExist:\n probe = None\n\n # md = []\n # for modem in request.data['modems']:\n # try:\n # inst_modem = Modem.objects.get(imei=modem['imei'])\n # except Modem.DoesNotExist:\n # inst_modem = None\n # md.append( inst_modem)\n serializer = ProbeSerializer(probe, data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response({\"message\": _(\"Error probe registering!\"), 'more': serializer.errors})", "def register():\n (status, userRecord) = cs411_user.registerUser(\n request.form.get('username', 'USER NAME IS MISSING'),\n request.form.get('password', 'PASSWORD IS MISSING'),\n request.form.get('Email', 'EMAIL IS MISSING'),\n request.form.get('UFirst_Name', 'FIRST NAME IS MISSING'),\n request.form.get('ULast_Name', 'LAST NAME IS MISSING')\n )\n if status is False: raise InvalidUsage(userRecord[\"message\"], 403)\n else: return prepJSON(userRecord)", "def msg_register(channel, version = NATIVE_HEADER_VERSION, order=\"<\"):\n return message_no_reply(REGISTER, channel, \"\", version, order)", "async def register(cls, conn: Connection, name: str) -> Client:\n self = cls()\n self.conn = conn\n\n await self.conn.write_json({\"type\": \"register\", \"name\": name})\n msg = await self.conn.read()\n packet = json.loads(msg)\n if packet[\"type\"] == \"done\":\n raise Error(\"game already finished\")\n elif packet[\"type\"] == \"error\":\n raise Error(packet[\"msg\"])\n elif packet[\"type\"] == \"registration\":\n self.id = PlayerID(packet[\"player\"])\n self.world = World(packet[\"world\"])\n else:\n raise Error(f\"unexpected message on registration: {msg}\")\n\n return self", "def registration_started(self):\n pass", "def register(self, new_reg=None):\n if new_reg is None:\n new_reg = messages.NewRegistration()\n action = LOG_ACME_REGISTER(registration=new_reg)\n with action.context():\n return (\n DeferredContext(\n self.update_registration(\n new_reg, uri=self.directory[new_reg]))\n .addErrback(self._maybe_registered, new_reg)\n .addCallback(\n tap(lambda r: action.add_success_fields(registration=r)))\n .addActionFinish())", "async def register_handler(self, ctx, error):\n if isinstance(error, InvalidTimezone):\n await self.send_reply(ctx, \"invalid_timezone\", error.timezone)\n elif isinstance(error, tosurnament.RegistrationEnded):\n await self.send_reply(ctx, \"registration_ended\")", "async def register_client(connection):\n connections[connection.uuid] = connection\n messages_to_clients[connection.uuid] = asyncio.Queue()\n\n await connection.notify_connected()", "def register():\n insert_user(json_body())\n try:\n db.session.commit()\n except IntegrityError:\n raise exc.CouldNotCreateEntry()\n\n return jsonify({'message': 'Created user.'}), 200", "def register() -> str:\n try:\n email = flask.request.form['email']\n password = flask.request.form['password']\n except KeyError:\n flask.abort(400)\n\n try:\n AUTH.register_user(email, password)\n except ValueError:\n return flask.jsonify({\"message\": \"email already registered\"}), 400\n\n msg = {\"email\": email, \"message\": \"user created\"}\n return flask.jsonify(msg)", "def _register_instance(self, discovery_address, display_name=None):\n\n 
_log.info(\n 'Attempting to register name: {} with address: {}'.format(\n display_name, discovery_address))\n\n try:\n discovery_response = DiscoveryInfo.request_discovery_info(\n discovery_address)\n except DiscoveryError as e:\n return {\n 'error': {\n 'code': DISCOVERY_ERROR, 'message': e.message\n }}\n\n pa_instance_serverkey = discovery_response.serverkey\n pa_vip_address = discovery_response.vip_address\n\n assert pa_instance_serverkey\n assert pa_vip_address\n\n self.register_platform(pa_vip_address, pa_instance_serverkey)\n\n if pa_vip_address not in self._address_to_uuid.keys():\n return {'status': 'FAILURE',\n 'context': \"Couldn't register address: {}\".format(\n pa_vip_address)}\n\n return {'status': 'SUCCESS',\n 'context': 'Registered instance {}'.format(display_name)}", "def ProcessRegisterBrowserRequest(self, msg):\n enrollment_token = None\n match = re.match('GoogleEnrollmentToken token=(\\\\w+)',\n self.headers.getheader('Authorization', ''))\n if match:\n enrollment_token = match.group(1)\n if not enrollment_token:\n return (401, 'Missing enrollment token.')\n\n device_id = self.GetUniqueParam('deviceid')\n if not device_id:\n return (400, 'Parameter deviceid is missing.')\n\n if not msg.machine_name:\n return (400, 'Invalid machine name: ')\n\n if enrollment_token == INVALID_ENROLLMENT_TOKEN:\n return (401, 'Invalid enrollment token')\n\n dm_token = 'fake_device_management_token'\n response = dm.DeviceManagementResponse()\n response.register_response.device_management_token = (\n dm_token)\n self.server.RegisterBrowser(dm_token, device_id, msg.machine_name)\n\n return (200, response)", "def register(self, name, username, password, email):\n return self.app.post('/register', data = dict(\n name = name,\n username = username,\n password = password,\n email = email\n ), follow_redirects = True)", "def register():\n if request.method == 'POST':\n details = request.get_json()\n name = details.get('name')\n email = details.get('email')\n national_id = details.get('national_id')\n is_admin = details.get('admin')\n password = details.get('password')\n confirm_pwd = details.get('confirm_pwd')\n if len(name) < 4:\n return make_response(jsonify(\n {'message': 'name must be four letters or more!'}\n )), 409\n if not User.validate_email(email):\n return make_response(jsonify(\n {'message': \"Invalid email\"}\n )), 409\n if password != confirm_pwd:\n return make_response(jsonify(\n {'message': 'password mistmatch'}\n )), 400\n if len(password) < 4:\n return make_response(jsonify(\n {'message': 'password too short'}\n )), 409\n if str(national_id).isalpha():\n return make_response(jsonify(\n {'message': 'national id must be digits'}\n )), 400\n if name.isdigit():\n return make_response(jsonify(\n {'message': 'Name must be an alphabet'}\n )), 400\n user = User.query.filter_by(email=email).first()\n if user:\n return make_response(jsonify(\n {'message': 'user already registred, login'}\n )), 200\n user = User(\n name=name,\n email=email,\n national_id=national_id,\n password=password,\n is_admin=is_admin)\n user.save_user()\n auth_token = user.token_generate(user.id)\n return make_response(jsonify(\n {\n 'message': 'registration successfull',\n 'token': auth_token.decode()\n }\n )), 201\n return None", "def register_user(client_socket, parts):\n\n username = parts[1]\n one_time_id = parts[2]\n signature = base64.b64decode(parts[3])\n hostname = parts[4]\n port = parts[5]\n client_cert = parts[6]\n\n valid_signature = Cryptography.verify_signature(base64.b64decode(client_cert).decode(),\n 
one_time_id,\n signature)\n\n if valid_signature and User.login(username, one_time_id):\n ClientsStore.save_client(username, hostname, port, client_cert)\n ClientsStore.set_online(username, hostname, port)\n Administration.__socket_map[client_socket] = username\n client_socket.send('OK'.encode())\n else:\n client_socket.send('NOTOK'.encode())", "def register_peer(self):\n try:\n self.get_file_list()\n num_files = len(self.file_list)\n total_ops = self.key_end - self.key_start\n run_ops = total_ops/num_files\n print \"Staring Benchmark Register Peer with Server...\"\n t1 = time.time()\n for i in range(run_ops):\n for file in self.file_list:\n self.service.put(file, self.peer_id)\n t2 = time.time()\n total = run_ops * num_files\n print \"%s Register operations = %s sec\" % (total,t2-t1)\n print \"per Register operation = %s sec\" % ((t2-t1)/total)\n print \"per Register operation = %s msec\" % (((t2-t1)/total)*1000)\n except Exception as e:\n print \"Registering Peer Error, %s\" % e\n sys.exit(1)", "def register(service_class, args, namespace, user_code, notifier=None):\n try:\n user = g.user\n except RuntimeError:\n user = 'anonymous'\n service = service_class(\n namespace=namespace, code_dir=user_code,\n users={user: ['POST', 'PUT', 'DELETE']},\n **dict(args))\n try:\n slot = service_store[service.iden]['slot']\n except KeyError:\n slot = 'free'\n\n # make sure to only use free or errored out slots\n if slot not in ('free', 'error'):\n raise APIException(\"service slot not available: {}\\n\"\n \"Current state: {}\"\n .format(service.iden, slot), 400)\n\n service_store[service.iden] = {\n 'slot': 'busy',\n 'msg': 'Empty service created',\n 'stage': 1,\n 'total_stages': 5,\n 'service': None\n }\n\n _async_register(service, notifier)\n return service", "def register_bob(self, follow=False):\n response = self.client.post(\"/registration/register/\", {\n \"username\": \"russellszn\",\n \"email\": \"[email protected]\",\n \"password1\": \"testing123\",\n \"password2\": \"testing123\",\n }, follow=follow)\n return response", "def register_bob(self, follow=False):\n response = self.client.post(\"/registration/register/\", {\n \"username\": \"russellszn\",\n \"email\": \"[email protected]\",\n \"password1\": \"testing123\",\n \"password2\": \"testing123\",\n }, follow=follow)\n return response", "def finalize_register(self, data: AccountActivationData) -> object:\n # Get and check data from token (still pending in DB)\n registration_response = self.registration_status(AccountActivationTokenData(validation_token=data.validation_token))\n if not registration_response['status'] and registration_response['msg'] != 'PENDING':\n return registration_response\n\n tmp_password = Helper.GenerateString(20, True)\n\n # Create Cognito account with a temp password\n try:\n response = self.cognito.admin_create_user(\n UserPoolId=os.getenv(\"COGNITO_POOL_ID\"),\n Username=data.user_email,\n UserAttributes=[\n {\"Name\": \"email\", \"Value\": data.user_email},\n {\"Name\": \"email_verified\", \"Value\": \"true\"}\n ],\n TemporaryPassword=tmp_password, # Set the password, so user needs to recieve this by mail\n MessageAction=\"SUPPRESS\", # AWS Cognito will not send any email.\n DesiredDeliveryMediums=['EMAIL']\n )\n except self.cognito.exceptions.UsernameExistsException as e:\n return {\n \"status\": False,\n \"msg\": \"A user with this e-mail address already exists\",\n \"data\": {\"cognito\": \"UsernameExistsException\"}\n }\n except Exception as e:\n return {\n \"status\": False,\n \"msg\": str(e),\n \"data\": 
None\n }\n\n # add the cognito details to YOUR database and do other stuff\n if response['ResponseMetadata']['HTTPStatusCode'] == 200:\n try:\n\n #TODO: do cool stuff here needed for your application\n pass\n\n except Exception as e:\n return {\n \"status\": False,\n \"msg\": str(e),\n \"data\": None\n }\n\n # Now reset the password for the created cognito account with the given user password\n password_reset = User()\n response_password_reset = password_reset.admin_set_user_password(data.user_email, data.new_password, True)\n\n # If success, do something\n if response_password_reset['ResponseMetadata']['HTTPStatusCode'] == 200:\n\n # send mail to user about account activation\n mailbody = f\"\"\"Dear user,\\r\\r\n Your account has been activated and ready to use. You have setup a password to secure your account, please keep your password in a save place.\\r\n You can access anytime your account. Start here: {os.getenv(\"APP_URL\")}/login and enter your email address and password.\\r\\r\n\n {os.getenv(\"APP_URL\")}\"\"\"\n # send mail\n\n return {\n \"status\": True,\n \"msg\": \"Your account has been activated, please login using your new password!\"\n }" ]
[ "0.7048597", "0.6574808", "0.63383627", "0.6312321", "0.6306717", "0.6257553", "0.6199068", "0.61455584", "0.60931623", "0.60073423", "0.5975581", "0.5859668", "0.57837015", "0.57369715", "0.57089573", "0.5657908", "0.5651063", "0.56088966", "0.55459535", "0.5511144", "0.54968065", "0.5454494", "0.5407802", "0.5407348", "0.54046065", "0.53984326", "0.5394558", "0.5392012", "0.53647506", "0.534053", "0.53305656", "0.53152657", "0.53118193", "0.5282816", "0.5271176", "0.5246187", "0.5245769", "0.5245769", "0.5245769", "0.52361387", "0.5234082", "0.5222841", "0.5220588", "0.5209903", "0.5194403", "0.51856244", "0.51642597", "0.5153151", "0.5150809", "0.51314056", "0.51282215", "0.51277804", "0.5121909", "0.5111057", "0.5109681", "0.50891525", "0.5086274", "0.5075721", "0.50737184", "0.50646657", "0.5060321", "0.5060321", "0.50591594", "0.5055023", "0.50442576", "0.50423086", "0.5042137", "0.503636", "0.5026014", "0.5020643", "0.5014635", "0.5009873", "0.5002811", "0.5002172", "0.49998343", "0.49997863", "0.49989736", "0.49965033", "0.49874732", "0.49872237", "0.49809873", "0.49804983", "0.49751428", "0.4968583", "0.49641186", "0.49426025", "0.49398968", "0.49388078", "0.49248457", "0.492316", "0.49102744", "0.4908792", "0.49082145", "0.4906724", "0.4904664", "0.48994154", "0.48942697", "0.48893052", "0.48893052", "0.48809764" ]
0.7058231
0
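Note on the pattern in the register() document above (the cancel() document in the following record uses it as well): a threading.Event is used to turn a callback-driven polling-machine operation into a blocking, synchronous call. Below is a minimal, self-contained sketch of that pattern; start_async_operation, its simulated worker thread, and register_blocking are illustrative stand-ins, not part of the dataset or of any real SDK.

import threading
import time

def start_async_operation(callback):
    # Hypothetical stand-in for a callback-driven operation (e.g. a polling machine)
    # that completes later on a worker thread.
    def worker():
        time.sleep(0.1)  # pretend to do network I/O
        callback(result="assigned", error=None)
    threading.Thread(target=worker, daemon=True).start()

def register_blocking():
    # Turn the callback-driven operation into a synchronous call:
    # block on an Event that the completion callback sets.
    done = threading.Event()
    outcome = {}

    def on_complete(result=None, error=None):
        outcome["result"] = result
        outcome["error"] = error
        done.set()  # release the waiting caller

    start_async_operation(on_complete)
    done.wait()  # does not return until the callback has fired
    return outcome

if __name__ == "__main__":
    print(register_blocking())  # {'result': 'assigned', 'error': None}

The same Event-based wait is what makes the register() and cancel() methods in these records synchronous: the public method returns only after the internal completion callback has set the event.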
This is a synchronous call, meaning that this function will not return until the cancellation process has either completed successfully or the attempt has resulted in a failure. Before returning, the client will also disconnect from the Hub. If there is no registration in progress, this will throw an error, since there is no registration process to cancel.
def cancel(self):
    logger.info("Cancelling the current registration process")
    cancel_complete = Event()

    def on_cancel_complete():
        cancel_complete.set()
        logger.info("Successfully cancelled the current registration process")

    self._polling_machine.cancel(callback=on_cancel_complete)

    cancel_complete.wait()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def disconnect(self):\n if self.is_connected:\n try:\n self.client.unregister()\n finally:\n if self.client.is_running:\n self.client.stop()\n self.hub.disconnect()", "async def async_cancel(self):\n raise NotImplementedError", "def cancel(self) -> asyncio.Future:\n pass # pragma: no cover", "def test_cancel_sync_handle_call_during_execution(serve_instance):\n running_signal_actor = SignalActor.remote()\n cancelled_signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Ingress:\n async def __call__(self, *args):\n await running_signal_actor.send.remote()\n await send_signal_on_cancellation(cancelled_signal_actor)\n\n h = serve.run(Ingress.bind()).options(use_new_handle_api=True)\n\n # Send a request and wait for it to start executing.\n r = h.remote()\n ray.get(running_signal_actor.wait.remote(), timeout=10)\n\n # Cancel it and verify that it is cancelled via signal.\n r.cancel()\n ray.get(cancelled_signal_actor.wait.remote(), timeout=10)\n\n with pytest.raises(ray.exceptions.TaskCancelledError):\n r.result()", "def register(self):\n if self.hub.is_connected:\n if self._private_key is not None:\n raise SAMPClientError(\"Client already registered\")\n\n result = self.hub.register(self.hub.lockfile[\"samp.secret\"])\n\n if result[\"samp.self-id\"] == \"\":\n raise SAMPClientError(\n \"Registration failed - samp.self-id was not set by the hub.\"\n )\n\n if result[\"samp.private-key\"] == \"\":\n raise SAMPClientError(\n \"Registration failed - samp.private-key was not set by the hub.\"\n )\n\n self._public_id = result[\"samp.self-id\"]\n self._private_key = result[\"samp.private-key\"]\n self._hub_id = result[\"samp.hub-id\"]\n\n if self._callable:\n self._set_xmlrpc_callback()\n self._declare_subscriptions()\n\n if self._metadata != {}:\n self.declare_metadata()\n\n self._is_registered = True\n\n else:\n raise SAMPClientError(\n \"Unable to register to the SAMP Hub. 
Hub proxy not connected.\"\n )", "def register_signal(client):\n def process_failure_signal(sender, task_id, *args, **kwargs):\n client.captureException(\n extra={\n 'task_id': task_id,\n 'task': sender,\n }\n )\n task_failure.connect(process_failure_signal, weak=False)", "async def test_cancelled_task(self) -> None:\n cancelledMessage: str = \"I have been cancelled\"\n\n class CancelHandler(Handler):\n async def getName(self) -> str:\n raise asyncio.CancelledError(\n cancelledMessage\n ) # Pretend that this is some await call that gets cancelled\n\n async with TestServer(handler=CancelHandler(), ip=\"::1\") as sa:\n ip, port = sa.ip, sa.port\n assert ip and port\n async with get_client(TestingService, host=ip, port=port) as client:\n with self.assertRaises(ApplicationError) as ex:\n await client.getName()\n self.assertEqual(\n ex.exception.message,\n f\"Application was cancelled on the server with message: {cancelledMessage}\",\n )", "async def disconnect(self):", "def test_cancel_generator_sync(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Ingress:\n async def __call__(self, *args):\n yield \"hi\"\n await send_signal_on_cancellation(signal_actor)\n\n h = serve.run(Ingress.bind()).options(use_new_handle_api=True, stream=True)\n\n # Send a request and wait for it to start executing.\n g = h.remote()\n\n assert next(g) == \"hi\"\n\n # Cancel it and verify that it is cancelled via signal.\n g.cancel()\n\n with pytest.raises(ray.exceptions.TaskCancelledError):\n next(g)\n\n ray.get(signal_actor.wait.remote(), timeout=10)", "async def cancel():\n await asyncio.get_running_loop().run_in_executor(None, cancel_inner)", "def register(self):\n logger.info(\"Registering with Hub...\")\n register_complete = Event()\n\n def on_register_complete(result=None, error=None):\n # This could be a failed/successful registration result from the HUB\n # or a error from polling machine. 
Response should be given appropriately\n if result is not None:\n if result.status == \"assigned\":\n logger.info(\"Successfully registered with Hub\")\n else: # There be other statuses\n logger.error(\"Failed registering with Hub\")\n if error is not None: # This can only happen when the polling machine runs into error\n logger.info(error)\n\n register_complete.set()\n\n self._polling_machine.register(callback=on_register_complete)\n\n register_complete.wait()", "def test_cancel_async_handle_call_during_execution(serve_instance):\n running_signal_actor = SignalActor.remote()\n cancelled_signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Downstream:\n async def __call__(self, *args):\n await running_signal_actor.send.remote()\n await send_signal_on_cancellation(cancelled_signal_actor)\n\n @serve.deployment\n class Ingress:\n def __init__(self, handle):\n self._h = handle.options(use_new_handle_api=True)\n\n async def __call__(self, *args):\n # Send a request and wait for it to start executing.\n r = self._h.remote()\n await running_signal_actor.wait.remote()\n\n # Cancel it and verify that it is cancelled via signal.\n r.cancel()\n await cancelled_signal_actor.wait.remote()\n\n with pytest.raises(ray.exceptions.TaskCancelledError):\n await r\n\n h = serve.run(Ingress.bind(Downstream.bind())).options(use_new_handle_api=True)\n h.remote().result() # Would raise if test failed.", "async def new_coro():\n try:\n await coro\n except asyncio.CancelledError:\n pass", "async def close(self) -> None:\n logger.debug(\n f\"['{self._instance_connection_string}']: Waiting for _current to be cancelled\"\n )\n self._current.cancel()\n logger.debug(\n f\"['{self._instance_connection_string}']: Waiting for _next to be cancelled\"\n )\n self._next.cancel()\n logger.debug(\n f\"['{self._instance_connection_string}']: Waiting for _client_session to close\"\n )\n await self._client_session.close()", "def test_cancel_sync_handle_call_during_assignment(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment(max_concurrent_queries=1)\n class Ingress:\n def __init__(self):\n self._num_requests = 0\n\n async def __call__(self, *args):\n self._num_requests += 1\n await signal_actor.wait.remote()\n\n return self._num_requests\n\n h = serve.run(Ingress.bind()).options(use_new_handle_api=True)\n\n # Send a request and wait for it to be ongoing so we know that further requests\n # will block trying to assign a replica.\n initial_response = h.remote()\n wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 1)\n\n # Make a second request, cancel it, and verify that it is cancelled.\n second_response = h.remote()\n second_response.cancel()\n with pytest.raises(concurrent.futures.CancelledError):\n second_response.result()\n\n # Now signal the initial request to finish and check that the second request\n # never reached the replica.\n ray.get(signal_actor.send.remote())\n assert initial_response.result() == 1\n for i in range(2, 12):\n assert h.remote().result() == i", "def guiding_disconnect():\r\n try:\r\n app.guider.disconnect()\r\n return jsonify({\"status\": True})\r\n except Exception as e:\r\n return jsonify(\r\n {\"status\": False, \"error\": \"Failed disconnecting from guider: %s\" % e}\r\n )", "def test_only_relevant_task_is_cancelled(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Ingress:\n async def __call__(self, *args):\n await signal_actor.wait.remote()\n return \"ok\"\n\n h = 
serve.run(Ingress.bind()).options(use_new_handle_api=True)\n\n r1 = h.remote()\n r2 = h.remote()\n\n # Wait for both requests to be executing.\n wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 2)\n\n r1.cancel()\n with pytest.raises(ray.exceptions.TaskCancelledError):\n r1.result()\n\n # Now signal r2 to run to completion and check that it wasn't cancelled.\n ray.get(signal_actor.send.remote())\n assert r2.result() == \"ok\"", "def test_cancel_generator_async(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Downstream:\n async def __call__(self, *args):\n yield \"hi\"\n await send_signal_on_cancellation(signal_actor)\n\n @serve.deployment\n class Ingress:\n def __init__(self, handle):\n self._h = handle.options(use_new_handle_api=True, stream=True)\n\n async def __call__(self, *args):\n # Send a request and wait for it to start executing.\n g = self._h.remote()\n assert await g.__anext__() == \"hi\"\n\n # Cancel it and verify that it is cancelled via signal.\n g.cancel()\n\n with pytest.raises(ray.exceptions.TaskCancelledError):\n assert await g.__anext__() == \"hi\"\n\n await signal_actor.wait.remote()\n\n h = serve.run(Ingress.bind(Downstream.bind())).options(use_new_handle_api=True)\n h.remote().result() # Would raise if test failed.", "async def disconnect(self):\n self.client.close()", "def registerServiceAsync(self, uri, identity=None, correlationId=None,\n options=None):\n if correlationId is None:\n correlationId = CorrelationId()\n if options is None:\n options = ServiceRegistrationOptions()\n\n _ExceptionUtil.raiseOnError(\n internals.blpapi_ProviderSession_registerServiceAsync(\n self.__handle,\n uri,\n get_handle(identity),\n get_handle(correlationId),\n get_handle(options)\n ))\n\n return correlationId", "def attempt_to_register(self, message: Message):\n\t\tlogger.info(\"Attempting to register client.\")\n\n\t\tsuccessful_parse = re.match(r'\\/regi (.{1,30})', message.body)\n\n\t\tif successful_parse and self.validate_name(successful_parse.group(1)):\n\t\t\tlogger.info(\"Client successfully registered.\")\n\t\t\tself.registry.register(successful_parse.group(1), message.sender)\n\t\telse:\n\t\t\tlogger.info(\"Client not registered\") # Ignore the message", "async def async_disconnect(self) -> None:\n\n def stop() -> None:\n \"\"\"Stop the MQTT client.\"\"\"\n # Do not disconnect, we want the broker to always publish will\n self._mqttc.loop_stop()\n\n def no_more_acks() -> bool:\n \"\"\"Return False if there are unprocessed ACKs.\"\"\"\n return not any(not op.is_set() for op in self._pending_operations.values())\n\n # stop waiting for any pending subscriptions\n await self._subscribe_debouncer.async_cleanup()\n # reset timeout to initial subscribe cooldown\n self._subscribe_debouncer.set_timeout(INITIAL_SUBSCRIBE_COOLDOWN)\n # stop the unsubscribe debouncer\n await self._unsubscribe_debouncer.async_cleanup()\n # make sure the unsubscribes are processed\n await self._async_perform_unsubscribes()\n\n # wait for ACKs to be processed\n async with self._pending_operations_condition:\n await self._pending_operations_condition.wait_for(no_more_acks)\n\n # stop the MQTT loop\n async with self._paho_lock:\n await self.hass.async_add_executor_job(stop)", "async def disconnect_callback_async(self, excep):\r\n _LOGGER.debug(\" ........... 
attempting reconnection\")\r\n await self.service_panel_stop(excep)\r\n await self.service_panel_start(excep)", "async def wait():\n try:\n await asyncio.get_running_loop().run_in_executor(None, wait_inner)\n except asyncio.CancelledError:\n await cancel()\n raise\n finally:\n kernel32.CloseHandle(timer)\n kernel32.CloseHandle(cancel_event)", "async def on_disconnect(self) -> None:", "def test_cancel_on_http_client_disconnect_during_assignment(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment(max_concurrent_queries=1)\n class Ingress:\n def __init__(self):\n self._num_requests = 0\n\n async def __call__(self, *args):\n self._num_requests += 1\n await signal_actor.wait.remote()\n\n return self._num_requests\n\n h = serve.run(Ingress.bind()).options(use_new_handle_api=True)\n\n # Send a request and wait for it to be ongoing so we know that further requests\n # will block trying to assign a replica.\n initial_response = h.remote()\n wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 1)\n\n # Intentionally time out on the client, causing it to disconnect.\n with pytest.raises(requests.exceptions.ReadTimeout):\n requests.get(\"http://localhost:8000\", timeout=0.5)\n\n # Now signal the initial request to finish and check that the request sent via HTTP\n # never reaches the replica.\n ray.get(signal_actor.send.remote())\n assert initial_response.result() == 1\n for i in range(2, 12):\n assert h.remote().result() == i", "def connect_never_retry():\n try:\n messaging_service = MessagingService.builder().from_properties(boot.broker_properties()) \\\n .with_reconnection_retry_strategy(RetryStrategy.never_retry()).build()\n future = messaging_service.connect_async()\n\n return future.result()\n\n except PubSubPlusClientError as exception:\n raise exception\n\n finally:\n messaging_service.disconnect_async()", "def test_out_of_band_task_is_not_cancelled(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Downstream:\n async def hi(self):\n await signal_actor.wait.remote()\n return \"ok\"\n\n @serve.deployment\n class Ingress:\n def __init__(self, handle):\n self._h = handle.options(use_new_handle_api=True)\n self._out_of_band_req = self._h.hi.remote()\n\n async def __call__(self, *args):\n await self._h.hi.remote()\n\n async def get_out_of_band_response(self):\n return await self._out_of_band_req\n\n h = serve.run(Ingress.bind(Downstream.bind())).options(use_new_handle_api=True)\n\n # Send a request, wait for downstream request to start, and cancel it.\n r1 = h.remote()\n wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 2)\n\n r1.cancel()\n with pytest.raises(ray.exceptions.TaskCancelledError):\n r1.result()\n\n # Now signal out of band request to run to completion and check that it wasn't\n # cancelled.\n ray.get(signal_actor.send.remote())\n assert h.get_out_of_band_response.remote().result() == \"ok\"", "async def _disconnect(self):\n if self._heart_beat_task:\n self._heart_beat_task.cancel()\n try:\n await self._heart_beat_task\n except asyncio.CancelledError:\n pass\n self._heart_beat_task = None\n if self._response_handler_task:\n self._response_handler_task.cancel()\n try:\n await self._response_handler_task\n except asyncio.CancelledError:\n pass\n self._response_handler_task = None\n if self._command_queue_task:\n self._command_queue_task.cancel()\n try:\n await self._command_queue_task\n except asyncio.CancelledError:\n pass\n self._command_queue_task = None\n if 
self._expected_responses:\n self._expected_responses.cancel_tasks()\n if self._writer:\n self._writer.close()\n self._writer = None\n self._reader = None\n self._sequence = 0\n self._command_queue.clear()", "def test_cancel_async_handle_call_during_assignment(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment(max_concurrent_queries=1)\n class Downstream:\n def __init__(self):\n self._num_requests = 0\n\n async def __call__(self, *args):\n self._num_requests += 1\n await signal_actor.wait.remote()\n\n return self._num_requests\n\n @serve.deployment\n class Ingress:\n def __init__(self, handle):\n self._h = handle.options(use_new_handle_api=True)\n\n async def __call__(self, *args):\n # Send a request and wait for it to be ongoing so we know that further\n # requests will block trying to assign a replica.\n initial_response = self._h.remote()\n\n async def one_waiter():\n return await signal_actor.cur_num_waiters.remote() == 1\n\n await async_wait_for_condition(one_waiter)\n\n # Make a second request, cancel it, and verify that it is cancelled.\n second_response = self._h.remote()\n second_response.cancel()\n with pytest.raises(asyncio.CancelledError):\n await second_response\n\n # Now signal the initial request to finish and check that the second request\n # never reached the replica.\n await signal_actor.send.remote()\n assert await initial_response == 1\n for i in range(2, 12):\n assert await self._h.remote() == i\n\n h = serve.run(Ingress.bind(Downstream.bind())).options(use_new_handle_api=True)\n h.remote().result() # Would raise if test failed.", "def test_unregistered(self):\n msg = self._send(self.unreg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.not_registered)", "def test_unregistered(self):\n msg = self._send(self.unreg_conn, '1')\n self.assertEqual(len(msg.responses), 1)\n self.assertEqual(msg.responses[0].text,\n self.app.not_registered)", "def dispatch(self, *args, **kwargs):\n if not self.registration_allowed():\n return redirect(self.disallowed_url)\n return super(ClientRegistrationView, self).dispatch(*args, **kwargs)", "async def disconnect(self):\n await self._client.disconnect()", "async def _disconnect(self: 'TelegramClient'):\n await self._sender.disconnect()\n await helpers._cancel(self._log[__name__], updates_handle=self._updates_handle)\n try:\n await self._updates_handle\n except asyncio.CancelledError:\n pass\n\n await self._session.insert_entities(self._entity_cache.get_all_entities())\n\n session_state, channel_states = self._message_box.session_state()\n for channel_id, pts in channel_states.items():\n await self._session.insert_channel_state(ChannelState(channel_id=channel_id, pts=pts))\n\n await self._replace_session_state(**session_state)", "async def unregister_client(connection):\n if connection.uuid in connections:\n connections.pop(connection.uuid)\n messages_to_clients.pop(connection.uuid)\n\n await connection.notify_disconnected()", "async def close(self) -> None:\n\n if self._client:\n await self._client.__aexit__()", "def unregister(self):\n if self.hub.is_connected:\n self._is_registered = False\n self.hub.unregister(self._private_key)\n self._hub_id = None\n self._public_id = None\n self._private_key = None\n else:\n raise SAMPClientError(\n \"Unable to unregister from the SAMP Hub. 
Hub proxy not connected.\"\n )", "def disconnect(self):\n from ray.util.client.api import _ClientAPI\n\n if self.client_worker is not None:\n self.client_worker.close()\n self.api = _ClientAPI()\n self.client_worker = None", "async def async_will_remove_from_hass(self) -> None:\n self._client.set_callback(None)\n self.hass.data[DOMAIN][self._entry_id].clients.remove(self)", "def test_cancel_on_http_client_disconnect_during_execution(\n serve_instance, use_fastapi: bool\n):\n inner_signal_actor = SignalActor.remote()\n outer_signal_actor = SignalActor.remote()\n\n @serve.deployment\n async def inner():\n await send_signal_on_cancellation(inner_signal_actor)\n\n if use_fastapi:\n app = FastAPI()\n\n @serve.deployment\n @serve.ingress(app)\n class Ingress:\n def __init__(self, handle):\n self._handle = handle.options(use_new_handle_api=True)\n\n @app.get(\"/\")\n async def wait_for_cancellation(self):\n await self._handle.remote()._to_object_ref()\n await send_signal_on_cancellation(outer_signal_actor)\n\n else:\n\n @serve.deployment\n class Ingress:\n def __init__(self, handle):\n self._handle = handle.options(use_new_handle_api=True)\n\n async def __call__(self, request: Request):\n await self._handle.remote()._to_object_ref()\n await send_signal_on_cancellation(outer_signal_actor)\n\n serve.run(Ingress.bind(inner.bind()))\n\n # Intentionally time out on the client, causing it to disconnect.\n with pytest.raises(requests.exceptions.ReadTimeout):\n requests.get(\"http://localhost:8000\", timeout=0.5)\n\n # Both the HTTP handler and the inner deployment handle call should be cancelled.\n ray.get(inner_signal_actor.wait.remote(), timeout=10)\n ray.get(outer_signal_actor.wait.remote(), timeout=10)", "def rstrtmgr_RmCancelCurrentTask(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwSessionHandle\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "async def wait_for_cancel(self):\n await self._cancel", "def get_hub_if_exists():\n return _threadlocal.hub", "def sync_close_connection(self):\n loop = asyncio.get_event_loop()\n task = loop.create_task(self.close_connection())\n loop.run_until_complete(task)", "async def cancel(self):\n\n await self.cb_0.cancel()\n await self.cb_1.cancel()", "async def on_disconnect(self, reconnecting):\n pass", "def async_token_cancel(auth_req_id): #pylint: disable=unused-argument\n return \"\"", "async def _perform_register(self):\n data = {\"username\": self.user, \"password\": self.password}\n return await self._perform_request(\"register\", data, lambda r: r.text())", "def cancel(self):\n return self.RES_OK", "def subscribe(self, request):\n email = self.cleaned_data.get('email')\n\n email_name, domain_part = email.rsplit('@', 1)\n domain_name = '@' + domain_part\n email_domain, created = Domain.objects.get_or_create(name=domain_name)\n\n subscriber, created = Subscriber.objects.get_or_create(email=email, mailing_list=self.mailing_list, defaults={\n 'domain': email_domain\n })\n subscriber.status = Status.PENDING\n subscriber.optin_ip_address = get_client_ip(request)\n subscriber.optin_date = timezone.now()\n subscriber.save()\n\n if not created:\n subscriber.tokens.filter(description='confirm_subscription').delete()\n\n token = subscriber.tokens.create(description='confirm_subscription')\n current_site = get_current_site(request)\n protocol = 'https' if request.is_secure() else 'http'\n domain = current_site.domain\n path = reverse('subscribers:confirm_double_optin_token', kwargs={\n 'mailing_list_uuid': 
self.mailing_list.uuid,\n 'token': token.text\n })\n confirm_link = '%s://%s%s' % (protocol, domain, path)\n\n confirm_email = self.mailing_list.get_confirm_email_template()\n confirm_email.send(subscriber.get_email(), {\n 'confirm_link': confirm_link\n })\n\n return subscriber", "def on_subscribe(self, request, ssn):\n if 'hub.challenge' not in request.GET:\n logger.error(f'Missing hub.challenge in subscription verification {ssn.pk}!')\n tasks.save.delay(\n pk = ssn.pk,\n subscribe_status = 'verifyerror',\n verifyerror_count = ssn.verifyerror_count + 1\n )\n return Response('Missing hub.challenge', status=HTTP_400_BAD_REQUEST)\n\n if not request.GET.get('hub.lease_seconds', '').isdigit():\n logger.error(f'Missing integer hub.lease_seconds in subscription verification {ssn.pk}!')\n tasks.save.delay(\n pk = ssn.pk,\n subscribe_status = 'verifyerror',\n verifyerror_count = ssn.verifyerror_count + 1\n )\n return Response('hub.lease_seconds required and must be integer', status=HTTP_400_BAD_REQUEST)\n\n if ssn.unsubscribe_status is not None:\n logger.error(f'Subscription {ssn.pk} received subscription verification request,'\n f' but its was explicitly unsubscribed before.')\n return Response('Unsubscribed')\n\n tasks.save.delay(\n pk = ssn.pk,\n subscribe_status = 'verified',\n lease_expiration_time = now() + timedelta(seconds=int(request.GET['hub.lease_seconds'])),\n connerror_count = 0,\n huberror_count = 0,\n verifyerror_count = 0,\n verifytimeout_count = 0\n )\n logger.info(f'Got {ssn.pk} subscribe confirmation from hub.')\n return HttpResponse(request.GET['hub.challenge'])", "async def async_setup(hass, config):\n hass.data[MODBUS_DOMAIN] = hub_collect = {}\n\n _LOGGER.debug(\"registering hubs\")\n for client_config in config[MODBUS_DOMAIN]:\n hub_collect[client_config[CONF_NAME]] = ModbusHub(client_config, hass.loop)\n\n def stop_modbus(event):\n \"\"\"Stop Modbus service.\"\"\"\n for client in hub_collect.values():\n del client\n\n def start_modbus(event):\n \"\"\"Start Modbus service.\"\"\"\n for client in hub_collect.values():\n _LOGGER.debug(\"setup hub %s\", client.name)\n client.setup()\n\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_modbus)\n\n # Register services for modbus\n hass.services.async_register(\n MODBUS_DOMAIN,\n SERVICE_WRITE_REGISTER,\n write_register,\n schema=SERVICE_WRITE_REGISTER_SCHEMA,\n )\n hass.services.async_register(\n MODBUS_DOMAIN,\n SERVICE_WRITE_COIL,\n write_coil,\n schema=SERVICE_WRITE_COIL_SCHEMA,\n )\n\n async def write_register(service):\n \"\"\"Write Modbus registers.\"\"\"\n unit = int(float(service.data[ATTR_UNIT]))\n address = int(float(service.data[ATTR_ADDRESS]))\n value = service.data[ATTR_VALUE]\n client_name = service.data[ATTR_HUB]\n if isinstance(value, list):\n await hub_collect[client_name].write_registers(\n unit, address, [int(float(i)) for i in value]\n )\n else:\n await hub_collect[client_name].write_register(\n unit, address, int(float(value))\n )\n\n async def write_coil(service):\n \"\"\"Write Modbus coil.\"\"\"\n unit = service.data[ATTR_UNIT]\n address = service.data[ATTR_ADDRESS]\n state = service.data[ATTR_STATE]\n client_name = service.data[ATTR_HUB]\n await hub_collect[client_name].write_coil(unit, address, state)\n\n hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_modbus)\n\n return True", "def _disconnect(self):\n raise NotImplementedError(\"ERROR: Unimplemented function.\")", "async def close_backend(self) -> None:\n _LOGGER.debug(\"Closing backend\")\n\n # Close reconnect task\n if 
self._reconnect_task:\n self._reconnect_task.cancel()\n self._reconnect_task = None\n\n # Disconnect snitun\n if self._snitun:\n await self._snitun.stop()\n\n # Cleanup\n self._snitun = None\n self._acme = None\n self._token = None\n self._instance_domain = None\n self._snitun_server = None\n\n self.cloud.client.dispatcher_message(const.DISPATCH_REMOTE_BACKEND_DOWN)", "def cancel(self):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "def on_disconnect(unused_client, unused_userdata, rc):\n\tprint('on_disconnect', error_str(rc))\n\n\t# Since a disconnect occurred, the next loop iteration will wait with\n\t# exponential backoff.\n\tglobal should_backoff\n\tshould_backoff = True", "def control_cancel(self, wait_for_ready: bool = True) -> None:\n self.__logger.debug('Eva.control_cancel called')\n return self.__http_client.control_cancel(wait_for_ready=wait_for_ready)", "def handle_reg_client(self, event):\n try:\n while True:\n client_req = self.receive_msg()\n self.choose_action(client_req[ZERO], client_req[ONE:], event)\n except socket.error as e:\n print(e)", "def cancel_callback(self, goal_handle):\n self.get_logger().info('Received cancel request')\n return CancelResponse.ACCEPT", "def register_client(self, authorization, request_body):\n # type (Mapping[str, str], str) -> Response\n if not authorization:\n raise OIDCFederationError(\"Missing Authorization header in registration request.\")\n if not authorization.startswith(\"pop \"):\n raise OIDCFederationError(\"Wrong Authentication scheme in registration request.\")\n\n registration_request = FederationRegistrationRequest(**json.loads(request_body))\n registration_request.rm_blanks()\n\n try:\n registration_request.verify()\n except MessageException as e:\n raise OIDCFederationError(\"Error in client registration request: {}.\".format(str(e)))\n\n client_software_statement, client_signing_key = self._verify_signature_chain(\n registration_request[\"software_statements\"],\n registration_request[\"signing_key\"])\n\n request_signature = authorization[len(\"pop \"):]\n try:\n SignedHttpRequest(client_signing_key).verify(request_signature, body=request_body)\n except ValidationError as e:\n raise OIDCFederationError(\"Could not verify signature of client registration request.\")\n\n provider_software_statement = self._find_software_statement_for_federation(\n client_software_statement.jwt.headers[\"kid\"])\n matched_preferences = self.registration_verification.verify(\n self.provider.capabilities.to_dict(), provider_software_statement.msg,\n registration_request.to_dict(), client_software_statement.msg)\n\n # recreate client registration request only with the common capabilities\n registration_request.update(matched_preferences)\n result = self.provider.client_registration_setup(registration_request)\n if isinstance(result, Response):\n return result\n\n registration_response = FederationRegistrationResponse(**result.to_dict())\n registration_response[\n \"provider_software_statement\"] = provider_software_statement.jwt.pack()\n return Created(registration_response.to_json(), content=\"application/json\")", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.queue.channel.rpc(self._basic_cancel)\n self.queue.consuming = False", "def closeConnecetion(self):\n client.close()", "def future(self, cancel):", "def get_hub(*args, **kwargs): # pylint:disable=unused-argument\n\n return get_hub_noargs()", "def success(self, cb: CircuitBreaker) -> None:", "def cancel(self):\n self.succeeded = False\n 
self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "async def disconnect(self):\n try:\n #print(\"Send disconnect command\")\n await self._writeControlPacket(ControlPacketsGenerator.getDisconnectPacket())\n except Exception as err:\n # TODO: catch this error if it is something like already disconnected\n #print(\"Unknown error\")\n raise err\n\n try:\n # Disconnect from this side as well.\n #print(\"Disconnect from this side as well\")\n self.core.ble.disconnect()\n except Exception as err:\n #print(\"Unknown error\")\n raise err", "async def close(self) -> None:\n assert self._client is not None\n await self._client.close()\n self._client = None", "async def onCanceled( # type: ignore[override]\n self, event: Event, strategy: Optional[EventHandler]\n ) -> None:\n pass", "def _cancel(self):\n client = SBusClient(self.storlet_pipe_path)\n try:\n resp = client.cancel(self.task_id)\n if not resp.status:\n raise StorletRuntimeException('Failed to cancel task')\n except SBusClientException:\n raise StorletRuntimeException('Failed to cancel task')", "def _dummy_client(self):\n logger.warning('Running dummy client for task #%s',\n self.task_data.get('task_id', 0))\n Client(LISTENER_ADDRESS).close()", "def on_disconnect( client, userdata, rc ):\n logging.info( \"Disconnected from Broker. Returned code: %s\\n\" %rc )\n client.connected_flag = False\n client.disconnect_flag = True", "async def public_cancel_subscription_async(\n subscription_id: str,\n user_id: str,\n body: Optional[CancelRequest] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = PublicCancelSubscription.create(\n subscription_id=subscription_id,\n user_id=user_id,\n body=body,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "async def _register_command(self) -> JSON:\n loop = asyncio.get_event_loop()\n async with aiohttp.ClientSession() as session:\n async with session.post(\n url=InteractionRoute().application(self._application_id).commands(self._id).url,\n json=self._data\n ) as response:\n interaction: JSON = await response.json(encoding='utf-8')\n return interaction", "def close(self) -> None:\n logger.debug(\"close grpc\")\n self.client.close()", "def _on_disconnect(self, *args, **kwargs):\n logger.info('Client disconnected %r', self.address)\n self.server.client_disconnected(self)\n # self.unregister()", "def _add_disconnect_interrupt_to_coroutine(self, coro: Awaitable[_T]) -> Awaitable[_T]:\n\n async def wrapping_coroutine():\n original_task = asyncio.create_task(coro)\n done, _ = await asyncio.wait(\n [original_task, self._wait_for_disconnect_task],\n return_when=asyncio.FIRST_COMPLETED,\n )\n if self._wait_for_disconnect_task in done:\n original_task.cancel()\n cause = self._wait_for_disconnect_task.result()\n if cause is not None:\n raise cause\n else:\n raise asyncio.CancelledError(\"Cancelled by disconnect\")\n else:\n return await original_task\n\n return wrapping_coroutine()", "async def disconnect(self):\n if not self._session:\n await self._create_session()\n await self._session.post(self._network.SERVER_ADDR + '/api/disconnect')", "async def connect(self) -> None:\n if not self._snitun:\n _LOGGER.error(\"Can't 
handle request-connection without backend\")\n raise RemoteNotConnected()\n\n # Check if we already connected\n if self._snitun.is_connected:\n return\n\n insecure = False\n try:\n _LOGGER.debug(\"Refresn snitun token\")\n async with async_timeout.timeout(30):\n await self._refresh_snitun_token()\n\n _LOGGER.debug(\"Attempting connection to %s\", self._snitun_server)\n async with async_timeout.timeout(30):\n await self._snitun.connect(\n self._token.fernet,\n self._token.aes_key,\n self._token.aes_iv,\n throttling=self._token.throttling,\n )\n _LOGGER.debug(\"Connected\")\n\n self.cloud.client.dispatcher_message(const.DISPATCH_REMOTE_CONNECT)\n except asyncio.TimeoutError:\n _LOGGER.error(\"Timeout connecting to snitun server\")\n except SniTunConnectionError:\n _LOGGER.error(\"Connection problem to snitun server\")\n except RemoteBackendError:\n _LOGGER.error(\"Can't refresh the snitun token\")\n except RemoteInsecureVersion:\n self.cloud.client.user_message(\n \"connect_remote_insecure\",\n \"Home Assistant Cloud error\",\n \"Remote connection is disabled because this Home Assistant instance is marked as insecure. For more information and to enable it again, visit the [Nabu Casa Account page](https://account.nabucasa.com).\",\n )\n insecure = True\n except SubscriptionExpired:\n pass\n except AttributeError:\n pass # Ignore because HA shutdown on snitun token refresh\n finally:\n # start retry task\n if self._snitun and not self._reconnect_task and not insecure:\n self._reconnect_task = self.cloud.run_task(self._reconnect_snitun())\n\n # Disconnect if the instance is mark as insecure and we're in reconnect mode\n elif self._reconnect_task and insecure:\n self.cloud.run_task(self.disconnect())", "def disconnect(self):\n raise NotImplementedError", "def disconnect(self):\n raise NotImplementedError", "async def unsubscribe(connection, message):\n from high_templar.hub import NotSubscribedException\n\n if 'requestId' not in message:\n return await connection.send({\n 'code': 'error',\n 'message': 'no-request-id'\n })\n\n for subscription in connection.app.hub.subscriptions[connection]:\n if subscription.request_id == message['requestId']:\n try:\n connection.app.hub.unsubscribe(subscription)\n await connection.send({ 'code': 'success' })\n except NotSubscribedException:\n await connection.send({\n 'code': 'error',\n 'message': 'not-subscribed'\n })\n break\n else:\n return await connection.send({\n 'code': 'error',\n 'message': 'not-subscribed'\n })", "def disconnect(self):\n\t\treturn Job(SDK.PrlVmDev_Disconnect(self.handle)[0])", "async def test_client_aexit_no_await(self) -> None:\n\n async with TestServer() as sa:\n ip, port = sa.ip, sa.port\n assert ip and port\n client = get_client(TestingService, host=ip, port=port)\n await client.__aenter__()\n self.assertTrue(await client.invert(False))\n self.assertFalse(await client.invert(True))\n # pyre-fixme[1001]: `client.__aexit__(None, None, None)` is never\n # awaited.\n client.__aexit__(None, None, None)\n del client # If we do not abort here then good", "def on_disconnect(unused_client, unused_userdata, rc):\n print(f\"on_disconnect: {error_str(rc)}\")\n print()\n\n global connected\n connected = False", "def disconnect(self, code):\n try:\n if not self.scope['user'].is_authenticated:\n logger.error('User in not authenticated')\n self.close()\n\n user = Profile.objects.get(user=self.scope['user'])\n group_name = user.group_name\n\n self.channel_layer.group_discard(group_name, self.channel_name)\n except Exception as e:\n 
logger.error(e)", "def after_request(r):\n if 'client_id' in g and g.client_id:\n release_semaphore(g.client_id)\n return r", "async def do_disconnect(self):\n # We shouldn't be asked to disconnect if it wasn't us who connected\n # originally\n if not self.connect_requested:\n log.info(\n \"%s disconnecting, although we didn't connect originally\", self.name\n )\n self.connect_requested = False\n # Actually do it\n await self.disconnect()", "async def register_client(connection):\n connections[connection.uuid] = connection\n messages_to_clients[connection.uuid] = asyncio.Queue()\n\n await connection.notify_connected()", "async def close_connection(self):\n\t\t...", "def testConnectionMade(self):\n d = self._getClientConnection()\n d.addCallback(lambda _: self.client.disconnect())\n return d", "def test_immediate_cancel(self):\n # This depends on the way reactor runs callFromThread calls, so need\n # real functional test.\n program = \"\"\"\\\nimport os, threading, signal, time, sys\n\nfrom twisted.internet.defer import Deferred, CancelledError\n\nimport crochet\ncrochet.setup()\n\[email protected]_in_reactor\ndef run():\n return Deferred()\n\ner = run()\ner.cancel()\ntry:\n er.wait(1)\nexcept CancelledError:\n sys.exit(23)\nelse:\n sys.exit(3)\n\"\"\"\n process = subprocess.Popen(\n [sys.executable, \"-c\", program],\n cwd=crochet_directory, )\n self.assertEqual(process.wait(), 23)", "def disconnect():\n\n if not session.disconnect():\n\n # If the client is not connected send an error.\n return dict(ok=False, error=\"Client not connected\")\n\n return dict(ok=True)", "def register_on_disconnect(\n self, on_disconnect_cb: Callable[[], Awaitable[None]]\n ) -> Callable:\n\n def unsubscribe() -> None:\n \"\"\"Unsubscribe listeners.\"\"\"\n if on_disconnect_cb in self._on_disconnect:\n self._on_disconnect.remove(on_disconnect_cb)\n\n self._on_disconnect.append(on_disconnect_cb)\n return unsubscribe", "def _connectPrepError(self, failure, authenticator):\r\n authenticator.cancel()\r\n return failure", "def on_reg_state(self):\n\n if self.sem:\n if self.account.info().reg_status >= 200:\n self.sem.release()" ]
[ "0.561611", "0.53545636", "0.53502023", "0.52480394", "0.5240914", "0.5077308", "0.50643533", "0.49984068", "0.49921355", "0.498722", "0.49676874", "0.49660704", "0.49642637", "0.49493033", "0.49309194", "0.49151906", "0.4906097", "0.4895857", "0.48792404", "0.48342264", "0.47956735", "0.4792095", "0.4781637", "0.47739178", "0.47660327", "0.4757601", "0.46935606", "0.46833223", "0.4681466", "0.46604437", "0.4656912", "0.4656912", "0.46529526", "0.4643858", "0.46339813", "0.46213564", "0.46136868", "0.46076652", "0.46067825", "0.4604825", "0.46008974", "0.45927852", "0.4591642", "0.45904332", "0.45697957", "0.45660797", "0.45660403", "0.4542884", "0.45338592", "0.45268536", "0.451842", "0.45182684", "0.44973946", "0.44935253", "0.44860032", "0.4484324", "0.44815975", "0.44815132", "0.44769514", "0.44691548", "0.44676802", "0.4466442", "0.44598982", "0.44588137", "0.44552928", "0.44536638", "0.4447748", "0.4447748", "0.4447748", "0.4447748", "0.44449666", "0.444409", "0.44399753", "0.44202217", "0.44161996", "0.44066468", "0.43989635", "0.439251", "0.43920535", "0.4391702", "0.43871278", "0.43845302", "0.43823427", "0.43807864", "0.43807864", "0.43778563", "0.4373548", "0.43731242", "0.43661907", "0.43595144", "0.43580338", "0.4350971", "0.4350579", "0.43433183", "0.4340803", "0.43380165", "0.43344378", "0.43326727", "0.43249083", "0.43213606" ]
0.6079265
0
read dataset from file
def read_data(self, filepath, is_build_vocab=False):
    with open("general_list.pkl", "rb") as file:
        self.general_list = pl.load(file)
    self.vocab.token2idx = {"<pad>": 0, "<unk>": 1}
    print(len(self.general_list))
    ll = 2
    for token in self.general_list:
        self.vocab.token2idx[token] = ll
        ll += 1
    print("max id", max(list(self.vocab.token2idx.values())), len(self.vocab.token2idx))
    self.vocab.idx2token = {idx: token for token, idx in self.vocab.token2idx.items()}
    #print("max_len", self.vocab.token2idx)
    datas = []
    with open(filepath, "r", encoding="utf-8") as reader:
        for line in reader:
            line = line.strip()
            if not line:
                continue
            obj = json.loads(line)
            datas.append(obj)
    return datas
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_dataset(file_path):\n return Dataset.load(file_path)", "def open_file(path):\n input_file = os.path.join(path)\n with open(input_file) as f:\n dataset = f.read()\n return dataset", "def import_dataset(fpath):\r\n data = read_csv(fpath)\r\n print(data.head())\r\n print(data.shape)\r\n return data", "def read_dataset(filetxt):\n text = open(filetxt, 'r')\n dataset = text.read()\n dataset = dataset.strip()\n text.close()\n return dataset", "def read_dataset(filetxt):\n text = open(filetxt, 'r')\n dataset = text.read()\n dataset = dataset.strip()\n text.close()\n return dataset", "def read_data(feature_file, label_file):", "def read(self, filename):\n st_tree = parse(filename)\n datasets = []\n id_ = 1\n st_datasets = st_tree.findall('weight')\n for st_dataset in st_datasets:\n date = util.str2date(st_dataset.find('date').text[0:10])\n weight = round(float(st_dataset.find('value').text), 1)\n note = st_dataset.find('comment').text\n datasets.append(Dataset(id_, date, weight, note=note))\n id_ += 1\n return datasets", "def read_dataset(self, file_path):\n try:\n with open(file_path, encoding=\"utf-8\") as file:\n fieldnames = ['hit_id', 'sentence', 'start_offset', 'end_offset', 'target_word', 'native_annots',\n 'nonnative_annots', 'native_complex', 'nonnative_complex', 'gold_label', 'gold_prob']\n\n dataset = pd.read_csv(file, names=fieldnames, sep=\"\\t\")\n\n except FileNotFoundError:\n print(\"File {} not found.\".format(file_path))\n dataset = None\n\n return dataset", "def read_data_set():\n # shapes of datasets -- [] means expanded form:\n # - X: J\n # - net.R: J [x J x 1]\n # - F_DIST: J x J x num_features\n # - F_DIST_w1: J x J x num_features\n # - w['except_first'][-1]: (last weights) J x num_features [x 1]\n # - w['except_first'][1:-1]: (second to last weights) J x J x num_features\n # - first weights **were** also J x J x num_features\n # - w['first_for_r']: J x 1 x num_features\n\n read_X()\n read_weights(read_FDIST())", "def load(self, file):\n with open(file) as file:\n self.dataset = [line.strip() for line in file]\n\n return self.dataset", "def load_data():\n with open('../data/dataset.txt', 'r') as data_file:\n return data_file.read().split('\\n')", "def read(filename: str) -> orm.Data:\n return from_bands_inspect(load(hdf5_file=filename))", "def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def load():\n filepath = dirname(abspath(__file__))\n data = recfromtxt(filepath + '/scotvote.csv', delimiter=\",\",\n names=True, dtype=float, usecols=(1,2,3,4,5,6,7,8))\n names = list(data.dtype.names)\n endog = array(data[names[0]], dtype=float)\n endog_name = names[0]\n exog = column_stack(data[i] for i in names[1:]).astype(float)\n exog_name = names[1:]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def readfile(self, path, filename):\n # The DataStudio software uses ISO-8859-1 encoding (especially for the degree sign in temperature files)\n file = open(path + filename, 
encoding=\"iso-8859-1\")\n rowlist = file.readlines()\n\n title = rowlist[0].strip(\"\\n\")\n labels = rowlist[1].strip(\"\\n\").split(sep=\"\\t\")\n\n data = np.zeros((len(rowlist)-2, 2))\n\n for i in range(2, len(rowlist)):\n columns = rowlist[i].split(sep=\"\\t\")\n data[i-2, 0] = float(columns[0].replace(\",\", \".\"))\n data[i-2, 1] = float(columns[1].replace(\",\", \".\"))\n\n return data, title, labels", "def read_from(self, filename):\n self.x, self.y = np.loadtxt(filename, unpack=True, usecols=(0, 1))", "def read_file(self,file_name):\r\n data = np.genfromtxt(file_name)\r\n return data;", "def get_dataset(filepath):\n return pandas.read_csv(filepath, header='infer')", "def read_from_file(self, filename: str) -> None:", "def load_data(file_to_read):\n\n data = np.recfromtxt(file_to_read)\n data = np.asarray(data)\n\n return data", "def read_Dataset(dataset_Path):\n dataset = pd.read_csv(dataset_Path)\n return dataset", "def loadData(infile,k):\n f = open(infile,'r')\n #f = f.read().split(\"\\n\")\n #raw = json.loads(f[1])\n f = f.read()\n raw = json.loads(f)\n data = np.array(raw)\n dataset = data[k]\n return dataset", "def _read_dataset(self, dataset_path):\n dataset = pd.read_pickle(dataset_path)\n return dataset", "def readFromFile(filename):\n raise NotImplementedError", "def test_dataset_from_file(train_dataset):\n dummy = \"justo. Praesent luctus. Curabitur egestas nunc sed libero. Proin sed\"\n assert train_dataset[0][0] == dummy\n assert train_dataset[0][1] == '6'", "def read_file(self):\n colspecs = [[0, 7]] # for the id\n names = ['id']\n for question in self.question_list:\n colspecs.extend(question.get_column_range())\n names.extend(question.get_column_names())\n\n self.data = pd.read_fwf(self.file, colspecs=colspecs, encoding=self.encoding, names=names, header=None)\n self.data.fillna(0, inplace=True)\n self.data = self.data.astype(int)\n return self.data", "def read_data(self, file_path):\n raise NotImplementedError('should be overridden with specific data reader')", "def load_data_set(filename):\n\n input_file = open(filename)\n\n num_features = len(input_file.readline().split('\\t')) - 1\n input_file.seek(0)\n data_mat = []\n label_mat = []\n\n for line in input_file.readlines():\n line_arr = []\n curr_line = line.strip().split('\\t')\n for i in range(num_features):\n line_arr.append(float(curr_line[i]))\n data_mat.append(line_arr)\n label_mat.append(float(curr_line[-1]))\n\n return data_mat, label_mat", "def load_dataset(filepath):\n \n X = list()\n x = list()\n\n Y = list()\n y = list()\n \n for line in open(filepath):\n # blank lines separate sequences\n if len(line) <= 1:\n X.append(x)\n Y.append(y)\n\n x = list()\n y = list()\n else:\n a, b = line.strip().split('\\t')\n x.append(a)\n y.append(b)\n \n return X, Y", "def load_dataset(filename):\n return [(\n lambda point: {\n 'coordinate': tuple(map(float, point[:-1])),\n 'label': int(point[-1])})\n (string.strip().rstrip().split(','))\n for string in open(filename, 'r').read()\n .strip().rstrip().split('\\n')]", "def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n label = np.array(hf.get('label'))\n return data, label", "def read_dataset_v1():\n path = load_config()\n T = feather.read_dataframe(path['data_dir'] / 'T_dat.feather')\n E = feather.read_dataframe(path['data_dir'] / 'E_dat.feather')\n M = feather.read_dataframe(path['data_dir'] / 'Meta.feather')\n data = sio.loadmat(path['data_dir'] / 'highvar_genes.mat', squeeze_me=True)\n return T, E, M, data", "def 
read_data(filename):\r\n with open(filename,'rb') as f:\r\n data = pk.load(f,encoding='bytes')\r\n return data[b'data'],data[b'labels']", "def read_KNN_dataFile(file):\n A = np.genfromtxt(file)\n return A", "def _read_train_datas(self):\r\n with open(self.train_label_path, 'r') as fb:\r\n lines = fb.readlines()\r\n return self._parse_raw_labels(lines)", "def read(cls, file_str, drop_variables_str=None, decode_cf=True, decode_times=True, engine_str=None):\n ds = xr.open_dataset(file_str, drop_variables=drop_variables_str, decode_cf=decode_cf, decode_times=decode_times, engine=engine_str, chunks=1000000)\n return ds", "def read_dataset(file_pattern, dataset_name='PIR', batch_size=128):\n\n auto = tf.data.experimental.AUTOTUNE\n ignore_order = tf.data.Options()\n dataset = tf.data.Dataset.list_files(file_pattern)\n dataset = dataset.flat_map(tf.data.TFRecordDataset)\n dataset = dataset.prefetch(auto)\n dataset.with_options(ignore_order)\n if dataset_name == 'PIR':\n dataset = dataset.map(\n read_labeled_tfrecord_photoshop, num_parallel_calls=auto)\n elif dataset_name == 'copydays10k' or dataset_name == 'copydays10k-strong':\n dataset = dataset.map(\n read_labeled_tfrecord_copydays10k, num_parallel_calls=auto)\n else:\n raise NotImplementedError(\n f'Evaluation for dataset {dataset_name} not implemented')\n dataset = dataset.batch(batch_size=batch_size)\n dataset_iter = iter(dataset)\n return dataset_iter", "def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n label = np.array(hf.get('label'))\n return data, label", "def load_file(filename):\n f_data = []\n # open the data-set file\n file = open(filename, \"r\")\n for line in file:\n row = line.strip() # a row in the file\n f_data.append(row) # append it to the 2D array\n\n return f_data", "def load_dataset(file_name):\n file_path = join(DATA_DIR, file_name)\n text_field = Field(pad_token=None, tokenize=_tokenize_str)\n\n dataset = TabularDataset(\n path=file_path,\n format='csv',\n fields=[('text', text_field)],\n skip_header=False)\n\n text_field.build_vocab(dataset)\n return dataset", "def _read_data(self):\n with self._open(self.filename, 'rb') as f:\n try:\n f.seek(self._offset_data, self._offset_whence)\n except IOError:\n print('Error: hedp.io.HamamatsuFile seeking outside of file limits.')\n print(' Failed to parse file.')\n print(\" Either the 'offset' or 'dtype' input arguments must be wrong!\")\n raise\n except:\n raise\n\n data_len = np.prod(self.shape)*np.dtype(self._dtype).itemsize\n data_str = f.read(data_len)\n if data_len != len(data_str):\n print(data_len, len(data_str))\n raise ValueError('File ended before all data was read. 
Probably wrong offset or dtype!')\n\n\n self.data = np.fromstring(data_str, dtype=self._dtype).reshape(self.shape[::-1])\n self.data = np.ndarray.astype(self.data, 'float32')\n\n #self.data = np.fromfile(f, dtype=self._dtype,\n # count=np.prod(self.shape)).reshape(self.shape[::-1])", "def load_dataset(file_name, model_ver):\n\n print 'Loading dataset ...'\n\n if model_ver == 'dmspline':\n file_path = join(DATA_DIR, file_name)\n id_field = Field(sequential=False, use_vocab=False, dtype=torch.int)\n text_field = Field(pad_token=None, tokenize=_tokenize_str)\n attr_field = Field(sequential=False, use_vocab=False, dtype=torch.float)\n standardzed_attr_field = Field(sequential=False, use_vocab=False, dtype=torch.float)\n\n dataset = TabularDataset(\n path=file_path,\n format='csv',\n fields=[('attr', attr_field), ('id', id_field), ('standardized_attr', standardzed_attr_field), ('text', text_field)],\n skip_header=True)\n\n else:\n file_path = join(DATA_DIR, file_name)\n id_field = Field(sequential=False, use_vocab=False, dtype=torch.int)\n text_field = Field(pad_token=None, tokenize=_tokenize_str)\n\n dataset = TabularDataset(\n path=file_path,\n format='csv',\n fields=[('id', id_field), ('text', text_field)],\n skip_header=True)\n\n text_field.build_vocab(dataset, min_freq=10)\n return dataset", "def load_data(filename):\n data = []\n with open('data/' + filename) as raw_data:\n for line in raw_data.readlines():\n data.append(float(line.strip('\\n')))\n return data\n # data = np.mat(np.genfromtxt('data/' + filename)).T\n # return data", "def read_dataset(fname, dname, start=0, stop=-1, skip=1):\n data = None\n\n try:\n with h5py.File(fname, 'r') as f:\n if stop == -1:\n return f[dname][start::skip,]\n else:\n return f[dname][start:stop:skip,]\n except IOError:\n print(\"Error: cannot find file %s.\" % fname)\n\n return data", "def read_dataset(path_to_dataset_folder, index_filename):\n ###############################################################\n # Fill your code in this function\n ###############################################################\n # Hint: open(path_to_dataset_folder+'/'+index_filename,'r')\n with open(path_to_dataset_folder + '/' + index_filename, 'r') as index_file:\n lines = index_file.readlines()\n txt_paths = {}\n for line in lines:\n txt_path = line.split()\n txt_paths[txt_path[1]] = txt_path[0]\n\n A = []\n T = []\n for sample_file, label in txt_paths.items():\n A_16dim = []\n with open(path_to_dataset_folder + '/' + sample_file, 'r') as dim_values:\n lines = dim_values.readlines()\n for line in lines:\n dim_value = line.split()\n A_helper = [1]\n for element in dim_value:\n A_helper.append(float(element))\n A.append(A_helper)\n label = int(label)\n T_helper = [label]\n T.append(T_helper)\n A = np.array(A)\n T = np.array(T)\n return A, T", "def load_data(path_dataset):\n data = read_txt(path_dataset)[1:]\n return preprocess_data(data)", "def read_data(self,filename):\n self.x = [] #Input values\n self.t = [] #Target values\n\n with open(filename, \"r\") as infile:\n lines = infile.readlines()\n self.n = len(lines)\n for line in lines:\n words = line.split()\n self.x.append(float(words[0]))\n self.t.append(float(words[1]))\n\n self.x = np.array(self.x)\n self.t = np.array(self.t)\n self.create_design_matrix()", "def get_file(file_name):\r\n f = open(file_name)\r\n\r\n tids = f.readlines()\r\n \r\n dataset = [(int(tid), get_from_id(int(tid))) for tid in tids]\r\n\r\n f.close()\r\n return dataset", "def read(self, data_path: str = None, *args, **kwargs) -> Dict:\n\n with 
open(data_path) as f:\n content = f.readlines()\n\n dataset = dict()\n dataset[\"train\"] = [(line,) for line in content]\n dataset[\"valid\"] = []\n dataset[\"test\"] = []\n\n return dataset", "def read_data(data_path, filename,feature_number):\n\n with open(data_path + \"/\" + filename, 'r', encoding='utf-8-sig') as f: \n X = np.genfromtxt(f, delimiter=',')[:,0:feature_number]\n\n\n # Last column of datafile contains output labels\n Y = np.genfromtxt(data_path + \"/\" + filename,delimiter=\",\")[:,feature_number]\n Y = Y.reshape(X.shape[0])\n\n return X,Y", "def readData(path): \n try:\n open(path)\n dataset = np.loadtxt(path)\n # arms played by uniformly-random policy as recorded in dataset\n arms = dataset[:, 0].astype(int) \n # rewards received by playing arms using a uniformly-random policy as \n # recorded in dataset \n rewards = dataset[:, 1] \n # context vector \n contexts = dataset[:, 2:] \n except FileNotFoundError: \n raise \n return(arms, rewards, contexts)", "def read_dataset(filename: str) -> List[str]:\n with open(filename, encoding=\"utf8\") as file:\n f = (line.strip() for line in file)\n return [line for line in f if line]", "def test_full_dataset_from_file(full_dataset):\n train_dummy = \"justo. Praesent luctus. Curabitur egestas nunc sed libero. Proin sed\"\n val_dummy = \"malesuada. Integer id magna et ipsum cursus vestibulum. Mauris magna.\"\n\n assert full_dataset.train[0][0] == train_dummy\n assert full_dataset.train[0][1] == '6'\n\n assert full_dataset.val[0][0] == val_dummy\n assert full_dataset.val[0][1] == '8'\n\n assert full_dataset[0][0] == train_dummy\n assert full_dataset[100][0] == val_dummy", "def read(self, filename):\n pass", "def read(self, filename):\n pass", "def get_data(filename):\r\n return pd.read_csv(filename)", "def load_data(filename) :\r\n data = Data()\r\n data.load(filename)\r\n return data", "def read_data_file(self, file_name: str = \"\") -> None:\n\n if Path(file_name).is_file():\n with open(file_name, \"r\") as file:\n data_list = []\n line = file.readline()\n while line:\n data_list.append(int(line))\n line = file.readline()\n\n self.data = data_list\n self.mu = self.calculate_mean()\n self.sigma = self.calculate_std()\n\n else:\n raise FileNotFoundError(f\"\"\"{file_name} doesn't exist, please check\n the file or path\"\"\")", "def read_data(self, path):\n if self.data_format == 'twenty': \n length = 20\n else: raise ValueError(\"self.data_format = '%s' unknown.\" % \n self.data_format)\n data = []\n with open(path,'r') as f:\n for line in f:\n data.append([float(line[k:(k + length)]) for k in range(\n 0, len(line.strip('\\n')),length)])\n return np.array(data)", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def read_data(filepath):\n df = pd.read_csv(filepath)\n return df", "def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n return data", "def read(cls, filename):\n with fits.open(str(make_path(filename)), memmap=False) as hdulist:\n return cls.from_hdulist(hdulist)", "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n 
\treturn images, labels", "def read_data(path, filename, drop_col=\"index\", dt=\"float32\"):\n\tdata = pd.read_csv(path + filename, sep=\",\", dtype=dt)\n\tdata = data.drop(drop_col, axis=1)\n\treturn data.as_matrix()", "def fromfile(self, path):\n\t\tdata = filetools.read_data(path)\n\t\tprint \"File read: %i lines\" % len(data)\n\t\tself.build_matrix(data)", "def load_dataset(self, file_path: str,file_name: str) -> pd.DataFrame:\n combined_path=os.path.join(file_path,file_name)\n self.raw_data=pd.read_csv(combined_path)\n return self.raw_data", "def test_read(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dt = np.dtype('(3,)f8')\n dset = f.create_dataset('x', (10,), dtype=dt)\n # TODO implement this\n # assert dset.shape == (10,)\n # assert dset.dtype == dt\n\n # Full read\n out = dset[...]\n assert out.dtype == np.dtype('f8')\n assert out.shape == (10, 3)\n\n # Single element\n out = dset[0]\n assert out.dtype == np.dtype('f8')\n assert out.shape == (3,)\n\n # Range\n out = dset[2:8:2]\n assert out.dtype == np.dtype('f8')\n assert out.shape == (3, 3)", "def readFile(self, fileName, d, addBias=False):\n\n dataSet = [];\n\n # open file and read lines from it, where each line contains a data point and label\n f = open(fileName, 'r');\n for line in f:\n # split line into list of strings, each string representing an element of the data point\n dataPt = (line.strip()).split(); \n \n # extract label for current data point\n label = int(dataPt[0]); \n if label == 0:\n label = -1; \n \n # create ndarray for data point with bias\n if addBias:\n fVector = np.zeros(d+1);\n fVector[-1] = 1;\n else:\n fVector = np.zeros(d);\n for i in range(1,len(dataPt)): \n fIndex, fVal = dataPt[i].split(':');\n fVector[int(fIndex)] = float(fVal);\n \n # add data point and label to data set\n dataSet.append((fVector,label));\n \n return dataSet;", "def _process_data_file(self):\n \n with open(self.data_file, 'r') as f:\n self.description = f.readline().strip()\n data = np.loadtxt(self.data_file, skiprows=1)\n\n return data", "def read_single_analysis_data(f):\n \n data = np.loadtxt(f, dtype=np.float64)\n\n return data", "def read_data(filepath, d = ','):\n return np.genfromtxt(filepath, delimiter=d, dtype=None)", "def get_local_dataset(\n self, \n file_name: str\n ):\n pd.read_csv(file_name)\n #save", "def loadSampleData(filePath):\n dataFile = open(filePath,'r')\n data = dataFile.read()\n dataFile.close()\n return data", "def read_data(filename):\n data = np.genfromtxt(filename, delimiter=',', dtype=str)\n X = data[1:,2:].astype(np.float)\n y = data[1:,0]\n y[y==label0]='0'\n y[y==label1]='1'\n y[y==label2]='2'\n y=y.astype(np.float)\n return X, y", "def read(self, filename):\n raise NotImplementedError", "def load_data(self):\n with open(self.file_name) as f:\n lines = f.readlines()\n\n labels = list()\n all_dat = list()\n for i, l in enumerate(lines):\n\n labels.append(int(l[0]))\n\n l = gensim.utils.any2unicode(l)\n all_dat.append(LabeledSentence(l.split(\"\\t\")[-1], [i]))\n\n return all_dat, np.asarray(labels)", "def get_data():\n return np.genfromtxt(FILENAME, delimiter=',', skip_header=1)", "def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n label = np.array(hf.get('label'))\n\n data, label=data[:,:,:,0:2], label[:,:,:,0]\n #data=np.expand_dims(data,axis=-1)\n label=np.expand_dims(label,axis=-1)\n\n return data, label", "def import_data(path_dataset):\n\n X=np.array(pd.read_hdf(path_dataset))\n\n print('Data set shape:',np.shape(X))\n 
print('#####################################')\n \n return X", "def read_data(path):\n with h5py.File(path, 'r') as hf:\t\n input_ = np.array(hf.get('input'))\n label_ = np.array(hf.get('label'))\n return input_, label_", "def from_file(path: Union[Path, str], name: str) -> Dataset:\n _path = path if isinstance(path, Path) else Path(path)\n with open(_path, \"r\", encoding=\"utf-8\") as yupi_fd:\n data = json.load(yupi_fd)\n return Dataset._from_json(name, data)", "def read_file(self, fullname):\n\n data = np.genfromtxt(fullname, dtype=None, names=True, skip_header=0)\n return data", "def read(self):\n dataset = Dataset()\n with open(self.corpus_folder, encoding='utf-8') as file:\n reader = csv.reader(file, delimiter='\\t')\n for row in reader:\n docid, title, abstract = row\n title = title.strip()\n abstract = abstract.strip()\n\n document = Document()\n if title:\n document.parts['title'] = Part(title)\n if abstract and abstract != 'null':\n document.parts['abstract'] = Part(abstract)\n\n dataset.documents[docid] = document\n\n return dataset", "def read_file(netcdf_file_name):\n\n dataset_object = netCDF4.Dataset(netcdf_file_name)\n\n saliency_dict = {\n MODEL_FILE_KEY: str(getattr(dataset_object, MODEL_FILE_KEY)),\n IS_LAYER_OUTPUT_KEY: bool(getattr(dataset_object, IS_LAYER_OUTPUT_KEY)),\n LAYER_NAME_KEY: str(getattr(dataset_object, LAYER_NAME_KEY)),\n NEURON_INDICES_KEY: numpy.array(\n getattr(dataset_object, NEURON_INDICES_KEY), dtype=int\n ),\n IDEAL_ACTIVATION_KEY: getattr(dataset_object, IDEAL_ACTIVATION_KEY),\n MULTIPLY_BY_INPUT_KEY:\n bool(getattr(dataset_object, MULTIPLY_BY_INPUT_KEY)),\n VALID_TIMES_KEY: numpy.array(\n dataset_object.variables[VALID_TIMES_KEY][:], dtype=int\n ),\n LATITUDES_KEY: numpy.array(\n dataset_object.variables[LATITUDES_KEY][:], dtype=float\n ),\n LONGITUDES_KEY: numpy.array(\n dataset_object.variables[LONGITUDES_KEY][:], dtype=float\n ),\n SALIENCY_MATRIX_KEY: numpy.array(\n dataset_object.variables[SALIENCY_MATRIX_KEY][:], dtype=float\n )\n }\n\n dataset_object.close()\n return saliency_dict", "def read_data(filename):\n data = np.genfromtxt(filename, delimiter=',', dtype = str)\n X = data[1:,2:].astype(np.float)\n y = data[1:,0]\n y[y==label0]='0' \n y[y==label1]='1' \n y[y==label2]='2'\n y.astype(np.float) \n return X, y", "def read_input_file(file_name):\n matrix = np.asmatrix(np.loadtxt(file_name))\n matrix = matrix[:, :-1]\n\n (rows, attribute_count) = np.shape(matrix)\n\n # convert data into an list of Examples\n examples = [\n Example(matrix[i, :])\n for i in range(0, rows)\n ]\n\n return (examples, attribute_count)", "def load_data(path):\n with open(path) as f:\n return f.readlines()", "def load_file(self, dset_type):\r\n path = './data/{0}.{1}'.format(self.name, dset_type)\r\n try:\r\n file_contents = np.genfromtxt(path, missing_values=0, skip_header=0,\r\n dtype=int, delimiter=\",\")\r\n self.labels[dset_type] = file_contents[:, 0]\r\n self.examples[dset_type] = file_contents[:, 1:]\r\n\r\n except RuntimeError:\r\n print('ERROR: Unable to load file ''{0}''. 
Check path and try again.'.format(path))", "def load_data(dataset_path: str):\n data = arff.loadarff(dataset_path)\n data_frame = pd.DataFrame(data[0])\n return data_frame", "def read_dataset(file_read_func, input_files, config):\n # Shard, shuffle, and read files.\n filenames = tf.gfile.Glob(input_files)\n if not filenames:\n raise ValueError('Invalid input path specified in '\n '`input_reader_config`.')\n num_readers = config.num_readers\n if num_readers > len(filenames):\n num_readers = len(filenames)\n tf.logging.warning('num_readers has been reduced to %d to match input file '\n 'shards.' % num_readers)\n filename_dataset = tf.data.Dataset.from_tensor_slices(filenames)\n if config.shuffle:\n filename_dataset = filename_dataset.shuffle(\n config.filenames_shuffle_buffer_size)\n elif num_readers > 1:\n tf.logging.warning('`shuffle` is false, but the input data stream is '\n 'still slightly shuffled since `num_readers` > 1.')\n filename_dataset = filename_dataset.repeat(config.num_epochs or None)\n records_dataset = filename_dataset.apply(\n tf.contrib.data.parallel_interleave(\n file_read_func,\n cycle_length=num_readers,\n block_length=config.read_block_length,\n sloppy=config.shuffle))\n if config.shuffle:\n records_dataset = records_dataset.shuffle(config.shuffle_buffer_size)\n return records_dataset", "def read_from_file(self,fn):\n fh = open(fn,\"r\")\n labels = []\n xyz = []\n sizes = []\n colors = []\n for line in fh.readlines():\n try:\n if not line.startswith(\"#\"):\n label,x,y,z,size,r,g,b = line.split(\",\")\n labels.append(label)\n xyz.append([x,y,z])\n sizes.append(size)\n colors.append((float(r),float(g),float(b)))\n except IOError, ioe:\n print \"IOError:\", ioe\n self._labels = np.array(labels)\n self._xyz = np.array(xyz).astype(\"f\")\n self._sizes = np.array(sizes).astype(\"f\")\n self._colors = np.array(colors)", "def read_file(self):\n # This is quite ugly but works for now.\n self.header = read_csv(self.file_name, delim_whitespace=True,\n header=TrackData.header_line,\n nrows=1).to_dict(orient='index')[0]\n self.data = read_csv(self.file_name, delim_whitespace=True, \n header=TrackData.data_line)", "def read_data(self):\n print 'Reading Data ...'\n fname = self.wpath + 'Data/' + self.city[2] + '-' + self.application + '.csv.bz2'\n self.dataset = loadtxt(fname, skiprows=1,\n dtype=[('lat', 'f8'), ('lng', 'f8'), ('time', 'i4'), ('user', 'S20')],\n usecols=(0, 1, 2, 3), delimiter=';', comments='#')", "def read_data_test(path):\n with h5py.File(path, 'r') as hf:\n input_ = np.array(hf.get('data'))\n label_ = np.array(hf.get('label'))\n\t\n return input_, label_", "def read_data_set(self, path):\n data = self._preprocessor.create_empty_input_target_data()\n\n for filename in glob.iglob(os.path.join(path, \"*\" + NoiseReader.file_extension())):\n exp_name = os.path.splitext(os.path.basename(filename))[0]\n\n experiment = self.read_experiment(path, exp_name)\n data = self._preprocessor.concat_input_target_data(data, experiment)\n\n return data", "def read_dataset(file, interner=None, point_factory=ListOpsDataPoint):\n data_set = []\n r = range(0, 10)\n\n if (interner == None):\n func = RespectfulNumbering(respect=r)\n interner = interners.Interner(not_present_function=func)\n\n\n reader = csv.reader(file, delimiter='\\t')\n for row in reader:\n #print(row)\n if len(row) > 1:\n try:\n data_set.append(point_factory(row[0], row[1], interner))\n except ValueError:\n print(\"Warning, could not read line: %s\"%row, file=sys.stderr)\n\n return data_set, interner", "def 
_read_data(filename):\n logger.info('Reading file {}'.format(filename))\n return pd.read_csv(filename)", "def loadDCData(filename):\n data = np.genfromtxt(filename+'.txt', skip_header=3, names=True)\n globals()[filename] = data", "def read(dataset = \"training\", path = \".\"):\n\n if dataset is \"training\":\n fname_img = os.path.join(path, 'train-images.idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')\n elif dataset is \"testing\":\n fname_img = os.path.join(path, 't10k-images.idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n # Load everything in some numpy arrays\n with open(fname_lbl, 'rb') as flbl:\n magic, num = struct.unpack(\">II\", flbl.read(8))\n lbl = np.fromfile(flbl, dtype=np.int8)\n\n with open(fname_img, 'rb') as fimg:\n magic, num, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)\n\n get_img = lambda idx: (lbl[idx], img[idx])\n\n # Create an iterator which returns each image in turn\n for i in range(len(lbl)):\n yield get_img(i)" ]
[ "0.7562527", "0.7480861", "0.7459469", "0.7441094", "0.7441094", "0.7398603", "0.73654073", "0.72704476", "0.72160506", "0.7185216", "0.7167113", "0.7102375", "0.7040365", "0.7037207", "0.7018122", "0.70112073", "0.69961697", "0.69836414", "0.6924565", "0.69151974", "0.6850237", "0.684586", "0.68157226", "0.6767203", "0.6746395", "0.6734499", "0.67325425", "0.67280847", "0.67075074", "0.6692892", "0.6672259", "0.66671884", "0.6666823", "0.66655123", "0.6661339", "0.66611546", "0.66142243", "0.6608949", "0.6605471", "0.6602876", "0.659579", "0.6594388", "0.65933573", "0.65928084", "0.65912014", "0.65771514", "0.6572033", "0.6549422", "0.6543777", "0.65401196", "0.65383434", "0.65303165", "0.652537", "0.6522034", "0.6522034", "0.65185267", "0.6517381", "0.6511854", "0.65089816", "0.647014", "0.647014", "0.647014", "0.6469101", "0.64620787", "0.6461743", "0.6455589", "0.6454323", "0.645264", "0.6450595", "0.6448139", "0.64474946", "0.6438233", "0.6428809", "0.64270747", "0.642672", "0.6422975", "0.64061946", "0.6397513", "0.6394444", "0.639208", "0.638763", "0.63874155", "0.63856506", "0.6371715", "0.63678694", "0.63670033", "0.63644695", "0.63581336", "0.6357069", "0.6350793", "0.63426393", "0.6339092", "0.6334932", "0.63339204", "0.6326251", "0.63189495", "0.6318717", "0.6318504", "0.63163733", "0.6315396", "0.6308588" ]
0.0
-1
Ask a yes/no/quit question via raw_input() and return their answer. "question" is a string that is presented to the user. "default" is the presumed answer if the user just hits <Enter>. It must be "yes" (the default), "no", "quit" or None (meaning an answer is required of the user). The "answer" return value is one of "yes", "no" or "quit".
def query_yes_no_quit(question, default="yes"): valid = {"yes":"yes", "y":"yes", "ye":"yes", "no":"no", "n":"no", "quit":"quit", "qui":"quit", "qu":"quit", "q":"quit"} if default == None: prompt = " [y/n/q] " elif default == "yes": prompt = " [Y/n/q] " elif default == "no": prompt = " [y/N/q] " elif default == "quit": prompt = " [y/n/Q] " else: raise ValueError("invalid default answer: '%s'" % default) while 1: sys.stdout.write(question + prompt) choice = raw_input().lower() if default is not None and choice == '': return default elif choice in valid.keys(): return valid[choice] else: sys.stdout.write("Please respond with 'yes', 'no' or 'quit'.\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_yes_no_quit(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\",\n \"quit\":\"quit\", \"qui\":\"quit\", \"qu\":\"quit\", \"q\":\"quit\"}\n if default == None:\n prompt = \" [y/n/q] \"\n elif default == \"yes\":\n prompt = \" [Y/n/q] \"\n elif default == \"no\":\n prompt = \" [y/N/q] \"\n elif default == \"quit\":\n prompt = \" [y/n/Q] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid.keys():\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes', 'no' or 'quit'.\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\"}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n while 1:\n sys.stdout.write(question + prompt)\n if sys.version_info[0]==2:\n choice = raw_input().lower()\n elif sys.version_info[0]>2:\n choice = input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\"}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid.keys():\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(self,question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\", \"no\":\"no\", \"n\":\"no\"}\n\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid.keys():\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"no\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = { \"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False }\n if default 
is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError( _(\"invalid default answer:\") + \" '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write( _(\"Please respond with 'yes' or 'no' \") + \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \" \\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return 
valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\r\n \"no\": False, \"n\": False}\r\n if default is None:\r\n prompt = \" [y/n] \"\r\n elif default == 
\"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\r\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return 
valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\r\n \"no\": False, \"n\": False}\r\n\r\n if default is None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n\tvalid = {\"yes\": True, \"y\": True, \"ye\": True,\n\t\t\t \"no\": False, \"n\": False}\n\tif default is None:\n\t\tprompt = \" [y/n] \"\n\telif default == \"yes\":\n\t\tprompt = \" [Y/n] \"\n\telif default == \"no\":\n\t\tprompt = \" [y/N] \"\n\telse:\n\t\traise ValueError(\"invalid default answer: '%s'\" % default)\n\n\twhile True:\n\t\tsys.stdout.write(question + prompt)\n\t\tchoice = raw_input().lower()\n\t\tif default is not None and choice == '':\n\t\t\treturn valid[default]\n\t\telif choice in valid:\n\t\t\treturn valid[choice]\n\t\telse:\n\t\t\tsys.stdout.write(\"Please respond with 'yes' or 'no' \"\n\t\t\t\t\t\t\t \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default='no'):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None: \n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n print(question, end=' ')\n sys.stdout.write(prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid: \n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n return", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\r\n \"no\":False, \"n\":False}\r\n if default == None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\r\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\":True, \"y\":True, 
\"ye\":True,\r\n \"no\":False, \"n\":False}\r\n if default == None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\r\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True, \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n\tvalid = {\"yes\": True, \"y\": True, \"ye\": True,\n\t\t\t \"no\": False, \"n\": False}\n\tif default is None:\n\t\tprompt = \" [y/n] \"\n\telif default == \"yes\":\n\t\tprompt = \" [Y/n] \"\n\telif default == \"no\":\n\t\tprompt = \" [y/N] \"\n\telse:\n\t\traise ValueError(\"invalid default answer: '%s'\" % default)\n\n\twhile True:\n\t\t# sys.stdout.write(question + prompt)\n\t\tchoice = raw_input(question + prompt).lower()\n\t\t# print(choice)\n\t\tif default is not None and choice == '':\n\t\t\treturn valid[default]\n\t\telif choice in valid:\n\t\t\treturn valid[choice]\n\t\telse:\n\t\t\tsys.stdout.write(\"Please respond with 'yes' or 'no' \"\n\t\t\t\t\t\t\t \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\r\n \"no\": False, \"n\": False}\r\n if default is None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\n \"yes\": True,\n \"y\": True,\n \"ye\": True,\n \"no\": False,\n \"n\": False\n }\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"no\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True, \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" 
[y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True, '1': True,\n \"no\": False, \"n\": False, '0': False, }\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 
'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=None):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=None):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \" \\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n print question + prompt\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n print \"Please respond with 'yes' or 'no' (or 'y' or 'n').\"", "def query_yes_no(question, default=None):\n valid = {\"yes\": True, \"no\": False}\n if default is None:\n prompt = \" [yes/no] \"\n elif default == \"yes\":\n prompt = \" [YES/no] \"\n elif default == \"no\":\n prompt = \" [yes/NO] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == \"\":\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no'.\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice 
== '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"=> Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default='yes'):\n valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}\n if default is None:\n prompt = ' [y/n] '\n elif default == 'yes':\n prompt = ' [Y/n] '\n elif default == 'no':\n prompt = ' [y/N] '\n else:\n raise ValueError('invalid default answer: %s' % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n if choice in valid:\n return valid[choice]\n else:\n sys.stdout.write('Please respond with \\'yes\\' or \\'no\\''\n '(or \\'y\\' or \\'n\\').\\n')", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n print(colored(question + prompt, 'white', 'on_red', attrs=['reverse', 'blink']))\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True, \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n print(question + prompt)\n choice = input().lower()\n if default is not None and choice == \"\":\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n print(\"Please respond with 'yes' or 'no' \" \"(or 'y' or 'n').\\n\")", "def query_yes_no(self, question, default=None):\r\n valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}\r\n if default is None:\r\n prompt = ' [Y/N] '\r\n elif default == 'yes':\r\n prompt = ' [Y/N] '\r\n elif default == 'no':\r\n prompt = ' [Y/N] '\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n if choice in valid:\r\n return valid[choice]\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")\r\n\r\n return", "def query_yes_no(question: str, default: str = \"yes\") -> bool:\n valid = {\"yes\": True,\n \"y\": True,\n \"ye\": True,\n \"no\": False,\n \"n\": False}\n\n if default is None:\n prompt = \" [y/n]: \"\n elif default == \"yes\":\n prompt = \" [Y/n]: \"\n elif default == \"no\":\n prompt = \" [y/N]: \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no': \")", "def query_yes_no(question, default=\"yes\"):\n\n valid = {\"yes\": \"yes\", \"y\": \"yes\", \"ye\": \"yes\", \"no\": \"no\", \"n\": \"no\"}\n prompt = {None: \" [y/n] \", 
\"yes\": \" [Y/n] \", \"no\": \" [y/N] \"}.get(default, None)\n\n if not prompt:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n reply = None\n\n while not reply:\n sys.stdout.write(colorize(question, Colors.PROMPT) + prompt)\n\n choice = input().lower()\n reply = None\n\n if default and not choice:\n reply = default\n elif choice in valid:\n reply = valid[choice]\n else:\n print_failure(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")\n\n return reply == \"yes\"", "def question(text, default):\n\n while 1:\n if isinstance(default, bool):\n if default:\n default_str = \"yes\"\n else:\n default_str = \"no\"\n else:\n default_str = str(default)\n report(text + \" [\" + default_str + \"] :\")\n input_audio_icon()\n\n if not dont_ask:\n str_inp = input(\">\")\n\n # On plain enter, return default\n if dont_ask or (len(str_inp) == 0):\n return default\n # If a value was typed, check it and convert it\n elif isinstance(default, bool):\n if str_inp in [\"yes\", \"y\", \"Y\", \"true\", \"t\", \"1\"]:\n return True\n elif str_inp in [\"no\", \"n\", \"N\", \"false\", \"f\", \"0\"]:\n return False\n else:\n report(\"Unknown answer (type 'yes' or 'no')\")\n continue\n elif isinstance(default, int):\n return int(str_inp)\n elif isinstance(default, str):\n return str_inp\n else:\n raise TypeError(\"Invalid type for the default value\")", "def queryYesNo(question, default=None):\n \n valid = {\"yes\": True, \"ye\": True, \"y\": True, \"yea\": True, \"yep\": True, \"ya\": True,\n \"no\": False, \"n\": False, \"nope\": False, \"nah\": False, \"nop\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: {0}\".format(default))\n\n while True:\n print(\"{0} {1}\".format(question, prompt))\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n print(\"Please respond with 'yes' or 'no' \\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\"}\n valid_true = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\"}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return (default in valid_true.keys())\n elif choice in valid.keys():\n return (choice in valid_true.keys())\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_user_bool(question, default=True):\n\n valid_yes_ans = [\"yes\", \"y\"]\n valid_no_ans = [\"no\", \"n\"]\n\n if default is None:\n prompt = \" [y/n] \"\n elif default:\n prompt = \" [Y/n] \"\n else:\n prompt = \" [y/N] \"\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n\n if default is not None and choice == '':\n return default\n\n if choice in valid_yes_ans:\n return True\n\n if choice in valid_no_ans:\n return False\n\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=True):\n yes_list = [\"yes\", \"y\"]\n no_list = [\"no\", \"n\"]\n\n default_dict = { # default => prompt default string\n None: 
\"[y/n]\",\n True: \"[Y/n]\",\n False: \"[y/N]\",\n }\n\n default_str = default_dict[default]\n prompt_str = \"%s %s \" % (question, default_str)\n\n while True:\n choice = input_(prompt_str).lower()\n\n if not choice and default is not None:\n return default\n if choice in yes_list:\n return True\n if choice in no_list:\n return False\n\n notification_str = \"Please respond with 'y' or 'n'\"\n print(notification_str)", "def prompt_yes_no(question, default):\n again = 'Unknown response.'\n if default.lower() in ('y', 'yes'):\n options = '(Y/n): '\n elif default.lower() in ('n', 'no'):\n options = '(y/N): '\n else:\n raise ValueError('default must be \"y\", \"yes\", \"n\", or \"no\"')\n\n response = input(' '.join((question, options))).lower()\n while response not in ('y', 'yes', 'n', 'no', ''):\n response = input(' '.join((again, question, options))).lower()\n if response == '':\n return default\n return response", "def query_input(question, default=None, color=default_color):\n if default is None or default == '':\n prompt = ' '\n elif type(default) == str:\n prompt = flo(' [{default}] ')\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(color(question + prompt))\n choice = raw_input()\n if default is not None and choice == '':\n return default\n if choice != '':\n return choice", "def ask_yes_no(question, default=\"y\"):\n valid = {\"y\": True, \"n\": False}\n\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"y\":\n prompt = \" [Y/n] \"\n elif default == \"n\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n\n if default is not None and choice == '':\n return valid[default]\n\n choice_letter = choice[0]\n\n if choice_letter in valid:\n return valid[choice_letter]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def ask_question(msg, answers=\"[yes/No]\", default=\"no\"):\n if answers[0] != '[' or answers[-1] != ']':\n msg = \"%s wrongly specified, should be in [] separated by /\" % answers\n raise ValueError(msg)\n\n answer_list = answers[1:-1].split('/')\n \n if len(answer_list) < 2:\n raise ValueError(\"Too few possible answers: %s\" % answers)\n \n answer_list = [item.lower() for item in answer_list[:]]\n default = default.lower()\n \n if default not in answer_list:\n raise ValueError(\"Default answer %s not among answers: %s\" % (default,\n answers))\n \n print_out = \"%s %s: \" % (msg, answers)\n print print_out,\n \n inpt = None\n while inpt == None:\n try:\n inpt = raw_input()\n except KeyboardInterrupt:\n print_msg_exit(\" KeyboardInterrupt, exit.\", exit_code=1)\n except Exception, ex:\n print ex\n inpt = None\n print(\" Couldn't recognize the answer, try again.\")\n print print_out,\n else:\n inpt = inpt.lower()\n # finally, check what the user answered \n for i in range(len(answer_list)):\n if inpt == answer_list[i][0] or inpt == answer_list[i]:\n return answer_list[i]\n else:\n if inpt == '':\n return default\n else:\n inpt = None\n print \" Couldn't recognize the answer, try again.\"\n print print_out,", "def query_yes_no(question, default=\"yes\", force=False):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: 
'%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n if not force:\n choice = raw_input().lower()\n else:\n choice = \"yes\"\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def get_answer(message, answers='Yn', default='Y', quit=''):\r\n if quit and quit not in answers:\r\n answers = answers + quit\r\n \r\n message = message + '(' + '/'.join(answers) + ')[' + default + ']:'\r\n ans = raw_input(message).strip().upper()\r\n if default and not ans:\r\n ans = default.upper()\r\n while ans not in answers.upper():\r\n ans = raw_input(message).strip().upper()\r\n if quit and ans == quit.upper():\r\n print \"Command be cancelled!\"\r\n sys.exit(0)\r\n return ans", "def confirm(question, default_choice='yes'):\n valid = {'yes':True, 'y':True, 'ye':True, 'no':False, 'n':False}\n default_choice = str(default_choice).lower()\n if default_choice == 'none':\n prompt = ' [y/n] '\n elif default_choice == 'yes':\n prompt = ' [Y/n] '\n elif default_choice == 'no':\n prompt = ' [y/N] '\n else:\n raise ValueError('invalid default answer: \"%s\"' % default_choice)\n\n while True:\n print(str(question) + prompt)\n choice = input().lower()\n if default_choice != 'none' and choice == '':\n return valid[default_choice]\n elif choice in valid:\n return valid[choice]\n else:\n print(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def yes_no(question, default= None):\n valid_option = {\"yes\": True,\n \"y\": True,\n \"no\": False,\n \"n\": False\n }\n if default is None:\n option = \" [y/n] \"\n elif default == \"yes\":\n option == \" [Y/n] \"\n elif default == \"no\":\n option == \" [y/N] \"\n else:\n raise ValueError(\"'%s' is a non valid option. Please select \" % default)\n \n while True:\n print(question + option)\n user_choice = input().lower()\n if default is not None and user_choice == '':\n return valid_option[default]\n elif user_choice in valid_option:\n return valid_option[user_choice]\n else:\n print(\"Please respond with 'yes' or 'no' (or 'y' or 'n'). \\t\")", "def ask(question, options, default):\n assert default in options\n\n question += \" ({})? 
\".format(\"/\".join(o.upper() if o == default else o for o in options))\n selected = None\n while selected not in options:\n selected = input(question).strip().lower()\n if selected == \"\":\n selected = default\n else:\n if selected not in options:\n question = \"Please type '{}'{comma} or '{}': \".format(\n \"', '\".join(options[:-1]), options[-1],\n comma=',' if len(options) > 2 else '',\n )\n return selected", "def _ask_user_yn(question, default):\n\n input_valid = False\n default = default.lower()\n answer = \"\"\n while not input_valid:\n answer = input(question)\n if answer == \"\":\n answer = default\n if re.findall(r\"[YyNn]\", answer):\n input_valid = True\n answer = answer[0].lower()\n else:\n print(\"Please answer Y, N or Return.\")\n\n return answer", "def yes_no(prompt, default=None):\n if default is None:\n response = input(prompt + ' (y/n): ')\n elif default:\n response = input(prompt + ' ([y]/n): ')\n elif not default:\n response = input(prompt + ' (y/[n]): ')\n else:\n raise KeyError('Default must be True or False')\n if response.lower() == 'y':\n return True\n elif response.lower() == 'n':\n return False\n elif response == '' and default is not None:\n return default\n else:\n print('Please enter \\'y\\' or \\'n\\' as a valid response.')\n return yes_no(prompt, default)", "def input_with_default(prompt, default):\n response = raw_input(\"%s (Default %s) \"%(prompt, default))\n if not response:\n return default\n return response", "def ask_yes_no(question, default=None, ctrl_c=\"n\", ctrl_d=None):\n\n ctrl_d = default if ctrl_d is None else ctrl_d\n\n option_prompt = OPTION_PROMPTS[default]\n prompt = question + \"? [{}] \".format(option_prompt)\n\n ans = None\n while ans not in ANSWERS:\n try:\n ans = read_input(prompt).lower()\n if not ans: # response was an empty string\n ans = default\n except KeyboardInterrupt:\n print()\n ans = ctrl_c\n except EOFError:\n print()\n ans = ctrl_d\n\n return ANSWERS[ans]", "def simple_response(prompt, default=None):\n if default is None:\n response = input(prompt + ': ')\n else:\n response = input(prompt + f' [{default}]' + ': ')\n if response != '':\n return response\n elif response == '' and default is not None:\n return default\n else:\n print('Please enter a valid response')\n return simple_response(prompt, default)", "def askQuestion(question, answers = [\"Yes\", \"No\"], title = \"Myro Question\",\n default = 0, bitmap=Dialog.DIALOG_ICON):\n d = _tkCall(Dialog.Dialog, myro.globvars.gui,\n title=title, default=default, bitmap=bitmap,\n text=question, strings=answers)\n return answers[int(d.num)]", "def user_yesno(msg, default=None):\n\n # Parse optional `default` answer\n valid = {\"yes\": True, \"y\": True, \"ye\":True, \"no\":False, \"n\":False}\n if default is None:\n suffix = \" [y/n] \"\n elif default == \"yes\":\n suffix = \" [Y/n] \"\n elif default == \"no\":\n suffix = \" [y/N] \"\n\n # Wait for valid user input, if received return `True`/`False`\n while True:\n choice = input(msg + suffix).lower()\n if default is not None and choice == \"\":\n return valid[default]\n elif choice in valid.keys():\n return valid[choice]\n else:\n print(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def boolean_input(self, question, default=False):\n if default is None:\n yes_no = \"y/n\"\n default_text = None\n elif default:\n yes_no = \"[Y/n]\"\n default_text = 'y'\n else:\n yes_no = \"[y/N]\"\n default_text = 'n'\n\n prompt = \"{question} {yes_no}? 
\".format(question=question, yes_no=yes_no)\n\n result = self.selection_input(\n prompt=prompt,\n choices=['y', 'n'],\n default=default_text,\n error_message=\"Please enter Y or N\",\n transform=lambda s: s.lower()[:1],\n )\n if result == 'y':\n return True\n\n return False", "def prompt(name, default):\n value = raw_input('%s [%s]: ' %(name, default))\n if not value:\n value = default\n return value", "def raw_input_default_config(q, default=None, obj=None):\n if default is None:\n if callable(q['default']):\n f1 = q['default']\n try:\n default = f1(obj)\n except TypeError:\n pass\n else:\n default = q['default']\n if 'ask' in q and not q['ask']:\n return default\n if 'obfuscate' in q and q['obfuscate']:\n return raw_input_default(q['q'], default=default, obfuscate=True)\n else:\n return raw_input_default(q['q'], default=default, obfuscate=False)", "def user_prompt(prompt, default=None):\n prompt = f\"\\n {prompt} [{default}] runs or type an amount: \"\n response = input(prompt)\n if not response and default:\n return default\n else:\n return response", "def ask_permission(question, default_no=True):\n if _YES:\n log(\"DEBUG\", \"Assuming the answer is 'yes' for this question: \" +\n question)\n return True\n postfix = \"[y/N]\" if default_no else \"[Y/n]\"\n answer = input(question + \" \" + postfix)\n if answer and answer[0].lower() == 'y':\n return True\n return False", "def question(self, message=\"Do you wish to proceed?\", title=\"Question\", cancel=False):\n if cancel:\n instructions = \"y = yes, n = no, c = cancel\"\n options = ['y', 'n', 'c']\n else:\n instructions = \"y = yes, n = no, c = cancel\"\n options = ['y', 'n']\n print(title)\n print(message)\n print(instructions)\n answer = input()\n while answer not in options:\n print(\"Sorry, I can't interpret that answer\")\n print(message)\n print(instructions)\n answer = input()\n if answer == 'y': return \"Yes\"\n if answer == 'n': return \"No\"\n if answer == 'c': return \"Cancel\"", "def ask_yes_no(question):\n answer = None\n while answer not in (\"y\",\"n\"):\n answer = input(question).lower()\n return answer", "def prompt_yes_or_no(question, def_yes=True):\n if def_yes:\n ans = raw_input(question + ' [y] or n: ')\n is_yes = ans == 'y' or ans == ''\n is_no = ans == 'n'\n if not (is_yes or is_no):\n print \"Invalid answer! Answer must be n or y or simple enter.\"\n prompt_yes_or_no(question, def_yes)\n else:\n return is_yes\n else:\n ans = raw_input(question + ' y or [n]: ')\n is_yes = ans == 'y'\n is_no = ans == 'n' or ans == ''\n if not (is_yes or is_no):\n print \"Invalid answer! 
Answer must be n or y or simple enter.\"\n prompt_yes_or_no(question, def_yes)\n else:\n return is_yes", "def get_yes_no_input(logger, text, default=None):\n if default:\n default = default.strip().lower()\n\n y = \"Y\" if default == \"y\" else \"y\"\n n = \"N\" if default == \"n\" else \"n\"\n\n prompt = f\"{text} [{yellow(y)}/{yellow(n)}]\"\n user_input = \"\"\n\n while not user_input:\n logger(prompt, end=\"\")\n user_input = input(\" \").strip().lower()\n if user_input == \"\" and default:\n user_input = default\n\n return user_input", "def reply_request(question: str, reply_options = ['y', 'yes', 'n', 'no'], show_reply = False):\n\n reply = None \n while not reply in reply_options:\n reply = input(question).lower()\n else:\n if show_reply:\n print(f'Your choice: {reply}')\n \n if reply in ['yes', 'no']:\n return reply[0]\n else:\n return reply", "def ask_question(\n question: Text,\n input_type: Callable[[Text], Any],\n default: Optional[Any] = None,\n hide_input: Optional[bool] = False,\n) -> Any:\n if hide_input:\n if default:\n hint = \"**\"\n else:\n hint = \"\"\n\n return getpass.getpass(f\"{question} [{hint}] \")\n\n if default:\n ask = f\"{question} [{default}]\"\n else:\n ask = question\n\n answer = input(f\"{ask}: \")\n return input_type(answer)", "def get_input(prompt, default=None, choices=None, option_value=None):\r\n if option_value is not None:\r\n return option_value\r\n \r\n choices = choices or []\r\n while 1:\r\n r = raw_input(prompt+' ').strip()\r\n if not r and default is not None:\r\n return default\r\n if choices:\r\n if r not in choices:\r\n r = None\r\n else:\r\n break\r\n else:\r\n break\r\n return r", "def ask_yes_no(question):\r\n\tresponse = None\r\n\twhile response not in (\"y\", \"n\"):\r\n\t\tresponse = input(question).lower()\r\n\treturn response", "def confirm(s=None, default=False):\n\n if s:\n s = '{} (y/n): '.format(s)\n else:\n s = 'Continue? (y/n): '\n answer = input(s).strip().lower()\n return answer.startswith('y') if answer else default", "def binary_prompt(question: str, console: io.IO,\n default: Optional[bool] = None) -> bool:\n\n while True:\n answer = console.ask(question).lower()\n\n if default is not None and not answer:\n return default\n\n try:\n _binary_validate(answer)\n break\n except ValueError as e:\n console.error(e)\n\n return answer == 'y'", "def ask_for_confirmation(prompt=\"Are you sure? 
\", default=True):\n yes, no = (\"Y\", \"n\") if default else (\"y\", \"N\")\n prompt += f\"[{yes}/{no}] \"\n\n while True:\n ans = input(prompt).lower().strip()\n if not ans:\n return default\n elif not (\"yes\".startswith(ans) or \"no\".startswith(ans)):\n print(\"Please enter yes or no.\")\n continue\n else:\n return \"yes\".startswith(ans)", "def confirm_choice(\n choice: Text, default: Optional[bool] = True, abort: Optional[bool] = True\n) -> bool:\n if default:\n hint = \"Y/n\"\n else:\n hint = \"y/N\"\n answer = input(f\"{choice} [{hint}]: \")\n\n value = None\n if answer.lower() in [\"y\", \"yes\"]:\n value = True\n\n if answer.lower() in [\"n\", \"no\"]:\n value = False\n\n if not answer:\n value = default\n\n if value is None:\n print(\"Invalid answer\")\n return confirm_choice(choice=choice, default=default, abort=abort)\n\n if abort and not value:\n raise RuntimeError(\"Aborting\")\n\n return value", "def confirm(prompt_str, default=False):\r\n if default:\r\n prompt = '%s [Y/n]: ' % prompt_str\r\n else:\r\n prompt = '%s [y/N]: ' % prompt_str\r\n\r\n response = valid_response(prompt, 'y', 'yes', 'yeah', 'yup', 'yolo')\r\n\r\n if response is None:\r\n return default\r\n\r\n return response", "def confirm(\n\t\ttext: str,\n\t\tdefault: bool = False,\n\t\tabort: bool = False,\n\t\tprompt_suffix: str = \": \",\n\t\tshow_default: bool = True,\n\t\terr: bool = False,\n\t\t):\n\n\tprompt = _build_prompt(text, prompt_suffix, show_default, \"Y/n\" if default else \"y/N\")\n\n\twhile True:\n\t\ttry:\n\t\t\tvalue = _prompt(prompt, err=err, hide_input=False).lower().strip()\n\t\texcept (KeyboardInterrupt, EOFError):\n\t\t\traise click.Abort()\n\n\t\tif value in ('y', \"yes\"):\n\t\t\trv = True\n\t\telif value in ('n', \"no\"):\n\t\t\trv = False\n\t\telif value == '':\n\t\t\trv = default\n\t\telse:\n\t\t\tclick.echo(\"Error: invalid input\", err=err)\n\t\t\tcontinue\n\t\tbreak\n\n\tif abort and not rv:\n\t\traise click.Abort()\n\n\treturn rv", "def ask_user( prompt ):\n answer = raw_input( prompt )\n if answer.lower() in [\"y\",\"yes\"]:\n return True\n else:\n return False", "def user_question():\n return input('What would you like? (espresso/latte/cappuccino): ')", "def prompt_with_options(prompt, default=None, options=None):\n\n msg = \"%s [%s]: \" % (prompt, default) if default is not None else \"%s: \" % prompt\n value = None\n while value is None:\n value = raw_input(msg).strip()\n if value:\n if options and value not in options:\n value = None\n elif default is not None:\n value = default\n\n return value", "def ask(question):\n while True:\n query = input('{}\\n Reply (y/n) >>'.format(question))\n res = query[0].lower()\n if query == '' or not res in ['y', 'n']:\n pass\n else:\n break\n\n if res == 'y':\n return True\n else:\n return False", "def _ask_prompt(question: str,\n console: io.IO,\n validate: Optional[Callable[[str], None]] = None,\n default: Optional[str] = None) -> str:\n validate = validate or (lambda x: None)\n while True:\n answer = console.ask(question)\n if default and not answer:\n answer = default\n try:\n validate(answer)\n break\n except ValueError as e:\n console.error(e)\n\n return answer", "def prompt(msg, default=NO_DEFAULT, validate=None):\n while True:\n response = input(msg + \" \").strip()\n if not response:\n if default is NO_DEFAULT:\n continue\n return default\n if validate is None or validate(response):\n return response", "def prompt_string(prompt=\"Enter a value\",\n default=None):\n _new = None\n while True:\n try:\n _new = str(input(f\"{prompt}? 
[{str(default)}]: \")) # nosec\n break\n except ValueError:\n print(\"Sorry, I didn't understand that.\")\n continue\n except KeyboardInterrupt:\n break\n return default if _new in [None, ''] else _new", "def query_number(question, default=1):\n if default is None:\n prompt = \" [] \"\n else:\n prompt = \" [%d] \" % default\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return int(default)\n elif choice.isdigit():\n return int(choice)\n else:\n sys.stdout.write(\"Please respond with a number\\n\")", "def ask_question(self, question):\n self.response((question))\n return input()", "def getQuestion():\n\n tcflush(sys.stdin, TCIFLUSH)\n question = input(\" You say:\\n \")\n\n return validQuestion(question)" ]
[ "0.8437878", "0.8223772", "0.8212373", "0.8156623", "0.8149691", "0.81355673", "0.81233436", "0.8122114", "0.8120957", "0.8120957", "0.8120957", "0.8120957", "0.8120957", "0.8120957", "0.8120957", "0.8120957", "0.81196237", "0.81192315", "0.81192315", "0.81192315", "0.81192315", "0.81192315", "0.81175584", "0.81165403", "0.8115645", "0.8111393", "0.8111262", "0.8111262", "0.81042784", "0.81026244", "0.80998766", "0.80984455", "0.809636", "0.8094383", "0.80877197", "0.80877197", "0.80877197", "0.80877197", "0.80877197", "0.80834275", "0.8082899", "0.8076588", "0.8073941", "0.80708045", "0.8037915", "0.79764926", "0.7949643", "0.792514", "0.7911671", "0.7909967", "0.7897192", "0.78778356", "0.7789637", "0.7773532", "0.77411824", "0.7699201", "0.7669678", "0.76638466", "0.76208204", "0.7560135", "0.75050575", "0.7432323", "0.74099517", "0.72778094", "0.7198592", "0.7198363", "0.71618736", "0.705973", "0.7055353", "0.69002664", "0.686292", "0.68197626", "0.67796206", "0.6743385", "0.66767573", "0.6671264", "0.6644624", "0.66044265", "0.6600726", "0.65321934", "0.6517015", "0.6508442", "0.6503481", "0.6483954", "0.64514023", "0.6448418", "0.64270127", "0.6424407", "0.63986987", "0.6397903", "0.6397191", "0.6357423", "0.63363665", "0.63331956", "0.6316796", "0.6268772", "0.62598366", "0.62567925", "0.6243412", "0.6227152" ]
0.834716
1
Returns the url to access a detail record for this book.
def get_absolute_url(self): return reverse("mountain", args=[str(self.state_name), str(self.name)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_absolute_url(self):\n return reverse('book-detail', args=[str(self.id)]) \n # Returns an URL that can be used to access a detail record for this model \n # (for this to work we will have to \n # -- Define a URL mapping that has the name 'book-detail' (name='book-detail')\n # -- Define an associated view.\n # -- Define an associated template.", "def get_absolute_url(self):\n return reverse('book_details', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('book-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('book-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('book-detail', args=[str(self.id)])", "def detail_url(author_id):\n return reverse('author-books', args=[author_id])", "def get_absolute_url(self):\n return '/booking/%s/detail' % self.id", "def details_url(self):\n if self._data.get('details_url'):\n path = self._data.get('details_url')\n try:\n path, hash_ = path.split('#')\n hash_ = '#' + hash_\n except ValueError:\n hash_ = ''\n return '{}?from_activity={}{}'.format(path, self._data.get('id'), hash_)", "def get_absolute_url(self):\n return reverse('book-detail', kwargs={'slug': self.slug})", "def get_details_url(self, textbook_id):\r\n return reverse_course_url(\r\n 'textbooks_detail_handler',\r\n self.course.id,\r\n kwargs={'textbook_id': textbook_id}\r\n )", "def get_detail_URL(recipe_id):\n return reverse('recipeapp:recipe-detail', args=[recipe_id])", "def get_url(self):\n return self.metadata['thisRecordUrl']", "def get_absolute_url(self):\n return reverse(\"jewelry_detail\", args = [str(self.id)])", "def detail_url(recipe_id): # /api/recipe/recipes/ID\n return reverse('recipe:recipe-detail', args = [recipe_id])", "def exam_url(self, obj):\n request = self.context.get(\"request\")\n return reverse(\"exam-detail\", args=[obj.id], request=request)", "def get_absolute_url(self):\n return reverse('bleedinfo-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('bl-detail', args=[str(self.id)])", "def detail_url(product_id):\n return reverse('product:product-detail', args=[product_id])", "def get_absolute_url(self):\n return reverse('csv-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('favbook-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])", "def detail_url(recipe_id):\n return reverse('recipe:recipe-detail', args=[recipe_id])", "def detail_url(recipe_id):\n return reverse('recipe:recipe-detail', args=[recipe_id])", "def detail_url(recipe_id):\n return reverse('recipe:recipe-detail', args=[recipe_id])", "def detail_url(recipe_id):\n return reverse('recipe:recipe-detail',args=[recipe_id])", "def get_absolute_url(self):\n return reverse('blogger-detail', args=[str(self.id)])", "def build_details_url(self, params={}):\n\n if 'url' in params:\n url = params['url']\n url += '?page=' + str(int(params['page'])) + '&sort=' + str(params['sort'])\n return url", "def detail_url(reteta_id):\n return reverse('reteta:reteta-detail', args=[reteta_id])", "def url(self):\n\n if self.identifier and self.identifier != \"\":\n return self.collection.url + self.identifier + \"/\"\n else:\n return self.collection.url", "def get_absolute_url(self):\n return ('publication_detail', (), {'slug': self.slug})", "def detail_url(movie_id):\n return reverse('movie:movie-detail', args=[movie_id])", "def 
get_absolute_url(self):\n\n return reverse('kid-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('structured-name-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('patient-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('card-detail', args=[str(self.id)])", "def get_absolute_url(self):\n\n return reverse('performer-detail', args=[str(self.id)])", "def get_absolute_url(self):\n\t\treturn reverse('source-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('injury-detail', args=[str(self.id)])", "def detail_url(ingredient_id):\n return reverse(\"recipe:ingredient-detail\", args=[ingredient_id])", "def get_absolute_url(self):\n return reverse('invoice-detail', args=[str(self.id)])", "def detail_url(ingredient_id):\n return reverse('recipe:ingredient-detail', args=[ingredient_id])", "def url(self):\n return url_for_item(self.key)", "def url(self):\n return url_for_item(self.key)", "def get_detail_url(order_id):\n return reverse(\"order-detail\", args=[str(order_id)])", "def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('product-detail', args=[str(self.id)])", "def get_absolute_url(self):\n\t\treturn reverse('author-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('quotes:detail', kwargs={'pk': self.pk})", "def get_absolute_url(self):\n return reverse('market:seller-detail', args=[str(self.id)])", "def make_book_api_url(self):\n\n # format API_LINK = \"https://www.goodreads.com/book/show/<book id>.xml?key=<your dev key>\"\n\n self.book_url = f\"https://www.goodreads.com/book/show/{self.book_id}.xml?key={self.key}\"", "def get_item_url(self, item):\n return self.get_absolute_url(item, 'detail')", "def get_absolute_url(self):\n return reverse('author-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('author-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('author-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('author-detail', args=[str(self.id)])", "def detail_book(request, pk):\n\n try:\n book = Book.objects.get(id = pk)\n except Book.DoesNotExist:\n raise Http404\n return render(request, 'detail_book.html', context)", "def book_url(cls, identifier, extension='epub', open_access=True, \n data_source=None):\n root = cls.content_root(open_access)\n args = [identifier.type, identifier.identifier]\n args = [urllib.quote(x) for x in args]\n if data_source:\n args.insert(0, urllib.quote(data_source.name))\n template = \"%s/%s/%s.%s\"\n else:\n template = \"%s/%s.%s\"\n return root + template % tuple(args + [extension])", "def get_absolute_url(self):\n\n return reverse('caretaker-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('market:product-detail', args=[str(self.id)])", "def detail_link(db_obj, text=None):\n\n def build_link(obj):\n name = str(obj) if text is None else text\n return _make_link(obj.detail_url(), name)\n\n return mark_safe(', '.join(map(build_link, as_list(db_obj))))", "def get_absolute_url(self):\n return reverse('relation-detail', args=[str(self.id)])", "def recipe_detail_url(recipe_id):\n return reverse('recipes:recipe-detail', args=[recipe_id])", "def get_absolute_url(self):\n return reverse(\"cars:detail\", kwargs={\"slug\": self.slug})", "def get_absolute_url(self):\n return reverse('wine-detail', args=[str(self.id)])", "def 
get_absolute_url(self):\n return reverse('restaurant-detail', args=[str(self.id)])", "def book_url(self, identifier, extension='.epub', open_access=True,\n data_source=None, title=None):\n bucket = self.get_bucket(\n S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY if open_access\n else S3UploaderConfiguration.PROTECTED_CONTENT_BUCKET_KEY)\n root = self.content_root(bucket)\n\n if not extension.startswith('.'):\n extension = '.' + extension\n\n parts = []\n if data_source:\n parts.append(data_source.name)\n parts.append(identifier.type)\n if title:\n # e.g. DataSource/ISBN/1234/Title.epub\n parts.append(identifier.identifier)\n filename = title\n else:\n # e.g. DataSource/ISBN/1234.epub\n filename = identifier.identifier\n parts.append(filename + extension)\n return root + self.key_join(parts)", "def get_absolute_url(self):\n return reverse('library-detail', kwargs={'slug': self.slug})", "def get_absolute_url(self):\n return reverse('name-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('reference-detail', args=[str(self.id)])", "def get_absolute_url(self):\n\n return reverse('author-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('questionSchema-detail', args=[str(self.questionId)])", "def get_absolute_url(self):\n return reverse('properties:detail', kwargs={'pk': self.pk})", "def get_absolute_url(self):\n return reverse('binning-detail', args=[str(self.id)])", "def detail_url(vehicle_id):\n return reverse('heroad:vehicle-detail', args=[vehicle_id])", "def get_absolute_url(self):\n return ('member_detail', [self.pk])", "def get_provenance_url(uuid):\n return '{explore_url}/details/{uuid}'.format(explore_url=EXPLORE_URL, uuid=uuid)", "def get_absolute_url(self):\n return reverse('qualifier-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse(\n \"catalogue:detail\", kwargs={\"product_slug\": self.slug, \"pk\": self.id}\n )", "def get_absolute_url(self):\n return reverse('report', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('accountInfo-detail', args=[str(self.uid)])", "def detail_url(pizza_id):\n return reverse('pizza:pizza-detail', args=[pizza_id])", "def get_absolute_url(self):\n return reverse('qualifier-name-detail', args=[str(self.id)])", "def get_absolute_url(self):\n return reverse('payment-detail', args=[str(self.id)])", "def get_resource_uri(self, item):\n if isinstance(item, Bundle):\n pk = item.obj._id\n else:\n pk = item._id\n return reverse(\"api_dispatch_detail\", kwargs={\n \"resource_name\": self._meta.resource_name,\n \"api_name\": self._meta.api_name, \n \"pk\": pk\n })", "def detail_template(self):\n return '{}/{}.html'.format(self.object_name, self.detail_endpoint)", "def get_absolute_url(self):\n return reverse('blog-detail', args=[str(self.id)])", "def get_details_link(self, element):\n tag = element.find_elements_by_class_name(\"btn-action\")[0]\n return tag.get_attribute(\"href\")", "def get_absolute_url(self):\n return reverse('food-detail', args=[str(self.id)])", "def test_api_can_get_a_book(self):\n response = self.client.get(\n reverse('details_book', kwargs={'pk': book_pk}), format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def display_uri(self) -> str:\n return pulumi.get(self, \"display_uri\")", "def reference_url(self):\n return self.get(\"reference_url\", decode=True)", "def get_absolute_url(self):\n return reverse('hist-detail', args=[str(self.id_historico)])", "def subscriber_detail(self):\n model_name = 
Subscriber._meta.object_name.lower()\n app_label = self._meta.app_label\n link = '/admin/%s/%s/' % (app_label, model_name)\n link += '?campaign__id=%d' % self.id\n display_link = _(\"<a href='%(link)s'>%(name)s</a>\") % \\\n {'link': link, 'name': _('details')}\n return display_link", "def resource_uri(self):\n primary_key_value = getattr(self, self.primary_key(), None)\n return '/{}/{}'.format(self.endpoint(), primary_key_value)", "def get_reference(self):\n return self.resource.url", "def get_absolute_url(self):\n return reverse('trait_browser:source:studies:pk:detail', kwargs={'pk': self.pk})", "def href(obj):\n if isinstance(obj, Filing):\n return reverse('filing', args=(obj.region, obj.name, obj.period_name))\n else:\n raise ValueError('cannot build a URL for {}.{} objects'.format(\n type(obj).__module__, type(obj).__name__))", "def get_absolute_url(self):\n return reverse('texture_detail', args=[str(self.id)])", "def get_absolute_url(self):\n return ('project_detail', (), {\n 'name': self.title\n })", "def get_absolute_url(self):\n return reverse('brand-detail', args=[str(self.id)]) #View on Site (front-end)" ]
[ "0.76990575", "0.7611185", "0.7257499", "0.7257499", "0.7257499", "0.7117704", "0.6992158", "0.69547594", "0.6935497", "0.684423", "0.6591669", "0.6543265", "0.6541738", "0.64656514", "0.6445659", "0.64323515", "0.64318234", "0.6417527", "0.6407866", "0.638662", "0.6367192", "0.6367192", "0.6358796", "0.6358796", "0.6358796", "0.6344985", "0.63443846", "0.6336574", "0.63187414", "0.6308525", "0.6307779", "0.63032395", "0.62983495", "0.6285492", "0.6281266", "0.62690365", "0.6254449", "0.6247611", "0.62338305", "0.62221766", "0.62081623", "0.62036353", "0.61960876", "0.61960876", "0.61852205", "0.61659473", "0.61491823", "0.61399704", "0.6139178", "0.6135849", "0.61247474", "0.6124698", "0.6109979", "0.6109979", "0.6109979", "0.6109979", "0.61065036", "0.60951716", "0.60870886", "0.6074789", "0.6069559", "0.6064603", "0.60569286", "0.60413563", "0.6031316", "0.60281074", "0.6023119", "0.60197973", "0.6008479", "0.6007758", "0.59972405", "0.5983826", "0.5976012", "0.5971514", "0.59653765", "0.5964371", "0.59496784", "0.5942336", "0.5928877", "0.5927093", "0.5920655", "0.5912138", "0.59007233", "0.58987224", "0.5898442", "0.58977014", "0.58910197", "0.5874747", "0.58743465", "0.58735645", "0.587256", "0.5863156", "0.58601594", "0.5856006", "0.5848491", "0.5846177", "0.58454376", "0.58400774", "0.583483", "0.5829644", "0.58293" ]
0.0
-1
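The record above pairs the book detail-URL query with a document that builds a mountain detail URL instead (document_score 0.0, document_rank -1). For reference, a minimal sketch of the pattern the query describes, assuming a Django app with a Book model and a URL pattern registered under name='book-detail' (both names are assumptions, mirroring the get_absolute_url variants listed in the negatives); it is not part of the dataset rows and needs a configured Django project to actually run:

# Hypothetical Django model illustrating the queried pattern; a sketch,
# not a standalone script, since Django models need an installed app.
from django.db import models
from django.urls import reverse

class Book(models.Model):
    title = models.CharField(max_length=200)

    def get_absolute_url(self):
        # Return the URL of the detail page for this book,
        # e.g. /catalog/book/5 when id=5, resolved via the
        # URL pattern named 'book-detail'.
        return reverse("book-detail", args=[str(self.id)])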
Loads directly from a joint/pose/match network's stored checkpoint
def load_weights(self, state_dict): own_state = self.state_dict() # Copy the convloutional layers for name, param in state_dict.iteritems(): if 'base_conv' in name: own_state[name].copy_(param) # Convert the FC layers to convolutional layers own_state['base_fc.0.weight'].copy_(state_dict['base_fc.0.weight'].view(500, 160, 3, 3)) own_state['base_fc.0.bias'].copy_(state_dict['base_fc.0.bias']) own_state['base_fc.2.weight'].copy_(state_dict['base_fc.2.weight'].view(500, 500, 1, 1)) own_state['base_fc.2.bias'].copy_(state_dict['base_fc.2.bias']) # Freeze the weights for the initial Conv and FC layers for i in range(len(self.base_conv)): if hasattr(self.base_conv[i], 'weight'): self.base_conv[i].weight.requires_grad = False if hasattr(self.base_conv[i], 'bias'): self.base_conv[i].bias.requires_grad = False for i in range(len(self.base_fc)): if hasattr(self.base_fc[i], 'weight'): self.base_fc[i].weight.requires_grad = False if hasattr(self.base_fc[i], 'bias'): self.base_fc[i].bias.requires_grad = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_checkpoint(self, session, model_dir):\n assert self.params.cp_load == True, (\"cp_load must be set to true to load a checkpoint\")\n self.loader.restore(session, model_dir)", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def load_checkpoint(tag, params, model):\r\n file_name = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n logger.info(\"Load checkpoint from %s\" % file_name)\r\n if os.path.exists(file_name):\r\n checkpoint = torch.load(file_name, map_location='cpu')\r\n params.training_id = checkpoint['training_id']\r\n logger.info(checkpoint['training_id'])\r\n model.global_step = checkpoint['global_step']\r\n model.load_state_dict(checkpoint['model'])\r\n for i, optimizer in enumerate(model.optimizers):\r\n optimizer.load_state_dict(checkpoint['optimizers'][i])\r\n else:\r\n raise Exception(\"Checkpoint not found.\")", "def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)", "def load(self):\n checkpoint = torch.load(self.checkpoint_path,\n map_location=self.net.device)\n self.load_state_dict(checkpoint)\n del checkpoint", "def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self", "def load(self):\r\n checkpoint = torch.load(self.checkpoint_path,\r\n map_location=self.device)\r\n self.load_state_dict(checkpoint)\r\n del checkpoint", "def load_checkpoint(self, file):\n \"\"\"Load \"\"\"\n chkpnt = torch.load(file)\n self.load_state_dict(chkpnt['model_state_dict'])", "def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]", "def load_checkpoint(self):\n if self.params.resume_from is not None and os.path.exists(self.params.resume_from):\n try:\n LOG('Loading Checkpoint at %s' % self.params.resume_from)\n ckpt = torch.load(self.params.resume_from)\n self.epoch = ckpt['epoch']\n try:\n self.train_loss = ckpt['train_loss']\n self.val_loss = ckpt['val_loss']\n except:\n self.train_loss = []\n self.val_loss = []\n self.network.load_state_dict(ckpt['state_dict'])\n self.opt.load_state_dict(ckpt['optimizer'])\n LOG('Checkpoint Loaded!')\n LOG('Current Epoch: %d' % self.epoch)\n self.ckpt_flag = True\n 
except:\n WARNING('Cannot load checkpoint from %s. Start loading pre-trained model......' % self.params.resume_from)\n else:\n WARNING('Checkpoint do not exists. Start loading pre-trained model......')", "def load_checkpoint(ckpt_path):\n checkpoint = None\n if ckpt_path:\n logger.info(\"Loading checkpoint from %s\" % ckpt_path)\n checkpoint = torch.load(ckpt_path, map_location=torch.device(\"cpu\"))\n\n if \"model\" in checkpoint.keys():\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.b_2\", r\"\\1.layer_norm\\2.bias\", s\n )\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.a_2\", r\"\\1.layer_norm\\2.weight\", s\n )\n return s\n\n checkpoint[\"model\"] = {\n fix_key(k): v for k, v in checkpoint[\"model\"].items()\n }\n # Force add_ffnbias to True if bias found in model w_1 keys\n for key in checkpoint[\"model\"].keys():\n if \"w_1.bias\" in key:\n checkpoint[\"opt\"].add_ffnbias = True\n\n if not hasattr(checkpoint[\"opt\"], \"num_kv\"):\n checkpoint[\"opt\"].num_kv = 0\n if not hasattr(checkpoint[\"opt\"], \"add_ffnbias\"):\n checkpoint[\"opt\"].add_ffnbias = False\n if not hasattr(checkpoint[\"opt\"], \"parallel_residual\"):\n checkpoint[\"opt\"].parallel_residual = False\n if not hasattr(checkpoint[\"opt\"], \"shared_layer_norm\"):\n checkpoint[\"opt\"].shared_layer_norm = False\n if not hasattr(checkpoint[\"opt\"], \"use_ckpting\"):\n checkpoint[\"opt\"].use_ckpting = []\n if not hasattr(checkpoint[\"opt\"], \"relative_positions_buckets\"):\n checkpoint[\"opt\"].relative_positions_buckets = 0\n if not hasattr(checkpoint[\"opt\"], \"parallel_mode\"):\n checkpoint[\"opt\"].parallel_mode = \"data_parallel\"\n if not hasattr(checkpoint[\"opt\"], \"norm_eps\"):\n checkpoint[\"opt\"].norm_eps = 1e-6\n\n # fix v2 compatibility\n if \"generator\" in checkpoint.keys() and checkpoint[\"generator\"]:\n if \"0.weight\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"weight\"] = checkpoint[\"generator\"].pop(\n \"0.weight\"\n )\n if \"0.bias\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"bias\"] = checkpoint[\"generator\"].pop(\"0.bias\")\n # end of patch for backward compatibility\n\n return checkpoint", "def load_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Load checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)", "def load_checkpoint(self, checkpoint: str, **kwargs) -> None:\n with open(checkpoint, \"rb\") as f:\n state = SafePickle.load(f)\n\n state_id = ray.put(state)\n ray.get([worker.set_state.remote(state_id, **kwargs) for worker in self.remote_workers])", "def load_checkpoint(model, save_path):\n model.load_state_dict(torch.load(save_path))", "def load_checkpoint(self, model):\n print(f\"load model {self.save_model_path}\")\n model.load_state_dict(torch.load(self.save_model_path))", "def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']", "def load_ckpt(self, name=None):\r\n name = name if name == 'latest' else \"ckpt_epoch{}\".format(name)\r\n load_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\r\n if not os.path.exists(load_path):\r\n raise ValueError(\"Checkpoint {} not exists.\".format(load_path))\r\n\r\n checkpoint = 
torch.load(load_path)\r\n print(\"Checkpoint loaded from {}\".format(load_path))\r\n if isinstance(self.net, nn.DataParallel):\r\n self.net.module.load_state_dict(checkpoint['model_state_dict'])\r\n else:\r\n self.net.load_state_dict(checkpoint['model_state_dict'])\r\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\r\n self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\r\n self.clock.restore_checkpoint(checkpoint['clock'])", "def load_model(self, checkpoint):\n print(f'Load parameters from {checkpoint}')\n epoch = re.match(r\"[0-9]*\", os.path.basename(checkpoint)).group(0)\n self.epoch_i = int(epoch)\n self.model.load_state_dict(torch.load(checkpoint))", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def _load_checkpoint(filename, map_location=None):\n if filename.startswith('modelzoo://'):\n warnings.warn('The URL scheme of \"modelzoo://\" is deprecated, please '\n 'use \"torchvision://\" instead')\n model_urls = get_torchvision_models()\n model_name = filename[11:]\n checkpoint = load_url_dist(model_urls[model_name])\n elif filename.startswith('torchvision://'):\n model_urls = get_torchvision_models()\n model_name = filename[14:]\n checkpoint = load_url_dist(model_urls[model_name])\n elif filename.startswith('open-mmlab://'):\n model_urls = get_external_models()\n model_name = filename[13:]\n deprecated_urls = get_deprecated_model_names()\n if model_name in deprecated_urls:\n warnings.warn(f'open-mmlab://{model_name} is deprecated in favor '\n f'of open-mmlab://{deprecated_urls[model_name]}')\n model_name = deprecated_urls[model_name]\n model_url = model_urls[model_name]\n # check if is url\n if model_url.startswith(('http://', 'https://')):\n checkpoint = load_url_dist(model_url)\n else:\n filename = osp.join(_get_mmcv_home(), model_url)\n if not osp.isfile(filename):\n raise IOError(f'{filename} is not a checkpoint file')\n checkpoint = torch.load(filename, map_location=map_location)\n elif filename.startswith('mmcls://'):\n model_urls = get_mmcls_models()\n model_name = filename[8:]\n checkpoint = load_url_dist(model_urls[model_name])\n checkpoint = _process_mmcls_checkpoint(checkpoint)\n elif filename.startswith(('http://', 'https://')):\n checkpoint = load_url_dist(filename)\n elif filename.startswith('pavi://'):\n model_path = filename[7:]\n checkpoint = load_pavimodel_dist(model_path, map_location=map_location)\n elif filename.startswith('s3://'):\n checkpoint = load_fileclient_dist(\n filename, backend='ceph', map_location=map_location)\n else:\n if not osp.isfile(filename):\n raise IOError(f'{filename} is not a checkpoint file')\n checkpoint = torch.load(filename, map_location=map_location)\n return checkpoint", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_ckp(checkpoint_fpath, model, optimizer, device):\n\n checkpoint = torch.load(checkpoint_fpath,map_location=device)\n model.load_state_dict(checkpoint['state_dict'])\n 
optimizer.load_state_dict(checkpoint['optimizer'])\n valid_acc = checkpoint['valid_acc'] \n return model, optimizer, checkpoint['epoch'], valid_acc", "def _restore_checkpoint(self, checkpoint_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path)\n pretrained_dict = checkpoint['state_dict'] # 预训练模型的state_dict\n model_dict = self.model.state_dict() # 当前用来训练的模型的state_dict\n \n if pretrained_dict.keys() != model_dict.keys(): # 需要进行参数的适配\n print('Parameters are inconsistant, adapting model parameters ...')\n # 在合并前(update),需要去除pretrained_dict一些不需要的参数\n # 只含有识别分支的预训练模型参数字典中键'0', '1'对应全模型参数字典中键'2', '3'\n pretrained_dict['2'] = transfer_state_dict(pretrained_dict['0'], model_dict['2'])\n pretrained_dict['3'] = transfer_state_dict(pretrained_dict['1'], model_dict['3'])\n del pretrained_dict['0'] # 把原本预训练模型中的键值对删掉,以免错误地更新当前模型中的键值对\n del pretrained_dict['1']\n model_dict.update(pretrained_dict) # 更新(合并)模型的参数\n self.model.load_state_dict(model_dict)\n else:\n print('Parameters are consistant, load state dict directly ...')\n self.model.load_state_dict(checkpoint['state_dict'])\n # self.optimizer.load_state_dict(checkpoint['optimizer'])\n # if self.with_cuda:\n # for state in self.optimizer.state.values():\n # for k, v in state.items():\n # if isinstance(v, torch.Tensor):\n # state[k] = v.cuda(self.device)", "def load_from_checkpoint(self, path):\n print(f'# loading trainer state from {path}')\n checkpoint = torch.load(path)\n self.load(checkpoint)", "def load_checkpoint(checkpoint, model, optimizer=None):\n model_state_dict, optimizer_state_dict = torch.load(checkpoint)\n model.load_state_dict(model_state_dict)\n\n if optimizer is not None:\n optimizer.load_state_dict(optimizer_state_dict)", "def load_checkpoint(self, checkpoint_path=None):\n if checkpoint_path is None:\n checkpoint_path = self.get_latest_path()\n\n if os.path.isfile(checkpoint_path):\n key = 'cuda' if torch.cuda.is_available() else 'cpu'\n checkpoint = torch.load(checkpoint_path, map_location=key)\n self.network.load_state_dict(checkpoint['network'])\n self.network_target.load_state_dict(checkpoint['network_target'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('checkpoint loaded at {}'.format(checkpoint_path))\n else:\n raise OSError(\"Checkpoint file not found.\")", "def load_actor(self, checkpoint):\n \n model = torch.load(checkpoint)\n self.actor_local.load_state_dict(model)", "def load(self, checkpoint_dir):\n print(\"\\nReading Checkpoints.....\\n\\n\")\n model_dir = \"%s\" % (\"cnn\") # give the model name by label_size\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n \n # Check the checkpoint is exist\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = str(ckpt.model_checkpoint_path) # convert the unicode to string\n self.saver.restore(self.sess, os.path.join(os.getcwd(), ckpt_path))\n print(\"\\n Checkpoint Loading Success! %s\\n\\n\"% ckpt_path)\n else:\n print(\"\\n! 
Checkpoint Loading Failed \\n\\n\")", "def _load(checkpoint_path):\n state_dict, optimizer_state = dg.load_persistables(dirname=checkpoint_path)\n return state_dict, optimizer_state", "def load_checkpoint_ram(self, checkpoint, train=True):\n # -- For all tasks, create a corresponding head, otherwise the restoring would not work due to mismatching weights -- #\n self.mh_network.add_n_tasks_and_activate(self.already_trained_on[str(self.fold)]['tasks_at_time_of_checkpoint'],\n self.already_trained_on[str(self.fold)]['active_task_at_time_of_checkpoint'])\n \n # -- Set the network to the full MultiHead_Module network to restore everything -- #\n self.network = self.mh_network\n \n # -- Use parent class to save checkpoint for MultiHead_Module model consisting of self.model, self.body and self.heads -- #\n super().load_checkpoint_ram(checkpoint, train)\n\n # -- Reset network to the assembled model to continue training -- #\n self.network = self.mh_network.model", "def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)", "def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)", "def load_snapshot(device, net, snapshot_name, optimizer=None):\n\ttry:\n\t\tcheckpoint = torch.load(snapshot_name+'.pth', map_location=device)\n\t\tnet.load_state_dict(checkpoint['model_state_dict'])\n\t\tif optimizer:\n\t\t\trestore_optimizer(optimizer, checkpoint)\n\texcept:\n\t\tcheckpoint = None\t\n\treturn checkpoint", "def load_pretrained_model(self, load_from):\n print(\"loading model from %s\\n\" % (load_from))\n try:\n if self.use_cuda:\n pretrained_dict = torch.load(load_from)\n else:\n pretrained_dict = torch.load(load_from, map_location='cpu')\n\n model_dict = self.online_net.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n self.online_net.load_state_dict(model_dict)\n print(\"The loaded parameters are:\")\n keys = [key for key in pretrained_dict]\n print(\", \".join(keys))\n print(\"--------------------------\")\n except Exception as e:\n print(\"Failed to load checkpoint...\")\n print(e)", "def load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n \n arch = checkpoint['arch']\n if arch == 'vgg':\n model = models.vgg16(pretrained=True)\n elif arch == 'densenet':\n model = models.densenet121(pretrained=True) \n \n model.class_to_idx = checkpoint['class_to_idx']\n model.classifier = checkpoint['classifier']\n model.classifier.load_sate_dict = checkpoint['classifier_state_dict']\n model.optimizer = checkpoint['optimizer_state_dict']\n model.input_size = checkpoint['input_size']\n model.output_size = checkpoint['output_size']\n \n return model", "def load_checkpoint(cfg, args):\n checkpoint_iteration = args.checkpoint\n bucket = connect_to_bucket(args.bucket)\n # load actual checkpoint\n if not os.path.isdir(cfg.OUTPUT_DIR):\n os.mkdir(cfg.OUTPUT_DIR)\n blob = bucket.blob(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n blob.download_to_filename(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n if args.resume:\n # also write last checkpoint file for when --resume statement, model gets checkpoint name from this file\n with open(cfg.OUTPUT_DIR + \"/last_checkpoint\", \"w\") as file:\n file.write(\"model_\" + str(checkpoint_iteration) + \".pth\")\n # return statement not clean, but useful for inference code\n return checkpoint_iteration, 
bucket", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path)\n try:\n model.load_state_dict(state['model_state_dict'])\n except BaseException as e:\n print('Failed to do something: ' + str(e))\n\n if optimizer is not None:\n try:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n except Exception as e:\n print(e)\n\n return state", "def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")", "def load_checkpoint(filename: str) -> CheckpointData:\n return torch.load(filename)", "def checkpoint_load(checkpoint_path, gpu):\n model_info = torch.load(checkpoint_path)\n model = models.vgg19(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n \n model.class_to_idx = model_info['class_to_idx']\n\n model = classifier(model)\n model.load_state_dict(model_info[\"model_state_dict\"])\n return model, model.class_to_idx", "def load(self):\n try:\n if self.model.is_cuda:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \"save_point.pth\")))\n else:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \\\n \"save_point.pth\"), map_location=\"cpu\"))\n except:\n sys.exit(\"Unable to load previous model\")", "def load_checkpoint(args, trainer, epoch_itr):\n os.makedirs(os.path.join(args.save_dir, 'checkpoints'), exist_ok=True)\n checkpoint_path = os.path.join(args.save_dir, 'checkpoints', args.restore_file)\n if os.path.isfile(checkpoint_path):\n extra_state = trainer.load_checkpoint(checkpoint_path)\n if extra_state is not None:\n # replay train iterator to match checkpoint\n epoch_itr.load_state_dict(extra_state['train_iterator'])\n\n print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(\n checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))\n\n trainer.lr_step(epoch_itr.epoch)\n trainer.lr_step_update(trainer.get_num_updates())\n if 'best' in extra_state:\n save_checkpoint.best = extra_state['best']", "def load_checkpoint(checkpoint_dir, epoch, iteration):\n path = opj(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')\n if not os.path.isfile(path):\n raise Exception(\"Checkpoint in epoch %d doesn't exist :sob:\" % epoch)\n\n checkpoint = torch.load(path)\n start_epoch = checkpoint['epoch']\n state_dict = checkpoint['state_dict']\n start_iteration = checkpoint['iteration']\n\n assert iteration == start_iteration\n return start_epoch, start_iteration, state_dict", "def loadCheckpoint(self, time_stamp, data_only=False, load_memory=True):\n state_filename = os.path.join(self.saving_dir, 'checkpoint.' + time_stamp + '.pth.tar')\n mem_filename = os.path.join(self.saving_dir, 'memory.' 
+ time_stamp + '.pth.tar')\n\n print 'loading checkpoint: ', time_stamp\n checkpoint = torch.load(state_filename)\n if data_only:\n self.episode_rewards = checkpoint['episode_rewards']\n self.episode_lengths = checkpoint['episode_lengths']\n return\n\n self.episodes_done = checkpoint['episode']\n self.steps_done = checkpoint['steps']\n self.episode_rewards = checkpoint['episode_rewards']\n self.episode_lengths = checkpoint['episode_lengths']\n\n self.policy_net.load_state_dict(checkpoint['policy_state_dict'])\n self.policy_net = self.policy_net.to(self.device)\n self.policy_net.train()\n\n self.target_net.load_state_dict(checkpoint['policy_state_dict'])\n self.target_net = self.target_net.to(self.device)\n self.target_net.eval()\n\n self.optimizer = optim.Adam(self.policy_net.parameters())\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n if load_memory:\n memory = torch.load(mem_filename)\n self.memory = memory['memory']", "def restore(self, checkpoint):\n raise NotImplementedError", "def load_training_checkpoint(args, model, PATH, ckpt_id):\r\n logger = args.logger\r\n _, checkpoint_state_dict = model.network.load_checkpoint(PATH, ckpt_id)\r\n epoch = checkpoint_state_dict['epoch']\r\n last_global_step = checkpoint_state_dict['last_global_step']\r\n last_global_data_samples = checkpoint_state_dict[\r\n 'last_global_data_samples']\r\n del checkpoint_state_dict\r\n return (epoch, last_global_step, last_global_data_samples)", "def load_checkpoint(checkpoint_path, model, optimizer=None,\n model_key='model_state_dict', optimizer_key='optimizer_state_dict'):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(state[model_key])\n\n if optimizer is not None:\n optimizer.load_state_dict(state[optimizer_key])\n\n return state", "def try_and_init_from(self, path):\n log.info(\"Loading weights from foreign checkpoint {}\".format(path))\n if not os.path.exists(path):\n raise ValueError(\"Checkpoint {} does not exist\".format(path))\n\n chkpt = th.load(path, map_location=th.device(\"cpu\"))\n if \"model\" not in chkpt.keys() or chkpt[\"model\"] is None:\n raise ValueError(\"{} has no model saved\".format(path))\n\n mdl = chkpt[\"model\"]\n for n, p in self.model.named_parameters():\n if n in mdl:\n p2 = mdl[n]\n if p2.shape != p.shape:\n log.warning(\"Parameter {} ignored, checkpoint size does not match: {}, should be {}\".format(n, p2.shape, p.shape))\n continue\n log.debug(\"Parameter {} copied\".format(n))\n p.data.copy_(p2)\n else:\n log.warning(\"Parameter {} ignored, not found in source checkpoint.\".format(n))\n\n log.info(\"Weights loaded from foreign checkpoint {}\".format(path))", "def load_checkpoint(filename, from_gpu=True):\r\n assert os.path.exists(filename)\r\n if from_gpu:\r\n return torch.load(filename)\r\n else:\r\n return torch.load(filename, map_location=lambda storage, loc: storage)", "def load_checkpoint(self, path: str = '', train: bool = True) -> int:\n\n if not path:\n dir_ = os.path.dirname(os.path.realpath('__file__'))\n path = os.path.join(dir_, 'model.pt')\n\n try:\n ckpt = torch.load(path)\n except FileNotFoundError:\n return 0\n else:\n print('Loaded model at epoch: ', end='')\n\n self.load_state_dict(ckpt['model_state_dict'])\n self.actor_optimizer.load_state_dict(ckpt['ac_optim_dict'])\n self.critic_optimizer.load_state_dict(ckpt['critic_optim_dict'])\n epoch = ckpt['epoch']\n\n print(epoch)\n\n if not 
train:\n self.eval()\n else:\n self.train()\n\n return epoch", "def load_checkpoint(path, model, optimizer=None, reset_optimizer=True):\n print(\"Load checkpoint from: {}\".format(path))\n state_dict, optimizer_state = _load(path)\n\n model.load_dict(state_dict)\n if not reset_optimizer and optimizer is not None:\n if optimizer_state is not None:\n print(\"[loading] Load optimizer state from {}\".format(path))\n optimizer.load(optimizer_state)\n\n return model", "def load(cls, path):\n print(f'load checkpoint from {path}')\n if torch.cuda.is_available():\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME))\n model = torch.load(os.path.join(path, cls.MODEL_NAME))\n else:\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME), map_location=lambda storage, loc: storage)\n model = torch.load(os.path.join(path, cls.MODEL_NAME), map_location=lambda storage, loc: storage)\n \n # model.flatten_parameters() # make RNN parameters contiguous\n optimizer = resume_checkpoint['optimizer']\n return Checkpoint(\n model=model, \n optimizer=optimizer,\n epoch=resume_checkpoint['epoch'],\n path=path\n )", "def load_model(model, checkpoint_path: str): \r\n checkpoint = torch.load(checkpoint_path)\r\n model.load_state_dict(checkpoint['model'])\r\n epoch = checkpoint['epoch']\r\n print('Loaded model from {}, epoch {}'.format(checkpoint_path, epoch))", "def load_checkpoint(self, checkpoint_path: Union[str, Path]) -> Dict[str, Any]:\n # TODO: move to CheckpointIO\n torch.cuda.empty_cache()\n checkpoint_path = inject_model_parallel_rank(checkpoint_path)\n return self.checkpoint_io.load_checkpoint(checkpoint_path)", "def load_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n raise Exception('No checkpoint directory <%s>' % checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n self.model.load_state_dict(torch.load(path, self.device))\r\n self.update()", "def unpack_checkpoint(\n self,\n checkpoint: Dict,\n model=None,\n criterion=None,\n optimizer=None,\n scheduler=None,\n **kwargs,\n ) -> None:\n super().unpack_checkpoint(\n checkpoint,\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n scheduler=scheduler,\n **kwargs,\n )\n\n # NOTE: propper way to load state, docs:\n # https://nvidia.github.io/apex/amp.html#checkpointing\n if \"amp\" in checkpoint:\n amp.load_state_dict(checkpoint[\"amp\"])", "def unpack_checkpoint(\n self,\n checkpoint: Dict,\n model=None,\n criterion=None,\n optimizer=None,\n scheduler=None,\n **kwargs,\n ) -> None:\n super().unpack_checkpoint(\n checkpoint,\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n scheduler=scheduler,\n **kwargs,\n )\n\n # NOTE: propper way to load state, docs:\n # https://nvidia.github.io/apex/amp.html#checkpointing\n if \"amp\" in checkpoint:\n amp.load_state_dict(checkpoint[\"amp\"])", "def load_checkpoint(path: str, use_cuda: bool = True) -> dict:\n assert os.path.isfile(path), \"Checkpoint %s not found\" % path\n checkpoint = torch.load(path, map_location=\"cuda\" if use_cuda else \"cpu\")\n return checkpoint", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(\"Checkpoint '{}' does not exist\".format(checkpoint_path))\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\n state = torch.load(checkpoint_path, map_location=\"cuda:0\")\n model.load_state_dict(state['model_state_dict'])\n\n if optimizer is not None:\n 
optimizer.load_state_dict(state['optimizer_state_dict'])\n\n return state", "def load_checkpoint(fpath):\n if fpath is None:\n raise ValueError('File path is None')\n if not osp.exists(fpath):\n raise FileNotFoundError('File is not found at \"{}\"'.format(fpath))\n map_location = None if torch.cuda.is_available() else 'cpu'\n try:\n checkpoint = torch.load(fpath, map_location=map_location)\n except UnicodeDecodeError:\n pickle.load = partial(pickle.load, encoding=\"latin1\")\n pickle.Unpickler = partial(pickle.Unpickler, encoding=\"latin1\")\n checkpoint = torch.load(\n fpath, pickle_module=pickle, map_location=map_location\n )\n except Exception:\n print('Unable to load checkpoint from \"{}\"'.format(fpath))\n raise\n return checkpoint", "def load_checkpoint(model,\n filename,\n map_location='cpu',\n strict=False,\n logger=None):\n checkpoint = _load_checkpoint(filename, map_location)\n # OrderedDict is a subclass of dict\n if not isinstance(checkpoint, dict):\n raise RuntimeError(\n f'No state_dict found in checkpoint file {filename}')\n # get state_dict from checkpoint\n if 'state_dict' in checkpoint:\n state_dict = checkpoint['state_dict']\n elif 'model' in checkpoint:\n state_dict = checkpoint['model']\n elif 'module' in checkpoint:\n state_dict = checkpoint['module']\n else:\n state_dict = checkpoint\n # strip prefix of state_dict\n if list(state_dict.keys())[0].startswith('module.'):\n state_dict = {k[7:]: v for k, v in state_dict.items()}\n\n # for MoBY, load model of online branch\n if sorted(list(state_dict.keys()))[0].startswith('encoder'):\n state_dict = {k.replace('encoder.', ''): v for k, v in state_dict.items() if k.startswith('encoder.')}\n\n # reshape absolute position embedding for Swin\n if state_dict.get('absolute_pos_embed') is not None:\n absolute_pos_embed = state_dict['absolute_pos_embed']\n N1, L, C1 = absolute_pos_embed.size()\n N2, C2, H, W = model.absolute_pos_embed.size()\n if N1 != N2 or C1 != C2 or L != H*W:\n logger.warning(\"Error in loading absolute_pos_embed, pass\")\n else:\n state_dict['absolute_pos_embed'] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2)\n\n rank, _ = get_dist_info()\n if \"rel_pos_bias.relative_position_bias_table\" in state_dict:\n if rank == 0:\n print(\"Expand the shared relative position embedding to each layers. 
\")\n num_layers = model.get_num_layers()\n rel_pos_bias = state_dict[\"rel_pos_bias.relative_position_bias_table\"]\n for i in range(num_layers):\n state_dict[\"blocks.%d.attn.relative_position_bias_table\" % i] = rel_pos_bias.clone()\n\n state_dict.pop(\"rel_pos_bias.relative_position_bias_table\")\n\n all_keys = list(state_dict.keys())\n for key in all_keys:\n if \"relative_position_index\" in key:\n state_dict.pop(key)\n\n if \"relative_position_bias_table\" in key:\n rel_pos_bias = state_dict[key]\n src_num_pos, num_attn_heads = rel_pos_bias.size()\n dst_num_pos, _ = model.state_dict()[key].size()\n dst_patch_shape = model.patch_embed.patch_shape\n if dst_patch_shape[0] != dst_patch_shape[1]:\n raise NotImplementedError()\n num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)\n src_size = int((src_num_pos - num_extra_tokens) ** 0.5)\n dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)\n if src_size != dst_size:\n if rank == 0:\n print(\"Position interpolate for %s from %dx%d to %dx%d\" % (\n key, src_size, src_size, dst_size, dst_size))\n extra_tokens = rel_pos_bias[-num_extra_tokens:, :]\n rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]\n\n def geometric_progression(a, r, n):\n return a * (1.0 - r ** n) / (1.0 - r)\n\n left, right = 1.01, 1.5\n while right - left > 1e-6:\n q = (left + right) / 2.0\n gp = geometric_progression(1, q, src_size // 2)\n if gp > dst_size // 2:\n right = q\n else:\n left = q\n\n # if q > 1.13492:\n # q = 1.13492\n\n dis = []\n cur = 1\n for i in range(src_size // 2):\n dis.append(cur)\n cur += q ** (i + 1)\n\n r_ids = [-_ for _ in reversed(dis)]\n\n x = r_ids + [0] + dis\n y = r_ids + [0] + dis\n\n t = dst_size // 2.0\n dx = np.arange(-t, t + 0.1, 1.0)\n dy = np.arange(-t, t + 0.1, 1.0)\n if rank == 0:\n print(\"x = {}\".format(x))\n print(\"dx = {}\".format(dx))\n\n all_rel_pos_bias = []\n\n for i in range(num_attn_heads):\n z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()\n f = interpolate.interp2d(x, y, z, kind='cubic')\n all_rel_pos_bias.append(\n torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))\n\n rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)\n new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)\n state_dict[key] = new_rel_pos_bias\n\n if 'pos_embed' in state_dict:\n pos_embed_checkpoint = state_dict['pos_embed']\n embedding_size = pos_embed_checkpoint.shape[-1]\n num_patches = model.patch_embed.num_patches\n num_extra_tokens = model.pos_embed.shape[-2] - num_patches\n # height (== width) for the checkpoint position embedding\n orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)\n # height (== width) for the new position embedding\n new_size = int(num_patches ** 0.5)\n # class_token and dist_token are kept unchanged\n if orig_size != new_size:\n if rank == 0:\n print(\"Position interpolate from %dx%d to %dx%d\" % (orig_size, orig_size, new_size, new_size))\n extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]\n # only the position tokens are interpolated\n pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]\n pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)\n pos_tokens = torch.nn.functional.interpolate(\n pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)\n pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)\n new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)\n state_dict['pos_embed'] = new_pos_embed\n\n # interpolate position 
bias table if needed\n relative_position_bias_table_keys = [k for k in state_dict.keys() if \"relative_position_bias_table\" in k]\n for table_key in relative_position_bias_table_keys:\n table_pretrained = state_dict[table_key]\n table_current = model.state_dict()[table_key]\n L1, nH1 = table_pretrained.size()\n L2, nH2 = table_current.size()\n if nH1 != nH2:\n logger.warning(f\"Error in loading {table_key}, pass\")\n else:\n if L1 != L2:\n S1 = int(L1 ** 0.5)\n S2 = int(L2 ** 0.5)\n table_pretrained_resized = F.interpolate(\n table_pretrained.permute(1, 0).view(1, nH1, S1, S1),\n size=(S2, S2), mode='bicubic')\n state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0)\n\n # load state_dict\n load_state_dict(model, state_dict, strict, logger)\n return checkpoint", "def from_previous_ckpt(network, checkpoint):\n if os.path.exists(checkpoint):\n if os.path.isfile(checkpoint):\n try:\n network.load_state_dict(torch.load(checkpoint))\n print(f\"Loaded weights from {checkpoint}\")\n except RuntimeError:\n print(f\"{checkpoint} is a invalid checkpoint\")\n print(\"Neglecting this checkpoint.\")\n if os.path.isdir(checkpoint):\n epoch = 0\n file_name = None\n for ckpt in os.listdir(checkpoint):\n if ckpt[-4:] == '.pth':\n try:\n tmp_int_list = re.findall('[0-9]+', ckpt)\n ckpt_epoch = int(tmp_int_list[-1])\n except IndexError:\n ckpt_epoch = 0\n if ckpt_epoch >= epoch:\n epoch = ckpt_epoch\n file_name = os.path.join(checkpoint, ckpt)\n\n if file_name is None:\n print(f\"No checkpoint found in {checkpoint}\")\n print(\"Neglecting this checkpoint.\")\n else:\n try:\n network.load_state_dict(torch.load(file_name))\n print(f\"Loaded weights from {file_name}\")\n except (RuntimeError):\n print(f\"{file_name} is a invalid checkpoint\")\n print(\"Neglecting this checkpoint.\")\n\n else:\n print(f\"the checkpoint path: {checkpoint} doesn't exist.\")\n print(\"Neglecting this checkpoint.\")", "def load_pretrained(model, fname, optimizer=None):\n if os.path.isfile(fname):\n print(\"=> loading checkpoint '{}'\".format(fname))\n checkpoint = torch.load(fname)\n model.load_state_dict(checkpoint['state_dict'])\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint['optimizer'])\n return model, optimizer, checkpoint['epoch']\n else:\n return model\n else:\n raise Exception(\"=> no checkpoint found at '{}'\".format(fname))", "def _load_checkpoint(cls, model: DeviceAwareModule, checkpoint_path: Path,\n key_in_state_dict: str, use_gpu: bool) -> int:\n logging.info(f\"Loading checkpoint {checkpoint_path}\")\n checkpoint = ModelAndInfo.read_checkpoint(checkpoint_path, use_gpu)\n\n try:\n state_dict = checkpoint[key_in_state_dict]\n except KeyError:\n logging.error(f\"Key {key_in_state_dict} not found in checkpoint\")\n return False\n\n if isinstance(model, torch.nn.DataParallel):\n result = model.module.load_state_dict(state_dict, strict=False)\n else:\n result = model.load_state_dict(state_dict, strict=False)\n\n if result.missing_keys:\n logging.warning(f\"Missing keys in model checkpoint: {result.missing_keys}\")\n if result.unexpected_keys:\n logging.warning(f\"Unexpected keys in model checkpoint: {result.unexpected_keys}\")\n\n return checkpoint[ModelAndInfo.EPOCH_KEY]", "def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = 
mod()\n self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])", "def load_checkpoint(path):\n\n # Get the model name\n model_name = path.split('-')[0]\n assert (model_name in ['vgg16', 'resnet50'\n ]), \"Path must have the correct model name\"\n\n # Load in checkpoint\n checkpoint = torch.load(path)\n\n if model_name == 'vgg16':\n model = models.vgg16(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.classifier = checkpoint['classifier']\n\n elif model_name == 'resnet50':\n model = models.resnet50(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.fc = checkpoint['fc']\n\n # Load in the state dict\n model.load_state_dict(checkpoint['state_dict'])\n\n total_params = sum(p.numel() for p in model.parameters())\n print(f'{total_params:,} total parameters.')\n total_trainable_params = sum(\n p.numel() for p in model.parameters() if p.requires_grad)\n print(f'{total_trainable_params:,} total gradient parameters.')\n\n # Move to gpu\n if multi_gpu:\n model = nn.DataParallel(model)\n\n if train_on_gpu:\n model = model.to('cuda')\n\n # Model basics\n model.class_to_idx = checkpoint['class_to_idx']\n model.idx_to_class = checkpoint['idx_to_class']\n model.epochs = checkpoint['epochs']\n\n # Optimizer\n optimizer = checkpoint['optimizer']\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, optimizer", "def load_model(path):\n if os.path.isfile(path):\n print(\"=> loading checkpoint '{}'\".format(path))\n checkpoint = torch.load(path)\n\n # # size of the top layer\n # N = checkpoint['state_dict']['top_layer.bias'].size()\n #\n # # build skeleton of the model\n # sob = 'sobel.0.weight' in checkpoint['state_dict'].keys()\n # model = models.__dict__[checkpoint['arch']](sobel=sob, out=int(N[0]))\n #\n # # deal with a dataparallel table\n # def rename_key(key):\n # if not 'module' in key:\n # return key\n # return ''.join(key.split('.module'))\n #\n # checkpoint['state_dict'] = {rename_key(key): val\n # for key, val\n # in checkpoint['state_dict'].items()}\n #\n # # load weights\n # model.load_state_dict(checkpoint['state_dict'])\n # print(\"Loaded\")\n # else:\n # model = None\n # print(\"=> no checkpoint found at '{}'\".format(path))\n\n # net = models.__dict__['ResNet18'](low_dim=128)\n # net = models.__dict__['resnet18'](low_dim=128)\n\n net = models.__dict__['alexnet'](out=128)\n # net = models.__dict__['Alexnet_C'](out=args.low_dim)\n\n net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n net.load_state_dict(checkpoint['net'])\n\n return net", "def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])", "def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])", "def load_model(self, ckpt_name=\"best_model.pth\"):\n path = \"/\".join(ckpt_name.split(\"/\")[:-1])\n chkpt = torch.load(ckpt_name)\n self.start_epoch = chkpt['epoch']\n self.best_metric = chkpt['best_metric']\n\n # fix the 
DataParallel caused problem with keys names\n if self.multi_gpu_flag:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=False)\n self.net.load_state_dict(new_state_dict)\n else:\n try:\n self.net.load_state_dict(chkpt['state_dict'])\n except:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=True)\n self.net.load_state_dict(new_state_dict)\n\n if self.load_optimizer_state:\n self.optimizer.load_state_dict(chkpt['optimizer'])\n logging.info(\"******** State loaded ********\")\n\n training_meta = pickle.load(open(f\"{path}/training_meta.pickle.dat\", \"rb\"))\n for k, v in training_meta.items():\n if k in self.__class__.__params:\n setattr(self, k, v)\n logging.info(\"******** Training params loaded ********\")", "def load_checkpoint(checkpoint_file: pl.Path) -> Optional[Dict[str, Any]]:\n if checkpoint_file.exists():\n logger.info(f\"Loading checkpoint {checkpoint_file}.\")\n checkpoint = torch.load(str(checkpoint_file))\n logger.info(f\"Done loading checkpoint from epoch {checkpoint['epoch']}.\")\n else:\n logger.warning(f\"No {checkpoint_file} checkpoint file found. Starting normal.\")\n return checkpoint", "def load_from_checkpoint(results_dir, load_fn, args):\n ckpt_dir = os.path.join(results_dir, \"tb\", \"version_0\", \"checkpoints\")\n files = os.listdir(ckpt_dir)\n assert len(files) > 0, \"Checkpoint directory is empty\"\n ckpt_path = os.path.join(ckpt_dir, files[-1])\n model = load_fn(checkpoint_path=ckpt_path, args=args)\n return model", "def load_state_dict(self, checkpoint):\n self.net.load_state_dict(checkpoint['Net'])\n self.optimizer.load_state_dict(checkpoint['Optimizer'])\n\n if ADVERSARIAL_FLAG:\n self.adv_net.load_state_dict(checkpoint['AdvNet'])\n self.adv_optimizer.load_state_dict(checkpoint['AdvOptimizer'])\n\n self.history = checkpoint['History']\n self.stats = checkpoint['Stats']\n\n # The following loops are used to fix a bug that was\n # discussed here: https://github.com/pytorch/pytorch/issues/2830\n # (it is supposed to be fixed in recent PyTorch version)\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.to(self.net.device)\n if ADVERSARIAL_FLAG:\n for adv_state in self.adv_optimizer.state.values():\n for k, v in adv_state.items():\n if isinstance(v, torch.Tensor):\n adv_state[k] = v.to(self.adv_net.device)", "def load_ckpt(args):\n net = model.Model.load_from_checkpoint(args.ckpt)\n net = net.eval().requires_grad_(False).to(args.device)\n return net", "def load_epoch_checkpoint(self, directory, epoch):\n chkpnt = torch.load(directory / f\"chkpnt_epoch{epoch:04d}.pth\")\n self.load_state_dict(chkpnt['model_state_dict'])", "def load_checkpoints(args, model): \n print('Loading the model checkpoints from iter {}...'.format(args.resume_iter))\n checkpoint_path = os.path.join(config.checkpoint_path, args.model_type)\n\n gen_g_path = os.path.join(checkpoint_path, '{}-Gen_g.ckpt'.format(args.resume_iter))\n gen_f_path = os.path.join(checkpoint_path, '{}-Gen_f.ckpt'.format(args.resume_iter))\n model.gen_g.load_state_dict(torch.load(gen_g_path, map_location=lambda storage, loc: storage))\n model.gen_f.load_state_dict(torch.load(gen_f_path, map_location=lambda storage, loc: storage))\n\n if args.train:\n dis_c_path = os.path.join(checkpoint_path, '{}-Dis_c.ckpt'.format(args.resume_iter))\n dis_t_path = os.path.join(checkpoint_path, '{}-Dis_t.ckpt'.format(args.resume_iter))\n model.dis_c.load_state_dict(torch.load(dis_c_path, map_location=lambda storage, loc: 
storage))\n model.dis_t.load_state_dict(torch.load(dis_t_path, map_location=lambda storage, loc: storage))", "def load(self, filename):\n\n c = torch.load(filename)\n\n if type(c) is dict:\n sd = c['state_dict']\n self.net.load_state_dict(sd)\n if 'monitors' in c: # Remove the branching eventually\n self.monitors = c['monitors']\n else:\n self.monitors = {'loss_train': c['train_monitor'], 'loss_val': c['val_monitor'],\n 'accu_train': MetricHistory(), 'accu_val': MetricHistory()}\n if 'optimizer' in c: # Remove the branching eventually\n self.optimizer.load_state_dict(c['optimizer'])\n else:\n raise RuntimeError('Unsupported checkpoint. (Not a dict)')\n\n self.parent = filename\n self.last_checkpoint = filename\n self.start_epoch = self.monitors['loss_train'].num_epochs", "def load_ckpt(self):\n for name in [\"G\"]:\n if isinstance(name, str):\n # load_filename = '%s_net_%s.pth' % (epoch, name)\n # load_path = os.path.join(self.save_dir, load_filename)\n # ckpt_path =\n net = getattr(self.model, \"net\" + name)\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n print(\"loading the model from %s\" % self.option.ckpt_path)\n # if you are using PyTorch newer than 0.4 (e.g., built from\n # GitHub source), you can remove str() on self.device\n state_dict = torch.load(\n self.option.ckpt_path, map_location=str(self.model.device)\n )\n if hasattr(state_dict, \"_metadata\"):\n del state_dict._metadata\n\n # patch InstanceNorm checkpoints prior to 0.4\n # for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop\n # self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))\n net.load_state_dict(state_dict)", "def load_pretrained_network(self):\n\n if self.manager is None or self.checkpoint is None:\n return False\n status = self.checkpoint.restore(self.manager.latest_checkpoint)\n return status", "def load_pretrained_network(self):\n\n if self.manager is None or self.checkpoint is None:\n return False\n status = self.checkpoint.restore(self.manager.latest_checkpoint)\n return status", "def load_states(self, checkpoint):\n raise NotImplementedError()", "def resume(self, checkpoint):\n model_dict = paddle.load(checkpoint)\n self.model.set_state_dict(model_dict)", "def load_checkpoint(model, filename, map_location='cpu', strict=False, logger=None):\n checkpoint = _load_checkpoint(filename, map_location)\n if not isinstance(checkpoint, dict):\n raise RuntimeError(f'No state_dict found in checkpoint file {filename}')\n if 'state_dict' in checkpoint:\n state_dict_tmp = checkpoint['state_dict']\n else:\n state_dict_tmp = checkpoint\n state_dict = OrderedDict()\n for k, v in state_dict_tmp.items():\n if k.startswith('module.backbone.'):\n state_dict[k[16:]] = v\n elif k.startswith('module.'):\n state_dict[k[7:]] = v\n elif k.startswith('backbone.'):\n state_dict[k[9:]] = v\n else:\n state_dict[k] = v\n load_state_dict(model, state_dict, strict, logger)\n return checkpoint", "def restore(self, checkpoint_path: str):\r\n raise NotImplementedError", "def load_checkpoint(model, model_name='model', validation_id=None):\n path = output_path(_checkpoint_path.format(model_name), validation_id=validation_id, have_validation=True)\n _load_model(model.module if type(model) is torch.nn.DataParallel else model, model_name, path=path, reload=True)", "def restore(self, checkpoint_path):\n start_time = time.time()\n latest_checkpoint = train_util.get_latest_chekpoint(checkpoint_path)\n if latest_checkpoint is not None:\n checkpoint = tf.train.Checkpoint(model=self)\n 
checkpoint.restore(latest_checkpoint).expect_partial()\n logging.info('Loaded checkpoint %s', latest_checkpoint)\n logging.info('Loading model took %.1f seconds', time.time() - start_time)\n else:\n logging.info('Could not find checkpoint to load at %s, skipping.',\n checkpoint_path)", "def load(cls, model_path: str, sample_shape: tuple = None,\n checkpoint: str = None, **kwargs):", "def load_model(self):\n self.pred_net.load((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.load((self.save_path / \"iqn_target_net\").absolute().as_posix())", "def load(self):\r\n # self.model.load_state_dict(torch.load(os.path.join(self.ckpt_dir, 'best_model_state_dict.pt')))\r\n if torch.cuda.is_available():\r\n self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model_INN.pt'))\r\n else:\r\n self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model_INN.pt'), map_location=torch.device('cpu'))", "def parse_checkpoint(checkpoint_path):\n with gfile.Open(checkpoint_path, 'rb') as fp:\n raw_contents = fp.read()\n if raw_contents.startswith(b'model_checkpoint_path'):\n raise ValueError(\n 'Attempting to restore a TensorFlow checkpoint as a native T5X '\n f'checkpoint. Path: {checkpoint_path}')\n return serialization.msgpack_restore(raw_contents)", "def load_model(self, path):\n self._saver.restore(self._sess, path + '/model.ckp')\n pkl_file = open(path + '/som.pkl', 'rb')\n restored = pickle.load(pkl_file)\n pkl_file.close()\n self._m = restored['_m']\n self._n = restored['_n']\n self._neighbourhood = restored['_neighbourhood']\n # self._topography = restored['_topography']\n self._num_iterations = restored['_num_iterations']\n self._Wts = restored['_Wts']\n self._locations = restored['_locations']\n self._learned = restored['_learned']\n self._centroid_grid = restored['_centroid_grid']\n self.abnormal_dist = restored['abnormal_dist']\n\n print(\"Model restored from path: \" + path)", "def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n self.load_state_dict(ckpt[key])\n # self.to(self.device)", "def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n self.load_state_dict(ckpt[key])\n # self.to(self.device)", "def load_from_path(self, checkpoint_dir):\n\n vars = self.save_var_names\n saver = tf.train.Saver(vars)\n\n def load_aux(ckpt_path):\n \"\"\"Helper function to not repeat the same code in the following lines.\"\"\"\n\n ckpt_name = os.path.basename(ckpt_path)\n saver.restore(self.sess, ckpt_path)\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", ckpt_name)).group(0))\n self.counter = counter\n print(\" [*] Loaded {}\".format(ckpt_name))\n return True, counter\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n try:\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n return load_aux(os.path.join(checkpoint_dir, ckpt_name))\n else:\n print(\n \" [!] Failed to find a checkpoint within directory {}\".format(\n FLAGS.ckpt_path))\n return False, 0\n except:\n print(\" [!] 
Failed to find a checkpoint, Exception!\")\n return False, 0", "def load_checkpoint(path: str, save_dir: str, cuda: bool = False, attention_viz: bool = False) -> nn.Module:\r\n # Load model and args\r\n state = torch.load(path, map_location=lambda storage, loc: storage)\r\n args, loaded_state_dict = state['args'], state['state_dict']\r\n\r\n # Update args with current args\r\n args.cuda = cuda\r\n args.attention_viz = attention_viz\r\n args.save_dir = save_dir\r\n\r\n model = build_model(args)\r\n model.load_state_dict(loaded_state_dict)\r\n\r\n if cuda:\r\n print('Moving model to cuda')\r\n model = model.cuda()\r\n\r\n return model", "def load_from_checkpoint(self, chkpt, section=None):\n if section is None:\n section = self.name\n self.load_state_dict(chkpt[section])", "def _restore(self, checkpoint):\n checkpoint_path = os.path.join(checkpoint, \"model_weights\")\n self.model.load_weights(checkpoint_path)", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load(self, model_path):\n # TODO: include new params based on ConfigEnum\n checkpoint = torch.load(model_path)\n\n self.image_size = checkpoint['image_size']\n self.device = checkpoint['device']\n self.fp16 = checkpoint['fp16']\n self.accumulate_grad_steps = checkpoint['accumulate_grad_steps']\n self.experiment_id = checkpoint['experiment_id']\n self.experiment_tag = checkpoint['experiment_tag']\n self.seed = checkpoint['seed']\n self.train_batch_size = checkpoint['train_batch_size']\n self.valid_batch_size = checkpoint['valid_batch_size']\n self.test_batch_size = checkpoint['test_batch_size']\n self.dataloader_num_workers = checkpoint['dataloader_num_workers']\n self.train_dataloader_shuffle = checkpoint['train_dataloader_shuffle']\n self.optimizer_type = checkpoint['optimizer_type']\n self.optimizer_params = checkpoint['optimizer_params']\n self.scheduler_type = checkpoint['scheduler_type']\n self.scheduler_params = checkpoint['scheduler_params']\n self.step_scheduler_after = checkpoint['step_scheduler_after']\n self.step_scheduler_metric = checkpoint['step_scheduler_metric']\n self.compute_train_loss_after = checkpoint['compute_train_loss_after']\n self.compute_train_metric_after = checkpoint['compute_train_metric_after']\n self.compute_valid_loss_after = checkpoint['compute_valid_loss_after']\n self.compute_valid_metric_after = checkpoint['compute_valid_metric_after']\n self.training_stopping_criteria = checkpoint['training_stopping_criteria']\n self.stopping_criteria_params = checkpoint['stopping_criteria_params']\n self.max_epoch = checkpoint['max_epoch']\n self.train_on_all_data = checkpoint['train_on_all_data']\n self.validate_after = checkpoint['validate_after']\n self.validation_steps = checkpoint['validation_steps']\n self.run_lr_range_test= checkpoint['run_lr_range_test']\n self.sleep_in_epochs = checkpoint['sleep_in_epochs']\n self.sleep_time = checkpoint['sleep_time']\n self.checkpoint_epochs = checkpoint['checkpoint_epochs']\n\n self._best_score = checkpoint['_best_score']\n self._current_score = 
checkpoint['_current_score']\n self._counter = checkpoint['_counter']\n self.metrics = checkpoint['metrics']\n self.current_epoch = checkpoint['current_epoch']\n self.current_train_batch = checkpoint['current_train_batch']\n self.current_valid_batch = checkpoint['current_valid_batch']\n self.num_train_samples = checkpoint['num_train_samples']\n self.num_train_iterations = checkpoint['num_train_iterations']\n self.checkpoint_snapshot = checkpoint['checkpoint_snapshot'] \n\n # initialize optimizer, scheduler, and gradient scaler\n self.configure_optimizers()\n self.configure_schedulers()\n \n if self.fp16:\n self.scaler = torch.cuda.amp.GradScaler()\n\n if self.model:\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.to(self.device)\n\n if self.optimizer:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n if self.scheduler:\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n\n #if self.scaler:\n # self.scaler.load_state_dict(checkpoint['scaler'])", "def load_model(self):\n if self.ckpt_flag:\n LOG('Skip Loading Pre-trained Model......')\n else:\n if self.params.pre_trained_from is not None and os.path.exists(self.params.pre_trained_from):\n try:\n LOG('Loading Pre-trained Model at %s' % self.params.pre_trained_from)\n pretrain = torch.load(self.params.pre_trained_from)\n self.network.load_state_dict(pretrain)\n LOG('Pre-trained Model Loaded!')\n except:\n WARNING('Cannot load pre-trained model. Start training......')\n else:\n WARNING('Pre-trained model do not exits. Start training......')" ]
[ "0.7687991", "0.76203847", "0.7493186", "0.74810845", "0.74057525", "0.7371094", "0.7363626", "0.7347734", "0.72986746", "0.7289724", "0.7277159", "0.7276603", "0.72691387", "0.72505075", "0.7244596", "0.7222293", "0.7212705", "0.72107184", "0.72104317", "0.7187941", "0.7176097", "0.7169621", "0.71634567", "0.7154977", "0.7152088", "0.7141425", "0.71232444", "0.7061941", "0.7060443", "0.7052338", "0.70101655", "0.70101655", "0.7007746", "0.7004531", "0.69980425", "0.69811654", "0.69792926", "0.6975761", "0.69741213", "0.6957113", "0.694436", "0.69418347", "0.68856215", "0.68710923", "0.6868703", "0.68669397", "0.6823211", "0.6822184", "0.68215424", "0.6809755", "0.6802562", "0.6790647", "0.67889607", "0.6785114", "0.6785018", "0.67843425", "0.67843425", "0.677811", "0.6761976", "0.67592853", "0.6746907", "0.6743591", "0.673704", "0.67344487", "0.67114633", "0.6663579", "0.6656717", "0.6632705", "0.6632705", "0.66303945", "0.6624384", "0.66181225", "0.6605354", "0.66004914", "0.6589624", "0.6583819", "0.65807134", "0.6579836", "0.6569238", "0.6569238", "0.65670913", "0.6566611", "0.6562841", "0.6546239", "0.6537362", "0.6533999", "0.65327007", "0.652791", "0.65246534", "0.6521269", "0.6517666", "0.64832926", "0.64832926", "0.64695233", "0.6457767", "0.645594", "0.6455523", "0.6447092", "0.6447092", "0.6432768", "0.6429268" ]
0.0
-1
Creates invoice related analytics and financial move lines
def action_move_create(self): account_move = self.env['account.move'] for inv in self: if not inv.journal_id.sequence_id: raise UserError(_('Please define sequence on the journal related to this invoice.')) if not inv.invoice_line_ids.filtered(lambda line: line.account_id): raise UserError(_('Please add at least one invoice line.')) if inv.move_id: continue if not inv.date_invoice: inv.write({'date_invoice': fields.Date.context_today(self)}) if not inv.date_due: inv.write({'date_due': inv.date_invoice}) company_currency = inv.company_id.currency_id # create move lines (one per invoice line + eventual taxes and analytic lines) iml = inv.invoice_line_move_line_get() iml += inv.tax_line_move_line_get() diff_currency = inv.currency_id != company_currency # create one move line for the total and possibly adjust the other lines amount total, total_currency, iml = inv.compute_invoice_totals(company_currency, iml) name = inv.name or '' if inv.payment_term_id: totlines = inv.payment_term_id.with_context(currency_id=company_currency.id).compute(total, inv.date_invoice)[0] res_amount_currency = total_currency for i, t in enumerate(totlines): if inv.currency_id != company_currency: amount_currency = company_currency._convert(t[1], inv.currency_id, inv.company_id, inv._get_currency_rate_date() or fields.Date.today()) else: amount_currency = False # last line: add the diff res_amount_currency -= amount_currency or 0 if i + 1 == len(totlines): amount_currency += res_amount_currency _logger.info(inv) iml.append({ 'type': 'dest', 'name': name, 'price': t[1], 'account_id': inv.account_id.id, 'date_maturity': t[0], 'amount_currency': diff_currency and amount_currency, 'currency_id': diff_currency and inv.currency_id.id, 'invoice_id': inv.id, #'partner_id': inv.partner_line_id.id }) else: _logger.info(inv) total_taxes_to_pay = self.return_tax_to_payy() if inv.taxes_collected_id.type_taxes == 'tax_company': iml.append({ 'type': 'dest', 'name': name, 'price': total_taxes_to_pay, 'account_id': inv.taxes_collected_id.account_id.id, 'date_maturity': inv.date_due, 'amount_currency': diff_currency and total_currency, 'currency_id': diff_currency and inv.currency_id.id, 'invoice_id': inv.id, #'partner_id': inv.partner_line_id.id }) iml.append({ 'type': 'dest', 'name': name, 'price': total- total_taxes_to_pay, 'account_id': inv.account_id.id, 'date_maturity': inv.date_due, 'amount_currency': diff_currency and total_currency, 'currency_id': diff_currency and inv.currency_id.id, 'invoice_id': inv.id, #'partner_id': inv.partner_line_id.id }) else: iml.append({ 'type': 'dest', 'name': name, 'price': total, 'account_id': inv.account_id.id, 'date_maturity': inv.date_due, 'amount_currency': diff_currency and total_currency, 'currency_id': diff_currency and inv.currency_id.id, 'invoice_id': inv.id, #'partner_id': inv.partner_line_id.id }) part = self.env['res.partner']._find_accounting_partner(inv.partner_id) #validamo que sea una factura de proveedor if self.type == 'in_invoice': data_new = [] for l in iml: if 'partner_id' in l: if l['partner_id']: data_new.append((0, 0, self.line_get_convert(l, l['partner_id'])) ) else: data_new.append((0, 0, self.line_get_convert(l, part.id)) ) line = [l for l in data_new ] else: line = [(0, 0, self.line_get_convert(l, part.id)) for l in iml ] line = inv.group_lines(iml, line) line = inv.finalize_invoice_move_lines(line) date = inv.date or inv.date_invoice move_vals = { 'ref': inv.reference, 'line_ids': line, 'journal_id': inv.journal_id.id, 'date': date, 'narration': inv.comment, } move = 
account_move.create(move_vals) # Pass invoice in method post: used if you want to get the same # account move reference when creating the same invoice after a cancelled one: move.post(invoice = inv) # make the invoice point to that move vals = { 'move_id': move.id, 'date': date, 'move_name': move.name, } inv.write(vals) return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # Handling of invoice lines : needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice", "def create_invoice(self):\n for line in self:\n # if not line.account_id:\n # raise UserError(_('Please Add the incoming Account !!'))\n self.ensure_one()\n journal_id = self.env['account.journal'].search([\n ('type', '=', 'sale')], limit=1)\n inv_line_main = {\n 'name': line.description.name,\n 'price_unit': line.amount or 0.00,\n 'quantity': 1,\n 'discount': line.discount,\n 'account_id': line.description.property_account_income_id.id or line.description.categ_id.property_account_income_categ_id.id or False,\n }\n inv_values = {\n 'partner_id': line.patient_id.partner_id.id,\n 'patient_id': line.patient_id.id,\n 'dentist': line.dentist.id,\n 'move_type': 'out_invoice',\n 'invoice_date': datetime.now().strftime(DF) or False,\n 'journal_id': journal_id and journal_id.id or False,\n 'teeth_id': line.patient_id and line.patient_id.id or False,\n }\n acc_id = self.env['account.move'].create(inv_values)\n acc_id.write({'invoice_line_ids': [(0, 0, inv_line_main)]})\n\n self.write({'invc_id': acc_id.id, 'inv': True})\n context = dict(self._context or {})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': context,\n }", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n\n for order in self.browse(cr, uid, ids, context=context):\n# pay_acc_id = order.partner_id.property_account_payable.id\n #use a new method to get the account_id\n pay_acc_id = self._get_inv_pay_acc_id(cr,uid,order) \n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error!'),\n _('Define purchase journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n #check if this line have quantity to generate invoice, by johnw\n if po_line.product_qty <= po_line.invoice_qty:\n continue \n# if po_line.product_id:\n# acc_id = po_line.product_id.property_account_expense.id\n# if not acc_id:\n# acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n# if not acc_id:\n# raise 
osv.except_osv(_('Error!'), _('Define expense account for this company: \"%s\" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))\n# else:\n# acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id \n #use a new method to get the account_id, by johnw \n acc_id = self._get_inv_line_exp_acc_id(cr,uid,order,po_line)\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n #update the quantity to the quantity, by johnw\n inv_line_data.update({'quantity':(po_line.product_qty - po_line.invoice_qty)})\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n \n #if no lines then return direct, by johnw\n if len(inv_lines) == 0:\n continue\n \n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)],\n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or False,\n 'payment_term': order.payment_term_id.id or False,\n 'company_id': order.company_id.id,\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def create_invoices(self, cr, uid, ids, context=None):\n invoice_list = []\n po_obj = self.pool.get('purchase.order')\n inv_line_obj = self.pool.get('account.invoice.line')\n inv_obj = self.pool.get('account.invoice')\n addr_obj = self.pool.get('res.partner')\n journal_obj = self.pool.get('account.journal')\n if context is None:\n context = {}\n\n for purchase_adv_obj in self.browse(cr, uid, ids, context=context):\n for purchase_order in po_obj.browse(cr, uid, context.get('active_ids', []), context=context):\n inv_line_ids = []\n invoice_ids = []\n val = inv_line_obj.product_id_change(cr, uid, [], purchase_adv_obj.product_id.id,\n uom_id=False, partner_id=purchase_order.partner_id.id, fposition_id=purchase_order.fiscal_position.id)\n line_id = inv_line_obj.create(cr, uid, {\n 'name': val['value']['name'],\n 'account_id': val['value']['account_id'],\n 'price_unit': purchase_adv_obj.amount,\n 'quantity': purchase_adv_obj.qtty,\n 'discount': False,\n 'uos_id': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'invoice_line_tax_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n })\n inv_line_ids.append(line_id)\n addr = addr_obj.address_get(cr, uid, [purchase_order.partner_id.id], ['invoice'])\n journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase')])\n context.update({'type':'in_invoice','journal_type':'purchase'})\n inv_vals = {\n 'name': purchase_order.partner_ref or purchase_order.name,\n 'origin': purchase_order.name,\n 'type': 'in_invoice',\n 'reference': False,\n 'account_id': purchase_order.partner_id.property_account_payable.id,\n 'journal_id':journal_ids and journal_ids[0] or False,\n 'partner_id': purchase_order.partner_id.id,\n 
'address_invoice_id': addr['invoice'],\n 'invoice_line': [(6, 0, inv_line_ids)],\n 'currency_id': purchase_order.pricelist_id.currency_id.id,\n 'comment': '',\n 'payment_term': purchase_order.payment_term_id and purchase_order.payment_term_id.id or False,\n 'fiscal_position': purchase_order.fiscal_position.id or purchase_order.partner_id.property_account_position.id,\n 'prepaid': True\n }\n\n inv_id = inv_obj.create(cr, uid, inv_vals, context=context)\n inv_obj.button_reset_taxes(cr, uid, [inv_id], context=context)\n for invoice in purchase_order.invoice_ids:\n invoice_ids.append(invoice.id)\n invoice_ids.append(inv_id)\n po_obj.write(cr, uid, purchase_order.id, {'invoice_ids': [(6, 0, invoice_ids)]})\n invoice_list.append(inv_id)\n\n if purchase_order.invoice_method in ('picking','order'):\n self.pool.get('purchase.order.line').create(cr, uid, {\n 'order_id': purchase_order.id,\n 'name': val['value']['name'],\n 'date_planned':purchase_order.date_order,\n 'price_unit': -purchase_adv_obj.amount,\n 'product_uom_qty': purchase_adv_obj.qtty,\n 'product_uos': val['value']['uos_id'],\n 'product_uom': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'adavance_product':True,\n 'discount': False,\n 'taxes_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n }, context=context)\n\n\n context.update({'invoice_id':invoice_list})\n return {\n 'name': 'Open Invoice',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'purchase.open.invoice',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'context': context\n }", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_ids = self.pool.get('account.journal').search(cr, uid,\n [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],\n limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no sales journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_id.property_account_receivable.id,\n 'journal_id': order.partner_id.property_default_sale_invoice_journal.id,\n 'partner_id': order.partner_id.id,\n 'address_invoice_id': order.partner_invoice_id.id,\n #'address_contact_id': order.partner_order_id.id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def action_create_invoice(self):\n if self.partner_id:\n supplier = self.partner_id\n else:\n supplier = self.partner_id.search(\n [(\"name\", \"=\", \"Salon Default Customer\")])\n lines = []\n product_id = self.env['product.product'].search(\n [(\"name\", \"=\", \"Salon Service\")])\n for records in self.order_line_ids:\n if product_id.property_account_income_id.id:\n income_account = product_id.property_account_income_id.id\n elif product_id.categ_id.property_account_income_categ_id.id:\n 
income_account = product_id.categ_id.\\\n property_account_income_categ_id.id\n else:\n raise UserError(\n _(\"Please define income account for this product: \"\n \"'%s' (id:%d).\") % (product_id.name, product_id.id))\n value = (0, 0, {\n 'name': records.service_id.name,\n 'account_id': income_account,\n 'price_unit': records.price,\n 'quantity': 1,\n 'product_id': product_id.id,\n })\n lines.append(value)\n invoice_line = {\n 'move_type': 'out_invoice',\n 'partner_id': supplier.id,\n 'invoice_user_id': self.env.user.id,\n 'invoice_origin': self.name,\n 'invoice_line_ids': lines,\n }\n inv = self.env['account.move'].create(invoice_line)\n action = self.env.ref('account.action_move_out_invoice_type',\n raise_if_not_found=False)\n result = {\n 'name': action.name,\n 'type': 'ir.actions.act_window',\n 'views': [[False, 'form']],\n 'target': 'current',\n 'res_id': inv.id,\n 'res_model': 'account.move',\n }\n self.inv_stage_identifier = True\n self.stage_id = 3\n invoiced_records = self.env['salon.order'].search(\n [('stage_id', 'in', [3, 4]), ('chair_id', '=', self.chair_id.id)])\n total = 0\n for rows in invoiced_records:\n invoiced_date = str(rows.date)\n invoiced_date = invoiced_date[0:10]\n if invoiced_date == str(date.today()):\n total = total + rows.price_subtotal\n self.chair_id.collection_today = total\n self.update_number_of_orders()\n return result", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n property_obj = self.pool.get('ir.property')\n\n for order in self.browse(cr, uid, ids, context=context):\n pay_acc_id = order.partner_id.property_account_payable.id\n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no purchase journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n if po_line.product_id:\n acc_id = po_line.product_id.product_tmpl_id.property_account_expense.id\n if not acc_id:\n acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n if not acc_id:\n raise osv.except_osv(_('Error !'), _('There is no expense account defined for this product: \"%s\" (id:%d)') % (po_line.product_id.name, po_line.product_id.id,))\n else:\n acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n\n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'address_invoice_id': order.partner_address_id.id,\n 'address_contact_id': order.partner_address_id.id,\n 
'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)], \n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'payment_term': order.partner_id.property_payment_term and order.partner_id.property_payment_term.id or False,\n 'company_id': order.company_id.id,\n 'add_disc': order.add_disc or 0.0\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_id = self.pool['account.invoice'].default_get(cr, uid, ['journal_id'], context=context)['journal_id']\n if not journal_id:\n raise osv.except_osv(_('Error!'),\n _('Please define sales journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_invoice_id.property_account_receivable.id,\n 'partner_id': order.partner_invoice_id.id,\n 'journal_id': journal_id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False,\n 'section_id' : order.section_id.id,\n 'test_1' :order.test\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n return invoice_vals", "def action_move_create(self, cr, uid, ids, context=None):\n ait_obj = self.pool.get('account.invoice.tax')\n cur_obj = self.pool.get('res.currency')\n period_obj = self.pool.get('account.period')\n payment_term_obj = self.pool.get('account.payment.term')\n journal_obj = self.pool.get('account.journal')\n move_obj = self.pool.get('account.move')\n if context is None:\n context = {}\n for inv in self.browse(cr, uid, ids, context=context):\n if not inv.journal_id:\n raise orm.except_orm(_('Error!'),\n _('Journal not defined for this invoice!'))\n if not inv.journal_id.iva_registry_id:\n raise orm.except_orm(_('Error!'),\n _('You must link %s with a VAT registry!') % (inv.journal_id.name))\n if not inv.journal_id.sequence_id:\n raise orm.except_orm(_('Error!'),\n _('Please define sequence on the journal related to this invoice.')) \n if not inv.invoice_line:\n raise orm.except_orm(_('No Invoice Lines!'),\n _('Please create some invoice lines.'))\n if inv.move_id:\n continue\n\n ctx = context.copy()\n ctx.update({'lang': inv.partner_id.lang})\n if not inv.date_invoice:\n self.write(cr, uid, [inv.id],\n {'date_invoice': fields.date.context_today(self,\n cr,\n uid,\n context=context)},\n context=ctx)\n company_currency = self.pool['res.company'].browse(cr, uid,\n inv.company_id.id).currency_id.id\n # create the analytical lines\n # one move line per invoice line\n # iml = self._get_analytic_lines(cr, uid, inv.id, context=ctx)\n iml = 
super(account_invoice_makeover, self)._get_analytic_lines(cr, uid, inv.id, context=ctx)\n # check if taxes are all computed\n compute_taxes = ait_obj.compute(cr, uid, inv.id, context=ctx)\n # self.check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)\n super(account_invoice_makeover, self).check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)\n\n # I disabled the check_total feature\n group_check_total_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'group_supplier_inv_check_total')[1]\n group_check_total = self.pool.get('res.groups').browse(cr, uid,\n group_check_total_id,\n context=context)\n if group_check_total and uid in [x.id for x in group_check_total.users]:\n if (inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding / 2.0)):\n raise orm.except_orm(_('Bad Total!'), _('Please verify the price of the invoice!\\nThe encoded total does not match the computed total.'))\n\n if inv.payment_term:\n total_fixed = total_percent = 0\n for line in inv.payment_term.line_ids:\n if line.value == 'fixed':\n total_fixed += line.value_amount\n if line.value == 'procent':\n total_percent += line.value_amount\n total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)\n if (total_fixed + total_percent) > 100:\n raise orm.except_orm(_('Error!'), _(\"Cannot create the invoice.\\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'.\"))\n\n # one move line per tax line\n iml += ait_obj.move_line_get(cr, uid, inv.id)\n\n# entry_type = ''\n if inv.type in ('in_invoice', 'in_refund'):\n ref = inv.reference\n# entry_type = 'journal_pur_voucher'\n# if inv.type == 'in_refund':\n# entry_type = 'cont_voucher'\n else:\n # ref = self._convert_ref(cr, uid, inv.number)\n ref = super(account_invoice_makeover, self)._convert_ref(cr, uid, inv.number)\n# entry_type = 'journal_sale_vou'\n# if inv.type == 'out_refund':\n# entry_type = 'cont_voucher'\n\n diff_currency_p = inv.currency_id.id <> company_currency\n # create one move line for the total and possibly adjust the other lines amount\n total = 0\n total_currency = 0\n # total, total_currency, iml = self.compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)\n total, total_currency, iml = super(account_invoice_makeover, self).compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)\n acc_id = inv.account_id.id\n\n name = inv['name'] or inv['supplier_invoice_number'] or '/'\n totlines = False\n if inv.payment_term:\n totlines = payment_term_obj.compute(cr,\n uid, inv.payment_term.id, total, inv.date_invoice or False, context=ctx)\n if totlines:\n res_amount_currency = total_currency\n i = 0\n ctx.update({'date': inv.date_invoice})\n for t_line in totlines:\n if inv.currency_id.id != company_currency:\n amount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t_line[1], context=ctx)\n else:\n amount_currency = False\n\n # last line add the diff\n res_amount_currency -= amount_currency or 0\n i += 1\n if i == len(totlines):\n amount_currency += res_amount_currency\n\n iml.append({\n 'type': 'dest',\n 'name': name,\n 'price': t_line[1],\n 'account_id': acc_id,\n 'date_maturity': t_line[0],\n 'amount_currency': diff_currency_p \\\n and amount_currency or False,\n 'currency_id': diff_currency_p \\\n and inv.currency_id.id or False,\n 'ref': ref,\n 
'payment_type': t_line[2]\n })\n else:\n iml.append({\n 'type': 'dest',\n 'name': name,\n 'price': total,\n 'account_id': acc_id,\n 'date_maturity': inv.date_due or False,\n 'amount_currency': diff_currency_p \\\n and total_currency or False,\n 'currency_id': diff_currency_p \\\n and inv.currency_id.id or False,\n 'ref': ref,\n 'payment_type': None\n })\n\n date = inv.date_invoice or time.strftime('%Y-%m-%d')\n\n part = self.pool.get(\"res.partner\")._find_accounting_partner(inv.partner_id)\n\n line = map(lambda x:(0, 0, self.line_get_convert(cr, uid, x, part.id, date, context=ctx)), iml)\n\n # line = self.group_lines(cr, uid, iml, line, inv)\n line = super(account_invoice_makeover, self).group_lines(cr, uid, iml, line, inv)\n\n journal_id = inv.journal_id.id\n journal = journal_obj.browse(cr, uid, journal_id, context=ctx)\n if journal.centralisation:\n raise orm.except_orm(_('User Error!'),\n _('You cannot create an invoice on a centralized journal. Uncheck the centralized counterpart box in the related journal from the configuration menu.'))\n\n line = self.finalize_invoice_move_lines(cr, uid, inv, line)\n\n move = {\n 'ref': inv.reference and inv.reference or inv.name,\n 'line_id': line,\n 'journal_id': journal_id,\n 'date': date,\n 'narration': inv.comment,\n 'company_id': inv.company_id.id,\n }\n period_id = inv.period_id and inv.period_id.id or False\n ctx.update(company_id=inv.company_id.id,\n account_period_prefer_normal=True)\n if not period_id:\n period_ids = period_obj.find(cr, uid, inv.registration_date, context=ctx)\n period_id = period_ids and period_ids[0] or False\n if period_id:\n move['period_id'] = period_id\n for i in line:\n i[2]['period_id'] = period_id\n\n ctx.update(invoice=inv)\n move_id = move_obj.create(cr, uid, move, context=ctx)\n new_move_name = move_obj.browse(cr, uid, move_id, context=ctx).name\n # make the invoice point to that move\n self.write(cr, uid, [inv.id], {'move_id': move_id, 'period_id':period_id, 'move_name':new_move_name}, context=ctx)\n # Pass invoice in context in method post: used if you want to get the same\n # account move reference when creating the same invoice after a cancelled one:\n move_obj.post(cr, uid, [move_id], context=ctx)\n # self._log_event(cr, uid, ids)\n super(account_invoice_makeover, self)._log_event(cr, uid, ids)\n return True", "def action_move_create(self, cr, uid, ids, context=None):\n ait_obj = self.pool.get('account.invoice.tax')\n cur_obj = self.pool.get('res.currency')\n period_obj = self.pool.get('account.period')\n payment_term_obj = self.pool.get('account.payment.term')\n journal_obj = self.pool.get('account.journal')\n move_obj = self.pool.get('account.move')\n if context is None:\n context = {}\n for inv in self.browse(cr, uid, ids, context=context):\n if not inv.journal_id.sequence_id:\n raise osv.except_osv(_('Error!'), _('Please define sequence on the journal related to this invoice.'))\n if not inv.invoice_line:\n raise osv.except_osv(_('No Invoice Lines!'), _('Please create some invoice lines.'))\n if inv.move_id:\n continue\n\n ctx = context.copy()\n ctx.update({'lang': inv.partner_id.lang})\n if not inv.date_invoice:\n self.write(cr, uid, [inv.id], {'date_invoice': fields.date.context_today(self,cr,uid,context=context)}, context=ctx)\n company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id\n # create the analytical lines\n # one move line per invoice line\n iml = self._get_analytic_lines(cr, uid, inv.id, context=ctx)\n # check if taxes are all computed\n 
compute_taxes = ait_obj.compute(cr, uid, inv.id, context=ctx)\n self.check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)\n\n # I disabled the check_total feature\n group_check_total_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'group_supplier_inv_check_total')[1]\n group_check_total = self.pool.get('res.groups').browse(cr, uid, group_check_total_id, context=context)\n if group_check_total and uid in [x.id for x in group_check_total.users]:\n if (inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding/2.0)):\n raise osv.except_osv(_('Bad Total!'), _('Please verify the price of the invoice!\\nThe encoded total does not match the computed total.'))\n\n if inv.payment_term:\n total_fixed = total_percent = 0\n for line in inv.payment_term.line_ids:\n if line.value == 'fixed':\n total_fixed += line.value_amount\n if line.value == 'procent':\n total_percent += line.value_amount\n total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)\n if (total_fixed + total_percent) > 100:\n raise osv.except_osv(_('Error!'), _(\"Cannot create the invoice.\\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'.\"))\n\n # one move line per tax line\n iml += ait_obj.move_line_get(cr, uid, inv.id)\n\n entry_type = ''\n if inv.type in ('in_invoice', 'in_refund'):\n ref = inv.reference\n entry_type = 'journal_pur_voucher'\n if inv.type == 'in_refund':\n entry_type = 'cont_voucher'\n else:\n ref = self._convert_ref(cr, uid, inv.number)\n entry_type = 'journal_sale_vou'\n if inv.type == 'out_refund':\n entry_type = 'cont_voucher'\n\n diff_currency_p = inv.currency_id.id <> company_currency\n # create one move line for the total and possibly adjust the other lines amount\n total = 0\n total_currency = 0\n total, total_currency, iml = self.compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)\n acc_id = inv.account_id.id\n\n name = inv['name'] or inv['supplier_invoice_number'] or '/'\n totlines = False\n # kittiu\n #if inv.payment_term:\n if inv.payment_term and not inv.date_due:\n # --\n totlines = payment_term_obj.compute(cr,\n uid, inv.payment_term.id, total, inv.date_invoice or False, context=ctx)\n if totlines:\n res_amount_currency = total_currency\n i = 0\n ctx.update({'date': inv.date_invoice})\n for t in totlines:\n if inv.currency_id.id != company_currency:\n amount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t[1], context=ctx)\n else:\n amount_currency = False\n\n # last line add the diff\n res_amount_currency -= amount_currency or 0\n i += 1\n if i == len(totlines):\n amount_currency += res_amount_currency\n\n iml.append({\n 'type': 'dest',\n 'name': name,\n 'price': t[1],\n 'account_id': acc_id,\n 'date_maturity': t[0],\n 'amount_currency': diff_currency_p \\\n and amount_currency or False,\n 'currency_id': diff_currency_p \\\n and inv.currency_id.id or False,\n 'ref': ref,\n })\n else:\n iml.append({\n 'type': 'dest',\n 'name': name,\n 'price': total,\n 'account_id': acc_id,\n 'date_maturity': inv.date_due or False,\n 'amount_currency': diff_currency_p \\\n and total_currency or False,\n 'currency_id': diff_currency_p \\\n and inv.currency_id.id or False,\n 'ref': ref\n })\n\n date = inv.date_invoice or time.strftime('%Y-%m-%d')\n\n part = 
self.pool.get(\"res.partner\")._find_accounting_partner(inv.partner_id)\n\n line = map(lambda x:(0,0,self.line_get_convert(cr, uid, x, part.id, date, context=ctx)),iml)\n\n line = self.group_lines(cr, uid, iml, line, inv)\n\n journal_id = inv.journal_id.id\n journal = journal_obj.browse(cr, uid, journal_id, context=ctx)\n if journal.centralisation:\n raise osv.except_osv(_('User Error!'),\n _('You cannot create an invoice on a centralized journal. Uncheck the centralized counterpart box in the related journal from the configuration menu.'))\n\n line = self.finalize_invoice_move_lines(cr, uid, inv, line)\n\n move = {\n 'ref': inv.reference and inv.reference or inv.name,\n 'line_id': line,\n 'journal_id': journal_id,\n 'date': date,\n 'narration': inv.comment,\n 'company_id': inv.company_id.id,\n }\n period_id = inv.period_id and inv.period_id.id or False\n ctx.update(company_id=inv.company_id.id,\n account_period_prefer_normal=True)\n if not period_id:\n period_ids = period_obj.find(cr, uid, inv.date_invoice, context=ctx)\n period_id = period_ids and period_ids[0] or False\n if period_id:\n move['period_id'] = period_id\n for i in line:\n i[2]['period_id'] = period_id\n\n ctx.update(invoice=inv)\n move_id = move_obj.create(cr, uid, move, context=ctx)\n new_move_name = move_obj.browse(cr, uid, move_id, context=ctx).name\n # make the invoice point to that move\n self.write(cr, uid, [inv.id], {'move_id': move_id,'period_id':period_id, 'move_name':new_move_name}, context=ctx)\n # Pass invoice in context in method post: used if you want to get the same\n # account move reference when creating the same invoice after a cancelled one:\n move_obj.post(cr, uid, [move_id], context=ctx)\n self._log_event(cr, uid, ids)\n return True", "def action_invoice_create(self, grouped=False, final=False):\n if self.invoice_option == 'before_delivery':\n inv_obj = self.env['account.invoice']\n for order in self:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n for inv_line in order.order_line:\n inv_line.invoice_line_create(invoice.id, inv_line.product_uom_qty)\n\n else:\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n invoices_origin = {}\n invoices_name = {}\n\n # Keep track of the sequences of the lines\n # To keep lines under their section\n inv_line_sequence = 0\n for order in self:\n group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)\n\n # We only want to create sections that have at least one invoiceable line\n pending_section = None\n\n # Create lines in batch to avoid performance problems\n line_vals_list = []\n # sequence is the natural order of order_lines\n for line in order.order_line:\n if line.display_type == 'line_section':\n pending_section = line\n continue\n if float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoices_origin[group_key] = [invoice.origin]\n invoices_name[group_key] = [invoice.name]\n elif group_key in invoices:\n if order.name not in invoices_origin[group_key]:\n invoices_origin[group_key].append(order.name)\n if order.client_order_ref and order.client_order_ref not in invoices_name[group_key]:\n invoices_name[group_key].append(order.client_order_ref)\n\n if line.qty_to_invoice > 0 or (line.qty_to_invoice < 0 
and final):\n if pending_section:\n section_invoice = pending_section.invoice_line_create_vals(\n invoices[group_key].id,\n pending_section.qty_to_invoice\n )\n inv_line_sequence += 1\n section_invoice[0]['sequence'] = inv_line_sequence\n line_vals_list.extend(section_invoice)\n pending_section = None\n\n inv_line_sequence += 1\n inv_line = line.invoice_line_create_vals(\n invoices[group_key].id, line.qty_to_invoice\n )\n inv_line[0]['sequence'] = inv_line_sequence\n line_vals_list.extend(inv_line)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n\n self.env['account.invoice.line'].create(line_vals_list)\n\n for group_key in invoices:\n invoices[group_key].write({'name': ', '.join(invoices_name[group_key]),\n 'origin': ', '.join(invoices_origin[group_key])})\n sale_orders = references[invoices[group_key]]\n if len(sale_orders) == 1:\n invoices[group_key].reference = sale_orders.reference\n\n if not invoices:\n raise UserError(_(\n 'There is no invoiceable line. If a product has a Delivered quantities invoicing policy, please make sure that a quantity has been delivered.'))\n\n for invoice in invoices.values():\n invoice.compute_taxes()\n if not invoice.invoice_line_ids:\n raise UserError(_(\n 'There is no invoiceable line. If a product has a Delivered quantities invoicing policy, please make sure that a quantity has been delivered.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_total < 0:\n invoice.type = 'out_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Use additional field helper function (for account extensions)\n for line in invoice.invoice_line_ids:\n line._set_additional_fields(invoice)\n # Necessary to force computation of taxes. 
In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n # Idem for partner\n so_payment_term_id = invoice.payment_term_id.id\n fp_invoice = invoice.fiscal_position_id\n invoice._onchange_partner_id()\n invoice.fiscal_position_id = fp_invoice\n # To keep the payment terms set on the SO\n invoice.payment_term_id = so_payment_term_id\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def action_create_invoices(self, data):\n invoice_obj = self.env['account.invoice']\n values = {}\n for val in data:\n values.setdefault(val['invoice_type'], {\n 'order': val.get('sale', val.get('purchase')),\n 'values': []\n })\n values[val['invoice_type']]['values'].append((0, 0, val['values']))\n\n for inv_type, inv_data in values.items():\n invoice = invoice_obj.new(self._prepare_invoice(inv_type))\n invoice._onchange_partner_id()\n inv = invoice._convert_to_write({\n name: invoice[name] for name in invoice._cache\n })\n for _, _, line in inv_data['values']:\n line['account_id'] = inv['account_id']\n inv['invoice_line_ids'] = inv_data['values']\n new_invoice = invoice_obj.sudo().create(inv)\n new_invoice.action_invoice_open()\n inv_data['order'].write({\n 'exchange_invoice_ids': [(4, new_invoice.id)]\n })", "def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].sudo().precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)\n for line in order.order_line.sorted(key=lambda l: l.qty_to_invoice < 0):\n if float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.sudo().create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoice['sale_order_id'] = order.id\n elif group_key in invoices:\n vals = {}\n if order.name not in invoices[group_key].origin.split(', '):\n vals['origin'] = invoices[group_key].origin + ', ' + order.name\n if order.client_order_ref and order.client_order_ref not in invoices[group_key].name.split(\n ', ') and order.client_order_ref != invoices[group_key].name:\n vals['name'] = invoices[group_key].name + ', ' + order.client_order_ref\n invoices[group_key].sudo().write(vals)\n if line.qty_to_invoice > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)\n elif line.qty_to_invoice < 0 and final:\n line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n if not invoices:\n raise UserError(_('There is no invoiceable line.'))\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoiceable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_untaxed < 0:\n invoice.type = 'out_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Use additional field helper function (for account extensions)\n for line in invoice.invoice_line_ids:\n line._set_additional_fields(invoice)\n # Necessary to force computation of taxes. 
In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def merge_purchase_invoice(self):\r\n active_id = self.env['purchase.order'].browse(self.env['purchase.order']._context.get('active_ids'))\r\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')]) \r\n active_id_count = 0\r\n active_count = 0\r\n exist_vendor = []; invoice = [];exist_vendors = [];ctx = ();invoice_id = []\r\n for rec in active_id : \r\n po_reference = self.env['account.invoice'].search([('origin', 'like', rec.name)])\r\n active_count = len(active_id)\r\n if rec.picking_count >= 1 and rec.picking_count != rec.invoice_count:\r\n len_name = [] \r\n for inv in po_reference: \r\n len_name = inv.origin.split(\":\") \r\n if rec.name in len_name:\r\n if po_reference.state == 'draft':\r\n for record in po_reference.invoice_line_ids:\r\n print (record.line_id)\r\n for res in rec.order_line:\r\n if res.id == record.line_id: \r\n record.write({'quantity':res.qty_received})\r\n res.write({'qty_invoiced':record.quantity})\r\n \r\n else:\r\n \r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received \r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':datetime.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n else:\r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to 
invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':date.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n invoices = []\r\n invoice_counts = 0\r\n for record in invoice:\r\n invoice_id = self.env['account.invoice'].create(record)\r\n invoices.append(invoice_id.id)\r\n invoice_counts = len(invoices)\r\n if active_id_count == 1:\r\n if invoice_counts == 1:\r\n form_view = self.env.ref('purchase.view_invoice_supplier_purchase_form').id\r\n tree_view = self.env.ref('account.invoice_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'res_id':invoices[0],\r\n 'views_id':False,\r\n 'views':[(form_view , 'form'), (tree_view , 'tree')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n } \r\n else: \r\n form_view = self.env.ref('account.invoice_supplier_form').id\r\n tree_view = self.env.ref('account.invoice_supplier_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'views_id':True,\r\n 'views':[(tree_view , 'tree'), (form_view , 'form')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n }", "def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n invoices_origin = {}\n invoices_name = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_id.id, order.currency_id.id)\n for line in 
order.order_line.sorted(key=lambda l: l.qty_received - l.qty_invoiced < 0):\n if float_is_zero(line.qty_received - line.qty_invoiced, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoices_origin[group_key] = [invoice.origin]\n invoices_name[group_key] = [invoice.name]\n elif group_key in invoices:\n if order.name not in invoices_origin[group_key]:\n invoices_origin[group_key].append(order.name)\n if order.partner_ref and order.partner_ref not in invoices_name[group_key]:\n invoices_name[group_key].append(order.partner_ref)\n\n if line.qty_received - line.qty_invoiced > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_received - line.qty_invoiced)\n elif line.qty_received - line.qty_invoiced < 0 and final:\n line.invoice_line_create(invoices[group_key].id, line.qty_received - line.qty_invoiced)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n\n for group_key in invoices:\n invoices[group_key].write({'name': ', '.join(invoices_name[group_key]),\n 'origin': ', '.join(invoices_origin[group_key])})\n\n if not invoices:\n raise UserError(_('There is no invoicable line.'))\n\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoicable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_total < 0:\n invoice.type = 'in_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Necessary to force computation of taxes. In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def create_landlord_invoice(self):\n if self.tenancy_id.is_landlord_rent:\n account_jrnl_obj = self.env['account.journal'].search(\n [('type', '=', 'purchase')], limit=1)\n inv_lines_values = {\n # 'origin': 'tenancy.rent.schedule',\n 'name': 'Rent Cost for' + self.tenancy_id.name,\n 'quantity': 1,\n 'price_unit': self.amount or 0.00,\n 'account_id':\n self.tenancy_id.property_id.account_depreciation_expense_id.id or False,\n 'analytic_account_id': self.tenancy_id.id or False,\n }\n owner_rec = self.tenancy_id.property_owner_id\n invo_values = {\n 'partner_id': self.tenancy_id.property_owner_id.id or False,\n 'type': 'in_invoice',\n 'invoice_line_ids': [(0, 0, inv_lines_values)],\n 'property_id': self.tenancy_id.property_id.id or False,\n 'invoice_date': self.start_date or False,\n # 'account_id': owner_rec.property_account_payable_id.id,\n # 'schedule_id': self.id,\n 'new_tenancy_id': self.tenancy_id.id,\n 'journal_id': account_jrnl_obj.id or False\n }\n\n acc_id = self.env['account.move'].with_context({'default_type': 'in_invoice'}).create(invo_values)\n self.write({'invc_id': acc_id.id, 'inv': True})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': self._context,\n }", "def action_invoice_create(self, cr, 
uid, ids, grouped=False, states=None, date_invoice=False, context=None):\n order = self.browse(cr, uid, ids[0], context=context)\n inv_obj = self.pool.get('account.invoice')\n # create the invoice\n inv_id = super(sale_order, self).action_invoice_create(cr, uid, ids, grouped, states, date_invoice, context=context)\n # modify the invoice\n inv_obj.write(cr, uid, [inv_id], {'past_doc': order.past_doc})\n return inv_id", "def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n },\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data", "def invoice_line_create(self, invoice_id, qty):\n invoice_lines = self.env['account.invoice.line']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n for line in self:\n if not float_is_zero(qty, precision_digits=precision):\n vals = line._prepare_invoice_line(qty=qty)\n vals.update({'invoice_id': invoice_id, 'purchase_line_id': line.id})\n invoice_lines |= self.env['account.invoice.line'].create(vals)\n return invoice_lines", "def make_invoices(self):\n for invoice in self.policy.invoices:\n db.session.delete(invoice)\n db.session.commit()\n\n billing_schedules = {'Annual': None, 'Semi-Annual': 3, 'Quarterly': 4, 'Monthly': 12}\n\n invoices = []\n first_invoice = Invoice(self.policy.id,\n self.policy.effective_date, # bill_date\n self.policy.effective_date + relativedelta(months=1), # due\n self.policy.effective_date + relativedelta(months=1, days=14), # cancel\n self.policy.annual_premium)\n invoices.append(first_invoice)\n\n if self.policy.billing_schedule == \"Annual\":\n pass\n elif self.policy.billing_schedule == \"Two-Pay\":\n first_invoice.amount_due = first_invoice.amount_due / billing_schedules.get(self.policy.billing_schedule)\n for i in range(1, billing_schedules.get(self.policy.billing_schedule)):\n months_after_eff_date = i*6\n bill_date = self.policy.effective_date + relativedelta(months=months_after_eff_date)\n invoice = Invoice(self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium / billing_schedules.get(self.policy.billing_schedule))\n invoices.append(invoice)\n elif self.policy.billing_schedule == \"Quarterly\":\n first_invoice.amount_due = first_invoice.amount_due / billing_schedules.get(self.policy.billing_schedule)\n for i in range(1, 
billing_schedules.get(self.policy.billing_schedule)):\n months_after_eff_date = i*3\n bill_date = self.policy.effective_date + relativedelta(months=months_after_eff_date)\n invoice = Invoice(self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium / billing_schedules.get(self.policy.billing_schedule))\n invoices.append(invoice)\n elif self.policy.billing_schedule == \"Monthly\":\n first_invoice.amount_due = first_invoice.amount_due / billing_schedules.get(self.policy.billing_schedule)\n for i in range(1, billing_schedules.get(self.policy.billing_schedule)):\n months_after_eff_date = i\n bill_date = self.policy.effective_date + relativedelta(months=months_after_eff_date)\n invoice = Invoice(self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium / billing_schedules.get(self.policy.billing_schedule))\n invoices.append(invoice)\n else:\n print \"You have chosen a bad billing schedule.\"\n\n logger.info(str(len(invoices)) + \" invoices generated for policy %s\" % self.policy.id)\n\n for invoice in invoices:\n db.session.add(invoice)\n db.session.commit()", "def _prepare_invoice(self):\n self.ensure_one()\n journal_id = self.env['account.invoice'].default_get(['journal_id'])['journal_id']\n if not journal_id:\n raise UserError(_('Please define an accounting sales journal for this company.'))\n invoice_vals = {\n 'name': self.client_order_ref or '',\n 'origin': self.name,\n 'type': 'out_invoice',\n 'account_id': self.partner_invoice_id.property_account_receivable_id.id,\n 'partner_id': self.partner_invoice_id.id,\n 'partner_shipping_id': self.partner_shipping_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.pricelist_id.currency_id.id,\n 'comment': self.note,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'user_id': self.user_id and self.user_id.id,\n 'team_id': self.team_id.id,\n 'x_studio_field_rgEdd': self.x_studio_field_icWOZ.id,\n 'x_studio_car_type_1': self.vehicle.id,\n 'x_studio_job_card_1': self.x_studio_agency_job_card,\n 'x_studio_car_type_name': self.vehicle.model_id.name,\n 'x_studio_plate_num': self.vehicle.license_plate,\n 'x_studio_claim_num': self.claim_no,\n\n 'x_studio_is_insured':self.is_insured,\n 'x_studio_service_provider': self.service_advisor.id,\n 'date_invoice': fields.Date.today(),\n 'transaction_ids': [(6, 0, self.transaction_ids.ids)],\n }\n return invoice_vals\n\n # 'x_studio_field_rgEdd':order.x_studio_field_icWOZ.id,", "def action_generate_lines_txt(self):\n rp_obj = self.env['res.partner']\n voucher_obj = self.env['account.wh.iva']\n txt_iva_obj = self.env['txt.iva.line']\n vouchers = []\n txt_brw = self.browse(self._ids)[0]\n txt_ids = txt_iva_obj.search([('txt_id', '=', txt_brw.id)])\n if txt_ids:\n txt_ids.unlink()\n\n if txt_brw.type:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['in_invoice', 'in_refund'])])\n else:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['out_invoice', 'out_refund'])])\n\n for voucher in 
vouchers:\n acc_part_id = rp_obj._find_accounting_partner(voucher.partner_id)\n for voucher_lines in voucher.wh_lines:\n if voucher_lines.invoice_id.state not in ['open', 'paid']:\n continue\n for voucher_tax_line in voucher_lines.tax_line:\n txt_iva_obj.create(\n {'partner_id': acc_part_id.id,\n 'voucher_id': voucher.id,\n 'invoice_id': voucher_lines.invoice_id.id,\n 'txt_id': txt_brw.id,\n 'untaxed': voucher_tax_line.base,\n 'amount_withheld': voucher_tax_line.amount_ret,\n 'tax_wh_iva_id': voucher_tax_line.id,\n })\n return True", "def create_invoice(self):\n sales_tax = 0.06\n item_sum = 0\n inv = f'Invoice#: {self.invoice_id}\\n'\n for key, value in self.items_with_price.items():\n item_sum += value\n inv += f'{key}.....${value:.2f}\\n'\n\n tax = item_sum * sales_tax\n inv += f'Tax.....${tax:.2f}\\n'\n inv += f'Total.....${tax + item_sum:.2f}'\n # print(inv)\n # returning for unit testing purposes\n return inv", "def _create_payments(self, invoice):\n self.ensure_one()\n if self.schedule_id and self.schedule_id.occurences > 0:\n # TODO: make more intelligent price cut\n amount = invoice.amount_total\n amount_per_occurence = amount / self.schedule_id.occurences\n for day in self.schedule_id.day_ids:\n payment = self.env['account.payment'].new({\n 'payment_type': 'inbound',\n 'partner_type': 'customer',\n 'partner_id': self.member_id.partner_id.id,\n 'amount': amount_per_occurence,\n 'payment_date': day.day,\n 'journal_id': self.journal_id.id,\n })\n payment._onchange_journal()\n payment_values = dict(payment._cache)\n payment = self.env['account.payment'].create(payment_values)\n payment.invoice_ids = [(4, invoice.id, False)]", "def _prepare_invoice(self):\n # get current logged in user's timezone\n local = pytz.timezone(self.env['res.users'].browse(self._uid).tz) or pytz.utc\n\n self.ensure_one()\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')], limit=1).id\n if not journal_id:\n raise UserError(_('Please define an accounting purchase journal for this company.'))\n invoice_vals = {\n 'name': self.partner_ref or '',\n 'origin': self.name,\n 'type': 'in_invoice',\n 'account_id': self.partner_id.property_account_payable_id.id,\n 'partner_id': self.partner_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.currency_id.id,\n 'comment': self.notes,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'purchase_id': self.id,\n 'date_invoice':pytz.utc.localize(datetime.datetime.now()).astimezone(local).strftime('%Y-%m-%d'),\n }\n return invoice_vals", "def duplicate_invoice(invoice):\n from invoicer.models import Invoice\n from invoicer.models import LineItem\n\n # copy main attributes\n new_invoice = Invoice(\n company=invoice.company,\n invoice_date=datetime.now(),\n client=invoice.client,\n location=invoice.location,\n tax_rate=invoice.tax_rate,\n left_address=invoice.left_address,\n right_address=invoice.right_address,\n terms=invoice.terms,\n footer=invoice.footer\n )\n new_invoice.save()\n\n # now line items\n for line_item in invoice.line_items.all():\n new_invoice.line_items.add(LineItem(\n name=line_item.name,\n description=line_item.description,\n price=line_item.price,\n taxable=line_item.taxable,\n item=line_item.item,\n quantity=line_item.quantity\n ))\n\n return new_invoice", "def create_customer_df_invoice_line(self, customerID, list_stockCode\\\n , list_quantity, invoiceDate):\n \n dict_invoice = dict()\n\n 
dict_invoice['Quantity'] = list_quantity\n dict_invoice['StockCode'] = list_stockCode\n\n #------------------------------------------------------------------------\n # Build invoiceDate from local current time\n #------------------------------------------------------------------------\n if invoiceDate is None:\n time_struct = time.localtime()\n invoiceDate = str(time_struct.tm_year)+'-'+str(time_struct.tm_mon)\\\n +'-'+str(time_struct.tm_mday)\n invoiceDate +=' '\n invoiceDate +=str(time_struct.tm_hour)+':'+str(time_struct.tm_min)\\\n +':'+str(time_struct.tm_sec)\n invoiceDate = pd.Timestamp(invoiceDate)\n else:\n pass\n\n\n #------------------------------------------------------------------------\n # Lists initialization\n #------------------------------------------------------------------------\n list_customerID = list()\n list_invoiceNo = list()\n list_invoiceDate = list()\n list_invoice_line_index = list()\n \n #------------------------------------------------------------------------\n # Increase Invoice number\n #------------------------------------------------------------------------\n invoiceNo = max(self._df_invoice_original.InvoiceNo)\n invoiceNo += 1\n\n #------------------------------------------------------------------------\n # Get latest invoice line index value\n #------------------------------------------------------------------------\n invoice_line_index = max(self._df_invoice_original.index)\n\n #------------------------------------------------------------------------\n # Build lists for CustomerID, InvoiceNo, InvoiceDate\n # A list of incremented indexes is built for new rows.\n #------------------------------------------------------------------------\n for quantity in list_quantity:\n list_customerID.append(customerID)\n list_invoiceNo.append(invoiceNo)\n list_invoiceDate.append(invoiceDate)\n invoice_line_index += 1\n list_invoice_line_index.append(invoice_line_index) \n\n \n dict_invoice['CustomerID'] = list_customerID\n dict_invoice['InvoiceNo'] = list_invoiceNo\n dict_invoice['InvoiceDate'] = list_invoiceDate\n\n #------------------------------------------------------------------------\n # Get description list from list of stock codes.\n #------------------------------------------------------------------------\n list_description = self.getDescriptionList(list_stockCode)\n \n dict_invoice['Description'] = list_description\n\n #------------------------------------------------------------------------\n # Get unit price list from list of stock codes.\n #------------------------------------------------------------------------\n list_unitPrice = self.getUnitPriceList(list_stockCode)\n \n dict_invoice['UnitPrice'] = list_unitPrice\n\n #------------------------------------------------------------------------\n # Dataframe with new invoices lines is created.\n #------------------------------------------------------------------------\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys()\\\n , index=list_invoice_line_index)\n \n return df_invoice_line", "def action_move_create(self):\n inv_obj = self.env['account.invoice']\n context = dict(self._context or {})\n context.update({'wh_src': True})\n ret = self.browse(self.ids[0])\n for line in ret.line_ids:\n if line.move_id:\n raise exceptions.except_orm(\n _('Invoice already withhold !'),\n _(\"You must omit the follow invoice '%s' !\") %\n (line.invoice_id.number,))\n\n acc_id = ret.account_id.id\n journal_id = ret.journal_id.id\n demo_enabled = self.env['ir.module.module'].search(\n [('name', '=', 
'base'), ('demo', '=', True)])\n args = [('id', 'in')]\n if not demo_enabled:\n args.append(('special', '=', False))\n\n if ret.line_ids:\n for line in ret.line_ids:\n writeoff_account_id, writeoff_journal_id = False, False\n amount = line.wh_amount\n if line.invoice_id.type in ['in_invoice', 'in_refund']:\n name = 'COMP. RET. CRS ' + ret.number + ' Doc. ' + (\n line.invoice_id.supplier_invoice_number or '')\n else:\n name = 'COMP. RET. CRS ' + ret.number + ' Doc. ' + (\n line.invoice_id.number or '')\n # ret_move = inv_obj.ret_and_reconcile(\n # self, [line.invoice_id.id], amount, acc_id,\n # journal_id, writeoff_account_id,\n # writeoff_journal_id, ret.date_ret, name, [line]\n # )\n # rl = {\n # 'move_id': ret_move['move_id'],\n # }\n #lines = [(1, line.id)]\n self.write({'line_ids': line})\n\n if (line.invoice_id.type in [\n 'out_invoice', 'out_refund']):\n inv_obj.write({'wh_src_id': ret.id})\n else:\n return False\n return True", "def create_invoice(sender, invoice, issuer_details, **kwargs):\n if not invoice.items:\n return\n\n price = sum([item.price for item in invoice.items.all()])\n\n if not price:\n return\n\n paypal_invoice = models.Invoice(\n customer=invoice.customer,\n year=invoice.year,\n month=invoice.month,\n invoice_date=invoice.invoice_date,\n end_date=invoice.due_date,\n tax_percent=invoice.tax_percent,\n issuer_details=issuer_details,\n )\n\n paypal_invoice.payment_details = {\n 'name': invoice.customer.name,\n 'address': invoice.customer.address,\n 'country': invoice.customer.country,\n 'country_name': invoice.customer.get_country_display(),\n 'email': invoice.customer.email,\n 'postal': invoice.customer.postal,\n 'phone_number': invoice.customer.phone_number,\n 'bank_name': invoice.customer.bank_name,\n 'bank_account': invoice.customer.bank_account,\n }\n\n paypal_invoice.save()\n\n for item in invoice.items.all():\n models.InvoiceItem.objects.create(\n invoice=paypal_invoice,\n price=item.price,\n tax=item.tax,\n quantity=item.quantity,\n unit_price=item.unit_price,\n unit_of_measure=helpers.convert_unit_of_measure(item.unit),\n name=item.name,\n start=item.start,\n end=item.end,\n )", "def _prepare_invoice(self):\n self.ensure_one()\n # journal_id = self.env['account.invoice'].with_context(force_company=self.env.user.company_id.id).default_get(['journal_id'])['journal_id']\n journal_id = self.company_id.journal_id.id\n if not journal_id:\n raise UserError(_('Please define an accounting sales journal for this company.'))\n invoice_vals = {\n 'name': self.client_order_ref or '',\n 'origin': self.name,\n 'type': 'out_invoice',\n 'account_id': self.partner_invoice_id.property_account_receivable_id.id,\n 'partner_id': self.partner_invoice_id.id,\n 'partner_shipping_id': self.partner_shipping_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.pricelist_id.currency_id.id,\n 'comment': self.note,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'user_id': self.user_id and self.user_id.id,\n 'team_id': self.team_id.id\n }\n return invoice_vals", "def pl_create_order(self):\n\tprint()\n\tprint('Pl - Create Order')\n\n\n\tpartner = self.env['res.partner'].search([\n\t\t\t\t\t\t\t\t\t\t\t\t\t('name', '=', self.patient.name),\n\t\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t\t\t#order='appointment_date desc',\n\t\t\t\t\t\t\t\t\t\t\t\tlimit=1,)\n\n\n\t# Create Order\n\torder = 
self.env['sale.order'].create({\n\t\t\t\t\t\t\t\t\t\t\t\t\t'state':'draft',\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_doctor': self.physician.id,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'partner_id': self.partner_id.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'partner_id': partner.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'x_ruc': self.partner_id.x_ruc,\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'x_dni': self.partner_id.x_dni,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t'patient': self.patient.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_id_doc': self.patient.x_id_doc,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_id_doc_type': self.patient.x_id_doc_type,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_family': 'procedure',\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t'treatment': self.id,\n\t\t\t\t\t\t\t\t\t\t\t\t})\n\t#print(order)\n\n\n\n\t# Create Order Lines\n\tfor cart_line in self.shopping_cart_ids:\n\n\t\tproduct = cart_line.product\n\n\t\t#print(product)\n\t\t#print(product.name)\n\n\t\t# Create Order Line\n\t\tol = order.order_line.create({\n\t\t\t\t\t\t\t\t\t\t'name': \t\tproduct.name,\n\t\t\t\t\t\t\t\t\t\t'product_id': \tproduct.id,\n\t\t\t\t\t\t\t\t\t\t'price_unit': \tcart_line.price,\n\t\t\t\t\t\t\t\t\t\t'product_uom_qty': cart_line.qty,\n\t\t\t\t\t\t\t\t\t\t'order_id': \torder.id,\n\t\t\t\t\t\t\t\t\t})\n\treturn order\n\n\t# pl_create_order", "def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id,\n invoice_vals, context=None):\n result = super(stock_picking, self)._prepare_invoice_line(cr, uid, group, picking, move_line, invoice_id,\n invoice_vals, context=None)\n \n result['discount'] = self._get_discount_invoice(cr, uid, move_line)\n result['discount2'] = self._get_discount2_invoice(cr, uid, move_line)\n result['price_unit'] = self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type'], context=None)\n return result", "def generate_orders(self, cr, uid, ids, context=None):\n voucher_pool = self.pool.get('account.voucher')\n payment_term_obj = self.pool.get('account.payment.term')\n account_budget_confirmation_obj = self.pool.get('account.budget.confirmation')\n period_obj = self.pool.get('account.period')\n if context is None:\n context = {}\n for order in self.browse(cr, uid, ids, context=context):\n #################################to remind\n total_fixed = total_percent = 0\n for line in order.payment_term.line_ids:\n if line.value == 'fixed':\n total_fixed += line.value_amount\n if line.value == 'procent':\n total_percent += line.value_amount\n total_fixed = (total_fixed * 100) / (order.amount or 1.0)\n if (total_fixed + total_percent) > 100:\n raise orm.except_orm(_('Error!'), _(\"Can not create the payments !\\n\\\n The related payment term is probably miss configured as it gives a computed amount greater than the total permanent payment amount. 
\\\n The latest line of your payment term must be of type 'balance' to avoid rounding issues.\"))\n # create one move line for the total and possibly adjust the other lines amount\n totlines1 = []\n for o in order.line_ids:\n totlines1 += payment_term_obj.compute(cr, uid, order.payment_term.id, o.amount, order.date or False, context=context)\n \n d = {}\n for k, v in totlines1:\n d.setdefault(k, [k]).append(v)\n totlines = map(tuple, d.values())\n\n for t in totlines :\n #to substract date from the interval number \n order_date = t[0]\n entered_date = datetime.datetime.strptime(order_date, '%Y-%m-%d')\n entered_date = entered_date.date()\n account_id = (order.partner_id.property_account_payable and order.partner_id.property_account_payable.id) or \\\n (order.journal_id.default_credit_account_id and order.journal_id.default_credit_account_id.id)\n period_id = period_obj.find(cr, uid, t[0], context=context)[0]\n\n list_confirm = [conf.id for conf in o.confirmation_ids]\n confirmations = account_budget_confirmation_obj.search(cr, uid, [('id','in', list_confirm),('period_id','=', period_id)], context=context) #('date','=',t[0]),\n\n for confirm in confirmations:\n confirm_id = confirm\n\n voucher_lines = [(0, 0, {'name':ol.name, 'account_id':ol.account_id.id, 'type':'dr',\n 'amount':t[count + 1], 'account_analytic_id':ol.account_analytic_id.id, 'budget_confirm_id': confirm_id })\n for count, ol in enumerate(order.line_ids)]\n res = voucher_pool.onchange_price(cr, uid, 0, voucher_lines, [], partner_id=order.partner_id.id, context=context).get(\"value\", {})\n voucher_dict = {\n 'partner_id' : order.partner_id.id,\n 'account_id': account_id,\n 'company_id' : order.company_id.id,\n 'journal_id' : order.journal_id.id,\n 'period_id': order.period_id.id,\n 'type':'purchase',\n 'date' : t[0],\n 'reference': order.name,\n 'payment_permanent_voucher_id': order.id,\n 'line_ids':voucher_lines,\n 'amount':res.get(\"amount\", 0.0)\n }\n voucher_pool.create(cr, uid, voucher_dict, context=context)\n return self.write(cr, uid, ids, {'state':'done'}, context=context)", "def invoice(self, start, end):\n\n if self.invoice_type is None:\n invoice_type = self.conn.config[\"main\"][\"invoice:object\"]\n if \":\" not in invoice_type:\n raise AttributeError(\"Invoice configuration incorrect! 
%s\" % invoice_type)\n module, call = invoice_type.split(\":\")\n _package = __import__(module, globals(), locals(), [ call ])\n\n funct = getattr(_package, call)\n self.invoice_type = funct\n config = self.conn.config[\"invoice_object\"]\n invoice = self.invoice_type(self, config)\n return invoice", "def create_order(cls, invoice):\n order = cls(\n order_id=str(uuid.uuid4().int),\n invoice=invoice\n ).save()\n\n invoice_line_items = InvoiceLineItem.objects.filter(invoice=invoice, type=\"item\").all()\n\n for invoice_line_item in invoice_line_items:\n OrderLineItem.create_order_line_item(order=order, invoice_line_item=invoice_line_item)\n\n return order", "def _prepare_invoice_line(self, qty):\n self.ensure_one()\n res = {\n 'name': self.name,\n 'sequence': self.sequence,\n 'origin': self.order_id.name,\n 'account_id': self.product_id.product_tmpl_id._get_product_accounts()['stock_input'].id,\n 'price_unit': self.price_unit,\n 'quantity': qty,\n 'uom_id': self.product_uom.id,\n 'product_id': self.product_id.id or False,\n 'invoice_line_tax_ids': [(6, 0, self.taxes_id.ids)],\n 'account_analytic_id': self.account_analytic_id.id,\n 'analytic_tag_ids': [(6, 0, self.analytic_tag_ids.ids)],\n }\n return res", "def create_or_find_b2b_invoices_and_process_ept(self, row, sale_order, invoice_date, tax):\n\n vat_number = row.get('Buyer Tax Registration', False)\n invoice_number = row.get('VAT Invoice Number', False)\n\n invoices = sale_order.invoice_ids.filtered(\n lambda x: x.type == 'out_invoice' and x.state != 'cancel')\n if not invoices:\n lines = sale_order.order_line.filtered(lambda line: line.qty_to_invoice > 0)\n if not lines:\n return False\n invoices = sale_order._create_invoices()\n self.write({'invoice_ids': [(4, invoices and invoices.id)]})\n\n for invoice in invoices:\n if not invoice.partner_id.vat:\n invoice.partner_id.vat = vat_number\n\n payments_lines = []\n if invoice.invoice_payments_widget != 'false':\n payments_dict = json.loads(invoice.invoice_payments_widget)\n payments_content = payments_dict.get('content', [])\n for line in payments_content:\n payments_lines.append(line.get('payment_id', False))\n\n invoice_line = invoice.mapped('invoice_line_ids').filtered(\\\n lambda line: line.tax_ids != tax)\n if invoice_line:\n invoice.button_draft()\n invoice.write({'ref': invoice_number, 'date': invoice_date})\n\n if len(invoice_line) > 1:\n for line in invoice_line:\n line.with_context({'check_move_validity': False}).write( \\\n {'tax_ids': [(6, 0, [tax.id])]})\n else:\n invoice_line.with_context({'check_move_validity': False}).write( \\\n {'tax_ids': [(6, 0, [tax.id])]})\n\n invoice.with_context({'check_move_validity': False})._recompute_tax_lines( \\\n recompute_tax_base_amount=True)\n invoice.action_post()\n for line in payments_lines:\n invoice.js_assign_outstanding_line(line)\n\n return True", "def generate_eob(\n self, date_of_service, date_of_eob, insured, invoice_id, cpt_code, charge_amount\n ):\n if insured == \"insured\":\n # first copayments\n copay_amount = np.random.choice(\n self.distributions[\"copay_amounts\"],\n 1,\n p=self.distributions[\"copay_distribution\"],\n )[0]\n if copay_amount > 0:\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_service],\n \"copay_amount\": [copay_amount],\n \"adjustment_amount\": [0],\n \"paid_amount\": [0],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n remaining_charge = charge_amount - copay_amount\n 
else:\n remaining_charge = charge_amount\n # next eob discounts\n eob_discount_percent = np.random.choice(\n self.distributions[\"eob_discount_percentages\"],\n 1,\n p=self.distributions[\"eob_discount_distribution\"],\n )[0]\n if eob_discount_percent > 0:\n insurance_adjustment = remaining_charge * eob_discount_percent / 100\n remaining_charge = remaining_charge - insurance_adjustment\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_eob],\n \"copay_amount\": [0],\n \"adjustment_amount\": [insurance_adjustment],\n \"paid_amount\": [0],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n # next handle eob payments where relevant\n eob_payment_percentage = np.random.choice(\n self.distributions[\"eob_payment_percentages\"],\n 1,\n p=self.distributions[\"eob_payment_distribution\"],\n )[0]\n eob_payment_amount = remaining_charge * (eob_payment_percentage / 100.0)\n if eob_payment_amount > 0:\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_eob],\n \"copay_amount\": [0],\n \"adjustment_amount\": [0],\n \"paid_amount\": [eob_payment_amount],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n remaining_charge = remaining_charge - eob_payment_amount\n else:\n remaining_charge = charge_amount\n return remaining_charge", "def create_invoice(self, order): # noqa:max-complexity=18\n\n if len(order['order_lines']) == 0:\n raise RuntimeError(\n \"Expected 1 order_lines in order {}, got: {}\".format(\n order['order_id'],\n order['order_lines']\n )\n )\n\n order_id = order['order_id']\n\n refund = False\n if order['state'] == 'REFUND':\n refund = True\n self.stdout.write(self.style.WARNING(\"Refunded order: {}\".format(order_id)))\n elif order['state'] == 'PAID':\n pass\n else:\n self.stdout.write(self.style.WARNING(\"Not processing unknown order state {} for: {}\".format(order['state'], order_id)))\n return\n\n if self.only_known and order_id not in billy.TICKETBUTLER_IGNORE_LIST:\n self.stdout.write(self.style.WARNING(\"Only processing known invoices, skipping {}\".format(order_id)))\n return\n\n # Object containing all created tickets, to have an invoice relation\n # appended later\n ticketbutler_tickets = []\n\n for ticket in order['tickets']:\n\n sprints = list(filter(\n lambda q: q['question'] == 148,\n ticket['answers']\n ))[0]\n\n if any(filter(lambda c: c['choice_heading'].lower() == 'no', sprints['answered_choices'])):\n sprints = models.TicketbutlerTicket.SPRINTS_NO\n elif any(filter(lambda c: c['choice_heading'].lower() == 'maybe', sprints['answered_choices'])):\n sprints = models.TicketbutlerTicket.SPRINTS_MAYBE\n elif any(filter(lambda c: c['choice_heading'].lower() == 'yes', sprints['answered_choices'])):\n sprints = models.TicketbutlerTicket.SPRINTS_YES\n\n ticketbutler_ticket = models.TicketbutlerTicket.get_or_create(\n ticket['email'],\n ticket['full_name'],\n order_id,\n sprints,\n ticket['ticket_type_name'],\n )\n if refund:\n self.stdout.write(self.style.WARNING(\"This ticket was marked refunded: {}\".format(order_id)))\n ticketbutler_ticket.refunded = True\n ticketbutler_ticket.save()\n else:\n ticketbutler_ticket.refunded = False\n ticketbutler_ticket.save()\n\n ticketbutler_tickets.append(ticketbutler_ticket)\n\n if refund:\n self.stdout.write(self.style.WARNING(\"Skipping refunded order: {}\".format(order_id)))\n return\n\n # If an 
email is changed on a TicketButler ticket and an old user exists without any other tickets,\n # then disable this user's account and delete the ticket from the system\n all_order_tickets = models.TicketbutlerTicket.objects.filter(ticketbutler_orderid=order_id)\n\n for ticket in order['tickets']:\n\n for verify_ticket in all_order_tickets:\n # Check if the ticket is active in the current order, if it is\n # then skip it.\n if any(active.id == verify_ticket.id for active in ticketbutler_tickets):\n continue\n # Yeah, it's not active anymore, so delete it and potentially\n # disable the user account\n inactive_ticket = verify_ticket\n self.stdout.write(self.style.WARNING(\"Going to remove ticket for {}, order_id: {}\".format(inactive_ticket.user.email, order_id)))\n if inactive_ticket.user.tickets.all().exclude(id=inactive_ticket.id).exists():\n # Just remove the ticket\n self.stdout.write(self.style.WARNING(\"Found another ticket for user {} and deleted the inactive ticket in question but not the user\".format(inactive_ticket.user.email)))\n if inactive_ticket.pk:\n inactive_ticket.delete()\n continue\n else:\n # Remove the user account too if there are no submissions and it's not a superuser\n if not inactive_ticket.user.is_superuser and not inactive_ticket.user.submissions.all().exists():\n if inactive_ticket.user.is_active:\n self.stdout.write(self.style.WARNING(\"Also disabling user account for: {}\".format(inactive_ticket.user.email)))\n inactive_ticket.user.is_active = False\n inactive_ticket.user.save()\n else:\n self.stdout.write(self.style.WARNING(\"User was already inactive: {}\".format(inactive_ticket.user.email)))\n # In case the user had several tickets, and one of them was already deleted\n if inactive_ticket.pk:\n inactive_ticket.delete()\n\n if 'discount' in order:\n if order['discount']['amount'] == 100:\n\n for ticket in ticketbutler_tickets:\n ticket.free_ticket = True\n ticket.save()\n\n self.stdout.write(self.style.SUCCESS(\"Skipping invoice for free ticket for order id: {}\".format(order_id)))\n return\n else:\n self.stdout.write(self.style.ERROR(\"!!! 
Order id {} will have an invoice generated with missing information, Ticketbutler said the discount was: {}\".format(order_id, order['discount']['amount'])))\n\n for ticketbutler_order_line_no, order_line in enumerate(order['order_lines']):\n\n self.process_order_line(order, order_line, ticketbutler_tickets, ticketbutler_order_line_no=ticketbutler_order_line_no)", "def create_purchase_order(self, cr, uid, ids, context=None):\n sale_obj = self.pool.get('sale.order')\n act_window = self.pool.get('ir.actions.act_window')\n wizard = self.browse(cr, uid, ids[0], context)\n sale_ids = context.get('active_ids', [])\n if wizard.advance_purchase_order == 'all':\n # create the final invoices of the active sales orders\n res = sale_obj.manual_purchase_order(cr, uid, sale_ids, context)\n \n return {'type': 'ir.actions.act_window_close'}\n\n if wizard.advance_purchase_order == 'lines':\n # open the list view of sales order lines to invoice\n res = act_window.for_xml_id(cr, uid, 'sale', 'action_order_line_tree2', context)\n res['context'] = {\n \n 'search_default_order_id': sale_ids and sale_ids[0] or False,\n }\n return res \n\n inv_ids = []\n for sale_id, inv_values in self._prepare_advance_po_vals(cr, uid, ids, context=context):\n inv_ids.append(self._create_purchase_order(cr, uid, inv_values, sale_id, context=context))\n\n \n return {'type': 'ir.actions.act_window_close'}", "def create_invoice(invoice: Invoice, callback_url: Optional[HttpUrl] = None):\n # Send the invoice, collect the money, send the notification (the callback)\n return {\"msg\": \"Invoice received\"}", "def create_order_invoice(sender, instance, created, using, **kwargs):\n\n # Create invoice if it doesn't already exist\n if (\n created\n and not Invoice.objects.filter(\n order__order_number=instance.order_number\n ).exists()\n ):\n invoice = Invoice(order=instance)\n # Saving it in reverse to avoid having this signal called again\n invoice.save()\n\n for slug, cls in discount_rules.get_all_discount_rules():\n if cls.can_user_have_access(instance.user, invoice):\n cls.apply_discount(instance.user, invoice)", "def _prepare_invoice_line(self, qty):\n self.ensure_one()\n res = super(SaleOrderLine, self)._prepare_invoice_line(qty)\n\n res.update({\n 'cost_center_id': self.cost_center_id and self.cost_center_id.id or False\n })\n return res", "def setUp(self):\n # Setup dummy custmers\n Customer.objects.create(name=\"Mike Zinyoni\", phone=\"+263784528370\", email=\"[email protected]\", address=\"Stand #11 Lorraine Drive, Bluffhill Harare Zimbabwe\")\n Customer.objects.create(name=\"Josh Nyamulomo\", phone=\"+26356839021\", email=\"[email protected]\", address=\"Stand #5 Lorraine Drive, Bluffhill Harare Zimbabwe\")\n Customer.objects.create(name=\"Brian Mpofu\", phone=\"+26390839021\", email=\"[email protected]\", address=\"Stand #25 Lorraine Drive, Bluffhill Harare Zimbabwe\")\n # Setup dummy items\n Item.objects.create(name=\"Chicken thighs\", description=\"Chunky big chicken thighs from Irvines chickens\", price=4.99, unit=\"Kg\")\n Item.objects.create(name=\"Beef steak\", description=\"Premium quality beef steak from Caswell meats\", price=6.99, unit=\"Kg\")\n Item.objects.create(name=\"Kefalos Youghgut\", description=\"Healthy and tasty youghgut available in strawberry, banana and butter milk flavour\", price=5.21, unit=\"litre\")\n Item.objects.create(name=\"Eversharp pen\", description=\"Pens available in: blue , red, green and black ink\", price=0.99, unit=\"dozen\")\n Item.objects.create(name=\"Proton Bread\", 
description=\"Fresh 700g bread\", price=0.9, unit=\"loaf\")\n # Setup dummy Invoice along side the invoice line\n invoice_1 = Invoice(customer=Customer.objects.get(id=1),total=0)\n invoice_1.save()\n InvoiceLine.objects.create(invoice=invoice_1,item=Item.objects.get(id=1), quantity=2, amount=(Item.objects.get(id=1).price*2))\n InvoiceLine.objects.create(invoice=invoice_1,item=Item.objects.get(id=4), quantity=1, amount=(Item.objects.get(id=4).price*1))\n InvoiceLine.objects.create(invoice=invoice_1,item=Item.objects.get(id=3), quantity=6, amount=(Item.objects.get(id=3).price*6))\n invoice_1.total = sum(invoiceLine.amount for invoiceLine in invoice_1.invoiceLines.all())\n invoice_1.save()\n \n invoice_2 = Invoice(customer=Customer.objects.get(id=3),total=0)\n invoice_2.save()\n InvoiceLine.objects.create(invoice=invoice_2,item=Item.objects.get(id=5), quantity=12, amount=(Item.objects.get(id=5).price*12))\n InvoiceLine.objects.create(invoice=invoice_2,item=Item.objects.get(id=4), quantity=2, amount=(Item.objects.get(id=4).price*2))\n invoice_2.total = sum(invoiceLine.amount for invoiceLine in invoice_2.invoiceLines.all())\n invoice_2.save()\n \n invoice_3 = Invoice(customer=Customer.objects.get(id=2),total=0)\n invoice_3.save()\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=5), quantity=12, amount=(Item.objects.get(id=5).price*12))\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=4), quantity=2, amount=(Item.objects.get(id=4).price*2))\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=1), quantity=2, amount=(Item.objects.get(id=1).price*2))\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=4), quantity=1, amount=(Item.objects.get(id=4).price*1))\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=3), quantity=6, amount=(Item.objects.get(id=3).price*6))\n invoice_3.total = sum(invoiceLine.amount for invoiceLine in invoice_3.invoiceLines.all())\n invoice_3.save()\n\n invoice_4 = Invoice(customer=Customer.objects.get(id=1),total=0)\n invoice_4.save()\n InvoiceLine.objects.create(invoice=invoice_4,item=Item.objects.get(id=1), quantity=6, amount=(Item.objects.get(id=1).price*6))\n invoice_4.total = sum(invoiceLine.amount for invoiceLine in invoice_4.invoiceLines.all())\n invoice_4.save()", "def prepare_invoice(self):\n journal_id = self.env['account.invoice'].default_get(['journal_id'])['journal_id']\n if not journal_id:\n raise UserError(_('Please define sales journal for this company: \"%s\" (id:%d).') % (self.company_id.name, self.company_id.id))\n invoice_vals = {\n 'order_id': self.id,\n 'name': self.order_no,\n 'origin': self.order_no,\n 'type': 'out_invoice',\n 'reference': self.patient_id.name + ':' + self.name,\n 'account_id': self.patient_id.partner_id.property_account_receivable_id.id,\n 'partner_id': self.patient_id.partner_id.id,\n 'journal_id': journal_id,\n 'comment': self.note,\n 'doctor_id': self.doctor_id.id,\n 'payment_term': False,\n 'user_id': False,\n }\n return invoice_vals", "def _prepare_invoice_line(self, inv_id):\n res = {}\n account_id = self.product_id.property_account_income_id.id or self.product_id.categ_id.property_account_income_categ_id.id\n if not account_id:\n raise UserError(_('Please define income account for this product: \"%s\" (id:%d).') % \\\n (self.product_id.name, self.product_id.id,))\n price_unit = self.product_id.lst_price\n res = {\n 'invoice_id': inv_id.id,\n 'name': self.name,\n 'origin': self.order_id.name,\n 'account_id': account_id,\n 
'uom_id': self.product_uom_id.id,\n 'quantity': self.product_uom_qty,\n 'price_unit': price_unit,\n 'product_id': self.product_id.id,\n 'invoice_line_tax_id': False,\n 'order_line_id': self.id\n }\n return res", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n invoice_vals = super(my_sale_order, self)._prepare_invoice(cr, uid, order,\n lines, context)\n\n invoice_vals.update({\n 'partner_shipping_id': order.partner_shipping_id.id,\n })\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def _create_account_move_line(self, move, credit_account_id, debit_account_id, qty_out, already_out_account_id):\r\n AccountMoveLine = []\r\n\r\n base_line = {\r\n 'name': self.name,\r\n 'product_id': self.product_id.id,\r\n 'quantity': self.quantity, #Modificado, era 0\r\n }\r\n debit_line = dict(base_line, account_id=debit_account_id)\r\n credit_line = dict(base_line, account_id=credit_account_id)\r\n diff = self.additional_landed_cost\r\n if diff > 0:\r\n debit_line['debit'] = diff\r\n credit_line['credit'] = diff\r\n else:\r\n # negative cost, reverse the entry\r\n debit_line['credit'] = -diff\r\n credit_line['debit'] = -diff\r\n AccountMoveLine.append([0, 0, debit_line])\r\n AccountMoveLine.append([0, 0, credit_line])\r\n\r\n # Create account move lines for quants already out of stock\r\n if qty_out > 0:\r\n debit_line = dict(base_line,\r\n name=(self.name + \": \" + str(qty_out) + _(' already out')),\r\n quantity=qty_out, #Modificado, era 0\r\n account_id=already_out_account_id)\r\n credit_line = dict(base_line,\r\n name=(self.name + \": \" + str(qty_out) + _(' already out')),\r\n quantity=qty_out, #Modificado, era 0\r\n account_id=debit_account_id)\r\n diff = diff * qty_out / (self.former_stock_quantity or 1.0) #Modificado, era \"/ self.quantity\"\r\n if diff > 0:\r\n debit_line['debit'] = diff\r\n credit_line['credit'] = diff\r\n else:\r\n # negative cost, reverse the entry\r\n debit_line['credit'] = -diff\r\n credit_line['debit'] = -diff\r\n AccountMoveLine.append([0, 0, debit_line])\r\n AccountMoveLine.append([0, 0, credit_line])\r\n\r\n if self.env.company.anglo_saxon_accounting:\r\n expense_account_id = self.product_id.product_tmpl_id.get_product_accounts()['expense'].id\r\n debit_line = dict(base_line,\r\n name=(self.name + \": \" + str(qty_out) + _(' already out')),\r\n quantity=qty_out, #Modificado, era 0\r\n account_id=expense_account_id)\r\n credit_line = dict(base_line,\r\n name=(self.name + \": \" + str(qty_out) + _(' already out')),\r\n quantity=qty_out, #Modificado, era 0\r\n account_id=already_out_account_id)\r\n\r\n if diff > 0:\r\n debit_line['debit'] = diff\r\n credit_line['credit'] = diff\r\n else:\r\n # negative cost, reverse the entry\r\n debit_line['credit'] = -diff\r\n credit_line['debit'] = -diff\r\n AccountMoveLine.append([0, 0, debit_line])\r\n AccountMoveLine.append([0, 0, credit_line])\r\n return AccountMoveLine", "def _create_account_move_line(self, move, credit_account_id, debit_account_id, qty_out, already_out_account_id):\n AccountMoveLine = []\n query=\"\"\"select slcl.partner from stock_valuation_adjustment_lines sval\n left join stock_landed_cost_lines slcl on slcl.id=sval.cost_line_id where sval.id={}\"\"\".format(self.id)\n self._cr.execute(query=query)\n partner_id=self._cr.fetchone()\n print(partner_id)\n base_line = {\n 'name': self.name,\n 'product_id': self.product_id.id,\n 'quantity': 0,\n 'partner_id':partner_id[0],\n 
}\n debit_line = dict(base_line, account_id=debit_account_id)\n credit_line = dict(base_line, account_id=credit_account_id)\n diff = self.additional_landed_cost\n if diff > 0:\n debit_line['debit'] = diff\n credit_line['credit'] = diff\n else:\n # negative cost, reverse the entry\n debit_line['credit'] = -diff\n credit_line['debit'] = -diff\n AccountMoveLine.append([0, 0, debit_line])\n AccountMoveLine.append([0, 0, credit_line])\n\n # Create account move lines for quants already out of stock\n if qty_out > 0:\n debit_line = dict(base_line,\n name=(self.name + \": \" + str(qty_out) + _(' already out')),\n quantity=0,\n account_id=already_out_account_id)\n credit_line = dict(base_line,\n name=(self.name + \": \" + str(qty_out) + _(' already out')),\n quantity=0,\n account_id=debit_account_id)\n diff = diff * qty_out / self.quantity\n if diff > 0:\n debit_line['debit'] = diff\n credit_line['credit'] = diff\n else:\n # negative cost, reverse the entry\n debit_line['credit'] = -diff\n credit_line['debit'] = -diff\n AccountMoveLine.append([0, 0, debit_line])\n AccountMoveLine.append([0, 0, credit_line])\n\n if self.env.company.anglo_saxon_accounting:\n expense_account_id = self.product_id.product_tmpl_id.get_product_accounts()['expense'].id\n debit_line = dict(base_line,\n name=(self.name + \": \" + str(qty_out) + _(' already out')),\n quantity=0,\n account_id=expense_account_id)\n credit_line = dict(base_line,\n name=(self.name + \": \" + str(qty_out) + _(' already out')),\n quantity=0,\n account_id=already_out_account_id)\n\n if diff > 0:\n debit_line['debit'] = diff\n credit_line['credit'] = diff\n else:\n # negative cost, reverse the entry\n debit_line['credit'] = -diff\n credit_line['debit'] = -diff\n AccountMoveLine.append([0, 0, debit_line])\n AccountMoveLine.append([0, 0, credit_line])\n\n return AccountMoveLine", "def create_order(self):\n\tprint()\n\tprint('OH - pl_create_order')\n\n\t# Search Partner\n\tprint()\n\tprint('Search partner')\n\tpartner = self.env['res.partner'].search([\n\t\t\t\t\t\t\t\t\t\t\t\t\t('name', '=', self.patient.name),\n\t\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t\t\t#order='appointment_date desc',\n\t\t\t\t\t\t\t\t\t\t\t\tlimit=1,)\n\n\t# Search Pl\n\tprint()\n\tprint('Search pricelist')\n\tpricelist = self.env['product.pricelist'].search([\n\t\t\t\t\t\t\t\t\t\t\t#('active', 'in', [True]),\n\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t\t#order='x_serial_nr asc',\n\t\t\t\t\t\t\t\t\t\t\tlimit=1,\n\t\t\t\t\t\t\t\t\t\t)\n\tprint(pricelist)\n\n\t# Create Order\n\torder = self.env['sale.order'].create({\n\t\t\t\t\t\t\t\t\t\t\t\t\t'state':'draft',\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_doctor': self.physician.id,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'partner_id': self.partner_id.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'partner_id': partner.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'x_ruc': self.partner_id.x_ruc,\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'x_dni': self.partner_id.x_dni,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t'patient': self.patient.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_id_doc': self.patient.x_id_doc,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_id_doc_type': self.patient.x_id_doc_type,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_family': 'procedure',\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t'treatment': self.id,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t'pricelist_id': pricelist.id,\n\t\t\t\t\t\t\t\t\t\t\t\t})\n\t#print(order)\n\n\n\n\t# Create Order Lines\n\tfor cart_line in self.shopping_cart_ids:\n\n\t\tproduct = cart_line.product\n\n\t\t#print(product)\n\t\t#print(product.name)\n\n\t\t# Create Order Line\n\t\tol = 
order.order_line.create({\n\t\t\t\t\t\t\t\t\t\t'name': \t\tproduct.name,\n\t\t\t\t\t\t\t\t\t\t'product_id': \tproduct.id,\n\t\t\t\t\t\t\t\t\t\t'price_unit': \tcart_line.price,\n\t\t\t\t\t\t\t\t\t\t'product_uom_qty': cart_line.qty,\n\t\t\t\t\t\t\t\t\t\t'order_id': \torder.id,\n\t\t\t\t\t\t\t\t\t})\n\treturn order", "def _prepare_invoice_grp(self, cr, uid, order, line_ids, context=None):\n if context is None:\n context = {}\n context = dict(context)\n\n inv_data = super(grp_orden_compra, self)._prepare_invoice_grp(cr, uid, order, line_ids, context=context)\n\n # adicionando campos numero compromiso y no obligacion desde la OC\n monto_oc = math.floor(order.total_llavep or 0)\n monto_oc = int(monto_oc)\n inv_data.update({'nro_compromiso': order.nro_compromiso or False, 'monto_comprometido': monto_oc or 0, 'currency_id':order.currency_oc.id})\n\n # adicionando campos no afectacion y monto autorizado desde la primera APG\n if order.pc_apg_id:\n first_apg = order.pc_apg_id\n monto_apg = math.floor(first_apg.total_llavep)\n monto_apg = int(monto_apg)\n # TODO R SPRING X ADICIONANDO CABEZALES SIIF A LA FACTURA A PARTIR DE LA APG\n inv_data.update({'nro_afectacion': first_apg.nro_afectacion_siif or False,\n 'monto_afectado': monto_apg or 0,\n 'siif_tipo_ejecucion':first_apg.siif_tipo_ejecucion.id,\n 'siif_concepto_gasto':first_apg.siif_concepto_gasto.id,\n 'siif_financiamiento':first_apg.siif_financiamiento.id,\n 'siif_codigo_sir':first_apg.siif_codigo_sir.id,\n 'siif_nro_fondo_rot':first_apg.siif_nro_fondo_rot.id,\n }) # cambiando nro_afectacion 23/10\n # inv.update({'nro_afectacion': first_apg.nro_afectacion_apg or False, 'monto_afectado': monto_apg or 0})\n\n # # TODO R SPRING X NO LLEVAR LAS LLAVES PRESUPUESTALES POR DEFECTO\n # if order.pc_apg_id.llpapg_ids:\n # llavep_ids = []\n # for llavep in order.pc_apg_id.llpapg_ids:\n # llavep_ids.append((0, 0, {\n # 'programa_id': llavep.programa_id.id,\n # 'odg_id': llavep.odg_id.id,\n # 'auxiliar_id': llavep.auxiliar_id.id,\n # 'disponible': llavep.disponible,\n # 'proyecto_id': llavep.proyecto_id.id,\n # 'fin_id': llavep.fin_id.id,\n # 'mon_id': llavep.mon_id.id,\n # 'tc_id': llavep.tc_id.id,\n # 'importe': llavep.importe\n # }))\n # inv_data.update({'llpapg_ids': llavep_ids})\n\n return inv_data", "def invoice_items(self,org_id=None,query={}):\n if org_id is None:\n org_id = self.org_id\n query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n # Given a 'query_end_date' to find the invoice containing the\n # line items for that date we need to find the invoice which \n # has 'endDate' equal to the end of the month of the `query_end_date`\n query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))\n target_invoices = []\n invoices = self.invoices(org_id)\n if self.verbose:\n print('Searching invoices org_id={}'.format(org_id))\n print('query={} query_end_date={}'.format(query,query_end_date))\n print('Result keys: {}'.format( invoices['content'].keys() ))\n print('Total result count: {}'.format( invoices['content']['totalCount'] ))\n for invoice in invoices['content']['results']:\n #pprint.pprint(invoice)\n end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if self.verbose: \n print('invoice({})[\\'endDate\\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))\n if end_date == query_first_next_month:\n target_invoices.append(invoice)\n \n if self.verbose: \n print('Target invoices: {}'.format(target_invoices))\n \n\n target_line_items = []\n 
for invoice in target_invoices:\n invoice_details = self.invoices(org_id,invoice['id']) \n print('invoice_details: {}'.format(invoice_details))\n for item in invoice_details['content']['lineItems']:\n end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if end_date == query_end_date:\n target_line_items.append(item)\n if self.verbose:\n print('target_line_items: {}'.format(target_line_items)) \n return target_line_items", "def action_move_create(self):\n\n res = super(account_invoice, self).action_move_create()\n\n for inv in self:\n if not inv.move_id:\n return res\n for ml in inv.move_id.line_id:\n ml_vals = {\n 'emp_police': inv.pol_numpol,\n 'emp_quittance': inv.prm_numero_quittance,\n 'emp_effet': datetime.datetime.strptime(inv.prm_datedeb, '%Y-%m-%d').date() if inv.prm_datedeb else datetime.datetime.today(),\n 'emp_datech': datetime.datetime.strptime(inv.prm_datefin, '%Y-%m-%d').date() if inv.prm_datefin else datetime.datetime.today(),\n }\n ml.update(ml_vals)\n move_vals = {\n 'num_police': inv.pol_numpol,\n 'num_quittance': inv.prm_numero_quittance,\n 'date_effect': datetime.datetime.strptime(inv.prm_datedeb, '%Y-%m-%d').date() if inv.prm_datedeb else datetime.datetime.today(),\n 'date_end': datetime.datetime.strptime(inv.prm_datefin, '%Y-%m-%d').date() if inv.prm_datefin else datetime.datetime.today(),\n }\n inv.move_id.update(move_vals)\n self._log_event()\n return res", "def _get_query(self, type, date_from=False, date_to=False, users=None, products=None):\n # TODO: Revisar def _create_invoice(self, order, so_line, amount):...\n # so.user_id AS id_salesman\n # AND so.user_id IN (%s)\n # AND pp.id IN (%s)\n # GROUP BY salesman\n\n if type == 'most_sold':\n sql = \"\"\"\n SELECT min(sol.id) AS id, \n so.user_id AS salesman, \n sol.product_id AS product,\n AVG(sol.price_reduce_taxexcl) AS price, \n pp.product_tmpl_id AS product_template,\n so.company_id AS company,\n SUM(sol.product_uom_qty) AS qty,\n SUM(sol.price_subtotal) AS subtotal\n FROM sale_order_line sol\n LEFT JOIN sale_order so ON so.id = sol.order_id\n LEFT JOIN product_product pp ON pp.id = sol.product_id\n LEFT JOIN product_template pt ON pt.id = pp.product_tmpl_id\n WHERE so.state NOT IN ('draft', 'sent', 'cancel')\n AND so.date_order BETWEEN '%s' AND '%s'\n AND so.user_id IN (%s)\n AND pp.id IN (%s)\n GROUP BY salesman, sol.product_id, pp.product_tmpl_id, so.company_id\n ORDER BY qty DESC;\n \"\"\" % (date_from, date_to, ', '.join(str(u) for u in users), ', '.join(str(p) for p in products))\n else:\n sql = \"\"\" \n \"\"\"\n self.env.cr.execute(sql)\n return self.env.cr.dictfetchall()", "def test_invoice_payment_notification(node_factory):\n opts = [{}, {\"plugin\": os.path.join(os.getcwd(), \"contrib/plugins/helloworld.py\")}]\n l1, l2 = node_factory.line_graph(2, opts=opts)\n\n msats = 12345\n preimage = '1' * 64\n label = \"a_descriptive_label\"\n inv1 = l2.rpc.invoice(msats, label, 'description', preimage=preimage)\n l1.rpc.pay(inv1['bolt11'])\n\n l2.daemon.wait_for_log(r\"Received invoice_payment event for label {},\"\n \" preimage {}, and amount of {}msat\"\n .format(label, preimage, msats))", "def add_invoice() -> str:\r\n invoice_details = []\r\n #Catching values user has entered in UI\r\n invoice_number = request.args.get(\"invoice_number\")\r\n invoice_details.append(invoice_number)\r\n customer = request.args.get(\"customer\")\r\n invoice_details.append(customer)\r\n date_required = request.args.get(\"date_required\")\r\n invoice_details.append(date_required)\r\n recipe = 
request.args.get(\"recipe\")\r\n invoice_details.append(recipe)\r\n gyle_number = request.args.get(\"gyle_number\")\r\n invoice_details.append(gyle_number)\r\n quantity_ordered = request.args.get(\"quantity_ordered\")\r\n invoice_details.append(quantity_ordered)\r\n #Passing list to function which writes list to CSV file\r\n data_add(invoice_details)\r\n invoice_message = \"INVOICE ADDED\"\r\n return render_template(\"singular_message.html\",\r\n user_display=invoice_message)", "def test_invoice_payment_hook_hold(node_factory):\n opts = [{}, {'plugin': os.path.join(os.getcwd(), 'tests/plugins/hold_invoice.py'), 'holdtime': TIMEOUT / 2}]\n l1, l2 = node_factory.line_graph(2, opts=opts)\n\n inv1 = l2.rpc.invoice(123000, 'label', 'description', preimage='1' * 64)\n l1.rpc.pay(inv1['bolt11'])", "def merge_invoice(self, cr, uid, invoices, context=None):\n order_ids = []\n pick_ids = []\n if len(invoices) <= 1:\n return False\n parent = self.pool.get('account.invoice').browse(cr, uid, context['active_id'])\n for inv in invoices:\n if parent.partner_id != inv.partner_id:\n raise osv.except_osv(_(\"Partners don't match!\"), _(\"Can not merge invoice(s) on different partners or states !.\"))\n\n if inv.state != 'draft':\n raise osv.except_osv(_(\"Invalid action !\"), _(\"You can merge only invoices in draft state.\"))\n\n # Merge invoices that are in draft state\n inv_line_obj = self.pool.get('account.invoice.line')\n name = parent.name\n comment = parent.comment\n origin = parent.origin\n for inv in invoices:\n if inv.id == parent.id:\n continue\n\n # check if a line with the same product already exist. if so add quantity. else hang up invoice line to first invoice head.\n if inv.name:\n # Find if the same name already exist, if yes, skip to add.\n name_list = name.replace(' ', '').split(',')\n if inv.name not in name_list:\n name += ', %s' % inv.name\n if inv.comment:\n comment = comment and comment + ', %s' % inv.comment or inv.comment\n if inv.origin:\n origin += ', %s' % inv.origin\n line_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', inv.id)])\n for inv_lin in inv_line_obj.browse(cr, uid, line_ids):\n mrg_pdt_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', parent.id), ('product_id', '=', inv_lin.product_id.id),\n ('uos_id', '=', inv_lin.uos_id.id), ('price_unit', '=', inv_lin.price_unit) # kittiu: extra condition, unit price must also be the same.\n ])\n if len(mrg_pdt_ids) == 1 and inv.type == parent.type: # product found --> add quantity\n inv_line_obj.write(cr, uid, mrg_pdt_ids, {'quantity': inv_line_obj._can_merge_quantity(cr, uid, mrg_pdt_ids[0], inv_lin.id)})\n inv_line_obj.unlink(cr, uid, inv_lin.id)\n elif inv.type == parent.type:\n inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id})\n else:\n inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id, 'quantity': -inv_lin.quantity})\n\n if inv.sale_order_ids:\n order_ids += [order.id for order in inv.sale_order_ids]\n if inv.picking_ids:\n pick_ids += [picking.id for picking in inv.picking_ids]\n\n self.write(cr, uid, parent.id, {'origin': origin, 'name': name, 'comment': comment})\n\n #Remove By DRB\n #cr.execute('update sale_order_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id))\n #cr.execute('update picking_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id))\n\n self.unlink(cr, uid, [inv.id])\n #Distinct List\n order_ids = list(set(order_ids))\n pick_ids = list(set(pick_ids))\n\n self.write(cr, uid, parent.id, {'sale_order_ids': [(6, 0, 
order_ids)], 'picking_ids': [(6, 0, pick_ids)]})\n self.button_reset_taxes(cr, uid, [parent.id])\n return parent.id", "def _prepare_analytic_line(self, cr, uid, obj_line, context=None):\n return {'name': obj_line.name,\n 'date': obj_line.date,\n 'account_id': obj_line.analytic_account_id.id,\n 'unit_amount': obj_line.quantity,\n 'product_id': obj_line.product_id and obj_line.product_id.id or False,\n 'product_uom_id': obj_line.product_uom_id and obj_line.product_uom_id.id or False,\n 'amount': (obj_line.credit or 0.0) - (obj_line.debit or 0.0),\n 'general_account_id': obj_line.account_id.id,\n 'journal_id': obj_line.journal_id.analytic_journal_id.id,\n 'ref': obj_line.ref,\n 'move_id': obj_line.id,\n 'user_id': uid,\n }", "def generate_new_visit(self):\n if self.consecutive:\n customer_id = np.random.choice(\n self.customerIds, 1\n ) # choose a customer at random\n insured = self.Customers[self.Customers[\"customer_id\"] == customer_id[0]][\n \"insurance\"\n ].values[\n 0\n ] # does the customer have insurance?\n experiment_id = self.Customers[\n self.Customers[\"customer_id\"] == customer_id[0]\n ][\"experiment_id\"].values[\n 0\n ] # does the customer have insurance?\n\n event_list = (\n self.billing_choose_dates()\n ) # generate dates associated with this invoice\n cpt_code = random.sample(self.CPTCodes, 1)[0]\n date_of_service = str(event_list.values[0][0])\n created_on = str(event_list.values[1][0])\n date_of_eob = str(event_list.values[2][0])\n date_of_provider_adjustment = str(event_list.values[3][0])\n date_of_patient_payment = str(event_list.values[4][0])\n # generate a new invoice\n (invoice_id, charge_amount) = self.generate_new_invoice(\n created_on, date_of_service, customer_id, cpt_code\n )\n # generate subsequent EOB (i.e. copay, EOB adjustment, EOB payment)\n remaining_amount = self.generate_eob(\n date_of_service,\n date_of_eob,\n insured,\n invoice_id,\n cpt_code,\n charge_amount,\n )\n # generate provider adjustments\n remaining_amount = self.generate_provider_adjustment(\n date_of_provider_adjustment, invoice_id, cpt_code, remaining_amount\n )\n # generate a possible payment from the patient\n remaining_amount = self.generate_patient_payment(\n date_of_patient_payment,\n invoice_id,\n cpt_code,\n remaining_amount,\n experiment_id,\n )\n # record the remaining amounts in a separate table.\n self.record_remaining_amount(\n date_of_patient_payment, invoice_id, cpt_code, remaining_amount\n )\n return True\n else:\n print(\"Error generating new invoice- customerIds aren't consecutive\")", "def create_order_amended_invoice(sender, instance, using, **kwargs):\n\n sender_name = sender._meta.model.__name__\n\n if sender_name == \"WillOrder\":\n order = instance\n elif sender_name == \"Allocation\":\n order = instance.asset_store.order\n else:\n order = instance.order\n\n if Invoice.objects.filter(\n order=order, been_paid=True, parent_invoice=None\n ).exists():\n amended_invoice_required = False\n latest_paid_invoice = order.invoice.latest_paid()\n print(\"latest_paid_invoice\", latest_paid_invoice)\n if latest_paid_invoice:\n order_details = InvoiceService(order).limit_details\n\n for order_detail, order_numbers in order_details.items():\n try:\n willorder_limit = OrderLimit.objects.get(\n invoice=latest_paid_invoice, detail=order_detail\n )\n if order_numbers > willorder_limit.limit:\n amended_invoice_required = True\n except OrderLimit.DoesNotExist:\n amended_invoice_required = True\n\n parent_invoice = Invoice.objects.get(order=order, parent_invoice=None)\n\n if 
amended_invoice_required:\n if Invoice.objects.filter(\n order=order, been_paid=False, parent_invoice=parent_invoice\n ).exists():\n print(\"UPDATE AMENDED INVOICE\")\n order.invoice.latest().update_invoice()\n else:\n Invoice.objects.create(\n order=order, parent_invoice=parent_invoice)\n else:\n print(\"DELETE AMENDED INVOICE\")\n if Invoice.objects.filter(\n order=order, been_paid=False, parent_invoice=parent_invoice\n ).exists():\n Invoice.objects.get(\n order=order, parent_invoice=parent_invoice, been_paid=False\n ).delete()", "def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):\n res = super(sale_order_line, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)\n \n res.update({'part_number': line.part_number, 'internal_part_number' : line.internal_part_number})\n return res", "def action_move_create(self):\n account_move = self.env[\"account.move\"]\n\n for request in self:\n if not request.journal_id:\n raise UserError(\n _(\n \"Please define a journal for this request.\"\n )\n )\n if not request.journal_id:\n raise UserError(\n _(\n \"Please define sequence on the journal related to this request.\"\n )\n )\n if any(\n request.approve_request_ids.filtered(\n lambda line: not line.account_id\n )\n ):\n raise UserError(\n _(\n \"There is a line without any account. Please configure a stock account \"\n \"for all product categories that have products on the lines\"\n )\n )\n if not request.approve_request_ids:\n raise UserError(_(\"Please add at least one line!\"))\n if request.move_id:\n continue\n\n company_currency = request.company_id.currency_id\n partner_id = request.end_user.user_id.partner_id.id\n iml = request.approve_request_line_move_line_get()\n name = request.name or \"\"\n credit = 0.0\n debit = reduce(\n lambda x, y: x + y, [line.get(\"credit\", 0.0) for line in iml]\n )\n\n iml.append(\n {\n \"name\": self.name or \"/\",\n \"account_id\": request.account_id.id,\n \"currency_id\": company_currency.id,\n \"date_maturity\": fields.Date.context_today(self),\n \"debit\": debit,\n \"credit\": credit,\n \"partner_id\": partner_id,\n }\n )\n\n iml = [(0, 0, line_item) for line_item in iml]\n move_vals = {\n \"ref\": request.name,\n \"line_ids\": iml,\n \"name\": self.name or \"/\",\n \"journal_id\": request.journal_id.id,\n \"date\": fields.Date.context_today(self),\n \"partner_id\": partner_id,\n \"narration\": request.name,\n }\n move = account_move.with_context(check_move_validity=False).create(\n move_vals\n )\n move.post()\n vals = {\n \"move_id\": move.id,\n \"move_name\": move.name,\n }\n request.write(vals)\n return True", "def write_line_item(\n self,\n name,\n start_date,\n end_date,\n quantity,\n amount,\n currency_symbol,\n billing_type,\n line_item_start,\n ):\n self.PDF.setFillColor(\"black\")\n offset = line_item_start + 12\n self.fontSize(FONT_XXS)\n\n # simple text wrap\n words = name.split()\n line = \"\"\n for word in words:\n w = self.PDF.stringWidth(line + \" \" + word)\n if w > 100:\n self.PDF.drawString(100, offset, line)\n offset += 11\n line = \" \" + word\n else:\n line += \" \" + word\n self.PDF.drawString(100, offset, line)\n\n if start_date == end_date:\n start_date = transform_date(start_date)\n date_string = str(start_date.replace(\"-\", \"/\"))\n else:\n start_date = transform_date(start_date)\n end_date = transform_date(end_date)\n date_string = (\n f'{start_date.replace(\"-\", \"/\")} - {end_date.replace(\"-\", \"/\")}'\n )\n self.PDF.drawString(225, offset, 
date_string)\n\n if quantity is not None:\n new_quantity = \"{:g}\".format(float(quantity))\n self.PDF.drawString(350, offset, str(new_quantity))\n else:\n self.PDF.drawString(350, offset, \"\")\n if amount:\n new_amount = \"{:g}\".format(float(amount))\n self.PDF.drawString(412.5, offset, f\"{currency_symbol}{str(new_amount)}\")\n else:\n self.PDF.drawString(412.5, offset, f\"{currency_symbol}{str(amount)}\")\n\n self.PDF.drawString(475, offset, billing_type)\n\n self.PDF.setStrokeColor(black01)\n self.PDF.setLineWidth(1)\n self.PDF.line(90, line_item_start - 22, 90, (line_item_start - 22) + 35)\n self.PDF.setStrokeColor(\"black\")\n\n return line_item_start + 35", "def test_invoice_item_create(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the invoice's item\n data = self.invoice_item_data\n data[\"invoice_id\"] = id_inv\n data[\"product_id\"] = id_prod\n self._create_model(\"invoiceitem\", data, [\"quantity\", \"quote_price\"])\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def investment_line(self):\n inv, marks = self._get_marks()\n fig = plt.figure(figsize=(4, 2), dpi=200)\n fig.patch.set_facecolor('#ececec')\n ax = fig.add_subplot(111)\n investmentValues = inv['Invested']\n #investmentValues = pd.Series([0], index=[investmentValues.index[0]-timedelta(1)]).append(investmentValues)\n ax.plot(investmentValues, lw=1.2, color=\"blue\", label='Invested', marker=\"o\", markersize=3, markerfacecolor=\"grey\")\n ax.set_xlabel('Time')\n ax.set_ylabel('Investments (€)')\n ax.set_title('Investment Amount (€) - Daily')\n ax.xaxis.set_major_locator(dates.MonthLocator())\n ax.xaxis.set_major_formatter(dates.DateFormatter('%b-%Y'))\n for x, y, mark in zip(marks.index, marks['Invested'], marks['Marks']):\n a = ax.get_ylim()\n if x == marks.index[0]:\n ax.annotate(str(mark) + \" €\", xy=(x + timedelta(abs((self.data.index[0] - self.data.index[-1]).days) / 80), y + (a[1]-a[0])/35), fontsize=5)\n else:\n ax.annotate(str(mark) + \" €\", xy=(x + timedelta(abs((self.data.index[0] - self.data.index[-1]).days) / 50), y - (a[1]-a[0])/35), fontsize=5)\n ax.grid(True)\n fig.autofmt_xdate()\n ax.legend()\n return fig, ax", "def invoice(self,context,params):\n url = f\"https://api.freshbooks.com/accounting/account/{params['account_id']}/invoices/invoices/{params['invoice_id']}\"\n result = json.loads(util.rest(\"GET\", url, {}, context[\"headers\"][\"access_token\"]).text)\n invoice = result[\"response\"][\"result\"][\"invoice\"]\n invoice_obj = FreshbooksInvoice(\n account_id=invoice['accountid'],\n customerid=invoice['customerid'], \n invoice_id=invoice['invoiceid'],\n currency_code=invoice['currency_code'],\n language=invoice['language'],\n terms=invoice['terms'],\n discount_value=invoice['discount_value'],\n discount_amount=invoice['discount_total']['amount'],\n invoice_number=invoice['invoice_number'],\n po_number=invoice['po_number'],\n amount=invoice['amount']['amount'],\n code=invoice['amount']['code'],\n create_date=invoice['create_date']\n )\n return invoice_obj.__dict__", "def invoice_create_onaccept(form):\n\n # Get record ID\n 
form_vars = form.vars\n if \"id\" in form_vars:\n record_id = form_vars.id\n elif hasattr(form, \"record_id\"):\n record_id = form.record_id\n else:\n return\n\n # Look up the billing ID\n table = current.s3db.fin_voucher_invoice\n query = (table.id == record_id)\n invoice = current.db(query).select(table.billing_id,\n limitby = (0, 1),\n ).first()\n\n if invoice:\n # Assign the invoice\n from .helpers import assign_pending_invoices\n assign_pending_invoices(invoice.billing_id,\n invoice_id = record_id,\n )", "def action_move_create(self):\n res = super(HrExpenseExpense, self).action_move_create()\n for expense in self:\n if expense.invoice:\n partner = expense.invoice.partner_id.commercial_partner_id\n move_lines = expense.account_move_id.line_ids\n c_move_lines = move_lines.filtered(\n lambda x: x.partner_id == partner and\n x.debit == abs(round(expense.invoice.residual, 2)))\n c_move_lines |= expense.invoice.move_id.line_ids.filtered(\n lambda x: x.account_id == expense.invoice.account_id and\n x.credit == abs(round(expense.invoice.residual, 2)))\n if len(c_move_lines) != 2:\n raise exceptions.Warning(\n _('Cannot reconcile supplier invoice payable with '\n 'generated line. Please check amounts and see '\n 'if the invoice is already added or paid. '\n 'Invoice: %s') % expense.invoice.number)\n c_move_lines.reconcile()\n return res", "def create_now(self,cr,uid,ids,context=None):\n vals={}\n if context is None:\n context = {}\n trans_brw = self.browse(cr,uid,ids,context=context)\n so_l_obj = self.pool.get('sale.order.line')\n product_obj = self.pool.get('product.uom')\n if context['active_ids']:\n so_l_brw = so_l_obj.browse(cr,uid,context['active_ids'][0],context=context)\n sale_quantity = so_l_brw.product_uom_qty\n for i in trans_brw:\n for line in i.sale_order_line_id:\n quantity = line.quantity1\n diff = round(sale_quantity - quantity,4)\n if diff > 0:\n if line.length1 and line.heigth1:\n vals = {\n 'prod_lot_id':line.lot_id and line.lot_id.id,\n 'pieces':line.pieces_qty,\n 'product_uom_qty':quantity,\n }\n \n sale_quantity = diff\n current_move = so_l_obj.copy(cr, uid,context['active_ids'][0] , vals, context=context)\n \n if diff == 0 or diff < 0:\n vals = {\n 'prod_lot_id':line.lot_id and line.lot_id.id,\n 'pieces':line.pieces_qty,\n 'product_uom_qty':line.quantity1,\n }\n \n so_l_obj.write(cr, uid,context['active_ids'][0],vals)\n if diff > 0:\n if line.length1 and line.heigth1:\n pieces = product_obj._compute_pieces2(cr, uid,so_l_brw.product_id.stock_driver, diff, line.length1, line.heigth1, line.width1)\n vals = {\n 'prod_lot_id':False,\n 'pieces': pieces,\n 'product_uom_qty':diff,\n }\n so_l_obj.write(cr, uid,context['active_ids'][0],vals)\n \n return True", "def action_view_invoice_salon(self):\n return {\n 'name': 'Invoices',\n 'domain': [('invoice_origin', '=', self.name)],\n 'res_model': 'account.move',\n 'view_id': False,\n 'view_mode': 'tree,form',\n 'type': 'ir.actions.act_window',\n }", "def save(request):\n inv_num = request.POST[\"invoice_number\"]\n initial_data, data = process_request(request)\n tax_data = json.loads(request.POST[\"tax_data\"].replace(\"'\", \"\\\"\"))\n grand_total = request.POST[\"grand_total\"]\n\n Invoice.objects.create(number=inv_num,\n invoice_date=datetime.datetime.strptime(initial_data.get(\"invoice_date\"), \"%d %B, %Y\"),\n reference_number=initial_data.get(\"reference_number\"),\n reference_date=datetime.datetime.strptime(initial_data.get(\"reference_date\"), \"%d %B, %Y\"),\n addressed_to=initial_data.get(\"addressed_to\"),\n 
party_gst=initial_data.get(\"party_gst\"),\n created_at=datetime.datetime.now(),\n modified_at=datetime.datetime.now(),\n notes=tax_data.get(\"additional_notes\"),\n items=data,\n s_gst=tax_data.get(\"s_gst\"),\n c_gst=tax_data.get(\"c_gst\"),\n other_charges=tax_data.get(\"other_charges\"),\n total=grand_total\n ).save()\n\n return redirect(\"/invoice/print/\" + inv_num)", "def add_invoice(self, sys_department_id, contact_id=None, company_id=None, for_attention_of=None,\n payment_term=None, invoice_lines=None, draft_invoice=False, layout_id=None, date=None,\n po_number=None, direct_debit=False, comments=None, force_set_number=None, custom_fields=None):\n\n # get all arguments\n data = self._clean_input_to_dict(locals())\n\n # argument validation\n if contact_id is None and company_id is None:\n raise InvalidInputError(\"One of contact_id or company_id is required.\")\n\n if contact_id is not None and company_id is not None:\n raise InvalidInputError(\"Only one of contact_id or company_id is can be set.\")\n\n if payment_term is not None:\n if payment_term not in self._valid_payment_terms:\n raise InvalidInputError(\"Invalid contents of argument payment_term.\")\n\n invoice_lines = self._validate_type(invoice_lines, list)\n for line in invoice_lines:\n if not {'description', 'amount', 'vat', 'price'}.issubset(line.keys()):\n raise InvalidInputError(\"Fields description, amount, vat and price are required for each line.\")\n\n if line['vat'] not in ['00', '06', '12', '21', 'CM', 'EX', 'MC', 'VCMD']:\n raise InvalidInputError(\"Invalid contents of argument vat.\")\n\n if date is not None and type(date) != datetime.date:\n raise InvalidInputError(\"Invalid contents of argument date.\")\n\n custom_fields = self._validate_type(custom_fields, dict)\n\n # convert data elements that need conversion\n self._convert_custom_fields(data)\n\n if contact_id is not None:\n data['contact_or_company'] = 'contact'\n data['contact_or_company_id'] = data.pop('contact_id')\n else:\n data['contact_or_company'] = 'company'\n data['contact_or_company_id'] = data.pop('company_id')\n\n i = 1\n for line in invoice_lines:\n data['description_' + str(i)] = line['description']\n data['price_' + str(i)] = line['price']\n data['amount_' + str(i)] = line['amount']\n data['vat_' + str(i)] = line['vat']\n\n if 'product_id' in data:\n data['product_id_' + str(i)] = line['product_id']\n if 'account' in data:\n data['account_' + str(i)] = line['account']\n if 'subtitle' in data:\n data['subtitle_' + str(i)] = line['subtitle']\n\n i += 1\n\n del(data['invoice_lines'])\n\n if date is not None:\n data['date'] = data.pop('date').strftime('%d/%m/%Y')\n\n return self._request('addInvoice', data)", "def open_invoices(self):\n return {\n 'domain': \"[('id', 'in', \" + str(self.invoice_ids.ids) + \" )]\",\n 'name': 'Invoices',\n 'view_mode': 'tree,form',\n 'res_model': 'account.move',\n 'type': 'ir.actions.act_window',\n }", "def _prepare_invoice(self, invoice_type):\n return {\n 'partner_id': self.picking_id.partner_id.id,\n 'company_id': self.picking_id.company_id.id,\n 'type': invoice_type,\n 'name': _('Exchange Inv for %s') % self.picking_id.name,\n 'currency_id': self.env.user.company_id.currency_id.id,\n }", "def test_invoice_create(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n self._create_model(\"invoice\", data, [])\n 
self.assertIsNotNone(id)", "def button_fac_cob_ent(self):\n invoice = self._fac_ent()\n\n # pagar la factura\n # hacer configuracion para modificar esto\n receipt_obj = self.env['account.voucher.receiptbook']\n receipt = receipt_obj.search([('name', 'like', 'Recibos')], limit=1)\n\n journal = self.journal_id\n res = invoice.invoice_pay_customer()\n context = res['context']\n\n account_voucher_obj = self.env['account.voucher']\n voucher = account_voucher_obj.create({\n 'partner_id': context['default_partner_id'],\n 'journal_id': journal.id,\n 'account_id': journal.default_debit_account_id.id,\n 'type': context['type'],\n 'amount': context['default_amount'],\n 'net_amount': context['default_amount'],\n 'receiptbook_id': receipt.id,\n 'company_id': self.env.user.company_id.id\n })\n voucher.signal_workflow('proforma_voucher')\n\n account_move_line_obj = self.env['account.move.line']\n\n # obtener un recordser vacio\n lines2rec = account_move_line_obj.browse()\n\n # obtener las lineas a conciliar de facturas\n account_move_line = account_move_line_obj.search(\n [('document_number', '=', invoice.document_number)])\n for re in account_move_line:\n if re.account_id.reconcile:\n lines2rec += re\n\n # obtener las lineas a conciliar de pagos\n account_move_line = account_move_line_obj.search(\n [('document_number', '=', voucher.document_number)])\n for re in account_move_line:\n if re.account_id.reconcile:\n lines2rec += re\n\n period_obj = self.env['account.period']\n period = period_obj.find()\n\n # reconciliar las lineas de factura con pagos\n lines2rec.reconcile('manual',\n journal.default_debit_account_id.id, # writeoff_acc_id\n period.id, # writeoff_period_id,\n journal.id) # writeoff_journal_id)\n\n # imprime factura\n datas = {\n 'ids': invoice.ids,\n 'model': 'account.report_invoice',\n 'form': invoice.read()\n }\n return {\n 'type': 'ir.actions.report.xml',\n 'report_name': 'aeroo_report_ar_einvoice',\n 'datas': datas,\n }", "def onchange_invoice(self):\n self.product_id = False\n self.date = self.invoice.date_invoice\n self.name = (self.invoice and self.invoice.reference) or ''\n self.analytic_account_id = False\n self.unit_amount = self.invoice.residual\n self.quantity = 1\n self.total_amount = self.unit_amount", "def create_invoice(cls, payment_request: Tuple[Dict[str, Any]], authorization: Tuple[Dict[str, Any]]) -> Dict:\n # pylint: disable=too-many-locals, too-many-statements\n business_info = payment_request.get('businessInfo')\n filing_info = payment_request.get('filingInfo')\n account_info = payment_request.get('accountInfo', None)\n corp_type = business_info.get('corpType', None)\n business_identifier = business_info.get('businessIdentifier')\n\n payment_account = cls._find_payment_account(authorization)\n payment_method = _get_payment_method(payment_request, payment_account)\n current_app.logger.info(f'Creating Payment Request : '\n f'{payment_method}, {corp_type}, {business_identifier}, '\n f'{payment_account.auth_account_id}')\n\n bcol_account = cls._get_bcol_account(account_info, payment_account)\n\n # Calculate the fees\n fees = _calculate_fees(corp_type, filing_info)\n\n # Create payment system instance from factory\n pay_service: PaymentSystemService = PaymentSystemFactory.create(\n payment_method=payment_method,\n corp_type=corp_type,\n fees=sum(fee.total for fee in fees),\n account_info=account_info,\n payment_account=payment_account\n )\n current_app.logger.info(f'Created Pay System Instance : {pay_service}')\n\n pay_system_invoice: Dict[str, any] = None\n invoice: 
Invoice = None\n\n try:\n invoice = Invoice()\n invoice.bcol_account = bcol_account\n invoice.payment_account_id = payment_account.id\n invoice.cfs_account_id = payment_account.cfs_account_id\n invoice.invoice_status_code = pay_service.get_default_invoice_status()\n invoice.service_fees = sum(fee.service_fees for fee in fees) if fees else 0\n invoice.total = sum(fee.total for fee in fees) if fees else 0\n invoice.paid = 0\n invoice.refund = 0\n invoice.routing_slip = get_str_by_path(account_info, 'routingSlip')\n invoice.filing_id = filing_info.get('filingIdentifier', None)\n invoice.dat_number = get_str_by_path(account_info, 'datNumber')\n invoice.folio_number = filing_info.get('folioNumber', None)\n invoice.business_identifier = business_identifier\n invoice.payment_method_code = pay_service.get_payment_method_code()\n invoice.corp_type_code = corp_type\n details = payment_request.get('details')\n if not details or details == 'null':\n details = []\n invoice.details = details\n invoice = invoice.flush()\n\n line_items = []\n for fee in fees:\n line_items.append(PaymentLineItem.create(invoice.id, fee))\n\n current_app.logger.info(f'Handing off to payment system to create invoice for {invoice.id}')\n invoice_reference = pay_service.create_invoice(payment_account, line_items, invoice,\n corp_type_code=invoice.corp_type_code)\n\n invoice.commit()\n\n pay_service.complete_post_invoice(invoice, invoice_reference)\n\n invoice = Invoice.find_by_id(invoice.id, skip_auth_check=True)\n\n except Exception as e: # NOQA pylint: disable=broad-except\n current_app.logger.error('Rolling back as error occured!')\n current_app.logger.error(e)\n if invoice:\n invoice.rollback()\n if pay_system_invoice:\n pay_service.cancel_invoice(\n payment_account,\n pay_system_invoice.get('invoice_number'),\n )\n raise\n\n current_app.logger.debug('>Finished creating payment request')\n\n return invoice.asdict(include_dynamic_fields=True)", "def _prepare_invoice(self):\n self.ensure_one()\n result = super(SaleOrder, self)._prepare_invoice()\n result.update({\n 'cost_center_id': self.cost_center_id and self.cost_center_id.id or False\n })\n return result", "def test_invoice_detail(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then performing detail\n self._detail_model(\"invoice\", self.invoice_data, id, [])\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def invoices(self, account_id):\n from pureport_client.commands.accounts.invoices import Command\n return Command(self.client, account_id)", "def create_purchase_requestion(self, cr, uid, ids, context=None):\n #TODO change the state of the purchase requestion to quotes and let the wizard in specefic state \n purchase_requestion_obj = self.pool.get('ireq.m')\n exchange = self.pool.get('exchange.order').browse(cr, uid, context['active_id'])\n requestion_lines_obj = self.pool.get('ireq.products')\n prod = self.pool.get('product.product')\n wf_service = netsvc.LocalService(\"workflow\")\n if exchange.purchase_requestion_id:\n raise osv.except_osv(_('Warning'), _('You allredy create a purchase requestion for this exchange order '))\n for wizard in self.browse(cr, uid, ids):\n requestion_id = purchase_requestion_obj.create(cr, uid, {'company_id': exchange.company_id.id,\n 'user': context['uid'],\n 
'cat_id':exchange.category_id.id or False,\n 'ir_ref': exchange.name, \n 'department_id' : exchange.department_id.id,\n 'exchane_order_id':[(4, exchange.id)],})\n for wizard_lines in wizard.products_ids:\n product = prod.browse(cr, uid,wizard_lines.product_id.id)\n requestion_lines_obj.create(cr, uid, {'pr_rq_id':requestion_id,\n 'product_id': wizard_lines.product_id.id,\n 'name': product.name,\n 'product_qty': wizard_lines.product_qty,\n 'product_uom': product.uom_po_id.id, \n 'desc': wizard_lines.description,})\n \n exchange.write({'purchase_requestion_id':requestion_id , 'state' : 'wait_purchase' }) \n wf_service.trg_validate(uid, 'ireq.m', requestion_id, 'draft', cr)\n return requestion_id", "def _compute_amount(self):\n for line in self:\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n new_price = price\n if line.lot_id and line.product_id.tracking in ['lot','serial']:\n lot_id = self.env['stock.production.lot'].search([('name', '=', line.lot_id), ('product_id', '=', line.product_id.id)])\n if lot_id.tax_ids.filtered(lambda tax: tax.amount_type == 'based_on_margin'):\n if lot_id.cost_price:\n new_price -= lot_id.cost_price\n sh_tax = line.tax_id.filtered(lambda tax: tax.amount_type =='based_on_margin').compute_all(new_price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n taxes = line.tax_id.filtered(lambda tax: tax.amount_type !='based_on_margin').compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n print(taxes)\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])) + sum(t.get('amount', 0.0) for t in sh_tax.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': taxes['total_excluded'],\n })\n if self.env.context.get('import_file', False) and not self.env.user.user_has_groups('account.group_account_manager'):\n line.tax_id.invalidate_cache(['invoice_repartition_line_ids'], [line.tax_id.id])", "def compute_counterpart_lines(self):\n for item in self:\n move_debit_lines = []\n move_credit_lines = []\n\n # list of all the move lines of the payment's move\n line_list = []\n for entry in item.journal_entry_ids:\n for line in entry.line_ids:\n if line.account_id.treasury_planning:\n line_list.append(line)\n\n # for each line above collect all the reconciled counterpart lines\n for line in line_list:\n if line.credit > 0 and line.debit == 0:\n for match in line.matched_debit_ids:\n move_debit_lines.append(match.debit_move_id.id)\n\n if line.credit == 0 and line.debit > 0:\n for match in line.matched_credit_ids:\n move_credit_lines.append(match.credit_move_id.id)\n\n if move_credit_lines:\n counterpart_move_ids = move_credit_lines\n else:\n counterpart_move_ids = move_debit_lines\n\n # bank move share is transformed to dictionary\n bank_move_dict = (ast.literal_eval(item.cf_share) if\n item.cf_share else {})\n\n # the share of each counterpart line is \"merged or added\"\n # in a weighted manner to the bank line share\n for cpt in counterpart_move_ids:\n dest_move_line = self.env['account.move.line'].browse(cpt)\n weight = round(dest_move_line.balance / item.amount, 2)\n # counterpart share is transformed into dictionary\n move_line_dict = ast.literal_eval(dest_move_line.cf_share)\n\n # each key is finally added to the bank line share\n for key, value in move_line_dict.iteritems():\n draft_dictionary = dictop.sum_dictionary(\n bank_move_dict.get(key, {}), 1,\n 
move_line_dict.get(key, {}), weight)\n bank_move_dict[key] = dictop.check_dict_total(\n draft_dictionary, 1)\n\n # the dictionary is transformed into string and assigned\n item.cf_share = json.dumps(bank_move_dict)", "def from_invoice_and_line_item(cls, invoice: InvoiceModel, line_item: LineItemModel, line_number: int,\n distribution: str):\n # Note the invoice_date should be the payment_date in the future.\n return cls(total=line_item.total, invoice_number=invoice.id,\n line_number=line_number,\n is_reversal=invoice.invoice_status_code in\n [InvoiceStatus.REFUNDED.value, InvoiceStatus.REFUND_REQUESTED.value],\n distribution=distribution)", "def order_process(self, customerID, list_stockCode, list_quantity\\\n , orderDate=None):\n\n segmentID = -1\n\n #-------------------------------------------------------------------------\n # A new customer is created and inserted into data-set.\n #-------------------------------------------------------------------------\n if customerID is None:\n customerID = int(self.createCustomerID())\n else:\n pass\n \n #-------------------------------------------------------------------------\n # A new dataframe with new invoice lines are created.\n #-------------------------------------------------------------------------\n df_invoice_line = self.create_customer_df_invoice_line(customerID\\\n , list_stockCode, list_quantity, orderDate)\n \n #-------------------------------------------------------------------------\n # Original dataframe is updated with customer invoices lines.\n #-------------------------------------------------------------------------\n print(\"order_process : shape before concat= \"+str(self._df_invoice_original.shape))\n self._df_invoice_original \\\n = pd.concat([self._df_invoice_original, df_invoice_line], axis=0)\n print(\"order_process : shape after concat= \"+str(self._df_invoice_original.shape))\n \n #-------------------------------------------------------------------------\n # All invoices lines (including new one) related to customer is retrieved \n # from original dataframe.\n #-------------------------------------------------------------------------\n df_invoice_line_customer \\\n = self.get_customer_history_df_invoice_line(customerID)\n\n #-------------------------------------------------------------------------\n # When calling get_customer_marketSegment(), df_invoice_line_customer is\n # concatened to the original dataframe.\n #------------------------------------------------------------------------- \n segmentID = self.get_customer_marketSegment(df_invoice_line_customer)\n \n return segmentID, customerID", "def write(self, cr, uid, ids, vals, context=None):\n new_accounts = {}\n deleted = []\n if 'line_id' in vals:\n for val in vals['line_id']:\n #the line changed\n if val[2]:\n if 'analytic_account_id' in val[2] or 'account_id' in val[2]:\n new_accounts[val[1]] = val[2]\n if val[0] == 2:\n #for delete case\n deleted.append(val[1])\n budget_line_obj = self.pool.get('account.budget.lines')\n\n analytic_obj = self.pool.get('account.analytic.account')\n account_obj = self.pool.get('account.account')\n for acc_move in self.browse(cr, uid, ids, context=context):\n for line in acc_move.line_id:\n account_id = line.account_id.id\n analytic_account_id = line.analytic_account_id.id\n budget = line.analytic_account_id.budget\n analytic_required = line.account_id.user_type.analytic_required \n if line.id in deleted:\n continue\n if line.id in new_accounts:\n if 'analytic_account_id' in new_accounts[line.id]:\n if 
new_accounts[line.id]['analytic_account_id']:\n analytic_account_id = new_accounts[line.id]['analytic_account_id']\n analytic_account = analytic_obj.browse(cr,uid,analytic_account_id,context=context)\n budget = analytic_account.budget\n else:\n #empty analytic account entered\n budget = analytic_account_id = False\n\n if 'account_id' in new_accounts[line.id]:\n account_id = new_accounts[line.id]['account_id']\n account_rec = account_obj.browse(cr,uid,account_id,context=context)\n analytic_required = account_rec.user_type.analytic_required\n line_ids = budget_line_obj.search(cr, uid, [('general_account_id','=',account_id),\n ('analytic_account_id', '=', analytic_account_id),\n ('period_id', '=', line.period_id.id)],\n context=context)\n if not analytic_account_id and analytic_required:\n raise orm.except_orm(_('Warning!'), _('Analytic Account Required!'))\n\n if not line_ids and budget:\n raise orm.except_orm(_('Warning!'), _('This account has noo budget!'))\n budget_line_vals = (vals.get('state','') in ['completed','closed','posted'] and \\\n {'move_line_ids':[(1,line.id,{'budget_line_id':line_ids and line_ids[0]})]}) or \\\n (line.budget_line_id and {'move_line_ids':[(3,line.id)]}) or {}\n budget_line_obj.write(cr, uid, line_ids and line_ids[0] or [], budget_line_vals,context=context)\n return super(account_move,self).write(cr, uid, ids, vals, context=context)", "def add_furniture(invoice_file, customer_name, item_code, item_description,\n item_monthly_price):\n add_line = [customer_name, item_code, item_description, item_monthly_price]\n with open(invoice_file, \"a+\", newline=\"\") as file:\n file_write = csv.writer(file)\n file_write.writerow(add_line)", "def test_add_data():\n add_furniture(\"invoice_file.csv\", \"Elisa Miles\", \"LR04\", \"Leather Sofa\", 25.00)\n add_furniture(\"invoice_file.csv\", \"Edward Data\", \"KT78\", \"Kitchen Table\", 10.00)\n add_furniture(\"invoice_file.csv\", \"Alex Gonzales\", \"BR02\", \"Queen Mattress\", 17.00)", "def operation_invoices(self):\r\n for operation in self:\r\n invoices = self.env['account.invoice'].search([\r\n ('freight_hbl', '=', operation.id),\r\n ('type', 'in', ['out_invoice', 'out_refund']),\r\n ('state', '!=', 'cancel'),\r\n ])\r\n action = self.env.ref('account.action_invoice_tree1').read()[0]\r\n if len(invoices) > 1:\r\n action['domain'] = [('id', 'in', invoices.ids)]\r\n elif len(invoices) == 1:\r\n action['views'] = [(self.env.ref('account.invoice_form').id, 'form')]\r\n action['res_id'] = invoices.ids[0]\r\n else:\r\n action = {'type': 'ir.actions.act_window_close'}\r\n return action", "def create(self, values):\n res = super(PurchaseOrderLine, self).create(values)\n states = ['purchase', 'done']\n if res.order_id.state in states:\n raise UserError(_('You can not create an additional purchase order line in a confirmed order '))\n return res", "def create_sale_order_line_vals_amazon(self,order_line,qty_price_dict,tax_id,amazon_product=False,odoo_product=False,amazon_order=False,instance=False,title=False):\n sale_order_line = self.env['sale.order.line']\n# new_record=self.env['sale.order.line'].new({'order_id':amazon_order.id,\n# 'company_id':amazon_order.company_id.id,\n# 'product_id':amazon_product and amazon_product.product_id.id or odoo_product and odoo_product.id or False,\n# 'product_uom':amazon_product and amazon_product.product_tmpl_id.uom_id or odoo_product and odoo_product.product_tmpl_id.uom_id,\n# 'name':title\n# })\n# new_record.product_id_change()\n# order_vals=new_record._convert_to_write({name: new_record[name] 
for name in new_record._cache}) \n# \n# order_qty=qty_price_dict.get('order_qty')\n# order_vals.update({\n# 'product_uom_qty' : order_qty,\n# 'amazon_order_qty':order_line.get('QuantityOrdered',{}).get('value',0.0),\n# 'price_unit' : qty_price_dict.get('amount_per_unit'),\n# 'customer_lead' :amazon_product and amazon_product.sale_delay or False,\n# 'invoice_status' : False,\n# 'state' : 'draft',\n# 'amazon_order_item_id':order_line.get('OrderItemId',{}).get('value'),\n# 'discount':0.0,\n# 'amazon_product_id':amazon_product and amazon_product.id or False,\n# 'product_uom':new_record.product_uom.id,\n# 'producturl':\"%s%s\"%(instance.producturl_prefix or '',order_line.getvalue(\"ASIN\", \"value\"))\n# }) \n\n vals = ({\n 'order_id':amazon_order.id,\n 'product_id':amazon_product and amazon_product.product_id.id or odoo_product and odoo_product.id or False,\n 'company_id':amazon_order.company_id.id,\n 'description':title,\n 'order_qty':qty_price_dict.get('order_qty'),\n 'price_unit':qty_price_dict.get('amount_per_unit'),\n 'discount':0.0,\n 'product_uom':amazon_product and amazon_product.product_tmpl_id.uom_id or odoo_product and odoo_product.product_tmpl_id.uom_id\n }) \n order_vals = sale_order_line.create_sale_order_line_ept(vals)\n \n order_vals.update({\n 'amazon_order_qty':order_line.get('QuantityOrdered',{}).get('value',0.0),\n 'customer_lead' :amazon_product and amazon_product.sale_delay or False,\n 'invoice_status' : False,\n 'amazon_order_item_id':order_line.get('OrderItemId',{}).get('value'),\n 'amazon_product_id':amazon_product and amazon_product.id or False,\n 'producturl':\"%s%s\"%(instance.producturl_prefix or '',order_line.getvalue(\"ASIN\", \"value\"))\n })\n return order_vals", "def _get_account_analytic_invoice(self, cursor, user, picking, move_line):\n if move_line.purchase_line_id:\n return move_line.purchase_line_id.order_id.account_analytic_id.id\n return super(stock_picking, self)._get_account_analytic_invoice(cursor, user, picking, move_line)", "def invoice(self, id):\r\n return Invoice(self, id)", "def parse_from_event(cls, payload):\n data = payload['data']['object']\n plan_info = data['lines']['data'][0]['plan']\n\n period_start_on = datetime.datetime.utcfromtimestamp(\n data['lines']['data'][0]['period']['start']).date()\n period_end_on = datetime.datetime.utcfromtimestamp(\n data['lines']['data'][0]['period']['end']).date()\n\n invoice = {\n 'payment_id': data['customer'],\n 'plan': plan_info['name'],\n 'receipt_number': data['receipt_number'],\n 'description': plan_info['statement_descriptor'],\n 'period_start_on': period_start_on,\n 'period_end_on': period_end_on,\n 'currency': data['currency'],\n 'tax': data['tax'],\n 'tax_percent': data['tax_percent'],\n 'total': data['total']\n }\n\n return invoice", "def _create_nsf_invoice(cls, cfs_account: CfsAccountModel, rs_number: str,\n payment_account: PaymentAccountModel) -> InvoiceModel:\n fee_schedule: FeeScheduleModel = FeeScheduleModel.find_by_filing_type_and_corp_type(corp_type_code='BCR',\n filing_type_code='NSF')\n invoice = InvoiceModel(\n bcol_account=payment_account.bcol_account,\n payment_account_id=payment_account.id,\n cfs_account_id=cfs_account.id,\n invoice_status_code=InvoiceStatus.CREATED.value,\n total=fee_schedule.fee.amount,\n service_fees=0,\n paid=0,\n payment_method_code=PaymentMethod.INTERNAL.value,\n corp_type_code='BCR',\n created_on=datetime.now(),\n created_by='SYSTEM',\n routing_slip=rs_number\n )\n invoice = invoice.save()\n distribution: DistributionCodeModel = 
DistributionCodeModel.find_by_active_for_fee_schedule(\n fee_schedule.fee_schedule_id)\n\n line_item = PaymentLineItemModel(\n invoice_id=invoice.id,\n total=invoice.total,\n fee_schedule_id=fee_schedule.fee_schedule_id,\n description=fee_schedule.filing_type.description,\n filing_fees=invoice.total,\n gst=0,\n priority_fees=0,\n pst=0,\n future_effective_fees=0,\n line_item_status_code=LineItemStatus.ACTIVE.value,\n service_fees=0,\n fee_distribution_id=distribution.distribution_code_id)\n line_item.save()\n\n invoice_response = CFSService.create_account_invoice(transaction_number=invoice.id,\n line_items=invoice.payment_line_items,\n cfs_account=cfs_account)\n\n invoice_number = invoice_response.get('invoice_number', None)\n current_app.logger.info(f'invoice_number {invoice_number} created in CFS for NSF.')\n\n InvoiceReferenceModel(\n invoice_id=invoice.id,\n invoice_number=invoice_number,\n reference_number=invoice_response.get('pbc_ref_number', None),\n status_code=InvoiceReferenceStatus.ACTIVE.value\n ).save()\n\n return invoice", "def invoice():\n name = raw_input(\"What is your name? \")\n\n subtotal = sub_total()\n discount = discount_card(subtotal)\n iva = tax(subtotal, discount)\n total = total_final(subtotal, discount, iva)\n\n reset()\n print \"---------------INVOICE---------------\"\n print \"\"\n print \" DESPENSA FAMILIAR \\n\"\n print \"%s\" % name\n print \"\"\n count_products(SAVE_EXISTENT)\n print \"\\nThe subtotal is:----------- Q%.2f\" % subtotal\n print \"The discount is:----------- Q%.2f\" % discount\n print \"The tax is:---------------- Q%.2f\" % iva\n print \"The total to pay is:------- Q%.2f\" % total\n print \"-------------------------------------\"\n print \"\\n\\n---Thank you for shopping with us---\"", "def draw_header(canvas, invoice):\n\n canvas.setLineWidth(2)\n canvas.line(2 * cm, -4 * cm, 19 * cm, -4 * cm)\n \"\"\" Draws the business address \"\"\"\n business_details = settings.BUSINESS_DETAIL\n business_data = []\n for line in business_details:\n business_data.append([line])\n\n table = Table(business_data, colWidths=[17 * cm], rowHeights=[15, 17, 11, 11, 11, 11, 11])\n table.setStyle([\n ('FONT', (0, 0), (-1, -1), 'Helvetica-Oblique'),\n ('FONTSIZE', (0, 0), (0, 0), 14),\n ('FONTSIZE', (0, 1), (0, -1), 6),\n ('TEXTCOLOR', (0, 0), (-1, -1), (0.2, 0.2, 0.2)),\n ('ALIGN', (0, 0), (-1, -1), 'CENTER'),\n ('BACKGROUND', (0, 0), (-1, -1), (0.95, 0.95,0.95)),\n ])\n tw, th, = table.wrapOn(canvas, 2 * cm, 19 * cm)\n table.drawOn(canvas, 2 * cm, -4 * cm)", "def _generate_valuation_lines_data(self, partner_id, qty, debit_value, credit_value, debit_account_id, credit_account_id, description):\n self.ensure_one()\n\n rslt = super(StockMove, self)._generate_valuation_lines_data(partner_id, qty, debit_value, credit_value, debit_account_id, credit_account_id, description)\n if self.purchase_line_id:\n purchase_currency = self.purchase_line_id.currency_id\n if purchase_currency != self.company_id.currency_id:\n # Do not use price_unit since we want the price tax excluded. 
And by the way, qty\n # is in the UOM of the product, not the UOM of the PO line.\n purchase_price_unit = (\n self.purchase_line_id.price_subtotal / self.purchase_line_id.product_uom_qty\n if self.purchase_line_id.product_uom_qty\n else self.purchase_line_id.price_unit\n )\n currency_move_valuation = purchase_currency.round(purchase_price_unit * abs(qty))\n rslt['credit_line_vals']['amount_currency'] = rslt['credit_line_vals']['credit'] and -currency_move_valuation or currency_move_valuation\n rslt['credit_line_vals']['currency_id'] = purchase_currency.id\n rslt['debit_line_vals']['amount_currency'] = rslt['debit_line_vals']['credit'] and -currency_move_valuation or currency_move_valuation\n rslt['debit_line_vals']['currency_id'] = purchase_currency.id\n return rslt" ]
[ "0.74461937", "0.7406681", "0.70905787", "0.7032683", "0.69090813", "0.68788975", "0.6871585", "0.68584", "0.68474764", "0.68273807", "0.6741477", "0.6672866", "0.6667218", "0.65647554", "0.65585667", "0.63950473", "0.6327984", "0.6303532", "0.6284394", "0.6256989", "0.62379223", "0.62132984", "0.62061334", "0.6090614", "0.6083835", "0.60662013", "0.6016566", "0.59997463", "0.5963619", "0.5958643", "0.59154123", "0.589272", "0.5887478", "0.5877128", "0.586769", "0.5847822", "0.5827206", "0.5810258", "0.57978594", "0.57926", "0.57820797", "0.5778369", "0.5775706", "0.57527995", "0.5736331", "0.57186526", "0.5711705", "0.5703028", "0.5702504", "0.5658799", "0.5647173", "0.5637283", "0.56330746", "0.56208605", "0.5618934", "0.56089735", "0.5555684", "0.5542955", "0.5540119", "0.5520819", "0.55034244", "0.54773724", "0.54405373", "0.5434496", "0.5431594", "0.5420106", "0.54182243", "0.5412404", "0.54088235", "0.54085004", "0.5396823", "0.5389461", "0.53857064", "0.53834116", "0.5374948", "0.53743315", "0.53734845", "0.5355862", "0.5354653", "0.5329527", "0.53196967", "0.5309286", "0.5308704", "0.53066486", "0.5294421", "0.528147", "0.5276226", "0.5274245", "0.52689797", "0.5267762", "0.5254564", "0.5248344", "0.52458745", "0.52410084", "0.52386", "0.5238116", "0.5233569", "0.5229482", "0.5224859", "0.521261" ]
0.65702105
13
Compute the mean absolute error on the test set given X, y, and the model parameter w.
def mean_absolute_error(w, X, y):
    #####################################################
    # TODO 1: Fill in your code here                    #
    #####################################################
    err = None
    temp = np.dot(X, w)
    err = np.mean(np.abs(_error(y, temp)))
    return err
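The snippet above relies on a helper `_error` that is not shown here; it presumably returns the elementwise difference between the prediction and the target, as in the assignment file this document appears to come from. Under that assumption (the helper's name, its signature, and the sample values below are illustrative, not part of the original), a self-contained sketch of the same computation is:

import numpy as np

def _error(y, y_hat):
    # assumed helper: elementwise difference between prediction and target
    return y_hat - y

def mean_absolute_error(w, X, y):
    # MAE on the test set: mean(|X @ w - y|)
    y_hat = np.dot(X, w)
    return np.mean(np.abs(_error(y, y_hat)))

# quick check with small, hand-computable inputs
X = np.array([[1.0, 2.0], [3.0, 4.0]])
w = np.array([0.5, -0.5])
y = np.array([0.0, 1.0])
print(mean_absolute_error(w, X, y))  # mean([0.5, 1.5]) = 1.0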
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mean_absolute_error(w, X, y):\n #####################################################\n # TODO 1: Fill in your code here #\n #####################################################\n if w is None:\n return None\n\n err = None\n yhat = np.dot(X , w)\n err = np.abs(np.subtract(yhat,y)).mean()\n return err", "def mean_absolute_error(y, y_pred, w):\n return np.average(np.abs(y_pred - y), weights=w)", "def evaluate_mean_tst_loss(model, X_test, y_test):\n\ttest_losses = []\n\tfor j in range(len(X_test)):\n\t\tsingle_mol_as_array = np.array(X_test[j:j+1])\n\t\tsingle_y_as_array = np.reshape(y_test[j], (1, -1))\n\t\tsloss = model.test_on_batch(single_mol_as_array, single_y_as_array)\n\t\ttest_losses.append(sloss)\n\n\tmean_test_loss = np.mean(test_losses)\n\treturn mean_test_loss", "def mean_square_error(y, y_pred, w):\n return np.average(((y_pred - y) ** 2), weights=w)", "def mean_absolute_error(y_true, y_pred, *, sample_weight=..., multioutput=...):\n ...", "def mse(datax,datay,w):\n return np.mean((datax.dot(w.T)-datay)**2)", "def mae(t, y):\n\treturn mean_absolute_error(t, y)", "def root_mean_square_error(y, y_pred, w):\n return np.sqrt(np.average(((y_pred - y) ** 2), weights=w))", "def calculate_mean_squared_error(self, X, y):\n mserror = 0\n results = self.predict(X)\n \n mserror = mean_squared_error(results,y)\n #print(error)\n return mserror", "def MeanSquaredError(y_data, y_model):\n\tn = np.size(y_model)\n\tMSE = (1/n)*np.sum((y_data-y_model)**2)\n\n\treturn MSE", "def mae(self, x_train, y_train):\n # number of training examples\n m = x_train.shape[0]\n error = 0\n for pair, r in zip(x_train, y_train):\n u, i = pair\n error += abs(r - np.dot(self.P[u], self.Q[i]))\n return error / m", "def mae(y_true: np.ndarray, y_pred: np.ndarray):\n return np.mean(np.abs(y_true - y_pred))", "def calculate_mean_squared_error(self, X, y):\r\n mean_squared_error = np.square(np.subtract(y,self.predict(X))).mean()\r\n return mean_squared_error", "def mse(actual,expected):\n return np.mean(se(actual,expected))", "def relative_mean_absolute_error(y_true, y_pred, sample_weight=None):\n diff = np.abs(y_pred - y_true) / np.abs(y_true)\n rmae = np.average(diff, weights=sample_weight, axis=0)\n return rmae", "def eval_regression_model(model: NeuralNetwork, X_test: np.ndarray, y_test: np.ndarray):\n preds = model.forward(X_test)\n preds = preds.reshape(-1, 1)\n print(\"Mean absolute error: {:.2f}\".format(mae(preds, y_test)))\n print()\n print(\"Root mean squared error {:.2f}\".format(rmse(preds, y_test)))", "def mean_absolute_percentage_error(y_true, y_pred, sample_weight=..., multioutput=...):\n ...", "def compute_mean_squared_error(self, X_data, y_data):\n #assert isinstance(X_data, np.ndarray)\n #assert isinstance(y_data, np.ndarray)\n #assert X_data.shape[0] == y_data.shape[0]\n \n return np.square(np.subtract(X_data, y_data)).mean()", "def mae(y_true, y_pred):\n return K.mean(K.abs(y_true - y_pred))", "def mean_absolute_error(self):\n print('Mean absolute error regression loss: ' + str(mean_absolute_error(self.model.dataset.get_y_test(),\n self.model.get_predicted())))", "def modelmean(self, model_params, this_data, this_suff_stat):\n pass", "def mse(y_true: np.ndarray, y_pred: np.ndarray) -> float:\n return np.mean(np.power(y_true - y_pred, 2))", "def mean_baseline(d, mode='test'):\n m = d.trainY.mean()\n y = getattr(d, mode + \"Y\")\n preds = np.array([m] * y.shape[0])\n return (get_mse(d, preds, mode), get_mae(d, preds, mode),\n get_mape(d, preds, mode))", "def mean_squared_error(y_true, y_pred, *, 
sample_weight=..., multioutput=..., squared=...):\n ...", "def score(self, test_index):\n y_pred = self.predict(test_index)\n mae = mean_absolute_error(self._y[test_index], y_pred)\n return mae", "def score(self, test_index):\n y_pred = self.predict(test_index)\n mae = mean_absolute_error(self._y[test_index], y_pred)\n return mae", "def RMSE(X, y, w):\r\n n = X.shape[0]\r\n f = X @ w\r\n J = np.sqrt(np.sum(np.power((y - f), 2)) / n)\r\n return J", "def avg_abs_err(fitx, std0, target_cov, n):\n err = np.empty(n)\n for i in range(n):\n err[i] = get_abs_err(fitx, std0, target_cov)\n print(fitx, err.mean())\n return err.mean()", "def mean_baseline(self):\n train_mean = np.mean(self.data.loc[self.train_index, self.target_name])\n rmse = np.sqrt(\n np.mean(np.square(self.data.loc[self.test_index, self.target_name] - train_mean)))\n print 'mean baseline RMSE: {}'.format(rmse)", "def error_calculation_test(self):\n dataOrg = [[1,1], [2,2], [3,3], [4,4], [5,5], [6,6], [7,8], [7.3, 5], [8, 0], [9,10]]\n dataCalc = [[1,3], [2,5], [3,0], [4,3], [5,5], [6.1,6], [7,3], [7.3, 5], [8, 0], [9,9]]\n # abs difference: 2 3 3 1 0 NA 5 0 NA 1\n # local errors: 200 150 200 50 0 NA 125 0 NA 20\n # sum: 745\n\n tsOrg = TimeSeries.from_twodim_list(dataOrg)\n tsCalc = TimeSeries.from_twodim_list(dataCalc)\n\n wmape = WeightedMeanAbsolutePercentageError()\n wmape.initialize(tsOrg, tsCalc)\n assert str(wmape.get_error())[:6] == \"93.125\"", "def mse(x, y):\n\n return (x - y).pow(2).sum(dim=1, keepdim=True).mean() / x.size(1)", "def MSE(X, y, w):\r\n n = X.shape[0]\r\n f = X @ w\r\n J = np.sum(np.power((y - f), 2)) / n\r\n return J", "def compute_rmse(y, tx, w):\n return np.sqrt(2*compute_mse(y,tx,w))", "def compute_rmse(y, tx, w):\n return np.sqrt(2 * compute_mse(y, tx, w))", "def mean_absolute_error(y_true, y_pred):\n \n # initialize error at 0\n error = 0\n \n # loop over all samples in the true and predicted list\n for yt, yp in zip(y_true, y_pred):\n # calculate absolute error\n # and add to error\n error += np.abs(yt - yp)\n # return mean error\n return error / len(y_true)", "def linear_error(X, y, w):\n\n return np.where(y != np.sign(np.dot(X, w)), 1.0, 0.0).mean()", "def mean_squared_error(y_true, y_pred):\n mse = np.mean(np.power(y_true - y_pred, 2))\n return mse", "def objective_function(self, y_true, y_predicted, X=None, sample_weight=None):\n return metrics.mean_absolute_error(\n y_true, y_predicted, sample_weight=sample_weight\n )", "def mean_absolute_error(predictions, ratings_test):\n return np.abs(predictions - np.array(ratings_test.rating)).mean()", "def mae(x_pred, x_target, dim=0):\n if dim == 0:\n return x_pred.sub(x_target).abs().mean().item()\n elif dim == 1:\n return x_pred.sub(x_target).abs().mean((0,1))\n elif dim == 2:\n return x_pred.sub(x_target).abs().mean((0,2))\n else:\n raise ValueError(\"Not a valid dimension\")", "def test_avg_loss(model, dataset):\n _opt = optim.Adadelta(model.parameters(), lr=1)\n opt = BaseDamper(model, dataset, _opt)\n for epoch in range(1, 16 + 1):\n model, opt, meta, _ = experiment.train(model, opt)\n loss = [\n {\"loss\": opt._get_loss(frac=frac), \"frac\": frac, \"repeat\": repeat}\n for frac in np.linspace(0.5, 0.99, num=5)\n for repeat in range(5)\n ]\n total_loss = opt._get_loss(frac=1)\n df = pd.DataFrame(loss)\n summary = df.pivot(index=\"frac\", columns=\"repeat\", values=\"loss\")\n\n abs_error = np.abs(df.loss - total_loss)\n rel_error = abs_error / total_loss\n assert rel_error.max() <= 0.125\n assert np.percentile(rel_error, 50) <= 0.12\n assert 1.5 
<= total_loss <= 2.2\n assert abs_error.max() <= 0.17", "def find_mse(model, samples, y_true):\n mu, covar = model.predict_noiseless(samples, full_cov=True)\n return mean_squared_error(y_true, mu)", "def mean_squared_error(y_true, y_pred):\n\tmse = np.mean(np.power(y_true, y_pred, 2))\n\treturn mse", "def mse(self, y):\n return T.mean((self.y_pred - y) ** 2)", "def mean_absolute_error(y_real, y_pred):\n y_real, y_pred = check_arrays(y_real, y_pred)\n \n dim = y_real.shape[0]\n\n solution = np.zeros(dim)\n solution = solution.astype(np.float32)\n \n mod = SourceModule(\"\"\"\n #include <math.h>\n \n __global__ void rmse(int *x, int *y, float *solution, int dim) {\n int idx = threadIdx.x;\n solution[idx] = abs(x[idx*2] - y[idx*2]);\n }\n \"\"\")\n\n func = mod.get_function('rmse')\n func(drv.In(y_real), drv.In(y_pred), drv.Out(solution), np.int32(dim), block=(dim, 1, 1))\n\n return np.sum(solution) / y_real.size", "def errors(self, y):\n return T.mean(T.neq(self.y_pred, y))", "def mean_squared_error(y_true, y_pred):\n mse = np.mean((y_true - y_pred)**2)\n return mse", "def mean_squared_log_error(y_true, y_pred, *, sample_weight=..., multioutput=...):\n ...", "def mse(X, Y, W):\n\n # TODO\n mse = np.sum((X@W-Y)**2)/(2*X.shape[0])\n # END TODO\n\n return mse", "def rmse(y_true: np.ndarray, y_pred: np.ndarray):\n return np.sqrt(np.mean(np.power(y_true - y_pred, 2)))", "def normalized_mean_absolute_error(y_real, y_pred, max_rating, min_rating):\n y_real, y_pred = check_arrays(y_real, y_pred)\n mae = mean_absolute_error(y_real, y_pred)\n return mae / (max_rating - min_rating)", "def error(self, y_predicted, y):\n errors = []\n for i in range(y.size): \n errors.append((y[i]-y_predicted[i]) ** 2)\n return mean(errors)", "def mse(y_pred, y):\n return np.mean((y - y_pred)**2)", "def compute_loss_mse(y, tx, w):\n e = y - tx.dot(w)\n\n return e.dot(e.T)/(2*len(e))", "def calc_errors(y_panel_df, y_insample_df, seasonality, benchmark_model='Naive2'):\n\n assert benchmark_model in y_panel_df.columns\n\n y_panel = y_panel_df[['unique_id', 'ds', 'y']]\n y_hat_panel_fun = lambda model_name: y_panel_df[['unique_id', 'ds', model_name]].rename(columns={model_name: 'y_hat'})\n\n model_names = set(y_panel_df.columns) - set(y_panel.columns)\n\n errors_smape = y_panel[['unique_id']].drop_duplicates().reset_index(drop=True)\n errors_mase = errors_smape.copy()\n\n for model_name in model_names:\n errors_smape[model_name] = None\n errors_mase[model_name] = None\n y_hat_panel = y_hat_panel_fun(model_name)\n\n errors_smape[model_name] = evaluate_panel(y_panel, y_hat_panel, smape)\n errors_mase[model_name] = evaluate_panel(y_panel, y_hat_panel, mase, y_insample_df, seasonality)\n\n mean_smape_benchmark = errors_smape[benchmark_model].mean()\n mean_mase_benchmark = errors_mase[benchmark_model].mean()\n\n errors_smape = errors_smape.drop(columns=benchmark_model).set_index('unique_id')\n errors_mase = errors_mase.drop(columns=benchmark_model).set_index('unique_id')\n\n errors = errors_smape/mean_mase_benchmark + errors_mase/mean_smape_benchmark\n errors = 0.5*errors\n errors = errors\n\n return errors", "def calc_rmse(y: np.ndarray, y_hat: np.ndarray) -> float:\n return np.sqrt(np.mean((y - y_hat)**2))", "def get_mse(data, yhat, mode=\"train\"):\n y = getattr(data, mode + \"Yref\")\n mse = mean_squared_error(y, data.revert(yhat, mode))\n return np.sqrt(mse)", "def rmse(self):\n return (self.model_error()**2).mean()**.5", "def compute_loss_mse(y, tx, w):\n y = np.array([y]).T.reshape([len(y), 1])\n w = 
np.array([w]).T.reshape([len(w), 1])\n \n e = y - np.matmul(tx,w) \n \n return (1/(2 * y.shape[0])) * (e.T.dot(e))", "def errors(self, y):\n return T.mean(T.neq(self.y_pred, y),\n dtype=theano.config.floatX,\n acc_dtype=theano.config.floatX)", "def mrae(self, benchmark: np.ndarray = None):\n return float(np.mean(np.abs(self._relative_error(benchmark))))", "def felm_rmse(y, X, weights, y_test, X_test):\n # Fit model and get predicted values of test data\n mod = sm.WLS(y, X, weights=weights).fit()\n pred = mod.predict(X_test)\n\n #Get residuals from test data\n res = (y_test[:] - pred.values)\n\n # Calculate ttest to check that residuals from test and train are independent\n t_stat, p_val = stats.ttest_ind(mod.resid, res, equal_var=False)\n\n # Return RMSE and t-stat from ttest\n return (np.sqrt(np.mean(res**2)), t_stat)", "def MSE(actual, noisy):\n mean_squared_error(actual, noisy)", "def mean_squared_error(self, y_out, y):\n loss = np.sum((y - y_out)**2)\n return loss", "def compute_mse(y, tx, w):\n e = y[:, np.newaxis] - tx @ w\n return (e * e).sum() / (2.0 * len(y))", "def calc_mse(data, ax=0):\n return ((data[:, 0] - data[:, 1]) ** 2).mean(axis=ax)", "def calc_mae(y: np.ndarray, y_hat: np.ndarray) -> float:\n return np.mean(np.abs(y - y_hat))", "def avgX(self):\n return np.mean(self.getx())", "def _calc_MSE_loss(self, y_hat, y):\n return np.mean((y_hat-y)**2)", "def mean_squared_error(self):\n print('Mean squared error regression loss: ' + str(mean_squared_error(self.model.dataset.get_y_test(),\n self.model.get_predicted())))", "def test(model, X_test, y_test):\n pred, loss = model(X_test, y_test)\n test_pred = np.argmax(pred, axis=1) \n acc = np.mean(np.argwhere(y_test==1)[:,1]==test_pred) \n\n print(\"Test acc is:\\n\", acc) \n return test\n raise NotImplementedError(\"Test method not implemented\")", "def compute_mse_loss(y, tx, w):\n e = y - np.dot(tx, w)\n N = e.size\n return (1./(2*N)) * np.sum(np.square(e))", "def _ave(self):\n return np.asarray(np.mean(self.model_estim.x, axis=0)).flatten()", "def get_mean_squared_error(self):\n return self.mean_squared_error", "def check_model(X, w, y, thr = 0.9):\n assert np.mean((y > 0) == (X @ w > 0)) > thr, \"model accuracy\"", "def _ave(self):\n\n return np.asarray(np.mean(self.model_estim.x, axis=0)).flatten()", "def evaluate_model(model, X_test, Y_test): \n #Make predictions with the model\n Y_pred = model.predict(X_test)\n #convert numpy output to dataframe and add columns\n Y_pred_df = pd.DataFrame(Y_pred)\n Y_pred_df.columns = Y_test.columns\n #Convert predictions and correct y values to float for faciliate comparison\n Y_pred_df = Y_pred_df.astype('float64')\n Y_test = Y_test.astype('float64')\n print_score(Y_test, Y_pred_df, 'weighted avg')", "def mae_loss(model: tf.keras.Model,\n model_input: tf.Tensor,\n model_target: tf.Tensor\n ):\n _y = model(model_input)\n _reduction_string = \"weighted_sum_over_batch_size\"\n return tf.losses.absolute_difference(labels=model_target,\n predictions=_y,\n reduction=_reduction_string\n )", "def test_mean_loss(model: nn.Module,\n objective: Callable,\n test_subset: Union[Dataset, Subset],\n statistics: Dict,\n config: Dict,\n device: str,\n **kwargs):\n model.eval()\n test_loader = DataLoader(test_subset, batch_size=int(config[\"batch_size\"]), shuffle=True)\n b_loss = 0\n n_datapoints = 0\n batches = tqdm.tqdm(test_loader, unit='batch', file=sys.stdout, ascii=False)\n batches.set_description('\\t\\tTesting ')\n with torch.no_grad():\n for i, batch in enumerate(batches):\n batch = 
batch.to(device)\n loss = objective(model, batch)\n b_loss += loss.item()\n n_datapoints += len(batch)\n batches.set_postfix_str(f'loss={loss.item() / len(batch)}' if i < len(batches) - 1 else f'ave_loss={b_loss / n_datapoints}')\n avg_loss = b_loss / n_datapoints\n statistics['test_loss'].append(avg_loss)\n return avg_loss", "def avgY(self):\n return np.mean(self.gety())", "def evaluation_error(y_real, y_pred, max_rating, min_rating):\n mae = mean_absolute_error(y_real, y_pred)\n nmae = normalized_mean_absolute_error(y_real, y_pred,\n max_rating, min_rating)\n rmse = root_mean_square_error(y_real, y_pred)\n\n return mae, nmae, rmse", "def root_mean_squared_error(y_true, y_pred):\n return sm.mean_squared_error(y_true, y_pred)**0.5", "def MeanSqError(self):\r\n\t\treturn self.mse", "def rmse(y_preds: ndarray, y_actual: ndarray) -> float:\n\n return np.sqrt(np.mean(np.power(y_preds - y_actual, 2)))", "def mse(y, y_pred, verbose=True):\n\n mse_sum = 0\n\n for i in range(len(y)):\n mse_sum += mean_squared_error(y[i], y_pred[i])\n\n if verbose:\n print(f\"Mean MSE {mse_sum / len(y)}\")\n\n return mse_sum / len(y)", "def mae(actual, predicted):\n rms = np.abs(actual-predicted)\n\n # Returning the sqaure root of the root mean square\n return float(rms.mean())", "def test_mean(self):\n pass", "def test_mean(self):\n pass", "def rmse(x: np.ndarray, y: np.ndarray):\n x, y = np.copy(x), np.copy(y)\n if x.ndim > 1:\n return np.sqrt(np.nanmean((x-y)**2, axis=1))\n return np.sqrt(np.nanmean((x-y)**2))", "def rmse_over_stdev(y_true, y_pred, train_y=None):\n if train_y is not None:\n stdev = np.std(train_y)\n else:\n stdev = np.std(y_true)\n rmse = root_mean_squared_error(y_true, y_pred)\n return rmse / stdev", "def rmse(actual: np.ndarray, predicted: np.ndarray):\n return np.sqrt(np.mean(np.square(_error(actual, predicted))))", "def average(self, u=None, y=None):\n\n saveu = False\n savey = False\n if u is None:\n u = self.u\n saveu = True\n if y is None:\n y = self.y\n savey = True\n um = u.mean(axis=-1) # (npp,m,R)\n ym = y.mean(axis=-1)\n um = um.swapaxes(1, 2).reshape(-1, self.m, order='F') # (npp*R,m)\n ym = ym.swapaxes(1, 2).reshape(-1, self.p, order='F') # (npp*R,p)\n\n if saveu:\n self.um = um\n # number of samples after average over periods\n self.mns = um.shape[0] # mns = npp*R\n if savey:\n self.ym = ym\n\n return um, ym", "def calc_centered_rms_error(x, y):\n xdat = x.flatten()\n ydat = y.flatten()\n xm = np.ma.mean(xdat)\n ym = np.ma.mean(ydat)\n\n anom = np.sqrt(np.ma.mean(((xdat - xm) - (ydat - ym)) ** 2.))\n\n return xm - ym, anom", "def gmrae(self, benchmark: np.ndarray = None) -> float:\n return _geometric_mean(np.abs(self._relative_error(benchmark)))", "def mean_absolute_error(y_true, y_pred):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n return backend.mean(math_ops.abs(y_pred - y_true), axis=-1)", "def evaluate_mae(y_true, y_pred):\n\n mae_eval = mean_absolute_error(y_true, y_pred)\n\n return mae_eval", "def calcStatistics(model_type, k = 1000, x = np.linspace(-1,1,1000), N = 2):\n # av_g\n av_g = calcAverage_g(model_type)\n \n # create X\n X = np.vstack((np.ones(len(x)),x)).transpose()\n \n # bias, average across whole input space\n b = np.mean((np.dot(X, av_g)-np.sin(np.pi*x))**2)\n \n # variance, average across whole input space for each data set, then average\n # this average across multiple data sets\n v = 0\n for i in range(k):\n hyp = model_type(N) # generate new hypothesis function\n v += 
np.mean((np.dot(X, hyp.w) - np.dot(X, av_g))**2) # average this across the entire inpute spacede\n v = v/k\n \n return b, v", "def w_mean(self) -> Optional[np.ndarray]:\n\n def _retrieve(fm: VariationalFM) -> np.ndarray:\n return fm.w\n\n return runtime_error_to_optional(self, _retrieve)", "def fit_and_report(model, X, y, Xv, yv, mode = 'regression'):\n model.fit(X, y)\n if mode.lower().startswith('regress'):\n errors = [mean_squared_error(y, model.predict(X)), mean_squared_error(yv, model.predict(Xv))]\n if mode.lower().startswith('classif'):\n errors = [1 - model.score(X,y), 1 - model.score(Xv,yv)] \n \n # tests\n assert len(errors) ==2, 'the len of errors is 2'\n \n return errors", "def _mean_diff(x, y):\n return np.mean(x) - np.mean(y)" ]
[ "0.77339864", "0.76291317", "0.7148356", "0.7034834", "0.68424296", "0.67941666", "0.66706353", "0.6591392", "0.6557777", "0.6557774", "0.6482599", "0.6372792", "0.63640726", "0.6351532", "0.63476115", "0.63377297", "0.63268167", "0.6277267", "0.6224823", "0.61900854", "0.616818", "0.6135212", "0.61208963", "0.6112047", "0.6108037", "0.6108037", "0.61037517", "0.607309", "0.6059663", "0.60321563", "0.6025464", "0.6017627", "0.6014334", "0.6010941", "0.5989509", "0.5985724", "0.59638274", "0.5963634", "0.5963133", "0.5953518", "0.59395474", "0.5930817", "0.59289414", "0.5917546", "0.5913001", "0.5911841", "0.59104407", "0.59081286", "0.5904052", "0.58967584", "0.58912086", "0.58763504", "0.5875228", "0.58670175", "0.5865703", "0.5860527", "0.585223", "0.58429945", "0.5842772", "0.5842597", "0.5829183", "0.58182436", "0.58181196", "0.5815737", "0.581519", "0.5813077", "0.5811709", "0.58099735", "0.5807604", "0.58047014", "0.5798869", "0.57988226", "0.5796426", "0.5785047", "0.577841", "0.57761014", "0.5769321", "0.5767936", "0.5756213", "0.575603", "0.5751436", "0.57469106", "0.5736224", "0.57361305", "0.57305807", "0.5729611", "0.5726397", "0.5726397", "0.5725116", "0.57141614", "0.57136947", "0.5712785", "0.5706542", "0.57027346", "0.5701826", "0.56996155", "0.5675972", "0.5675456", "0.56745386", "0.5672998" ]
0.7912273
0
Compute the weight parameter given X and y.
def linear_regression_noreg(X, y):
    #####################################################
    # TODO 2: Fill in your code here                    #
    #####################################################
    temp = X.T
    result = np.dot(temp, X)
    result = np.linalg.inv(result)
    result = np.dot(result, temp)
    w = np.dot(result, y)
    return w
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weight(self, y, xn, xo):\n\n return self._model.log_prob(y, xn) + self._model.h_weight(xn, xo) - self._kernel.log_prob(xn)", "def compute_weight(self, y, x, test_x=None, test_y=None, **kwargs):\n model = copy.copy(self)\n model.__setattr__('train_y', y)\n model.__setattr__('train_x', x)\n if test_x is not None and test_y is not None:\n model.set_valid((test_y, test_x))\n _kwargs = []\n for name, value in kwargs.items():\n # Recognize parameter \"\n if name is \"regularizer_p\":\n model.__setattr__(name, value)\n model.regularizer.set_parameter(value)\n else:\n _kwargs.append((name, value))\n _kwargs = dict(_kwargs)\n if model.calculate_weight is 'gradient':\n return model.sgd(**_kwargs)\n # elif model.calculate_weight is 'newton':\n # return model.newton(**_kwargs)\n elif model.calculate_weight is 'normalequ':\n return model.normalequ(**_kwargs)", "def fit(self, X, y):\r\n newWeight = [0.0] * self.size\r\n w = [0.0] * len(X)\r\n val = self.predict_prob(X) \r\n grad = [(y-1.0) * i[1] for i in X] \r\n grad1 = float((math.exp(-math.fsum((self.weight[f]*v for f, v in X)))) * val)\r\n grad2 = [i[1] * -1 * grad1 for i in X] \r\n for i in range(len(w)):\r\n w[i] = (grad[i] - grad2[i])\r\n \r\n w = [i*self.eta for i in w]\r\n for i in range(len(X)):\r\n newWeight[i] = self.weight[X[i][0]] -w[i]\r\n \r\n self.weight = newWeight[:]\r\n \r\n pass", "def _compute_prob_y_given_x(self, _x, _y):\n normalisation_constant = sum([\n math.exp(sum([self.weights[_feature] *\n self.feature_funcs[_feature, cls](_feature, cls)\n for _feature in _x]))\n for cls in self.classes])\n\n return math.exp(sum([\n self.weights[_feature] *\n self.feature_funcs[_feature, _y](_feature, _y)\n for _feature in _x])) / normalisation_constant", "def get_gradient(self, y, x, weight):\n y = np.reshape(y, (len(y),))\n return np.dot(x.T, sigmoid(np.dot(x, weight)) - y) \\\n + self.regularizer.get_gradient(weight)", "def objective(self, wb, X, y):\n N,_ = X.shape\n w = wb[:-1]\n b = wb[-1]\n loss = 0.0 # objective function value\n loss += self.reg_param * np.dot(b-self.b0, b-self.b0)\n loss += self.reg_param * np.dot(w-self.w0, w-self.w0)\n\n for i in range(N):\n tmpvar = np.exp(-1 * y[i] * (np.dot(w, X[i]) + b))\n loss += np.log(1 + tmpvar)\n \n return loss", "def _computeWeight(self, Y):\n\n if self.loss == WeightedCrossEntropy_DistanceMap:\n return np2cuda(surfacedist(Y), self.dev)\n\n elif self.loss == WeightedCrossEntropy_ClassBalance:\n # Number of voxels per image\n numvox = np.prod(Y.shape[2:])\n # Number of 1s in each channel in each sample\n ones = np.sum(Y, axis=(2,3,4))\n # The weights are inversely proportional to the number of ones\n # so that if there are very few voxels from one category\n weights_ = 1 - ones/numvox\n Y = np.moveaxis(np.moveaxis(Y, 0, -1), 0, -1)\n weights = np.moveaxis(np.moveaxis(Y*weights_, -1, 0), -1, 0)\n return np2cuda(np.sum(weights, axis=1), self.dev)\n\n else:\n return None", "def lr_weights(x : pd.DataFrame, y : pd.DataFrame):\n reg = LR().fit(x, y)\n return reg#.coef_[0]", "def calculate_weights(y_train: np.ndarray) -> np.ndarray:\n\n weight_class = class_weight.compute_class_weight('balanced', np.unique(y_train), y_train)\n return weight_class", "def balance_training_weight(w, y):\n sample_weight = w.copy()\n neg_mask = (y == 0)\n pos_mask = (y == 1)\n \n bkg_sum_weight = np.sum(sample_weight[neg_mask])\n sig_sum_weight = np.sum(sample_weight[pos_mask])\n\n sample_weight[pos_mask] = sample_weight[pos_mask] / sig_sum_weight\n sample_weight[neg_mask] = sample_weight[neg_mask] / 
bkg_sum_weight\n return sample_weight", "def _update(self, x: np.ndarray, y: int):\n decision = self.weights.dot(x)\n v_t = x @ np.diag(np.diag(self._sigma)) @ x.T\n m_t = y * decision\n loss = (self._phi * math.sqrt(v_t) - m_t)\n #print(loss)\n if loss > 0:\n # We scale our learning rate (alpha) using the weight/cost\n alpha_t = self.class_weight_[y] * self._get_alpha(m_t, v_t)\n u_t = 0.25 * (-alpha_t * v_t * self._phi + math.sqrt(\n alpha_t ** 2 * v_t ** 2 * self._phi ** 2 + 4 * v_t)) ** 2\n beta_t = alpha_t * self._phi / (math.sqrt(u_t) +\n alpha_t * self._phi * v_t)\n sigma = np.expand_dims(x @ self._sigma, axis=0)\n self.weights += alpha_t * y * np.squeeze(sigma)\n self._sigma -= beta_t * sigma.T @ sigma", "def compute_w(self):\n self.pinvX = np.linalg.pinv(self.X)\n return np.dot(self.pinvX, self.y)", "def conditional_embedding_to_weights(mu_y_x, y, k_yy, x_q):\n return _solve_posdef(k_yy, mu_y_x(y, x_q))[0]", "def costFunction(self, x, y ):\n self.yEst = self.forward_propagate(x)\n sqErrors = ( self.yEst - y ) ** 2\n J = sqErrors.sum() / 2\n return J", "def fit(self, X, y): \n # initial values set by set_params when objet was initialised\n _, D = X.shape \n wb_init = np.zeros(D+1) # initial guess for weight vector\n w, b = self.get_params() # set_params inits to zero vector for wb\n wb_init[:-1] = w # self.w0\n wb_init[-1] = b # self.b0\n\n wb_opt, _, _ = fmin_l_bfgs_b(func = self.objective, \n x0 = wb_init, \n fprime = self.objective_grad, \n args = (X,y))\n \n self.set_params(wb_opt[:-1], wb_opt[-1])\n return", "def get_weights(y_true, prior_probs, params):\n # Parameters\n _lambda = params['lambda']\n Q = prior_probs.shape[0]\n\n # The weights are proportional to\n all_w = ((1 -_lambda)*prior_probs + _lambda/Q)**(-1) # (Q,)\n\n # The weighted distribution must sum to one: E[w] = sum(p_tilde*w) = 1\n all_w = all_w / tf.reduce_sum(prior_probs * all_w) # (Q,)\n\n # Find q_star\n q_star = tf.argmax(y_true, axis=3) # (b, H, W)\n\n # Select weights\n all_v = tf.gather(all_w, q_star) # (b, H, W)\n\n # Cast to float32, which is necessary for further calculations\n all_v = tf.cast(all_v, tf.float32) # (b, H, W)\n\n return all_v", "def fit(self, X, y):\n # compute W, b\n # pull W from the normal distribution, and b from 0->2 pi\n\n self.N, d = X.shape\n\n # weights go from R^d -> R^D\n self.W = rng.normal(loc=0, scale=1, size=(self.D, d))\n # bias is in R, need D terms\n self.b = rng.uniform(0, 2*np.pi, size=self.D)\n\n self.Z = self.compute_features(X)\n \n self._fit(y)\n self.fitted = True\n\n # now solve the least-squares problem:\n # min_w ||Z'w - y||_2^2 + \\lambda ||w||_2^2\n\n # done via linear equation solver, eg:\n # A x = b to solve x\n # use cholesky solver: https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.cho_solve.html\n\n # self.L = cholesky(self.kernel, lower=True)\n # self.alpha = cho_solve((self.L, True), y)", "def grad_loss_wrt_w(self, x, y):\n (N, D) = x.shape\n k1 = np.matmul(x, np.transpose(self.w)) + self.b\n y1 = y.reshape((N,1))\n dr = (1 + np.exp(1 * y1 * k1))\n nr = -y1 * x\n c1 = nr/dr\n #(N1,D1) = self.w.shape\n #c2 = np.zeros((N1,D1))\n #for i in range(N):\n # c2[i-1] = c1[i-1,:] + c1[i,:]\n #l_w = c2/N\n l_w1 = np.mean(c1,axis=0)\n return l_w1\n\n\n #raise NotImplementedError", "def get_sample_weight(y):\n class_counts = np.bincount(y)\n class_weight = 1 / class_counts\n sample_weight = class_weight[y]\n sample_weight = sample_weight / sample_weight.sum() * len(y)\n return sample_weight", "def pre_weight(self, y):\n\n if not 
isinstance(self._kernel, (TransformedDistribution, Independent)):\n return self._model.log_prob(y, self._kernel.loc)\n elif isinstance(self._kernel, Independent):\n return self._model.log_prob(y, self._kernel.base_dist.loc)\n\n # TODO: Not entirely sure about this, but think this is the case\n at = next(k for k in self._kernel.transforms if isinstance(k, AffineTransform))\n\n return self._model.log_prob(y, at.loc)", "def fit(self, X, y):\n self.centers = self._select_centers(X)\n self.ampls = self._select_ampl(y)\n G = self._calculate_interpolation_matrix(X)\n self.weights = np.dot(np.linalg.pinv(G), y)", "def _ols(self, X, y):\n # add bias \n X = self._add_bias(X)\n\n # optimise coefficients\n xTx = np.dot(X.T, X)\n inverse_xTx = np.linalg.inv(xTx)\n xTy = np.dot(X.T, y)\n bhat = np.dot(inverse_xTx, xTy)\n\n # pull out weights and bias\n b = bhat[0]\n w = bhat[1:]\n\n return w, b", "def ComputeWeights(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def MSE(X, y, w):\r\n n = X.shape[0]\r\n f = X @ w\r\n J = np.sum(np.power((y - f), 2)) / n\r\n return J", "def cost_func(w, X, y):\n y_pred = np.dot(X, w)\n err = np.sum(np.square(y_pred - y)) / (2 * len(y))\n\n return err", "def compute_weights(model, params, y_obs, LB_type='NWJ'):\n\n # Define PyTorch variables\n x = Variable(\n torch.from_numpy(params).type(torch.FloatTensor),\n requires_grad=True)\n y = Variable(\n torch.from_numpy(y_obs).type(torch.FloatTensor),\n requires_grad=True)\n\n # Pass observed data and parameters through the model\n w = list()\n for idx in range(len(x)):\n T = model(x[idx], y).data.numpy()\n if LB_type == 'NWJ':\n w.append(np.exp(T - 1))\n else:\n raise NotImplementedError\n w = np.array(w)\n\n return w.reshape(-1)", "def J(W1, b1, W2, b2, x, y):\n yhat = forwardPropagate(W1, b1, W2, b2, x) # OLD: yhat = softmax(x.dot(w))\n return crossEntropy(y, yhat)", "def local_weight(\r\n point: np.mat, training_data_x: np.mat, training_data_y: np.mat, bandwidth: float\r\n) -> np.mat:\r\n weight = weighted_matrix(point, training_data_x, bandwidth)\r\n W = (training_data.T * (weight * training_data)).I * (\r\n training_data.T * weight * training_data_y.T\r\n )\r\n return W", "def grad_l2(w, X, y, **kwargs):\n return -1 * np.dot(X.T, y - np.dot(X, w)) / X.shape[0]", "def objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . 
w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l", "def gradient_weight(X, Y, model):\n W = model['weight']\n b = model['bias']\n weight_decay = model['weight_decay']\n\n # YOUR CODE HERE\n # Write the gradient with respect to the weights.\n return np.add(np.subtract(np.dot(np.transpose(predict(X, model)), X), np.dot(np.transpose(Y), X)), 2 * LAMBDA * np.transpose(model['weight'])) #np.zeros((X.shape[1], Y.shape[1]))", "def _compute_weights(self, X, target_X, method='l2'):\n if method == 'proj_l2' or method == 'proj_l2_nonorm':\n #\n # At first calculate unrestricted weights: (X.T)^-1\n # Then project answer onto Unit simplex\n # \n target_center = np.mean(target_X, axis=0) # * X.shape[0]\n # Solve the system\n w = np.linalg.lstsq(X.T, target_center.T, rcond='warn')[0]\n weights = project_onto_simplex(w, normalize=True if method == 'proj_l2' else False)\n print(f\"Weights sum: {np.sum(weights)}\")\n weights /= np.sum(weights)\n return weights\n\n #\n # Pure solution, which make unrestricted weights\n #\n # Compute target center multiplied by number of source rows\n # target_center = np.mean(target_X, axis=0) * X.shape[0]\n # Solve the system\n # print(\"X^T shape: ({}), target_center^T shape: ({})\".format(X.T.shape, target_center.T.shape))\n # w = np.linalg.lstsq(X.T, target_center.T, rcond='warn')[0]\n # print(w)\n # return w.T\n if method == 'dist' or method == 'dist2':\n print(\"Using distance weighting\")\n target_center = np.mean(target_X, axis=0)\n residuals = X - target_center\n norm = np.linalg.norm(residuals, axis=1)\n print(f\"Max norm: {np.max(norm)}\")\n if method == 'dist':\n weights = np.max(norm) - norm # inverse weights\n elif method == 'dist2':\n small_eps = 1e-9\n weights = 1.0 / (norm + small_eps)\n weights = np.exp(weights) # softmax\n print(f\"Weights sum: {np.sum(weights)}\")\n weights /= np.sum(weights)\n return weights\n\n # Compute target center multiplied by number of source rows\n target_center = np.mean(target_X, axis=0) # * X.shape[0]\n # Solve the system\n q = cp.Constant(value=target_center.flatten())\n x_ = cp.Constant(value=X)\n\n w = cp.Variable(X.shape[0])\n # lam = self.optimization_lambda # 0.001\n # M = len(J)\n M = np.linalg.norm(X) ** 2 # target_X)\n print(\"M:\", M)\n lam = self.reg_lambda # 0.1\n if lam == 0:\n print(\"No regularization\")\n # cp.norm2(cp.matmul(X, beta) - Y)**2\n objective = cp.Minimize(cp.sum_squares(q.T - w.T @ x_)) # cp.Minimize(cp.sum_squares(q - x_ * w))\n else:\n objective = cp.Minimize(cp.sum_squares(q.T - w.T @ x_) / M + lam * cp.norm2(w)) # + lam * cp.norm2(w))\n constraints = [w >= 0, cp.sum_entries(w) == 1] #, w >= self.simplex_lower_boundary]\n prob = cp.Problem(objective, constraints)\n\n print(\"Problem is prepared\")\n\n try:\n result = prob.solve()\n except Exception as ex:\n print(\"Exception occurred: {}\".format(ex))\n print(\"Using SCS solver\")\n result = prob.solve(solver=cp.SCS, verbose=False)\n print(\"Problem status: {}\".format(prob.status))\n try:\n weights = w.value.A.flatten()\n except Exception as ex:\n print(\"Can't compute weights, use uniform distribution\")\n weights = np.ones((X.shape[0],)) / X.shape[0]\n print(weights)\n weights[weights < 0] = 0\n weights_sum = np.sum(weights)\n print(\"Weights sum: {}\".format(weights_sum))\n if weights_sum != 1.0: # probably always true\n weights /= weights_sum\n return weights", "def calculate_cost(x, y, weights):\r\n predictions = compute_prediction(x, 
weights)\r\n cost = np.mean(-y * np.log(predictions) - (1 - y) * np.log(1 - predictions))\r\n return cost", "def fit(self, x, y):\n self.w = np.random.rand(1,x.shape[1])\n self.b = 0\n loss = []\n for i in range(self.n_epochs):\n self.b = self.b - (self.lr * self.grad_loss_wrt_b(x,y))\n self.w = self.w - (self.lr * self.grad_loss_wrt_w(x,y))\n loss.append(self.loss(x,y))\n return loss", "def compute_gradient (w, x, y):\n (n,d) = x.shape\n g = np.zeros(d)\n for i in range(0,d):\n g[i] = (w*x-y)*np.transpose(x[i])\n g += 0.5*w\n return g", "def mutual_information(x, y, w):\r\n \r\n\r\n total_entropy = entropy(y,w)\r\n\r\n partitioned_x = partition(x)\r\n weighted_entropy = 0\r\n # calculate the weighted entropy over the partition of x\r\n vals,counts= np.unique(x,return_counts=True)\r\n for key in partitioned_x:\r\n weighted_entropy += np.sum([(np.sum(w[partitioned_x[key]])/np.sum(w)) * entropy(y[partitioned_x[key]],w[partitioned_x[key]])])\r\n\r\n information_gain = total_entropy - weighted_entropy\r\n return information_gain", "def density_weights(w_q, y, theta_y):\n # Setup the optimization problem\n up_scale = _gaussian_norm(theta_y)\n a_down_scale = _gaussian_norm(np.sqrt(3) * theta_y)\n a_body = _gaussian(y, y, np.sqrt(3) * theta_y)\n b_down_scale = _gaussian_norm(np.sqrt(2) * theta_y)\n b_body = _gaussian(y, y, np.sqrt(2) * theta_y)\n a_matrix = (up_scale / a_down_scale) * a_body\n b_matrix = (up_scale / b_down_scale) * b_body\n a = a_matrix # (n, n)\n b = -np.dot(b_matrix, w_q) # [(n, n_q), (n,)]\n\n # Initialize the normalized weights\n w_q_pdf_init = clip_normalize(w_q)\n w_q_pdf = np.zeros(w_q_pdf_init.shape)\n\n # Find the normalized weights using snucq\n if w_q.ndim == 2:\n n, n_q = w_q.shape\n for i in np.arange(n_q):\n w_q_pdf[:, i] = _snucq(a, b[:, i], w_q_pdf_init[:, i])\n elif w_q.ndim == 1:\n w_q_pdf = _snucq(a, b, w_q_pdf_init)\n else:\n raise ValueError('Weights have the wrong dimensions')\n\n # Return the normalized weights\n return w_q_pdf", "def fit(self, X, y, sample_weight, **kwargs):\n pass", "def score(self, X, y, sample_weight=None):\n return super().score(X, y, sample_weight)", "def score(self, X, y, sample_weight=None):\n return super().score(X, y, sample_weight)", "def find_weights(X, b):\n X = np.append(X, np.ones((X.shape[0], 1)), axis=1)\n w, _, _, _ = np.linalg.lstsq(X, b)\n return w", "def costFunction(theta, X, y):\n\n # Initialize some useful values\n m = y.size # number of training examples\n J = np.sum(np.array([inner(theta, xi, yi) for xi, yi in zip(X, y)]))\n J /= m\n\n\n return J", "def compute_gradient(self, X, y, weights):\n sigmoid = self.sigmoid(np.dot(X, weights))\n return np.dot(X.T, y - sigmoid)", "def _l2(self, y, x, y_minus_g, w_0):\n # Initialize weight vector to return\n w_1 = np.zeros(len(w_0))\n \n for j in range(len(x)):\n reg = float(w_0[j]) / (self._lambda*self._num_training)\n w_1[j] = w_0[j] + self._epsilon*(x[j]*y_minus_g - reg)\n \n return w_1", "def evaluate(self, w, X, y):\n # help avoid mistakes (as described in the assignment) by\n # potentially reshaping our arguments\n w = ensure_1d(w)\n y = ensure_1d(y)\n\n # Prediction is linear combination\n y_hat = X @ w\n # Residual is difference between ground truth and prediction\n # (\"what's left\" after your predicition)\n residuals = y - y_hat\n # Squared residuals gives us the objective function value\n f = 0.5 * np.sum(residuals ** 2)\n\n # Analytical gradient, written in mathematical form first\n # and then translated into Python.\n # The parentheses on the first term are 
just a small optimization:\n # this way, we do two matrix-vector multipliciations,\n # instead of a (more expensive) matrix-matrix mult and a matrix-vector\n g = X.T @ (X @ w) - X.T @ y\n return f, g", "def model(theta, x):\n\tw, b = theta\n\treturn w * x + b", "def evaluate(self, w, X, y):\n # help avoid mistakes (as described in the assignment) by\n # potentially reshaping our arguments\n w = ensure_1d(w)\n y = ensure_1d(y)\n\n \"\"\"YOUR CODE HERE FOR Q2.3\"\"\"\n raise NotImplementedError()", "def forward_pass(X, target_Y, W):\n\n pred_Y = activation(weighted_sum(W, X))\n print(\"\\tI/P:\", X, \" O/P:\", target_Y, \" W:\", W, \" W_Sum:\", round(weighted_sum(W, X), 3))\n\n if pred_Y != target_Y:\n for j in range(len(W)):\n W[j] = update_weight(W[j], pred_Y, target_Y, X[j])\n\n return W", "def objective_function(theta, X, y):\n # m number of training instances\n m = X.shape[0]\n jtheta = sum((np.dot(X, theta) - y)**2) / (2.0*m)\n return jtheta", "def bws(x, y, **kwargs):\n\tx.sort()\n\ty.sort()\n\tnpx = np.array(x)\n\tnpy = np.array(y)\n\n\txs = np.unique(npx)\n\tys = np.unique(npy)\n\txys = set(xs).union(set(ys))\n\taxy = np.array(list(xys))\n\taxy.sort()\n\n\tG = np.array([len(axy[np.where(axy <= xi)]) for xi in xs])\n\tH = np.array([len(axy[np.where(axy <= yi)]) for yi in ys])\n\n\tn = len(G)\n\tm = len(H)\n\tfn = float(n)\n\tfm = float(m)\n\n\tN = np.linspace(1,n,num=n)\n\tM = np.linspace(1,m,num=m)\n\n\txt1 = np.power(G - N*(fm + fn)/fn, 2.0)\n\txtt = N/(fn+1.0)\n\txt2 = xtt*(1 - xtt)*(fm * (fm+fn)/fn)\n\tBx = np.sum(xt1/xt2)/fn\n\t\n\tyt1 = np.power(H - M*(fm + fn)/fm, 2.0)\n\tytt = M/(fm+1.0)\n\tyt2 = ytt*(1 - ytt)*(fn * (fm+fn)/fm)\n\tBy = np.sum(yt1/yt2)/fm\n\n\tB = (Bx+By)/2.0\n\n\tprint \"B = \", B\n\t\n\tJ = 3\n\tif \"j\" in kwargs:\n\t\tJ = kwargs[\"j\"]\n\t\n\treturn compute_xi(B, J)", "def computeGradient(self, X, y, w):\n n = len(X)\n if self.loss == 'linear':\n gradient = -2 * np.dot(X.T, (y - X.dot(w)))\n elif self.loss == 'logistic':\n g = self.logistic(X, w)\n gradient = -2 * np.dot(X.T, (y - g) * g * (1 - g))\n elif self.loss == 'perceptron':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = ((np.dot(X, w) >= 0).astype(int) != y)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = -np.dot(usedX.T, usedY)\n elif self.loss == 'svm':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = (np.dot(X, w) * newY < 1)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = 2 * w - self.C * np.dot(usedX.T, usedY)\n gradient[0] = gradient[0] + 2 * w[0]\n\n return gradient", "def evaluate(self, w, X, y):\n pass # this is because it's a base class, it will be implemented below.", "def updateWeights(inputs, outputs, learning_rate, y, weights):\n for i in range(len(weights)):\n weights[i] = weights[i] + learning_rate * (outputs - y) * inputs[i]\n return weights", "def weighted_rmsd(x, y, dim=None, weights=None):\n dev = (x - y) ** 2\n dev_mean = weighted_mean(dev, dim, weights)\n return np.sqrt(dev_mean)", "def calculate_gradient(y, tx, w): \n return tx.T@(sigmoid(tx@w)-y)", "def gradW(self, X, y, W):\n num_x = X.shape[0]\n # if i==y, then softmax_grad = preds_i-1;\n # otherwise, softmax_grad = preds_i\n preds = softmax(np.matmul(X, W)) #preds.shape(N,C)\n preds[list(range(num_x)), y] -= 1\n # grad = X.T*softmax_grad\n return np.matmul(X.T, preds)/num_x", "def __call__(self, x: Tensor, model: Module, gt_y: Tensor):\n prediction = model(x)\n return self.weight * self.norm(prediction, gt_y)", "def 
activation_function(X):\n\tz = np.sum(w*x+b)\n\treturn z", "def learn(self, Xtrain, ytrain):\n numsamples = Xtrain.shape[0]\n Xless = Xtrain[:,self.params['features']]\n self.weights = np.dot(np.dot(np.linalg.inv(np.dot(Xless.T,Xless)/numsamples + (self.params['regwgt'] * np.identity(np.shape(Xless)[1]))), Xless.T),ytrain)/numsamples", "def update(self, X, y):\n proba = self.predict_proba(X)\n top_loss = proba - y\n bias_gradient = np.sum(top_loss)\n weight_gradient = (top_loss).T.dot(X)\n\n # the gradient update\n self.b = self.b - self.lrate * bias_gradient\n self.W = self.W - self.lrate * weight_gradient", "def score(self, X, y, weights=None):\n y_pred = self.predict(X)\n if weights is None:\n weights = 1.0\n\n mean = FrechetMean(self.metric, verbose=self.verbose).fit(y).estimate_\n numerator = gs.sum(weights * self.metric.squared_dist(y, y_pred))\n denominator = gs.sum(weights * self.metric.squared_dist(y, mean))\n\n return 1 - numerator / denominator if denominator != 0 else 0.0", "def polyFunction(x,weights):\n y=0\n for i in range (0,len(weights)):\n y+= weights[i]*(x**i)\n return y", "def y(x):\n x1, x2, x3, x4 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]\n return 1 + 0.3 * x1 - 0.6 * x2 ** 2 - 0.2 * x3 ** 3 + 0.5 * x4 ** 4", "def update_weights(x_train, y_train, weights, learning_rate):\r\n predictions = compute_prediction(x_train, weights)\r\n weights_delta = np.dot(x_train.T, y_train - predictions)\r\n m = y_train.shape[0]\r\n weights += learning_rate / float(m) * weights_delta\r\n return weights", "def entropy(y,w):\r\n\r\n\t# my original entropy function commented below is not working as desired. The below implementation is based on from Sai Ram Chappidi's explanation\r\n\r\n # y_partition = partition(y)\r\n # elements,counts = np.unique(y,return_counts = True)\r\n # entropy=0\r\n\r\n # for i in range(len(elements)):\r\n # entropy += ((-(np.sum(w[y_partition[i]])))/np.sum(w))*np.log2(np.sum(w[y_partition[i]])/np.sum(w))\r\n # return entropy\r\n\r\n entropy = 0\r\n # two hypothesis cases 0,1\r\n h = {0: 0, 1: 0}\r\n leny = len(y)\r\n for i in range(leny):\r\n # if y is 0 add 0 to the weight\r\n if y[i] == 0:\r\n h[0] += w[i]\r\n # if y is 1 add 1 to the weight\r\n elif y[i] == 1:\r\n h[1] += + w[i]\r\n # summing all the weighted values \r\n val_sum = h[0] + h[1]\r\n\r\n # entropy calculation\r\n for j in range(len(h)):\r\n h[j] = h[j]/val_sum\r\n # to prevent divide by zero\r\n if h[j] != 0:\r\n entropy += h[j] * np.log2(h[j])\r\n entropy = -(entropy)\r\n return entropy", "def getWeights(self,dist,xin,yin):\r\n \r\n Ns = len(dist)\r\n \r\n # Construct the LHS matrix C\r\n C=np.ones((Ns+1,Ns+1))\r\n for i in range(0,Ns):\r\n C[i,i]=0\r\n for j in range(i+1,Ns):\r\n D = np.sqrt((xin[i]-xin[j])**2+(yin[i]-yin[j])**2)\r\n C[i,j] = self.semivariogram(D)\r\n C[j,i] = C[i,j]\r\n\r\n C[Ns,Ns]=0\r\n\r\n # Calculate the inverse of C \r\n Cinv = np.linalg.inv(C)\r\n \r\n # Loop through each model point and calculate the vector D\r\n gamma = np.ones((Ns+1,1))\r\n \r\n for j in range(0,Ns):\r\n gamma[j,0]= self.semivariogram( dist[j])\r\n # Solve the matrix to get the weights\r\n W = np.dot(Cinv,gamma)\r\n W = W[:-1,:]\r\n \r\n #print np.size(gamma,axis=0),np.size(gamma,axis=1) \r\n return 1.0/float(Ns)*np.ones((Ns,1))", "def gen_weights(self, f_target):\n\n # calculate x and psi\n x_track = self.cs.rollout()\n psi_track = self.gen_psi(x_track)\n\n # efficiently calculate BF weights using weighted linear regression\n self.w = jnp.zeros((self.n_dmps, self.n_bfs))\n for d in range(self.n_dmps):\n # 
spatial scaling term\n k = self.goal[d] - self.y0[d]\n for b in range(self.n_bfs):\n numer = jnp.sum(x_track * psi_track[:, b] * f_target[:, d])\n denom = jnp.sum(x_track ** 2 * psi_track[:, b])\n self.w[d, b] = numer / denom\n if abs(k) > 1e-5:\n self.w[d, b] /= k\n\n self.w = jnp.nan_to_num(self.w)", "def update_weights(self):\n\n\n self.w += self.learn_rate * (self.X.T.dot(self.T - self.Y)\n - self.reg_L1 * np.sign(self.w)\n - self.reg_L2 * 2*self.w)", "def fit(self, X, y, sample_weight=...):\n ...", "def fit(self, x, y, weights=None, **kwargs):\n from scipy.optimize import fmin_l_bfgs_b\n\n assert len(y) == x.shape[0]\n assert weights is None or len(weights) == x.shape[0]\n\n y0 = y == 0\n x0 = x[y0, :]\n x1 = x[~y0, :]\n\n if weights is None:\n loss_weights = None\n else:\n loss_weights = [weights[y0], weights[~y0]]\n\n def _loss_for_optimize(params):\n return LogisticRegression._loss_gradient(x0, x1, params[0], params[1:], self.lam, loss_weights)\n\n params0 = np.zeros(1 + x.shape[1])\n params_opt, loss_opt, info_opt = fmin_l_bfgs_b(_loss_for_optimize, params0, disp=0, **kwargs)\n print((\"%s funcalls: %s\" % (info_opt['task'], info_opt['funcalls'])))\n\n self.b = params_opt[0]\n self.w = params_opt[1:]", "def estimate_nb(x,y,smoothing):\n labels = set(y)\n doc_counts = defaultdict(float)\n weights = defaultdict(float)\n\n vocab = set()\n for base_features in x:\n for word in base_features.keys():\n vocab.add(word)\n\n for label in y:\n doc_counts[label] += 1\n\n\n for label in labels:\n weights[(label, OFFSET)] = np.log(doc_counts[label] / sum(doc_counts.values()))\n log_probabilities = estimate_pxy(x, y, label, smoothing, vocab)\n for word in log_probabilities:\n weights[(label, word)] = log_probabilities[word]\n\n return weights", "def get_weights(self):", "def _l1(self, y, x, y_minus_g, w_0):\n # Initialize weight vector to return\n w_1 = np.zeros(len(w_0))\n \n for j in range(len(x)):\n reg = float(self._sign(w_0[j])) / (self._lambda*self._num_training)\n w_1[j] = w_0[j] + self._epsilon*(x[j]*y_minus_g - reg)\n \n return w_1", "def _calc_gradients(self, X, y, y_hat):\n # calculate gradient of weight and bias\n grad_b = 2 * np.mean(y_hat - y)\n grad_W = 2 * np.mean(np.matmul((y_hat - y), X))\n return grad_W, grad_b", "def reweigh(X, y, S):\n\n\tX['label'] = y\n\n\tW = pd.DataFrame({'group': [1, 1, 0, 0], 'label': [1, 0, 1, 0]})\n\n\t# Calculate weight for each combination of sensitive attribute and class,\n\t# given by the expected probability of an example being in a certain group\n\t# and class if sensitive attribute/class are independent, divided by the\n\t# observed probability\n\tweights = [[len(X[X[S] == s]) * len(X[X['label'] == c]) \\\n\t\t\t\t/ float(len(X) * len(X[(X[S] == s) & (X['label'] == c)])) \\\n\t\t\t\tfor c in [1, 0]] for s in [1, 0]]\n\n\tW['weight'] = [i for j in weights for i in j]\n\t\n\tX_prime = X.copy()\n\tX_prime['weight'] = 0\n\n\t# Add weights according to class/group\n\tfor s in [1, 0]:\n\t\tfor c in [1, 0]:\n\t\t\tw = W.loc[(W['group'] == s) & (W['label'] == c), 'weight']\n\t\t\tX_prime.loc[(X[S] == s) & (X['label'] == c), 'weight'] = w.iloc[0]\n\n\tX.drop('label', axis = 1, inplace = True)\n\ty_prime = X_prime['label'].tolist()\n\tX_prime = X_prime.drop('label', axis = 1)\n\n\treturn(X_prime, y_prime)", "def local_weight_regression(\r\n training_data_x: np.mat, training_data_y: np.mat, bandwidth: float\r\n) -> np.mat:\r\n m, n = np.shape(training_data_x)\r\n ypred = np.zeros(m)\r\n\r\n for i, item in enumerate(training_data_x):\r\n ypred[i] = item 
* local_weight(\r\n item, training_data_x, training_data_y, bandwidth\r\n )\r\n\r\n return ypred", "def calculate_gradient(y, tx, w):\n\n\tret = tx.T.dot(sigmoid(np.dot(tx, w)) - y)\n\treturn ret", "def RMSE(X, y, w):\r\n n = X.shape[0]\r\n f = X @ w\r\n J = np.sqrt(np.sum(np.power((y - f), 2)) / n)\r\n return J", "def fit(self, X, y, sample_weight=..., **fit_params):\n ...", "def propagate(w, b, x, y):\n\n m = x.shape[1]\n\n # Forward Propagation (from x to cost)\n # Compute activation\n activation = 1 / (1 + np.exp(-(np.dot(w.T, x) + b)))\n # compute cost\n cost = (-1 / m) * np.sum(\n (np.dot(np.log(activation), y.T)) +\n np.dot(np.log(1 - activation), (1 - y).T)\n )\n\n # Backward Propagation (to find gradient descent)\n dw = (1 / m) * np.dot(x, (activation - y).T)\n db = (1 / m) * np.sum(activation - y)\n\n cost = np.squeeze(cost)\n grads = {\"dw\": dw,\n \"db\": db}\n\n return grads, cost", "def train_normal_equation(self, X, y):\r\n self.weights = np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), y)\r\n self.bias = 0\r\n \r\n return self.weights, self.bias", "def cost(self, X=None, y=None):\n\t\tif X is None:\n\t\t\tX = self.x_data\n\t\telse:\n\t\t\tn_samples = np.size(X, 0)\n\t\t\tX = np.hstack((np.ones((n_samples, 1)), (X - np.mean(X,0)) / np.std(X,0)))\n\n\t\tif y is None:\n\t\t\ty = self.y_data\n\t\telse:\n\t\t\ty = y[:, np.newaxis]\n\n\t\ty_pred = X @ self.weights\n\n\t\t# SSE formula\n\t\tcost = 1 - (((y - y_pred) ** 2 ).sum() / ((y- y.mean())** 2).sum())\n\n\t\treturn cost", "def weighted_sum(W, X):\n\n if len(W) != len(X):\n print(\"Dimension of weight vector should be same as input vector.\")\n return\n\n else:\n H = 0\n\n for i in range(len(W)):\n H += (W[i] * X[i])\n \n return H", "def compute_loss(self, X, y):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n\r\n # Computing the loss using the below formula\r\n # Loss = -(1/m)*sum( (y_i)*log(σ(wTx_i)) + (1-y_i)*log(1 - σ(wTx_i)))\r\n # m = number of examples and i for ith example\r\n\r\n loss = 0\r\n X = np.append(X, np.array([[1]]*X.shape[0]), axis=1)\r\n # for idx,example in enumerate(X):\r\n # loss = loss + y[idx] * np.log(self.sigmoid(np.dot(example, self.w))) + (1 - y[idx]) * np.log(1 - self.sigmoid(np.dot(example, self.w)))\r\n # loss = -loss/ X.shape[0]\r\n\r\n loss = -np.mean(y * np.log(self.sigmoid(np.dot(X, self.w))) + (1 - y) * np.log(1 - self.sigmoid(np.dot(X, self.w))))\r\n return loss", "def _kernel(self, bw, X, x):\n return (1.0 / np.sqrt(2 * np.pi) / bw) * np.exp(\n -((X - x) ** 2) / (bw ** 2 * 2.0)\n )", "def update(w, x, y, l2_param=0):\n # get dimension d and current prediction mu of shape Nx1\n d = x.shape.as_list()[1]\n mu = tf.sigmoid(tf.matmul(x, w))\n\n # build R of shape Nx1 (element wise multiplication)\n r_flat = mu * (1 - mu)\n\n # build regularisation term and hessian H = X'RX of shape dxd\n l2_regularisation = l2_param * tf.eye(d)\n h = tf.matmul(tf.transpose(x), r_flat * x) + l2_regularisation\n\n # do single-value decomposition of H\n sigma, u, v = tf.svd(h, full_matrices=True, compute_uv=True)\n sigma = tf.expand_dims(sigma, 1)\n\n # calculate Moore-Penrose-pseudo-inverse of H via single value decomposition\n s_mppinv = tf.where(tf.not_equal(sigma, 0), 1 / sigma, tf.zeros_like(sigma))\n h_mppinv = tf.matmul(v, s_mppinv * tf.transpose(u))\n\n # calculate update step\n w_delta = tf.matmul(h_mppinv, tf.matmul(tf.transpose(x), mu - y) + l2_param * w)\n return w_delta", "def posterior_weights(prior_embedding, x, y, theta_x, theta_y, x_q,\n epsil=0, 
delta=0,\n kbr='tikhonov',\n k_x=_gaussian, k_y=_gaussian,\n k_xx=None, k_yy=None,\n v=None):\n if v is None:\n k_xx = k_x(x, x, theta_x) if not k_xx else k_xx\n k_yy = k_y(y, y, theta_y) if not k_yy else k_yy\n v = _posterior_fields[kbr](prior_embedding(y), k_xx, k_yy, epsil, delta)\n return np.dot(v, k_x(x, x_q, theta_x))", "def get_weight(module,\n shape,\n weight_var='weight',\n kernel_init=None):\n\n return module.param(name=weight_var, shape=shape, initializer=kernel_init)", "def check_X_y_weights(X, y=None, sample_weights=None):\r\n # X checking\r\n try:\r\n assert np.isfinite(np.max(X)), \"X should only contain finite \" \\\r\n \"numerical values\"\r\n except Exception as e:\r\n print(str(e))\r\n raise RuntimeError(\"X should be a numerical array\")\r\n\r\n # y checking\r\n if y is not None:\r\n try:\r\n assert not np.isnan(np.min(y))\r\n except Exception as e:\r\n print(str(e))\r\n raise RuntimeError(\r\n \"y should not contain NaN values\"\r\n )\r\n\r\n try:\r\n assert (np.ndim(X) == np.ndim(y) == 2), \"X and y should be \" \\\r\n \"2-dim arrays\"\r\n assert (len(X) == len(y)), \"X and y should have the same \" \\\r\n \"'n_sample' first dimension\"\r\n except Exception as e:\r\n print(str(e))\r\n raise RuntimeError(\r\n \"y should be a numerical array of the same size than X\"\r\n )\r\n\r\n try:\r\n assert ((np.min(y) >= 0)\r\n and\r\n (np.max(y) <= 1)), \"y values should be between 0 and 1\"\r\n return y\r\n except Exception as e:\r\n # rounding issues might produce values outside of the\r\n # [0,1] range that we must correct\r\n y_corrected = y.copy()\r\n y_corrected[y < 0] = 0\r\n y_corrected[y > 1] = 1\r\n print(\"y should contain only values between 0 and 1\")\r\n print(str(e))\r\n\r\n # returning the corrected y (bounded between 0 and 1)\r\n return y_corrected\r\n\r\n # sample_weights checking\r\n if sample_weights is not None:\r\n try:\r\n assert (np.min(sample_weights) >= 0), \"sample weights should\" \\\r\n \" be positive values\"\r\n assert (len(sample_weights) == len(X)), \"there should be\" \\\r\n \" exactly one weight\" \\\r\n \" per sample\"\r\n except Exception as e:\r\n print(str(e))\r\n raise RuntimeError(\r\n \"sample_weights should be a numerical array \"\r\n \"of size n_samples and containing positive values\"\r\n )", "def fit(self, X: np.ndarray, y: np.ndarray, weights: np.ndarray) -> None:\n if not len(X) == len(y) == len(weights):\n raise ValueError(\"First dimension of arguments must be equal.\")\n if abs(weights).sum() == 0:\n raise ValueError(\"Weights must not be all 0.\")\n\n best_error = np.inf\n best_indices: Tuple[int, int] = (0, 0)\n for i in range(len(X)):\n for j in range(X.shape[1]):\n left_indices = X[:, j] < X[i, j]\n right_indices = np.logical_not(left_indices)\n left_weights = weights[left_indices]\n right_weights = weights[right_indices]\n left_y = y[left_indices]\n right_y = y[right_indices]\n\n error = (\n left_weights[left_y != -1].sum()\n + right_weights[right_y != -1].sum() # THIS IS CORRECT\n )\n error = error / weights.sum()\n if error < best_error:\n best_error = error\n best_indices = (i, j)\n\n self.threshold = X[best_indices]\n self.feature = best_indices[1]", "def fit(self, x: np.ndarray, y: np.ndarray, sample_weight: np.array = np.empty(0)) -> None:\n pass", "def apply_weights(self):\n return self.X.dot(self.get_weights())", "def fit(self, X, y=..., sample_weight=...):\n ...", "def prob(x, w):\n y = tf.constant(np.array([0., 1.]), dtype=tf.float32)\n prob_ = tf.exp(tf.matmul(x, w) * y) / (1 + tf.exp(tf.matmul(x, w)))\n return 
prob_", "def train(self, X, y):\n\n X = np.matrix(X)\n y = np.matrix(y) \n \n # Calculate hidden layer output matrix (Hinit)\n self.H = (X * self.weight.T) + self.bias\n\n # Sigmoid activation function\n self.H = self.sigmoid(self.H)\n\n # Calculate the Moore-Penrose pseudoinverse matriks \n H_moore_penrose = np.linalg.inv(self.H.T * self.H) * self.H.T\n\n # Calculate the output weight matrix beta\n self.beta = H_moore_penrose * y\n\n return self.H * self.beta", "def x2wp(xC, x={}, w={}, y=dict(Fe=1.)):\n return 100.*x2w(xC, x=x, w=w, y=y)", "def calculate_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w))-np.reshape(y,(len(y),1)))", "def _get_gradient(self, X: array, y: array):\n\n # Use predict_prob method if this is a classifier.\n if hasattr(self, \"predict_prob\"):\n y_hat = self.predict_prob(X)\n else:\n y_hat = self.predict(X)\n\n # Calculate the gradient according to the dimention of X, y.\n grad_bias = y - y_hat\n if X.ndim is 1:\n grad_weights = grad_bias * X\n elif X.ndim is 2:\n grad_weights = grad_bias[:, None] * X\n grad_weights = grad_weights.mean(axis=0)\n grad_bias = grad_bias.mean()\n else:\n raise ValueError(\"Dimension of X has to be 1 or 2!\")\n return grad_bias, grad_weights", "def w_update(self, x, y, pred_class, alpha=0.0001):\n w_new = self.w + alpha * (y - pred_class) * np.append(x, 1)\n self.w = w_new", "def getWeight(self) -> float:\n ...", "def _loss(self, X, y, param, shape, weights=None):\n intercept, coef = gs.split(param, 2)\n intercept = gs.reshape(intercept, shape)\n coef = gs.reshape(coef, shape)\n intercept = gs.cast(intercept, dtype=y.dtype)\n coef = gs.cast(coef, dtype=y.dtype)\n if self.method == \"extrinsic\":\n base_point = self.space.projection(intercept)\n penalty = self.regularization * gs.sum((base_point - intercept) ** 2)\n else:\n base_point = intercept\n penalty = 0\n tangent_vec = self.space.to_tangent(coef, base_point)\n distances = self.metric.squared_dist(self._model(X, tangent_vec, base_point), y)\n if weights is None:\n weights = 1.0\n return 1.0 / 2.0 * gs.sum(weights * distances) + penalty" ]
[ "0.7623957", "0.7152357", "0.687893", "0.68283564", "0.6542995", "0.64705515", "0.6444899", "0.64352334", "0.63989145", "0.63828194", "0.6370652", "0.63449967", "0.63418835", "0.6321859", "0.6318847", "0.6310803", "0.6279792", "0.62733525", "0.62622386", "0.62539375", "0.6229019", "0.6219828", "0.6201386", "0.6195658", "0.6182734", "0.61766577", "0.61436296", "0.6127148", "0.61203575", "0.6116825", "0.61158407", "0.6109076", "0.61061394", "0.6103444", "0.60945845", "0.6092516", "0.609226", "0.6083853", "0.6081199", "0.6081199", "0.60756534", "0.6074433", "0.6070791", "0.60604465", "0.60491174", "0.6033286", "0.6030441", "0.6009611", "0.60068685", "0.6003914", "0.5995", "0.5989274", "0.59879386", "0.59651226", "0.59511924", "0.5951011", "0.5941762", "0.5923047", "0.59207195", "0.59142184", "0.5908865", "0.59035194", "0.5900149", "0.5898889", "0.58968365", "0.5895004", "0.5893357", "0.58922666", "0.5874945", "0.5873407", "0.5871353", "0.5864325", "0.5858915", "0.58492434", "0.58464175", "0.58433723", "0.5838066", "0.58250654", "0.5822814", "0.58091474", "0.58069414", "0.58000064", "0.5798421", "0.57937473", "0.5788371", "0.5788104", "0.578522", "0.5771208", "0.57602507", "0.57601285", "0.5756066", "0.57501423", "0.5749226", "0.57457644", "0.57404554", "0.5740452", "0.57400084", "0.57381666", "0.57376635", "0.5734843", "0.5732105" ]
0.0
-1
Compute the weight parameter given X and y.
def linear_regression_invertible(X, y):
    #####################################################
    # TODO 3: Fill in your code here                    #
    #####################################################
    w = None
    X_X_T = np.dot(X.T, X)
    ev = 0
    # Keep adding 0.1 * I to X^T X until its smallest eigenvalue reaches 1e-5,
    # then solve the normal equation with the (now invertible) matrix.
    while ev < (10**-5):
        ev = np.min((np.linalg.eig(X_X_T)[0]))
        if ev < (10**-5):
            X_X_T = X_X_T + (10**-1) * np.identity(X_X_T.shape[0])
    w = np.dot(np.dot(np.linalg.inv(X_X_T), X.T), y)
    return w
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weight(self, y, xn, xo):\n\n return self._model.log_prob(y, xn) + self._model.h_weight(xn, xo) - self._kernel.log_prob(xn)", "def compute_weight(self, y, x, test_x=None, test_y=None, **kwargs):\n model = copy.copy(self)\n model.__setattr__('train_y', y)\n model.__setattr__('train_x', x)\n if test_x is not None and test_y is not None:\n model.set_valid((test_y, test_x))\n _kwargs = []\n for name, value in kwargs.items():\n # Recognize parameter \"\n if name is \"regularizer_p\":\n model.__setattr__(name, value)\n model.regularizer.set_parameter(value)\n else:\n _kwargs.append((name, value))\n _kwargs = dict(_kwargs)\n if model.calculate_weight is 'gradient':\n return model.sgd(**_kwargs)\n # elif model.calculate_weight is 'newton':\n # return model.newton(**_kwargs)\n elif model.calculate_weight is 'normalequ':\n return model.normalequ(**_kwargs)", "def fit(self, X, y):\r\n newWeight = [0.0] * self.size\r\n w = [0.0] * len(X)\r\n val = self.predict_prob(X) \r\n grad = [(y-1.0) * i[1] for i in X] \r\n grad1 = float((math.exp(-math.fsum((self.weight[f]*v for f, v in X)))) * val)\r\n grad2 = [i[1] * -1 * grad1 for i in X] \r\n for i in range(len(w)):\r\n w[i] = (grad[i] - grad2[i])\r\n \r\n w = [i*self.eta for i in w]\r\n for i in range(len(X)):\r\n newWeight[i] = self.weight[X[i][0]] -w[i]\r\n \r\n self.weight = newWeight[:]\r\n \r\n pass", "def _compute_prob_y_given_x(self, _x, _y):\n normalisation_constant = sum([\n math.exp(sum([self.weights[_feature] *\n self.feature_funcs[_feature, cls](_feature, cls)\n for _feature in _x]))\n for cls in self.classes])\n\n return math.exp(sum([\n self.weights[_feature] *\n self.feature_funcs[_feature, _y](_feature, _y)\n for _feature in _x])) / normalisation_constant", "def get_gradient(self, y, x, weight):\n y = np.reshape(y, (len(y),))\n return np.dot(x.T, sigmoid(np.dot(x, weight)) - y) \\\n + self.regularizer.get_gradient(weight)", "def objective(self, wb, X, y):\n N,_ = X.shape\n w = wb[:-1]\n b = wb[-1]\n loss = 0.0 # objective function value\n loss += self.reg_param * np.dot(b-self.b0, b-self.b0)\n loss += self.reg_param * np.dot(w-self.w0, w-self.w0)\n\n for i in range(N):\n tmpvar = np.exp(-1 * y[i] * (np.dot(w, X[i]) + b))\n loss += np.log(1 + tmpvar)\n \n return loss", "def _computeWeight(self, Y):\n\n if self.loss == WeightedCrossEntropy_DistanceMap:\n return np2cuda(surfacedist(Y), self.dev)\n\n elif self.loss == WeightedCrossEntropy_ClassBalance:\n # Number of voxels per image\n numvox = np.prod(Y.shape[2:])\n # Number of 1s in each channel in each sample\n ones = np.sum(Y, axis=(2,3,4))\n # The weights are inversely proportional to the number of ones\n # so that if there are very few voxels from one category\n weights_ = 1 - ones/numvox\n Y = np.moveaxis(np.moveaxis(Y, 0, -1), 0, -1)\n weights = np.moveaxis(np.moveaxis(Y*weights_, -1, 0), -1, 0)\n return np2cuda(np.sum(weights, axis=1), self.dev)\n\n else:\n return None", "def lr_weights(x : pd.DataFrame, y : pd.DataFrame):\n reg = LR().fit(x, y)\n return reg#.coef_[0]", "def calculate_weights(y_train: np.ndarray) -> np.ndarray:\n\n weight_class = class_weight.compute_class_weight('balanced', np.unique(y_train), y_train)\n return weight_class", "def balance_training_weight(w, y):\n sample_weight = w.copy()\n neg_mask = (y == 0)\n pos_mask = (y == 1)\n \n bkg_sum_weight = np.sum(sample_weight[neg_mask])\n sig_sum_weight = np.sum(sample_weight[pos_mask])\n\n sample_weight[pos_mask] = sample_weight[pos_mask] / sig_sum_weight\n sample_weight[neg_mask] = sample_weight[neg_mask] / 
bkg_sum_weight\n return sample_weight", "def _update(self, x: np.ndarray, y: int):\n decision = self.weights.dot(x)\n v_t = x @ np.diag(np.diag(self._sigma)) @ x.T\n m_t = y * decision\n loss = (self._phi * math.sqrt(v_t) - m_t)\n #print(loss)\n if loss > 0:\n # We scale our learning rate (alpha) using the weight/cost\n alpha_t = self.class_weight_[y] * self._get_alpha(m_t, v_t)\n u_t = 0.25 * (-alpha_t * v_t * self._phi + math.sqrt(\n alpha_t ** 2 * v_t ** 2 * self._phi ** 2 + 4 * v_t)) ** 2\n beta_t = alpha_t * self._phi / (math.sqrt(u_t) +\n alpha_t * self._phi * v_t)\n sigma = np.expand_dims(x @ self._sigma, axis=0)\n self.weights += alpha_t * y * np.squeeze(sigma)\n self._sigma -= beta_t * sigma.T @ sigma", "def compute_w(self):\n self.pinvX = np.linalg.pinv(self.X)\n return np.dot(self.pinvX, self.y)", "def conditional_embedding_to_weights(mu_y_x, y, k_yy, x_q):\n return _solve_posdef(k_yy, mu_y_x(y, x_q))[0]", "def costFunction(self, x, y ):\n self.yEst = self.forward_propagate(x)\n sqErrors = ( self.yEst - y ) ** 2\n J = sqErrors.sum() / 2\n return J", "def fit(self, X, y): \n # initial values set by set_params when objet was initialised\n _, D = X.shape \n wb_init = np.zeros(D+1) # initial guess for weight vector\n w, b = self.get_params() # set_params inits to zero vector for wb\n wb_init[:-1] = w # self.w0\n wb_init[-1] = b # self.b0\n\n wb_opt, _, _ = fmin_l_bfgs_b(func = self.objective, \n x0 = wb_init, \n fprime = self.objective_grad, \n args = (X,y))\n \n self.set_params(wb_opt[:-1], wb_opt[-1])\n return", "def get_weights(y_true, prior_probs, params):\n # Parameters\n _lambda = params['lambda']\n Q = prior_probs.shape[0]\n\n # The weights are proportional to\n all_w = ((1 -_lambda)*prior_probs + _lambda/Q)**(-1) # (Q,)\n\n # The weighted distribution must sum to one: E[w] = sum(p_tilde*w) = 1\n all_w = all_w / tf.reduce_sum(prior_probs * all_w) # (Q,)\n\n # Find q_star\n q_star = tf.argmax(y_true, axis=3) # (b, H, W)\n\n # Select weights\n all_v = tf.gather(all_w, q_star) # (b, H, W)\n\n # Cast to float32, which is necessary for further calculations\n all_v = tf.cast(all_v, tf.float32) # (b, H, W)\n\n return all_v", "def fit(self, X, y):\n # compute W, b\n # pull W from the normal distribution, and b from 0->2 pi\n\n self.N, d = X.shape\n\n # weights go from R^d -> R^D\n self.W = rng.normal(loc=0, scale=1, size=(self.D, d))\n # bias is in R, need D terms\n self.b = rng.uniform(0, 2*np.pi, size=self.D)\n\n self.Z = self.compute_features(X)\n \n self._fit(y)\n self.fitted = True\n\n # now solve the least-squares problem:\n # min_w ||Z'w - y||_2^2 + \\lambda ||w||_2^2\n\n # done via linear equation solver, eg:\n # A x = b to solve x\n # use cholesky solver: https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.cho_solve.html\n\n # self.L = cholesky(self.kernel, lower=True)\n # self.alpha = cho_solve((self.L, True), y)", "def grad_loss_wrt_w(self, x, y):\n (N, D) = x.shape\n k1 = np.matmul(x, np.transpose(self.w)) + self.b\n y1 = y.reshape((N,1))\n dr = (1 + np.exp(1 * y1 * k1))\n nr = -y1 * x\n c1 = nr/dr\n #(N1,D1) = self.w.shape\n #c2 = np.zeros((N1,D1))\n #for i in range(N):\n # c2[i-1] = c1[i-1,:] + c1[i,:]\n #l_w = c2/N\n l_w1 = np.mean(c1,axis=0)\n return l_w1\n\n\n #raise NotImplementedError", "def get_sample_weight(y):\n class_counts = np.bincount(y)\n class_weight = 1 / class_counts\n sample_weight = class_weight[y]\n sample_weight = sample_weight / sample_weight.sum() * len(y)\n return sample_weight", "def pre_weight(self, y):\n\n if not 
isinstance(self._kernel, (TransformedDistribution, Independent)):\n return self._model.log_prob(y, self._kernel.loc)\n elif isinstance(self._kernel, Independent):\n return self._model.log_prob(y, self._kernel.base_dist.loc)\n\n # TODO: Not entirely sure about this, but think this is the case\n at = next(k for k in self._kernel.transforms if isinstance(k, AffineTransform))\n\n return self._model.log_prob(y, at.loc)", "def fit(self, X, y):\n self.centers = self._select_centers(X)\n self.ampls = self._select_ampl(y)\n G = self._calculate_interpolation_matrix(X)\n self.weights = np.dot(np.linalg.pinv(G), y)", "def _ols(self, X, y):\n # add bias \n X = self._add_bias(X)\n\n # optimise coefficients\n xTx = np.dot(X.T, X)\n inverse_xTx = np.linalg.inv(xTx)\n xTy = np.dot(X.T, y)\n bhat = np.dot(inverse_xTx, xTy)\n\n # pull out weights and bias\n b = bhat[0]\n w = bhat[1:]\n\n return w, b", "def ComputeWeights(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def MSE(X, y, w):\r\n n = X.shape[0]\r\n f = X @ w\r\n J = np.sum(np.power((y - f), 2)) / n\r\n return J", "def cost_func(w, X, y):\n y_pred = np.dot(X, w)\n err = np.sum(np.square(y_pred - y)) / (2 * len(y))\n\n return err", "def compute_weights(model, params, y_obs, LB_type='NWJ'):\n\n # Define PyTorch variables\n x = Variable(\n torch.from_numpy(params).type(torch.FloatTensor),\n requires_grad=True)\n y = Variable(\n torch.from_numpy(y_obs).type(torch.FloatTensor),\n requires_grad=True)\n\n # Pass observed data and parameters through the model\n w = list()\n for idx in range(len(x)):\n T = model(x[idx], y).data.numpy()\n if LB_type == 'NWJ':\n w.append(np.exp(T - 1))\n else:\n raise NotImplementedError\n w = np.array(w)\n\n return w.reshape(-1)", "def J(W1, b1, W2, b2, x, y):\n yhat = forwardPropagate(W1, b1, W2, b2, x) # OLD: yhat = softmax(x.dot(w))\n return crossEntropy(y, yhat)", "def local_weight(\r\n point: np.mat, training_data_x: np.mat, training_data_y: np.mat, bandwidth: float\r\n) -> np.mat:\r\n weight = weighted_matrix(point, training_data_x, bandwidth)\r\n W = (training_data.T * (weight * training_data)).I * (\r\n training_data.T * weight * training_data_y.T\r\n )\r\n return W", "def grad_l2(w, X, y, **kwargs):\n return -1 * np.dot(X.T, y - np.dot(X, w)) / X.shape[0]", "def objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . 
w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l", "def gradient_weight(X, Y, model):\n W = model['weight']\n b = model['bias']\n weight_decay = model['weight_decay']\n\n # YOUR CODE HERE\n # Write the gradient with respect to the weights.\n return np.add(np.subtract(np.dot(np.transpose(predict(X, model)), X), np.dot(np.transpose(Y), X)), 2 * LAMBDA * np.transpose(model['weight'])) #np.zeros((X.shape[1], Y.shape[1]))", "def _compute_weights(self, X, target_X, method='l2'):\n if method == 'proj_l2' or method == 'proj_l2_nonorm':\n #\n # At first calculate unrestricted weights: (X.T)^-1\n # Then project answer onto Unit simplex\n # \n target_center = np.mean(target_X, axis=0) # * X.shape[0]\n # Solve the system\n w = np.linalg.lstsq(X.T, target_center.T, rcond='warn')[0]\n weights = project_onto_simplex(w, normalize=True if method == 'proj_l2' else False)\n print(f\"Weights sum: {np.sum(weights)}\")\n weights /= np.sum(weights)\n return weights\n\n #\n # Pure solution, which make unrestricted weights\n #\n # Compute target center multiplied by number of source rows\n # target_center = np.mean(target_X, axis=0) * X.shape[0]\n # Solve the system\n # print(\"X^T shape: ({}), target_center^T shape: ({})\".format(X.T.shape, target_center.T.shape))\n # w = np.linalg.lstsq(X.T, target_center.T, rcond='warn')[0]\n # print(w)\n # return w.T\n if method == 'dist' or method == 'dist2':\n print(\"Using distance weighting\")\n target_center = np.mean(target_X, axis=0)\n residuals = X - target_center\n norm = np.linalg.norm(residuals, axis=1)\n print(f\"Max norm: {np.max(norm)}\")\n if method == 'dist':\n weights = np.max(norm) - norm # inverse weights\n elif method == 'dist2':\n small_eps = 1e-9\n weights = 1.0 / (norm + small_eps)\n weights = np.exp(weights) # softmax\n print(f\"Weights sum: {np.sum(weights)}\")\n weights /= np.sum(weights)\n return weights\n\n # Compute target center multiplied by number of source rows\n target_center = np.mean(target_X, axis=0) # * X.shape[0]\n # Solve the system\n q = cp.Constant(value=target_center.flatten())\n x_ = cp.Constant(value=X)\n\n w = cp.Variable(X.shape[0])\n # lam = self.optimization_lambda # 0.001\n # M = len(J)\n M = np.linalg.norm(X) ** 2 # target_X)\n print(\"M:\", M)\n lam = self.reg_lambda # 0.1\n if lam == 0:\n print(\"No regularization\")\n # cp.norm2(cp.matmul(X, beta) - Y)**2\n objective = cp.Minimize(cp.sum_squares(q.T - w.T @ x_)) # cp.Minimize(cp.sum_squares(q - x_ * w))\n else:\n objective = cp.Minimize(cp.sum_squares(q.T - w.T @ x_) / M + lam * cp.norm2(w)) # + lam * cp.norm2(w))\n constraints = [w >= 0, cp.sum_entries(w) == 1] #, w >= self.simplex_lower_boundary]\n prob = cp.Problem(objective, constraints)\n\n print(\"Problem is prepared\")\n\n try:\n result = prob.solve()\n except Exception as ex:\n print(\"Exception occurred: {}\".format(ex))\n print(\"Using SCS solver\")\n result = prob.solve(solver=cp.SCS, verbose=False)\n print(\"Problem status: {}\".format(prob.status))\n try:\n weights = w.value.A.flatten()\n except Exception as ex:\n print(\"Can't compute weights, use uniform distribution\")\n weights = np.ones((X.shape[0],)) / X.shape[0]\n print(weights)\n weights[weights < 0] = 0\n weights_sum = np.sum(weights)\n print(\"Weights sum: {}\".format(weights_sum))\n if weights_sum != 1.0: # probably always true\n weights /= weights_sum\n return weights", "def calculate_cost(x, y, weights):\r\n predictions = compute_prediction(x, 
weights)\r\n cost = np.mean(-y * np.log(predictions) - (1 - y) * np.log(1 - predictions))\r\n return cost", "def fit(self, x, y):\n self.w = np.random.rand(1,x.shape[1])\n self.b = 0\n loss = []\n for i in range(self.n_epochs):\n self.b = self.b - (self.lr * self.grad_loss_wrt_b(x,y))\n self.w = self.w - (self.lr * self.grad_loss_wrt_w(x,y))\n loss.append(self.loss(x,y))\n return loss", "def compute_gradient (w, x, y):\n (n,d) = x.shape\n g = np.zeros(d)\n for i in range(0,d):\n g[i] = (w*x-y)*np.transpose(x[i])\n g += 0.5*w\n return g", "def mutual_information(x, y, w):\r\n \r\n\r\n total_entropy = entropy(y,w)\r\n\r\n partitioned_x = partition(x)\r\n weighted_entropy = 0\r\n # calculate the weighted entropy over the partition of x\r\n vals,counts= np.unique(x,return_counts=True)\r\n for key in partitioned_x:\r\n weighted_entropy += np.sum([(np.sum(w[partitioned_x[key]])/np.sum(w)) * entropy(y[partitioned_x[key]],w[partitioned_x[key]])])\r\n\r\n information_gain = total_entropy - weighted_entropy\r\n return information_gain", "def density_weights(w_q, y, theta_y):\n # Setup the optimization problem\n up_scale = _gaussian_norm(theta_y)\n a_down_scale = _gaussian_norm(np.sqrt(3) * theta_y)\n a_body = _gaussian(y, y, np.sqrt(3) * theta_y)\n b_down_scale = _gaussian_norm(np.sqrt(2) * theta_y)\n b_body = _gaussian(y, y, np.sqrt(2) * theta_y)\n a_matrix = (up_scale / a_down_scale) * a_body\n b_matrix = (up_scale / b_down_scale) * b_body\n a = a_matrix # (n, n)\n b = -np.dot(b_matrix, w_q) # [(n, n_q), (n,)]\n\n # Initialize the normalized weights\n w_q_pdf_init = clip_normalize(w_q)\n w_q_pdf = np.zeros(w_q_pdf_init.shape)\n\n # Find the normalized weights using snucq\n if w_q.ndim == 2:\n n, n_q = w_q.shape\n for i in np.arange(n_q):\n w_q_pdf[:, i] = _snucq(a, b[:, i], w_q_pdf_init[:, i])\n elif w_q.ndim == 1:\n w_q_pdf = _snucq(a, b, w_q_pdf_init)\n else:\n raise ValueError('Weights have the wrong dimensions')\n\n # Return the normalized weights\n return w_q_pdf", "def fit(self, X, y, sample_weight, **kwargs):\n pass", "def score(self, X, y, sample_weight=None):\n return super().score(X, y, sample_weight)", "def score(self, X, y, sample_weight=None):\n return super().score(X, y, sample_weight)", "def find_weights(X, b):\n X = np.append(X, np.ones((X.shape[0], 1)), axis=1)\n w, _, _, _ = np.linalg.lstsq(X, b)\n return w", "def costFunction(theta, X, y):\n\n # Initialize some useful values\n m = y.size # number of training examples\n J = np.sum(np.array([inner(theta, xi, yi) for xi, yi in zip(X, y)]))\n J /= m\n\n\n return J", "def compute_gradient(self, X, y, weights):\n sigmoid = self.sigmoid(np.dot(X, weights))\n return np.dot(X.T, y - sigmoid)", "def _l2(self, y, x, y_minus_g, w_0):\n # Initialize weight vector to return\n w_1 = np.zeros(len(w_0))\n \n for j in range(len(x)):\n reg = float(w_0[j]) / (self._lambda*self._num_training)\n w_1[j] = w_0[j] + self._epsilon*(x[j]*y_minus_g - reg)\n \n return w_1", "def evaluate(self, w, X, y):\n # help avoid mistakes (as described in the assignment) by\n # potentially reshaping our arguments\n w = ensure_1d(w)\n y = ensure_1d(y)\n\n # Prediction is linear combination\n y_hat = X @ w\n # Residual is difference between ground truth and prediction\n # (\"what's left\" after your predicition)\n residuals = y - y_hat\n # Squared residuals gives us the objective function value\n f = 0.5 * np.sum(residuals ** 2)\n\n # Analytical gradient, written in mathematical form first\n # and then translated into Python.\n # The parentheses on the first term are 
just a small optimization:\n # this way, we do two matrix-vector multipliciations,\n # instead of a (more expensive) matrix-matrix mult and a matrix-vector\n g = X.T @ (X @ w) - X.T @ y\n return f, g", "def model(theta, x):\n\tw, b = theta\n\treturn w * x + b", "def evaluate(self, w, X, y):\n # help avoid mistakes (as described in the assignment) by\n # potentially reshaping our arguments\n w = ensure_1d(w)\n y = ensure_1d(y)\n\n \"\"\"YOUR CODE HERE FOR Q2.3\"\"\"\n raise NotImplementedError()", "def forward_pass(X, target_Y, W):\n\n pred_Y = activation(weighted_sum(W, X))\n print(\"\\tI/P:\", X, \" O/P:\", target_Y, \" W:\", W, \" W_Sum:\", round(weighted_sum(W, X), 3))\n\n if pred_Y != target_Y:\n for j in range(len(W)):\n W[j] = update_weight(W[j], pred_Y, target_Y, X[j])\n\n return W", "def objective_function(theta, X, y):\n # m number of training instances\n m = X.shape[0]\n jtheta = sum((np.dot(X, theta) - y)**2) / (2.0*m)\n return jtheta", "def bws(x, y, **kwargs):\n\tx.sort()\n\ty.sort()\n\tnpx = np.array(x)\n\tnpy = np.array(y)\n\n\txs = np.unique(npx)\n\tys = np.unique(npy)\n\txys = set(xs).union(set(ys))\n\taxy = np.array(list(xys))\n\taxy.sort()\n\n\tG = np.array([len(axy[np.where(axy <= xi)]) for xi in xs])\n\tH = np.array([len(axy[np.where(axy <= yi)]) for yi in ys])\n\n\tn = len(G)\n\tm = len(H)\n\tfn = float(n)\n\tfm = float(m)\n\n\tN = np.linspace(1,n,num=n)\n\tM = np.linspace(1,m,num=m)\n\n\txt1 = np.power(G - N*(fm + fn)/fn, 2.0)\n\txtt = N/(fn+1.0)\n\txt2 = xtt*(1 - xtt)*(fm * (fm+fn)/fn)\n\tBx = np.sum(xt1/xt2)/fn\n\t\n\tyt1 = np.power(H - M*(fm + fn)/fm, 2.0)\n\tytt = M/(fm+1.0)\n\tyt2 = ytt*(1 - ytt)*(fn * (fm+fn)/fm)\n\tBy = np.sum(yt1/yt2)/fm\n\n\tB = (Bx+By)/2.0\n\n\tprint \"B = \", B\n\t\n\tJ = 3\n\tif \"j\" in kwargs:\n\t\tJ = kwargs[\"j\"]\n\t\n\treturn compute_xi(B, J)", "def computeGradient(self, X, y, w):\n n = len(X)\n if self.loss == 'linear':\n gradient = -2 * np.dot(X.T, (y - X.dot(w)))\n elif self.loss == 'logistic':\n g = self.logistic(X, w)\n gradient = -2 * np.dot(X.T, (y - g) * g * (1 - g))\n elif self.loss == 'perceptron':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = ((np.dot(X, w) >= 0).astype(int) != y)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = -np.dot(usedX.T, usedY)\n elif self.loss == 'svm':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = (np.dot(X, w) * newY < 1)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = 2 * w - self.C * np.dot(usedX.T, usedY)\n gradient[0] = gradient[0] + 2 * w[0]\n\n return gradient", "def evaluate(self, w, X, y):\n pass # this is because it's a base class, it will be implemented below.", "def updateWeights(inputs, outputs, learning_rate, y, weights):\n for i in range(len(weights)):\n weights[i] = weights[i] + learning_rate * (outputs - y) * inputs[i]\n return weights", "def weighted_rmsd(x, y, dim=None, weights=None):\n dev = (x - y) ** 2\n dev_mean = weighted_mean(dev, dim, weights)\n return np.sqrt(dev_mean)", "def calculate_gradient(y, tx, w): \n return tx.T@(sigmoid(tx@w)-y)", "def gradW(self, X, y, W):\n num_x = X.shape[0]\n # if i==y, then softmax_grad = preds_i-1;\n # otherwise, softmax_grad = preds_i\n preds = softmax(np.matmul(X, W)) #preds.shape(N,C)\n preds[list(range(num_x)), y] -= 1\n # grad = X.T*softmax_grad\n return np.matmul(X.T, preds)/num_x", "def __call__(self, x: Tensor, model: Module, gt_y: Tensor):\n prediction = model(x)\n return self.weight * self.norm(prediction, gt_y)", "def 
activation_function(X):\n\tz = np.sum(w*x+b)\n\treturn z", "def learn(self, Xtrain, ytrain):\n numsamples = Xtrain.shape[0]\n Xless = Xtrain[:,self.params['features']]\n self.weights = np.dot(np.dot(np.linalg.inv(np.dot(Xless.T,Xless)/numsamples + (self.params['regwgt'] * np.identity(np.shape(Xless)[1]))), Xless.T),ytrain)/numsamples", "def update(self, X, y):\n proba = self.predict_proba(X)\n top_loss = proba - y\n bias_gradient = np.sum(top_loss)\n weight_gradient = (top_loss).T.dot(X)\n\n # the gradient update\n self.b = self.b - self.lrate * bias_gradient\n self.W = self.W - self.lrate * weight_gradient", "def score(self, X, y, weights=None):\n y_pred = self.predict(X)\n if weights is None:\n weights = 1.0\n\n mean = FrechetMean(self.metric, verbose=self.verbose).fit(y).estimate_\n numerator = gs.sum(weights * self.metric.squared_dist(y, y_pred))\n denominator = gs.sum(weights * self.metric.squared_dist(y, mean))\n\n return 1 - numerator / denominator if denominator != 0 else 0.0", "def polyFunction(x,weights):\n y=0\n for i in range (0,len(weights)):\n y+= weights[i]*(x**i)\n return y", "def y(x):\n x1, x2, x3, x4 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]\n return 1 + 0.3 * x1 - 0.6 * x2 ** 2 - 0.2 * x3 ** 3 + 0.5 * x4 ** 4", "def update_weights(x_train, y_train, weights, learning_rate):\r\n predictions = compute_prediction(x_train, weights)\r\n weights_delta = np.dot(x_train.T, y_train - predictions)\r\n m = y_train.shape[0]\r\n weights += learning_rate / float(m) * weights_delta\r\n return weights", "def entropy(y,w):\r\n\r\n\t# my original entropy function commented below is not working as desired. The below implementation is based on from Sai Ram Chappidi's explanation\r\n\r\n # y_partition = partition(y)\r\n # elements,counts = np.unique(y,return_counts = True)\r\n # entropy=0\r\n\r\n # for i in range(len(elements)):\r\n # entropy += ((-(np.sum(w[y_partition[i]])))/np.sum(w))*np.log2(np.sum(w[y_partition[i]])/np.sum(w))\r\n # return entropy\r\n\r\n entropy = 0\r\n # two hypothesis cases 0,1\r\n h = {0: 0, 1: 0}\r\n leny = len(y)\r\n for i in range(leny):\r\n # if y is 0 add 0 to the weight\r\n if y[i] == 0:\r\n h[0] += w[i]\r\n # if y is 1 add 1 to the weight\r\n elif y[i] == 1:\r\n h[1] += + w[i]\r\n # summing all the weighted values \r\n val_sum = h[0] + h[1]\r\n\r\n # entropy calculation\r\n for j in range(len(h)):\r\n h[j] = h[j]/val_sum\r\n # to prevent divide by zero\r\n if h[j] != 0:\r\n entropy += h[j] * np.log2(h[j])\r\n entropy = -(entropy)\r\n return entropy", "def getWeights(self,dist,xin,yin):\r\n \r\n Ns = len(dist)\r\n \r\n # Construct the LHS matrix C\r\n C=np.ones((Ns+1,Ns+1))\r\n for i in range(0,Ns):\r\n C[i,i]=0\r\n for j in range(i+1,Ns):\r\n D = np.sqrt((xin[i]-xin[j])**2+(yin[i]-yin[j])**2)\r\n C[i,j] = self.semivariogram(D)\r\n C[j,i] = C[i,j]\r\n\r\n C[Ns,Ns]=0\r\n\r\n # Calculate the inverse of C \r\n Cinv = np.linalg.inv(C)\r\n \r\n # Loop through each model point and calculate the vector D\r\n gamma = np.ones((Ns+1,1))\r\n \r\n for j in range(0,Ns):\r\n gamma[j,0]= self.semivariogram( dist[j])\r\n # Solve the matrix to get the weights\r\n W = np.dot(Cinv,gamma)\r\n W = W[:-1,:]\r\n \r\n #print np.size(gamma,axis=0),np.size(gamma,axis=1) \r\n return 1.0/float(Ns)*np.ones((Ns,1))", "def gen_weights(self, f_target):\n\n # calculate x and psi\n x_track = self.cs.rollout()\n psi_track = self.gen_psi(x_track)\n\n # efficiently calculate BF weights using weighted linear regression\n self.w = jnp.zeros((self.n_dmps, self.n_bfs))\n for d in range(self.n_dmps):\n # 
spatial scaling term\n k = self.goal[d] - self.y0[d]\n for b in range(self.n_bfs):\n numer = jnp.sum(x_track * psi_track[:, b] * f_target[:, d])\n denom = jnp.sum(x_track ** 2 * psi_track[:, b])\n self.w[d, b] = numer / denom\n if abs(k) > 1e-5:\n self.w[d, b] /= k\n\n self.w = jnp.nan_to_num(self.w)", "def update_weights(self):\n\n\n self.w += self.learn_rate * (self.X.T.dot(self.T - self.Y)\n - self.reg_L1 * np.sign(self.w)\n - self.reg_L2 * 2*self.w)", "def fit(self, X, y, sample_weight=...):\n ...", "def fit(self, x, y, weights=None, **kwargs):\n from scipy.optimize import fmin_l_bfgs_b\n\n assert len(y) == x.shape[0]\n assert weights is None or len(weights) == x.shape[0]\n\n y0 = y == 0\n x0 = x[y0, :]\n x1 = x[~y0, :]\n\n if weights is None:\n loss_weights = None\n else:\n loss_weights = [weights[y0], weights[~y0]]\n\n def _loss_for_optimize(params):\n return LogisticRegression._loss_gradient(x0, x1, params[0], params[1:], self.lam, loss_weights)\n\n params0 = np.zeros(1 + x.shape[1])\n params_opt, loss_opt, info_opt = fmin_l_bfgs_b(_loss_for_optimize, params0, disp=0, **kwargs)\n print((\"%s funcalls: %s\" % (info_opt['task'], info_opt['funcalls'])))\n\n self.b = params_opt[0]\n self.w = params_opt[1:]", "def estimate_nb(x,y,smoothing):\n labels = set(y)\n doc_counts = defaultdict(float)\n weights = defaultdict(float)\n\n vocab = set()\n for base_features in x:\n for word in base_features.keys():\n vocab.add(word)\n\n for label in y:\n doc_counts[label] += 1\n\n\n for label in labels:\n weights[(label, OFFSET)] = np.log(doc_counts[label] / sum(doc_counts.values()))\n log_probabilities = estimate_pxy(x, y, label, smoothing, vocab)\n for word in log_probabilities:\n weights[(label, word)] = log_probabilities[word]\n\n return weights", "def get_weights(self):", "def _l1(self, y, x, y_minus_g, w_0):\n # Initialize weight vector to return\n w_1 = np.zeros(len(w_0))\n \n for j in range(len(x)):\n reg = float(self._sign(w_0[j])) / (self._lambda*self._num_training)\n w_1[j] = w_0[j] + self._epsilon*(x[j]*y_minus_g - reg)\n \n return w_1", "def _calc_gradients(self, X, y, y_hat):\n # calculate gradient of weight and bias\n grad_b = 2 * np.mean(y_hat - y)\n grad_W = 2 * np.mean(np.matmul((y_hat - y), X))\n return grad_W, grad_b", "def reweigh(X, y, S):\n\n\tX['label'] = y\n\n\tW = pd.DataFrame({'group': [1, 1, 0, 0], 'label': [1, 0, 1, 0]})\n\n\t# Calculate weight for each combination of sensitive attribute and class,\n\t# given by the expected probability of an example being in a certain group\n\t# and class if sensitive attribute/class are independent, divided by the\n\t# observed probability\n\tweights = [[len(X[X[S] == s]) * len(X[X['label'] == c]) \\\n\t\t\t\t/ float(len(X) * len(X[(X[S] == s) & (X['label'] == c)])) \\\n\t\t\t\tfor c in [1, 0]] for s in [1, 0]]\n\n\tW['weight'] = [i for j in weights for i in j]\n\t\n\tX_prime = X.copy()\n\tX_prime['weight'] = 0\n\n\t# Add weights according to class/group\n\tfor s in [1, 0]:\n\t\tfor c in [1, 0]:\n\t\t\tw = W.loc[(W['group'] == s) & (W['label'] == c), 'weight']\n\t\t\tX_prime.loc[(X[S] == s) & (X['label'] == c), 'weight'] = w.iloc[0]\n\n\tX.drop('label', axis = 1, inplace = True)\n\ty_prime = X_prime['label'].tolist()\n\tX_prime = X_prime.drop('label', axis = 1)\n\n\treturn(X_prime, y_prime)", "def local_weight_regression(\r\n training_data_x: np.mat, training_data_y: np.mat, bandwidth: float\r\n) -> np.mat:\r\n m, n = np.shape(training_data_x)\r\n ypred = np.zeros(m)\r\n\r\n for i, item in enumerate(training_data_x):\r\n ypred[i] = item 
* local_weight(\r\n item, training_data_x, training_data_y, bandwidth\r\n )\r\n\r\n return ypred", "def calculate_gradient(y, tx, w):\n\n\tret = tx.T.dot(sigmoid(np.dot(tx, w)) - y)\n\treturn ret", "def RMSE(X, y, w):\r\n n = X.shape[0]\r\n f = X @ w\r\n J = np.sqrt(np.sum(np.power((y - f), 2)) / n)\r\n return J", "def fit(self, X, y, sample_weight=..., **fit_params):\n ...", "def propagate(w, b, x, y):\n\n m = x.shape[1]\n\n # Forward Propagation (from x to cost)\n # Compute activation\n activation = 1 / (1 + np.exp(-(np.dot(w.T, x) + b)))\n # compute cost\n cost = (-1 / m) * np.sum(\n (np.dot(np.log(activation), y.T)) +\n np.dot(np.log(1 - activation), (1 - y).T)\n )\n\n # Backward Propagation (to find gradient descent)\n dw = (1 / m) * np.dot(x, (activation - y).T)\n db = (1 / m) * np.sum(activation - y)\n\n cost = np.squeeze(cost)\n grads = {\"dw\": dw,\n \"db\": db}\n\n return grads, cost", "def train_normal_equation(self, X, y):\r\n self.weights = np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), y)\r\n self.bias = 0\r\n \r\n return self.weights, self.bias", "def cost(self, X=None, y=None):\n\t\tif X is None:\n\t\t\tX = self.x_data\n\t\telse:\n\t\t\tn_samples = np.size(X, 0)\n\t\t\tX = np.hstack((np.ones((n_samples, 1)), (X - np.mean(X,0)) / np.std(X,0)))\n\n\t\tif y is None:\n\t\t\ty = self.y_data\n\t\telse:\n\t\t\ty = y[:, np.newaxis]\n\n\t\ty_pred = X @ self.weights\n\n\t\t# SSE formula\n\t\tcost = 1 - (((y - y_pred) ** 2 ).sum() / ((y- y.mean())** 2).sum())\n\n\t\treturn cost", "def weighted_sum(W, X):\n\n if len(W) != len(X):\n print(\"Dimension of weight vector should be same as input vector.\")\n return\n\n else:\n H = 0\n\n for i in range(len(W)):\n H += (W[i] * X[i])\n \n return H", "def compute_loss(self, X, y):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n\r\n # Computing the loss using the below formula\r\n # Loss = -(1/m)*sum( (y_i)*log(σ(wTx_i)) + (1-y_i)*log(1 - σ(wTx_i)))\r\n # m = number of examples and i for ith example\r\n\r\n loss = 0\r\n X = np.append(X, np.array([[1]]*X.shape[0]), axis=1)\r\n # for idx,example in enumerate(X):\r\n # loss = loss + y[idx] * np.log(self.sigmoid(np.dot(example, self.w))) + (1 - y[idx]) * np.log(1 - self.sigmoid(np.dot(example, self.w)))\r\n # loss = -loss/ X.shape[0]\r\n\r\n loss = -np.mean(y * np.log(self.sigmoid(np.dot(X, self.w))) + (1 - y) * np.log(1 - self.sigmoid(np.dot(X, self.w))))\r\n return loss", "def _kernel(self, bw, X, x):\n return (1.0 / np.sqrt(2 * np.pi) / bw) * np.exp(\n -((X - x) ** 2) / (bw ** 2 * 2.0)\n )", "def update(w, x, y, l2_param=0):\n # get dimension d and current prediction mu of shape Nx1\n d = x.shape.as_list()[1]\n mu = tf.sigmoid(tf.matmul(x, w))\n\n # build R of shape Nx1 (element wise multiplication)\n r_flat = mu * (1 - mu)\n\n # build regularisation term and hessian H = X'RX of shape dxd\n l2_regularisation = l2_param * tf.eye(d)\n h = tf.matmul(tf.transpose(x), r_flat * x) + l2_regularisation\n\n # do single-value decomposition of H\n sigma, u, v = tf.svd(h, full_matrices=True, compute_uv=True)\n sigma = tf.expand_dims(sigma, 1)\n\n # calculate Moore-Penrose-pseudo-inverse of H via single value decomposition\n s_mppinv = tf.where(tf.not_equal(sigma, 0), 1 / sigma, tf.zeros_like(sigma))\n h_mppinv = tf.matmul(v, s_mppinv * tf.transpose(u))\n\n # calculate update step\n w_delta = tf.matmul(h_mppinv, tf.matmul(tf.transpose(x), mu - y) + l2_param * w)\n return w_delta", "def posterior_weights(prior_embedding, x, y, theta_x, theta_y, x_q,\n epsil=0, 
delta=0,\n kbr='tikhonov',\n k_x=_gaussian, k_y=_gaussian,\n k_xx=None, k_yy=None,\n v=None):\n if v is None:\n k_xx = k_x(x, x, theta_x) if not k_xx else k_xx\n k_yy = k_y(y, y, theta_y) if not k_yy else k_yy\n v = _posterior_fields[kbr](prior_embedding(y), k_xx, k_yy, epsil, delta)\n return np.dot(v, k_x(x, x_q, theta_x))", "def get_weight(module,\n shape,\n weight_var='weight',\n kernel_init=None):\n\n return module.param(name=weight_var, shape=shape, initializer=kernel_init)", "def check_X_y_weights(X, y=None, sample_weights=None):\r\n # X checking\r\n try:\r\n assert np.isfinite(np.max(X)), \"X should only contain finite \" \\\r\n \"numerical values\"\r\n except Exception as e:\r\n print(str(e))\r\n raise RuntimeError(\"X should be a numerical array\")\r\n\r\n # y checking\r\n if y is not None:\r\n try:\r\n assert not np.isnan(np.min(y))\r\n except Exception as e:\r\n print(str(e))\r\n raise RuntimeError(\r\n \"y should not contain NaN values\"\r\n )\r\n\r\n try:\r\n assert (np.ndim(X) == np.ndim(y) == 2), \"X and y should be \" \\\r\n \"2-dim arrays\"\r\n assert (len(X) == len(y)), \"X and y should have the same \" \\\r\n \"'n_sample' first dimension\"\r\n except Exception as e:\r\n print(str(e))\r\n raise RuntimeError(\r\n \"y should be a numerical array of the same size than X\"\r\n )\r\n\r\n try:\r\n assert ((np.min(y) >= 0)\r\n and\r\n (np.max(y) <= 1)), \"y values should be between 0 and 1\"\r\n return y\r\n except Exception as e:\r\n # rounding issues might produce values outside of the\r\n # [0,1] range that we must correct\r\n y_corrected = y.copy()\r\n y_corrected[y < 0] = 0\r\n y_corrected[y > 1] = 1\r\n print(\"y should contain only values between 0 and 1\")\r\n print(str(e))\r\n\r\n # returning the corrected y (bounded between 0 and 1)\r\n return y_corrected\r\n\r\n # sample_weights checking\r\n if sample_weights is not None:\r\n try:\r\n assert (np.min(sample_weights) >= 0), \"sample weights should\" \\\r\n \" be positive values\"\r\n assert (len(sample_weights) == len(X)), \"there should be\" \\\r\n \" exactly one weight\" \\\r\n \" per sample\"\r\n except Exception as e:\r\n print(str(e))\r\n raise RuntimeError(\r\n \"sample_weights should be a numerical array \"\r\n \"of size n_samples and containing positive values\"\r\n )", "def fit(self, X: np.ndarray, y: np.ndarray, weights: np.ndarray) -> None:\n if not len(X) == len(y) == len(weights):\n raise ValueError(\"First dimension of arguments must be equal.\")\n if abs(weights).sum() == 0:\n raise ValueError(\"Weights must not be all 0.\")\n\n best_error = np.inf\n best_indices: Tuple[int, int] = (0, 0)\n for i in range(len(X)):\n for j in range(X.shape[1]):\n left_indices = X[:, j] < X[i, j]\n right_indices = np.logical_not(left_indices)\n left_weights = weights[left_indices]\n right_weights = weights[right_indices]\n left_y = y[left_indices]\n right_y = y[right_indices]\n\n error = (\n left_weights[left_y != -1].sum()\n + right_weights[right_y != -1].sum() # THIS IS CORRECT\n )\n error = error / weights.sum()\n if error < best_error:\n best_error = error\n best_indices = (i, j)\n\n self.threshold = X[best_indices]\n self.feature = best_indices[1]", "def fit(self, x: np.ndarray, y: np.ndarray, sample_weight: np.array = np.empty(0)) -> None:\n pass", "def apply_weights(self):\n return self.X.dot(self.get_weights())", "def fit(self, X, y=..., sample_weight=...):\n ...", "def prob(x, w):\n y = tf.constant(np.array([0., 1.]), dtype=tf.float32)\n prob_ = tf.exp(tf.matmul(x, w) * y) / (1 + tf.exp(tf.matmul(x, w)))\n return 
prob_", "def train(self, X, y):\n\n X = np.matrix(X)\n y = np.matrix(y) \n \n # Calculate hidden layer output matrix (Hinit)\n self.H = (X * self.weight.T) + self.bias\n\n # Sigmoid activation function\n self.H = self.sigmoid(self.H)\n\n # Calculate the Moore-Penrose pseudoinverse matriks \n H_moore_penrose = np.linalg.inv(self.H.T * self.H) * self.H.T\n\n # Calculate the output weight matrix beta\n self.beta = H_moore_penrose * y\n\n return self.H * self.beta", "def x2wp(xC, x={}, w={}, y=dict(Fe=1.)):\n return 100.*x2w(xC, x=x, w=w, y=y)", "def calculate_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w))-np.reshape(y,(len(y),1)))", "def _get_gradient(self, X: array, y: array):\n\n # Use predict_prob method if this is a classifier.\n if hasattr(self, \"predict_prob\"):\n y_hat = self.predict_prob(X)\n else:\n y_hat = self.predict(X)\n\n # Calculate the gradient according to the dimention of X, y.\n grad_bias = y - y_hat\n if X.ndim is 1:\n grad_weights = grad_bias * X\n elif X.ndim is 2:\n grad_weights = grad_bias[:, None] * X\n grad_weights = grad_weights.mean(axis=0)\n grad_bias = grad_bias.mean()\n else:\n raise ValueError(\"Dimension of X has to be 1 or 2!\")\n return grad_bias, grad_weights", "def w_update(self, x, y, pred_class, alpha=0.0001):\n w_new = self.w + alpha * (y - pred_class) * np.append(x, 1)\n self.w = w_new", "def getWeight(self) -> float:\n ...", "def _loss(self, X, y, param, shape, weights=None):\n intercept, coef = gs.split(param, 2)\n intercept = gs.reshape(intercept, shape)\n coef = gs.reshape(coef, shape)\n intercept = gs.cast(intercept, dtype=y.dtype)\n coef = gs.cast(coef, dtype=y.dtype)\n if self.method == \"extrinsic\":\n base_point = self.space.projection(intercept)\n penalty = self.regularization * gs.sum((base_point - intercept) ** 2)\n else:\n base_point = intercept\n penalty = 0\n tangent_vec = self.space.to_tangent(coef, base_point)\n distances = self.metric.squared_dist(self._model(X, tangent_vec, base_point), y)\n if weights is None:\n weights = 1.0\n return 1.0 / 2.0 * gs.sum(weights * distances) + penalty" ]
[ "0.7623957", "0.7152357", "0.687893", "0.68283564", "0.6542995", "0.64705515", "0.6444899", "0.64352334", "0.63989145", "0.63828194", "0.6370652", "0.63449967", "0.63418835", "0.6321859", "0.6318847", "0.6310803", "0.6279792", "0.62733525", "0.62622386", "0.62539375", "0.6229019", "0.6219828", "0.6201386", "0.6195658", "0.6182734", "0.61766577", "0.61436296", "0.6127148", "0.61203575", "0.6116825", "0.61158407", "0.6109076", "0.61061394", "0.6103444", "0.60945845", "0.6092516", "0.609226", "0.6083853", "0.6081199", "0.6081199", "0.60756534", "0.6074433", "0.6070791", "0.60604465", "0.60491174", "0.6033286", "0.6030441", "0.6009611", "0.60068685", "0.6003914", "0.5995", "0.5989274", "0.59879386", "0.59651226", "0.59511924", "0.5951011", "0.5941762", "0.5923047", "0.59207195", "0.59142184", "0.5908865", "0.59035194", "0.5900149", "0.5898889", "0.58968365", "0.5895004", "0.5893357", "0.58922666", "0.5874945", "0.5873407", "0.5871353", "0.5864325", "0.5858915", "0.58492434", "0.58464175", "0.58433723", "0.5838066", "0.58250654", "0.5822814", "0.58091474", "0.58069414", "0.58000064", "0.5798421", "0.57937473", "0.5788371", "0.5788104", "0.578522", "0.5771208", "0.57602507", "0.57601285", "0.5756066", "0.57501423", "0.5749226", "0.57457644", "0.57404554", "0.5740452", "0.57400084", "0.57381666", "0.57376635", "0.5734843", "0.5732105" ]
0.0
-1
Compute the weight parameter given X, y and lambda.
def regularized_linear_regression(X, y, lambd):
    #####################################################
    # TODO 4: Fill in your code here                    #
    #####################################################
    w = None
    X_X_T = np.dot(X.T, X)
    X_X_T += lambd * np.identity(X_X_T.shape[0])
    w = np.dot(np.dot(np.linalg.inv(X_X_T), X.T), y)
    return w
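A minimal usage sketch of the closed-form ridge solution computed by the document above, w = (X^T X + lambda * I)^(-1) X^T y. The toy data, seed, and variable names here are illustrative assumptions and are not part of the dataset row.

```python
# Hypothetical check (assumes only numpy): the closed-form weights recover
# the generating weights on noiseless-ish toy data when lambda is small.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))            # 50 samples, 3 features
true_w = np.array([1.0, -2.0, 0.5])
y = X @ true_w + 0.01 * rng.normal(size=50)

lambd = 0.1
# Same computation as the document field above, written with the @ operator.
w = np.linalg.inv(X.T @ X + lambd * np.identity(X.shape[1])) @ X.T @ y
print(w)                                 # approximately [1.0, -2.0, 0.5]
```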
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def value(a, y, weights, lambda_):\n\t\treturn 0.5* (np.linalg.norm(a-y)**2) / (a.shape[0] * a.shape[1])\n\t\t# return unregularize + (0.5*lambda_*np.sum(np.square(weights[-1])) / (a.shape[0] * a.shape[1])) ", "def value(a, y, weights, lambda_):\n\t\treturn np.sum(np.nan_to_num(-y*np.log(a + 1e-15)-(1-y)*np.log(1-a + 1e-15))) / (a.shape[0] * a.shape[1])\n\t\t# return unregularize + (0.5*lambda_*np.sum(np.square(weights[-1])) / (a.shape[0] * a.shape[1])) ", "def penalized_logistic_regression(y, tx, w, lambda_):\n penality = lambda_*np.linalg.norm(w)**2\n diag = np.diag(np.repeat(2*lambda_, len(w)))\n return calculate_loss(y,tx,w) + penality, calculate_gradient(y,tx,w) + lambda_*2*w, calculate_hessian(y,tx,w) + diag", "def learning_by_penalized_gradient(y, tx, w, gamma, lambda_):\n\tgrad, H = penalized_logistic_regression(y, tx, w, lambda_)\n\n\thgrad = np.linalg.inv(H).dot(grad)\n\n\tw = w - gamma * hgrad\n\n\treturn w", "def penalized_logistic_regression(y, tx, w, lambda_):\n\tgradient = calculate_gradient(y, tx, w) + 2.0 * lambda_ * w\n\tH = calculate_hessian(y, tx, w) + 2.0 * lambda_\n\t\n\treturn gradient, H", "def least_squares_fn(y, tx, w, lambda_=None):\n gradient, error = compute_gradient(y, tx, w)\n loss = calculate_mse(error)\n\n return loss, gradient", "def learning_by_penalized_gradient_descent(y, tx, w, gamma, lambda_):\n loss = calculate_loss(y, tx, w) + lambda_ * np.squeeze(w.T.dot(w))\n grad = calculate_gradient(y, tx, w) + 2 * lambda_ * w\n w = w-gamma*grad\n return w, loss", "def ComputeCost(Y, W, P, my_lambda):\n l = [np.log(P[i][np.argmax(Y[i])]) for i in range(len(Y))]\n l = -np.mean(l)\n J = l\n for w in W:\n J += my_lambda * (w**2).sum()\n return J, l", "def cost_function (model, X, y, lambda_reg=0.):\n\n m = len (y)\n pred = model.predict (X)\n cost = 1. / (2. * m) * ((pred - y)**2).sum () + \\\n lambda_reg / (2. * m) * (model.coef_**2).sum ()\n return (cost)", "def lrCostFunction(theta, X, y, lambda_):\n if X.ndim == 1:\n X = X.reshape(1, -1)\n\n if y.dtype == bool:\n y = y.astype(int)\n\n # Initialize some useful values\n m = len(y) # number of training examples\n\n # ====================== YOUR CODE HERE ======================\n # Instructions: Compute the cost of a particular choice of theta.\n # You should set J to the cost.\n #\n # Hint: The computation of the cost function and gradients can be\n # efficiently vectorized. For example, consider the computation\n #\n # sigmoid(X * theta)\n #\n # Each row of the resulting matrix will contain the value of the\n # prediction for that example. 
You can make use of this to vectorize\n # the cost function and gradient computations.\n #\n\n z = X @ theta\n h = sigmoid(z)\n\n theta_ = np.r_[0, theta[1:]]\n\n J = (-y @ np.log(h) - (1 - y) @ np.log(1 - h)) / m\n J += lambda_ * sum(theta_**2) / (2 * m)\n\n grad = (h - y) @ X / m\n grad += lambda_ * theta_ / m\n\n # =============================================================\n\n return J, grad", "def get_weights(y_true, prior_probs, params):\n # Parameters\n _lambda = params['lambda']\n Q = prior_probs.shape[0]\n\n # The weights are proportional to\n all_w = ((1 -_lambda)*prior_probs + _lambda/Q)**(-1) # (Q,)\n\n # The weighted distribution must sum to one: E[w] = sum(p_tilde*w) = 1\n all_w = all_w / tf.reduce_sum(prior_probs * all_w) # (Q,)\n\n # Find q_star\n q_star = tf.argmax(y_true, axis=3) # (b, H, W)\n\n # Select weights\n all_v = tf.gather(all_w, q_star) # (b, H, W)\n\n # Cast to float32, which is necessary for further calculations\n all_v = tf.cast(all_v, tf.float32) # (b, H, W)\n\n return all_v", "def weight(self, y, xn, xo):\n\n return self._model.log_prob(y, xn) + self._model.h_weight(xn, xo) - self._kernel.log_prob(xn)", "def _lambda(self, x, y, t, x_his, y_his, t_his):\n lam = self.mu + tf.reduce_sum(self._kernel(x - x_his, y - y_his, t - t_his), axis=0)\n return lam", "def learning_by_penalized_gradient(y, tx, w, gamma, lambda_):\n\n #on test avec Newton\n\n loss,gradient,_ = penalized_logistic_regression(y,tx,w,lambda_)\n\n w = w - gamma*gradient\n return loss, w,gradient", "def _lambda(xi):#xi=xis; xi=Phi.dot(w); xi = 0\n div0 = np.divide(1, (2*xi),where=xi!=0)\n return (div0 * (sigmoid(xi)-(1/2)))", "def compute_lambda_mle(X: np.ndarray) -> float:\n\n Poisson._check_input_data(X=X)\n Poisson._check_support(X=X)\n\n lambda_ = X.mean()\n return lambda_", "def costFunctionReg(theta, X, y, Lambda):\n # Initialize some useful values\n m = len(y) # number of training examples\n j = costFunction(theta, X, y)\n j += (Lambda/(2*m))*np.sum(theta[1:]**2)\n return j", "def objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . 
w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l", "def ridge_regression(y, tx, lambda_):\n D = tx[0].size\n\n # w = (Xt + λ' * I)^(-1) * Xt * y\n lambdap = lambda_ * (2 * len(y))\n A = tx.T.dot(tx) + lambda_ * np.eye(D)\n w = np.linalg.inv(A).dot(tx.T.dot(y))\n\n loss = compute_mse_loss(y, tx, w)\n\n return w, loss", "def weights_cost(self, n, lambda_r=0.0):\n return lambda_r * np.sum(np.sum(w ** 2) for w in self.weights) / (2 * n)", "def step_maxL_penalized_gradient_descent(y, tx, w, gamma, lambda_):\n grad=calculate_maxL_gradient(y,tx,w)+ 2*lambda_*w \n loss=loss_maxL(y, tx, w)+ lambda_* np.linalg.norm(w)**2\n # update w by gradient\n w=w-gamma*grad\n return w, loss", "def activation_function(X):\n\tz = np.sum(w*x+b)\n\treturn z", "def f(z, X, Y, _lambda):\r\n w = z[:-1]\r\n beta = np.squeeze(z[-1])\r\n term_1_d = (X * (Y[:, np.newaxis] - g(X.dot(w) + beta))).sum(axis=0)\r\n term_d_1 = (Y[:, np.newaxis] - g(X.dot(w) + beta)).sum() + 2 * _lambda * beta\r\n return np.hstack((term_1_d, term_d_1))[:, np.newaxis]", "def cost_function(X, y, theta, _lambda, num_labels, n_hidden_layers=1):\n m, n = X.shape\n intercept = ones((m, 1), dtype=float64)\n X = append(intercept, X, axis=1)\n\n _h = h(X, theta, n_hidden_layers) # model hypothesis\n\n J = 0\n for c in range(num_labels):\n _J = dot(1 - (y == c).T, log(1 - _h[:, c]))\n _J = _J + dot((y == c).T, log(_h[:, c]))\n J = J - (1 / m) * sum(_J)\n\n theta_squared_term = 0\n for j in range(len(theta)):\n theta_squared_term += sum(power(theta[j][:, 1:], 2))\n\n J = J + (_lambda / (2 * m)) * theta_squared_term\n\n return J", "def compute_loss_lasso(y, tx, w, lambda_):\n e = y - tx.dot(w)\n\n return e.dot(e)/(2 * len(e)) + lambda_ * sum(abs(w))", "def objective(self, wb, X, y):\n N,_ = X.shape\n w = wb[:-1]\n b = wb[-1]\n loss = 0.0 # objective function value\n loss += self.reg_param * np.dot(b-self.b0, b-self.b0)\n loss += self.reg_param * np.dot(w-self.w0, w-self.w0)\n\n for i in range(N):\n tmpvar = np.exp(-1 * y[i] * (np.dot(w, X[i]) + b))\n loss += np.log(1 + tmpvar)\n \n return loss", "def compute_cost_function(X, Y, theta, lambda_factor, temp_parameter):\n h = compute_probabilities(X, theta, temp_parameter)\n\n cost = 0\n for i in range(X.shape[0]):\n for j in range(theta.shape[0]):\n if Y[i] == j:\n cost += np.log(h[j,i])\n\n cost = -cost / X.shape[0]\n\n theta = np.power(theta, 2)\n\n cost += lambda_factor / 2 * theta.sum()\n\n return cost", "def ridge_regression(y, tx, lambda_):\n N = tx.shape[0]\n a = tx.T.dot(tx) + 2 * N * lambda_ * np.identity(tx.shape[1])\n b = tx.T.dot(y)\n w = np.linalg.solve(a, b)\n loss = compute_loss_LS(y, tx, w) \n return loss, w", "def cost(theta, X, y, Lambda=0.0):\n m = X.shape[0] # number of samples\n\n h = hypothesis(theta, X) \n \n suma = (-y*np.log(h) - (1 - y)*np.log(1 - h)).sum() \n\n reg_term = 0.0\n if Lambda:\n reg_term = (Lambda/(2*m))*((theta[1:]**2).sum()) # skip theta-0\n\n return (1/m)*suma + reg_term", "def weights(self):\n \n n = self.n\n lambda_ = self.alpha**2 * (n +self.kappa) - n\n \n c = .5 / (n + lambda_)\n Wc = np.full(2*n + 1, c)\n Wm = np.full(2*n + 1, c)\n Wc[0] = lambda_ / (n + lambda_) + (1 - self.alpha**2 + self.beta)\n Wm[0] = lambda_ / (n + lambda_)\n \n return Wm, Wc", "def ridge_regression(y, tx, lambda_):\n lambda_prime = lambda_ * 2*tx.shape[0]\n\n a = tx.T.dot(tx) + lambda_prime*np.eye(tx.shape[1])\n b = tx.T.dot(y)\n w_star = np.linalg.solve(a, b)\n\n loss = compute_loss(y, tx, 
w_star)\n\n return w_star, loss", "def objective(beta, lambdat, X, y):\n return 1/len(y) * (np.sum(\n (np.maximum(0, 1-((y[:, np.newaxis]*X).dot(beta)))**2)))\\\n + lambdat * np.linalg.norm(beta)**2", "def compute_gradient_logreg_regl2(y, tx, w, lambda_):\n grad = compute_gradient_logreg(y, tx, w)\n penal_grad = grad + 2 * lambda_ * w\n\n return penal_grad", "def calc_objective_per_iter(self, w_i, lambda_):\r\n\t\tlinear_term = self.linear_term(self._sparse_features_matrix, w_i)\r\n\t\tnormalization_term = self.normalization_term(self._histories_dict, w_i, self._all_possible_sparse_matrix)\r\n\t\tregularization = self.regularization_term(lambda_, w_i)\r\n\t\tempirical_counts = self.empirical_counts(self._sparse_features_matrix)\r\n\t\texpected_counts = self.expected_counts(self._histories_dict,self._num_total_features, w_i, self._all_possible_sparse_matrix,self._all_possible_sparse_matrix_trans)\r\n\t\tregularization_grad = self.reg_grad(lambda_, w_i)\r\n\t\tlikelihood = linear_term - normalization_term - regularization\r\n\t\tgrad = empirical_counts - expected_counts - regularization_grad\r\n\r\n\t\treturn (-1) * likelihood, (-1) * grad", "def cost_func(w, X, y):\n y_pred = np.dot(X, w)\n err = np.sum(np.square(y_pred - y)) / (2 * len(y))\n\n return err", "def compute_loss_logreg_regl2(y, tx, w, lambda_):\n loss = compute_loss_logreg(y, tx, w)\n penal_loss = loss + lambda_ * w.dot(w)\n\n return penal_loss", "def getLambda(inp):\n\treturn 0.9/(getK1(inp) - getCMean(inp))", "def fit(self, y, x, n=1, epsilon=.01, regularization=None, _lambda=1.0):\n # Initialize the weight vector\n w_0 = np.zeros(x.shape[1])\n \n # Variables used for learning weights\n self._epsilon = epsilon\n self._num_training = x.shape[0]\n self._lambda = _lambda\n \n print 'Epsilon: {}'.format(self._epsilon)\n \n # Pick the correct update method\n if regularization == 'l1':\n print 'L1 regularization'\n print 'Lambda: {}'.format(self._lambda)\n update_func = self._l1\n elif regularization == 'l2':\n print 'L2 regularization'\n print 'Lambda: {}'.format(self._lambda)\n update_func = self._l2\n else:\n print 'No regularization'\n update_func = self._no_reg\n \n # Number of iterations\n for _ in range(n):\n\n # Loop over all the data points\n for i in range(x.shape[0]):\n \n y_minus_g = y[i] - sigmoid( np.dot(w_0, x[i]) )\n w_0 = update_func(y[i], x[i], y_minus_g, w_0)\n\n # Save the learned weights\n self.weights = w_0\n return None", "def objective_function(theta, X, y):\n # m number of training instances\n m = X.shape[0]\n jtheta = sum((np.dot(X, theta) - y)**2) / (2.0*m)\n return jtheta", "def ridge_regression(y, tx, lambda_):\n x_t = tx.T\n lambd = lambda_ * 2 * len(y)\n w = np.linalg.solve (np.dot(x_t, tx) + lambd * np.eye(tx.shape[1]), np.dot(x_t,y)) \n loss = compute_mse(y, tx, w)\n\n return w,loss", "def nlls_weights_fit(A: np.ndarray,\n y: np.ndarray,\n lmbda: float = 0.0,\n min_weight: float = 0.0) -> np.ndarray:\n n, m = A.shape\n Q = A.T @ A + lmbda * np.eye(m)\n c = - A.T @ y\n x = cp.Variable(m)\n prob = cp.Problem(cp.Minimize(0.5 * cp.quad_form(x, Q) + c.T @ x),\n [cp.sum(x) == 1,\n x >= min_weight])\n result = prob.solve()\n if np.isfinite(result):\n return x.value\n print(\"Can't solve optimization problem.\")\n return np.zeros(m)", "def compute_cost_and_grad_with_reg(theta, X, y, lambda_):\n m = X.shape[0]\n y_hat = sigmoid(X @ theta.T)\n J = - (1 / m) * np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)) + np.sum(lambda_ / (2 * m) * theta[1:] ** 2)\n temp = theta\n temp[0] = 0\n grad = (1 / m) * X.T 
@ (y_hat - y) + lambda_ / m * temp\n return J, grad", "def compute_gradient_lasso(y, tx, w, lambda_):\n e = y - tx.dot(w)\n subgrad = lambda_ * np.sign(w)\n\n return -tx.T.dot(e)/len(e) + subgrad", "def mloss(_lambda):\n\n def loss(y_true, y_pred):\n return _lambda * qloss(y_true, y_pred) + (1 - _lambda) * score(y_true, y_pred)\n\n return loss", "def grad(theta, X, y, lambda_):\n # ... dopolnite (naloga 1, naloga 2)\n\n l = []\n for i, e in enumerate(theta):\n l.append(1 / len(y) * sum([(h(x, theta) - yi) * x[i] for x, yi in zip(X, y)]) + 2 * lambda_ * e)\n\n return np.array(l)", "def ridge_regression(y, tx, lambda_):\n N,D = tx.shape\n\n aI = 2 * N * lambda_ * np.identity(D)\n a = tx.T.dot(tx) + aI\n b = tx.T.dot(y)\n\n w = np.linalg.solve(a, b)\n return w, compute_mse(y, tx, w)", "def ridge_regression(y, tx, lambda_):\n # computing the gram matrix\n gram = tx.T@tx\n # diagonalizing the gram matrix\n u, d, ut = np.linalg.svd(gram, full_matrices=True)\n # adding the lmbda matrix to the diagonal matrix to prevent approximation problems\n d += 2*gram.shape[0]*lambda_\n # solving the least squares linear problem\n w = np.linalg.solve(np.diag(d).dot(ut), ut.dot(tx.T.dot(y)))\n return w, compute_cost(y, tx, w)", "def lrCostFunction(theta,X,y, lambda_reg):\n m = np.size(y)\n grad = np.zeros(np.size((theta)))\n J_base, grad = costFunction(theta, X, y)\n \n\n reg_cost = (lambda_reg / (2.0 * m)) * np.sum(theta[1:] ** 2)\n \n reg_gradient = (lambda_reg / m) * theta\n reg_gradient[0] = 0\n cost = J_base + reg_cost\n return cost, grad + reg_gradient", "def compute_weight(self, y, x, test_x=None, test_y=None, **kwargs):\n model = copy.copy(self)\n model.__setattr__('train_y', y)\n model.__setattr__('train_x', x)\n if test_x is not None and test_y is not None:\n model.set_valid((test_y, test_x))\n _kwargs = []\n for name, value in kwargs.items():\n # Recognize parameter \"\n if name is \"regularizer_p\":\n model.__setattr__(name, value)\n model.regularizer.set_parameter(value)\n else:\n _kwargs.append((name, value))\n _kwargs = dict(_kwargs)\n if model.calculate_weight is 'gradient':\n return model.sgd(**_kwargs)\n # elif model.calculate_weight is 'newton':\n # return model.newton(**_kwargs)\n elif model.calculate_weight is 'normalequ':\n return model.normalequ(**_kwargs)", "def compute_cost(AL, Y, parameters ,lambd):\n L = len(parameters) // 2\n m = Y.shape[1]\n cost = -1 / m * np.sum(np.nan_to_num(Y * np.log(AL) + (1-Y) * np.log(1-AL)))\n cost+= 0.5*(lambd/m)*sum(np.linalg.norm(parameters['W' + str(i)])**2 for i in range(1,L))\n return cost", "def weight(x, J, delta, sigma, z):\n return numpy.exp(-(x - x.T)**2/(2 * sigma**2)) * (J + delta * z)", "def ridge_regression(y, tx, lambda_):\n N = y.shape[0]\n I = np.identity(tx.shape[1])\n lb = lambda_*(2*N)\n w = np.linalg.solve(tx.T.dot(tx)+lb*I, tx.T.dot(y))\n \n #Compute mse loss\n err = y-tx.dot(w)\n loss = (1/(2*N))*((err.T).dot(err))\n return w, loss", "def tune_lambda(Xtrain, ytrain, Xval, yval):\n #####################################################\n # TODO 5: Fill in your code here #\n #####################################################\n\n bestlambda = None\n mean_abs_err = 10000000\n power = -19\n while power < 20:\n lambda0 = 10 ** (power)\n w = regularized_linear_regression(Xtrain, ytrain, lambda0)\n err = mean_absolute_error(w, Xval, yval)\n if err < mean_abs_err:\n mean_abs_err = err\n bestlambda = lambda0\n power = power + 1\n return bestlambda", "def _kernel(self, bw, X, x):\n return (1.0 / np.sqrt(2 * np.pi) / bw) * np.exp(\n -((X - x) 
** 2) / (bw ** 2 * 2.0)\n )", "def compute_w(self):\n self.pinvX = np.linalg.pinv(self.X)\n return np.dot(self.pinvX, self.y)", "def lr_weights(x : pd.DataFrame, y : pd.DataFrame):\n reg = LR().fit(x, y)\n return reg#.coef_[0]", "def tune_lambda(Xtrain, ytrain, Xval, yval):\n #####################################################\n # TODO 5: Fill in your code here #\n #####################################################\n bestlambda = None\n err = 1\n\n for v in range(-19,20):\n if v>=0:\n val = float(\"1e+\"+str(v))\n else:\n val = float(\"1e\"+str(v))\n w = regularized_linear_regression(Xtrain,ytrain, val)\n error = mean_absolute_error(w, Xval,yval)\n if err > error:\n err = error\n bestlambda = val\n return bestlambda", "def costFunction(self, x, y ):\n self.yEst = self.forward_propagate(x)\n sqErrors = ( self.yEst - y ) ** 2\n J = sqErrors.sum() / 2\n return J", "def _update_weights(self, xi, target):\n output = self.net_input(xi)\n error= (target - output)\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost =0.5* error**2\n return cost", "def _update_weights(self, xi, target):\n output = self.net_input(xi)\n error = (target - output)\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost = 0.5 * error**2\n return cost", "def _update_weights(self, xi, target):\n output = self.net_input(xi)\n error = (target - output)\n self.w_[1:] += self.eta * xi.dot(error)\n self.w_[0] += self.eta * error\n cost = 0.5 * error**2\n return cost", "def __bestLambda(self):\n\t\t\n\t\t# Determine starting value for brent-method (to avoid local minimum).\n\t\tself.startValue = self.__findStartValue()\n\t\t\t\n\t\t# Check if there exists a minimum within the range of self.lamStart. \n\t\t# Otherwise, use fmin because we cannot provide an interval. 
\n\t\tif (self.startIdx != 0 and self.startIdx != self.nStartValues-1):\n\t\t\ts = scipy.optimize.brent(self.__minBayesianEvidence, brack=(self.logLamStart[self.startIdx-1], self.logLamStart[self.startIdx], self.logLamStart[self.startIdx+1]))\n\t\telse:\n\t\t\ts = scipy.optimize.fmin(self.__minBayesianEvidence, self.startValue, disp=False)[0]\n\t\t\n\t\treturn 10**s", "def costFunction(theta, X, y):\n\n # Initialize some useful values\n m = y.size # number of training examples\n J = np.sum(np.array([inner(theta, xi, yi) for xi, yi in zip(X, y)]))\n J /= m\n\n\n return J", "def grad(X, y, nn_params, _lambda, input_layer_size,\n hidden_layer_size, num_labels, n_hidden_layers=1):\n theta = unravel_params(nn_params, input_layer_size, hidden_layer_size,\n num_labels, n_hidden_layers)\n\n # Initi gradient with zeros\n theta_grad = empty((n_hidden_layers + 1), dtype=object)\n for i in range(len(theta)):\n theta_grad[i] = zeros(shape=theta[i].shape, dtype=float64)\n\n m, n = X.shape\n intercept = ones((m, 1), dtype=float64)\n X = append(intercept, X, axis=1)\n\n for t in range(m):\n\n z, a = feed_forward(X[[t], :], theta, n_hidden_layers)\n delta = back_propagation(y[t, :], theta, a, z,\n num_labels, n_hidden_layers)\n\n for l in range(len(theta_grad)):\n theta_grad[l] = theta_grad[l] + dot(delta[l + 1].T, a[l])\n\n for i in range(len(theta_grad)):\n theta_grad[i] = (1 / m) * theta_grad[i]\n\n # regularization\n for i in range(len(theta_grad)):\n theta_grad[i][:, 1:] = theta_grad[i][:, 1:] + \\\n (_lambda / m) * theta[i][:, 1:]\n\n flat_theta_grad = append(theta_grad[0].flatten(), theta_grad[1].flatten())\n for i in range(2, len(theta_grad)):\n flat_theta_grad = append(flat_theta_grad, theta_grad[i].flatten())\n\n return flat_theta_grad", "def model(theta, x):\n\tw, b = theta\n\treturn w * x + b", "def updateWeights(inputs, outputs, learning_rate, y, weights):\n for i in range(len(weights)):\n weights[i] = weights[i] + learning_rate * (outputs - y) * inputs[i]\n return weights", "def update_weights(x_train, y_train, weights, learning_rate):\r\n predictions = compute_prediction(x_train, weights)\r\n weights_delta = np.dot(x_train.T, y_train - predictions)\r\n m = y_train.shape[0]\r\n weights += learning_rate / float(m) * weights_delta\r\n return weights", "def objective_function(self, x):\n self._set_params_transformed(x)\n return -self.log_likelihood() - self.log_prior()", "def blurry_degree(lambdas):\n return lambdas[0] / (numpy.sum(lambdas) + 0.001)", "def ridge_regression(y, tx, lambda_, loss_function=rmse):\n lamb = 2 * y.shape[0] * lambda_\n w = np.linalg.solve(tx.T @ tx + lamb * np.identity(tx.shape[1]), tx.T @ y)\n loss = loss_function(y, tx, w)\n return w, loss", "def update_weights(self, BMU, currentIteration, input_data, lambda1):\n # Learning rate selection for each epoch\n lr = self.currentLearningRate(currentIteration, lambda1)\n \n # Neighborhood radius selection for each epoch\n radius = self.currentNeighbourhoodRadius(currentIteration, lambda1)\n \n # Iterating through randomly initialized weights and update weights\n for i in range(len(self.weights[0])):\n for j in range(len(self.weights)):\n tmpDist = np.power(BMU[0] - i, 2) + np.power(BMU[1] - j, 2)\n theta = np.exp(-tmpDist / (2*np.power(radius, 2)))\n for k in range(self.input_dimension):\n self.weights[i][j][k] = self.weights[i][j][k] + lr * theta * (input_data[k] - self.weights[i][j][k])", "def objective(self,w):\n diffs = self.get_y_times_diffs(self.get_split_weights(w))\n #print diffs, sigmoid(diffs)\n obj = 
-np.sum(np.log(sigmoid(diffs))) #negative, since minimising\n # regularisation\n obj += 0.5 * self.alpha * np.dot(w[:self.interp_index[0]], w[:self.interp_index[0]])\n return obj", "def polyFunction(x,weights):\n y=0\n for i in range (0,len(weights)):\n y+= weights[i]*(x**i)\n return y", "def lambda_(s, x_r):\n return LAMBDA / linalg.norm(np.array(s) - np.array(x_r))", "def loss(self, X, y=None, lambda_reg=0.0):\n \n # Unpack variables from the params dictionary\n N, D = X.shape\n\n # Compute the forward pass\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n scores, cache_list = self.network_forward(X)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n \n #############################################################################\n # TODO: Compute for the loss. This should include L2 regularization for #\n # the weights of each layer. #\n #############################################################################\n loss_softmax, dloss_softmax = self.softmax_cross_entropy_loss(scores, y)\n loss = loss_softmax\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n \n #############################################################################\n # TODO: Compute the derivatives of the weights and biases. Store the #\n # results in the grads dictionary. For example, grads['W1'] should store #\n # the gradient on the weights W of the first layer, and be a matrix of #\n # same size. 
#\n #############################################################################\n grads = self.network_backward(dloss_softmax, cache_list)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads", "def gradientFunctionReg(theta, X, y, Lambda): \n y = np.squeeze(y)\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad[1:] = grad[1:] + Lambda*theta[1:]\n grad /= m\n\n return grad", "def calculate_cost(x, y, weights):\r\n predictions = compute_prediction(x, weights)\r\n cost = np.mean(-y * np.log(predictions) - (1 - y) * np.log(1 - predictions))\r\n return cost", "def _learn_using_GD(self, y, tx, w, fn, gamma, lambda_, regularization):\n loss, grad = fn(y, tx, w, lambda_)\n loss, grad = self.apply_regularization(w, loss, grad, regularization, lambda_, tx.shape[0])\n w = w - gamma * grad\n return loss, w", "def fit(self, X, y):\r\n newWeight = [0.0] * self.size\r\n w = [0.0] * len(X)\r\n val = self.predict_prob(X) \r\n grad = [(y-1.0) * i[1] for i in X] \r\n grad1 = float((math.exp(-math.fsum((self.weight[f]*v for f, v in X)))) * val)\r\n grad2 = [i[1] * -1 * grad1 for i in X] \r\n for i in range(len(w)):\r\n w[i] = (grad[i] - grad2[i])\r\n \r\n w = [i*self.eta for i in w]\r\n for i in range(len(X)):\r\n newWeight[i] = self.weight[X[i][0]] -w[i]\r\n \r\n self.weight = newWeight[:]\r\n \r\n pass", "def _determine_new_weight(self, weight, input, currentNeuron, bmu):\n return weight \\\n + (self.neighborhood.fn(currentNeuron, bmu) \\\n * self.learning_rate * (input - weight))", "def cost_function(x, N, w, dt):\n yh = np.abs(fftkernel(x, w / dt)) # density\n # formula for density\n C = np.sum(yh ** 2) * dt - 2 * np.sum(yh * x) * \\\n dt + 2 / np.sqrt(2 * np.pi) / w / N\n C = C * N * N\n # formula for rate\n # C = dt*sum( yh.^2 - 2*yh.*y_hist + 2/sqrt(2*pi)/w*y_hist )\n return C, yh", "def _compute_prob_y_given_x(self, _x, _y):\n normalisation_constant = sum([\n math.exp(sum([self.weights[_feature] *\n self.feature_funcs[_feature, cls](_feature, cls)\n for _feature in _x]))\n for cls in self.classes])\n\n return math.exp(sum([\n self.weights[_feature] *\n self.feature_funcs[_feature, _y](_feature, _y)\n for _feature in _x])) / normalisation_constant", "def _optfn(self, x):\n\n logger.debug(\" optfn(theta=%s)\", str(x))\n\n wmx = max(self.weights) * self.weighttrunc\n\n ip = []\n for i,w in enumerate(self.weights):\n if w < wmx:\n continue\n ip.append((i,w,x))\n\n if self.pool is None:\n itr = map(self.worker.loglik_grad, ip)\n else:\n itr = self.pool.imap_unordered(_pool_loglik_grad, ip, 10)\n\n if self._prior_shape is None:\n ll = 0.\n grad = np.zeros(len(x))\n else:\n ll = sum(sp.special.xlogy(self._prior_shape-1,x)-(x/self._prior_scale))\n grad = (self._prior_shape - 1)/x - 1/self._prior_scale\n\n for l,g in itr:\n ll += l\n grad += g\n\n logger.debug(\" optfn=%g\", ll)\n\n return -ll, -grad", "def compute_regularized_square_loss_gradient(X, y, theta, lambda_reg):\n return compute_square_loss_gradient(X,y,theta) + 2*lambda_reg*theta", "def cost(self,x):\n Mx = self.generate_vector()\n self.prior.M.mult(x,Mx)\n return .5*Mx.inner(x)", "def __call__(self, x):\n return np.dot(x, self.weights[-1])", "def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma):\n # initializing the weights\n w = initial_w\n\n # regularized logistic regression\n for iter in range(max_iters):\n # updating the 
weights\n grad = log_likelihood_gradient(y, tx, w)+2*lambda_*w\n # if iter % (max_iters//2) == 0:\n #print(log_likelihood_loss(y, tx, w)+lambda_*np.squeeze(w.T.dot(w)))\n w -= gamma*grad\n loss = log_likelihood_loss(y, tx, w)+lambda_*np.squeeze(w.T.dot(w))\n return w, loss", "def compute_regularized_square_loss_gradient(X, y, theta, lambda_reg):\n #TODO\n P = (np.dot(X, theta)-y)\n m = X.shape[0]\n\n return (2/m)*np.dot(X.T, P)+(2*lambda_reg*theta)", "def weight_expr(self, t, w_plus, z, value):\n pass", "def compute_weights(model, params, y_obs, LB_type='NWJ'):\n\n # Define PyTorch variables\n x = Variable(\n torch.from_numpy(params).type(torch.FloatTensor),\n requires_grad=True)\n y = Variable(\n torch.from_numpy(y_obs).type(torch.FloatTensor),\n requires_grad=True)\n\n # Pass observed data and parameters through the model\n w = list()\n for idx in range(len(x)):\n T = model(x[idx], y).data.numpy()\n if LB_type == 'NWJ':\n w.append(np.exp(T - 1))\n else:\n raise NotImplementedError\n w = np.array(w)\n\n return w.reshape(-1)", "def gen_weights(self, f_target):\n\n # calculate x and psi\n x_track = self.cs.rollout()\n psi_track = self.gen_psi(x_track)\n\n # efficiently calculate BF weights using weighted linear regression\n self.w = jnp.zeros((self.n_dmps, self.n_bfs))\n for d in range(self.n_dmps):\n # spatial scaling term\n k = self.goal[d] - self.y0[d]\n for b in range(self.n_bfs):\n numer = jnp.sum(x_track * psi_track[:, b] * f_target[:, d])\n denom = jnp.sum(x_track ** 2 * psi_track[:, b])\n self.w[d, b] = numer / denom\n if abs(k) > 1e-5:\n self.w[d, b] /= k\n\n self.w = jnp.nan_to_num(self.w)", "def prob(x, w):\n y = tf.constant(np.array([0., 1.]), dtype=tf.float32)\n prob_ = tf.exp(tf.matmul(x, w) * y) / (1 + tf.exp(tf.matmul(x, w)))\n return prob_", "def get_lambda(thalf):\n\n return math.log(2.)/thalf", "def get_lambda(model):\n best_lambdas = [1000.0, 0.001, 100.0, 0.001, 100.0, 100.0, 0.001, 100.0]\n lambda_ = best_lambdas[model]\n return lambda_", "def gen_weight(sigma, delta, act_fn=F.relu):\n alpha = 1.-torch.exp(-act_fn(sigma.squeeze(-1))*delta)\n weight = alpha * torch.cumprod(torch.cat([torch.ones((alpha.shape[0], 1),device = alpha.device), 1.-alpha+1e-10], -1), -1)[:, :-1]\n return weight", "def ComputeWeights(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def cost_function(x, params, y, m):\n\n x_theta = np.matmul(x, params)\n hypothesis = vectorized_sigmoid_function(x_theta)\n\n return np.sum(np.log(hypothesis) * y + np.log(1 - hypothesis) * (1 - y)) * (-1 / m)", "def gradient_weight(X, Y, model):\n W = model['weight']\n b = model['bias']\n weight_decay = model['weight_decay']\n\n # YOUR CODE HERE\n # Write the gradient with respect to the weights.\n return np.add(np.subtract(np.dot(np.transpose(predict(X, model)), X), np.dot(np.transpose(Y), X)), 2 * LAMBDA * np.transpose(model['weight'])) #np.zeros((X.shape[1], Y.shape[1]))", "def forward(self, weights):\n\n return (np.sum(np.square(weights))) * (self.lambd / 2)", "def gradW(self, X, y, W):\n num_x = X.shape[0]\n # if i==y, then softmax_grad = preds_i-1;\n # otherwise, softmax_grad = preds_i\n preds = softmax(np.matmul(X, W)) #preds.shape(N,C)\n preds[list(range(num_x)), y] -= 1\n # grad = X.T*softmax_grad\n return np.matmul(X.T, preds)/num_x", "def perceptron_output(weights, bias, x):\n return step_function(dot(weights, x) + bias)" ]
[ "0.7590008", "0.6892587", "0.6760674", "0.66900504", "0.6663615", "0.6605572", "0.65382785", "0.65217125", "0.64970404", "0.64742047", "0.64711386", "0.6437741", "0.6418927", "0.63539314", "0.6347361", "0.63001853", "0.6257059", "0.62478036", "0.62149435", "0.6209357", "0.6166603", "0.61515224", "0.61054295", "0.6091459", "0.60890704", "0.60862976", "0.60796076", "0.60263115", "0.5999575", "0.59933347", "0.5968609", "0.59685117", "0.5962669", "0.5959666", "0.59579045", "0.59526384", "0.5945569", "0.58964723", "0.588404", "0.588128", "0.58615875", "0.5854054", "0.5851223", "0.58433145", "0.5838203", "0.5815346", "0.5809757", "0.5794407", "0.5780936", "0.57544506", "0.5750313", "0.57453007", "0.57364", "0.5725415", "0.5712791", "0.57069194", "0.5684399", "0.56678796", "0.56643724", "0.56640804", "0.56640804", "0.5654551", "0.5643527", "0.56409925", "0.56393385", "0.56241906", "0.5618639", "0.5617505", "0.5612836", "0.56033003", "0.5593715", "0.55923134", "0.55815417", "0.5573954", "0.55707073", "0.55698603", "0.5567204", "0.5565359", "0.5548011", "0.55388135", "0.5535497", "0.55297375", "0.55215347", "0.5518303", "0.5504972", "0.5503109", "0.55031", "0.5500374", "0.5498898", "0.5497621", "0.54889905", "0.5485314", "0.54840326", "0.5481382", "0.5479202", "0.5475725", "0.5474485", "0.54720515", "0.5469249", "0.5464283", "0.5464245" ]
0.0
-1
Find the best lambda value.
def tune_lambda(Xtrain, ytrain, Xval, yval): ##################################################### # TODO 5: Fill in your code here # ##################################################### bestlambda = None err = 1 for v in range(-19,20): if v>=0: val = float("1e+"+str(v)) else: val = float("1e"+str(v)) w = regularized_linear_regression(Xtrain,ytrain, val) error = mean_absolute_error(w, Xval,yval) if err > error: err = error bestlambda = val return bestlambda
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_lambda(model):\n best_lambdas = [1000.0, 0.001, 100.0, 0.001, 100.0, 100.0, 0.001, 100.0]\n lambda_ = best_lambdas[model]\n return lambda_", "def __bestLambda(self):\n\t\t\n\t\t# Determine starting value for brent-method (to avoid local minimum).\n\t\tself.startValue = self.__findStartValue()\n\t\t\t\n\t\t# Check if there exists a minimum within the range of self.lamStart. \n\t\t# Otherwise, use fmin because we cannot provide an interval. \n\t\tif (self.startIdx != 0 and self.startIdx != self.nStartValues-1):\n\t\t\ts = scipy.optimize.brent(self.__minBayesianEvidence, brack=(self.logLamStart[self.startIdx-1], self.logLamStart[self.startIdx], self.logLamStart[self.startIdx+1]))\n\t\telse:\n\t\t\ts = scipy.optimize.fmin(self.__minBayesianEvidence, self.startValue, disp=False)[0]\n\t\t\n\t\treturn 10**s", "def cross_val_lambda(self, X, y, n_fold, n_iter, lambda_range, model=None):\n best_lambda = 0\n error = np.inf\n for lambda_cur in np.arange(lambda_range[0], lambda_range[1], 0.1):\n avg_error,_ = self.cross_val(X, y, n_fold, n_iter, lambda_cur, model=model)\n if avg_error < error:\n error = avg_error\n best_lambda = lambda_cur\n logging.debug(\"Best lambda= %s for model: %s\", best_lambda, model)\n return best_lambda", "def best_value(self):\r\n return self._best_value", "def tune_lambda(Xtrain, ytrain, Xval, yval):\n #####################################################\n # TODO 5: Fill in your code here #\n #####################################################\n\n bestlambda = None\n mean_abs_err = 10000000\n power = -19\n while power < 20:\n lambda0 = 10 ** (power)\n w = regularized_linear_regression(Xtrain, ytrain, lambda0)\n err = mean_absolute_error(w, Xval, yval)\n if err < mean_abs_err:\n mean_abs_err = err\n bestlambda = lambda0\n power = power + 1\n return bestlambda", "def _best_individual(self):\n return max(self._population, key=attrgetter(\"fitness\"))", "def best(self):\n if len(self) == 0:\n return None\n return max_elems(self, key=attr.getter(\"value\"), gt=self.solver.sense.is_better)[0]", "def argmin(seq, fn):\n best = seq[0]; best_score = fn(best)\n for x in seq:\n x_score = fn(x)\n if x_score < best_score:\n best, best_score = x, x_score\n return best", "def best(self):\n alpha = -1\n beta = +1\n move = self.__negamax(alpha, beta, tt=DictTT())\n return move[1]", "def _get_new_lambda_candidate(lower_bound, upper_bound):\n lambda_new_candidate = max(\n np.sqrt(np.clip(lower_bound * upper_bound, 0, np.inf)),\n lower_bound + 0.01 * (upper_bound - lower_bound),\n )\n\n return lambda_new_candidate", "def get_lambda_value(lambda_node):\n return get_call_value(lambda_node.body)", "def lambda_max(self):\n return const.b_wien / self.temperature", "def argmin(seq, fn):\n best = seq[0]; best_score = fn(best)\n for x in seq:\n x_score = fn(x)\n if x_score < best_score:\n best, best_score = x, x_score\n return best", "def argmin(seq, fn):\n best = seq[0]; best_score = fn(best)\n for x in seq:\n x_score = fn(x)\n if x_score < best_score:\n best, best_score = x, x_score\n return best", "def get_best_value(self):\n # Todo: implement\n best_value_global = -inf\n for particle in self.particles:\n if particle.best_value >= best_value_global:\n best_value_global = particle.best_value\n return best_value_global # Remove this line", "def _get_best(self, populations, func):\n best = None\n for population in populations:\n for item in population:\n if not best:\n best = item\n elif func.fit(*item) > func.fit(*best):\n best = item\n return best", "def lambda_func(self):\n air = 
self.air_alias.val\n fuel = self.fuel_alias.val\n\n m_air = 0\n m_fuel = 0\n\n for i in self.inl:\n m_air += (i.m.val_SI * i.fluid.val[air])\n m_fuel += (i.m.val_SI * i.fluid.val[fuel])\n\n return self.lamb.val - m_air / (m_fuel * self.air_min)", "def value(a, y, weights, lambda_):\n\t\treturn 0.5* (np.linalg.norm(a-y)**2) / (a.shape[0] * a.shape[1])\n\t\t# return unregularize + (0.5*lambda_*np.sum(np.square(weights[-1])) / (a.shape[0] * a.shape[1])) ", "def get_maximum_fitness(self) -> float:\n anticipated_change_cls = [cl for cl in self\n if cl.does_anticipate_change()]\n\n if len(anticipated_change_cls) > 0:\n best_cl = max(anticipated_change_cls, key=lambda cl: cl.fitness)\n return best_cl.fitness\n\n return 0.0", "def _compute_best_value(self):\n asgt = self._neighbors_values.copy()\n best_cost, best_val = None, []\n\n for v in self._variable.domain:\n asgt[self.variable.name] = v\n c = self._compute_cost(**asgt)\n if (\n best_cost is None\n or (best_cost > c and self._mode == \"min\")\n or (best_cost < c and self._mode == \"max\")\n ):\n best_cost = c\n best_val = [v]\n elif best_cost == c:\n best_val.append(v)\n\n return best_val, best_cost", "def best_value(self):\n return np.max(self.y.numpy())", "def max_value(board): # the X player wants to maximize the score\n if terminal(board):\n return utility(board), None\n else:\n v = -math.inf\n move = None\n for action in actions(board):\n val, _ = min_value(result(board, action))\n # Check if returned Value is less than v if not return v and current action\n if val > v:\n # Assign v the maximum value for future evaluation\n v = max(v,val)\n # Keep track of action\n move = action\n # If best move then return it\n if v == 1:\n return v, move\n return v, move", "def lambda_i(self, i):\n return self._eig_val[i]", "def best_step(self):\r\n return self._best_value_step", "def max_val(board):\n v = -math.inf\n if terminal(board):\n return utility(board)\n for action in actions(board):\n v = max(v,min_val(result(board,action)))\n return v", "def argmax(seq, fn):\n return argmin(seq, lambda x: -fn(x))", "def get_best(self, population):\n best = min(population, key=self.cost_function)\n return best, self.cost_function(best)", "def get_best_fitness(self):\n f = max(self.characters, key=operator.attrgetter('fitness'))\n self.best_fitness = round(f.fitness, 3)\n self.best_candidate = f", "def get_bestparameter(self):\n if self._df_test is None:\n raise RuntimeError('get_bestparameter: please the '\n 'train model first')\n mean = self._df_test.mean(axis=1)\n if len(mean) == 1:\n result = mean.idxmax()\n elif len(mean) == 2:\n result = mean.loc[mean.index > 1].idxmax()\n else:\n result = mean.loc[mean.index > 2].idxmax()\n return result", "def get_bestparameter(self):\n if self._df_test is None:\n raise RuntimeError('get_bestparameter: please the '\n 'train model first')\n mean = self._df_test.mean(axis=1)\n if len(mean) == 1:\n result = mean.idxmax()\n elif len(mean) == 2:\n result = mean.loc[mean.index > 1].idxmax()\n else:\n result = mean.loc[mean.index > 2].idxmax()\n return result", "def get_best(self):\n if len(self._table) == 0:\n self.log.warning(\"table is empty, cannot extract best value\")\n raise ValueError()\n\n max_prob = -np.inf\n max_assignment = None\n for assignment in self._table.keys():\n prob = self._table[assignment]\n if prob > max_prob:\n max_prob = prob\n max_assignment = assignment\n\n # TODO: check refactor > there is no case of max_assignment is None\n return max_assignment if max_assignment is not None else 
Assignment.create_default(self._head_vars)", "def fitness(self,*val):\n if len(val): self._fitness = val[0]\n return self._fitness", "def _get_lip_best(self) -> float:\n pass", "def get_best_pred_finger(self,f):\n return min(self.best_finger_pred[f],\\\n key=lambda kn:dist_ident(kn.ident,self.get_finger_pred_loc(f)))", "def argmin_random_tie(seq, fn):\n best_score = fn(seq[0]); n = 0\n for x in seq:\n x_score = fn(x)\n if x_score < best_score:\n best, best_score = x, x_score; n = 1\n elif x_score == best_score:\n n += 1\n if random.randrange(n) == 0:\n best = x\n return best", "def __findStartValue(self):\n\t\tself.minEvidence = 1e120*np.ones(self.nStartValues)\n\t\tfor idx1 in range(self.nStartValues):\n\t\t\tself.minEvidence[self.nStartValues-1-idx1] = self.__minBayesianEvidence(self.logLamStart[self.nStartValues-1-idx1])\n\t\t\t# If minEvidence > 1e100 a negative value has been found for wMP which\n\t\t\t# implies that lower values of lambda do not need to be considered.\n\t\t\tif self.minEvidence[self.nStartValues-1-idx1] > 1e100:\n\t\t\t\tbreak\n\t\tself.startIdx = np.argmin(self.minEvidence)\n\t\t\t\t\n\t\treturn self.logLamStart[self.startIdx]", "def n_lambda(self):\n return self.b()", "def estimate_lambda(pv):\n LOD2 = sp.median(st.chi2.isf(pv, 1))\n null_median = st.chi2.median(1)\n L = (LOD2 / null_median)\n return L", "def get_best_solution(self):\n if not self.tours:\n raise Exception('No solution has been computed yet')\n scores = {s:get_cost(self.tours[s],self) for s in self.tours}\n best = min(scores,key=scores.get)\n print('The best solution is given by {} with score {}'.format(best,scores[best]))\n return self.tours[best]", "def getLambda(inp):\n\treturn 0.9/(getK1(inp) - getCMean(inp))", "def value(a, y, weights, lambda_):\n\t\treturn np.sum(np.nan_to_num(-y*np.log(a + 1e-15)-(1-y)*np.log(1-a + 1e-15))) / (a.shape[0] * a.shape[1])\n\t\t# return unregularize + (0.5*lambda_*np.sum(np.square(weights[-1])) / (a.shape[0] * a.shape[1])) ", "def fmax(func_to_maximize, initial_guess=0.5*V):\n func_to_minimize = lambda x : -func_to_maximize(x)\n return fmin(func_to_minimize, initial_guess, disp=False)[0]", "def lambda_rad(self):\n InputFile = self('Meta','InputFile').decode(\"utf-8\")\n d_InputFile = dict([item.replace(' ','').split('=') for item in InputFile.splitlines() if '=' in item])\n if 'lambda' in d_InputFile:\n return float(d_InputFile['lambda'])\n else:\n return self.lambdaref", "def findval(self,val,x0,method='fmin',**kwargs):\n kwargs['valtofind'] = val\n return self._optimize(x0,'val',method,**kwargs)", "def argmax(seq, fn):\n return argmin(seq, lambda x: -fn(x))", "def get_best_vector(results, f_x, target):\n index_min = -1\n cur_min = 10**10\n\n with Session() as sess:\n for index, individual in enumerate(results):\n f_x_res = sess.run(f_x, feed_dict={\n target: individual\n })\n if f_x_res < cur_min:\n cur_min = f_x_res\n index_min = index\n\n best = results[index_min]\n\n return best", "def min_value(board): # the O player wants to minimze the score\n if terminal(board):\n return utility(board), None\n else:\n v = math.inf\n move = None\n track = {}\n for action in actions(board):\n val, _ = max_value(result(board, action))\n # Check if returned Value is less than v if not return v and current action\n if val < v:\n # Assign v the minimum value for future evaluation\n v = min(v, val)\n # Keep track of action\n move = action\n # If best move then return it\n if v == -1:\n return v, move\n return v, move", "def get_best_individual(self):\n return self._best_indv", 
"def best_action(q_table: np.ndarray, state: int) -> int:\n return int(np.argmax(q_table[state]))", "def find_max_x(self, Ns=50):\n with self.fix_evaluator():\n x0 = brute(lambda x: -self(x[0])[0], [[0, np.pi]], Ns=Ns,\n finish=None)\n res = minimize_scalar(\n lambda x: -self(x)[0],\n bracket=(x0, np.pi/Ns), bounds=(0, np.pi), method='bounded',\n options=dict(xatol=1e-12)\n )\n return res.x", "def maxValue(board, cur_optimal_val, player):\n # base case (leave recursion)\n if isTerminal(board):\n return getUtility(board)\n\n val = -math.inf\n for action in getActions(board):\n val = max(val, minValue(getResult(board, action), cur_optimal_val, player))\n if (\n (player == O and (val >= cur_optimal_val or val == 1)) or\n (player == X and val == 1)\n ):\n break\n return val", "def get_lambda(thalf):\n\n return math.log(2.)/thalf", "def get_most_valuable(self):\n return self.most_valuable", "def greedy_policy(self):\n # print(self.weights)\n policy = defaultdict(lambda: 0)\n\n for entry, values in self.weights.items():\n policy[entry] = np.argmax(self.weights[entry])\n # print(policy)\n\n return policy", "def ml_result(self, var, e):\n\t\tdist = self.enumerate_ask(var, e)\n\t\treturn max(dist.items(), key=lambda x:x[1])[0]", "def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n maxvalue = -100000000\n bestaction = None\n for action in self.mdp.getPossibleActions(state):\n valueforthisaction = self.getQValue(state, action) # is this right? \n if valueforthisaction > maxvalue:\n bestaction = action\n maxvalue = valueforthisaction\n return bestaction", "def personal_best(scores):\n# return sorted(scores, reverse=True)[0]\n return max(scores)", "def compute_lambda_mle(X: np.ndarray) -> float:\n\n Poisson._check_input_data(X=X)\n Poisson._check_support(X=X)\n\n lambda_ = X.mean()\n return lambda_", "def get_best_candidate(self):\n if not self.scores:\n return None\n return self.te_list[self.scores.index(max(self.scores))]", "def GoodmanKruskalLambda_calc(TP, FP, FN, TN):\n try:\n n = TP + FP + FN + TN\n part1 = max(TP, FP) + max(FN, TN) + max(TP, FN) + max(FP, TN)\n part2 = max(TP + FP, FN + TN) + max(TP + FN, FP + TN)\n return (0.5 * (part1 - part2)) / (n - 0.5 * part2)\n except Exception:\n return \"None\"", "def min_val(board):\n v = math.inf\n if terminal(board):\n return utility(board)\n for action in actions(board):\n v = min(v,max_val(result(board,action)))\n return v", "def get_all_self_lambdas(source, lambdas):\n \n N = len(source)\n \n for i in prange(1, N): \n \n # The target process is everything ahead of i.\n t_max = 0\n c_max = 0\n\n for j in range(0, i): # Look back at the past\n if source[j] == source[i]: # Check if matches future's next element\n c_max = 1\n for k in range(1,min(N-i, i-j)): # Look through more of future\n if source[j+k] != source[i+k]:\n break\n else:\n c_max = c_max+1\n\n if c_max > t_max:\n t_max = c_max \n\n lambdas[i] = t_max+1\n \n return lambdas", "def _best_action(self, state):\n actions_rewards = list(self.Q[state].items())\n return max(actions_rewards, key=lambda x: x[1])[0]", "def findMaxEval(eVal, q, bWidth):\n out = minimize(\n lambda x, *args: errPDFs(x[0], *args),\n 0.5,\n args=(eVal, q, bWidth),\n bounds=((1e-5, 1 - 1e-5),),\n )\n if out[\"success\"]:\n var = out[\"x\"][0]\n else:\n var = 1\n eMax = var * (1 + (1.0 / q) ** 0.5) ** 2\n return eMax, var", "def _optimize_f(self,x0,type,method,**kwargs):\n from scipy.optimize import fmin,fmin_powell\n\n if type == 'min':\n g=lambda *args,**kwargs:self.f(*args,**kwargs)\n elif type == 
'max':\n g=lambda *args,**kwargs:-1*self.f(*args,**kwargs)\n elif type == 'root':\n g=lambda *args,**kwargs:np.abs(self.f(*args,**kwargs))\n elif type == 'val':\n val = kwargs.pop('valtofind')\n g=lambda *args,**kwargs:np.abs(self.f(*args,**kwargs)-val)\n elif type == 'saddle':\n raise NotImplementedError\n else:\n raise ValueError('Unrecognized optimization type')\n\n if method == 'fmin':\n res = fmin(g,x0,tuple(self.parvals),**kwargs)\n elif method == 'fmin_powell':\n res = fmin_powell(g,x0,tuple(self.parvals),**kwargs)\n else:\n raise ValueError('Unrecognized method')\n\n self.lastOpt = res\n return res[0]", "def best_feas_seq(self):\n is_better = self.solver.sense.is_better\n best = self.solver.sense.worst_value\n for sol in self:\n if sol.is_feasible and is_better(sol.value, best):\n best = sol.value\n yield sol", "def fitness(self):\n # TO BE DECIDED\n return 1", "def best_node(self):\n nodes = self._all_nodes()\n sorted_nodes, _ = self.scorer.sort(nodes)\n return sorted_nodes[0]", "def get_best(self) -> Chromosome:\n if not (self._best_chromosome is None): # if the best chromosome is unchanged since the last calculation\n return self._best_chromosome\n\n best = None\n best_fitness = None\n\n for chromosome in self._population:\n chromosome_fitness = chromosome.get_fitness()\n\n if best_fitness is None or self._is_fitter(chromosome_fitness, best_fitness):\n best = chromosome\n best_fitness = chromosome_fitness\n\n return best", "def find_max_f():\n fmax = fmin(g, 2)\n return fmax[0]", "def find_maximum_value(self):\n if self.root: \n self.max_val = self.root.value\n else:\n return 'No tree found'\n def inner(root):\n if root.left:\n inner(root.left)\n\n if root.right:\n inner(root.right)\n\n if self.max_val < root.value:\n self.max_val = root.value\n\n inner(self.root)\n return self.max_val", "def bestAction(self, state):\n action = self.q_network.chooseBestAction(state)\n V = max(self.q_network.qValues(state))\n return action, V", "def find_max(self):\r\n maxVal = self.items[1]\r\n if maxVal is None:\r\n return None\r\n \r\n for i in range(1,len(self.items)):\r\n if self.items[i] is not None:\r\n if self.items[i] > maxVal:\r\n maxVal = self.items[i]\r\n return maxVal", "def fmax(func_to_maximize, initial_guess=0):\n func_to_minimize = lambda x : -func_to_maximize(x)\n return fmin(func_to_minimize, initial_guess, disp=False)[0]", "def max(self, fn=lambda x: x):\n return _(max(*self._, key=fn))", "def GuttmanLambdaB_calc(TP, FP, FN, TN):\n try:\n n = TP + FP + FN + TN\n part1 = max(TP, FP) + max(FN, TN)\n part2 = max(TP + FN, FP + TN)\n return (part1 - part2) / (n - part2)\n except Exception:\n return \"None\"", "def getBestActionValuePair(self, state):\n\t\tactionValuePairs = self.neuralNet.getValues(state)\n\t\tbestAction = np.argmax(actionValuePairs)\n\t\treturn (bestAction, actionValuePairs[bestAction])", "def best_params(self):\n return self.X[np.argmax(self.y.numpy())]", "def _compute_best_value(self):\n reduced_cs = []\n concerned_vars = set()\n\n for c in self.utilities:\n asgt = filter_assignment_dict(self._neighbors_values, c.dimensions)\n reduced_cs.append(c.slice(asgt))\n concerned_vars.update(c.dimensions)\n var_val, rel_val = find_arg_optimal(\n self.variable,\n lambda x: functools.reduce(operator.add, [f(x) for f in reduced_cs]),\n self._mode,\n )\n # Add the cost for each variable value if any\n for var in concerned_vars:\n if var.name == self.name:\n rel_val += var.cost_for_val(self.current_value)\n else:\n rel_val += 
var.cost_for_val(self._neighbors_values[var.name])\n\n return var_val, rel_val", "def get_best(self):\n scores, ids = self.sort_best()\n return scores[1], ids[1]", "def get_value(self, func):\n value = func(self.position)\n if value < self.best_value: # minimisation option\n self.best_value = value\n self.best_position = self.position\n #check if value is in the space limits\n if value > z_end:\n self.value = z_end\n if value < z_begin:\n self.value = z_begin\n else:\n self.value = value", "def V2lambda(V):\n return(3956/V)", "def value(q, s):\n # Your code here\n return max(q.get(s,a) for a in q.actions)", "def getBestOption(self):\n if len(self.Data) < 1:\n return None\n else:\n bestR = max(self.Data.items(), key=lambda x: x[1]['SPat'].I)\n return bestR[1]", "def GuttmanLambdaA_calc(TP, FP, FN, TN):\n try:\n n = TP + FP + FN + TN\n part1 = max(TP, FN) + max(FP, TN)\n part2 = max(TP + FP, FN + TN)\n return (part1 - part2) / (n - part2)\n except Exception:\n return \"None\"", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n max_next_qvalue = None\n for nextAction in self.legalActions:\n next_qvalue = self.getQValue(state, nextAction)\n if max_next_qvalue is None or max_next_qvalue < next_qvalue:\n max_next_qvalue = next_qvalue\n if max_next_qvalue is None:\n max_next_qvalue = 0.0\n\n return max_next_qvalue", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n max_next_qvalue = None\n for nextAction in self.legalActions:\n next_qvalue = self.getQValue(state, nextAction)\n if max_next_qvalue is None or max_next_qvalue < next_qvalue:\n max_next_qvalue = next_qvalue\n if max_next_qvalue is None:\n max_next_qvalue = 0.0\n\n return max_next_qvalue", "def max_value(self, game, depth):\n \n # Timeout Check\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n\n # Get legal moves\n valid_moves = game.get_legal_moves() \n # Best possible score -> initiated at -inf, the lowest score possible\n best_value = float(\"-inf\")\n\n # Terminal State:\n # When search reaches search limit or no legal moves left\n # Return score of terminal state\n if (depth == 0) or (not valid_moves):\n return self.score(game, self)\n \n # Search each move in legal moves\n for move in valid_moves:\n\n # Update best possible value with current best or search value \n best_value = max(best_value, min_value(self, game.forecast_move(move), depth-1))\n\n # Return best value (in this case max value)\n return best_value", "def search_for_maximum(self):\n return self.maximise_aquisition(self.expected_improvement)", "def lambda_eval(v):\n return v() if hasattr(v, '__call__') else v", "def _get_initial_guess_for_lambdas(\n main_model,\n):\n gradient_norm = np.linalg.norm(main_model.linear_terms)\n model_hessian = main_model.square_terms\n\n hessian_infinity_norm = np.linalg.norm(model_hessian, np.Inf)\n hessian_frobenius_norm = np.linalg.norm(model_hessian, \"fro\")\n\n hessian_gershgorin_lower, hessian_gershgorin_upper = _compute_gershgorin_bounds(\n main_model\n )\n\n lambda_lower_bound = max(\n 0,\n -min(model_hessian.diagonal()),\n gradient_norm\n - min(hessian_gershgorin_upper, hessian_frobenius_norm, hessian_infinity_norm),\n )\n lambda_upper_bound = max(\n 0,\n gradient_norm\n + min(-hessian_gershgorin_lower, hessian_frobenius_norm, hessian_infinity_norm),\n )\n\n if lambda_lower_bound == 0:\n lambda_candidate = 0\n else:\n lambda_candidate = _get_new_lambda_candidate(\n lower_bound=lambda_lower_bound, upper_bound=lambda_upper_bound\n )\n\n lambdas = 
DampingFactors(\n candidate=lambda_candidate,\n lower_bound=lambda_lower_bound,\n upper_bound=lambda_upper_bound,\n )\n\n return lambdas", "def max_value(self, state, max_alpha, max_beta, max_depth):\r\n if state.terminal_test():\r\n return state.utility(0)\r\n if max_depth <=0 :\r\n return self.score(state)\r\n\r\n v = float(\"-inf\")\r\n for a in state.actions():\r\n v = max(v, self.min_value(state.result(a), max_alpha, max_beta, max_depth - 1))\r\n if v >= max_beta:\r\n return v\r\n max_alpha = max(max_alpha, v)\r\n return v", "def get_fitness(self):\n if self.fitness == 0:\n self.fitness = 1 / self.get_cost()\n return self.fitness", "def minimum_value(self):\n return self._fitness[self._minidx]", "def max_ij(f, K):\n i_best, j_best, m_value_min = min_ij(lambda i, j: (-1) * f(i, j), K)\n return i_best, j_best, - m_value_min", "def minValue(board, cur_optimal_val, player):\n # base case (leave recursion)\n if isTerminal(board):\n return getUtility(board)\n\n val = math.inf\n for action in getActions(board):\n val = min(val, maxValue(getResult(board, action), cur_optimal_val, player)) \n if (\n (player == X and (val <= cur_optimal_val or val == -1)) or\n (player == O and val == -1)\n ):\n break\n return val", "def get_worst_fitness(self):\n f = min(self.characters, key=operator.attrgetter('fitness'))\n self.worst_fitness = round(f.fitness, 3)", "def find_bestParameter(self,currentEnergy):\n if currentEnergy==5.89:\n currentEnergy=6.4\n print(\"WARNING !!!!!!!! E=5.89 KeV ==>> uso best value trovato a 6.4 KeV !!!!!\")\n \n \n index_summary=1e6\n try:\n index_summary=np.where( np.logical_and ( self.energy<(float(currentEnergy)+0.05), self.energy >(float(currentEnergy)-0.05) ) )[0][0]\n print (\"readSummaryData: energia trovata! index = \",index_summary)\n except:\n print (\"readSummaryData: energia *NON* trovata nello scan ploarizzato\")\n\n \n bestPar=1e6 \n if ( index_summary<1000):\n bestPar=self.best_val[index_summary]\n \n return bestPar", "def find_bestParameter(self,currentEnergy):\n if currentEnergy==5.89:\n currentEnergy=6.4\n print(\"WARNING !!!!!!!! E=5.89 KeV ==>> uso best value trovato a 6.4 KeV !!!!!\")\n \n \n index_summary=1e6\n try:\n index_summary=np.where( np.logical_and ( self.energy<(float(currentEnergy)+0.05), self.energy >(float(currentEnergy)-0.05) ) )[0][0]\n print (\"readSummaryData: energia trovata! index = \",index_summary)\n except:\n print (\"readSummaryData: energia *NON* trovata nello scan ploarizzato\")\n\n \n bestPar=1e6 \n if ( index_summary<1000):\n bestPar=self.best_val[index_summary]\n \n return bestPar", "def get_best_action(self):\n if self.bestAction is None:\n self.calculate_best_action()\n return self.bestAction" ]
[ "0.74376184", "0.71890557", "0.655791", "0.6550202", "0.63085747", "0.6219973", "0.6184345", "0.61574817", "0.6153785", "0.61114615", "0.6099454", "0.60798913", "0.6037426", "0.6037426", "0.6017245", "0.5970384", "0.58916986", "0.58453995", "0.5843659", "0.5816554", "0.578878", "0.5780784", "0.57512057", "0.574895", "0.5743494", "0.57319444", "0.57308203", "0.5729633", "0.57248145", "0.57248145", "0.571684", "0.5706312", "0.5682805", "0.5675985", "0.56721836", "0.56638515", "0.5661529", "0.56571007", "0.5655105", "0.5641988", "0.56394255", "0.56357783", "0.5631966", "0.56261", "0.5620553", "0.5607268", "0.55992866", "0.5592792", "0.5579765", "0.5568393", "0.5558551", "0.555169", "0.5542862", "0.55328417", "0.5531768", "0.552776", "0.5524479", "0.55148077", "0.5504659", "0.5497111", "0.5496829", "0.5490808", "0.54887575", "0.548275", "0.5481472", "0.54800534", "0.5473001", "0.54601896", "0.54600185", "0.5456337", "0.5454861", "0.5448284", "0.5435902", "0.54264843", "0.5413185", "0.5409359", "0.54078764", "0.540412", "0.54028803", "0.53962594", "0.53929555", "0.53908825", "0.5378759", "0.5373596", "0.5370086", "0.5369436", "0.5369436", "0.53668225", "0.5359178", "0.53521144", "0.5342744", "0.53367627", "0.5335943", "0.53306437", "0.5325673", "0.5324088", "0.5317643", "0.5316879", "0.5316879", "0.531665" ]
0.6234567
5
r"""Convert ratio to decibels. Converting a ratio to decibels depends on whether the ratio is a ratio of amplitudes or a ratio of powers. For amplitudes the decibel value is
def dB(x, power=False): if power: return 10 * np.log10(np.abs(x)) else: return 20 * np.log10(np.abs(x))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decibel(x):\n return 10.0 * np.log10(x)", "def idecibel(x):\n return 10.0 ** (x / 10.0)", "def _bcd2dec(self, value):\n return ((value >> 4) * 10) + (value & 0x0F)", "def ppcm_denominateurs(self):\n\t\tl = []\n\t\tn = 1\n\t\tif self.__valide:\n\t\t\tfor m in self.liste_decroissante():\n\t\t\t\t\"\"\" les denominateurs sont positifs \"\"\"\n\t\t\t\te = m.get_coefficient().get_denom().valeur()\n\t\t\t\tif not (e in l):\n\t\t\t\t\tl.append(e)\n\t\t\t\tn *= e\n\t\treturn n / pgcd_liste(l)", "def bi_to_dec (bi):\n bi_str = str(bi)\n dec = int( bi_str, 2)\n return dec", "def __dec_bcd(dec):\n bcd = 0\n for vala in (dec // 10, dec % 10):\n for valb in (8, 4, 2, 1):\n if vala >= valb:\n bcd += 1\n vala -= valb\n bcd <<= 1\n return bcd >> 1", "def _dec2bcd(self, value):\n return (value // 10) << 4 | (value % 10)", "def calculate_br_down_metric(br_down):\n if br_down < 1:\n br_down = 1\n min_baud = 1200\n max_baud = 38400\n\n num = np.log(br_down) - np.log(min_baud)\n den = np.log(max_baud) - np.log(min_baud)\n\n return (num / den + 0.1).clip(min=0, max=1)", "def binfracstr2decfrac(bstr):\n assert bstr[0] != '-', \"Only pass non-negative values\"\n dec_value = 0\n half = Decimal(\"0.5\")\n for place, bit in enumerate(bstr):\n if int(bit) == 1:\n dec_value += half**(place+1)\n return dec_value", "def dec(a):\n a = float(a)\n return a / 100", "def dBtoLinear(db):\r\n return 10**(db/20)", "def gon2dec(gon):\n return 9/10 * gon", "def __make_denominator_integer(self):\n while self.denominator % 1 !=0:\n self.denominator *=10\n self.numerator *=10", "def denom(self, a):\n return self.one", "def dec(self):\n return hp2dec(self.hp_angle)", "def to_decibel(x, ref_val, name=None):\n with tf.name_scope(name, op_util.resolve_op_name(\"ToDecibel\"), [x]):\n zero = tf.constant(0, dtype=ref_val.dtype)\n with tf.control_dependencies([\n tf.assert_greater(ref_val, zero, data=[ref_val],\n message=\"reference value must be > 0\")\n ]):\n return 10*math_ops.log10(x / ref_val)", "def getDec(self):\n return self._dec", "def pct_to_dec(num):\n dec = float(num) / 100\n return dec", "def conv_i_to_dec(i):\n return 1800 + i*10", "def dec(self):\n return self.dec_angle", "def dec_to_bi (dec):\n if int(dec) == 0:\n bi = int(0)\n return bi\n bi_old = bin(dec)\n bi_new = bi_old[2:]\n bi = int(bi_new)\n return bi", "def dB2gain(dB):\n V = math.exp(dB/20)\n return V", "def exchange_ratio(delta_x, salience, power, dominator):\n\treturn (delta_x * salience * power) / dominator", "def todB(s):\n sdB = 10*np.log10(s)\n return(sdB)", "def _to_ym_dec(ym):\n XXXX = int(ym * 0.01)\n YY = int(100 * (ym * 0.01 - XXXX) + 0.5)\n ZZ = (YY - 1) * 100.0 / 12.0\n ym_dec = XXXX + 0.01 * ZZ\n return ym_dec", "def denom(self, a):\n raise NotImplementedError", "def todB(s):\n sdB = 10*np.log10(s)\n return(sdB)", "def decfrac2binrep(x, context):\n assert x > 0, \"Provide only positive Decimal float value\"\n assert isinstance(x, Decimal), \"Provide only positive Decimal float value\"\n fraction, exponent = frexp(x, context)\n max_bits = context.characteristicClass.exp_largest + \\\n context.significandClass.digits\n bfrac = np.zeros(max_bits, int)\n i = 0\n i_stop = max_bits\n not_seen_one = True\n while fraction > 0 and i < max_bits:\n fraction *= 2\n bit = int(fraction)\n if not_seen_one and bit == 1:\n # speed optimization\n i_stop = i + context.significandClass.digits + 2\n bfrac[i] = bit\n fraction -= bit\n i += 1\n if i >= i_stop:\n break\n # negative exponents OK for this usage\n return decint2binstr(exponent), \"\".join([str(bit) for 
\\\n bit in bfrac[:i]])", "def rht_to_dp(temp, rh):\r\n # from https://en.wikipedia.org/wiki/Dew_point\r\n dp = temp - (0.36 * (100 - rh))\r\n # Check Calc\r\n # print(\"Temp: {} RH: {} DP: {}\".format(temp, rh, dp))\r\n return dp", "def gain2dB(gain):\n dB = 20*math.log(gain)\n return dB", "def dec(self, params):\n reg = params[0]\n if self.reg_dct[reg] == 0:\n self.reg_dct[reg] = (2 ** 32) - 1\n else:\n self.reg_dct[reg] -= 1", "def getFactor(currency):", "def ddm(self):\n return dec2ddm(self.dec_angle)", "def getDemodGain(self, inpwr):\n return min(100, max(0, int(100-2*(inpwr+40))))", "def conv(x):\n return x#-2*(16.41*x + 65.04-95.12) ", "def lineartodB(lin):\r\n return 20*np.log10(lin)", "def ConvertToDegress(value):\r\n return float(value.values[0].num) / float(value.values[0].den) + (float(value.values[1].num) / float(value.values[1].den) / 60.0) + (float(value.values[2].num) / float(value.values[2].den) / 3600.0)", "def extract_conversion_factors(self):\n\n # get some info to reduce ridiculously long java-prop names\n vDeflection_channel = \"lcd-info.{}.\".format(self.channel_numbers[\"vDeflection\"])\n vDeflection_encoder = \"{}encoder.scaling.\".format(vDeflection_channel)\n vDeflection_conversion = \"{}conversion-set.conversion.\".format(vDeflection_channel)\n\n height_channel = \"lcd-info.{}.\".format(self.channel_numbers[\"height\"])\n height_encoder = \"{}encoder.scaling.\".format(height_channel)\n height_conversion = \"{}conversion-set.conversion.\".format(height_channel)\n\n factors = {\"vDeflection\": {}, \"height\": {}}\n\n # parse vDeflection conversion factors\n factors[\"vDeflection\"][\"raw multiplier\"] = \\\n np.array(float(self.general[\"{}multiplier\".format(vDeflection_encoder)]))\n factors[\"vDeflection\"][\"raw offset\"] = np.array(float(self.general[\"{}offset\".format(vDeflection_encoder)]))\n factors[\"vDeflection\"][\"distance multiplier\"] = \\\n np.array(float(self.general[\"{}distance.scaling.multiplier\".format(vDeflection_conversion)]))\n factors[\"vDeflection\"][\"distance offset\"] = \\\n np.array(float(self.general[\"{}distance.scaling.offset\".format(vDeflection_conversion)]))\n factors[\"vDeflection\"][\"force multiplier\"] = \\\n np.array(float(self.general[\"{}force.scaling.multiplier\".format(vDeflection_conversion)]))\n factors[\"vDeflection\"][\"force offset\"] = \\\n np.array(float(self.general[\"{}force.scaling.offset\".format(vDeflection_conversion)]))\n\n # parse height conversion factors\n factors[\"height\"][\"raw multiplier\"] = np.array(float(self.general[\"{}multiplier\".format(height_encoder)]))\n factors[\"height\"][\"raw offset\"] = np.array(float(self.general[\"{}offset\".format(height_encoder)]))\n factors[\"height\"][\"calibrated multiplier\"] = \\\n np.array(float(self.general[\"{}nominal.scaling.multiplier\".format(height_conversion)]))\n factors[\"height\"][\"calibrated offset\"] = \\\n np.array(float(self.general[\"{}nominal.scaling.offset\".format(height_conversion)]))\n\n return factors", "def freq2den(freq):\n\n return freq * freq * k_2", "def interpret(self):\n return binfracstr2decfrac(self.bin_value)", "def duty_translate(self, n):\n return int((float(n) / 255) * 1023)", "def duty_cycle(self):\n\n duty_cycle_ns = int(utils.readstr_all(os.path.join(_CHANNEL_PATH(self._chip,\n self._channel),\n 'duty_cycle')))\n if self.period > 0:\n return float(duty_cycle_ns / 1000.0 / float(self.period))\n else:\n return 0.0", "def convert_to_degress(self, value):\n d0 = value[0][0]\n d1 = value[0][1]\n d = float(d0) / float(d1)\n\n 
m0 = value[1][0]\n m1 = value[1][1]\n m = float(m0) / float(m1)\n\n s0 = value[2][0]\n s1 = value[2][1]\n s = float(s0) / float(s1)\n\n return d + (m / 60.0) + (s / 3600.0)", "def denominator(num):\n require_type(isa(num,fractions.Fraction) or isa(num,int),\n 'parameter of denominator must be a fraction or integer')\n return num.denominator", "def bin2dec(x):\n return int(x, 2)", "def denominator(self):\n return 1", "def depolarization_ratio(self):\r\n if self._depol_ratio is not None:\r\n return round(self._depol_ratio,3)\r\n else:\r\n return self._depol_ratio", "def _convert_to_degress(value):\r\n d = float(value.values[0].num) / float(value.values[0].den)\r\n m = float(value.values[1].num) / float(value.values[1].den)\r\n s = float(value.values[2].num) / float(value.values[2].den)\r\n\r\n return d + (m / 60.0) + (s / 3600.0)", "def dB(x, power=True):\r\n if not power:\r\n return 20 * np.log10(np.abs(x))\r\n return 10 * np.log10(np.abs(x))", "def calculate_next_pwm_duty_cycle_for_led(angle, led):\n\n angle = adjust_angle_for_perspective_of_current_led(angle, led)\n if 120 < angle < 240:\n return 0\n elif angle <= 120:\n return 100 - (angle * (100 / 120.0))\n else:\n return 100 - ((360 - angle) * (100 / 120.0))", "def dec(self):\n return gon2dec(self.gon_angle)", "def b36_to_dec(b):\n return int(b, 2)", "def den(self):\n return self.den", "def amer_to_dec(fig):\n sign_pattern = r'[\\+\\-]'\n if fig == 'NaN':\n return 'No Lines'\n fig = str(fig)\n sign = re.search(sign_pattern, fig)\n if sign == None:\n sign = '+'\n num = float(fig)\n else:\n sign = sign[0]\n num = float(fig.lstrip(sign))\n if sign == '+' :\n return round(1 + (num/100), 2)\n elif sign == '-':\n return round(1 + (100/num), 2)\n else:\n return \"error\"", "def binvalstr2dec(x):\n if not isbinstr(x):\n raise ValueError(\"Invalid string representation of binary\"\n \" float: %s\" % x)\n if x[0] == '-':\n x = x[1:]\n sign = -1\n else:\n sign = 1\n if 'e' in x:\n x, estr = x.split('e')\n e = int(estr)\n elif 'E' in x:\n x, estr = x.split('E')\n e = int(estr)\n else:\n e = 0\n if '.' 
in x:\n try:\n whole, frac = x.split('.')\n except ValueError:\n raise ValueError(\"Invalid string representation of binary\"\n \" float\")\n else:\n if frac == \"\":\n frac = '0'\n if whole == \"\":\n whole = '0'\n else:\n whole = x\n frac = '0'\n try:\n dec_whole = Decimal(int(whole, base=2)) * Decimal(2)**e\n except ValueError:\n dec_whole = Decimal(0)\n dec_frac = binfracstr2decfrac(frac) * Decimal(2)**e\n return sign*(dec_whole+dec_frac)", "def _convert_to_degress(value):\n d = float(value.values[0].num) / float(value.values[0].den)\n m = float(value.values[1].num) / float(value.values[1].den)\n s = float(value.values[2].num) / float(value.values[2].den)\n\n return d + (m / 60.0) + (s / 3600.0)", "def dv(self):\n return self.dvdlogdp.mul(self.dlogdp)", "def duty_cycle(self):\n pwm = self._pca.pwm_regs[self._index]\n if pwm[0] == 0x1000:\n return 0xffff\n return pwm[1] << 4", "def conversion(temp, mode):\n if mode == 1:\n c_to_f = (temp * 9/5) + 32\n return c_to_f\n else:\n f_to_c = (temp - 32) * 5 / 9\n return f_to_c", "def _convert_to_degress(self, value):\n d = float(value.values[0].num) / float(value.values[0].den)\n m = float(value.values[1].num) / float(value.values[1].den)\n s = float(value.values[2].num) / float(value.values[2].den)\n\n return d + (m / 60.0) + (s / 3600.0)", "def lbd_func(C):\n if C == 0:\n return 0.0\n lbd = 1 / C\n return lbd", "def to_float(self): \n return (self._num / self._den)", "def dec(list, c1):\n\n\tpotencia = 0\n\tdecnumb = 0\n\twhile len(list)>1:\n\t\tdecnumb += list.pop() * 2 ** potencia\n\t\tpotencia += 1\n\tprint (list)\n\tif list[0] == 1:\n\n\t\tdecnumb = (decnumb * -1)\n\t\tif c1 is True:\n\t\t\tdecnumb = decnumb - 1\n\n\treturn decnumb", "def int2bcd( value ):\n\tbcd = 0\n\tfor i in (value // 10, value % 10):\n\t\tfor p in (8, 4, 2, 1):\n\t\t\tif i >= p:\n\t\t\t\tbcd += 1\n\t\t\t\ti -= p\n\t\t\tbcd <<= 1\n\treturn bcd >> 1\n\t#return (value or 0) + 6 * ((value or 0) // 10)", "def dec(self):\n if self.positive:\n return self.degree + (self.minute / 60)\n else:\n return -(self.degree + (self.minute / 60))", "def bitarray2dec(in_bitarray):\n\n number = 0\n\n for i in range(len(in_bitarray)):\n number = number + in_bitarray[i]*pow(2, len(in_bitarray)-1-i)\n return number", "def scale_to_factor(scale):\n return (B.pi / 2) / (2 * scale**2)", "def abbott_elec():\n per_kwh = 0.08 # [$/kWh]\n return per_kwh", "def digital_gain():\n def r(x):\n return x/512.\n\n def w(x):\n return int(x*512)\n return r, w", "def denominator(self, ???):", "def dec_s2dec(dec):\n # Convert to floats, noting sign of dec\n if isinstance(dec, basestring):\n dec = re.sub('[:dms]', ' ', dec)\n dec = dec.split()\n if dec[0].lstrip()[0] == '-':\n negdec = True\n else:\n negdec = False\n decd,decm,decs = [float(item) for item in dec]\n if negdec:\n decd *= -1.\n # Error checking\n if decd > 90. or decm >= 60. 
or decs > 60:\n raise ValueError('Dec is outside sensible limits: Dec = %s' % dec)\n\n d_dec = decd + DEG_PER_AMIN * decm + DEG_PER_ASEC * decs\n if negdec:\n d_dec *= -1.\n\n return d_dec", "def kelvin_to_degc(x):\r\n return x - 273.15", "def rbw_vbw_ratio(self):\r\n res = self._visa.query(f\"SENSE{self._screen()}:BANDWIDTH:VIDEO:RATIO?\")\r\n return 1 / float(res)", "def getDivertorPowerFraction(self, DivCode):\n if DivCode == 'UI':\n frac = self.fracUI\n elif DivCode == 'UO':\n frac = self.fracUO\n elif DivCode == 'LI':\n frac = self.fracLI\n elif DivCode == 'LO':\n frac = self.fracLO\n else:\n frac = 1.0\n return frac", "def schmeckle2usd(schmeckle):\n return schmeckle * 148.0", "def decode(self, encoded_value):\n return float(encoded_value) / (1 << self.frac_bits)", "def get_converter(self, parameter):\n if parameter not in self._converters:\n param = self.get_parameter(parameter)\n try:\n scale = float(param['Scale'])\n except KeyError:\n scale = 1\n\n def convert(value):\n # easy_scale = float(param['EasyScale'])\n # easy_scale_multiplier = float(param['EasyScaleMultiplier'])\n return value * scale\n\n return convert", "def dB(x, out=None):\n if out is None:\n return 10 * np.log10(x)\n else:\n np.log10(x, out)\n np.multiply(out, 10, out)", "def _dBmTomW(dBm):\n return math.pow(10.0, dBm / 10.0)", "def to_factor(self, time: float) -> float:\n pass", "def conv_dec_fact_base(dec_number, N):\n factbase = \"\"\n for i in range (N-1,-1,-1):\n x = factorial_of_number(i)\n factbase += str(dec_number//x)\n dec_number = dec_number % x\n return factbase", "def make_change_dp(amount, denominations):", "def doconvert(self):\n if self.amt < 0:\n raise ValueError('Amount must be a positive number')\n conv = (self.amt * self.getuval(self.ufrom)) / self.getuval(self.uto)\n return conv", "def inverse(self):\n return fraction(self.denom, self.num)", "def binList2Dec(self, list=[]):\r\n\t\tvalue = 0\r\n\t\tfor x in range(len(list)):\r\n\t\t\tvalue = value + list[x]*(2**x)\r\n\t\treturn value", "def getSubstanceConversionFactor(self):\n return _libsbml.Submodel_getSubstanceConversionFactor(self)", "def dec(self):\n if self.positive:\n return self.degree + (self.minute / 60) + (self.second / 3600)\n else:\n return -(self.degree + (self.minute / 60) + (self.second / 3600))", "def __float__(self):\n return self.num/self.denom", "def __float__(self):\n return self.num/self.denom", "def convergent_den( contFrac, m):\n\n qlast2 = 1\n\n if m == 1:\n return qlast2\n\n qlast1 = contFrac[1]\n\n if m == 2:\n return qlast1\n\n qnext = 0\n\n for i in range(2, m):\n qnext = contFrac[i]*qlast1 + qlast2\n qlast2 = qlast1\n qlast1 = qnext\n\n return qnext", "def get_fractional_degradation(bt):\n\n\n\n NomIch = 0.125 # Nominal charge current\n NomId = 0.25 # Nominal discharge current\n NomSoC = 0.5 # Nominal state of charge_mode\n NomDoD = 1.0 # Nominal depth of discharge\n B = 5 #Battery capacity\n qt = 5 * 0.5 # Amount of energy in the battery at the start\n # Determin charge of discharge\n if bt > 0:\n Id = bt/(B*1) # time interval differnece is 1\n Ich = NomIch\n else:\n Ich = bt/(B*1)\n Id = NomId\n\n #Calculate average State of Charge\n SoC = 100 * (qt - 0.5*bt)/B\n\n #Calculate Depth of Discharge\n DoD = 100 * bt /B\n\n # Functions\n nCL1 = (e * np.exp (f * Id) + g * np.exp(h * Id))/ (e * np.exp (f * NomId) + g * np.exp(h * NomId))\n nCL2 = (m * np.exp (n * Ich) + o * np.exp(p * Ich))/ (m* np.exp (n* NomIch) + o * np.exp(p * NomIch))\n nCL3 = get_CL4(DoD, SoC)/get_CL4(NomDoD, NomSoC)\n nCL = nCL1 * nCL2 * nCL3\n 
Fractional_D = (0.5/3650)/ nCL\n return Fractional_D", "def dbconvert(self, spl=0, chan=0):\n ref = REF_ES_dB\n if chan == 1:\n ref = REF_MAG_dB\n \n zeroref = REF_ES_volt/(10**(ref/20.0));\n sf = zeroref*10**(spl/20.0); # actually, the voltage needed to get spl out...\n if self.debugFlag:\n print (\"pystim.dbconvert: scale = %f for %f dB\" % (sf, spl))\n return (sf) # return a scale factor to multiply by a waveform normalized to 1 ", "def voltage_conversion(self):\r\n\t\tvoltage = ((self.data[0] * 256 + self.data[1]) / 65536.0) * 5.0\r\n\t\t\r\n\t\treturn {'v' : voltage}", "def decel_pap(samp, el_p, line=None):\n # Overvoltage Ratio\n el_i=el_p\n u0 = el_p.volt/(el_p.xray/1000.)\n mavg = (el_i.z * el_i.c1) / el_i.mass\n j_i = el_i.z*(10.04 + 8.25*np.exp(-1.0*el_i.z/11.22))/1000. # keV\n\n # [1]p35e8\n p_k = [0.78,\n 0.1,\n (-1.*(0.5 - 0.25*j_i))]\n\n d_k = [6.6e-6,\n 1.12e-5*(1.35 - 0.45*(j_i**2)),\n 2.2e-6/j_i]\n\n con = ((1.602e-19)**2*(6.023e23)/(8*np.pi*(8.854e-12)**2))/100\n if line == 'dE/dps':\n # find just the average energy loss (for checking) [1]p34e5\n pap_dec = 0\n f_V = 0\n for k in range(0, 3):\n print(d_k[k])\n print(p_k[k])\n f_V += (d_k[k]*(el_p.volt/j_i)**(p_k[k]))\n pap_dec = (mavg/j_i)*(1.0/(f_V))\n\n # Bethe\n con = ((1.602e-19)**2*(6.023e23)/(8*np.pi*(8.854e-12)**2))/100\n # Con ~ 78500 -\n\n bethe_dec2 =(con/(el_p.volt)*\n mavg*np.log(1.166*el_p.volt/j_i))\n return pap_dec, con, bethe_dec2", "def reduce(self):\n import math\n g = math.gcd(self.num, self.den)\n return Fraction(self.num//g, self.den//g)", "def bin_coded_dec(self):\n register = (self.opcode & 0xFFF) >> 8\n value = self.registers[register]\n self.memory[self.I] = int(math.floor(value / 100))\n self.memory[self.I + 1] = int(math.floor(value % 100 / 10))\n self.memory[self.I + 2] = value % 10\n logger.info(\"Stored BCD of V{}({}) starting at {}\".format(\n register,\n self.registers[register],\n hex(self.I)))", "def get_decimation_from_canvas_rect(self, canvas_rect):\n\n full_image_rect = self.canvas_rect_to_full_image_rect(canvas_rect)\n return self.get_decimation_factor_from_full_image_rect(full_image_rect)", "def getBid(self):\r\n\t\tdata = self.pair.data\r\n\t\tif data['ask'] == None:\r\n\t\t\treturn None\r\n\t\treturn 1. / data['ask']", "def dec2gon(dec):\n return 10/9 * dec", "def test_den(self):\n np_den, torch_den = self.get_denominator()\n np.testing.assert_array_almost_equal(np_den, torch_den.numpy())" ]
[ "0.5972083", "0.58928466", "0.5669203", "0.56526047", "0.55357", "0.55007315", "0.5486681", "0.54774976", "0.544386", "0.5438974", "0.53794795", "0.53770524", "0.53531665", "0.52374", "0.5221092", "0.51855433", "0.5179858", "0.5170302", "0.51680213", "0.5166022", "0.51482767", "0.51092434", "0.50982624", "0.50729614", "0.5050443", "0.50499606", "0.50399494", "0.5038151", "0.5016412", "0.49996892", "0.49985808", "0.49930716", "0.49902123", "0.4968584", "0.49600908", "0.49600357", "0.494113", "0.4936464", "0.4931091", "0.4924115", "0.49119872", "0.48988113", "0.48658687", "0.48639697", "0.48599702", "0.48499015", "0.48473635", "0.4838413", "0.48217198", "0.48191854", "0.48122233", "0.48071045", "0.4776653", "0.47718552", "0.4768397", "0.47604853", "0.47541162", "0.47521645", "0.47482708", "0.4747986", "0.47463635", "0.4743627", "0.47407657", "0.47273448", "0.47227174", "0.472178", "0.46924528", "0.46902224", "0.4689421", "0.46862814", "0.46854663", "0.4685064", "0.46842864", "0.4681009", "0.46776173", "0.46743774", "0.46725905", "0.46705943", "0.46674684", "0.4650058", "0.46402737", "0.46335962", "0.46274188", "0.46244293", "0.46161768", "0.46134886", "0.4611818", "0.46117532", "0.46117532", "0.46077815", "0.45996597", "0.4595841", "0.45946348", "0.45921892", "0.45910665", "0.45910624", "0.45881903", "0.4581625", "0.45796058", "0.4578504" ]
0.4874277
42
Iterate over orders. Iterator to iterate over the orders in the indexer. Will enable the synchronized `modes` iterator.
def orders(self): self._current_order = self.min_order while self._current_order <= self.max_order: yield self._current_order self._current_order += 1 del self._current_order
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modes(self):\n try:\n order = self._current_order\n except AttributeError:\n raise AttributeError('Cannot iterate over modes without iterating over orders!') from None\n mode = -order\n while mode <= order:\n yield mode\n mode += 1", "def __iter__(self):\n for x in self._order:\n yield x", "def __iter__(self):\n return iter([v for k, v in sorted(self._modes.items())])", "def _iterate_basis_order_(reference_determinant, order):\n occupied_indices = numpy.where(reference_determinant)[0]\n unoccupied_indices = numpy.where(numpy.invert(reference_determinant))[0]\n\n for occ_ind, unocc_ind in itertools.product(\n itertools.combinations(occupied_indices, order),\n itertools.combinations(unoccupied_indices, order)):\n basis_state = reference_determinant.copy()\n\n occ_ind = list(occ_ind)\n unocc_ind = list(unocc_ind)\n\n basis_state[occ_ind] = False\n basis_state[unocc_ind] = True\n\n yield basis_state", "def __iter__(self):\n return iter(self._key_order)", "def __iter__(self):\n # return self.options[:self.idx] + self.options[self.idx:]\n for op in self.queue():\n yield op", "def orders(self) -> List[MetatraderOrder]:\n return self._orders", "def __iter__(self):\n return self.in_order", "def ordered_edges_iter(self, data=False, keys=False):\n\n # start by sorting nodes in temporal+topological order\n o = self.ordering_graph()\n nodes = nx.topological_sort(o)\n\n # iterate over edges using this very order\n for _ in self.edges_iter(nbunch=nodes, data=data, keys=keys):\n yield _", "def test_multiple_iterators_are_isolated(self):\r\n for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)):\r\n q = q.order_by('attempt_id')\r\n expected_order = [0, 1, 2, 3]\r\n iter1 = iter(q)\r\n iter2 = iter(q)\r\n for attempt_id in expected_order:\r\n assert iter1.next().attempt_id == attempt_id\r\n assert iter2.next().attempt_id == attempt_id", "def orders ( self, block: bool = True ):\n\tresult = OutstandingOrders(\n\t\tauth\t\t= self.auth,\n\t\taccount_nbr = self.account_nbr,\n\t\tblock\t\t= block\n\t).request()\n\n\treturn result", "def test_multiple_iterators_are_isolated(self):\r\n for q in (self.table.objects(test_id=0), self.table.objects(self.table.column('test_id') == 0)):\r\n q = q.order_by('attempt_id')\r\n expected_order = [0,1,2,3]\r\n iter1 = iter(q)\r\n iter2 = iter(q)\r\n for attempt_id in expected_order:\r\n assert iter1.next().attempt_id == attempt_id\r\n assert iter2.next().attempt_id == attempt_id", "def iterate_over_descendents(self,index_groups):\r\n\r\n self.reset_iterators()\r\n for l_temp in index_groups:\r\n self.add_iterator(l_temp)\r\n self.show_iterators()", "def all_orders(self, symbol, **kwargs):\n pass", "def orders(self):\n return self._orders", "def iter(self, iters, executor=None):\n deps_by_kind = self.dependencies_by_kind()\n\n # Merge iterators of data that has the same kind\n kind_iters = dict()\n for kind, deps in deps_by_kind.items():\n kind_iters[kind] = strax.merge_iters(\n strax.sync_iters(\n strax.same_length,\n {d: iters[d] for d in deps}))\n\n if len(deps_by_kind) > 1:\n # Sync iterators of different kinds by time\n kind_iters = strax.sync_iters(\n partial(strax.same_stop, func=strax.endtime),\n kind_iters)\n\n iters = kind_iters\n pending = []\n yield from self._inner_iter(iters, pending, executor)\n self.cleanup(wait_for=pending)", "def __iter__(self):\n return self.ordered_keys.__iter__()", "def get_orders(self):\n return self.order_lst", "def __iter__(self):\n for key in itertools.chain(list(self._opts.keys()),\n 
list(self._groups.keys())):\n yield key", "def _iterate_basis_spin_order_(reference_determinant, alpha_order, beta_order):\n occupied_alpha_indices = numpy.where(reference_determinant[::2])[0] * 2\n unoccupied_alpha_indices = numpy.where(\n numpy.invert(reference_determinant[::2]))[0] * 2\n occupied_beta_indices = numpy.where(reference_determinant[1::2])[0] * 2 + 1\n unoccupied_beta_indices = numpy.where(\n numpy.invert(reference_determinant[1::2]))[0] * 2 + 1\n\n for (alpha_occ_ind, alpha_unocc_ind, beta_occ_ind,\n beta_unocc_ind) in itertools.product(\n itertools.combinations(occupied_alpha_indices, alpha_order),\n itertools.combinations(unoccupied_alpha_indices, alpha_order),\n itertools.combinations(occupied_beta_indices, beta_order),\n itertools.combinations(unoccupied_beta_indices, beta_order)):\n basis_state = reference_determinant.copy()\n\n alpha_occ_ind = list(alpha_occ_ind)\n alpha_unocc_ind = list(alpha_unocc_ind)\n beta_occ_ind = list(beta_occ_ind)\n beta_unocc_ind = list(beta_unocc_ind)\n\n basis_state[alpha_occ_ind] = False\n basis_state[alpha_unocc_ind] = True\n basis_state[beta_occ_ind] = False\n basis_state[beta_unocc_ind] = True\n\n yield basis_state", "def compute_modes(self, mode_indices, mode_handles, vec_handles=None):\n if vec_handles is not None:\n self.vec_handles = util.make_iterable(vec_handles)\n build_coeffs = np.dot(self.eigvecs, np.diag(self.eigvals**-0.5))\n self.vec_space.lin_combine(\n mode_handles, self.vec_handles, build_coeffs,\n coeff_array_col_indices=mode_indices)", "def iter_all_atoms(self):\n for atm in self.atom_order_list:\n if isinstance(atm, Atom):\n yield atm\n else:\n for atmx in atm:\n yield atmx", "def getOrderList(self):\r\n\t\treturn self.pair.orders", "def getOrderList(self):\r\n\t\treturn self.orders", "def manage_orders(self):\r\n for coin, pair_info in self.usdt_pairs.items():\r\n orders = self.kc.get_orders(pair_info[\"symbol\"], status=\"active\")\r\n self.log(coin, orders[\"totalNum\"])\r\n if orders[\"totalNum\"]:\r\n self.log(len(orders[\"items\"]))\r\n for order in orders[\"items\"]:\r\n self.log(order)\r\n\r\n self.log(mp.mpf())\r\n\r\n # ticker = current price action, bid/ask, etc\r\n ticker = self.kc.get_ticker(pair_info[\"symbol\"])\r\n self.log(ticker)\r\n return", "def orders(self) -> List[Order]:\n return store.orders.get_orders(self.exchange, self.symbol)", "def get_all_orders():", "def generator_orders(self):\n return tuple(self._gen_orders)", "def iterorderable(self):\r\n return (x for x in self.iterall() if x.orderable)", "def iter_mode(n, obj='ndarray'):\n for mode in cap[obj][MODE]:\n for char in fmtdict[mode]:\n yield randitems(n, obj, mode, char)", "def iter_mode(n, obj='ndarray'):\n for mode in cap[obj][MODE]:\n for char in fmtdict[mode]:\n yield randitems(n, obj, mode, char)", "def orders(self, orders):\n\n self._orders = orders", "def orders(self, orders):\n\n self._orders = orders", "def _get_index_iterator(indexes, length):\n return combinations(indexes, length)", "def test_entities__EntityOrder____iter____1(entityOrder):\n assert ([\n 'IcemacAddressbookAddressbookAddressbook',\n 'IcemacAddressbookPersonPerson',\n 'IcemacAddressbookPersonPersondefaults',\n 'IcemacAddressbookAddressPostaladdress',\n 'IcemacAddressbookAddressPhonenumber',\n 'IcemacAddressbookAddressEmailaddress',\n 'IcemacAddressbookAddressHomepageaddress',\n 'IcemacAddressbookFileFileFile',\n 'IcemacAddressbookKeywordKeyword',\n ] == list(iter(entityOrder)))", "def execution_order(self) -> typing.Iterator:\n return 
self.execution_order_strategy_class(self._steps)", "def __iter__(self):\n \n return iter(zip(self.orbital_operators, self.orbital_labels))", "def open_orders_for(self, symbol, **kwargs):\n pass", "def open_orders(self, **kwargs):\n pass", "def __iter__(self):\n cols = tuple([col[0] for col in self.description])\n for r in self._rows:\n yield OrderedDict(zip(cols, r))", "def _iterModels(modelIDs):\n\n class ModelInfoIterator(object):\n \"\"\"ModelInfo iterator implementation class\n \"\"\"\n\n # Maximum number of ModelInfo elements to load into cache whenever\n # cache empties\n __CACHE_LIMIT = 1000\n\n debug=False\n\n\n def __init__(self, modelIDs):\n \"\"\"\n Parameters:\n ----------------------------------------------------------------------\n modelIDs: a sequence of Nupic model identifiers for which this\n iterator will return _NupicModelInfo instances.\n NOTE: The returned instances are NOT guaranteed to be in\n the same order as the IDs in modelIDs sequence.\n retval: nothing\n \"\"\"\n # Make our own copy in case caller changes model id list during iteration\n self.__modelIDs = tuple(modelIDs)\n\n if self.debug:\n _emit(Verbosity.DEBUG,\n \"MODELITERATOR: __init__; numModelIDs=%s\" % len(self.__modelIDs))\n\n self.__nextIndex = 0\n self.__modelCache = collections.deque()\n return\n\n\n def __iter__(self):\n \"\"\"Iterator Protocol function\n\n Parameters:\n ----------------------------------------------------------------------\n retval: self\n \"\"\"\n return self\n\n\n\n def next(self):\n \"\"\"Iterator Protocol function\n\n Parameters:\n ----------------------------------------------------------------------\n retval: A _NupicModelInfo instance or raises StopIteration to\n signal end of iteration.\n \"\"\"\n return self.__getNext()\n\n\n\n def __getNext(self):\n \"\"\"Implementation of the next() Iterator Protocol function.\n\n When the modelInfo cache becomes empty, queries Nupic and fills the cache\n with the next set of NupicModelInfo instances.\n\n Parameters:\n ----------------------------------------------------------------------\n retval: A _NupicModelInfo instance or raises StopIteration to\n signal end of iteration.\n \"\"\"\n\n if self.debug:\n _emit(Verbosity.DEBUG,\n \"MODELITERATOR: __getNext(); modelCacheLen=%s\" % (\n len(self.__modelCache)))\n\n if not self.__modelCache:\n self.__fillCache()\n\n if not self.__modelCache:\n raise StopIteration()\n\n return self.__modelCache.popleft()\n\n\n\n def __fillCache(self):\n \"\"\"Queries Nupic and fills an empty modelInfo cache with the next set of\n _NupicModelInfo instances\n\n Parameters:\n ----------------------------------------------------------------------\n retval: nothing\n \"\"\"\n assert (not self.__modelCache)\n\n # Assemble a list of model IDs to look up\n numModelIDs = len(self.__modelIDs) if self.__modelIDs else 0\n\n if self.__nextIndex >= numModelIDs:\n return\n\n idRange = self.__nextIndex + self.__CACHE_LIMIT\n if idRange > numModelIDs:\n idRange = numModelIDs\n\n lookupIDs = self.__modelIDs[self.__nextIndex:idRange]\n\n self.__nextIndex += (idRange - self.__nextIndex)\n\n # Query Nupic for model info of all models in the look-up list\n # NOTE: the order of results may not be the same as lookupIDs\n infoList = _clientJobsDB().modelsInfo(lookupIDs)\n assert len(infoList) == len(lookupIDs), \\\n \"modelsInfo returned %s elements; expected %s.\" % \\\n (len(infoList), len(lookupIDs))\n\n # Create _NupicModelInfo instances and add them to cache\n for rawInfo in infoList:\n modelInfo = 
_NupicModelInfo(rawInfo=rawInfo)\n self.__modelCache.append(modelInfo)\n\n assert len(self.__modelCache) == len(lookupIDs), \\\n \"Added %s elements to modelCache; expected %s.\" % \\\n (len(self.__modelCache), len(lookupIDs))\n\n if self.debug:\n _emit(Verbosity.DEBUG,\n \"MODELITERATOR: Leaving __fillCache(); modelCacheLen=%s\" % \\\n (len(self.__modelCache),))\n\n\n return ModelInfoIterator(modelIDs)", "def __iter__(self):\n\n for opt in self.eset:\n if self.bitflags & int(opt):\n yield opt", "def orders_for(self, symbol):\n\t\tif self._session:\n\t\t\tdata = {'symbol': symbol}\n\t\t\treturn self._session.get_open_orders(**data)\n\n\t\treturn []", "def mix_iterator(self):\n self.job = OrderedDict()\n for list_i in self.grid_iterator():\n # Pick the values to be used in this run\n for (k, i) in zip(self.table.keys(), list_i):\n self.job[k] = self.table[k][i]\n # Do the string replace operations on the values themselves\n self.expand_values()\n yield self.job", "def iterate_optimizer_configs(options):\n for batch_size in options[consts.BATCH_SIZE]:\n for optimizer in options[consts.OPTIMIZER]:\n config = options.copy()\n config[consts.BATCH_SIZE] = batch_size\n config[consts.OPTIMIZER] = optimizer\n yield config", "def __iter__(self):\n for key in sorted(self.keys):\n yield key, self[key]", "def iter_atoms(self, sort=False, key='exp'):\n if not sort:\n for atom in self[key].atoms:\n yield atom\n else:\n for atom in SortAtom.sort(self, molecule=self[key]):\n yield atom", "def __iter__(self):\n keys = [CoolProp.iDmass,CoolProp.iHmass,CoolProp.iP,CoolProp.iSmass,CoolProp.iT]\n for key in sorted(keys):\n yield key", "def __iter__(self):\n for i in range(len(self.ks)):\n yield self.get_neighs([i]), self.get_sp_rel_pos([i]),\\\n [self.ks[i]], self.iss", "def open_orders(self, **params):\n return self._get('option/openOrders', signed=True, params=params, version=None)", "def __iter__(self):\n while True:\n if self.batches is None:\n for indexed_sentence in self.indexed_sentences:\n yield indexed_sentence\n else:\n for batch in self.batches:\n yield batch[:-1, :], batch[1:, :] # Return batch and target indices\n\n if not self.repeat:\n return", "def __iter__(self):\n indices = []\n for i, size in enumerate(self.group_sizes):\n if size == 0:\n continue\n indice = np.where(self.flag == i)[0]\n if not len(indice) == size:\n raise ValueError('the length of the indice should be equal to the size')\n np.random.shuffle(indice)\n num_extra = int(np.ceil(size / self.samples_per_gpu)\n ) * self.samples_per_gpu - len(indice)\n indice = np.concatenate([indice, indice[:num_extra]])\n indices.append(indice)\n indices = np.concatenate(indices)\n indices = [\n indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]\n for i in np.random.permutation(\n range(len(indices) // self.samples_per_gpu))\n ]\n indices = np.concatenate(indices)\n indices = torch.from_numpy(indices).long()\n if not len(indices) == self.num_samples:\n raise ValueError(\"the length of the indices should be equal to num_samples\")\n return iter(indices)", "def __iter__(self):\n for p in self.positions(): # use same order as positons()\n yield p.element() # but yield each element", "def __iter__(self):\n for p in self.positions(): # use same order as positions()\n yield p.element() # but yield each element", "def iterator(self):\n yield", "async def on_orders_replaced(self, orders: List[MetatraderOrder]):\n self._orders = orders", "def __iter__(self):\n for o in self._iter:\n yield o", "def __iter__(self):\n for id in self.order():\n 
inputs = [w for w in self.wires if w['target'][0] == id]\n yield id, inputs", "def __iter__(self):\n alt_locs = self.keys()\n alt_locs.sort()\n for alt_loc in alt_locs:\n yield self[alt_loc]", "def __iter__(self):\n for key in self._ctx:\n yield key", "def iter_keys(self, search, itersize=None, client=None, version=None):\r\n\r\n if client is None:\r\n client = self.get_client(write=False)\r\n\r\n pattern = self.make_key(search, version=version)\r\n cursor = b\"0\"\r\n\r\n while True:\r\n cursor, data = client.scan(cursor, match=pattern, count=itersize)\r\n\r\n for item in data:\r\n item = smart_text(item)\r\n yield self.reverse_key(item)\r\n\r\n if cursor == b\"0\":\r\n break", "def __iter__(self) -> Iterator[Access]:\n for accesses in self._accesses.values():\n for access in accesses:\n yield access", "def _get_indices(self, parts: List[str], keys: List[str]):\n for key in keys:\n yield parts.index(key)", "def __iter__(self):\n for key in self.sprite_order:\n if key not in self.sprite_groups:\n # abstract type\n continue\n for s in self.sprite_groups[key]:\n yield s", "def __iter__(self):\r\n for addon in self.addons.itervalues():\r\n yield addon", "def getAllUnitsWithOrders(self):\n\n # While we're at it, lazily remove any units from the mapping if they\n # don't actually have orders.\n unitsWithNoOrders = []\n\n # Note! Don't change this to use iteritems(). We need to make sure that\n # the caller can modify the unit orders while they are iterating over\n # this list, without messing up the iteration. Currently this is\n # accomplished by just getting the whole list of unit IDs up front\n # (self.orders.keys()) and then iterating over that list.\n for unitId in self.orders.keys():\n if self.orders[unitId]:\n yield unitId\n else:\n # Don't actually remove the unit yet, because modifying a\n # container while iterating over it tends to cause problems.\n # Instead, store it in a list to remove later.\n unitsWithNoOrders.append(unitId)\n\n for unitId in unitsWithNoOrders:\n del self.orders[unitId]", "def __iter__(self):\n return zip(self._phases, self.data)", "def orders(self) -> List[\"UnitOrder\"]:\n return [UnitOrder.from_proto(o, self._game_data) for o in self.proto.orders]", "def get(self):\n return sync.get_open_orders()", "def iter_atoms(self):\n return iter(self.atom_list)", "def indexable_objects_iter(docs, es_index, es_doctype):\n\n for doc in docs:\n insert_doc = {\n '_index': es_index,\n '_type': es_doctype,\n '_id': \"%s-%s\" % (doc['state'], doc['id']),\n '_source': doc\n }\n yield insert_doc", "def __iter__(self):\n for key in self._group._opts.keys():\n yield key", "def ordered_indices(self):\n return self.base_dataset.ordered_indices()", "def make_calcurve_orders(self):\n for oi in self.orders:\n self.make_calcurve_order(oi)", "def in_order(self):\n for node_data in self._in_order_helper(self._root):\n yield node_data", "def getEquates(self) -> Iterator[ghidra.program.model.symbol.Equate]:\n ...", "def __iter__(self):\n for tokens in iter_documents(self.top_dir, self.types, self.sheets, self.np, self.ngrams):\n # transform tokens (strings) into a sparse vector, one at a time\n yield self.dictionary.doc2bow(tokens)", "def __iter__(self):\n for benchinst in sorted(self.instances.values()):\n yield benchinst", "def orders(self):\n\t\tORDER_MAP = [\"Do Nothing\", \"Collect All\", \"Drop All\", \"Collect\", \"Drop\", \"Collect All But\", \"Drop All But\", \"Garrison\"]\n\t\treturn [(delay, Star(star_id, galaxy=self.galaxy), ORDER_MAP[order], num_ships)\n\t\t for delay, star_id, 
order, num_ships in self.data.o]", "def __iter__(self):\n for atom in self.iter_atoms():\n yield atom", "def __iter__():", "def __iter__():", "def __iter__():", "def __iter__():", "def __iter__(self):\n yield from self._type_keys", "def iter_all_atoms(self):\n for model in self.iter_models():\n for atm in model.iter_all_atoms():\n yield atm", "def _get_open_orders(self):\n # When using ORDER_PRICE_PRIORITY, the get_priority function has a\n # side-effect of doing a database write for the tab_based_priority\n # field of each open order. More details documented on the function\n if settings.ENABLE_ORDER_PRICE_PRIORITY:\n return self._get_open_orders_by_price()\n return self._get_open_orders_by_time_created()", "def __iter__(self):\n\t\tfor nt in SeqVector.rev_mapping:\n\t\t\tyield nt", "def to_z_basis_ops(self) -> Iterator[raw_types.Operation]:\n for qubit, pauli in self.items():\n yield clifford_gate.SingleQubitCliffordGate.from_single_map(\n {pauli: (pauli_gates.Z, False)})(qubit)", "def get_sobol_indices(self, order):\n self._set_statistics()\n return self.statistics_object.get_sobol(order)", "def import_orders(cls, store_views=None):\n if store_views is None:\n store_views = cls.search([])\n\n for store_view in store_views:\n store_view.import_order_from_store_view()", "def __iter__(self):\n for index in range(len(self)):\n yield self[index]", "def __iter__(self):\n for block in sorted(self.blocks.values(), key=lambda b: b.ordinal):\n yield block", "def iter(self):\n\t\tfor element in self.elements:\n\t\t\tyield element", "def iter(self):\n\t\tfor element in self.elements:\n\t\t\tyield element", "def query_orders(self):\n return self._call_txtrader_api('query_orders', {})", "def __iter__(self,status='hold'):\n\t\tfor cart in sorted(self.get_cartridges(),key=lambda c: c.data['id_num']):\n\t\t\tyield cart", "def __iter__(self):\n for key in self._catalogs:\n yield key", "def _iterCoordsets(self):\n\n for i in range(self._n_csets):\n yield self._coords[i]", "def __iter__(self):\n with SessionContext(self.SessionClass) as session:\n keys = session.query(PAW2_DBObject.key)\n keys = [c[0] for c in keys]\n random.shuffle(keys)\n return keys.__iter__()" ]
[ "0.67183816", "0.6654742", "0.6436368", "0.6087598", "0.59756845", "0.5863205", "0.5597003", "0.55368114", "0.5527949", "0.5482221", "0.547718", "0.5439024", "0.54328644", "0.543001", "0.53729576", "0.53424174", "0.53381103", "0.5320791", "0.5310121", "0.52943534", "0.5290345", "0.5280263", "0.52748394", "0.5267393", "0.5261795", "0.522714", "0.5173866", "0.5155422", "0.5144449", "0.51244676", "0.51244676", "0.51225144", "0.51225144", "0.51071554", "0.50842625", "0.50671244", "0.50513136", "0.5028437", "0.50282204", "0.5026101", "0.49939668", "0.4983905", "0.49670428", "0.4958714", "0.49550015", "0.4952182", "0.49515432", "0.4949487", "0.49421504", "0.49307555", "0.4926776", "0.4915228", "0.4909328", "0.4903629", "0.48878217", "0.48782745", "0.4875376", "0.4859659", "0.48473796", "0.4845005", "0.48382372", "0.48359728", "0.48271307", "0.48204464", "0.48202258", "0.4809289", "0.4804053", "0.47975013", "0.47950405", "0.47940615", "0.47722244", "0.47719294", "0.47644797", "0.47611803", "0.47606876", "0.4755861", "0.474179", "0.4734773", "0.47294787", "0.4728537", "0.47267067", "0.47267067", "0.47267067", "0.47267067", "0.4723749", "0.4723391", "0.4716672", "0.4715516", "0.47126573", "0.47115463", "0.47079855", "0.4697177", "0.46961862", "0.46936613", "0.46936613", "0.46917182", "0.46886975", "0.4687852", "0.4682537", "0.4676257" ]
0.58461016
6
Iterate over modes. Synchronized iterator to iterate the modes in an order.
def modes(self): try: order = self._current_order except AttributeError: raise AttributeError('Cannot iterate over modes without iterating over orders!') from None mode = -order while mode <= order: yield mode mode += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n return iter([v for k, v in sorted(self._modes.items())])", "def get_modes(self):\n return [i for i, j in enumerate(self._modemap._map) if j is not None]", "async def _load_modes(self) -> None:\n modes: List[Dict[str, Any]] = await self._api_request(\"modes\")\n _LOGGER.debug(\"Loaded modes\")\n self._modes = [Mode(m) for m in modes]", "def iter_mode(n, obj='ndarray'):\n for mode in cap[obj][MODE]:\n for char in fmtdict[mode]:\n yield randitems(n, obj, mode, char)", "def iter_mode(n, obj='ndarray'):\n for mode in cap[obj][MODE]:\n for char in fmtdict[mode]:\n yield randitems(n, obj, mode, char)", "def values(self):\n return self._modes.values()", "def _get_modes(self):\n return self.__modes", "def get_modes(self):\n modes = set()\n for er in self.exercise_recordings:\n if er.mode not in modes:\n modes.add(er.mode)\n return list(modes)", "def modes(self, modes):\n\n self._modes = modes", "def modes(self, modes):\n\n self._modes = modes", "def all_modes(self):\n\n # Find \"post-proj all modes\"\n # Jump to first value, ignoring text.\n # Move through data, adding it to a list\n # continue onto next line.\n # Repeat until the following line is known to be empty.\n\n # output.dat is the psi4 output file.\n with open('output.dat', 'r') as file:\n lines = file.readlines()\n for count, line in enumerate(lines):\n if \"post-proj all modes\" in line:\n start_of_vals = count\n break\n else:\n raise EOFError('Cannot locate modes in output.dat file.')\n\n # Barring the first (and sometimes last) line, dat file has 6 values per row.\n end_of_vals = start_of_vals + (3 * len(self.molecule.molecule['input'])) // 6\n\n structures = lines[start_of_vals][24:].replace(\"'\", \"\").split()\n structures = structures[6:]\n\n for row in range(1, end_of_vals - start_of_vals):\n # Remove double strings and weird formatting.\n structures += lines[start_of_vals + row].replace(\"'\", \"\").replace(\"]\", \"\").split()\n\n all_modes = [float(val) for val in structures]\n\n return array(all_modes)", "def all_modes(self):\n\n # Find \"post-proj all modes\"\n # Jump to first value, ignoring text.\n # Move through data, adding it to a list\n # continue onto next line.\n # Repeat until the following line is known to be empty.\n\n # output.dat is the psi4 output file.\n with open(\"output.dat\", \"r\") as file:\n lines = file.readlines()\n for count, line in enumerate(lines):\n if \"post-proj all modes\" in line:\n start_of_vals = count\n break\n else:\n raise EOFError(\"Cannot locate modes in output.dat file.\")\n\n # Barring the first (and sometimes last) line, dat file has 6 values per row.\n end_of_vals = start_of_vals + (3 * len(self.molecule.atoms)) // 6\n\n structures = lines[start_of_vals][24:].replace(\"'\", \"\").split()\n structures = structures[6:]\n\n for row in range(1, end_of_vals - start_of_vals):\n # Remove double strings and weird formatting.\n structures += (\n lines[start_of_vals + row].replace(\"'\", \"\").replace(\"]\", \"\").split()\n )\n\n all_modes = [float(val) for val in structures]\n\n return np.array(all_modes)", "def _sort_modes(self):\n sort_idx = np.lexsort((self.modes[:, 1], self.modes[:, 0], self.modes[:, 2]))\n self._modes = self.modes[sort_idx]", "def get_modes(self):\n return self.circuit.get_modes()", "def getModes(this):\n\t\tthis.checkInit()\n\t\t\n\t\t# On sauvegarde la config actuelle\n\t\tinit = this.config(get=True)\n\t\t\n\t\t# Ensembles de modes\n\t\tformats = Camera.formats.copy()\n\t\tmodes = set()\n\t\t\n\t\t# On averti du départ\n\t\tprint '\\nLooping 
modes for the camera... (%d modes)' % (len(formats))\n\t\t\t\n\t\t# Pour chaques formats\n\t\twhile formats:\n\t\t\t\n\t\t\t# On récupère le format à tester\n\t\t\tformat = formats.pop()\n\t\t\t\n\t\t\t# Configuration actuelle\n\t\t\tmode = this.config(\n\t\t\t\theight = float(format[1]),\n\t\t\t\twidth = float(format[0])\n\t\t\t)\n\t\t\t\n\t\t\t# On enregistre le mode\n\t\t\tcurrentFormat = (mode['width'], mode['height'])\n\t\t\tmodes.add(currentFormat)\n\t\t\tif currentFormat in formats:\n\t\t\t\tformats.remove(currentFormat)\n\t\t\t\n\t\t\t# On affiche l'itération courante\n\t\t\tprintf('%d%5s\\r' % (len(formats), ''))\n\t\t###\n\t\t\n\t\t# On remet comme avant et on retourne la liste de modes\n\t\tthis.config(params=init); print 'Done, found %d.' % (len(modes))\n\t\treturn [(int(mode[0]), int(mode[1])) for mode in modes]", "def modes(self) -> List[str]:\n return [m.name for m in self._modes]", "def modes(self):\n return np.hstack(tuple(self.operator.modes))", "def get_modes(self, code_block):\r\n # FUCK YOU INDEX ERRORS, LIST COMPS, AND EVEN YOU LAMBDAS I DON'T NEED PRETTY\r\n # 0 = pos mode\r\n # 1 = imm mode\r\n modes, mode_codes = [0, 0], list(reversed(str(code_block[0])))[2:]\r\n x = 0\r\n for mode in mode_codes:\r\n modes[x] = int(mode)\r\n x += 1\r\n print('Get modes: ')\r\n print(modes)\r\n return modes", "def __iter__(self):\n for x in self._order:\n yield x", "def sort_modes(self):\n # sorts by l, then n, then freq\n ind = np.lexsort((self.modes['freq'], self.modes['n'],self.modes['l']))\n self.modes = np.array([self.modes[i] for i in ind],dtype=modetype)", "def modes_for_course(cls, course_id):\r\n now = datetime.now(pytz.UTC)\r\n found_course_modes = cls.objects.filter(Q(course_id=course_id) &\r\n (Q(expiration_datetime__isnull=True) |\r\n Q(expiration_datetime__gte=now)))\r\n modes = ([Mode(\r\n mode.mode_slug,\r\n mode.mode_display_name,\r\n mode.min_price,\r\n mode.suggested_prices,\r\n mode.currency,\r\n mode.expiration_datetime\r\n ) for mode in found_course_modes])\r\n if not modes:\r\n modes = [cls.DEFAULT_MODE]\r\n return modes", "def test_modes_for_course_multiple(self):\r\n mode1 = Mode(u'honor', u'Honor Code Certificate', 0, '', 'usd', None)\r\n mode2 = Mode(u'verified', u'Verified Certificate', 0, '', 'usd', None)\r\n set_modes = [mode1, mode2]\r\n for mode in set_modes:\r\n self.create_mode(mode.slug, mode.name, mode.min_price, mode.suggested_prices)\r\n\r\n modes = CourseMode.modes_for_course(self.course_key)\r\n self.assertEqual(modes, set_modes)\r\n self.assertEqual(mode1, CourseMode.mode_for_course(self.course_key, u'honor'))\r\n self.assertEqual(mode2, CourseMode.mode_for_course(self.course_key, u'verified'))\r\n self.assertIsNone(CourseMode.mode_for_course(self.course_key, 'DNE'))", "def get_all_servers_modes():\n return _get_list(\n lambda server: server.mode,\n lambda server: server.mode_name_long\n )", "def modes(self, avg=False):\n if not self.fp_init:\n self._init_full_props\n if avg:\n return self._modes_avg, self._num_modes\n return self._modes, self._num_modes", "def compute_modes(self, mode_indices, mode_handles, vec_handles=None):\n if vec_handles is not None:\n self.vec_handles = util.make_iterable(vec_handles)\n build_coeffs = np.dot(self.eigvecs, np.diag(self.eigvals**-0.5))\n self.vec_space.lin_combine(\n mode_handles, self.vec_handles, build_coeffs,\n coeff_array_col_indices=mode_indices)", "def modes(self, exp_id: int) -> List[str]:\n return list(self.state[exp_id].keys())", "def change_modes(self, change_list):\n\t\tprint \"CHG_MODE 
START\"\n\t\tfor mode_ix in range(0,len(change_list),2):\n\t\t\tsetid_and_index = self.__mode_modesetid(change_list[mode_ix])\n\t\t\tif setid_and_index is not None:\n\t\t\t\tif change_list[mode_ix+1] == True:\n\t\t\t\t\tprint \"Setting Active Set:{0} Index:{1}\".format(setid_and_index[0], setid_and_index[1])\n\t\t\t\t\tself.ms_all[setid_and_index[0]].activate(setid_and_index[1])\n\t\t\t\telif change_list[mode_ix+1] == False:\n\t\t\t\t\tprint \"Setting DEactive Set:{0} Index:{1}\".format(setid_and_index[0], setid_and_index[1])\n\t\t\t\t\tself.ms_all[setid_and_index[0]].deactivate(setid_and_index[1])\n\t\t\t\telse:\n\t\t\t\t\tprint \"Invalid State\"\n\t\tif 'volume' in self.ms_all:\n\t\t\tprint self.ms_all['volume'].active()\n\t\tif 'modecycle1' in self.ms_all:\n\t\t\tprint self.ms_all['modecycle1'].active()\n\t\tprint \"CHG_MODE STOP\"", "def temp_mode_as(self, mode):\n previous_mode = self._mode\n self._mode = mode\n yield\n self._mode = previous_mode", "def cycle_mode(self):\n n_modes = len(self.sensors) + N_EXTRA_MODES\n self.mode += 1\n self.mode %= n_modes\n logger.info(f'Switched to mode {self.mode}')", "def getDisplayModes(self, obj):\n modes = []\n return modes", "def partial_modes(self, level, node=None):\n if node:\n return self.operator.modes[self._index_list(level, node)]\n\n indeces = [self._index_list(level, i) for i in range(2**level)]\n return np.hstack(tuple([self.operator.modes[idx] for idx in indeces]))", "def _calc_modes(self, f):\n\n fd = self.log_prob(f).mean(0)\n\n # If f_i-1 <= f_i and f_i+1 <= f_i, then f_i is a mode\n f1 = fd[...,1:]\n f2 = fd[...,:-1]\n b1 = f1.le(f2)\n b2 = f2.le(f1)\n m1 = b1[...,1:].logical_and(b2[...,:-1])\n\n # if f_0 => f_1 then f_0 is a mode\n # if f_N => f_N-1 then f_n is a mode\n mask = t.cat((\n fd[...,0].ge(fd[...,1]).unsqueeze(-1),\n m1,\n fd[...,-1].ge(fd[...,-2]).unsqueeze(-1)\n ), -1)\n\n self._modes = self.f.masked_select(mask)\n self._modes_avg = self.f_avg.masked_select(mask)\n self._num_modes = mask.sum(-1)", "def write_all(self):\n\n for _, seq in self.seq_dict.items():\n write_mode(seq)", "def test_multiple_modes(self, parse_input_mocked_metadata, modes):\n bb = parse_input_mocked_metadata(\"Vac | {}\\n\".format(modes))\n assert bb.operations == [{\"modes\": [0, 1, 2, 5], \"op\": \"Vac\"}]", "def get_all_color_modes(self):\n return self._all_color_modes", "def get_device_modes(self, device_id):\n e = ctypes.POINTER(rs_error)()\n dev = lrs.rs_get_device(self.ctx, device_id, ctypes.byref(e))\n _check_error(e)\n for stream_id in range(rs_stream.RS_STREAM_COUNT):\n mode_count = lrs.rs_get_stream_mode_count(dev, stream_id, ctypes.byref(e))\n _check_error(e)\n for idx in range(mode_count):\n width = ctypes.c_int()\n height = ctypes.c_int()\n fmt = ctypes.c_int()\n fps = ctypes.c_int()\n lrs.rs_get_stream_mode(dev, stream_id, idx,\n ctypes.byref(width),\n ctypes.byref(height),\n ctypes.byref(fmt),\n ctypes.byref(fps),\n ctypes.byref(e))\n _check_error(e)\n yield StreamMode(stream_id, width.value, height.value,\n fmt.value, fps.value)", "def modesets(self):\n\t\tcopy_ms_all = {}\n\t\tfor mode_set_id,mode_set in self.ms_all.iteritems():\n\t\t\tcopy_ms_all[mode_set_id] = copy.deepcopy(mode_set.simple())\n\t\treturn copy_ms_all", "def __iter__(self):\n\n for opt in self.eset:\n if self.bitflags & int(opt):\n yield opt", "def __iter__(self):\n for key in itertools.chain(list(self._opts.keys()),\n list(self._groups.keys())):\n yield key", "def mode():\n\n # assumption: if more than 1 mode is found, return list of modes instead of single 
item\n\n # use dictionary to track occurance for each int w/ key rep int, val rep occurance count\n countdict = {}\n\n for item in inlist:\n # to process each int, check if int already exists in dict as a key\n if item in countdict: \n # int already exists - increment the associated count (occurance count)\n countdict[item] = countdict[item]+1\n else: \n # int does not exist - make new entry in dict for first occurance of new key\n countdict[item] = 1\n \n # call values method to return a list of val in dict\n countlist = countdict.values()\n\n maxcount = max(countlist)\n\n modelist = []\n # itering though the dict keys looking for a key w/ a val that matches max count\n for item in countdict:\n # when found such a key, place that key in the mode list \n if countdict[item] == maxcount:\n # key/s assoc w/ count, appended to list of modes\n modelist.append(item)\n\n # check num of modes in collection if there's only one mode - output single item\n if len(modelist) <= 1:\n # for single mode - output single mode\n for item in modelist:\n return item\n else: \n # more than 1 mode in collection - output list of modes\n return modelist", "def init_modes(self):\n \n self.deleteMode = delete_Mode()\n self.commandMode = command_Mode()\n self.visualMode = visual_Mode()\n self.insertMode = insert_Mode()\n self.exMode = ex_Mode()\n self.yankMode = yank_Mode()\n self.gmodeMode = gmode_Mode()\n self.cmodeMode = cmode_Mode()\n self.rmodeMode = rmode_Mode()\n self.tmodeMode = tmode_Mode()\n self.selectionMode = selection_Mode()\n self.indentMode = indent_Mode()", "def __iter__(self):\n # return self.options[:self.idx] + self.options[self.idx:]\n for op in self.queue():\n yield op", "def __iter__(self):\n for key in self._ctx:\n yield key", "def getmode(self, mode):\r\n modes = {}\r\n # core modes\r\n for m, (basemode, basetype, bands) in _MODEINFO.items():\r\n modes[m] = ModeDescriptor(m, bands, basemode, basetype)\r\n # extra experimental modes\r\n modes[\"RGBa\"] = ModeDescriptor(\"RGBa\",\r\n (\"R\", \"G\", \"B\", \"a\"), \"RGB\", \"L\")\r\n modes[\"LA\"] = ModeDescriptor(\"LA\", (\"L\", \"A\"), \"L\", \"L\")\r\n modes[\"La\"] = ModeDescriptor(\"La\", (\"L\", \"a\"), \"L\", \"L\")\r\n modes[\"PA\"] = ModeDescriptor(\"PA\", (\"P\", \"A\"), \"RGB\", \"L\")\r\n # mapping modes\r\n modes[\"I;16\"] = ModeDescriptor(\"I;16\", \"I\", \"L\", \"L\")\r\n modes[\"I;16L\"] = ModeDescriptor(\"I;16L\", \"I\", \"L\", \"L\")\r\n modes[\"I;16B\"] = ModeDescriptor(\"I;16B\", \"I\", \"L\", \"L\")\r\n # set global mode cache atomically\r\n _modes = modes\r\n return _modes[mode]", "def _modes(self):\n answer = []\n for i in dir(self):\n if i.startswith('handle_'):\n answer.append(i.replace('handle_', ''))\n return answer", "def __mode_modesetid(self, mode):\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tix = val.index(mode)\n\t\t\tif ix is not None:\n\t\t\t\treturn key, ix", "def __cb_mode_change(self, list_of_modes):\t\n\t\t\n\t\tnew_active_modes = []\t\t# only the new active mode(s)\n\t\tmode_change_params = []\n\t\tfor mode in list_of_modes:\n\t\t\tmode_change_params.append(mode['mode'])\n\t\t\tmode_change_params.append(mode['state'])\n\t\t\tif mode['state']:\n\t\t\t\tnew_active_modes.append(mode['mode'])\n\n\t\tself.__printer(\"Mode change. 
{0}\".format(mode_change_params),level=LL_DEBUG)\n\t\tself.__exec_function_by_code('MODE-CHANGE',*mode_change_params)\n\t\t\n\t\tif callable(self.callback_mode_change):\n\t\t\tself.callback_mode_change(mode_change_params)\n\t\t\n\t\t# Check if we have an event for this..\n\t\tif self.event_mode_change:\n\t\t\n\t\t\tfor emc in self.event_mode_change:\n\t\t\t\tif any(x in new_active_modes for x in emc['modes']):\n\t\t\t\t\t\n\t\t\t\t\t# TODO! check if ['type'] == 'mode_change'\n\t\t\t\t\t\n\t\t\t\t\tfor active_mode in new_active_modes:\n\t\t\t\t\t\tif active_mode in emc['modes']:\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\trgb_dev = self.get_device_config(emc['device'])\n\t\t\t\t\t\t\tpin_r = rgb_dev['r']\n\t\t\t\t\t\t\tpin_g = rgb_dev['g']\n\t\t\t\t\t\t\tpin_b = rgb_dev['b']\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t# ignore pattern for now..\n\t\t\t\t\t\t\t#turn on rgb_1, using ff0000\n\t\t\t\t\t\t\tself.gpio.pwm_rgb(pin_r,pin_g,pin_b,emc['rgb'])", "def _get_applicable_modes(command):\n mode_dict = {}\n _add_applicable_modes(command, mode_dict)\n return mode_dict.keys()", "def __iter__(self):\n return iter(self._key_order)", "def _new_course_mode_models(modes_data):\n return [\n CourseMode(**modes_dict)\n for modes_dict in modes_data\n ]", "def _build_modes_dict(self, mode_names, modes):\n last_index = 0\n mode_datas = dict()\n for mode in modes:\n mode_data = dict(mode._data)\n mode_data[\"name\"] = mode_names[\n last_index : last_index + mode_data[\"name_length\"]\n ]\n mode_datas[mode_data[\"id\"]] = mode_data\n last_index += mode_data[\"name_length\"]\n return mode_datas", "def append_modes(self,modes):\n self.modes = np.append(self.modes,np.array(modes,dtype=modetype))", "def modes_for_course_dict(cls, course_id):\r\n return {mode.slug: mode for mode in cls.modes_for_course(course_id)}", "def playback_modes(self):\n return list(playback_modes.values())", "def clear(self):\n while len(self._modes):\n key = sorted(list(self._modes.keys()))[0]\n self.remove(key)", "def iter_states(self):\n return iter(self._states_)", "def preset_modes(self) -> list:\n return self._preset_modes", "def selectable_booking_modes():\n\n db = current.db\n s3db = current.s3db\n\n mtable = s3db.org_booking_mode\n query = (mtable.deleted == False)\n rows = db(query).select(mtable.id,\n mtable.name,\n )\n modes = {row.id: row.name for row in rows}\n return modes", "def orders(self):\n self._current_order = self.min_order\n while self._current_order <= self.max_order:\n yield self._current_order\n self._current_order += 1\n del self._current_order", "def test_multiple_iterators_are_isolated(self):\r\n for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)):\r\n q = q.order_by('attempt_id')\r\n expected_order = [0, 1, 2, 3]\r\n iter1 = iter(q)\r\n iter2 = iter(q)\r\n for attempt_id in expected_order:\r\n assert iter1.next().attempt_id == attempt_id\r\n assert iter2.next().attempt_id == attempt_id", "def __iter__(self):\n for channel in self.channels.itervalues():\n yield channel", "def GetPackageModes(self):\n return self._modes", "def iterate(self): # pragma: no mccabe\n for case_result in self.suite_result.passed:\n for scenario_result in case_result.passed:\n yield scenario_result, True, True\n for scenario_result in case_result.failed:\n yield scenario_result, False, True # pragma: no cover\n\n for case_result in self.suite_result.failed:\n for scenario_result in case_result.passed:\n yield scenario_result, True, False # pragma: no cover\n for scenario_result in case_result.failed:\n yield scenario_result, 
False, False", "def multiple_modes(mu, xv_min, xv_max, n_modes=10):\n # Make sure these are arrays\n xv_min = np.array(xv_min)\n xv_max = np.array(xv_max)\n\n # Generate a list of starting points\n n_dims, = xv_min.shape\n standard_range = np.random.rand(n_modes, n_dims)\n xv_start_list = (xv_max - xv_min) * standard_range + xv_min\n\n # Compute the modes\n # Size: (n_modes, d_x)\n return np.array([mode(mu, xv_start, xv_min, xv_max)\n for xv_start in xv_start_list])", "def keys(self):\n return self._modes.keys()", "def freq_sorted(self):\n\n for i in range(len(self.modes)-1):\n if (self.modes['l'][i] > 0): continue\n if (self.modes['l'][i] != self.modes['l'][i+1]): continue\n if (self.modes['freq'][i] > self.modes['freq'][i+1]):\n return False\n return True", "def g_modes_list(m, k, qlist, ecc=0, chi=0):\n approx = []\n for q in qlist:\n approx.append(g_modes(m, k, q, ecc=ecc, chi=chi))\n return np.asarray(approx)", "def iterator(self):\n yield", "def __iter__(self):\n return self.in_order", "def Iterate(self):\n\t\tfor atom in self.atoms:\n\t\t\tself.UpdateAtom(atom)", "def iter_sessions():\n return iter(_session_stack)", "def selectable_services_modes():\n\n db = current.db\n s3db = current.s3db\n\n mtable = s3db.org_service_mode\n query = (mtable.deleted == False)\n rows = db(query).select(mtable.id,\n mtable.name,\n )\n modes = {row.id: row.name for row in rows}\n return modes", "def animate_linearized_modes(self ):\n \n from pyro.dynamic.statespace import linearize\n \n linearized_sys = linearize( self )\n \n animations = []\n \n for i in range(self.n):\n ani = linearized_sys.animate_eigen_mode( i , self.is_3d )\n \n animations.append( ani )\n \n return linearized_sys , animations", "def _iterModels(modelIDs):\n\n class ModelInfoIterator(object):\n \"\"\"ModelInfo iterator implementation class\n \"\"\"\n\n # Maximum number of ModelInfo elements to load into cache whenever\n # cache empties\n __CACHE_LIMIT = 1000\n\n debug=False\n\n\n def __init__(self, modelIDs):\n \"\"\"\n Parameters:\n ----------------------------------------------------------------------\n modelIDs: a sequence of Nupic model identifiers for which this\n iterator will return _NupicModelInfo instances.\n NOTE: The returned instances are NOT guaranteed to be in\n the same order as the IDs in modelIDs sequence.\n retval: nothing\n \"\"\"\n # Make our own copy in case caller changes model id list during iteration\n self.__modelIDs = tuple(modelIDs)\n\n if self.debug:\n _emit(Verbosity.DEBUG,\n \"MODELITERATOR: __init__; numModelIDs=%s\" % len(self.__modelIDs))\n\n self.__nextIndex = 0\n self.__modelCache = collections.deque()\n return\n\n\n def __iter__(self):\n \"\"\"Iterator Protocol function\n\n Parameters:\n ----------------------------------------------------------------------\n retval: self\n \"\"\"\n return self\n\n\n\n def next(self):\n \"\"\"Iterator Protocol function\n\n Parameters:\n ----------------------------------------------------------------------\n retval: A _NupicModelInfo instance or raises StopIteration to\n signal end of iteration.\n \"\"\"\n return self.__getNext()\n\n\n\n def __getNext(self):\n \"\"\"Implementation of the next() Iterator Protocol function.\n\n When the modelInfo cache becomes empty, queries Nupic and fills the cache\n with the next set of NupicModelInfo instances.\n\n Parameters:\n ----------------------------------------------------------------------\n retval: A _NupicModelInfo instance or raises StopIteration to\n signal end of iteration.\n \"\"\"\n\n if self.debug:\n 
_emit(Verbosity.DEBUG,\n \"MODELITERATOR: __getNext(); modelCacheLen=%s\" % (\n len(self.__modelCache)))\n\n if not self.__modelCache:\n self.__fillCache()\n\n if not self.__modelCache:\n raise StopIteration()\n\n return self.__modelCache.popleft()\n\n\n\n def __fillCache(self):\n \"\"\"Queries Nupic and fills an empty modelInfo cache with the next set of\n _NupicModelInfo instances\n\n Parameters:\n ----------------------------------------------------------------------\n retval: nothing\n \"\"\"\n assert (not self.__modelCache)\n\n # Assemble a list of model IDs to look up\n numModelIDs = len(self.__modelIDs) if self.__modelIDs else 0\n\n if self.__nextIndex >= numModelIDs:\n return\n\n idRange = self.__nextIndex + self.__CACHE_LIMIT\n if idRange > numModelIDs:\n idRange = numModelIDs\n\n lookupIDs = self.__modelIDs[self.__nextIndex:idRange]\n\n self.__nextIndex += (idRange - self.__nextIndex)\n\n # Query Nupic for model info of all models in the look-up list\n # NOTE: the order of results may not be the same as lookupIDs\n infoList = _clientJobsDB().modelsInfo(lookupIDs)\n assert len(infoList) == len(lookupIDs), \\\n \"modelsInfo returned %s elements; expected %s.\" % \\\n (len(infoList), len(lookupIDs))\n\n # Create _NupicModelInfo instances and add them to cache\n for rawInfo in infoList:\n modelInfo = _NupicModelInfo(rawInfo=rawInfo)\n self.__modelCache.append(modelInfo)\n\n assert len(self.__modelCache) == len(lookupIDs), \\\n \"Added %s elements to modelCache; expected %s.\" % \\\n (len(self.__modelCache), len(lookupIDs))\n\n if self.debug:\n _emit(Verbosity.DEBUG,\n \"MODELITERATOR: Leaving __fillCache(); modelCacheLen=%s\" % \\\n (len(self.__modelCache),))\n\n\n return ModelInfoIterator(modelIDs)", "def __iter__(self):\n for run in self.runs:\n yield run", "def __iter__(self):\n for key in self._group._opts.keys():\n yield key", "def supported_modes(self) -> Set[str]:\n raise NotImplementedError", "def supported_operation_modes(\n self,\n ) -> list[HVACModeT]:", "def supported_modes(self):\n return [OFF, SYNC, CHARGE]", "def __iter__(self):\n for key in self.sprite_order:\n if key not in self.sprite_groups:\n # abstract type\n continue\n for s in self.sprite_groups[key]:\n yield s", "def __iter__(self):\n return iter(self._enums)", "def iter_context_objects(self):\n use_gevent = is_gevent_enabled()\n use_context = is_context_enabled()\n\n if use_context:\n tid = context_get_ident()\n elif use_gevent:\n tid = greenlet_get_ident()\n else:\n tid = thread_get_ident()\n\n objects = self._cache.get(tid)\n if objects is None:\n if len(self._cache) > _MAX_CONTEXT_OBJECT_CACHE:\n self._cache.clear()\n objects = self._global[:]\n objects.extend(getattr(self._thread_context, \"stack\", ()))\n\n if use_gevent:\n objects.extend(getattr(self._greenlet_context, \"stack\", ()))\n\n if use_context:\n objects.extend(self._context_stack.get([]))\n\n objects.sort(reverse=True)\n objects = [x[1] for x in objects]\n self._cache[tid] = objects\n return iter(objects)", "def modes_at(x_query):\n return multiple_modes(lambda y_q: mu_yx(y_q, x_query), yv_min, yv_max,\n n_modes=n_modes)", "def __iter__(self):\n for block in sorted(self.blocks.values(), key=lambda b: b.ordinal):\n yield block", "def start(self):\n for circuit in self.circuits:\n self.modes[self.print_mode](circuit)", "def _get_modes(self, L=0):\n l = np.arange(L + 1).reshape((-1, 1))\n z = np.zeros((L + 1, 2))\n return np.hstack([l, z])", "def _iterate(self):\n\n for atom in self.mol.GetAtoms(): #type: Chem.Atom\n atm_idx = atom.GetIdx()\n 
shells: List[Set[int]] = [] # List of Shells (set of atomidx (int))\n current_iter_atomts: Set[int] = {atm_idx} # Set of atoms for current iteration, initialized with central-atom\n prev_atms = {atm_idx} # Atoms already in inner shells, used to avoid occurrence in multiple shells\n for i in range(self.shellCount): # type: Chem.Atom\n next_iter_atoms: Set[int] = set()\n # Add neighbours of atoms in previous shell (potential candidates for next shell)\n for j_atm in current_iter_atomts: # type: int\n j_atm_obj: Chem.Atom = self.mol.GetAtomWithIdx(j_atm)\n next_iter_atoms.update([k_atm.GetIdx() for k_atm in j_atm_obj.GetNeighbors()])\n # Add atoms as shell if not in one of the previous shells\n shells.append(next_iter_atoms - prev_atms)\n # Update for next loop\n current_iter_atomts = next_iter_atoms - prev_atms\n prev_atms.update(next_iter_atoms)\n assert len(shells) == self.shellCount\n self._processShells(atm_idx, shells)", "def __iter__(self):\n return self.states()", "def __iter__(self):\n for runspec in self.runspecs:\n yield runspec", "def _list_contexts(self):\r\n return sorted(list(self._bbreader.cache.keys()))", "def itercases(self):\n for case in self._cases:\n yield case, self._casenames[case], self._case_vals[case]", "def preset_modes(self):\n return self._preset_modes", "def _get_modes(self, N=0):\n dim_tor = 2 * N + 1\n n = np.arange(dim_tor).reshape((-1, 1)) - N\n z = np.zeros((dim_tor, 2))\n return np.hstack([z, n])", "def __iter__(self):\n for benchinst in sorted(self.instances.values()):\n yield benchinst", "def nmodes(self):\n if self.mode_selection is not None:\n return len(self.mode_selection)\n else:\n return len(self.mol.normal_modes.modes.freqs)", "def _iterate_basis_order_(reference_determinant, order):\n occupied_indices = numpy.where(reference_determinant)[0]\n unoccupied_indices = numpy.where(numpy.invert(reference_determinant))[0]\n\n for occ_ind, unocc_ind in itertools.product(\n itertools.combinations(occupied_indices, order),\n itertools.combinations(unoccupied_indices, order)):\n basis_state = reference_determinant.copy()\n\n occ_ind = list(occ_ind)\n unocc_ind = list(unocc_ind)\n\n basis_state[occ_ind] = False\n basis_state[unocc_ind] = True\n\n yield basis_state", "async def flight_mode(self):\n\n request = telemetry_pb2.SubscribeFlightModeRequest()\n flight_mode_stream = self._stub.SubscribeFlightMode(request)\n\n try:\n async for response in flight_mode_stream:\n \n\n \n yield FlightMode.translate_from_rpc(response.flight_mode)\n finally:\n flight_mode_stream.cancel()", "def test_multiple_iterators_are_isolated(self):\r\n for q in (self.table.objects(test_id=0), self.table.objects(self.table.column('test_id') == 0)):\r\n q = q.order_by('attempt_id')\r\n expected_order = [0,1,2,3]\r\n iter1 = iter(q)\r\n iter2 = iter(q)\r\n for attempt_id in expected_order:\r\n assert iter1.next().attempt_id == attempt_id\r\n assert iter2.next().attempt_id == attempt_id", "def __iter__(self):\n for benchclass in sorted(self.classes.values()):\n yield benchclass", "def iter(self, iters, executor=None):\n deps_by_kind = self.dependencies_by_kind()\n\n # Merge iterators of data that has the same kind\n kind_iters = dict()\n for kind, deps in deps_by_kind.items():\n kind_iters[kind] = strax.merge_iters(\n strax.sync_iters(\n strax.same_length,\n {d: iters[d] for d in deps}))\n\n if len(deps_by_kind) > 1:\n # Sync iterators of different kinds by time\n kind_iters = strax.sync_iters(\n partial(strax.same_stop, func=strax.endtime),\n kind_iters)\n\n iters = kind_iters\n pending = 
[]\n yield from self._inner_iter(iters, pending, executor)\n self.cleanup(wait_for=pending)" ]
[ "0.7799798", "0.63018954", "0.6299035", "0.62619734", "0.62619734", "0.6169084", "0.61189497", "0.60541093", "0.5993781", "0.5993781", "0.59646934", "0.58745813", "0.58432204", "0.58386713", "0.5829693", "0.57918906", "0.57898873", "0.57819664", "0.5591517", "0.5588966", "0.55590606", "0.55087155", "0.550612", "0.54356", "0.5426513", "0.54214007", "0.5403037", "0.53745687", "0.5368656", "0.5362822", "0.53594744", "0.5354816", "0.5323901", "0.5311701", "0.5280851", "0.52690834", "0.5264454", "0.52537054", "0.52367127", "0.52334714", "0.52172935", "0.5201681", "0.51347816", "0.51290953", "0.51097775", "0.5105074", "0.5105051", "0.5098101", "0.5076774", "0.5056767", "0.5055965", "0.50535256", "0.5046869", "0.5035398", "0.5027098", "0.5003806", "0.49955365", "0.49765152", "0.49683547", "0.4965261", "0.49586028", "0.49585646", "0.49545446", "0.4952738", "0.49504825", "0.4950198", "0.49500632", "0.49486178", "0.4946439", "0.49409255", "0.49403748", "0.4939825", "0.49396032", "0.4939453", "0.49387246", "0.49269834", "0.49225318", "0.49217874", "0.49203202", "0.49181032", "0.49168962", "0.49102557", "0.49039233", "0.49022257", "0.48996451", "0.4885431", "0.48758954", "0.48711383", "0.48687056", "0.4866977", "0.48646116", "0.48644578", "0.48577204", "0.48574883", "0.4854783", "0.4848652", "0.48448858", "0.48308355", "0.4809935", "0.48025885" ]
0.78386045
0
Sum spherical harmonics coefficients of the same order. Calculates the sum of the coefficients for all modes for each order individually. The `SphericalHarmonicsIndexer` needs to be created to match the orders of the expansion coefficients. This requires that the length of the summation axis is the same as the number of coefficients for the orders specified, i.e. `values.shape[axis] == len(self)`. If no axis is specified, the first suitable axis will be used.
def ordersum(self, values, axis=None): values = np.asarray(values) if axis is None: for axis in range(values.ndim): if values.shape[axis] == len(self): break else: raise ValueError('Cannot find axis of length {} in the given values!'.format(len(self))) values = np.moveaxis(values, axis, 0) output = np.zeros((self.max_order - self.min_order + 1, ) + values.shape[1:], dtype=values.dtype) for idx, order in enumerate(self.orders): output[idx] = np.sum(values[self(order, -order):self(order, order) + 1], axis=0) return np.moveaxis(output, 0, axis)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _harmonic_sum(self, rank: int) -> complex:\n return (self.flm * self.slepian.eigenvectors[rank].conj()).sum()", "def sum(self, axis: int = 0):\r\n self.values = self.values.sum(axis=axis)\r\n self.layers = [None]\r\n return self.copy()", "def sh( values ):\n # ECMWF normalizes the spherical harmonic coeffs differently than NCEP.\n # (m=0,n=0 is global mean, instead of sqrt(2)/2 times global mean)\n fld = 2.*values/np.sqrt(2.)\n \n #------SPLITTING IT UP IN AN IMAGARY AND REAL PART--------\n fldr = fld[ 0::2 ] #annenhver verdi fra 0\n fldi = fld[ 1::2 ] #annenhver verdi fra 1\n fldn = np.zeros( fldr.shape, 'F' ) #blir halvparten så stor som orginale fld\n fldn.real = fldr #legges da til i fldn vectoren\n fldn.imag = fldi\n #----------------------------------------------------------\n \n nlons = 360 #Have a feeling it probably is number of values like grid val\n nlats = 1280 #web sais it shourld be 180.. wellwell, seems to work\n s = spharm.Spharmt( nlons, nlats ) \n \n data = s.spectogrd( fldn ) #Hvis nlats = 180, så feiler denne delen pga hvordan formelen fungerer..\n \n lons = ( 360./nlons ) * np.arange( nlons )\n lats = 90.-( 180./( nlats - 1 ) ) * np.arange( nlats )\n lons, lats = np.meshgrid( lons, lats )\n \n #stack grids side-by-side (in longitiudinal direction), so\n # any range of longitudes (between -360 and 360) may be plotted on a world map.\n lons = np.concatenate(( lons - 360, lons ), 1 )\n lats = np.concatenate(( lats, lats ), 1 )\n data = np.concatenate(( data, data ), 1 )\n \n return lats, lons, data", "def _expand(self):\n cilm = SHExpandDH(self.data)\n coeffs = SHCoeffs.from_array(cilm, kind='DH')\n return coeffs", "def spherical_harmonic_functions(l, m = None):\n if m == None:\n m = l\n assert abs(m)<=l\n P_l_m = associated_legendre_polynomials(l,m)\n S_m, C_m = sin_cos_expressions(m)\n \n Y_func_l_m = [[None]*(2*j+1) for j in range(m+1)]\n for i in range(l+1):\n Y_func_l_m[i][0] = sym.simplify(K_l_m(i,0) * P_l_m[i][0])\n \n for i in range(1,l+1):\n for j in range(1,i+1):\n Y_func_l_m[i][j] = sym.simplify(2**0.5 * K_l_m(i,j) * C_m[j] * P_l_m[i][j])\n for i in range(1,l+1):\n for j in range(1,i+1):\n Y_func_l_m[i][-j] = sym.simplify(2**0.5 * K_l_m(i,-j) * S_m[j] * P_l_m[i][j])\n return Y_func_l_m", "def combine_modes(h_lm: dict, inc: float, phase: float) -> np.ndarray:\n total = sum([h_lm[(l, m)] * sYlm(-2, l, m, inc, phase) for l, m in h_lm])\n h_plus_cross = dict(plus=total.real, cross=-total.imag)\n return h_plus_cross", "def vector_compute_spherical_harmonics(theta, phi, selected_modes=None):\n # Only copies if input is scalar, else returns original array\n theta = np.array(theta, copy=False, ndmin=1)\n theta = np.array(phi, copy=False, ndmin=1)\n\n Ylms = {}\n one_m_theta = 1.0 - np.cos(theta)\n one_p_theta = 1.0 + np.cos(theta)\n snTheta = np.sin(theta)\n for m in range(-2, 3):\n if selected_modes is not None and (2,m) not in selected_modes:\n continue\n if m == -2:\n Ylms[(2,m)] = _a2m2 * one_m_theta * one_m_theta * np.exp(1j*m*phi)\n elif m == -1:\n Ylms[(2,m)] = _a2m1 * snTheta * one_m_theta * np.exp(1j*m*phi)\n elif m == 0:\n Ylms[(2,m)] = _a20 * snTheta * snTheta\n elif m == 1:\n Ylms[(2,m)] = _a21 * snTheta * one_p_theta * np.exp(1j*m*phi)\n elif m == 2:\n Ylms[(2,m)] = _a22 * one_p_theta * one_p_theta * np.exp(1j*m*phi)\n\n return Ylms", "def forward_theta(self):\n SW = self.simplesphere.sphere_wrapper\n for dm, m in enumerate(self.simplesphere.local_m):\n m_data = [f.data[dm] for f in self.component_fields]\n # Unpack for rank 0 to counteract shortcut 
bug in sphere_wrapper\n if self.rank == 0:\n m_data, = m_data\n self.coeffs[dm] = SW.forward(m, self.rank, m_data)", "def __call__(self, sphere_or_coeffs):\n if self.input_representation == \"spectral\":\n ell_max = sphere_or_coeffs.shape[1] - 1\n resolution = 2 * (ell_max + 1)\n if sphere_or_coeffs.shape[2] != 2*ell_max + 1:\n raise ValueError(\"Axes 1 and 2 must have dimensions \"\n \"(ell_max+1, 2*ell_max+1).\")\n elif self.input_representation == \"spatial\":\n resolution = sphere_or_coeffs.shape[1]\n ell_max = sphere_utils.ell_max_from_resolution(resolution)\n if sphere_or_coeffs.shape[2] != resolution:\n raise ValueError(\"Axes 1 and 2 must have the same dimensions!\")\n else:\n raise ValueError(\"`input_representation` must be either \"\n \"'spectral' or 'spatial'.\")\n\n if sphere_or_coeffs.shape[3] != len(list(self.spins_in)):\n raise ValueError(\"Input axis 3 (spins_in) doesn't match layer's.\")\n\n if self.spectral_pooling and self.spectral_upsampling:\n raise ValueError(\"`spectral_pooling` and `spectral_upsampling` \"\n \"should not be both True.\")\n\n if self.spectral_pooling:\n resolution //= 2\n ell_max = sphere_utils.ell_max_from_resolution(resolution)\n\n # Make sure constants contain all spins for input resolution.\n for spin in set(self.spins_in).union(self.spins_out):\n if not self.transformer.validate(resolution, spin):\n raise ValueError(\"Constants are invalid for given input!\")\n\n num_channels_in = sphere_or_coeffs.shape[-1]\n if self.num_filter_params is None:\n kernel = self._get_kernel(ell_max, num_channels_in)\n else:\n kernel = self._get_localized_kernel(ell_max, num_channels_in)\n\n # Map over the batch dimension.\n vmap_convolution = jax.vmap(\n _spin_spherical_convolution,\n in_axes=(None, 0, None, None, None, None, None, None, None))\n return vmap_convolution(self.transformer,\n sphere_or_coeffs, kernel,\n self.spins_in,\n self.spins_out,\n self.spectral_pooling,\n self.spectral_upsampling,\n self.input_representation,\n self.output_representation)", "def sum_over_energy(self):\n # Note that the array is using the opposite convention from WCS\n # so we sum over axis 0 in the array, but drop axis 2 in the WCS object\n return Map(np.sum(self.counts, axis=0), self.wcs.dropaxis(2))", "def sphere(*args, axis: Union[List[float, float, float], bool]=None, caching: bool=True,\n degree: Union[int, bool]=3, endSweep: Union[float, bool]=2, heightRatio: Union[float,\n bool]=2.0, nodeState: Union[int, bool]=0, pivot: Union[List[float, float, float],\n bool]=None, radius: Union[float, bool]=1.0, sections: Union[int, bool]=8, spans:\n Union[int, bool]=1, startSweep: Union[float, bool]=0, tolerance: Union[float,\n bool]=0.01, useTolerance: bool=False, constructionHistory: bool=True, name:\n AnyStr=\"\", object: bool=True, polygon: int=0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[List[AnyStr], Any]:\n pass", "def spherefcn(x: np.ndarray) -> np.ndarray:\n if x.ndim == 1:\n x = x.reshape(-1, len(x))\n f = np.sum(x**2, axis=1)\n return f.reshape(-1, 1)", "def _compute_cosineSequences(self, expand=False, factor=False,\n simplify=False):\n if self._has(\"omega\"):\n return\n if not self._has(\"theta\"):\n self.eigenvalues(expand=expand, factor=factor,\n simplify=simplify)\n omega = Matrix(SR, self._.d + 1)\n omega[:, 0] = 1\n for i in range(self._.d + 1):\n omega[i, 1] = self._.theta[i]/self._.b[0]\n for j in range(2, self._.d + 1):\n omega[i, j] = _simplify(_factor((\n (self._.theta[i] - self._.a[j-1]) * omega[i, j-1]\n - self._.c[j-1] * omega[i, j-2]) / 
self._.b[j-1]))\n self._.omega = omega", "def _integrate_sphere(self, rank: int) -> complex:\n assert isinstance(self.f, np.ndarray) # noqa: S101\n s_p = ssht.inverse(\n self.slepian.eigenvectors[rank],\n self.L,\n Method=sleplet._vars.SAMPLING_SCHEME,\n )\n weight = sleplet._integration_methods.calc_integration_weight(self.L)\n return sleplet._integration_methods.integrate_whole_sphere(\n weight,\n self.f,\n s_p.conj(),\n )", "def core_func(phi_by_term, out_by_phase, solver, sizes=[], cast_mode=''):\n\n L = out_by_phase.shape[1]\n L = 2*L if cast_mode == 'real-imag' else L\n kernels = dict()\n _phi_by_term = _cast_complex2real(phi_by_term, cast_mode)\n\n for is_odd in [False, True]:\n curr_phases = range(2-is_odd, N+1, 2)\n curr_y = np.concatenate([_complex2real(out_by_phase[p],\n cast_mode=cast_mode)\n for p in curr_phases], axis=0)\n\n curr_phi = np.bmat(\n [[_phi_by_term.get((p+2*k, k), np.zeros((L, sizes[p+2*k-1]))) *\n binomial(p+2*k, k) for k in range(1-(p+1)//2, 1+(N-p)//2)]\n for p in curr_phases])\n\n if not is_odd:\n curr_y = np.concatenate((np.real(out_by_phase[0]), curr_y),\n axis=0)\n n_even = range(2, N+1, 2)\n temp = np.concatenate([_phi_by_term[n, n//2] *\n binomial(n, n//2) for n in n_even],\n axis=1)\n curr_phi = np.concatenate((np.real(temp), curr_phi), axis=0)\n\n curr_f = _solver(curr_phi, curr_y, solver)\n\n index = 0\n for n in range(1 if is_odd else 2, N+1, 2):\n nb_term = sizes[n-1]\n kernels[n] = curr_f[index:index+nb_term]\n index += nb_term\n\n return kernels", "def E0_sum(r, k, fiber_radius, eps_out, eps_in, E0_mod, nmin_sc, nmax_sc, case):\n\n # refractive index of the cylinder relative \n # to that of the surrounding medium\n m = np.sqrt(eps_in / eps_out)\n E0 = Mie_scat_cyl.Es(r[0], r[1], r[2], k, fiber_radius,\n m, E0_mod, nmin_sc, nmax_sc, case)\n\n r_car = pol2cart(r)\n kvec_car = np.array([-k, 0, 0]) # normal incidence\n exp_factor = np.exp(1j * np.dot(kvec_car, r_car))\n if case == 1:\n Einc_car = np.array([0, 0, E0_mod], dtype=complex) * exp_factor\n # Ez is the same in pol and in cart coordinates\n E0 += Einc_car\n elif case == 2:\n Einc_car = np.array([0, E0_mod, 0], dtype=complex) * exp_factor\n E0 += vec_cart2pol(r_car, Einc_car)\n\n return(E0)", "def eval_hamiltonian(num_atoms, h_poly, (phase0, phase1)):\n # print \"phase=\",(phase0, phase1)\n h = numpy.zeros((num_atoms, num_atoms),\n dtype = numpy.complex64)\n\n for (exp0, exp1) in h_poly:\n # print phase0, phase1, exp0, exp1\n h += h_poly[(exp0, exp1)] * phase0**exp0 * phase1**exp1\n\n return h", "def total_electronic_hamiltonian(self):\n return block_diag(*[self.electronic_manifold(n) for n in range(3)])", "def compute_spherical_harmonics(Lmax, theta, phi, selected_modes=None):\n # PRB: would this be faster if we made it a 2d numpy array? 
\n Ylms = {}\n for l in range(2,Lmax+1):\n for m in range(-l,l+1):\n if selected_modes is not None and (l,m) not in selected_modes:\n continue\n Ylms[ (l,m) ] = lal.SpinWeightedSphericalHarmonic(theta, phi,-2, l, m)\n\n return Ylms", "def spher_harm_bas(Xc,Yc,Zc,X,Y,Z,Order):\n import math as mt\n from scipy.special import lpmv\n # Construct variables from axes; no meshgrid as of 6/4/14; no potential as of 6/12/14\n nx,ny,nz=X.shape[0],Y.shape[0],Z.shape[0]\n x,y,z = np.zeros((nx,ny,nz)),np.zeros((nx,ny,nz)),np.zeros((nx,ny,nz))\n for i in range(nx):\n for j in range(ny):\n for k in range(nz):\n x[i,j,k] = X[i]-Xc\n y[i,j,k] = Y[j]-Yc\n z[i,j,k] = Z[k]-Zc\n x,y,z=np.ravel(x,order='F'),np.ravel(y,order='F'),np.ravel(z,order='F') \n r,rt=np.sqrt(x*x+y*y+z*z),np.sqrt(x*x+y*y)\n # Normalize with geometric mean, 3/15/14 (most recently); makes error go down about order of magnitude\n rsort=np.sort(r)\n rmin=rsort[1] # first element is 0 \n rmax=rsort[len(r)-1] \n rnorm=np.sqrt(rmax*rmin)\n r=r/rnorm\n # Construct theta and phi\n theta,phi=np.zeros(len(r)),np.zeros(len(r))\n for i in range(len(z)): #Set theta and phi to be correct. 10/19/13\n theta[i] = mt.atan2(rt[i],z[i])\n phi[i] = mt.atan2(y[i],x[i])\n # Make the spherical harmonic matrix in sequence of [Y00,Y-11,Y01,Y11,Y-22,Y-12,Y02,Y12,Y22...]\n # In other words: Calculate the basis vectors of the sph. harm. expansion: \n Yj = np.zeros((nx*ny*nz,(Order+1)**2))\n fp = np.sqrt(1/(4*np.pi))\n Yj[:,0] = fp*np.sqrt(2)\n mc = 1\n for n in range(1,Order+1):\n for m in range(n+1):\n #ymn = np.sqrt((2*n+1)/(4*np.pi))*r**n*lpmv(m,n,np.cos(theta))\n ymn = r**n*lpmv(m,n,np.cos(theta))\n ymn = fp*ymn*np.sqrt((2*n+1))#/(4*np.pi))\n if m==0:\n #Yj[:,mc] = ymn\n Yj[:,mc+n] = ymn\n # better ordering means we need to change external mapping, as well as trap.configuration at start of expand_field\n else: # Nm is conversion factor to spherical harmonics, exclusing the sqrt(2*n+1/4pi) portion so that there is no coefficient to the m=0\n N1 = float(mt.factorial(n-m))\n N2 = float(mt.factorial(n+m))\n Nm = (-1)**m*np.sqrt(2*N1/N2) \n psin = Nm*ymn*np.sin(m*phi)\n pcos = Nm*ymn*np.cos(m*phi)\n #Yj[:,mc+1+2*(m-1)] = pcos\n #Yj[:,mc+2+2*(m-1)] = psin\n Yj[:,mc+n+m] = pcos\n Yj[:,mc+n-m] = psin\n mc += 2*n+1\n\n return Yj,rnorm", "def __init__(\n self, num_spherical, num_radial, cutoff=5.0, envelope_exponent=5\n ) -> None:\n super(SphericalBasisLayer, self).__init__()\n assert num_radial <= 64\n self.num_spherical = num_spherical\n self.num_radial = num_radial\n self.cutoff = cutoff\n self.envelope = Envelope(envelope_exponent)\n\n bessel_forms = bessel_basis(num_spherical, num_radial)\n sph_harm_forms = real_sph_harm(num_spherical)\n self.sph_funcs = []\n self.bessel_funcs = []\n\n x, theta = sym.symbols(\"x theta\")\n modules = {\"sin\": torch.sin, \"cos\": torch.cos}\n for i in range(num_spherical):\n if i == 0:\n sph1 = sym.lambdify([theta], sph_harm_forms[i][0], modules)(0)\n self.sph_funcs.append(lambda x: torch.zeros_like(x) + sph1)\n else:\n sph = sym.lambdify([theta], sph_harm_forms[i][0], modules)\n self.sph_funcs.append(sph)\n for j in range(num_radial):\n bessel = sym.lambdify([x], bessel_forms[i][j], modules)\n self.bessel_funcs.append(bessel)", "def val_sum(self, axis = None):\n f = self.to_Poly()\n return f.val_sum(axis).to_PolyMesh(self.params)", "def phase_modulation(rho, phase, channel):\n if phase == 0:\n return rho\n size = len(rho)\n rho_out = np.zeros((size,) * 4, dtype=complex)\n if channel == 1:\n for p1 in range(size):\n for p2 in 
range(size):\n for p1_ in range(size):\n for p2_ in range(size):\n rho_out[p1, p2, p1_, p2_] = rho[p1, p2, p1_, p2_] * np.exp(1j * phase * (p1 - p1_))\n elif channel == 2:\n for p1 in range(size):\n for p2 in range(size):\n for p1_ in range(size):\n for p2_ in range(size):\n rho_out[p1, p2, p1_, p2_] = rho[p1, p2, p1_, p2_] * np.exp(1j * phase * (p2 - p2_))\n else:\n raise ValueError('Incorrect configuration')\n return rho_out", "def _expand(self):\n cilm = SHExpandGLQ(self.data)\n coeffs = SHCoeffs.from_array(cilm, kind='GLQ')\n return coeffs", "def _apply_operations(self, values, axes_list, is_magnitude, unit, corr_unit):\n\n # Take magnitude before summing\n if is_magnitude and \"dB\" not in unit:\n values = np_abs(values)\n\n # Apply sums, means, etc\n for axis_requested in axes_list:\n # Get axis data\n ax_val = axis_requested.values\n extension = axis_requested.extension\n index = axis_requested.index\n if axis_requested.is_pattern:\n Nper, is_aper = None, None\n else:\n Nper, is_aper = self.axes[index].get_periodicity()\n if axis_requested.name in [\"time\", \"angle\", \"z\"]:\n is_phys = True\n is_freqs = False\n elif axis_requested.name in [\"freqs\", \"frequency\"]:\n is_phys = False\n is_freqs = True\n else:\n is_phys = False\n is_freqs = False\n if axis_requested.name in [\"freqs\", \"frequency\", \"wavenumber\"]:\n is_fft = True\n else:\n is_fft = False\n # max over max axes\n if extension in \"max\":\n values = nanmax(values, axis=index)\n # min over max axes\n elif extension in \"min\":\n values = nanmin(values, axis=index)\n # sum over sum axes\n elif extension in \"sum\":\n values = my_sum(\n values, index, Nper, is_aper, unit, is_fft, corr_unit=corr_unit\n )\n # root sum square over rss axes\n elif extension == \"rss\":\n values = root_sum_square(\n values,\n ax_val,\n index,\n Nper,\n is_aper,\n is_phys,\n unit,\n is_fft,\n corr_unit=corr_unit,\n )\n # mean value over mean axes\n elif extension == \"mean\":\n values = my_mean(values, ax_val, index, Nper, is_aper, is_phys, is_fft)\n # RMS over rms axes\n elif extension == \"rms\":\n values = root_mean_square(\n values, ax_val, index, Nper, is_aper, is_phys, is_fft\n )\n # integration over integration axes\n elif extension == \"integrate\":\n values = integrate(values, ax_val, index, Nper, is_aper, is_phys)\n # local integration over integration axes\n elif extension == \"integrate_local\":\n if axis_requested.name == \"z\":\n values, ax_val = integrate_local_pattern(values, ax_val, index)\n axis_requested.values = ax_val\n else:\n values = integrate_local(\n values, ax_val, index, Nper, is_aper, is_phys, is_freqs\n )\n # antiderivation over antiderivation axes\n elif extension == \"antiderivate\":\n values = antiderivate(\n values, ax_val, index, Nper, is_aper, is_phys, is_freqs\n )\n # derivation over derivation axes\n elif extension == \"derivate\":\n values = derivate(values, ax_val, index, Nper, is_aper, is_phys, is_freqs)\n\n if is_magnitude and \"dB\" in unit: # Correction for negative/small dB/dBA\n values[values < 2] = 0\n values = np_abs(values)\n\n return values", "def phaseEstimator(phases,omegas,T_s,k):\n length = phases.shape[0]\n pis = np.tile(2*np.pi,length)\n a = phases - T_s*k*omegas\n phaseShifts = np.mod(a,pis)\n b = phases-phaseShifts\n omega_hat = np.mod(b,pis)\n n = omega_hat/omegas\n estimatedTime = np.sum(n)/length\n \n estimatedPhase = phaseShifts + estimatedTime*omegas\n \n return estimatedPhase", "def sphere(indiv):\n return sum([ x ** 2 for x in indiv])", "def spherical_integral(C, rho):\n # 
phi - azimuthal angle (angle in xy-plane)\n # theta - polar angle (angle between z and xy-plane)\n # ( y , x )\n def func(theta, phi, C, rho): # Test function. Can I get 4*pi^2????\n x = sp.cos(phi) * sp.sin(theta)\n y = sp.sin(phi) * sp.sin(theta)\n z = sp.cos(theta)\n # dir = sp.array((x,y,z))\n # dc = dir_cosines(dir)\n dc = sp.array((x, y, z)) # Turns out these are direction cosines!\n Gamma = make_gamma(dc, C)\n rho_c_square = linalg.eigvals(Gamma).real # GPa\n rho_c_square = rho_c_square * 1e9 # Pa\n sound_vel = sp.sqrt(rho_c_square / rho) # m/s\n integrand = (\n 1 / (sound_vel[0] ** 3) + 1 / (sound_vel[1] ** 3) + 1 / (sound_vel[2] ** 3)\n )\n return integrand * sp.sin(theta)\n\n # ( y , x )\n # def sfunc(theta,phi,args=()):\n # return func(theta,phi,args)*sp.sin(theta)\n\n integral, error = dblquad(\n func, 0, 2 * sp.pi, lambda g: 0, lambda h: sp.pi, args=(C, rho)\n )\n return integral", "def do_amc(self, tensor: Tensor, defs, exts=None):\n\n substed = tensor.subst_all(defs)\n\n # Cache as locals for Spark serialization.\n tilde_range = self.tilde_range\n form_tilde = self.form_tilde\n m_range = self.m_range\n form_m = self.form_m\n\n def expand(dumm: Symbol):\n \"\"\"Expand a summation over quasi-particle orbitals.\"\"\"\n tilde = TildeOf(dumm)\n jtilde = JOf(tilde)\n return [\n (form_tilde(dumm), tilde_range, tilde),\n (form_m(dumm), m_range[-jtilde, jtilde + 1], MOf(dumm))\n ]\n\n return substed.expand_sums(\n self.qp_range, expand, exts=exts, conv_accs=[NOf, LOf, JOf, TOf]\n )", "def sum(self, axis=None):\n if axis is None:\n return numpy.ma.sum(self.data)\n\n new_data = numpy.ma.sum(self.data, axis=axis)\n remaining_axes = numpy.setdiff1d(range(self.ndim), axis)\n remaining_edges = [self.bset.edges[ax] for ax in remaining_axes]\n\n # This is kind of a hack that breaks good OO design, but is there\n # a better solution?\n if len(remaining_edges) == 2:\n return IntensityMap2D(new_data, (remaining_edges,))\n else:\n return IntensityMap(new_data, (remaining_edges,))", "def Hamiltonian(self):\n U = self.U.flatten()\n Vmat = sparse.spdiags([U], [0], len(U), len(U))\n Kmat = sparse.kron(-self.KEy * Schrodinger.D2mat(len(self.y), self.y[1] - self.y[0], self.periodic_y, self.qy),\n sparse.identity(len(self.x))) + \\\n sparse.kron(sparse.identity(len(self.y)),\n -self.KEx * Schrodinger.D2mat(len(self.x), self.x[1] - self.x[0], self.periodic_x, self.qx))\n return Kmat + Vmat", "def pack_coeffs(self):\n for dm, m in enumerate(self.simplesphere.local_m):\n m_coeffs = [t.coeffs[dm] for t in self.tensors]\n self.coeffs[dm] = np.hstack(m_coeffs)", "def sphere_sre(solution):\n a = 0\n bias = 0.2\n x = solution.get_x()\n x1 = x[:10]\n x2 = x[10:]\n value1 = sum([(i-bias)*(i-bias) for i in x1])\n value2 = 1/len(x) * sum([(i-bias)*(i-bias) for i in x2])\n return value1 + value2", "def sum_over_energy(self):\n # We sum over axis 0 in the array, and drop the energy binning in the\n # hpx object\n return HpxMap(np.sum(self.counts, axis=0), self.hpx.copy_and_drop_energy())", "def power_sum_mesh(self, PFC, mode='optical', scale2circ = True, verbose=False):\n# xyz = PFC.centers\n# R,Z,phi = tools.xyz2cyl(xyz[:,0],xyz[:,1],xyz[:,2])\n# deltaPhi = phi.max() - phi.min()\n deltaPhi = PFC.phiMax - PFC.phiMin\n if verbose==True:\n print('phiMin = {:f}'.format(PFC.phiMin))\n print('phiMax = {:f}'.format(PFC.phiMax))\n log.info('phiMin = {:f}'.format(PFC.phiMin))\n log.info('phiMax = {:f}'.format(PFC.phiMax))\n\n if mode=='gyro':\n #sum = np.sum(PFC.qGyro * PFC.areas )\n sum = np.sum(PFC.Pgyro)\n elif 
mode=='rad':\n sum = np.sum(PFC.Prad)\n else:\n sum = np.sum(PFC.qDiv * PFC.areas )\n\n if scale2circ == True:\n sum = sum * 2 * np.pi / deltaPhi\n\n return sum", "def kahan_sum(a, axis=0):\n s = np.zeros(a.shape[: axis] + a.shape[axis + 1:])\n c = np.zeros(s.shape)\n for i in range(a.shape[axis]):\n # https://stackoverflow.com/a/42817610/353337\n y = a[(slice(None),) * axis + (i,)] - c\n t = s + y\n c = (t - s) - y\n s = t.copy()\n return s", "def fit_harmonics(self, show=False, order=3, penalty=0.0):\n pts = self.surface_atoms.get_positions()\n com = np.mean(pts, axis=0)\n pts -= com\n r = np.sqrt(np.sum(pts**2, axis=1))\n theta = np.arccos(pts[:, 2]/r)\n phi = np.arctan2(pts[:, 1], pts[:, 0])\n data = np.zeros((len(phi), 3))\n data[:, 0] = phi\n data[:, 1] = theta\n data[:, 2] = r\n\n\n from cemc.tools import HarmonicsFit\n fit = HarmonicsFit(order=order)\n fit.fit(data, penalty=penalty)\n if show:\n fit.show()\n return fit", "def cosineSequences(self, index=None, ev=None, expand=False,\n factor=False, simplify=False):\n self._compute_cosineSequences(expand=expand, factor=factor,\n simplify=simplify)\n rewriteMatrix(self._.omega, expand=expand, factor=factor,\n simplify=simplify)\n if ev is not None:\n if not self._has(\"theta\"):\n self.eigenvalues(expand=expand, factor=factor,\n simplify=simplify)\n try:\n index = self._.theta.index(ev)\n except ValueError as ex:\n if index is None:\n raise ex\n if index is not None:\n return self._.omega[index]\n return Matrix(SR, self._.omega)", "def real_sph_harm(m, n, theta, phi):\n m = atleast_1d(m)\n # find where m is =,< or > 0 and broadcasts to the size of the output\n m_eq0,junk,junk,junk = broadcast_arrays(m == 0, n, theta, phi)\n m_gt0,junk,junk,junk = broadcast_arrays(m > 0, n, theta, phi)\n m_lt0,junk,junk,junk = broadcast_arrays(m < 0, n, theta, phi)\n\n sh = sph_harm(m, n, theta, phi)\n real_sh = empty(sh.shape, 'double')\n real_sh[m_eq0] = sh[m_eq0].real\n real_sh[m_gt0] = sh[m_gt0].real * sqrt(2)\n real_sh[m_lt0] = sh[m_lt0].imag * sqrt(2)\n return real_sh", "def square(self, n_coeffs, do_overlap_add=False):\n resampled_bands = [\n self._resample(band, n_coeffs)\n for band in self.iter_bands()]\n\n stacked = np.vstack(resampled_bands).T\n\n fdim = FrequencyDimension(self.scale)\n\n # TODO: This feels like it could be wrapped up nicely elsewhere\n chunk_frequency = Picoseconds(int(np.round(\n self.time_dimension.duration / Picoseconds(1) / n_coeffs)))\n\n td = TimeDimension(frequency=chunk_frequency)\n\n arr = ConstantRateTimeSeries(ArrayWithUnits(\n stacked.reshape(-1, n_coeffs, self.n_bands),\n dimensions=[self.time_dimension, td, fdim]))\n\n if not do_overlap_add:\n return arr\n\n # Begin the overlap add procedure\n overlap_ratio = self.time_dimension.overlap_ratio\n\n if overlap_ratio == 0:\n # no overlap add is necessary\n return ArrayWithUnits(stacked, [td, fdim])\n\n step_size_samples = int(n_coeffs * overlap_ratio)\n\n first_dim = int(np.round(\n (stacked.shape[0] * overlap_ratio) + (n_coeffs * overlap_ratio)))\n\n output = ArrayWithUnits(\n np.zeros((first_dim, self.n_bands)),\n dimensions=[td, fdim])\n\n for i, chunk in enumerate(arr):\n start = step_size_samples * i\n stop = start + n_coeffs\n output[start: stop] += chunk.reshape((-1, self.n_bands))\n\n return output", "def sum_sines(t, param_set):\n f = np.zeros(t.jyear.size)\n for params in param_set:\n f += sinefunc(t.jyear, *params)\n return f", "def add_computed_gas_concentrations(self):\n # Extract the z-coordinate and T, S, P profile\n zs = 
self.interp_ds.coords[self.ztsp[0]].values\n Ts = self.interp_ds[self.ztsp[1]].values\n Ss = self.interp_ds[self.ztsp[2]].values\n Ps = self.interp_ds[self.ztsp[3]].values\n \n # Create an air object\n air_names = ['nitrogen', 'oxygen', 'argon', 'carbon_dioxide']\n yk = np.array([0.78084, 0.20946, 0.009340, 0.00036])\n from tamoc import dbm\n air = dbm.FluidMixture(air_names)\n m = air.masses(yk)\n \n # Compute the concentrations adjusted for depth\n Cs = np.zeros((len(zs), len(air_names)))\n for i in range(len(zs)):\n Cs[i,:] = air.solubility(m, Ts[i], 101325., Ss[i])[0,:] * \\\n seawater.density(Ts[i], Ss[i], Ps[i]) / \\\n seawater.density(Ts[i], Ss[i], 101325.)\n \n # Make sure none of these gases are already in the measured profile\n for name in air_names:\n if name in self.interp_ds:\n air_names[air_names.index(name)] = 'computed_' + name\n \n # Add these data to the Profile object\n data = np.hstack((np.atleast_2d(zs).transpose(), Cs))\n names = [self.ztsp[0]] + air_names \n units = [self.ztsp_units[0]] + 4*['kg/m^3']\n self.append(data, names, units)\n \n # Rebuild the interpolator\n self._build_interpolator()", "def A_H(y,mask,S):\n y_zf = np.zeros(S.shape,dtype='complex128')\n y_zf[:,mask] = y\n x_zf = fft.fftshift(fft.ifft2(fft.ifftshift(y_zf)))*(mask.size)\n x_zf_sum = ( x_zf*(S.conj()) ).sum(axis=0)\n \n return x_zf_sum", "def compute_hamiltonian(num_atoms, atoms, bonds, flux_per_plaquette = 0):\n zero = numpy.zeros((num_atoms, num_atoms),\n dtype = numpy.complex64)\n h_poly = {}\n\n for (w, (i0, j0), (i1, j1)) in bonds:\n (phase0, atom0) = atoms[(i0, j0)]\n (phase1, atom1) = atoms[(i1, j1)]\n\n exp0 = phase1[0]-phase0[0]\n exp1 = phase1[1]-phase0[1]\n\n if not (exp0, exp1) in h_poly:\n h_poly[( exp0, exp1)] = numpy.copy(zero)\n h_poly[(-exp0, -exp1)] = numpy.copy(zero)\n\n # print \"a-a ((\",atom0, atom1,\"))\"\n # print (i0,j0),\"->\",(i1,j1)\n plaquettes = 0.5*(i1-i0)*(j0+j1)\n # print \"plaquettes=\",plaquettes\n mag_angle = flux_per_plaquette*plaquettes*units.electron_charge/units.hbar\n mag_phase = numpy.exp(1j*mag_angle)\n # print \"area=\",area\n # print \"magangle=\",mag_angle\n # print \"magphase\",w,mag_phase\n # print \"exp0,exp1=\",exp0,exp1\n\n h_poly[( exp0, exp1)][atom0, atom1] += w*mag_phase\n h_poly[(-exp0, -exp1)][atom1, atom0] += w/mag_phase\n # print h_poly\n return h_poly", "def sum(tensor, axis=None):\n raise NotImplementedError", "def phaseEstimator2(phases,omegas,T_s,k):\n \n \n length = phases.shape[0]\n pis = np.tile(2*np.pi,length)\n a = phases - k*omegas\n phaseShifts = np.mod(a,pis)\n\n averagedPhaseShift = np.sum(phaseShifts)/length\n \n estimatedPhase = np.mod(averagedPhaseShift + k*omegas,pis)\n #estimatedPhase = np.array([np.pi/2,np.pi/2,np.pi/2]) + k*omegas\n \n return estimatedPhase", "def sum(self, dim=None):\n if dim is None:\n x = self.flatten()\n else:\n x = self.transpose(0, dim)\n\n # Add all BinarySharedTensors\n while x.size(0) > 1:\n extra = None\n if x.size(0) % 2 == 1:\n extra = x[0]\n x = x[1:]\n x0 = x[: (x.size(0) // 2)]\n x1 = x[(x.size(0) // 2) :]\n x = x0 + x1\n if extra is not None:\n x.share = torch_cat([x.share, extra.share.unsqueeze(0)])\n\n if dim is None:\n x = x.squeeze()\n else:\n x = x.transpose(0, dim).squeeze(dim)\n return x", "def _append_phase(self, k, i):\n if not 0 <= i < self.num_qubits:\n raise QiskitError(\"phase qubit out of bounds.\")\n # If the kth bit is flipped, conjugate this gate\n if self.shift[i] == 1:\n k = (7 * k) % 8\n # Take all subsets \\alpha of the support of row i\n # of weight up to 3 and 
add k*(-2)**(|\\alpha| - 1) mod 8\n # to the corresponding term.\n support = np.arange(self.num_qubits)[np.nonzero(self.linear[i])]\n subsets_2 = itertools.combinations(support, 2)\n subsets_3 = itertools.combinations(support, 3)\n for j in support:\n value = self.poly.get_term([j])\n self.poly.set_term([j], (value + k) % 8)\n for j in subsets_2:\n value = self.poly.get_term(list(j))\n self.poly.set_term(list(j), (value + -2 * k) % 8)\n for j in subsets_3:\n value = self.poly.get_term(list(j))\n self.poly.set_term(list(j), (value + 4 * k) % 8)", "def __add__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] += other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self, other\n if( len( self ) < len( other ) ) : c_l1, c_l2 = other, self\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )", "def _sph_harm(m, n, theta, phi):\n return sph_harm(m, n, phi, theta)", "def SPHarm(self, l, m):\n\n memSPHarCoef = memoizedSPHarmCoefficients()\n SPHarmCoefficients = memSPHarCoef.SPHarmCoefficients\n\n def mycoeffs(l0, m0, s0, l1, m1, s1):\n return SPHarmCoefficients(l, m, l0, m0, s0, l1, m1, s1)\n mat = matFromCoeffs(mycoeffs, self.eigenspinors)\n\n return mat", "def cosh(self):\r\n getcontext().prec += 2\r\n re = cosh(self._real) * cos(self._imag)\r\n im = sinh(self._real) * sin(self._imag)\r\n ans = self.__class__(re, im)\r\n getcontext().prec -= 2\r\n return +ans", "def update(self, *args):\n return _osgAnimation.QuatSphericalLinearChannel_update(self, *args)", "def get_spectrum(self, shcoeffs, nwins):\n for itaper in range(nwins):\n tapercoeffs = self._coeffs(itaper)\n modelcoeffs = shcoeffs.get_coeffs(normalization='4pi',kind='real')\n coeffs = SHMultiply(tapercoeffs,modelcoeffs)", "def _linear_dispersion_sum(k, j, d, beta, tau):\n # k, j, d , beta = columns -> convert to Series -> broadcasted\n # tau = Matrix as Frame\n calc_column = (k + j * d) * np.sqrt(beta)\n if isinstance(calc_column, pd.DataFrame):\n calc_column = calc_column.squeeze()\n return np.cos(2 * np.pi * tau).mul(calc_column, axis='index').sum(axis='index')", "def cosh(obj):\n\tif isinstance(obj, Variable):\n\t\tval = np.cosh(obj.val)\n\t\tder = np.sinh(obj.val)\n\t\tif len(obj.der.shape)>len(der.shape):\n\t\t\tder = np.expand_dims(der,1)\n\t\tder = np.multiply(der, obj.der)\n\t\treturn Variable(val, der)\n\telse:\n\t\treturn np.cosh(obj)", "def sinc(\n self,\n width=None,\n mfreq=None,\n chromaticity=None,\n dtype=None,\n power=True,\n ):\n widths, dtype = self._process_args(width, mfreq, chromaticity, dtype)\n response = np.sinc(0.5 * self.xs / np.sin(widths))\n if power:\n response = response ** 2\n return response.astype(dtype)", "def _integrate_with_spk_embed(self, hs, spembs):\n if self.spk_embed_integration_type == \"add\":\n # apply projection and then add to hidden states\n spembs = self.projection(F.normalize(spembs))\n hs = hs + spembs.unsqueeze(1)\n elif self.spk_embed_integration_type == \"concat\":\n # concat hidden states with spk embeds and then apply projection\n spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)\n hs = self.projection(torch.cat([hs, spembs], dim=-1))\n else:\n raise NotImplementedError(\"support only add or concat.\")\n\n return hs", "def TM_fluid(layer, kx, om):\n\n h = layer.d\n rho = layer.medium.rho\n K = layer.medium.K\n k = om*np.sqrt(rho/K)\n ky = np.sqrt(k**2-kx**2)\n T = np.zeros((2, 2), dtype=complex)\n T[0, 0] = 
np.cos(ky*h)\n T[1, 0] = (om**2*rho/ky)*np.sin(ky*h)\n T[0, 1] = -(ky/(om**2*rho))*np.sin(ky*h)\n T[1, 1] = np.cos(ky*h)\n return T", "def spherical_parameters(self):\n phi_mu_list = []\n theta_mu_list = []\n \n for mu in self.mu_list:\n r, phi, theta = T_cartesian_to_spherical(x=mu[0], y=mu[1], z=mu[2])\n phi_mu_list.append(phi)\n theta_mu_list.append(theta)\n \n return phi_mu_list, theta_mu_list", "def phase(N, phi0=0, *, dtype=None):\n dtype = dtype or settings.core[\"default_dtype\"] or _data.Dense\n phim = phi0 + (2 * np.pi * np.arange(N)) / N # discrete phase angles\n n = np.arange(N)[:, np.newaxis]\n states = np.array([np.sqrt(kk) / np.sqrt(N) * np.exp(1j * n * kk)\n for kk in phim])\n ops = np.sum([np.outer(st, st.conj()) for st in states], axis=0)\n return Qobj(ops, dims=[[N], [N]], type='oper', copy=False).to(dtype)", "def euclid_ccl(Omega_M):\n \n # Parameters from https://arxiv.org/pdf/1903.01473.pdf\n Omega_b_fraction = 0.15653724 # fraction of Omega_M\n \n sigma8 = 0.811\n Omega_b = Omega_b_fraction * Omega_M\n Omega_c = (1 - Omega_b_fraction) * Omega_M \n h = 0.674\n ns = 0.965\n w0 = -1.03\n\n cosmo_fid = ccl.Cosmology(Omega_c=Omega_c, Omega_b=Omega_b, h=0.674\n , sigma8=sigma8, n_s=ns, w0=w0)#, transfer_function='emulator', matter_power_spectrum='emu')\n\n dNdzs = np.zeros((nbins, z.size))\n shears = []\n \n for i in range(nbins):\n # edges of 1 equal width redshift bins, between 0 and 2\n zmin, zmax = i*(2./nbins), (i+1)*(2./nbins)\n # generate dNdz per bin\n dNdzs[i,:] = ccl.dNdz_tomog(z=z, zmin=zmin, zmax=zmax, pz_func=pz\n , dNdz_func = dNdz_true)\n # calculate the shear per bin\n gal_shapes = ccl.WeakLensingTracer(cosmo_fid, dndz=(z, dNdzs[i,:]))\n shears.append(gal_shapes)\n \n # calculate nbin*(nbin+1)/2 = 1 spectra from the shears\n Cls = []\n for i in range(nbins):\n for j in range(0,i+1):\n Cls.append(ccl.angular_cl(cosmo_fid, shears[i], shears[j], ells))\n \n return np.array(Cls), dNdzs", "def _apply_array_spatial12_halffilling(self, h1e: 'Nparray',\n h2e: 'Nparray') -> 'Nparray':\n if fqe.settings.use_accelerated_code:\n return self._apply_array_spatial12_lm(h1e, h2e)\n else:\n h1e = copy.deepcopy(h1e)\n h2e = numpy.moveaxis(copy.deepcopy(h2e), 1, 2) * (-1.0)\n norb = self.norb()\n for k in range(norb):\n h1e[:, :] -= h2e[:, k, k, :]\n\n if numpy.iscomplex(h1e).any() or numpy.iscomplex(h2e).any():\n dvec = self.calculate_dvec_spatial()\n out = numpy.einsum(\"ij,ijkl->kl\", h1e, dvec)\n dvec = numpy.einsum(\"ijkl,klmn->ijmn\", h2e, dvec)\n out += self._calculate_coeff_spatial_with_dvec(dvec)\n else:\n nij = norb * (norb + 1) // 2\n h1ec = numpy.zeros((nij), dtype=self._dtype)\n h2ec = numpy.zeros((nij, nij), dtype=self._dtype)\n for i in range(norb):\n for j in range(i + 1):\n ijn = j + i * (i + 1) // 2\n h1ec[ijn] = h1e[i, j]\n for k in range(norb):\n for l in range(k + 1):\n kln = l + k * (k + 1) // 2\n h2ec[ijn, kln] = h2e[i, j, k, l]\n dvec = self._calculate_dvec_spatial_compressed()\n out = numpy.einsum(\"i,ikl->kl\", h1ec, dvec)\n dvec = numpy.einsum(\"ik,kmn->imn\", h2ec, dvec)\n for i in range(self.norb()):\n for j in range(self.norb()):\n ijn = min(i, j) + max(i, j) * (max(i, j) + 1) // 2\n work = self._core.alpha_map(j, i)\n for source, target, parity in work:\n out[source, :] += dvec[ijn, target, :] * parity\n work = self._core.beta_map(j, i)\n for source, target, parity in work:\n out[:, source] += dvec[ijn, :, target] * parity\n\n return out", "def bare_hamiltonian(self):\n bare_hamiltonian = 0\n for subsys in self:\n evals = 
subsys.eigenvals(evals_count=subsys.truncated_dim)\n bare_hamiltonian += self.diag_hamiltonian(subsys, evals)\n return bare_hamiltonian", "def core_hamiltonian(basis, labels, coords):\n t = kinetic(basis=basis, labels=labels, coords=coords)\n v = nuclear(basis=basis, labels=labels, coords=coords)\n return numpy.ascontiguousarray(t + v)", "def core_func(phi_by_term, out_by_phase, solver, cast_mode='', **kwargs):\n\n kernels_vec = dict()\n _phi_by_term = _cast_complex2real(phi_by_term, cast_mode)\n _out_by_phase = out_by_phase.copy()\n\n for n in range(N, 0, -1):\n current_phi = _phi_by_term[(n, 0)]\n current_phase_sig = _complex2real(_out_by_phase[n],\n cast_mode=cast_mode)\n\n if n == 2:\n current_phi = np.concatenate(\n (current_phi, 2 * np.real(_phi_by_term[(2, 1)])), axis=0)\n current_phase_sig = np.concatenate(\n (current_phase_sig, np.real(_out_by_phase[0])), axis=0)\n\n kernels_vec[n] = _solver(current_phi, current_phase_sig, solver)\n\n for k in range(1, 1+n//2):\n p = n - 2*k\n _out_by_phase[p] -= binomial(n, k) * \\\n np.dot(phi_by_term[(n, k)], kernels_vec[n])\n return kernels_vec", "def cosh(self):\t\t\n\t\tval = np.cosh(self.val)\n\t\tif len(self.der.shape):\n\t\t\tto_multiply = np.sinh(self.val)\n\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\tder = to_multiply * self.der\n\t\telse:\n\t\t\tder = None\n\t\treturn Var(val, der)", "def rowsums (self):\n return self.values.sum (axis=0)", "def sph(vs, ucs, A, C, x, y, z, rotx=0, roty=0, rotz=0, mode=\"C\"):\n sph = []\n\n if mode == \"A\":\n nA = (A-0.07)/(0.12-0.07)\n nC = (C+0.1)/(0.31+0.1)\n elif mode == \"C\":\n nA = (A-0.07)/(0.12-0.07)\n nC = C/0.307\n elif mode == \"AC\":\n nA = (A-0.07)/(0.12-0.07)\n nC = (C+0.1)/(0.31+0.1)\n else:\n nA = (A-0.07)/(0.12-0.07)\n nC = C/0.307\n\n if (type(nA) is np.float64):\n nA = np.full(len(vs), nA)\n if (type(nC) is np.float64):\n nC = np.full(len(vs), nC)\n\n for v, uc, a, c in zip(vs, ucs, nA, nC):\n if mode == \"A\":\n H0 = a\n L0 = 1/(1+np.exp((-2.8*((a-0.52)*2.4))))\n # R0, G0, B0 = colorsys.hls_to_rgb(0.02+H0*0.35, 0.5-L0*0.4, 1.0)\n R0, G0, B0 = colorsys.hls_to_rgb(1.0-H0*0.40, 0.5-L0*0.4, 1.0)\n\n elif mode == \"C\":\n H0 = c\n L0 = 1/(1+np.exp((-2.8*((c-0.52)*2.4))))\n R0, G0, B0 = colorsys.hls_to_rgb(0.02+H0*0.35, 0.5-L0*0.4, 1.0)\n\n elif mode == \"AC\":\n R0 = a*1.0\n # G0 = max(0.8-(max(a+c, 0)), 0)\n G0 = 0.0\n B0 = c*1.0\n\n else:\n R0 = 0.3\n G0 = 0.2\n B0 = 0.2\n\n R1 = 1.0 - R0\n G1 = 1.0 - G0\n B1 = 1.0 - B0\n\n sph.append(Sphere(v, 0.022,\n Texture(Pigment('color',\n [R0+uc*R1, G0+uc*G1, B0+uc*B1]),\n Finish('phong', 0.7,\n 'specular', 0.2,\n 'diffuse', 0.9,\n 'ambient', 0.1)),\n 'rotate', [rotx, 0, 0],\n 'rotate', [0, roty, 0],\n 'rotate', [0, 0, rotz],\n 'translate', [x, y, z],\n 'no_shadow'))\n\n return sph", "def aspheresurface(self):\n\t\tR = self.coefficients[0]\n\t\ttheta = np.linspace(0, 2*np.pi, 100)\n\t\trho = np.linspace(0, R, 100)\n\t\t[u,r] = np.meshgrid(theta,rho)\n\t\tX = r*cos(u)\n\t\tY = r*sin(u)\n\t\tZ = aspherepolar(self.coefficients,r)\n\t\tfig = plt.figure(figsize=(12, 8), dpi=80)\n\t\tax = fig.gca(projection='3d')\n\t\tsurf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.RdYlGn,\n\t linewidth=0, antialiased=False, alpha = 0.6)\n\t\tplt.show()\n\t\treturn 0", "def aks_mod( polynomial , r ):\n aks_mod = polynomial.coefficients\n total = aks_mod[ : r ]\n aks_mod = aks_mod[ r : ]\n while len(aks_mod) - 1 >= r :\n for i in range(r):\n total[i] += aks_mod[i]\n aks_mod = 
aks_mod[ r : ]\n for i in range(len(aks_mod)):\n total[i] += aks_mod[i]\n return array_poly_mod( total , polynomial.mod )", "def getRFFieldMap(self, phase=0):\n # TODO: we might want to see something else for a multi-cell cavity\n return np.sum([norm_E(self.z_array) * self.rf_peak_field * 1e6 * np.cos(pho + phase)\n for norm_E, pho in zip(self.norm_E, self.phase_offset)], 0)", "def _multi_term_cos(coef, power, multiplier, rho, theta):\n \n power2 = (coef + 1.) * -1.\n return ((coef * np.power(rho, power2) + (power * np.power(rho, power))) * np.cos(multiplier * theta))", "def _encircled_energy_core(mtf_data, radius, nu_p, dx, dy):\n integration_fourier = special.j1(2 * np.pi * radius * nu_p) / nu_p\n dat = mtf_data * integration_fourier\n return radius * dat.sum() * dx * dy", "def odf(self, sphere):\n raise NotImplementedError(\"To be implemented in sub classes\")", "def sum(self, axis=None, keepdims=False):\n return F.Sum.apply(self, axis, keepdims)", "def odf(self, sphere):\n self.gqi_vector = self.model.cache_get('gqi_vector', key=sphere)\n if self.gqi_vector is None:\n if self.model.method == 'gqi2':\n H = squared_radial_component\n # print self.gqi_vector.shape\n self.gqi_vector = np.real(H(np.dot(\n self.model.b_vector, sphere.vertices.T) *\n self.model.Lambda))\n if self.model.method == 'standard':\n self.gqi_vector = np.real(np.sinc(np.dot(\n self.model.b_vector, sphere.vertices.T) *\n self.model.Lambda / np.pi))\n self.model.cache_set('gqi_vector', sphere, self.gqi_vector)\n\n return np.dot(self.data, self.gqi_vector)", "def apply_cos_inplace(self, time: float, ncoeff: complex, opa: List[int],\n oha: List[int], opb: List[int],\n ohb: List[int]) -> None:\n absol = numpy.absolute(ncoeff)\n factor = numpy.cos(time * absol)\n\n amap, bmap = self._evaluate_map(opa, oha, opb, ohb)\n\n if amap.size != 0 and bmap.size != 0:\n xi, yi = numpy.meshgrid(amap, bmap, indexing='ij')\n if fqe.settings.use_accelerated_code:\n _sparse_scale(xi, yi, factor, self.coeff)\n else:\n self.coeff[xi, yi] *= factor", "def combine_atoms(\n mols, clash_param=1.5, min_cluster_size=2, smooth_param=0.5, smooth_grad=0.1\n):\n rdkit_atom = RDKitAtom()\n molecule_atoms = []\n for i, mol in enumerate(mols):\n atoms = rdkit_atom.generate_atoms_for_mol(mol)\n molecule_atoms.append([i, mol, atoms])", "def spherical_harmonics_vec(th,ph, lmax):\n Y = []\n lm = []\n n = lmax*(lmax+2)+1\n sph_it = spherical_harmonics_it(th,ph)\n for i in range(0, n):\n Ylm,l,m = sph_it.next()\n Y.append(Ylm)\n lm.append((l,m))\n assert l == lmax\n assert m == -lmax\n\n return Y, lm", "def sum_over_m(alm, Ylm, ls,\n lmax=100, lmin=0, mapsize=3072):\n ls_array = np.arange(lmin, lmax+1)\n result = np.zeros((len(ls_array), mapsize), dtype=complex)\n \n for i in np.arange(len(ls_array)):\n for j in np.arange(len(ls)):\n if ls[j] == ls_array[i]:\n for k in np.arange(mapsize):\n result[i,k] += alm[j] * Ylm[j,k] / ( 2.*ls_array[i] + 1. 
)\n\n # multiply by 2 because alm's in healpy only contain m>=0 terms\n return 2.*result.real", "def spherical2cylindrical(sph):\n cyl = np.zeros(sph.shape)\n cyl[:, 0] = sph[:, 0] * np.sin(sph[:, 2])\n cyl[:, 1] = sph[:, 1]\n cyl[:, 2] = sph[:, 0] * np.cos(sph[:, 2])\n return cyl", "def kuramoto_ODE_jac(self, t, y, arg):\n\n w, k = arg\n yt = y[:,None]\n dy = y-yt\n\n phase = [m*k[m-1]*np.cos(m*dy) for m in range(1,1+self.m_order)]\n phase = np.sum(phase, axis=0)\n\n for i in range(self.n_osc):\n phase[i,i] = -np.sum(phase[:,i])\n\n return phase", "def sum_of_e_field(r,V,X,Y,Z,exact_saddle=True):\n from project_parameters import debug\n x0,y0,z0=r[0],r[1],r[2]\n #from all_functions import spher_harm_exp\n basis,rnorm = spher_harm_bas(x0,y0,z0,X,Y,Z,3)\n c=spher_harm_exp(V,basis,rnorm) #Update these variables by abstraction.\n if debug.soef:\n print('Checking saddle: ({0},{1},{2})'.format(x0,y0,z0))\n s=c**2\n f=sum(s[1:4])/sum(s[4:9])\n real_f=np.real(f[0])\n if debug.soef:\n print('Optimization: {}'.format(real_f))\n return real_f", "def harmonic_oscillators(N=10, omega=0.1, alpha=0.2, gamma=0.05, dt=1.0):\n\n # Miscellaneous imports.\n from scipy.sparse import diags, identity, bmat, block_diag\n\n # Build the stiffness matrix.\n K_ii = np.ones((N,)) * (omega**2 / 2.0 + alpha) # Self-coupling.\n K_ij = np.ones((N - 1,)) * (-alpha / 2.0) # Nearest-neighbor coupling.\n K = diags(\n [K_ij, K_ii, K_ij], offsets=[-1, 0, 1]\n ) # Assembles the stiffness matrix.\n\n # Build the friction matrix.\n G = gamma * identity(N)\n\n # Build the dynamic matrix.\n A = bmat([[None, identity(N)], [-K, -G]])\n\n # Build the control matrix.\n B = bmat([[0 * identity(N)], [identity(N)]])\n\n # Build the observation matrix.\n C = identity(2 * N)\n\n # Build the feedthrough matrix.\n D = bmat([[0 * identity(N)], [0 * identity(N)]])\n\n # SciPy continuous-time LTI object.\n sys = lti(A.toarray(), B.toarray(), C.toarray(), D.toarray())\n\n # Return the discrete-time equivalent.\n return sys.to_discrete(dt)", "def AddEtaSeries(c_vals, times):\n\treturn [EtaSin(time, c_vals) for time in times]", "def hamiltonian(self):\n hamiltonian = self.bare_hamiltonian()\n for interaction_term in self.interaction_list:\n hamiltonian += interaction_term.hamiltonian()\n return hamiltonian", "def M31Accel(self,r):\n\n ### Call the previous functions for the halo, bulge and disk\n halo_acc = self.HernquistAccel(self.Mhalo,self.rhalo,r) #Calculating halo acceleration\n bulge_acc = self.HernquistAccel(self.Mbulge,self.rbulge,r) #\"\"\" bulge \"\"\"\n disk_acc = self.MiyamotoNagaiAccel(self.Mdisk,self.rdisk,r)#\"\"\" disk \"\"\"\n sum1 = np.add(halo_acc,bulge_acc) #summing halo and bulge first\n acc_sum = np.add(sum1,disk_acc) #then adding disk acceleration\n\n return acc_sum", "def sphere(self, x):\r\n # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]\r\n return sum((x+0)**2)", "def phase(self):\r\n\r\n #XXX calcluate this from the standard output, instead of recalculating:\r\n\r\n tseries_length = self.input.data.shape[0]\r\n spectrum_length = self.spectrum.shape[-1]\r\n\r\n phase = np.zeros((tseries_length,\r\n tseries_length,\r\n spectrum_length))\r\n\r\n for i in range(tseries_length):\r\n for j in range(i, tseries_length):\r\n phase[i][j] = np.angle(\r\n self.spectrum[i][j])\r\n\r\n phase[j][i] = np.angle(\r\n self.spectrum[i][j].conjugate())\r\n return phase", "def get_totals(data, cols):\n\n spots = [len(data[data.Phase == p]) for p in pd.unique(data.Phase)]\n j = 0\n added_rows = 0\n for i in 
range(len(spots)):\n spots[i] += j + added_rows\n j = spots[i]\n added_rows = 1\n spots = [0] + spots\n\n end = len(cols) - 1\n\n final = pd.DataFrame(columns = data.columns)\n blank = pd.DataFrame({c:'' for c in data.columns}, index = [-1])\n\n for ind, p in enumerate(pd.unique(data.Phase)):\n plu = 4 if ind else 3\n section = data.loc[data.Phase == p]\n sums = blank.copy()\n\n sums.loc[-1, 'Deleted'] = 'Total'\n\n for u in data.columns:\n if '#' in u:\n lett = alpha[list(data.columns).index(u)]\n if 'CO' not in u:\n sums.loc[-1, u] = '=SUMIF(' + lett + str(spots[ind] + plu) + ':' + lett + str(spots[ind + 1] + 2) + ',\">0\")'\n else:\n sums.loc[-1, u] = '=SUM(' + lett + str(spots[ind] + plu) + ':' + lett + str(spots[ind + 1] + 2) + ')'\n if 'Unit_Total' in cols:\n sums.loc[-1, 'M/M_Total'] = '=SUM(' + alpha[end -2] + str(spots[ind] + plu) + ':' + alpha[end -2] + str(spots[ind + 1] + 2) + ')'\n sums.loc[-1, 'Unit_Total'] = '=SUM(' + alpha[end -1] + str(spots[ind] + plu) + ':' + alpha[end -1] + str(spots[ind + 1] + 2) + ')'\n sums.loc[-1, 'Line_Total'] = '=SUM(' + alpha[end] + str(spots[ind] + plu) + ':' + alpha[end] + str(spots[ind + 1] + 2) + ')'\n\n section = pd.concat([section, sums])\n final = pd.concat([final, section], ignore_index = True)\n\n final = final[cols]\n\n spots = [t + 1 for t in spots[1:]]\n\n return final, spots", "def harmonic_centrality(G, nbunch=None, distance=None, sources=None):\n\n nbunch = set(G.nbunch_iter(nbunch)) if nbunch is not None else set(G.nodes)\n sources = set(G.nbunch_iter(sources)) if sources is not None else G.nodes\n\n spl = partial(nx.shortest_path_length, G, weight=distance)\n centrality = {u: 0 for u in nbunch}\n for v in sources:\n dist = spl(v)\n for u in nbunch.intersection(dist):\n d = dist[u]\n if d == 0: # handle u == v and edges with 0 weight\n continue\n centrality[u] += 1 / d\n\n return centrality", "def Hamiltonian(self):\n Vmat = sparse.spdiags([self.U], [0], len(self.U), len(self.U))\n Kmat = -self.KE * Schrodinger.D2mat(numpts=len(self.x), delta=self.x[1] - self.x[0], periodic=self.periodic,\n q=self.q)\n return Kmat + Vmat", "def filter_wrapped_phase(image, k):\n ny, nx = image.shape\n assert(ny == nx) ## assert a square image for simplicity\n if (k%2 == 0):\n print(\"k has to be an integer!\")\n return\n N = nx\n i, j = np.arange(N), np.arange(N)\n ii, jj = np.meshgrid(i, j)\n filt_psi = np.zeros((N,N))\n\n inside = (jj[k/2:N-(k/2), k/2:N-(k/2)].flatten(), ii[k/2:N-(k/2), k/2:N-(k/2)].flatten())\n krange = np.linspace(-1 * (k/2), (k/2), k, dtype = 'int64') ## amount of added spaces, if k = 5, it ranges from -2 to 2\n krange_tile = np.tile(krange * N, (k, 1)).T ## tile them to make a (k/2)**2 matrix, containing for instance -2N, -N, 0, N, 2N for k=5\n k_tile = np.tile(krange, (k, 1)) ## tile to add to krange_tile\n coords_add = (krange_tile + k_tile).flatten() ## all coordinates, in a (k/2)**2 matrix, from -2N - 2: -2N + 2, -N-2 : -N+2 , -2 : 2, N -2 : N +2, 2N -2 : 2N +2\n inside = np.ravel_multi_index(inside, (N, N))\n coords_add = np.tile(coords_add, (len(inside), 1)) ## stack all differences to add to inside\n inside_tile = np.tile(inside, (coords_add.shape[1],1)).T ## stack all inside to add to differences\n all_coords = inside_tile + coords_add### a matrix of len(inside) x (k/2)**2 with all coordinates in a k x k square around a certain coordinate\n unrav_coords = np.unravel_index(all_coords, (N, N)) ## unraveled coordinates of all coordinates\n sum_sin_psi = np.sum(np.sin(image[unrav_coords]), axis = 1) ## sum over a sin 
(psi) over a k x k square\n sum_cos_psi = np.sum(np.cos(image[unrav_coords]), axis = 1) ## sum over a cos (psi) over a k x k square\n psi_app = np.arctan2(sum_sin_psi, sum_cos_psi)\n filt_psi[np.unravel_index(inside, (N, N))] = psi_app \n\n #### top layers\n for i in range(k/2):\n ## for indices directly above the \"inside square\"\n top = (jj[i, k/2:N-(k/2)].flatten(), ii[i, k/2: N - (k/2)].flatten())\n coords_add = (krange_tile + k_tile)[(k/2)-i:, :].flatten()\n top = np.ravel_multi_index(top, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n top_tile = np.tile(top, (coords_add.shape[1],1)).T\n top_coords = top_tile + coords_add\n unrav_coords = np.unravel_index(top_coords, (N, N))\n sum_sin_top = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_top = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_top = np.arctan2(sum_sin_top, sum_cos_top)\n filt_psi[np.unravel_index(top, (N, N))] = psi_top\n\n ## indices directly below the \"inside square\"\n bot = (jj[N- 1 - i, k/2:N-(k/2)].flatten(), ii[N-1-i, k/2: N - (k/2)].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:(k/2) + 1 + i, :].flatten()\n bot = np.ravel_multi_index(bot, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n bot_tile = np.tile(bot, (coords_add.shape[1],1)).T\n bot_coords = bot_tile + coords_add\n unrav_coords = np.unravel_index(bot_coords, (N, N))\n sum_sin_bot = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_bot = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_bot = np.arctan2(sum_sin_bot, sum_cos_bot)\n filt_psi[np.unravel_index(bot, (N, N))] = psi_bot\n\n ## indices directly left of the \"inside square\"\n left = (jj[k/2:N-(k/2), i].flatten(), ii[k/2:N-(k/2), i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, (k/2)-i:].flatten()\n left = np.ravel_multi_index(left, (N, N))\n coords_add = np.tile(coords_add, (len(left), 1))\n left_tile = np.tile(left, (coords_add.shape[1],1)).T\n left_coords = left_tile + coords_add\n unrav_coords = np.unravel_index(left_coords, (N, N))\n sum_sin_left = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_left = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_left = np.arctan2(sum_sin_left, sum_cos_left)\n filt_psi[np.unravel_index(left, (N, N))] = psi_left\n\n ## indices directly left of the \"inside square\"\n right = (jj[k/2:N-(k/2), N - 1 - i].flatten(), ii[k/2:N-(k/2), N - 1 - i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, :(k/2)+1+i].flatten()\n right = np.ravel_multi_index(right, (N, N))\n coords_add = np.tile(coords_add, (len(right), 1))\n right_tile = np.tile(right, (coords_add.shape[1],1)).T\n right_coords = right_tile + coords_add\n unrav_coords = np.unravel_index(right_coords, (N, N))\n sum_sin_right = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_right = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_right = np.arctan2(sum_sin_right, sum_cos_right)\n filt_psi[np.unravel_index(right, (N, N))] = psi_right\n \n ## calculate boundaries diagonals\n left_t, right_t, left_b, right_b = (i, i), (i, -1 -i), (-1 - i, i), (-1 - i, -1 - i) \n left_t, right_t, left_b, right_b = (jj[left_t], ii[left_t]), (jj[right_t], ii[right_t]), (jj[left_b], ii[left_b]), (jj[right_b], ii[right_b])\n left_t, right_t, left_b, right_b = np.ravel_multi_index(left_t, (N, N)), np.ravel_multi_index(right_t, (N, N)), np.ravel_multi_index(left_b, (N, N)), np.ravel_multi_index(right_b, (N, N))\n coord_mat = 
krange_tile + k_tile\n coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb = coord_mat[(k/2)-i:, (k/2)-i:].flatten(), coord_mat[(k/2)-i:, :(k/2)+1+i].flatten(), coord_mat[:(k/2)+i+1, (k/2)-i:].flatten(), coord_mat[:(k/2)+i+1, :(k/2)+i+1].flatten()\n coords_add_tot = np.vstack((coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb))\n lt_tile, rt_tile, lb_tile, rb_tile = np.tile(left_t, (coords_add_lt.shape[0],1)).T, np.tile(right_t, (coords_add_lt.shape[0],1)).T, np.tile(left_b, (coords_add_lt.shape[0],1)).T, np.tile(right_b, (coords_add_lt.shape[0],1)).T\n coords_tile_tot = np.squeeze(np.stack((lt_tile, rt_tile, lb_tile, rb_tile)))\n coords_tot = coords_add_tot + coords_tile_tot\n unrav_coords = np.unravel_index(coords_tot, (N, N))\n sum_sin_diag = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_diag = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_diag = np.arctan(sum_sin_diag, sum_cos_diag)\n filt_psi[np.unravel_index(np.stack((left_t, right_t, left_b, right_b)), (N, N))] = psi_diag\n\n return filt_psi", "def sum(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.add.reduce(self, out=out, axis=axis, keepdims=keepdims, dtype=dtype)", "def Psi(l,m,theta,phi):\n if numpy.isscalar(theta): \n theta=numpy.array([[theta]])\n phi=numpy.array([[phi]])\n Psilm_th=numpy.zeros(theta.shape,dtype=complex)\n Psilm_ph=numpy.zeros(theta.shape,dtype=complex)\n x=numpy.cos(theta)\n thetaNonZerosIdx=numpy.where(theta!=0.0)\n if len(thetaNonZerosIdx[0]) != 0:\n Ylm=scipy.special.sph_harm(m,l,phi[thetaNonZerosIdx],theta[thetaNonZerosIdx])\n #Compute derivative of sphrHarm function w.r.t. theta:\n if l>=numpy.abs(m):\n Plmpo=legendreLM(l,m+1,x[thetaNonZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/float(math.factorial(l+m)))*Plmpo*numpy.exp(1j*m*phi[thetaNonZerosIdx])\n #YlmPmpo=sqrt((l-m)*(l+m+1))*spharm(l,m+1,theta,phi)*exp(-i*phi) %Should be equivalent to above formula.\n dtYlm=+YlmPmpo+m*x[thetaNonZerosIdx]*Ylm/numpy.sin(theta[thetaNonZerosIdx])\n # thetZerInd=[find(theta==0); find(theta==pi)]\n # dtYlm(thetZerInd)=0; %This is a fudge to remove NaNs\n else:\n dtYlm=numpy.zeros(theta[thetaNonZerosIdx].shape,dtype=complex)\n\n #dtYlm=spharmDtheta(l,m,theta,phi)\n\n Psilm_ph[thetaNonZerosIdx]=+1j*m/numpy.sin(theta[thetaNonZerosIdx])*Ylm\n Psilm_th[thetaNonZerosIdx]=+dtYlm\n #Ref: http://mathworld.wolfram.com/VectorSphericalHarmonic.html\n\n thetaZerosIdx=numpy.where(theta==0.0)\n if len(thetaZerosIdx[0]) != 0:\n if numpy.abs(m)==1:\n Yl1B=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*PBl1(l,m)*numpy.exp(1j*m*phi[thetaZerosIdx])\n Plmpo=legendreLM(l,m+1,x[thetaZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*Plmpo*numpy.exp(1j*m*phi[thetaZerosIdx])\n dtYlm=+YlmPmpo+m*Yl1B\n Psilm_ph[thetaZerosIdx]=+1j*m*Yl1B\n Psilm_th[thetaZerosIdx]=+dtYlm\n else:\n Plmpo=legendreLM(l,m+1,x[thetaZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*Plmpo*numpy.exp(1j*m*phi[thetaZerosIdx])\n dtYlm=+YlmPmpo+0\n Psilm_ph[thetaZerosIdx]=0\n Psilm_th[thetaZerosIdx]=+dtYlm\n return Psilm_th,Psilm_ph", "def __add__(self, other):\n if isinstance(other, complex):\n return Ohm(self.ohm + other, self.ohm_unit, self.freq, self.freq_unit)\n if self.ohm_unit != other.ohm_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency 
{self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n ohm_sum = self.ohm + other.ohm\n return Ohm(ohm_sum, self.ohm_unit, self.freq, self.freq_unit)", "def test_trotter_hamiltonian_scalar_add(nqubits=4):\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, trotter=True)\n target_ham = 2 + hamiltonians.TFIM(nqubits, h=1.0, numpy=True)\n local_dense = (2 + local_ham).dense\n np.testing.assert_allclose(local_dense.matrix, target_ham.matrix)\n\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, trotter=True)\n local_dense = (local_ham + 2).dense\n np.testing.assert_allclose(local_dense.matrix, target_ham.matrix)", "def sum(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.sum, reduce_instance_dims, name)", "def _calc_interaction_expansion(self):\n # preevaluate expansions for volume and surface phase functions\n # this returns symbolic code to be then further used\n\n volexp = self.V.legexpansion(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n self.geometry).doit()\n\n brdfexp = self.SRF.legexpansion(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n self.geometry).doit()\n\n # preparation of the product of p*BRDF for coefficient retrieval\n # this is the eq.23. and would need to be integrated from 0 to 2pi\n fPoly = expand(2 * sp.pi * volexp * brdfexp)\n\n # do integration of eq. 23\n expr = self._integrate_0_2pi_phis(fPoly)\n\n # now we do still simplify the expression to be able to express\n # things as power series of cos(theta_s)\n theta_s = sp.Symbol('theta_s')\n replacements = [(sp.sin(theta_s) ** i,\n expand((1. - sp.cos(theta_s) ** 2)\n ** sp.Rational(i, 2)))\n for i in range(1, self.SRF.ncoefs + self.V.ncoefs - 1)\n if i % 2 == 0]\n\n res = expand(expr.xreplace(dict(replacements)))\n\n return res" ]
[ "0.60549116", "0.5197901", "0.51612186", "0.5089202", "0.5049697", "0.49782285", "0.49448317", "0.4918859", "0.49002635", "0.48757395", "0.48220482", "0.47755814", "0.47722915", "0.47557205", "0.4742954", "0.4741564", "0.4729267", "0.47249097", "0.4709373", "0.47004437", "0.46857408", "0.46545032", "0.45998308", "0.45647055", "0.45631766", "0.45355248", "0.452442", "0.4514806", "0.4503379", "0.4491233", "0.44874376", "0.4487241", "0.447553", "0.44709876", "0.44657275", "0.44584814", "0.4450358", "0.44462273", "0.444596", "0.4431268", "0.4428939", "0.44183302", "0.44115272", "0.44044048", "0.4401718", "0.4397957", "0.4393394", "0.43823457", "0.43793994", "0.4370917", "0.43490502", "0.432951", "0.43288547", "0.43254706", "0.43253207", "0.43127105", "0.43039536", "0.4300853", "0.42967457", "0.42935994", "0.42909485", "0.428119", "0.42778412", "0.42775992", "0.42770064", "0.4276727", "0.42758268", "0.4268932", "0.42673218", "0.42643508", "0.42554086", "0.42535493", "0.4251259", "0.42469543", "0.42462793", "0.42441413", "0.42424965", "0.42418608", "0.42398277", "0.42368618", "0.4233927", "0.42337242", "0.42332482", "0.42326182", "0.4226279", "0.42222127", "0.4219368", "0.421928", "0.4212741", "0.42115936", "0.42085636", "0.4205027", "0.42017874", "0.4197364", "0.4196344", "0.4184428", "0.41795307", "0.41774887", "0.41748753", "0.41723403" ]
0.5082671
4
r"""Find the approximate location of a levitation trap. Find an approximate position of a acoustic levitation trap close to a starting point. This is done by following the radiation force in the sound field using an differential equation solver. The differential equation is the unphysical equation
def find_trap(array, start_position, complex_transducer_amplitudes, tolerance=10e-6, time_interval=50, path_points=1, **kwargs): from scipy.integrate import solve_ivp from numpy.linalg import lstsq if 'radius' in kwargs: from .fields import SphericalHarmonicsForce as Force, SphericalHarmonicsForceGradient as ForceGradient else: from .fields import RadiationForce as Force, RadiationForceGradient as ForceGradient evaluator = Force(array, **kwargs) + ForceGradient(array, **kwargs) mg = evaluator.fields[0].field.mg def f(t, x): F = evaluator(complex_transducer_amplitudes, x)[0] F[2] -= mg return F def bead_close(t, x): F, dF = evaluator(complex_transducer_amplitudes, x) F[2] -= mg dx = lstsq(dF, F, rcond=None)[0] distance = np.sum(dx**2, axis=0)**0.5 return np.clip(distance - tolerance, 0, None) bead_close.terminal = True outs = solve_ivp(f, (0, time_interval), np.asarray(start_position), events=bead_close, vectorized=True, dense_output=path_points > 1) if outs.message != 'A termination event occurred.': print('End criterion not met. Final path position might not be close to trap location.') if path_points > 1: return outs.sol(np.linspace(0, outs.sol.t_max, path_points)) else: return outs.y[:, -1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def anl_solution(self):\r\n\r\n m = float(self.mass) / self.nu_m\r\n qe = 1 / self.nu_m * (self.nu_t * self.nu_t / self.nu_x) * 1.0 \\\r\n / float(self.size_tick * self.size_tick)\r\n print 'qE=', qe\r\n c = self.light_vel\r\n for i in range(0, len(self.obs.obt_g)):\r\n ddt = float(self.obs.obt[i] - self.obs.obt[i - 1])\r\n x = m * c ** 2 / qe * (math.sqrt(1.0 + (qe * self.t[i] / (m\r\n * c)) ** 2) - 1.0)\r\n self.xa_track.append(x)\r\n p = qe * self.t[i]\r\n self.pa.append(p)\r\n v = p / math.sqrt(m ** 2 + (p / c) ** 2)\r\n jv = self.t[i] * qe / (m * c)\r\n v = math.sqrt(jv * jv / (1 + jv * jv)) * c\r\n self.va.append(v)\r\n print 'Analytical solution of the differential equation of motion'", "def find_lower_tangent(l_x_as, l_y_as, r_x_as, r_y_as, r_yp_as):\n # logg = logging.getLogger(f\"c.{__name__}.find_lower_tangent\")\n # logg.debug(f\"Start find_lower_tangent\")\n\n # compute the second derivative\n r_ypp = r_yp_as[1:] - r_yp_as[:-1]\n mean_r_ypp = np.mean(r_ypp)\n\n # logg.debug(f\"r_yp_as: {r_yp_as}\")\n # logg.debug(f\"r_ypp: {r_ypp}\")\n\n if mean_r_ypp >= 0:\n # logg.debug(f\"ypp positive\")\n range_xid = range(r_x_as.shape[0])\n else:\n # logg.debug(f\"ypp negative\")\n range_xid = range(r_x_as.shape[0])[::-1]\n\n tangent_start = timer()\n for xid in range_xid:\n # point tangent to the *right* segment\n tang_op = OrientedPoint(r_x_as[xid], r_y_as[xid], slope2deg(r_yp_as[xid]))\n tang_coeff = tang_op.to_ab_line()\n\n # sample it on the *left* segment sample\n l_tang_y_as = poly_model(l_x_as, tang_coeff, flip_coeff=True)\n # ax.plot(l_x_as, l_tang_y_as, color=\"b\", ls=\"-\", marker=\"\")\n # ax.plot(l_x_as, l_tang_y_as, color=\"b\", ls=\"\", marker=\".\")\n\n # find if the left segment has some points lower than the tangent\n lower = l_y_as < l_tang_y_as\n # logg.debug(f\"lower: {lower} {np.sum(lower)}\")\n if np.sum(lower) == 0:\n # logg.debug(f\"Breaking at xid: {xid}\")\n break\n\n tangent_end = timer()\n tangent_time = tangent_end - tangent_start\n # logg.debug(f\"Time to find tangent: {tangent_end - tangent_start:.6f}\")\n\n # find distance from left segment to tangent\n dist_left_tangent = l_y_as - l_tang_y_as\n min_dist_left_tangent = np.min(dist_left_tangent)\n argmin_dist_left_tangent = np.argmin(dist_left_tangent)\n recap = f\"min_dist_left_tangent: {min_dist_left_tangent:.6f}\"\n recap += \" argmin_dist_left_tangent: {argmin_dist_left_tangent}\"\n # logg.debug(recap)\n\n if min_dist_left_tangent < 0:\n # logg.debug(f\"Tangent not found\")\n return -1, -1, None, tangent_time\n\n l_xid = argmin_dist_left_tangent\n r_xid = xid\n\n return l_xid, r_xid, l_tang_y_as, tangent_time", "def analyticalLinearSol(self, t):\n return self.c*t + self.I", "def LineSearch(Pos, Dir, dx, EFracTol, M, L, Cut,\n Accel = 1.5, MaxInc = 10., MaxIter = 10000):\n #start the iteration counter\n Iter = 0\n\n #find the normalized direction\n NormDir = Dir / np.sqrt(np.sum(Dir * Dir))\n\n #take the first two steps and compute energies\n Dists = [0., dx]\n PEs = [mdlib.calcenergy(Pos + NormDir * x, M, L, Cut) for x in Dists]\n\n #if the second point is not downhill in energy, back\n #off and take a shorter step until we find one\n while PEs[1] > PEs[0]:\n Iter += 1\n dx = dx * 0.5\n Dists[1] = dx\n PEs[1] = mdlib.calcenergy(Pos + NormDir * dx, M, L, Cut)\n\n #find a third point\n Dists = Dists + [2. * dx]\n PEs = PEs + [mdlib.calcenergy(Pos + NormDir * 2. 
* dx, M, L, Cut)]\n\n #keep stepping forward until the third point is higher\n #in energy; then we have bracketed a minimum\n while PEs[2] < PEs[1]:\n Iter += 1\n\n #find a fourth point and evaluate energy\n Dists = Dists + [Dists[-1] + dx]\n PEs = PEs + [mdlib.calcenergy(Pos + NormDir * Dists[-1], M, L, Cut)]\n\n #check if we increased too much in energy; if so, back off\n if (PEs[3] - PEs[0]) > MaxInc * (PEs[0] - PEs[2]):\n PEs = PEs[:3]\n Dists = Dists[:3]\n dx = dx * 0.5\n else:\n #shift all of the points over\n PEs = PEs[-3:]\n Dists = Dists[-3:]\n dx = dx * Accel\n\n #we've bracketed a minimum; now we want to find it to high\n #accuracy\n OldPE3 = 1.e300\n while True:\n Iter += 1\n if Iter > MaxIter:\n print(\"Warning: maximum number of iterations reached in line search.\")\n break\n\n #store distances for ease of code-reading\n d0, d1, d2 = Dists\n PE0, PE1, PE2 = PEs\n\n #use a parobolic approximation to estimate the location\n #of the minimum\n d10 = d0 - d1\n d12 = d2 - d1\n Num = d12*d12*(PE0-PE1) - d10*d10*(PE2-PE1)\n Dem = d12*(PE0-PE1) - d10*(PE2-PE1)\n if Dem == 0:\n #parabolic extrapolation won't work; set new dist = 0\n d3 = 0\n else:\n #location of parabolic minimum\n d3 = d1 + 0.5 * Num / Dem\n\n #compute the new potential energy\n PE3 = mdlib.calcenergy(Pos + NormDir * d3, M, L, Cut)\n\n #sometimes the parabolic approximation can fail;\n #check if d3 is out of range < d0 or > d2 or the new energy is higher\n if d3 < d0 or d3 > d2 or PE3 > PE0 or PE3 > PE1 or PE3 > PE2:\n #instead, just compute the new distance by bisecting two\n #of the existing points along the line search\n if abs(d2 - d1) > abs(d0 - d1):\n d3 = 0.5 * (d2 + d1)\n else:\n d3 = 0.5 * (d0 + d1)\n PE3 = mdlib.calcenergy(Pos + NormDir * d3, M, L, Cut)\n\n #decide which three points to keep; we want to keep\n #the three that are closest to the minimum\n if d3 < d1:\n if PE3 < PE1:\n #get rid of point 2\n Dists, PEs = [d0, d3, d1], [PE0, PE3, PE1]\n else:\n #get rid of point 0\n Dists, PEs = [d3, d1, d2], [PE3, PE1, PE2]\n else:\n if PE3 < PE1:\n #get rid of point 0\n Dists, PEs = [d1, d3, d2], [PE1, PE3, PE2]\n else:\n #get rid of point 2\n Dists, PEs = [d0, d1, d3], [PE0, PE1, PE3]\n\n #check how much we've changed\n if abs(OldPE3 - PE3) < EFracTol * abs(PE3):\n #the fractional change is less than the tolerance,\n #so we are done and can exit the loop\n break\n OldPE3 = PE3\n\n #return the position array at the minimum (point 1)\n PosMin = Pos + NormDir * Dists[1]\n PEMin = PEs[1]\n\n #if using visualization, update the display\n if UseVisual:\n if atomvis.Initialized:\n #update the positions\n atomvis.Update(PosMin)\n else:\n #initialize the visualization window\n atomvis.Init(PosMin)\n\n return PEMin, PosMin", "def _get_closest_light_ahead(self, pose):\n #\n # we have very few lights in either the simulation or the live test,\n # so it is easiest just to loop thru them rather than use KDTree\n #\n pos = pose.position\n x = pos.x\n y = pos.y\n closest_idx = -1\n closest_dist2 = None\n idx = 0\n for light in self.lights:\n xl = light.pose.pose.position.x\n yl = light.pose.pose.position.y\n\n #\n # make sure light is ahead, otherwise ignore it\n # we can only do this if the car velocity is nonzero\n #\n skip_light = False\n if self.velocity_unit_vector:\n dx = xl - x\n dy = yl - y\n car_to_light = [dx,dy]\n val = self.dot2d(car_to_light,self.velocity_unit_vector)\n if val < 0.0:\n #\n # light is behind us so continue\n #\n skip_light = True\n\n if not skip_light:\n if closest_dist2 is None:\n closest_idx = 
idx\n closest_dist2 = (x-xl)*(x-xl) + (y-yl)*(y-yl)\n else:\n dist2 = (x-xl)*(x-xl) + (y-yl)*(y-yl)\n if dist2 < closest_dist2:\n closest_idx = idx\n closest_dist2 = dist2\n idx+=1\n \n return closest_idx", "def lidar_relative(self):\n return self.distance", "def calcPos(self,newpol):\n\n\t\tdetlatoff=(self.offset9()-self.offset10())*cosd(newpol)+self.offset10()\n\t\tnewoffcry = (self.offset2()-self.offset3())*cosd(newpol)+self.offset3()\n\t\tnewdetoff = (self.offset4()-self.offset8())*cosd(newpol)+self.offset8() +self.offset5()\n\n\t\twl = BLi.getWavelength()\n\t\tself.thbragg = 180/pi*asin(wl/(2*self.dspace))\n\t\tnewthp=self.sign()*self.thbragg+newoffcry\n\t\tnewtthp=2*self.sign()*self.thbragg+newdetoff\n\t\tprint \"stokes=%1.2f thp=%1.2f tthp=%1.2f detlatoff=%1.2f\"%(newpol,newthp,newtthp,detlatoff)", "def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition(self.index)\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState, self.index)\n\n\n \"*** YOUR CODE HERE ***\"\n return search.bfs(problem)", "def LAT(self):\n # The maximum update amount for these element\n LateralFraction_DELTA = self.dt * (self.LateralFraction_LIMITS[1] -\n self.LateralFraction_LIMITS[0]) / (\n 2.0)\n\n # Add either positive or negative or zero delta for each\n # NOTE: 'High' is open bracket ) so the max is 1\n LateralFraction_DIRECTION = np.random.randint(-1, 2, 1)[0]\n\n # Now, modify modifiable params AND CLIP\n self.LateralFraction += LateralFraction_DIRECTION * LateralFraction_DELTA\n self.LateralFraction = np.clip(self.LateralFraction,\n self.LateralFraction_LIMITS[0],\n self.LateralFraction_LIMITS[1])", "def FindClosestPoint(self, ):\n ...", "def mrv_leadterm(e, x):\n Omega = SubsSet()\n if not e.has(x):\n return (e, S.Zero)\n if Omega == SubsSet():\n Omega, exps = mrv(e, x)\n if not Omega:\n # e really does not depend on x after simplification\n return exps, S.Zero\n if x in Omega:\n # move the whole omega up (exponentiate each term):\n Omega_up = moveup2(Omega, x)\n exps_up = moveup([exps], x)[0]\n # NOTE: there is no need to move this down!\n Omega = Omega_up\n exps = exps_up\n #\n # The positive dummy, w, is used here so log(w*2) etc. 
will expand;\n # a unique dummy is needed in this algorithm\n #\n # For limits of complex functions, the algorithm would have to be\n # improved, or just find limits of Re and Im components separately.\n #\n w = Dummy(\"w\", positive=True)\n f, logw = rewrite(exps, Omega, x, w)\n try:\n lt = f.leadterm(w, logx=logw)\n except (NotImplementedError, PoleError, ValueError):\n n0 = 1\n _series = Order(1)\n incr = S.One\n while _series.is_Order:\n _series = f._eval_nseries(w, n=n0+incr, logx=logw)\n incr *= 2\n series = _series.expand().removeO()\n try:\n lt = series.leadterm(w, logx=logw)\n except (NotImplementedError, PoleError, ValueError):\n lt = f.as_coeff_exponent(w)\n if lt[0].has(w):\n base = f.as_base_exp()[0].as_coeff_exponent(w)\n ex = f.as_base_exp()[1]\n lt = (base[0]**ex, base[1]*ex)\n return (lt[0].subs(log(w), logw), lt[1])", "def lib_pt_loc(sys_chars_vals, tolerance=1e-12):\r\n mu = sys_chars_vals.mu\r\n\r\n lib_loc = np.zeros((5, 3))\r\n lib_loc[3, :] = [\r\n 0.5 - mu,\r\n 3**0.5 / 2,\r\n 0,\r\n ] # L4, analytical_guessal solution known\r\n lib_loc[4, :] = [\r\n 0.5 - mu,\r\n -(3**0.5) / 2,\r\n 0,\r\n ] # L5, analytical solution known\r\n\r\n # 5th degree polynomial of L1, L2 and L3\r\n f_lib = np.array(\r\n [\r\n [1, mu - 3, 3 - 2 * mu, -mu, 2 * mu, -mu],\r\n [1, 3 - mu, 3 - 2 * mu, -mu, -2 * mu, -mu],\r\n [1, 2 + mu, 1 + 2 * mu, mu - 1, 2 * mu - 2, -1 + mu],\r\n ]\r\n )\r\n\r\n # First-order derivative of the polyomial defined in f_lib\r\n fd_lib = np.array(\r\n [\r\n [0, 5, 4 * (mu - 3), 3 * (3 - 2 * mu), 2 * -mu, 2 * mu],\r\n [0, 5, 4 * (3 - mu), 3 * (3 - 2 * mu), 2 * -mu, -2 * mu],\r\n [0, 5, 4 * (2 + mu), 3 * (1 + 2 * mu), 2 * (mu - 1), 2 * mu - 2],\r\n ]\r\n )\r\n\r\n initial_guess = np.array([0.9, 1.1, -1])\r\n\r\n for i in range(3):\r\n val = np.vander([initial_guess[i]], 6)\r\n h = np.dot(val, f_lib[i, :]) / np.dot(val, fd_lib[i, :])\r\n while abs(h) >= tolerance:\r\n val = np.vander([initial_guess[i]], 6)\r\n h = np.dot(val, f_lib[i, :]) / np.dot(val, fd_lib[i, :])\r\n lib_loc[i, 0] = initial_guess[i] - h\r\n\r\n initial_guess[i] = lib_loc[i, 0]\r\n\r\n if i == 0:\r\n lib_loc[i, 0] = 1 - mu - lib_loc[i, 0]\r\n elif i == 1:\r\n lib_loc[i, 0] = 1 - mu + lib_loc[i, 0]\r\n elif i == 2:\r\n lib_loc[i, 0] = -mu - lib_loc[i, 0]\r\n\r\n return lib_loc", "def get_tissue_coords(self, xdva, ydva):\n if self.eye == 'LE':\n raise NotImplementedError\n # Convert x, y (dva) into polar coordinates:\n theta, rho_dva = utils.cart2pol(xdva, ydva)\n # Add RGC displacement:\n meridian = np.where(xdva < 0, 'temporal', 'nasal')\n rho_dva += self._watson_displacement(rho_dva, meridian=meridian)\n # Convert back to x, y (dva):\n x, y = utils.pol2cart(theta, rho_dva)\n # Convert to retinal coords:\n return dva2ret(x), dva2ret(y)", "def estimated_distance(self, log=False):\n\t\t\n\t\tx0 = GRAVITY - self.thrust*.95 / (self.total_mass - self.fuel_consumption * 0.0)\n\t\tx1 = GRAVITY - self.thrust*.95 / (self.total_mass - self.fuel_consumption * 1.0)\n\n\t\t# Derivative at x=0 and x=1\n\t\tu = x0\n\t\tv = x1\n\t\t# Initial height at x=0\n\t\ty = abs(self.velocity)\n\n\t\tif log:\n\t\t\tprint(f'u: {u}, v: {v}, y: {y}\\nEstimated distance: {get_positive_area(u, v, y)}\\n')\n\t\t\n\t\treturn get_positive_area(u, v, y)", "def closest_cruising_altitude(altitude):\n return 1000 * ((altitude + 500) // 1000)", "def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition()\n food = gameState.getFood()\n walls = 
gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState)\n\n \"*** YOUR CODE HERE ***\"\n return breadthFirstSearch(problem)\n # util.raiseNotDefined()", "def calc_nearest_ind(self, robot_pose):\n pass", "def get_lookahead_point(self):\n lookahead_target_dist = self.lookahead_dist #+ (1 + self.curr_v)\n\n if self.path_point_idx == len(self.current_path) - 1 or self.path_point_idx == -1:\n #End of path, no more lookahead\n return self.path_point\n\n prev_pt = self.current_path[self.path_point_idx]\n curr_pt = self.current_path[self.path_point_idx + 1]\n pt_dist = np.hypot((prev_pt - curr_pt)[0], (prev_pt - curr_pt)[1])\n curr_dist = pt_dist\n c = self.path_point_idx\n while curr_dist < lookahead_target_dist and c < len(self.current_path) - 1:\n prev_pt = self.current_path[c]\n curr_pt = self.current_path[c + 1]\n pt_dist = np.hypot((prev_pt - curr_pt)[0], (prev_pt - curr_pt)[1])\n curr_dist += pt_dist\n c += 1\n\n if curr_dist < lookahead_target_dist:\n return self.current_path[-1]\n else:\n #Interpolate to get the actual lookahead point\n frac = (curr_dist - lookahead_target_dist) / pt_dist\n pt = frac * prev_pt + (1-frac) * curr_pt\n return pt", "def FindClosestInsertedPoint(self, ):\n ...", "def calibrate(self):\n\t\twl = BLi.getWavelength()\n\t\tif abs(self.stokes()) <= .5:\n\t\t\txxx=self.sign()*180/pi*asin( wl/(2*self.dspace)) - (self.thp())\n\t\t\tself.offset2(-xxx)\n\t\t\tyyy=self.tthp()-self.sign()*2*180/pi*asin(wl/(2*self.dspace))-self.offset5()\n\t\t\tself.offset4(yyy)\n\t\t\tself.offset9(self.dettrans())\n\t\telif abs(self.stokes()-90.) <= .5:\n\t\t\txxx=self.sign()*180/pi*asin( wl/(2*self.dspace)) - (self.thp())\n\t\t\tself.offset3(-xxx)\n\t\t\tyyy=self.tthp()-self.sign()*2*180/pi*asin(wl/(2*self.dspace))-self.offset5()\n\t\t\tself.offset8(yyy)\n\t\t\tself.offset10(self.dettrans())\n\t\telse:\n\t\t\tprint \"Can't calibrate at stokes=\",self.stokes()\n\t\treturn [self.sign(),self.offset2(), self.offset3(),self.offset4(),self.offset5(),self.offset8(),self.offset9(),self.offset10()]", "def approach_gps(g_lat,g_lon,emily_lat_start, emily_lon_start, pose_rad, Parameters): #approach a gps position using potential fields\r\n\tx_goal,y_goal = latlongtoxy(g_lat,g_lon,g_lat)\r\n\tx_e_start,y_e_start = latlongtoxy(emily_lat_start,emily_lon_start,g_lat)\r\n\r\n\tprint (\"\\n HERE I AM\\n\\n\")\r\n\r\n\tdist = haver_distance(g_lat, g_lon, emily_lat_start, emily_lon_start)\r\n\tinitial_dist = dist\r\n\r\n\tprint ('Distance: ',dist)\r\n\theading = get_heading(emily_lat_start, emily_lon_start, g_lat, g_lon)\r\n print ('After get heading')\r\n\t# Eric: I'm not sure if turn_towards is necessary for a successful run.\r\n\t#turn_towards(heading)\r\n\tprint ('After Turn towards')\r\n\t#turn towards the goal initially\r\n\r\n\tstart_time = time.time()\r\n\tcurrent_time = 0\r\n\tdstore = []\r\n\thstore = []\r\n\twhile(dist >= goal_radius):\r\n\r\n\t\t#------------ code for reading gps location of emily and its orientation ------\r\n\t\te_lat = vehicle.location.global_frame.lat\r\n\t\te_lon = vehicle.location.global_frame.lon\r\n\t\te_heading = vehicle.heading * pi/180\t\t# convert heading to radians\r\n\t\t#------------------ get e_lat,e_lon, e_orient ---------------------\r\n\r\n\r\n\t\tx_e,y_e = latlongtoxy(e_lat,e_lon,g_lat)\t\t\t#change latitude and longitude to xy\r\n\r\n\t\t#x,y are given to approach victim function as y,x to algin the north heading and direction in x,y\r\n\r\n\t\tdx,dy = approach_victim_behaviour(y_goal,x_goal, y_e,x_e, pose_rad, Parameters)\t#get potential field 
vector\r\n\t\trc1, rc3 = dxdytorc(dx,dy, e_heading,g_lon)\t\t\t\t\t#get rc parameters\r\n\t\tdist = haver_distance(g_lat, g_lon, e_lat, e_lon)\t\t\t\t#haversine distance\r\n\r\n\t\tcurrent_time = time.time() - start_time\r\n\t\tprint (\"Time, Heading, Distance\")\r\n\t\tprint (current_time, e_heading*180/pi, dist)\r\n\t\tdstore.append(dist)\r\n\t\thstore.append(e_heading*180/pi)\r\n\t\t#code for sending the writing the rc commands\r\n\t\t# 3 is the thrust control\r\n\t\t#vehicle.channels.overrides = {'3':rc3}\r\n\t\tsendThrottleCommand(rc3, enableThrottle)\r\n\t\ttime.sleep(0.5)\r\n\t\tvehicle.channels.overrides = {'1':rc1}\r\n\t\tprint (\"Rudder: \",rc1)\r\n\t\tprint (\"Throttle: \",rc3)\r\n\t\tsaveToLog(e_lat, e_lon,dist,rc1,rc3)\r\n\t\ttime.sleep(0.5)\r\n\tprint(initial_dist)\r\n\tprint(\"intial \", emily_lat_start,emily_lon_start)\r\n\tprint(\"final \",e_lat,e_lon)\r\n\tplt.plot(dstore)\r\n\t#plt.title('Distance form home vs time')\r\n\tplt.xlabel(\"Time\")\r\n\tplt.ylabel('Distance')\r\n\tplt.show()\r\n\tplt.plot(hstore)\r\n\tplt.show()", "def get_closest_waypoint(self, pose):\n #TODO implement - Done\n # Iterate the base_waypoints' x value with current position's x value and find the closest\n # match, and pick that waypoint location index. \n min_idx = 0\n min_dist = None\n cur_x = pose.position.x\n cur_y = pose.position.y\n if self.waypoints is not None:\n for i, wp in enumerate(self.waypoints):\n wp_x = wp.pose.pose.position.x\n wp_y = wp.pose.pose.position.y\n dist = np.sqrt((cur_x - wp_x)**2 + (cur_y - wp_y)**2)\n if min_dist is None or min_dist >= dist:\n min_dist = dist\n min_idx = i\n \n # check whether the identified index is behind the current position, if so, move it by 1 index\n # https://gamedev.stackexchange.com/questions/75072/how-can-i-compare-two-quaternions-for-logical-equality\n # rospy.logwarn('min_idx before = %d', min_idx)\n eps = 1e-12\n if self.waypoints is not None:\n q1 = self.waypoints[min_idx].pose.pose.orientation\n q2 = pose.orientation\n q1_a = np.array([q1.x, q1.y, q1.z, q1.w])\n q2_a = np.array([q2.x, q2.y, q2.z, q2.w])\n direction = abs(np.dot(q1_a, q2_a))\n #rospy.logwarn('calculated direction %f', direction)\n wp_x = self.waypoints[min_idx].pose.pose.position.x\n if direction > 1-eps:\n if wp_x < cur_x:\n min_idx += 1\n else:\n min_idx -= 1\n else:\n if wp_x < cur_x:\n min_idx -= 1\n else:\n min_idx += 1\n\n # rospy.logwarn('min_idx after = %d', min_idx)\n return min_idx", "def exactsolution(x, t, u):\n if 0 <= (x - u*t) and (x - u*t) <= 0.2:\n temp = 1 - (10 * (x - u*t) -1)**2\n else:\n temp = 0\n return temp", "def solve(self, theta):\n ndim = len(theta)\n nspots = int((ndim - 3) / 3)\n y = np.ones_like(self.x)\n for i in range(1, nspots + 1):\n y += -1 + eker(self.x, np.append(theta[:3], theta[3 * i:3 * (i + 1)]), l1=self.l1, l2=self.l2, ir=self.ir)\n return y", "def _hill_diff(self, position):\n if position < 0:\n return 2 * position + 1\n else:\n return (1/math.sqrt(1 + 5 * position ** 2)\n - 5 * position ** 2 * (1 + 5 * position ** 2)**-1.5)", "def leff(self):\n with Vega() as v:\n s = self.reinterp(v.wavelength)\n w = s._wavelength\n if s.transmit.max() > 0:\n leff = np.trapz(w * s.transmit * v.flux.value, w, axis=-1)\n leff /= np.trapz(s.transmit * v.flux.value, w, axis=-1)\n else:\n leff = float('nan')\n if s.wavelength_unit is not None:\n leff = leff * Unit(s.wavelength_unit)\n if self.wavelength_unit is not None:\n return leff.to(self.wavelength_unit)\n return leff\n else:\n return leff", "def find_relative_time_reference(self, 
search_direction, tags, timex, timesIndex):\n \n if search_direction == 'Backward':\n ts = [r[2] for r in self.timexImpactZones if r[0]<=timex.getStartPos() and r[1]>=timex.getEndPos()]\n if ts:\n return ts[-1]\n else:\n return None\n \n parts = tags[timesIndex+1:]\n \n anchorTagSet = set(['Vaccine', 'Drug', 'Vaccination', 'Injection', 'Hospitalization', 'Administration']) \n doseNum = 0\n doseTag = [tg for tg in parts if tg[1]=='DoseIndicator']\n counts = []\n doseTagRange = 5\n if doseTag:\n counts = [(i, tg[0]) for i, tg in enumerate(parts) if tg[1]=='Count'] \n for i, tag in enumerate(parts):\n if tag[1]=='Drug' or tag[1]=='Vaccine':\n if counts:\n dist = 10000\n doseword = None\n for k, w in counts:\n if abs(k-i) < dist:\n dist = abs(k-i)\n doseword = w\n if doseword and dist<doseTagRange:\n doseNum = util.text2num.convertOrdinal(doseword) \n \n t = self.find_time_reference_with_tag(tag[1], tag[0], timex.getSentNum(), doseNum)\n if t:\n return t\n \n if tag[1] in ['Hospitalization', 'Administration']:\n t = self.find_time_reference_with_tag(tag[1], tag[0], timex.getSentNum())\n if t:\n return t\n \n if tag[1] in ['Vaccination', 'Injection']:\n if i+2<len(parts) and parts[i+1][0] in ['with', 'of', 'to'] and parts[i+2][1] in ['Drug', 'Vaccine']:\n continue\n t = self.find_time_reference_with_tag(tag[1], tag[0], timex.getSentNum())\n if t:\n return t\n \n ##: No reference tag is found, search backward for a valid time range\n ##: In ideal case, this should \"return None\" directly. However, considering that the current lexicon is not \n ##: complete enough, it's very likely some Vaccines or drugs are not tagged, we return the previous time\n ##: in the current development stage.\n ts = [r[2] for r in self.timexImpactZones if r[0]<=timex.getStartPos() and r[1]>=timex.getEndPos()]\n if ts:\n return ts[-1]\n \n return None", "def get_closest_waypoint(self, pose):\n if self.kdtree:\n return kdtree_closest_point(self.kdtree,\n (pose.position.x,\n pose.position.y))\n else:\n return 0", "def integrateModel(self, tend, initial=[1.0, 1.0, 0.0]):\n\n dt = 0.1\n self.ts = np.arange(0.0, tend + dt, dt)\n\n r = sp.integrate.solve_ivp(self.derv, (0, tend), initial, t_eval=self.ts, method='Radau') # uses RK45\n self.results = np.transpose(r.y)\n\n ent_angle = 1.0 * atan2(self.results[-1, 1], self.results[\n -1, 0]); # times negative one because VDP runs clockwise versus counterclockwise\n if (ent_angle < 0.0):\n ent_angle += 2 * sp.pi;\n\n ent_angle = ent_angle * 24.0 / (2.0 * sp.pi);\n return (ent_angle)", "def get_closest_waypoint(self, pose):\n if self.kdtree:\n return kdtree_closest_point(self.kdtree,\n (pose.x,\n pose.y))\n else:\n return 0", "def solve_linear_harmonic_oscillator(t, initial_ampl, initial_velocity, resonance_freq, damping = 0.0, drive_ampl = 0.0, drive_ang_freq = 0.0):\n\n\n x = initial_ampl\n p = initial_velocity\n y0 = [initial_ampl, initial_velocity]\n y1 = odeint(dy, y0, t, args=(damping, resonance_freq, drive_ampl, drive_ang_freq)) # under damped\n return y1[:,0], y1[:,1]", "def distance_to_current_waypoint():\n nextwaypoint = vehicle.commands.next\n if nextwaypoint == 0:\n return None\n missionitem = vehicle.commands[nextwaypoint -\n 1] #commands are zero indexed\n lat = missionitem.x\n lon = missionitem.y\n alt = missionitem.z\n targetWaypointLocation = LocationGlobalRelative(lat, lon, alt)\n distancetopoint = get_distance_metres(vehicle.location.global_frame,\n targetWaypointLocation)\n return distancetopoint", "def test_el_below_lcl():\n p = [902.1554, 897.9034, 893.6506, 
889.4047, 883.063, 874.6284, 866.2387, 857.887,\n 849.5506, 841.2686, 833.0042, 824.7891, 812.5049, 796.2104, 776.0027, 751.9025,\n 727.9612, 704.1409, 680.4028, 656.7156, 629.077, 597.4286, 565.6315, 533.5961,\n 501.2452, 468.493, 435.2486, 401.4239, 366.9387, 331.7026, 295.6319, 258.6428,\n 220.9178, 182.9384, 144.959, 106.9778, 69.00213] * units.hPa\n t = [-3.039381, -3.703779, -4.15996, -4.562574, -5.131827, -5.856229, -6.568434,\n -7.276881, -7.985013, -8.670911, -8.958063, -7.631381, -6.05927, -5.083627,\n -5.11576, -5.687552, -5.453021, -4.981445, -5.236665, -6.324916, -8.434324,\n -11.58795, -14.99297, -18.45947, -21.92021, -25.40522, -28.914, -32.78637,\n -37.7179, -43.56836, -49.61077, -54.24449, -56.16666, -57.03775, -58.28041,\n -60.86264, -64.21677] * units.degC\n td = [-22.08774, -22.18181, -22.2508, -22.31323, -22.4024, -22.51582, -22.62526,\n -22.72919, -22.82095, -22.86173, -22.49489, -21.66936, -21.67332, -21.94054,\n -23.63561, -27.17466, -31.87395, -38.31725, -44.54717, -46.99218, -43.17544,\n -37.40019, -34.3351, -36.42896, -42.1396, -46.95909, -49.36232, -48.94634,\n -47.90178, -49.97902, -55.02753, -63.06276, -72.53742, -88.81377, -93.54573,\n -92.92464, -91.57479] * units.degC\n prof = parcel_profile(p, t[0], td[0]).to('degC')\n el_p, el_t = el(p, t, td, prof)\n assert_nan(el_p, p.units)\n assert_nan(el_t, t.units)", "def closer_ang(x,a,dir=0):\r\n if dir == 0:\r\n return a + smaller_ang(x-a)\r\n elif dir == 1:\r\n return a + (x-a)%(2*pi)\r\n elif dir == -1:\r\n return a + (x-a)%(2*pi) - 2*pi", "def calc_refl(velocity, shotloc_x, shotloc_z, layer_idxs):\n solver_dg = pykonal.EikonalSolver(coord_sys=\"cartesian\")\n solver_dg.vv.min_coords = velocity.min_coords\n solver_dg.vv.node_intervals = velocity.node_intervals\n solver_dg.vv.npts = velocity.npts\n solver_dg.vv.values = velocity.values\n\n #shotloc = 2.56 # km\n src_idx = (int((shotloc_x - velocity.min_coords[0])/velocity.node_intervals[0]), int(shotloc_z/velocity.node_intervals[1]), 0)\n solver_dg.tt.values[src_idx] = 0\n solver_dg.unknown[src_idx] = False\n solver_dg.trial.push(*src_idx)\n solver_dg.solve()\n\n solver_ug = pykonal.EikonalSolver(coord_sys=\"cartesian\")\n solver_ug.vv.min_coords = solver_dg.vv.min_coords\n solver_ug.vv.node_intervals = solver_dg.vv.node_intervals\n solver_ug.vv.npts = solver_dg.vv.npts\n solver_ug.vv.values = solver_dg.vv.values\n\n for ix in range(solver_ug.tt.npts[0]):\n #idx = (ix, solver_ug.tt.npts[1]-1, 0)\n idx = (ix, layer_idxs[ix], 0)\n solver_ug.tt.values[idx] = solver_dg.tt.values[idx]\n #print(idx, solver_dg.tt.values[idx])\n solver_ug.unknown[idx] = False\n solver_ug.trial.push(*idx)\n solver_ug.solve()\n \n return solver_ug.tt.values[:,0,0]", "def findPathToClosestDot(self, gameState):\n startPosition = gameState.getPacmanPosition(self.index)\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState, self.index)\n return search.bfs(problem)\n util.raiseNotDefined()", "def get_reward(self):\n #original reward function: reward = 1.-.3*(abs(self.sim.pose[:3] - self.target_pos)).sum()\n thrusts = self.sim.get_propeler_thrust(self.sim.prop_wind_speed)\n linear_forces = self.sim.get_linear_forces(thrusts)\n distance = np.linalg.norm(self.target_pos - self.sim.pose[:3])\n #speed = math.sqrt(np.square(self.sim.find_body_velocity()).sum())\n #with 300x300x300m env, the max distance from one corner to another is 519\n max_distance = 519\n #Focus quadcopter on not crashing but first rewarding an upward linear force until at the 
height of the target\n if self.sim.pose[2] < self.target_pos[2]:\n #velocity_discount = 1/speed\n reward = np.tanh(linear_forces[2])\n #after getting to the correct z-coordinate, move to the correct y-coordinate\n elif self.sim.pose[1] < self.target_pos[1]:\n #velocity_discount = 1/speed\n reward = 1 + np.tanh(linear_forces[1])\n #finally, after getting rewards for the x and y coordinates, give reward for distance\n #at this stage, the drone will have overshot the x and y coordinates, but it would be in a better area to\n #start searching for the x coordinate\n elif distance > 1 and self.sim.pose[2] > self.target_pos[2] and self.sim.pose[1] > self.target_pos[1] :\n reward = 2 + (1-math.pow((distance/300),.04))\n elif distance < 1:\n self.success = True\n reward = 100\n #possible reward for hover: np.exp(-np.square(linear_forces[2]))\n return reward", "def get_lift_position(self) -> float:\n\n return self.send(self.cmd.GET_LIFT_ACT)", "def geocentricToApparentRadiantAndVelocity(ra_g, dec_g, vg, lat, lon, elev, jd, include_rotation=True):\n\n\n # Compute ECI coordinates of the meteor state vector\n state_vector = geo2Cartesian(lat, lon, elev, jd)\n\n eci_x, eci_y, eci_z = state_vector\n\n\n # Assume that the velocity at infinity corresponds to the initial velocity\n v_init = np.sqrt(vg**2 + (2*6.67408*5.9722)*1e13/vectMag(state_vector))\n\n\n # Calculate the geocentric latitude (latitude which considers the Earth as an elipsoid) of the reference \n # trajectory point\n lat_geocentric = np.degrees(math.atan2(eci_z, math.sqrt(eci_x**2 + eci_y**2)))\n\n\n\n\n ### Uncorrect for zenith attraction ###\n\n # Compute the radiant in the local coordinates\n azim, elev = raDec2AltAz(ra_g, dec_g, jd, lat_geocentric, lon)\n\n # Compute the zenith angle\n eta = np.radians(90.0 - elev)\n\n # Numerically correct for zenith attraction\n diff = 10e-5\n zc = eta\n while diff > 10e-6:\n \n # Update the zenith distance\n zc -= diff\n\n # Calculate the zenith attraction correction\n delta_zc = 2*math.atan((v_init - vg)*math.tan(zc/2.0)/(v_init + vg))\n diff = zc + delta_zc - eta\n\n\n # Compute the uncorrected geocentric radiant for zenith attraction\n ra, dec = altAz2RADec(azim, 90.0 - np.degrees(zc), jd, lat_geocentric, lon)\n\n ### ###\n\n\n\n # Apply the rotation correction\n if include_rotation:\n\n # Calculate the velocity of the Earth rotation at the position of the reference trajectory point (m/s)\n v_e = 2*math.pi*vectMag(state_vector)*math.cos(np.radians(lat_geocentric))/86164.09053\n\n\n # Calculate the equatorial coordinates of east from the reference position on the trajectory\n azimuth_east = 90.0\n altitude_east = 0\n ra_east, dec_east = altAz2RADec(azimuth_east, altitude_east, jd, lat, lon)\n\n # Compute the radiant vector in ECI coordinates of the apparent radiant\n v_ref_vect = v_init*np.array(raDec2Vector(ra, dec))\n\n\n v_ref_nocorr = np.zeros(3)\n\n # Calculate the derotated reference velocity vector/radiant\n v_ref_nocorr[0] = v_ref_vect[0] + v_e*np.cos(np.radians(ra_east))\n v_ref_nocorr[1] = v_ref_vect[1] + v_e*np.sin(np.radians(ra_east))\n v_ref_nocorr[2] = v_ref_vect[2]\n\n\n # Compute the radiant without Earth's rotation included\n ra_norot, dec_norot = vector2RaDec(vectNorm(v_ref_nocorr))\n v_init_norot = vectMag(v_ref_nocorr)\n\n ra = ra_norot\n dec = dec_norot\n v_init = v_init_norot\n\n\n\n return ra, dec, v_init", "def get_approx_solution(self, solver):\n tour = solver.solve(self)\n print('The cost is {}.'.format(get_cost(tour,self)))\n self.tours[solver.__class__.__name__] = 
tour\n return tour", "def lunarperigee(time):\n dtor = np.pi / 180\n t1 = 1 + time\n t2 = t1 * t1\n t3 = t2 * t1\n perigee = (\n 334.329653 * dtor\n + 4069.0340329575 * dtor * t1\n - 0.010325 * dtor * t2\n - 1.2e-5 * dtor * t3\n )\n return perigee", "def _distance_next(self):\n\n self.distance = 10\n\n # Here a set index to 0 if the car is finishing a lap\n # Also reset the farthest\n if self.index > (len(self.x_trajectory) - 6) and self.closed:\n self.index = 0\n self.farthest = -1\n self.laps += 1\n\n for w in range(self.index, self.index + 20):\n\n self.dist_point = math.sqrt((self.x_trajectory[w] - self.x)**2\n + (self.y_trajectory[w] - self.y)**2)\n\n if self.dist_point < self.distance:\n self.distance = self.dist_point\n self.index = w\n\n if w >= (len(self.x_trajectory) - 1):\n break\n\n self._calc_side()\n\n self.distance = self.distance * self.sign\n\n return self.distance", "def distance_to_current_waypoint(vehicle):\n nextwaypoint = vehicle.commands.next\n if nextwaypoint==0:\n return None\n missionitem=vehicle.commands[nextwaypoint-1] #commands are zero indexed\n lat = missionitem.x\n lon = missionitem.y\n alt = missionitem.z\n targetWaypointLocation = LocationGlobalRelative(lat,lon,alt)\n distancetopoint = get_distance_metres(vehicle.location.global_frame, targetWaypointLocation)\n return distancetopoint", "def lspe_coordinates (self, time):\n\n return (self.base_lat,\n self.base_long\n + time * 2 * np.pi * (1 + 1 / self.rev_days) / SECONDS_PER_DAY)", "def _approx_pot(wair,temp,pres,ppot,airf,dhum):\n pvsat0 = _PTPE*numpy.exp((_AVI+_BVI)*(1 - _TTP/temp)\n + _BVI*numpy.log(_TTP/temp))\n pvmax0 = pres * (1-wair)/(_EPSW*wair + 1-wair)\n if pvmax0 > pvsat0:\n # Parcel starts saturated\n pv0 = pvsat0\n a0 = (pres-pv0) / (pres-pv0 + _EPSW*pv0)\n ceff0 = (wair*_CDRY + wair*(1-a0)/a0*_CVAP + (1-wair/a0)*_CICE\n + wair*_RWAT*(1-a0)/a0*pres/(pres-pv0)\n * ((_AVI+_BVI)*_TTP/temp - _BVI)**2)\n reff0 = wair*(_RDRY + _RWAT*(1-a0)/a0\n + _RWAT*(1-a0)/a0*pres/(pres-pv0) * ((_AVI+_BVI)*_TTP/temp - _BVI))\n pvmaxt = pvmax0 * (_TTP/temp)**(ceff0/reff0)\n ginv0 = ceff0/reff0\n r = (_AVI+_BVI)/(ginv0+_BVI) - 1\n v = numpy.log((_TTP/temp)**ginv0 * pvmax0/_PTPE)/(ginv0+_BVI)\n if pvmaxt > _PTPE or v <= r:\n # Parcel is always ice-saturated\n tpot = temp * (ppot/pres)**(reff0/ceff0)\n pv2 = _PTPE*numpy.exp((_AVI+_BVI)*(1 - _TTP/tpot)\n + _BVI*numpy.log(_TTP/tpot))\n apot = (ppot-pv2) / (ppot-pv2 + _EPSW*pv2)\n else:\n # Find where parcel de-saturates\n x = maths4.lamb2(v,r)\n ticl = _TTP/x\n picl = pres * (ticl/temp)**ginv\n if ppot < picl:\n # Parcel ends saturated\n tpot = temp * (ppot/pres)**(reff0/ceff0)\n pv2 = _PTPE*numpy.exp((_AVI+_BVI)*(1 - _TTP/tpot)\n + _BVI*numpy.log(_TTP/tpot))\n apot = (ppot-pv2) / (ppot-pv2 + _EPSW*pv2)\n else:\n # Parcel ends unsaturated\n p1 = picl\n t1 = ticl\n ceff1 = wair*_CDRY + (1-wair)*_CVAP\n reff1 = wair*_RDRY + (1-wair)*_RWAT\n tpot = t1 * (ppot/p1)**(reff1/ceff1)\n apot = wair\n else:\n # Parcel starts unsaturated\n ticl, picl, __ = iceair4a._approx_icl(wair,temp,pres,dhum)\n if ppot < picl:\n # Parcel ends saturated\n p1 = picl\n t1 = ticl\n pv1 = _PTPE*numpy.exp((_AVI+_BVI)*(1 - _TTP/t1)\n + _BVI*numpy.log(_TTP/t1))\n a1 = (p1-pv1) / (p1-pv1 + _EPSW*pv1)\n ceff1 = (wair*_CDRY + (1-wair)*_CVAP\n + (1-wair)*_RWAT*p1/(p1-pv1) * ((_AVI+_BVI)*_TTP/t1 - _BVI)**2)\n reff1 = (wair*_RDRY + (1-wair)*_RWAT\n + (1-wair)*_RWAT*p1/(p1-pv1) * ((_AVI+_BVI)*_TTP/t1 - _BVI))\n tpot = t1 * (ppot/p1)**(reff1/ceff1)\n pv2 = _PTPE*numpy.exp((_AVI+_BVI)*(1 - _TTP/tpot)\n + 
_BVI*numpy.log(_TTP/tpot))\n apot = (ppot-pv2) / (ppot-pv2 + _EPSW*pv2)\n else:\n # Parcel ends unsaturated\n ceff1 = wair*_CDRY + (1-wair)*_CVAP\n reff1 = wair*_RDRY + (1-wair)*_RWAT\n tpot = temp * (ppot/pres)**(reff1/ceff1)\n apot = wair\n dhpot = ppot/(_RDRY*tpot) / (apot + (1-apot)/_EPSW)\n return apot, tpot, dhpot", "def closest_on_screen_point(trajectory, viewpoint, yaw, gaze_on_screen):\n\n traj_angles = dp.world_to_angles_through_screen(trajectory, viewpoint, yaw) \n #pprint(traj_angles)\n\n #onscreen_idx, dists, *_ = find_closest_index(traj_angles, gaze_on_screen)\n #idx = closest_node(traj_angles, gaze_on_screen)\n idx = find_closest_index(traj_angles, gaze_on_screen)\n # print(idx)\n\n #traj_ref = trajectory[idx, :]\n screen_ref = traj_angles[idx, :]\n world_ref = trajectory[idx, :]\n\n path_dist = ab_path_length(trajectory, viewpoint, world_ref)\n path_dist /= 8.0 #time headway\n\n #plot_traj(screen_ref, gaze_on_screen, traj_angles)\n\n return(idx, screen_ref, world_ref, path_dist)#, traj_angles)", "def position(t):\n return c + tangent_vec * 7 * t ** 2", "def _position_and_velocity(self, jd):\n pos, vel = terra(self.latitude.radians, self.longitude.radians,\n self.elevation.au, jd.gast)\n pos = einsum('ij...,j...->i...', jd.MT, pos)\n vel = einsum('ij...,j...->i...', jd.MT, vel)\n return pos, vel", "def SearchMaxElongation(body, startTime):\n if body == Body.Mercury:\n s1 = 50.0\n s2 = 85.0\n elif body == Body.Venus:\n s1 = 40.0\n s2 = 50.0\n else:\n raise InvalidBodyError()\n syn = _SynodicPeriod(body)\n iter = 1\n while iter <= 2:\n plon = EclipticLongitude(body, startTime)\n elon = EclipticLongitude(Body.Earth, startTime)\n rlon = _LongitudeOffset(plon - elon) # clamp to (-180, +180]\n\n # The slope function is not well-behaved when rlon is near 0 degrees or 180 degrees\n # because there is a cusp there that causes a discontinuity in the derivative.\n # So we need to guard against searching near such times.\n if rlon >= -s1 and rlon < +s1:\n # Seek to the window [+s1, +s2].\n adjust_days = 0.0\n # Search forward for the time t1 when rel lon = +s1.\n rlon_lo = +s1\n # Search forward for the time t2 when rel lon = +s2.\n rlon_hi = +s2\n elif rlon > +s2 or rlon < -s2:\n # Seek to the next search window at [-s2, -s1].\n adjust_days = 0.0\n # Search forward for the time t1 when rel lon = -s2.\n rlon_lo = -s2\n # Search forward for the time t2 when rel lon = -s1.\n rlon_hi = -s1\n elif rlon >= 0.0:\n # rlon must be in the middle of the window [+s1, +s2].\n # Search BACKWARD for the time t1 when rel lon = +s1.\n adjust_days = -syn / 4.0\n rlon_lo = +s1\n rlon_hi = +s2\n # Search forward from t1 to find t2 such that rel lon = +s2.\n else:\n # rlon must be in the middle of the window [-s2, -s1].\n # Search BACKWARD for the time t1 when rel lon = -s2.\n adjust_days = -syn / 4.0\n rlon_lo = -s2\n # Search forward from t1 to find t2 such that rel lon = -s1.\n rlon_hi = -s1\n\n t_start = startTime.AddDays(adjust_days)\n t1 = SearchRelativeLongitude(body, rlon_lo, t_start)\n if t1 is None:\n return None\n\n t2 = SearchRelativeLongitude(body, rlon_hi, t1)\n if t2 is None:\n return None\n\n # Now we have a time range [t1,t2] that brackets a maximum elongation event.\n # Confirm the bracketing.\n m1 = _neg_elong_slope(body, t1)\n if m1 >= 0.0:\n raise InternalError() # there is a bug in the bracketing algorithm!\n\n m2 = _neg_elong_slope(body, t2)\n if m2 <= 0.0:\n raise InternalError() # there is a bug in the bracketing algorithm!\n\n # Use the generic search algorithm to home in on where 
the slope crosses from negative to positive.\n tx = Search(_neg_elong_slope, body, t1, t2, 10.0)\n if tx is None:\n return None\n\n if tx.tt >= startTime.tt:\n return Elongation(body, tx)\n\n # This event is in the past (earlier than startTime).\n # We need to search forward from t2 to find the next possible window.\n # We never need to search more than twice.\n startTime = t2.AddDays(1.0)\n iter += 1", "def test_locate():\n x, y = hyperbolic_location.locate(-.290955, -.08254229)\n print(x, y)", "def findPathToClosestDot(self, gameState):\n\n # Here are some useful elements of the startState\n # startPosition = gameState.getPacmanPosition()\n # food = gameState.getFood()\n # walls = gameState.getWalls()\n # problem = AnyFoodSearchProblem(gameState)\n\n # *** Your Code Here ***\n problem = AnyFoodSearchProblem(gameState)\n return search.uniformCostSearch(problem)", "def get_closest_waypoint(self, x, y):\n closest_idx = self.waypoint_tree.query([x, y])[1] # ckd tree (1st closest, idx)\n\n # Check if closest waypoint is ahead or behind vehicle\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # Equation for hyperplane through closest_coors\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)\n # Car is ahead of the closest waypoint\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n return closest_idx", "def analyticSol (x):\n\treturn x*(1-x);", "def T_L(Td, taue):\n return np.sqrt(np.pi)/2.0 * taue * np.exp(-(np.pi*taue/(4*Td))*(np.pi*taue/(4*Td)))", "def find_closest_path(self):\n\t\tclosest_distance = sys.maxint\n\t\tclosest_path = 0\n\t\tbike_position = (self.map_model.bike.xB, self.map_model.bike.yB)\n\t\tfor path_index in range(len(self.map_model.paths)):\n\t\t\tnearest_point = geometry.nearest_point_on_path(self.map_model.paths[path_index], bike_position)\n\t\t\tdistance_to_bike = geometry.distance(nearest_point, bike_position)\n\t\t\tif (closest_distance > distance_to_bike):\n\t\t\t\tclosest_distance = distance_to_bike\n\t\t\t\tclosest_path = path_index \n\t\tdisp_next = self.displacement_to_turn(target_path = (closest_path+1)%len(self.map_model.paths))\n\t\ttarget_path = (closest_path+1)%len(self.map_model.paths)\n\t\tdistance_next = geometry.distance_from_path(bike_position, self.map_model.paths[target_path])\n\t\tif disp_next - np.abs(distance_next)>-0.01:\n\t\t\tclosest_path = np.mod(closest_path + 1,len(self.map_model.paths))\n\t\treturn closest_path", "def solveVerticalTrajectory(self,t,T_s,T_i,el,v,coord,alt_sp,v_sp):\n\n bal = sphere_balloon.Sphere_Balloon()\n rad = radiation.Radiation()\n\n T_atm = rad.getTemp(el)\n p_atm = rad.getPressure(el)\n rho_atm = rad.getDensity(el)\n\n rho_int = p_atm/(self.Rsp_air*T_i)\n tm_air = rho_int*self.vol*self.Cp_air0\n\n #Numerically integrate change in Surface Temperature\n coord[\"alt\"] = el\n q_rad = rad.get_rad_total(t,coord)\n q_surf = bal.get_sum_q_surf(q_rad, T_s, el, v)\n q_int = bal.get_sum_q_int(T_s, T_i, el)\n dT_sdt = (q_surf-q_int)/self.k\n\n #Numerically integrate change in Surface Temperature\n tm_air = rho_atm*self.vol*self.Cp_air0\n dT_idt = (q_int-self.get_convection_vent(T_i,el))/tm_air\n\n #Add the new surface and internal Temperatures\n T_s_new = T_s+dT_sdt*self.dt\n T_i_new = T_i+dT_idt*self.dt\n\n #solve for accellration, position, and velocity\n dzdotdt = self.get_acceleration(v,el,T_s,T_i)\n zdot = v + dzdotdt*self.dt\n z = 
el+zdot*self.dt\n\n #Add the new velocity and position\n if z < self.min_alt:\n v_new = 0\n el_new = self.min_alt\n else:\n v_new = zdot\n el_new = z\n\n # Venting commands for an altitude setpoint. Vent is either on or off.\n if el_new > alt_sp:\n self.mdot = self.vent\n\n if el_new < alt_sp:\n self.mdot = 0\n\n return [T_s_new,T_i_new,T_atm,el_new,v_new, q_rad, q_surf, q_int]", "def get_closest_waypoint_idx(self):\n\n # TODO:\n # The churchlot waypoints are roughly circular but have self-\n # intersecting endpoints, so I'm not sure how this code will \n # yield good results. Might need some additional filtering\n # logic to force a choice consistent with the vehicle pose yaw\n # in order to avoid jumping onto the wrong path.\n\n # Vehicle position short reference\n pos = self.pose.pose.position\n\n # Find the closest waypoint index\n # If closest index is zero bump to 1 since we don't want slice for \n # prev_coord to look at the final map waypoint.\n closest_idx = max(self.waypoint_tree.query([pos.x, pos.y], 1)[1], 1)\n\n # Get closest point\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx-1]\n\n # Convert coordinates into 2D numpy vectors\n closest_vec = np.array(closest_coord)\n prev_vec = np.array(prev_coord)\n pos_vec = np.array([pos.x, pos.y])\n\n # Find vec(close-prev) dot vec(pos-close) \n val = np.dot(closest_vec - prev_vec, pos_vec - closest_vec)\n\n # If pos is ahead of closest...\n if val > 0: \n\n # Advance index so that closest is ahead of pos\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n # Return closest index\n return closest_idx", "def getLocalPosition(self, x, y, psi):\n PointAndTangent = self.PointAndTangent\n CompletedFlag = 0\n\n\n\n for i in range(0, PointAndTangent.shape[0]):\n if CompletedFlag == 1:\n break\n\n if PointAndTangent[i, 5] == 0.0: # If segment is a straight line\n # Extract the first final and initial point of the segment\n xf = PointAndTangent[i, 0]\n yf = PointAndTangent[i, 1]\n xs = PointAndTangent[i - 1, 0]\n ys = PointAndTangent[i - 1, 1]\n\n psi_unwrap = np.unwrap([PointAndTangent[i - 1, 2], psi])[1]\n epsi = psi_unwrap - PointAndTangent[i - 1, 2]\n\n # Check if on the segment using angles\n if (la.norm(np.array([xs, ys]) - np.array([x, y]))) == 0:\n s = PointAndTangent[i, 3]\n ey = 0\n CompletedFlag = 1\n\n elif (la.norm(np.array([xf, yf]) - np.array([x, y]))) == 0:\n s = PointAndTangent[i, 3] + PointAndTangent[i, 4]\n ey = 0\n CompletedFlag = 1\n else:\n if np.abs(computeAngle( [x,y] , [xs, ys], [xf, yf])) <= np.pi/2 and np.abs(computeAngle( [x,y] , [xf, yf], [xs, ys])) <= np.pi/2:\n v1 = np.array([x,y]) - np.array([xs, ys])\n angle = computeAngle( [xf,yf] , [xs, ys], [x, y])\n s_local = la.norm(v1) * np.cos(angle)\n s = s_local + PointAndTangent[i, 3]\n ey = la.norm(v1) * np.sin(angle)\n\n if np.abs(ey)<= self.halfWidth + self.slack:\n CompletedFlag = 1\n\n else:\n xf = PointAndTangent[i, 0]\n yf = PointAndTangent[i, 1]\n xs = PointAndTangent[i - 1, 0]\n ys = PointAndTangent[i - 1, 1]\n\n r = 1 / PointAndTangent[i, 5] # Extract curvature\n if r >= 0:\n direction = 1\n else:\n direction = -1\n\n ang = PointAndTangent[i - 1, 2] # Extract angle of the tangent at the initial point (i-1)\n\n # Compute the center of the arc\n CenterX = xs + np.abs(r) * np.cos(ang + direction * np.pi / 2) # x coordinate center of circle\n CenterY = ys + np.abs(r) * np.sin(ang + direction * np.pi / 2) # y coordinate center of circle\n\n # Check if on the segment using angles\n if (la.norm(np.array([xs, 
ys]) - np.array([x, y]))) == 0:\n ey = 0\n psi_unwrap = np.unwrap([ang, psi])[1]\n epsi = psi_unwrap - ang\n s = PointAndTangent[i, 3]\n CompletedFlag = 1\n elif (la.norm(np.array([xf, yf]) - np.array([x, y]))) == 0:\n s = PointAndTangent[i, 3] + PointAndTangent[i, 4]\n ey = 0\n psi_unwrap = np.unwrap([PointAndTangent[i, 2], psi])[1]\n epsi = psi_unwrap - PointAndTangent[i, 2]\n CompletedFlag = 1\n else:\n arc1 = PointAndTangent[i, 4] * PointAndTangent[i, 5]\n arc2 = computeAngle([xs, ys], [CenterX, CenterY], [x, y])\n if np.sign(arc1) == np.sign(arc2) and np.abs(arc1) >= np.abs(arc2):\n v = np.array([x, y]) - np.array([CenterX, CenterY])\n s_local = np.abs(arc2)*np.abs(r)\n s = s_local + PointAndTangent[i, 3]\n ey = -np.sign(direction) * (la.norm(v) - np.abs(r))\n psi_unwrap = np.unwrap([ang + arc2, psi])[1]\n epsi = psi_unwrap - (ang + arc2)\n\n if np.abs(ey) <= self.halfWidth + self.slack: # OUT OF TRACK!!\n CompletedFlag = 1\n\n # if epsi>1.0:\n # print \"epsi Greater then 1.0\"\n # pdb.set_trace()\n\n if CompletedFlag == 0:\n s = 10000\n ey = 10000\n epsi = 10000\n #print \"Error!! POINT OUT OF THE TRACK!!!! <==================\"\n # pdb.set_trace()\n\n return s, ey, epsi, CompletedFlag", "def OrbitPos(self, rv, t, m):\n \n params = np.array(rv)\n params = params.flatten()\n \n def GravityODE(rv,t):\n G = 6.67e-11\n m = 5.972e24\n x = rv[0]\n y = rv[1]\n vx = rv[2]\n vy = rv[3]\n \n dvydt = -((G*m*y)/((x**2+y**2)**(3/2)))\n dvxdt = -((G*m*x)/((x**2+y**2)**(3/2)))\n dxdt = vx\n dydt = vy\n\n pos_derivs = np.array([dxdt,dydt])\n v_deriv = np.array([dvxdt,dvydt])\n derivs = np.hstack((pos_derivs,v_deriv))\n \n return derivs \n \n satellite_orbit = integrate.odeint(GravityODE,params,t)\n \n return satellite_orbit[:,0],satellite_orbit[:,1]", "def extrapolate_littrow_sol(p, loc, ll, iteration=0):\n\n func_name = __NAME__ + '.extrapolate_littrow_sol()'\n # get parameters from p\n fit_degree = p['IC_LITTROW_ORDER_FIT_DEG']\n t_order_start = p['IC_HC_T_ORDER_START']\n n_order_init = p['IC_LITTROW_ORDER_INIT_{0}'.format(iteration)]\n\n # get parameters from loc\n litt_param = loc['LITTROW_PARAM_{0}'.format(iteration)]\n\n # get the dimensions of the data\n ydim, xdim = loc['HCDATA'].shape\n # get the pixel positions\n x_points = np.arange(xdim)\n # construct the Littrow cut points (in pixels)\n x_cut_points = loc['X_CUT_POINTS_{0}'.format(iteration)]\n # construct the Littrow cut points (in wavelength)\n ll_cut_points = ll[n_order_init][x_cut_points]\n\n # set up storage\n littrow_extrap = np.zeros((ydim, len(x_cut_points)), dtype=float)\n littrow_extrap_sol = np.zeros_like(loc['HCDATA'])\n littrow_extrap_param = np.zeros((ydim, fit_degree + 1), dtype=float)\n\n # calculate the echelle order position for this order\n echelle_order_nums = t_order_start - np.arange(ydim)\n # calculate the inverse echelle order nums\n inv_echelle_order_nums = 1.0 / echelle_order_nums\n\n # loop around the x cut points\n for it in range(len(x_cut_points)):\n # evaluate the fit for this x cut (fractional wavelength contrib.)\n cfit = np.polyval(litt_param[it][::-1], inv_echelle_order_nums)\n # evaluate littrow fit for x_cut_points on each order (in wavelength)\n litt_extrap_o = cfit * ll_cut_points[it]\n # add to storage\n littrow_extrap[:, it] = litt_extrap_o\n\n for order_num in range(ydim):\n # fit the littrow extrapolation\n param = nanpolyfit(x_cut_points, littrow_extrap[order_num],\n fit_degree)[::-1]\n # add to storage\n littrow_extrap_param[order_num] = param\n # evaluate the polynomial for all pixels in 
data\n littrow_extrap_sol[order_num] = np.polyval(param[::-1], x_points)\n\n # add to storage\n loc['LITTROW_EXTRAP_{0}'.format(iteration)] = littrow_extrap\n loc['LITTROW_EXTRAP_SOL_{0}'.format(iteration)] = littrow_extrap_sol\n loc['LITTROW_EXTRAP_PARAM_{0}'.format(iteration)] = littrow_extrap_param\n\n sources = ['LITTROW_EXTRAP_{0}'.format(iteration),\n 'LITTROW_EXTRAP_SOL_{0}'.format(iteration),\n 'LITTROW_EXTRAP_PARAM_{0}'.format(iteration)]\n loc.set_sources(sources, func_name)\n # return loc\n return loc", "def estimate_lwdown(tairK, rh):\n zeroC = 273.15\n\n sat_vapress = 611.2 * np.exp(17.67 * ((tairK - zeroC) / (tairK - 29.65)))\n vapress = np.maximum(5.0, rh) / 100. * sat_vapress\n lw_down = 2.648 * tairK + 0.0346 * vapress - 474.0\n\n return lw_down", "def __get_closest_waypoint_index(self, x, y):\n return self.__waypoint_tree.query([x, y], 1)[1]", "def calc_location_response(longitude, latitude, arms, opening=90.):\n phi = radians(longitude)\n theta = radians(latitude)\n angle = radians(arms)\n op = radians(opening)\n r = 6.4e6\n location = r * xyz(phi, theta)\n r_hat = location / linalg.norm(location)\n # Take North, project onto earth's surface...\n e_n = array([0,0,1])\n e_n = e_n - r_hat * inner(e_n, r_hat)\n # normalize\n e_n = e_n / linalg.norm(e_n)\n # and calculate east\n e_e = cross(e_n, r_hat)\n # Calculate arm vectors\n u_y = e_e * sin(angle) + e_n * cos(angle)\n u_x = e_e * sin(angle + op) + e_n * cos(angle + op)\n response = array(1./2 * (outer(u_x, u_x) - outer(u_y, u_y)), dtype=float32)\n return location, response", "def test_lfc_pos_area_below_lcl():\n p = [902.1554, 897.9034, 893.6506, 889.4047, 883.063, 874.6284, 866.2387, 857.887,\n 849.5506, 841.2686, 833.0042, 824.7891, 812.5049, 796.2104, 776.0027, 751.9025,\n 727.9612, 704.1409, 680.4028, 656.7156, 629.077, 597.4286, 565.6315, 533.5961,\n 501.2452, 468.493, 435.2486, 401.4239, 366.9387, 331.7026, 295.6319, 258.6428,\n 220.9178, 182.9384, 144.959, 106.9778, 69.00213] * units.hPa\n t = [-3.039381, -3.703779, -4.15996, -4.562574, -5.131827, -5.856229, -6.568434,\n -7.276881, -7.985013, -8.670911, -8.958063, -7.631381, -6.05927, -5.083627,\n -5.11576, -5.687552, -5.453021, -4.981445, -5.236665, -6.324916, -8.434324,\n -11.58795, -14.99297, -18.45947, -21.92021, -25.40522, -28.914, -32.78637,\n -37.7179, -43.56836, -49.61077, -54.24449, -56.16666, -57.03775, -58.28041,\n -60.86264, -64.21677] * units.degC\n td = [-22.08774, -22.18181, -22.2508, -22.31323, -22.4024, -22.51582, -22.62526,\n -22.72919, -22.82095, -22.86173, -22.49489, -21.66936, -21.67332, -21.94054,\n -23.63561, -27.17466, -31.87395, -38.31725, -44.54717, -46.99218, -43.17544,\n -37.40019, -34.3351, -36.42896, -42.1396, -46.95909, -49.36232, -48.94634,\n -47.90178, -49.97902, -55.02753, -63.06276, -72.53742, -88.81377, -93.54573,\n -92.92464, -91.57479] * units.degC\n prof = parcel_profile(p, t[0], td[0]).to('degC')\n lfc_p, lfc_t = lfc(p, t, td, prof)\n assert_nan(lfc_p, p.units)\n assert_nan(lfc_t, t.units)", "def get_probe_location(self):\n\n probe_x, probe_y = self.position\n\n if self.previous_direction == (1, 0):\n probe_x += CAR_LENGTH - 1\n elif self.previous_direction == (0, 1):\n probe_y += CAR_LENGTH - 1\n\n return probe_x, probe_y", "def get_wind(self, time, alt, lat, lng, pressure_heights=None):\n t_val = time / 3.0\n t_idx = int(t_val)\n t_lerp = t_val - t_idx\n t_lerp_m = 1.0 - t_lerp\n \n lat_val = (lat + 90.0) * 2.0\n lat_idx = int(lat_val)\n lat_lerp = lat_val - lat_idx\n lat_lerp_m = 1.0 - lat_lerp\n\n lng_val = lng * 2.0\n 
lng_idx = int(lng_val)\n lng_lerp = lng_val - lng_idx\n lng_lerp_m = 1.0 - lng_lerp\n \n if pressure_heights is None:\n pressure_heights = self.get_pressure_heights(time, lat, lng)\n\n p_idx = bisect.bisect(pressure_heights, alt) - 1\n\n if p_idx < 0:\n p_idx = 0\n elif p_idx > self.shape[1] - 1:\n p_idx = self.shape[1] - 2\n\n a_llll = self._read_var(t_idx, p_idx, 0, lat_idx, lng_idx)\n a_lllr = self._read_var(t_idx, p_idx, 0, lat_idx, lng_idx + 1)\n a_llrl = self._read_var(t_idx, p_idx, 0, lat_idx + 1, lng_idx)\n a_llrr = self._read_var(t_idx, p_idx, 0, lat_idx + 1, lng_idx + 1)\n a_lrll = self._read_var(t_idx, p_idx + 1, 0, lat_idx, lng_idx)\n a_lrlr = self._read_var(t_idx, p_idx + 1, 0, lat_idx, lng_idx + 1)\n a_lrrl = self._read_var(t_idx, p_idx + 1, 0, lat_idx + 1, lng_idx)\n a_lrrr = self._read_var(t_idx, p_idx + 1, 0, lat_idx + 1, lng_idx + 1)\n a_rlll = self._read_var(t_idx + 1, p_idx, 0, lat_idx, lng_idx)\n a_rllr = self._read_var(t_idx + 1, p_idx, 0, lat_idx, lng_idx + 1)\n a_rlrl = self._read_var(t_idx + 1, p_idx, 0, lat_idx + 1, lng_idx)\n a_rlrr = self._read_var(t_idx + 1, p_idx, 0, lat_idx + 1, lng_idx + 1)\n a_rrll = self._read_var(t_idx + 1, p_idx + 1, 0, lat_idx, lng_idx)\n a_rrlr = self._read_var(t_idx + 1, p_idx + 1, 0, lat_idx, lng_idx + 1)\n a_rrrl = self._read_var(t_idx + 1, p_idx + 1, 0, lat_idx + 1, lng_idx)\n a_rrrr = self._read_var(t_idx + 1, p_idx + 1, 0, lat_idx + 1,\n lng_idx + 1)\n\n u_llll = self._read_var(t_idx, p_idx, 1, lat_idx, lng_idx)\n u_lllr = self._read_var(t_idx, p_idx, 1, lat_idx, lng_idx + 1)\n u_llrl = self._read_var(t_idx, p_idx, 1, lat_idx + 1, lng_idx)\n u_llrr = self._read_var(t_idx, p_idx, 1, lat_idx + 1, lng_idx + 1)\n u_lrll = self._read_var(t_idx, p_idx + 1, 1, lat_idx, lng_idx)\n u_lrlr = self._read_var(t_idx, p_idx + 1, 1, lat_idx, lng_idx + 1)\n u_lrrl = self._read_var(t_idx, p_idx + 1, 1, lat_idx + 1, lng_idx)\n u_lrrr = self._read_var(t_idx, p_idx + 1, 1, lat_idx + 1, lng_idx + 1)\n u_rlll = self._read_var(t_idx + 1, p_idx, 1, lat_idx, lng_idx)\n u_rllr = self._read_var(t_idx + 1, p_idx, 1, lat_idx, lng_idx + 1)\n u_rlrl = self._read_var(t_idx + 1, p_idx, 1, lat_idx + 1, lng_idx)\n u_rlrr = self._read_var(t_idx + 1, p_idx, 1, lat_idx + 1, lng_idx + 1)\n u_rrll = self._read_var(t_idx + 1, p_idx + 1, 1, lat_idx, lng_idx)\n u_rrlr = self._read_var(t_idx + 1, p_idx + 1, 1, lat_idx, lng_idx + 1)\n u_rrrl = self._read_var(t_idx + 1, p_idx + 1, 1, lat_idx + 1, lng_idx)\n u_rrrr = self._read_var(t_idx + 1, p_idx + 1, 1, lat_idx + 1,\n lng_idx + 1)\n\n v_llll = self._read_var(t_idx, p_idx, 2, lat_idx, lng_idx)\n v_lllr = self._read_var(t_idx, p_idx, 2, lat_idx, lng_idx + 1)\n v_llrl = self._read_var(t_idx, p_idx, 2, lat_idx + 1, lng_idx)\n v_llrr = self._read_var(t_idx, p_idx, 2, lat_idx + 1, lng_idx + 1)\n v_lrll = self._read_var(t_idx, p_idx + 1, 2, lat_idx, lng_idx)\n v_lrlr = self._read_var(t_idx, p_idx + 1, 2, lat_idx, lng_idx + 1)\n v_lrrl = self._read_var(t_idx, p_idx + 1, 2, lat_idx + 1, lng_idx)\n v_lrrr = self._read_var(t_idx, p_idx + 1, 2, lat_idx + 1, lng_idx + 1)\n v_rlll = self._read_var(t_idx + 1, p_idx, 2, lat_idx, lng_idx)\n v_rllr = self._read_var(t_idx + 1, p_idx, 2, lat_idx, lng_idx + 1)\n v_rlrl = self._read_var(t_idx + 1, p_idx, 2, lat_idx + 1, lng_idx)\n v_rlrr = self._read_var(t_idx + 1, p_idx, 2, lat_idx + 1, lng_idx + 1)\n v_rrll = self._read_var(t_idx + 1, p_idx + 1, 2, lat_idx, lng_idx)\n v_rrlr = self._read_var(t_idx + 1, p_idx + 1, 2, lat_idx, lng_idx + 1)\n v_rrrl = self._read_var(t_idx + 1, p_idx + 1, 2, 
lat_idx + 1, lng_idx)\n v_rrrr = self._read_var(t_idx + 1, p_idx + 1, 2, lat_idx + 1,\n lng_idx + 1)\n\n a_lll = a_llll * t_lerp_m + a_rlll * t_lerp\n a_llr = a_lllr * t_lerp_m + a_rllr * t_lerp\n a_lrl = a_llrl * t_lerp_m + a_rlrl * t_lerp\n a_lrr = a_llrr * t_lerp_m + a_rlrr * t_lerp\n a_rll = a_lrll * t_lerp_m + a_rrll * t_lerp\n a_rlr = a_lrlr * t_lerp_m + a_rrlr * t_lerp\n a_rrl = a_lrrl * t_lerp_m + a_rrrl * t_lerp\n a_rrr = a_lrrr * t_lerp_m + a_rrrr * t_lerp\n\n u_lll = u_llll * t_lerp_m + u_rlll * t_lerp\n u_llr = u_lllr * t_lerp_m + u_rllr * t_lerp\n u_lrl = u_llrl * t_lerp_m + u_rlrl * t_lerp\n u_lrr = u_llrr * t_lerp_m + u_rlrr * t_lerp\n u_rll = u_lrll * t_lerp_m + u_rrll * t_lerp\n u_rlr = u_lrlr * t_lerp_m + u_rrlr * t_lerp\n u_rrl = u_lrrl * t_lerp_m + u_rrrl * t_lerp\n u_rrr = u_lrrr * t_lerp_m + u_rrrr * t_lerp\n\n v_lll = v_llll * t_lerp_m + v_rlll * t_lerp\n v_llr = v_lllr * t_lerp_m + v_rllr * t_lerp\n v_lrl = v_llrl * t_lerp_m + v_rlrl * t_lerp\n v_lrr = v_llrr * t_lerp_m + v_rlrr * t_lerp\n v_rll = v_lrll * t_lerp_m + v_rrll * t_lerp\n v_rlr = v_lrlr * t_lerp_m + v_rrlr * t_lerp\n v_rrl = v_lrrl * t_lerp_m + v_rrrl * t_lerp\n v_rrr = v_lrrr * t_lerp_m + v_rrrr * t_lerp\n\n a_ll = a_lll * lat_lerp_m + a_lrl * lat_lerp\n a_lr = a_llr * lat_lerp_m + a_lrr * lat_lerp\n a_rl = a_rll * lat_lerp_m + a_rrl * lat_lerp\n a_rr = a_rlr * lat_lerp_m + a_rrr * lat_lerp\n\n u_ll = u_lll * lat_lerp_m + u_lrl * lat_lerp\n u_lr = u_llr * lat_lerp_m + u_lrr * lat_lerp\n u_rl = u_rll * lat_lerp_m + u_rrl * lat_lerp\n u_rr = u_rlr * lat_lerp_m + u_rrr * lat_lerp\n\n v_ll = v_lll * lat_lerp_m + v_lrl * lat_lerp\n v_lr = v_llr * lat_lerp_m + v_lrr * lat_lerp\n v_rl = v_rll * lat_lerp_m + v_rrl * lat_lerp\n v_rr = v_rlr * lat_lerp_m + v_rrr * lat_lerp\n\n a_l = a_ll * lng_lerp_m + a_lr * lng_lerp\n a_r = a_rl * lng_lerp_m + a_rr * lng_lerp\n\n u_l = u_ll * lng_lerp_m + u_lr * lng_lerp\n u_r = u_rl * lng_lerp_m + u_rr * lng_lerp\n\n v_l = v_ll * lng_lerp_m + v_lr * lng_lerp\n v_r = v_rl * lng_lerp_m + v_rr * lng_lerp\n\n p_lerp = ((alt - a_l) / (a_r - a_l))\n p_lerp_m = 1.0 - p_lerp\n\n u = u_l * p_lerp_m + u_r * p_lerp\n v = v_l * p_lerp_m + v_r * p_lerp\n\n return u, v", "def work_dos():\n #potential = 2x**2+x**2y+y**2\n x1,y1 = (2, -3)\n x2,y2 = (-1, 2)\n p1 = (2*(x1**2)) + ((x1**2)*y1) + (y1**2)\n p2 = (2*(x2**2)) + ((x2**2)*y2) + (y2**2)\n sol = p1 - p2\n sol = abs(sol)\n print(f'The vector field F=(4x+2xy,x2+2y) \\n'\n 'along the curve C parametrized by r(t)=(3t−1,−5t+2) \\n '\n f'for 0 ≤ t ≤ 1 is: {sol}')", "def _GetHorizonAnglesLegacy(its_elev, height_cbsd, height_rx, refractivity):\n num_points = int(its_elev[0])\n step = its_elev[1]\n dist = num_points * step\n\n # Find the refractivity at the average terrain height\n start_avg = int(3.0 + 0.1 * num_points)\n end_avg = num_points - start_avg + 6\n zsys = np.mean(its_elev[start_avg-1:end_avg])\n refractivity *= np.exp(-zsys/9460.0)\n\n # Find the ray down-curvature per meter\n gma = 157e-9\n gme = gma*(1.0 - 0.04665 * np.exp(refractivity/179.3))\n\n alt_cbsd = its_elev[2] + height_cbsd\n alt_rx = its_elev[num_points+2] + height_rx\n qc = 0.5 * gme\n q = qc * dist\n # theta0 and theta1 the slopes, dl0 and dl1 the horizon distances\n theta1 = (alt_rx - alt_cbsd) / dist\n theta0 = theta1 - q\n theta1 = -theta1 - q\n dl0 = dist\n dl1 = dist\n\n if num_points >= 2:\n sa = 0.0\n sb = dist\n wq = True\n for i in range(1, num_points):\n sa += step\n sb -= step\n q = its_elev[i+2] - (qc*sa + theta0) * sa - alt_cbsd\n if q > 0.0:\n theta0 += 
q/sa\n dl0 = sa\n wq = False\n if not wq:\n q = its_elev[i+2] - (qc*sb + theta1) * sb - alt_rx\n if q > 0.0:\n theta1 += q/sb\n dl1 = sb\n\n return (np.arctan(theta0) * 180/np.pi,\n np.arctan(theta1) * 180/np.pi,\n dl0,\n dl1)", "def deriv_angvel(self, t: float,endBehavior: str = 'halt') -> Vector3:\n cw = GeodesicTrajectory.deriv(self,t,endBehavior)\n return so3.deskew(cw)", "def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition(self.index)\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState, self.index)\n\n return search.breadthFirstSearch(problem)\n util.raiseNotDefined()", "def get_point_on_rhumb_line(self, distance, bearing):\n\n # bearing in assumed to be in degrees\n bearing_in_radians = math.radians(bearing)\n # ad is the angular distance (in radians) traveled.\n ad = distance/A_WGS84\n # dlat is the net change in latitude\n dlat = ad*math.cos(bearing_in_radians)\n # lat1 is the latitude of the starting point in radians.\n lat1 = math.radians(self.lat)\n # lng1 is the longitude of the starting point in radians.\n lng1 = math.radians(self.lng)\n # The new latitude, in radians, is the starting latitude plus the change in latitude\n lat2 = lat1 + dlat\n \n dPhi = math.log(math.tan(lat2/2+math.pi/4)/math.tan(lat1/2+math.pi/4))\n if dPhi == 0:\n q = math.cos(lat1)\n else:\n q = dlat/dPhi\n # The change in longitude\n dlng = ad*math.sin(bearing_in_radians)/q\n # If the latitude happens to go beyond the pole\n if math.fabs(lat2) > math.pi/2:\n # if the latitude is beyond 90 degrees north\n if lat2>0:\n # latitude should be 90 degree north\n lat2 = math.pi/2\n else:\n # latidude is beyond 90 degree south\n # latitude should be 90 degrees south\n lat2 = -1*math.pi/2\n # The new longitude, in radians \n lng2 = (lng1+dlng+math.pi)%(2*math.pi)-math.pi\n \n lng2d = math.degrees(lng2)\n lat2d = math.degrees(lat2)\n return Point(float(truncate(lng2d,DEGREE_DIGITS)), float(truncate(lat2d,DEGREE_DIGITS)))", "def get_linear_track_pos(self):\r\n return self._arm.get_linear_track_pos()", "def _calc_delta_theta(self):\n\n # Difference between the vehicle angle and the trajectory angle\n next_index = self.index + 5\n\n while next_index >= len(self.x_trajectory):\n next_index = next_index - 1\n\n self.trajec_angle = math.atan2((self.y_trajectory[next_index]\n - self.y_trajectory[self.index]),\n (self.x_trajectory[next_index]\n - self.x_trajectory[self.index]))\n # to set trajec_angle between [0,2pi]\n if self.trajec_angle < 0:\n self.trajec_angle = math.pi + self.trajec_angle + math.pi\n\n self.delta_theta = self.trajec_angle - self.theta\n # if the difference is bigger than 180 is because\n # someone went throug a lap\n\n if self.delta_theta > math.pi:\n self.delta_theta = self.delta_theta - 2 * math.pi\n\n if self.delta_theta < -math.pi:\n self.delta_theta = self.delta_theta + 2 * math.pi\n\n return self.delta_theta", "def calculate_target_location(self, alphas, epsilons, data_collected):\n if len(alphas) == 1:\n \tfor i in range(0, self.number_sampling_points-1):\n \t\talphas.append(alphas[0])\n \t\tepsilons.append(epsilons[0])\n\n # if self.target_location == None:\n # # dBm_list = []\n # # for sample in data_collected[0][3]:\n # # dBm_list.append(sample)\n\n # # average_dBm = sum(dBm_list) / float(len(dBm_list))\n # # radius_target_position = 10 ** ((average_dBm - self.epsilon) / self.alpha)\n # # ###TODO: fix radius_target_position\n # # if radius_target_position 
> self.altitude:\n # # horizontal_distance = sqrt((radius_target_position**2) - (self.altitude**2))\n # # else:\n # # horizontal_distance = 0\n\n # local_position = self.dc.read_gps()\n # local_coord = Coordinate(local_position.lat, local_position.lon)\n\n # first_emulated_target = local_coord.offset_toward_target(self.region.center(), DISTANCE_TO_TARGET)\n\n # self.log.debug('=========================================================================')\n # self.log.debug('Calculated emulated target at location: {}'.format(first_emulated_target))\n # self.log.debug('=========================================================================')\n\n # return first_emulated_target\n\n # else:\n prediction = predict(dronenum=self.number_sampling_points,\n maxRun=1,\n numIterations=GDParameters.NUM_ITERATIONS,\n numEpoch=GDParameters.NUM_EPOCH,\n threshold=GDParameters.THRESHOLD,\n learning_rate=GDParameters.LEARNING_RATE,\n numberBatch=1,\n data_length=NUMBER_SAMPLES*self.number_sampling_points)\n\n try:\n target = prediction.swarm(drone_data=data_collected,\n alphas=alphas,\n epsilons=epsilons)\n except IndexError:\n self.log.warn('Target localization failed. Data not good enough.')\n return False\n\n computed_target_position = Coordinate(target[0], target[1])\n\n self.log.debug('=========================================================================')\n self.log.debug('Calculated new target at location: {}'.format(computed_target_position))\n\n if IS_SIMULATION:\n error = computed_target_position.distance_to(self.current_simulated_target)\n self.log.debug('Simulated error: {err}, Simulated target has moved {dist} meters to: {loc}'.format(\n err=error,\n dist=self.target_meters_moved,\n loc=self.current_simulated_target\n ))\n self.log.debug('=========================================================================')\n\n if not self.region.contains(computed_target_position) and not IS_SIMULATION:\n self.log.debug('New target is out of region')\n self.log.debug('Setting new target location as the latest one calculated')\n return self.target_location\n\n return computed_target_position", "def g(point, contact_point, force_direction, ball_loc, t):\n # line equation = ball_loc + t*direction\n # distance to the ooi\n #distance = ( np.linalg.norm( np.cross((ball_loc[:2] - point[:2]), force_direction[:2], 0, 0) ) / \n # np.linalg.norm(force_direction[:2]))\n direction = force_direction\n force_direction = force_direction + contact_point\n print force_direction\n distance = np.linalg.norm(np.cross(point[:2] - contact_point[:2], point[:2] -\n force_direction[:2], 0 , 0)) / np.linalg.norm(abs(force_direction[:2] -\n contact_point[:2]))\n #the smaller the distance, the bigger the number\n distance = 100 / distance\n\n global accuracy_point \n accuracy_point= accuracy_point + [distance]\n\n retract_distance_x = math.sqrt(np.vdot(contact_point[0] - point[0],\n contact_point[0] - point[0]))\n retract_distance_y = math.sqrt(np.vdot(contact_point[1] - point[1],\n contact_point[1] - point[1]))\n retract_distance_z = math.sqrt(np.vdot(contact_point[2] - point[2], contact_point[2] - point[2]))\n global xy\n xy = xy + [retract_distance_x + retract_distance_y]\n global z \n z = z + [retract_distance_z * 0.3]\n\n retract_distance = 0\n # the retraction distance gets favored in the x and y directions\n retract_distance = (direction[0] * retract_distance_x +\n direction[1] *\n retract_distance_y + 0.3 * retract_distance_z)\n #force_direction[1] * retract_distance_y + force_direction[2] * retract_distance_z)\n global 
distance_point \n print retract_distance\n distance_point = distance_point + [np.ndarray.tolist(retract_distance)[0][0]]\n return (retract_distance, distance)", "def closest_interpolated_distance(start_log, end_log, waypoint, utm):\n # If no provided end, use start distance.\n if end_log is None:\n return start_log.uas_position.distance_to(waypoint.position)\n\n # Verify that aircraft velocity is within bounds. Don't interpolate if\n # it isn't because the data is probably erroneous.\n d = start_log.uas_position.distance_to(end_log.uas_position)\n t = (end_log.timestamp - start_log.timestamp).total_seconds()\n if (t > settings.MAX_TELMETRY_INTERPOLATE_INTERVAL_SEC or\n (d / t) > settings.MAX_AIRSPEED_FT_PER_SEC):\n return start_log.uas_position.distance_to(waypoint.position)\n\n def uas_position_to_tuple(pos):\n return (pos.gps_position.latitude, pos.gps_position.longitude,\n pos.altitude_msl)\n\n # Linearly interpolate between start and end telemetry and find the\n # closest distance to the waypoint.\n start = uas_position_to_tuple(start_log.uas_position)\n end = uas_position_to_tuple(end_log.uas_position)\n point = uas_position_to_tuple(waypoint.position)\n return distance.distance_to_line(start, end, point, utm)", "def _phi2psi(self):\n try:\n locq = self.param_q(self.rhotor)\n except:\n self._readeqdsk(self.shot)\n locq = self.param_q(self.rhotor)\n \n locphi = self.rhotor**2\n psi = integrate.cumtrapz(1/locq,locphi)\n psi = np.concatenate([[0], psi])\n psi = psi/max(psi)\n self.param_psi = interpolate.interp1d(self.rhotor, psi) \n \n\n # tmpnum=100000\n # locq = self.param_q(np.linspace(0,1,tmpnum)) #augmenting precision near the core\n # locphi = self.rhotor**2\n # locphi_p = interpolate.interp1d(np.linspace(0,1,len(locphi)),locphi)\n # locphi = locphi_p(np.linspace(0,1,tmpnum))\n # psi = integrate.cumtrapz(1/locq,locphi)\n # psi = np.concatenate([[0], psi])\n # psi = psi/max(psi)\n # rhopsi = psi**0.5\n # self.param_psi = interpolate.interp1d(np.linspace(0,1,tmpnum), rhopsi)", "def pos(self, time):\n if (time < self.ti):\n t = 0\n elif (time > self.tf):\n t = self.tf - self.ti\n else:\n t = time - self.ti\n return self.a0 + self.a1 * t + self.a2 * pow(t, 2) + self.a3 * pow(t, 3) + self.a4 * pow(t, 4) + self.a5 * pow(t, 5)", "def track_closest_aircraft(latitude, longitude, elevation, host, port, q):\n try:\n d = Dump1090(host, port)\n q.put(True)\n except IOError as exc:\n q.put(exc)\n return\n\n target = None\n target_distance = None\n aad = AzimuthAltitudeDistance(latitude, longitude, elevation)\n\n try:\n for aircraft in d.updates():\n lat, lng = aircraft.position\n azimuth, altitude, distance = aad.calculate(\n lat,\n lng,\n aircraft.altitude\n )\n\n # if we don't have a target we do now\n # or target is old, then use this new aircraft\n # or new aircraft isn't the target, but it is closer, so we switch!\n if (target is None or target.age > 60 or\n (target.icao != aircraft.icao and distance < target_distance)):\n target = aircraft\n\n # if we aren't the target at this point then bail\n if target.icao != aircraft.icao:\n continue\n\n target = aircraft\n target_distance = distance\n q.put((target, azimuth, altitude, distance))\n except KeyboardInterrupt:\n pass", "def _hill_diff_diff(self, position):\n if position < 0:\n return 2\n else:\n return position * ((75 * (position ** 2)/((1 + 5 * position**2)**2.5)) - 5/((1 + 5 * position ** 2)**2.5)) \\\n - 10 * position/((1 + 5 * position ** 2)**1.5)", "def solar_angles(df, lat, lon, alt=0):\n\n jd = 
pd.Timestamp(df).to_julian_date()\n\n # offset (2451543.5)\n d_offset = pd.Timestamp('1999-12-31 00:00:00').to_julian_date()\n\n d = jd - d_offset\n\n\n # Keplerian elements for the sun (geocentric)\n w = 282.9404 + 4.70935E-5 * d # longitude of perihelion [degrees]\n a = 1.0 # mean distance [AU]\n e = 0.016709 - 1.151E-9 * d # eccentricity [-]\n M = np.mod(356.0470 + 0.9856002585 * d, 360.0) # mean anomaly [degrees]\n L = w + M # Sun's mean longitude [degrees]\n oblecl = 23.4393 - 3.563E-7 * d # Sun's obliquity of the eliptic [degrees]\n\n # Auxiliary angle [degrees]\n E = M + (180.0 / np.pi) * e * np.sin(np.deg2rad(M)) * (1.0 + e * np.cos(np.deg2rad(M)))\n\n # Rectangular coordinates in the plane of the ecliptic (x-axis toward perihelion)\n x = np.cos(np.deg2rad(E)) - e\n y = np.sin(np.deg2rad(E)) * np.sqrt(1 - (e ** 2))\n\n # Distance (r) and true anomaly (v)\n r = np.sqrt((x ** 2) + (y ** 2))\n v = np.rad2deg(np.arctan2(y, x))\n\n # Longitude of the sun\n lon_sun = v + w\n\n # Ecliptic rectangular coordinates\n xeclip = r * np.cos(np.deg2rad(lon_sun))\n yeclip = r * np.sin(np.deg2rad(lon_sun))\n zeclip = 0.0\n\n # Rotate coordinates to equatorial rectangular coordinates\n xequat = xeclip\n yequat = yeclip * np.cos(np.deg2rad(oblecl)) + zeclip * np.sin(np.deg2rad(oblecl))\n zequat = yeclip * np.sin(np.deg2rad(23.4406)) + zeclip * np.cos(np.deg2rad(oblecl))\n\n # Convert equatorial rectangular coordinates to right-ascension (RA) and declination\n r = np.sqrt(xequat ** 2 + yequat ** 2 + zequat ** 2) - (alt / 149598000.0)\n RA = np.rad2deg(np.arctan2(yequat, xequat))\n delta = np.rad2deg(np.arcsin(zequat / r))\n\n # Calculate local siderial time\n uth = df.hour + (df.minute / 60.0) + (df.second / 3600.0)\n gmst0 = np.mod(L + 180.0, 360.0) / 15.0\n sidtime = gmst0 + uth + (lon / 15.0)\n\n # Replace RA with hour-angle (HA)\n HA = sidtime * 15.0 - RA\n\n # Convert to rectangular coordinates\n x = np.cos(np.deg2rad(HA)) * np.cos(np.deg2rad(delta))\n y = np.sin(np.deg2rad(HA)) * np.cos(np.deg2rad(delta))\n z = np.sin(np.deg2rad(delta))\n\n # Rotate along an axis going East-West\n xhor = x * np.cos(np.deg2rad(90.0 - lat)) - z * np.sin(np.deg2rad(90.0 - lat))\n yhor = y\n zhor = x * np.sin(np.deg2rad(90.0 - lat)) + z * np.cos(np.deg2rad(90.0 - lat))\n\n # Find azimuthal and elevation angles\n azimuthal = np.rad2deg(np.arctan2(yhor, xhor)) + 180.0\n elevation = np.rad2deg(np.arcsin(zhor))\n\n zenith = 90.0 - elevation\n\n return np.column_stack((zenith, elevation, azimuthal))", "def target_position(self, time):\n # get joint positions and use fk to get end effector position?\n # ar_tag from topic\n\n cur_pos = self.target_velocity(time)*time + self.start_pos\n\n self.points_generated.append(cur_pos)\n #print(self.start_pos)\n # print(cur_pos)\n return cur_pos", "def Findlt(l,sp,rhs):\n m = sp.M(l)\n return (m / l**3) - rhs", "def getOrdinate(self):\n return self.point.y - self.slope * self.point.x", "def ecliptic_position(self):\n vector = _ECLIPJ2000.dot(self.position.au)\n return Distance(vector)", "def _plunge_distance(self, volume):\n percent = self._volume_percentage(volume)\n top = self._get_plunger_position('top')\n bottom = self._get_plunger_position('bottom')\n travel = bottom - top\n if travel <= 0:\n self.robot.add_warning('Plunger calibrated incorrectly')\n return travel * percent", "def getPointAwayFrom(startPoint, direction, distance):\n x = vectorMultiply(direction, distance)\n return vectorAdd(startPoint, x)", "def findPotential(L, boundaryConditions, Minv = None):\n\tX = 
findStableState(L, boundaryConditions, Minv)\n\treturn np.trace(X.T.dot(L).dot(X))", "def FindClosestPointWithinRadius(self, p_float, , p_float_4):\n ...", "def _get_next_position(x: float, y: float, heading: float, state: str, hp_info:\n pd.DataFrame, rw_info: pd.DataFrame, ac: int,\n CURR_LANDING_AC) -> Tuple[float, float, float, str]:\n\n if state == \"A\":\n\n radius = np.sqrt(x ** 2 + y ** 2)\n\n min_R = CONTROL_ZONE_RADIUS - MIN_SEPARATION - POSIITION_TOLERANCE\n max_R = CONTROL_ZONE_RADIUS - MIN_SEPARATION + POSIITION_TOLERANCE\n\n if (min_R < radius) | (radius < max_R):\n\n hp_ind = _get_closest_control_zone(x, y, hp_info)\n\n if hp_info[2][hp_ind] == 0:\n\n state_new = \"C\"\n heading_new = _get_ac_heading(hp_info[0][hp_ind] - x, hp_info[1][hp_ind] - y)\n\n else:\n\n state_new = \"B\"\n heading_new = (hp_info[2][hp_ind] + np.pi / 2) % (2 * np.pi)\n\n else:\n\n state_new = \"A\"\n heading_new = heading\n\n x_new = x + MAX_SPEED * np.sin(heading_new) / TIME_STEP_FREQUENCY\n y_new = y + MAX_SPEED * np.cos(heading_new) / TIME_STEP_FREQUENCY\n\n elif state == \"B\":\n\n hp_ind = _get_closest_control_zone(x, y, hp_info)\n\n if hp_info[2][hp_ind] == 0:\n\n state_new = \"C\"\n heading_new = _get_ac_heading(hp_info[0][hp_ind] - x, hp_info[1][hp_ind] - y)\n\n else:\n\n state_new = \"B\"\n heading_new = heading - MAX_SPEED / (TIME_STEP_FREQUENCY * (CONTROL_ZONE_RADIUS - MIN_SEPARATION))\n\n x_new = x + MAX_SPEED * np.sin(heading_new) / TIME_STEP_FREQUENCY\n y_new = y + MAX_SPEED * np.cos(heading_new) / TIME_STEP_FREQUENCY\n\n elif state == \"C\":\n\n hp_ind = _get_closest_control_zone(x, y, hp_info)\n dist = np.sqrt((hp_info[0][hp_ind] - x) ** 2 + (hp_info[1][hp_ind] - y) ** 2)\n\n if dist < POSIITION_TOLERANCE + 1:\n\n state_new = \"D\"\n heading_new = heading\n\n x_new = x\n y_new = y\n\n else:\n\n state_new = \"C\"\n heading_new = heading\n\n x_new = x + MAX_SPEED * np.sin(heading_new) / TIME_STEP_FREQUENCY\n y_new = y + MAX_SPEED * np.cos(heading_new) / TIME_STEP_FREQUENCY\n\n elif state == \"D\":\n\n if ac == CURR_LANDING_AC:\n\n row_ind, x_ind, y_ind = _get_closest_threshold(x, y, rw_info)\n\n state_new = \"E\"\n heading_new = _get_ac_heading(rw_info[x_ind][row_ind] - x, rw_info[y_ind][row_ind] - y)\n\n x_new = x + MAX_SPEED * np.sin(heading_new) / TIME_STEP_FREQUENCY\n y_new = y + MAX_SPEED * np.cos(heading_new) / TIME_STEP_FREQUENCY\n\n else:\n\n state_new = \"D\"\n heading_new = heading\n\n x_new = x\n y_new = y\n\n elif state == \"E\":\n\n row_ind, x_ind, y_ind = _get_closest_threshold(x, y, rw_info)\n dist = np.sqrt((rw_info[x_ind][row_ind] - x) ** 2 + (rw_info[y_ind][row_ind] - y) ** 2)\n\n if (dist < MIN_SEPARATION) | (CURR_LANDING_AC == ac):\n\n x_ind = 0 if x_ind == 2 else 2\n y_ind = 1 if y_ind == 3 else 3\n\n CURR_LANDING_AC += 1\n\n state_new = \"F\"\n heading_new = _get_ac_heading(rw_info[x_ind][row_ind] - x, rw_info[y_ind][row_ind] - y)\n\n x_new = x + MAX_SPEED * np.sin(heading_new) / TIME_STEP_FREQUENCY\n y_new = y + MAX_SPEED * np.cos(heading_new) / TIME_STEP_FREQUENCY\n\n else:\n\n state_new = \"E\"\n heading_new = heading\n\n x_new = x + MAX_SPEED * np.sin(heading_new) / TIME_STEP_FREQUENCY\n y_new = y + MAX_SPEED * np.cos(heading_new) / TIME_STEP_FREQUENCY\n\n elif state == \"F\":\n\n row_ind, x_ind, y_ind = _get_closest_threshold(x, y, rw_info)\n dist = np.sqrt((rw_info[x_ind][row_ind] - x) ** 2 + (rw_info[y_ind][row_ind] - y) ** 2)\n\n if abs(dist - RUNWAY_LENGTH / 2) < POSIITION_TOLERANCE:\n x_new, y_new, heading_new, state_new = -1, -1, -1, \"END\"\n\n else:\n\n 
state_new = \"F\"\n heading_new = heading\n\n x_new = x + MAX_SPEED * np.sin(heading_new) / TIME_STEP_FREQUENCY\n y_new = y + MAX_SPEED * np.cos(heading_new) / TIME_STEP_FREQUENCY\n\n else:\n\n x_new, y_new, heading_new, state_new = -1, -1, -1, \"END\"\n\n return x_new, y_new, heading_new, state_new", "def getLinescanPos(self):\n return self.handle.pos().toPoint()", "def find_closest_trajectory_pose(self):\n np_state = numpy.array([[self.x], [self.y]])\n temp_distance = numpy.sum(\n (self.np_trajectory[0:2, :] - np_state) ** 2,\n axis=0)\n best_idx = numpy.argmin(temp_distance)\n return best_idx", "def get_lift(self):\n return 0.0", "def find_rpt_coords(self) -> (int, int):\n start_size = self.size\n end_size = self.size + len(self.allele)\n coord = self.coord\n fasta_alt = self.fasta_alt\n while self.allele == fasta_alt:\n coord += len(self.allele)\n start_size += len(self.allele)\n end_size += len(self.allele)\n fasta_alt = self.seq[start_size:end_size]\n new_start = coord - len(self.allele)\n new_end = new_start + len(self.allele) - 1\n return new_start, new_end", "def approximate_position(self, at_time: int) -> BasePosition:\n pass", "def get_closest_waypoint_idx(self):\n\n # Position\n x = self.car_pose.pose.position.x\n y = self.car_pose.pose.position.y\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n\n # Coordinates\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # Hyper Plane\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)\n\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n return closest_idx", "def search_parking_lot(self):\n\n self.start_driving()\n self.velocity = 8\n self.distance = 250 # maximum searching distance\n self.angle = 1.5 # TODO\n self.drive_thread.reset()\n\n vacant_distance = 0\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)\n\n if self.sensor_manager.right > 25:\n vacant_distance += 1\n else:\n vacant_distance = 0\n\n if vacant_distance >= 35:\n while self.sensor_manager.right > 25:\n time.sleep(0.1)\n\n distance_right = self.sensor_manager.right\n\n if 14 <= distance_right <= 18:\n self.angle = 0\n self.distance = 35\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)\n elif distance_right > 18:\n self.adjust_starting_position(\"left\")\n elif distance_right < 14:\n self.adjust_starting_position(\"right\")\n \n break\n\n self.stop_driving()", "def pointing_pos_lspe (self, time):\n\n return (self.elevation, self.start_angle + time * self.omega_rot)", "def optimal_tilt(lat):\n lat = abs(lat)\n if lat <= 25:\n return lat * 0.87\n elif lat <= 50:\n return (lat * 0.76) + 3.1\n else: # lat > 50\n # raise NotImplementedError('Not implemented for latitudes beyond 50.')\n return 40 # Simply use 40 degrees above lat 50", "def optimal_tilt(lat):\n lat = abs(lat)\n if lat <= 25:\n return lat * 0.87\n elif lat <= 50:\n return (lat * 0.76) + 3.1\n else: # lat > 50\n # raise NotImplementedError('Not implemented for latitudes beyond 50.')\n return 40 # Simply use 40 degrees above lat 50" ]
[ "0.5917251", "0.5454485", "0.53869474", "0.53047276", "0.5189361", "0.51892936", "0.5161829", "0.5157585", "0.51523453", "0.51253486", "0.51195055", "0.5104085", "0.5075305", "0.5061984", "0.50457203", "0.50427437", "0.50212985", "0.49925196", "0.498679", "0.49814695", "0.49780074", "0.49516854", "0.49278033", "0.4910076", "0.49099723", "0.48954636", "0.48867476", "0.48846233", "0.48824978", "0.48753715", "0.4875046", "0.48736918", "0.48714206", "0.4870952", "0.48699567", "0.48683298", "0.4863956", "0.48636043", "0.4860867", "0.4857801", "0.48563534", "0.48562726", "0.48485288", "0.483311", "0.4831953", "0.48266938", "0.48249516", "0.48225012", "0.48171034", "0.48130924", "0.48075363", "0.48030308", "0.4802049", "0.48010734", "0.47886232", "0.4783123", "0.478288", "0.4778349", "0.47779018", "0.47742307", "0.47739854", "0.47670606", "0.47634163", "0.4762165", "0.47575802", "0.47569597", "0.47558954", "0.47505277", "0.47436374", "0.47429824", "0.47420225", "0.474025", "0.47326693", "0.47140548", "0.4712761", "0.47112843", "0.4708526", "0.47003308", "0.4698485", "0.46972227", "0.46938714", "0.4692626", "0.46851876", "0.46834087", "0.46816412", "0.46809065", "0.467127", "0.4662187", "0.46612325", "0.46596935", "0.46455812", "0.4643585", "0.4642128", "0.46384993", "0.4633083", "0.46328443", "0.4631135", "0.46302772", "0.46289295", "0.46289295" ]
0.63866895
0
Do an internal (non-302) redirect to the front page. Preserves the user agent's requested URL.
def show_main_page(request, error_msg=None): request.method='GET' return MainPage(request, error_msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def home_page():\n return redirect(url_for(_DEFAULT_ROUTE, _external=True))", "def root_redirect():\r\n return redirect(url_for(\"display_top\"))", "def redirect(url):", "def redirect(self, url):\n raise RequestRedirect(url)", "def redirect(uri):\n response = HttpResponse('', status=302)\n response['Location'] = uri\n return response", "def redirect_to_last_known_url(cookie):\n requested_path = request.cookies.get('rucio-requested-path')\n if not requested_path:\n requested_path = request.environ.get('REQUEST_URI')\n resp = add_cookies(make_response(redirect(requested_path, code=303)), cookie)\n\n return resp", "def redirect_to():\n\n args_dict = request.args.items()\n args = CaseInsensitiveDict(args_dict)\n\n # We need to build the response manually and convert to UTF-8 to prevent\n # werkzeug from \"fixing\" the URL. This endpoint should set the Location\n # header to the exact string supplied.\n response = app.make_response(\"\")\n response.status_code = 302\n if \"status_code\" in args:\n status_code = int(args[\"status_code\"])\n if status_code >= 300 and status_code < 400:\n response.status_code = status_code\n response.headers[\"Location\"] = args[\"url\"].encode(\"utf-8\")\n\n return response", "def first_request():\n heroku_url: str = 'https://justice-ndou.herokuapp.com/'\n registered_domain: str = 'https://justice-ndou.herokuapp.com/'\n\n if request.host_url.lower().startswith(heroku_url):\n return redirect(request.host_url.lower().replace(heroku_url, registered_domain)), 301", "def redirect(self):\n new_url = self.server.url + options.script_alias + '/'\n self.send_response(301, \"Moved (redirection follows)\")\n self.send_header(\"Content-type\", \"text/html\")\n self.send_header(\"Location\", new_url)\n self.end_headers()\n self.wfile.write(\"\"\"<html>\n<head>\n<meta http-equiv=\"refresh\" content=\"1; URL=%s\">\n</head>\n<body>\n<h1>Redirection to <a href=\"%s\">ViewVC</a></h1>\nWait a second. 
You will be automatically redirected to <b>ViewVC</b>.\nIf this doesn't work, please click on the link above.\n</body>\n</html>\n\"\"\" % tuple([new_url]*2))", "def redirect(url, status=None):\n raise cherrypy.HTTPRedirect(url, status)", "def redirectReferer():\n referer = request.headers.get('referer')\n if not referer:\n referer = url_for('catelog')\n return redirect(referer)", "def redirect_dest(fallback):\n dest = request.args.get('next')\n try:\n if dest.startswith('/') or dest.startswith(request.host_url):\n return redirect(dest)\n dest_url = url_for(dest)\n except:\n return redirect(fallback)\n return redirect(dest_url)", "def redirect(to):\r\n def _redirect(environ, start_response):\r\n args, kwargs = environ['wsgiorg.routing_args']\r\n start_response('301 MOVED PERMANENTLY',\r\n [('Location', to.format(*args, **kwargs))])\r\n return []\r\n return _redirect", "def redirect_old_featured(page):\r\n return redirect(url_for('.index', page=page), 301)", "def _safe_postlogin_redirect(redirect_to, safehost, default_redirect='/'):\r\n if is_safe_url(url=redirect_to, host=safehost):\r\n return redirect(redirect_to)\r\n return redirect(default_redirect)", "def redirect(self, url):\n self.setResponseCode(responsecode.FOUND)\n self.setHeader(\"location\", url)", "def start_page():\n if not _home:\n abort(404)\n return redirect(_home)", "def start_page():\n if not _home:\n abort(404)\n return redirect(_home)", "def redirect(self, location):\n self.redirect_see_other(location)", "def toLanding():\n return redirect(url_for('landingurl'))", "def redirect(self, url):\n if self.in_canvas:\n return HttpResponse('<fb:redirect url=\"%s\" />' % (url,))\n elif re.search(\"^https?:\\/\\/([^\\/]*\\.)?facebook\\.com(:\\d+)?\", url.lower()):\n return HttpResponse('<script type=\"text/javascript\">\\ntop.location.href = \"%s\";\\n</script>' % url)\n else:\n return HttpResponseRedirect(url)", "def redirector(status, start_response, exc_info=None):\n session['login.pre_uri'] = environ['PATH_INFO']\n session.save()\n start_response('302 Found',[(\"Location\",\"/login\"),(\"Content-type\",\"text\")])\n return []", "def homepage():\n if g.user:\n return redirect(f\"/user/{g.user.id}\")\n else:\n return redirect(\"/landing\")", "def _redirect(self, url):\n logger.debug('Redirecting to URL %s', url)\n segments = urllib.parse.urlparse(url)\n\n host = segments.netloc\n if host != self._host:\n self.new_connection(host)\n\n relurl = urllib.parse.urlunparse(('', '') + segments[2:])\n try:\n self._raw_get(relurl)\n except http.client.HTTPException as e:\n logger.debug('Got exception: %s.', e)\n raise DDGConnectionError(\"Failed to get '%s'.\" % url)", "def handle_forbidden_for_homepage(self, request):\n\n login_url = request.link(Auth.from_request_path(request), name='login')\n\n if URL(request.url).path() == '/':\n return morepath.redirect(login_url)\n\n return handle_forbidden(self, request)", "def redirect(self, location):\n self.status=302\n headers=self.headers\n headers['status']='302 Moved Temporarily'\n headers['location']=location\n return location", "def redirect_view(request):\n path = request.GET.get(\"to\") or \"/\"\n return redirect(path if path.startswith(\"/\") else f\"/{path}\", permanent=True)", "def redirect(short):\n link_user = request.cookies.get('linkuser')\n user_browser = request.user_agent.browser\n time_stamp = datetime.now()\n action = \"redirect\"\n lat = \"\"\n longitude = \"\"\n\n if link_user == None:\n link_user = get_cookie_val()\n\n if str(short) in db:\n url = db.get(str(short),'/')\n 
clicks[str(short)] += 1\n app.logger.debug(\"Redirecting to \" + url + \" with clicks \" + str(clicks[str(short)]))\n \n ## log user action\n logline = [str(time_stamp), link_user, user_browser, action, url, short, lat, longitude ]\n write_log(logline)\n\n return flask.redirect(url)\n else:\n ## log user action\n logline = [str(time_stamp), link_user, user_browser, action, \"404\", short ]\n write_log(logline)\n\n return flask.render_template('404.html',short=short), 404", "def redirect_permanent(self, location):\n self.status = 308\n self.set_header('Location', location)", "def redirect(self, request, redirect_url):\n response_headers = [('Content-type', 'text/plain'),\n ('Location', redirect_url)]\n request['start']('302 REDIRECT', response_headers)\n return [\"Redirecting to %s\" % redirect_url]", "def _send_301(self, new_url):\n try:\n self.send_response(301)\n self.send_header('Location', new_url)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n except UnicodeEncodeError:\n self._send_internal_server_error()", "def homepage():\n return redirect('index.html')", "def redirect_url(default='front_page'):\n return request.args.get('next') or \\\n request.referrer or \\\n url_for(default)", "def redirect(url, code=None):\r\n if code is None:\r\n code = 303 if request.get('SERVER_PROTOCOL') == \"HTTP/1.1\" else 302\r\n location = urljoin(request.url, url)\r\n raise HTTPResponse(\"\", status=code, header=dict(Location=location))", "def redirectPage() -> Response:\n # pass in the function name\n return redirect(url_for('view.loadMainPage'))", "def ssl_redirect():\n if request.get_header('X-Forwarded-Proto', 'http') != 'https':\n redirect(request.url.replace('http://', 'https://', 1), code=301)", "def redirect(self, redirect_to, params=None, back_url=\"\", terminate=False):\n if params or back_url:\n redirect_to = self.generate_redirect_url(redirect_to, params, back_url)\n\n if terminate:\n raise RedirectException(redirect_to)\n\n if self.is_action():\n self.page.action('goTo', [redirect_to])\n\n else:\n response.redirect(redirect_to)", "def redirect_permanently(self, location):\n self.status = 301\n self.set_header('Location', location)", "def redirect(self, url, sendcookie=False):\n self._sendheader(sendcookie, 302, \"Found\");\n self.ostrm.write(\"Location: %s\\r\\n\\r\\n\" % url)\n self.ostrm.flush()", "def redirect_temporary(self, location):\n self.status = 307\n self.set_header('Location', location)", "def redirect_to_sso(self, next_url):\n resolved_url = resolve_url(self.sso_redirect_url)\n login_url_parts = list(urlparse(resolved_url))\n querystring = QueryDict(login_url_parts[4], mutable=True)\n querystring[settings.SSO_PROXY_REDIRECT_FIELD_NAME] = next_url\n login_url_parts[4] = querystring.urlencode(safe='/')\n\n return HttpResponseRedirect(urlunparse(login_url_parts))", "def _redirect_from_referrer(self, request, wiki_path):\r\n course_id = course_id_from_url(request.META.get('HTTP_REFERER'))\r\n if course_id:\r\n # See if we are able to view the course. If we are, redirect to it\r\n try:\r\n _course = get_course_with_access(request.user, 'load', course_id)\r\n return redirect(\"/courses/{course_id}/wiki/{path}\".format(course_id=course_id.to_deprecated_string(), path=wiki_path))\r\n except Http404:\r\n # Even though we came from the course, we can't see it. 
So don't worry about it.\r\n pass", "def resolveRedirect(self, useHEAD=False):\n conn = self.getConnection()\n try:\n if useHEAD:\n conn.request('HEAD', '%s%s' % (self.path, self.query), None,\n self.header)\n else:\n conn.request('GET', '%s%s' % (self.path, self.query), None,\n self.header)\n self.response = conn.getresponse()\n # read the server's encoding, in case we need it later\n self.readEncodingFromResponse(self.response)\n except httplib.BadStatusLine:\n # Some servers don't seem to handle HEAD requests properly,\n # e.g. http://www.radiorus.ru/ which is running on a very old\n # Apache server. Using GET instead works on these (but it uses\n # more bandwidth).\n if useHEAD:\n return self.resolveRedirect(useHEAD=False)\n else:\n raise\n if self.response.status >= 300 and self.response.status <= 399:\n # to debug, print response.getheaders()\n redirTarget = self.response.getheader('Location')\n if redirTarget:\n try:\n redirTarget.encode('ascii')\n except UnicodeError:\n redirTarget = redirTarget.decode(\n self.getEncodingUsedByServer())\n if redirTarget.startswith('http://') or \\\n redirTarget.startswith('https://'):\n self.changeUrl(redirTarget)\n return True\n elif redirTarget.startswith('/'):\n self.changeUrl(u'%s://%s%s'\n % (self.protocol, self.host, redirTarget))\n return True\n else: # redirect to relative position\n # cut off filename\n directory = self.path[:self.path.rindex('/') + 1]\n # handle redirect to parent directory\n while redirTarget.startswith('../'):\n redirTarget = redirTarget[3:]\n # some servers redirect to .. although we are already\n # in the root directory; ignore this.\n if directory != '/':\n # change /foo/bar/ to /foo/\n directory = directory[:-1]\n directory = directory[:directory.rindex('/') + 1]\n self.changeUrl('%s://%s%s%s'\n % (self.protocol, self.host, directory,\n redirTarget))\n return True\n else:\n return False # not a redirect", "def redirect_found(self, location):\n self.status = 302\n self.set_header('Location', location)", "def redirect(url, code=302):\n exc = status_map[code]\n raise exc(location=url).exception", "def redirect_see_other(self, location):\n self.status = 303\n self.set_header('Location', location)", "def test_edge_redirect_to_login(self):\r\n\r\n request = self.factory.get('/')\r\n request.user = AnonymousUser()\r\n\r\n # HTTP Host changed to edge.\r\n request.META[\"HTTP_HOST\"] = \"edge.edx.org\"\r\n response = index(request)\r\n\r\n # Response should be instance of HttpResponseRedirect.\r\n self.assertIsInstance(response, HttpResponseRedirect)\r\n # Location should be \"/login\".\r\n self.assertEqual(response._headers.get(\"location\")[1], \"/login\")", "def _follow_redirect(self, uri, method, body, headers, response,\n content, max_redirects):\n (scheme, authority, absolute_uri,\n defrag_uri) = httplib2.urlnorm(httplib2.iri2uri(uri))\n if self.cache:\n cachekey = defrag_uri\n else:\n cachekey = None\n\n # Pick out the location header and basically start from the beginning\n # remembering first to strip the ETag header and decrement our 'depth'\n if \"location\" not in response and response.status != 300:\n raise httplib2.RedirectMissingLocation(\n \"Redirected but the response is missing a Location: header.\",\n response, content)\n # Fix-up relative redirects (which violate an RFC 2616 MUST)\n if \"location\" in response:\n location = response['location']\n (scheme, authority, path, query,\n fragment) = httplib2.parse_uri(location)\n if authority is None:\n response['location'] = httplib2.urlparse.urljoin(uri, 
location)\n pywikibot.debug(u\"Relative redirect: changed [%s] to [%s]\"\n % (location, response['location']),\n _logger)\n if response.status == 301 and method in [\"GET\", \"HEAD\"]:\n response['-x-permanent-redirect-url'] = response['location']\n if \"content-location\" not in response:\n response['content-location'] = absolute_uri\n httplib2._updateCache(headers, response, content, self.cache,\n cachekey)\n\n headers.pop('if-none-match', None)\n headers.pop('if-modified-since', None)\n\n if \"location\" in response:\n location = response['location']\n redirect_method = ((response.status == 303) and\n (method not in [\"GET\", \"HEAD\"])\n ) and \"GET\" or method\n return self.request(location, redirect_method, body=body,\n headers=headers,\n max_redirects=max_redirects - 1)\n else:\n raise httplib2.RedirectLimit(\n \"Redirected more times than redirection_limit allows.\",\n response, content)", "def localRedirect(self, path, setcookie=False):\n server = os.environ['SERVER_NAME']\n if os.environ.has_key('SERVER_PORT') and \\\n os.environ['SERVER_PORT'] is not None and \\\n os.environ['SERVER_PORT'] != '80':\n server += \":%s\" % os.environ['SERVER_PORT']\n\n if not path.startswith('/'):\n path = \"/%s\" % path\n\n self.redirect(\"http://%s%s\" % (server, path), setcookie)", "def homepage_redirect():\n return redirect('/upload_file')", "def redirect_nonwww():\n urlparts = urlparse(request.url)\n if urlparts.netloc != 'www.mealscount.com':\n return redirect('https://www.mealscount.com/', code=301)", "def get(self, request, *args, **kwargs):\n\n if not reverse('secure_login') in request.META['HTTP_REFERER']:\n request.session['facebook_login_redirect_url'] = \\\n request.META['HTTP_REFERER']\n\n return redirect(unquote(request.GET['auth_url']))", "def get(self, request):\n return redirect('http://localhost:3000/')", "def test_redirect_login_page(self):\n r = requests.get(self.url, allow_redirects=False)\n self.assertEqual(r.status_code, 302)\n self.assertRegexpMatches(r.headers['location'], '%s/login.*' % self.url)", "def home_page():\n return redirect('/users')", "def homepage():\n return redirect(\"/posts\")", "def process_response(self, request, response):\r\n if response.status_code == 302 and response['Location'].startswith('/wiki/'):\r\n wiki_path = urlparse(response['Location']).path.split('/wiki/', 1)[1]\r\n\r\n response = self._redirect_from_referrer(request, wiki_path) or response\r\n\r\n # END HACK: _transform_url must be set to a no-op function after it's done its work\r\n reverse._transform_url = lambda url: url # pylint: disable=W0212\r\n return response", "def __before__(self, action, environ):\n host = request.headers.get('Host')\n if not (host and host in app_globals.merchants.domain_map):\n prot, host, path, params, query, fragment = urlparse.urlparse(request.url)\n return redirect(urlparse.urlunparse((prot, app_globals.default_host, path, params, query, fragment)))\n else:\n protocol = request.headers.get('X-Forwarded-Proto', 'http')\n request.merchant = app_globals.merchants.domain_map[host]\n request.qualified_host = '%s://%s'%(protocol, host)\n request.is_secured = protocol == 'https'\n log.info('%s, %s, %s', '-'*80, protocol , protocol == 'https')\n if not websession.get('region'):\n region = request.headers.get(\"X-COUNTRY\", app_globals.country_choices.fallback.code).lower()\n region = app_globals.country_choices.map.get(region, app_globals.country_choices.fallback).code\n websession['region'] = region\n c.messages = websession.get('messages', [])\n c.user = 
websession.get('user', ANONUSER)\n c.user._statics = app_globals.statics_service\n c.furl = str(request.params.get(\"furl\") or request.url)\n log.info('[%s] [%s] [%s] Incoming Request at %s', c.user.u_id, websession['region'], request.headers.get('Host'), url.current())\n\n if 'lang' not in websession or websession['lang'] not in app_globals.LANGUAGES:\n websession['lang'] = negotiate_locale(request.accept_language, app_globals.LANGUAGES)\n set_lang(websession['lang'])", "def test_redirect_view(self):\n # TODO: Get test to work.\n client = Client()\n #response = client.get(reverse(testurl))\n #self.assertEqual(301, response.status_code)", "def redirect_to(self, to, *args, **kwargs):\n\n response = redirect(to, *args, **kwargs)\n\n # By default, raise a redirect, which will cause the view\n # processing to stop and return this redirect.\n if kwargs.pop(\"raise\", True):\n raise response\n else:\n return response", "def second_page():\n return redirect(url_for('index'))", "def test_redirect(self):\n self.app.app.preprocess_request()\n\n resp = self.r(\n ({}, # data\n 302, # status code\n None, # headers\n 'http://google.com/', # redirect_uri\n )\n )\n\n self.assertIsInstance(\n resp,\n werkzeug.wrappers.Response,\n )\n self.assertEqual(302, resp.status_code)\n self.assertEqual('http://google.com/', resp.location)", "def homepage( request ):\n if \"email\" in request.session:\n return redirect( '/home' )\n return render_to_response( 'index.html' )", "def forward_to(id):\n\n db = init_connection_engine()\n\n if id == 'short_URL':\n return redirect(url_for('index'))\n else:\n # Looking up the URL by its ID in the DB.\n try:\n # Using a with statement ensures that the connection is always released\n # back into the pool at the end of statement (even if an error occurs).\n with db.connect() as conn:\n lookup_url = \"SELECT url_data FROM url_list WHERE url_id='\" + id + \"';\"\n target_url = conn.execute(lookup_url).fetchone()\n # If target URL is not found.\n if not target_url:\n flash('Not found')\n return redirect(url_for('index'))\n # If something goes wrong.\n except:\n flash('Something went wrong')\n return redirect(url_for('index'))\n\n return redirect(target_url[0])", "def _redirect(self):\n \n # Redirect URL is held in 'r' URL arg of this request\n b64encReturnTo = str(request.params.get('r', ''))\n\n if b64encReturnTo:\n # Decode the return to address\n try:\n b64decReturnTo = base64.urlsafe_b64decode(b64encReturnTo)\n except Exception, e:\n log.error(\"logout - decoding return URL: %s\" % e) \n c.xml = \"Error carrying out browser redirect following logout\"\n response.status_code = 400\n return render('ndg.security.kid', 'ndg.security.error')\n \n # Check for 'getCredentials' - avoid in case username/password\n # contained in the URL!\n getCredentialsIdx = b64decReturnTo.rfind('/getCredentials')\n if getCredentialsIdx != -1:\n log.debug(\"Reverting request URL from getCredentials to \"\n \"login...\")\n b64decReturnTo = b64decReturnTo[:getCredentialsIdx] + '/login'\n \n # Add flag indicating to caller that logout succeeded. The caller\n # can use this to remove any security cookie present in their\n # domain - See:\n # ndg.security.client.ssoclient.ssoclient.lib.base.BaseController\n if '?' 
in b64decReturnTo:\n b64decReturnTo += '&logout=1'\n else:\n b64decReturnTo += '?logout=1'\n\n # and now go back to whence we had come\n log.debug(\"LogoutController._redirect: redirect to %s\" %\n b64decReturnTo)\n h.redirect_to(b64decReturnTo)\n else:\n log.debug(\"LogoutController._redirect: no redirect URL set.\")\n response.status_code = 400\n c.errorPageHeading = \"Log out\"\n if getattr(c, \"loggedIn\", False):\n c.xml = \"Logged out\"\n else:\n c.xml = (\"An error occurred logging out. Please report the \"\n \"problem to your site administrator\") \n \n return render('ndg.security.kid', 'ndg.security.error')", "def redirect_heroku():\n urlparts = urlparse(request.url)\n domain_name = \"rsvp.tiks-ultimate.in\"\n old_domain_name = \"thatte-idli-rsvp.herokuapp.com\"\n fly_domain_name = \"tiks-ultimate-rsvp.fly.dev\"\n if urlparts.netloc in {old_domain_name, fly_domain_name}:\n urlparts_list = list(urlparts)\n urlparts_list[1] = domain_name\n return redirect(urlunparse(urlparts_list), code=301)", "def index():\n return redirect(url_for('second_page'))", "def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def test_homepage_redirect(self):\n with self.client as client:\n resp = client.get(\"/\", follow_redirects=True)\n html = resp.get_data(as_text=True)\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn('Davis Test', html)", "def __before__(self):\n \n if not u'REMOTE_USER' in session: \n if not request.environ[u'PATH_INFO'] in self.public_urls:\n log.debug('PATH_INFO: %s' % request.environ[u'PATH_INFO'])\n #session[u'path_before_login'] = request.environ[u'PATH_INFO']\n #session.save()\n redirect(url('/users/index'))", "def redirect( self, url, code = 303):\n self.res.status = code\n self.res.location = url\n self.res.content_type = 'text/html'\n self.res.content_length = None\n self.start_response(self.res.status, self.res.headerlist)\n return ['']", "def index():\n return redirect(url_for(\"home\"))", "def _redirect_to_next_page(request):\n url = request.META.get('HTTP_REFERER')\n query = requests.utils.urlparse(url).query\n params = dict(x.split('=') for x in query.split('&'))\n if 'next' in params:\n nextPage = params['next']\n return redirect(nextPage)", "def gonext():\n next = cherrypy.request.params.get('next', '')\n if next != '':\n redirect(next)", "async def redirect(self, location, msg=None):\n self.code = 302\n self.add_header('Location', location)\n if msg:\n self.add_header('Content-Length', len(msg))\n await self._send_headers()\n if msg:\n await self.send(msg)", "def test_redirects_shortlink_without_http_scheme(self):\n rv = self.post('www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"www.seinfeld.com\">www.seinfeld.com</a>!' 
in rv.data\n rv = self.app.get('/TheStakeOut')\n assert rv.status_code == 302\n assert rv.location == 'http://www.seinfeld.com'", "def handle_fallthrough(event, path, query):\n # If no fallthough response provider, 302 the whole website to the HOST that\n # was input\n if variables.FALLTHROUGH == None:\n return redirect('//' + variables.HOST + path + query)\n # If we asked to fallthrough to the origin, just return the original request\n # so that Cloudfront continues on its merry way\n elif variables.FALLTHROUGH == 'origin':\n return event['Records'][0]['cf']['request']\n # Otherwise use the fallthrough as is\n else:\n return variables.FALLTHROUGH", "def _build_page_401_response(self, request):\n _login_url = request.build_absolute_uri(reverse('account:login_page'))\n\n _next = request.build_absolute_uri()\n _redirect = build_redirect_url(_next, _login_url, 'refer_url')\n return HttpResponseRedirect(_redirect)", "def send302(start_response, location):\n start_response('302 Found', [('Location', location)])\n return [YZ_MOVED_TO + location]", "def _build_page_401_response_to_platform(self, request):\n _next = request.build_absolute_uri()\n if self._conf.ADD_CROSS_PREFIX:\n _next = self._conf.CROSS_PREFIX + _next\n\n _login_url = build_redirect_url(_next,\n self._conf.LOGIN_URL,\n self._conf.C_URL,\n extra_args=self._build_extra_args())\n return HttpResponseRedirect(_login_url)", "def redirect_request(self, req, fp, code, msg, headers, newurl):\n m = req.get_method()\n if (code in (301, 302, 303, 307) and m in (\"GET\", \"HEAD\")\n or code in (301, 302, 303) and m == \"POST\"):\n # Strictly (according to RFC 2616), 301 or 302 in response\n # to a POST MUST NOT cause a redirection without confirmation\n # from the user (of urllib2, in this case). In practice,\n # essentially all clients do redirect in this case, so we\n # do the same.\n # be conciliant with URIs containing a space\n newurl = newurl.replace(' ', '%20')\n newheaders = dict((k,v) for k,v in req.headers.items()\n if k.lower() not in (\"content-length\", \"content-type\")\n )\n return Request2(newurl,\n headers=newheaders,\n origin_req_host=req.get_origin_req_host(),\n unverifiable=True,\n method=\"GET\" if code == 303 else m)\n else:\n raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)", "def redirect_to_original_url(query_short_url):\n db_url = Url.query.filter_by(short_url=query_short_url).first_or_404()\n db_url.views += 1\n db.session.commit()\n return redirect(db_url.original_url)", "def idx(_request):\n return HttpResponseRedirect('/home')", "def process_request(self, request):\n try:\n if request.user.is_anonymous():\n for url in self.public_urls:\n if url.match(request.path[1:]):\n return None\n return HttpResponseRedirect(\"%s?next=%s\" % (self.login_url, request.path))\n except AttributeError:\n return HttpResponseRedirect(\"%s?next=%s\" % (self.login_url, request.path))", "def get_redirect_url(request):\n next = request.POST.get('next', request.GET.get('next'))\n if not is_safe_url(url=next, host=request.get_host()):\n next = request.META.get('HTTP_REFERER')\n if not is_safe_url(url=next, host=request.get_host()):\n next = '/'\n return next", "def catch_all(path):\n return redirect('/', code=302)", "def process_request(self, request):\r\n try:\r\n if request.user.is_anonymous():\r\n for url in public_urls:\r\n if url.match(request.path[1:]):\r\n return None\r\n return HttpResponseRedirect(\"%s?next=%s\" % (login_url, request.path))\r\n except AttributeError:\r\n return 
HttpResponseRedirect(\"%s?next=%s\" % (login_url, request.path))", "def test_index_redirect(self):\n response = self.app.get(\"/\")\n self.assertEqual(response.status_code, 302,\n \"/ did not redirect to login when user is not logged in\")\n self.assertTrue(\n response.location.endswith(\"/accounts/login/\"),\n \"Redirect location did not end with /accounts/login/\"\n )", "def set_referer(self, url):\n self._opener.set_header(('Referer', urlrewrite.get_referer(url)))", "def test_redirect(self):\r\n sess = FuturesSession()\r\n future = sess.get(httpbin('redirect-to?url=get'))\r\n self.assertIsInstance(future, Future)\r\n resp = future.result()\r\n self.assertIsInstance(resp, Response)\r\n self.assertEqual(200, resp.status_code)\r\n\r\n future = sess.get(httpbin('redirect-to?url=status/404'))\r\n resp = future.result()\r\n self.assertEqual(404, resp.status_code)", "async def docs_redirect():\n return RedirectResponse(url=\"/docs\")", "def go_home(request):\n\n url = request.route_url('home', _app_url=get_app_url(request))\n return HTTPFound(location=url)", "def redirect(self, path):\n self.get_controller().redirect(path)", "def test_redirect_for_patient_home_route(self):\n\n result = self.client.get(\"/patient/1\", follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Account Details\", result.data)\n\n result = self.client.get(\"/patient/4\", follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"not authorized\", result.data)", "def redirect_to_user(username):\n redirect(url_for('users', username=username))", "def redirect_old_draft(page):\r\n return redirect(url_for('.draft', page=page), 301)", "def redirect_func(request, tiny_url):\n if tiny_url:\n try:\n url_obj = UrlMap.objects.get(short_url=tiny_url)\n return redirect(url_obj.original_url)\n except Exception as e:\n return render(request, 'shortifyUrl/index.html',\n {'some_data': 'Could not find matching URL in DB, Exception : {}'.format(e)})", "def redirect_handler_factory(url):\n class RedirectHandler(http.server.SimpleHTTPRequestHandler):\n def do_GET(self):\n self.send_response(302)\n self.send_header('Location', url)\n self.end_headers()\n\n return RedirectHandler", "def landing():\n if g.user:\n return render_template('landing.html', user=g.user)\n return redirect(url_for('login'))" ]
[ "0.69821596", "0.6857515", "0.68187505", "0.65282667", "0.65206647", "0.64767575", "0.6462469", "0.6432681", "0.6411949", "0.63972974", "0.6323725", "0.63229686", "0.6311373", "0.63092226", "0.62773234", "0.6263432", "0.625649", "0.625649", "0.6236038", "0.6210916", "0.61977", "0.61884624", "0.61852336", "0.61392367", "0.60987794", "0.6094217", "0.60893625", "0.608854", "0.6073304", "0.60526896", "0.6028781", "0.60078645", "0.59998477", "0.5999453", "0.59986997", "0.5978703", "0.59498143", "0.5946792", "0.59375685", "0.5881578", "0.5879655", "0.58109605", "0.57982093", "0.5797895", "0.5796693", "0.5786976", "0.5736239", "0.57079357", "0.57053214", "0.5700218", "0.5692642", "0.5680683", "0.56706357", "0.5657833", "0.564712", "0.5646335", "0.5645821", "0.5645807", "0.5624756", "0.5610225", "0.5603481", "0.5589747", "0.557917", "0.55726963", "0.5570147", "0.5567361", "0.55490184", "0.5527967", "0.5527967", "0.5527967", "0.55183536", "0.550762", "0.54837024", "0.54584706", "0.5451364", "0.5445373", "0.5440122", "0.54261684", "0.5425636", "0.5415293", "0.541101", "0.5409365", "0.5401787", "0.5396481", "0.5393459", "0.5389991", "0.53847647", "0.53722316", "0.5366136", "0.53581935", "0.5356793", "0.5355584", "0.5353458", "0.53527635", "0.53509694", "0.5350396", "0.5350195", "0.5340852", "0.5329055", "0.5322858", "0.53188115" ]
0.0
-1
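The query/document pair above describes an in-process dispatch rather than an HTTP redirect: the handler for the current URL forces GET semantics and calls the front-page view directly, so no 302 is issued and the user agent keeps its originally requested URL. A minimal sketch of that pattern, assuming a Django-style view function; the main_page view below is hypothetical and stands in for the record's MainPage, which is not shown in this record:

from django.http import HttpResponse

def main_page(request, error_msg=None):
    # Hypothetical front-page view, included only to make the sketch self-contained.
    banner = "<p>%s</p>" % error_msg if error_msg else ""
    return HttpResponse(banner + "<h1>Front page</h1>")

def show_main_page(request, error_msg=None):
    # Force GET so the front page renders normally even if the original
    # request was a POST, then call the view in-process: no 302 is issued,
    # so the browser's address bar still shows the URL that was requested.
    request.method = "GET"
    return main_page(request, error_msg)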
Return the criteria given by the user and the column in the database it refers to.
def select(): file_title, song_title = [None, None], [None, None] artist, data, tag, form = [None, None], [None, None], [None, None], [None, None] while True: file_title[0] = input("Would you like to select by file name?[Y/N]\t") if file_title[0] == 'Y': file_title[1] = input("Give file name:\t") break elif file_title[0] == 'N': break else: print("Unknown answer. Please respond with Y or N...") while True: song_title[0] = input("Would you like to select by song title?[Y/N]\t") if song_title[0] == 'Y': song_title[1] = input("Give song title:\t") break elif song_title[0] == 'N': break else: print("Unknown answer. Please respond with Y or N...") while True: artist[0] = input("Would you like to select by artist?[Y/N]\t") if artist[0] == 'Y': artist[1] = input("Give artist name:\t") break elif artist[0] == 'N': break else: print("Unknown answer. Please respond with Y or N...") while True: data[0] = input("Would you like to select by release date?[Y/N]\t") if data[0] == 'Y': data[1] = input("Give release date:\t") break elif data[0] == 'N': data[1] = None break else: print("Unknown answer. Please respond with Y or N...") while True: tag[0] = input("Would you like to select by tags?[Y/N]\t") if tag[0] == 'Y': tag[1] = input("Give a tag:\t") break elif tag[0] == 'N': tag[1] = None break else: print("Unknown answer. Please respond with Y or N...") while True: form[0] = input("Would you like to select by format?[Y/N]\t") if form[0] == 'Y': form[1] = input("Give format:\t") break elif form[0] == 'N': form[1] = None break else: print("Unknown answer. Please respond with Y or N...") where = "" # Saves the where-clause for the database interrogation criteria = tuple() # Saves the criteria given by the user if file_title[0] == 'Y': where += nameof(file_title) + " = %s AND " criteria += (file_title[1],) if song_title[0] == 'Y': where += nameof(song_title) + " = %s AND " criteria += (song_title[1],) if artist[0] == 'Y': where += nameof(artist) + " = %s AND " criteria += (artist[1],) if data[0] == 'Y': where += nameof(data) + " = %s AND " criteria += (data[1],) if tag[0] == 'Y': where += nameof(tag) + " LIKE %s AND " criteria += ("%" + tag[1] + "%",) if form[0] == 'Y': where += nameof(artist) + " = %s AND " criteria += (form[1],) return criteria, where
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search_column_with_constraint(db, table, column, condition_col, condition_val):\n condition = condition_col + \" = '\" + str(condition_val) + \"'\"\n result = select_columns(db, table, column, condition=condition)\n\n return result", "def get_criteria(self):\n\n\t\treturn self.__criteria", "def get_data(db, columns, table, condition=\"\"):\n cur = db.cursor()\n cur.execute(SELECT.format(columns, table) + \" \" + condition)\n return cur.fetchall()", "def get_column(col_to_search, value_to_match, col_to_get, table, db_file):\n \n try:\n conn, c = connect_to_db(db_file) \n c.execute('SELECT {cg} FROM {t} WHERE {col}=\"{value}\"'.format(t=safe(table), \n cg=safe(col_to_get), col=safe(col_to_search), value=safe(value_to_match)))\n column = c.fetchone()\n conn.close()\n return column\n except Exception as e:\n print(\"Error when trying to fetch row in table\", table, \"in database file\", db_file)\n print(e)\n return None", "def getAccounts(owner_id, column: str=None):\n cursor = connection.cursor(dictionary=True)\n # replace \"account_type\" and \"owner_id\" for better readability\n if column == None:\n column = \"account_number, account_type, owner_id, open_date, status, balance\"\n join=\"\"\n\n if \"account_type\" in column:\n column = column.replace(\"account_type\", \" t.description as account_type \")\n join = \" inner join accounttype as t on account.account_type = t.id \"\n if \"owner_id\" in column:\n column = column.replace(\"owner_id\", \" o.fname as owner_id \")\n join = join + \"inner join user as o on o.id = account.owner_id \" \n\n statement = \"select \"+column+\" from account \"+join + f\"where owner_id ={owner_id}\" \n cursor.execute(statement)\n accounts = cursor.fetchall()\n cursor.close()\n return accounts", "def get_column_ontology_details(self, column_name):\n ontology_details = []\n \n try:\n con = self.getMetadataDatabaseConnection()\n ontologies = con.cursor()\n con.cursor().callproc('qiime_assets.get_column_ontologies', [column_name, ontologies])\n query_results=[]\n for row in ontologies:\n # row[0] = short_name\n # row[1] = bioportal_id\n # row[2] = ontology_branch_id\n ontology_details.append((row[0], row[1], row[2]))\n return ontology_details\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def select (a_data,a_column) :\n return a_data[a_column]", "def _retrieve_db_columns():\n\n # Grab the default columns and their details\n hard_coded_columns = copy.deepcopy(VIEW_COLUMNS_PROPERTY)\n\n md = MappingData()\n for c in hard_coded_columns:\n if not md.find_column(c['table'], c['name']):\n print \"Could not find column field in database for {}\".format(c)\n\n return hard_coded_columns", "def getAccountById(accountNumber, column: str=\"*\"):\n cursor = connection.cursor(dictionary=True)\n if column == None:\n column = \"account_number, account_type, owner_id, open_date, status, balance\"\n join=\"\"\n\n if \"account_type\" in column:\n column = column.replace(\"account_type\", \" t.description as account_type \")\n join = \" inner join accounttype as t on account.account_type = t.id \"\n if \"owner_id\" in column:\n column = column.replace(\"owner_id\", \" o.fname as owner_id \")\n join = join + \"inner join user as o on o.id = account.owner_id \" \n\n statement = \"select \"+column+\" from account \"+join + f\"where account_number ={accountNumber}\" \n cursor.execute(statement)\n account = cursor.fetchall()\n cursor.close()\n return account", "def findExtraColumnMatch(self, column_name):\n column_name = 
column_name.upper()\n con = self.getMetadataDatabaseConnection()\n matches = []\n results = con.cursor()\n con.cursor().callproc('qiime_assets.find_extra_column_match', [column_name, results])\n #for row in results:\n for row in results:\n matches.append(row[0])\n \n return matches", "def get_customer_columns():\n return cr.get_columns()", "def fetch_user_from_db(self) -> pd.DataFrame:\n query = f\"select * from {TABLE} where user_handle = {self.user}\"\n return db_main.read_table(DATABASE_ENV, query)", "def _get_column(self, name):\r\n return self.column(name)", "def search_user(search_dict,cur):\n if search_dict!={}:\n\n psql_base=\"\"\" select distinct * \n from users \n where \"\"\"\n\n psql_where=\"\"\n \n for (key,value) in search_dict.items() :\n psql_where= f\"\"\"{key}='{value}' and \"\"\"+psql_where\n\n psql=psql_base+psql_where[:-4]+\";\"\n cur.execute(psql)\n record=cur.fetchall()\n\n else:\n record=None\n columns_names=['name','last_name','email','tel','user_name','password','user_type']\n #['prenom','nom','email','tel','Nom utilisateur','type utilisateur','nombre de resultat']\n \n return columns_names,record", "def get_criteria_values(self) -> list or None:\n\n # query_string = 'SELECT * FROM [{}];'.format(self.settings.excelSheetName)\n query_string = 'SELECT DISTINCT [{}] FROM [{}];'.format(self.settings.excelCriteriaSelector,\n self.settings.excelSheetName)\n\n if self.connected and self.settings.excelSheetName:\n cursor = self.workbook.cursor()\n if cursor:\n criteria_values = []\n for row in cursor.execute(query_string):\n criteria_values.append(row[self.settings.excelCriteriaSelector.lower()])\n cursor.close()\n return criteria_values\n\n return None", "def show_where(self, aggregate=False):\n # Return criteria for all tables.\n tables = ['game', 'drive', 'play', 'play_player', 'player']\n with Tx(self._db) as cur:\n return self._sql_where(cur, tables, aggregate=aggregate)\n return ''", "def _get_column(cls, name):\r\n return cls._columns[name]", "def where_column(self, column1, column2):\n self._wheres += ((QueryExpression(column1, \"=\", column2, \"column\")),)\n return self", "def _get_user_by_criteria(id_, email):\n criteria = dict()\n try:\n if id_:\n criteria[\"id_\"] = id_\n elif email:\n criteria[\"email\"] = email\n return User.query.filter_by(**criteria).one_or_none()\n except StatementError as e:\n print(e)\n return None", "def searchUsers(self,conds,_from,to,order_by,desc,admin_obj):\n self.__searchUsersCheckInput(conds,_from,to,order_by,desc,admin_obj)\n search_helper=user_main.getAttributeManager().runAttrSearchers(conds,admin_obj)\n return search_helper.getUserIDs(_from,to,order_by,desc)", "def searchDatabase(metaDataBaseFile, conditions,var_from_database_to_select):\n # Read meta data and find the indices of interest\n # The indices will be those that are in the dataframe so that the user can index\n # the data frame to extract current_exp_ID in order to load the dataset\n metaData_frame = pd.read_csv(metaDataBaseFile,sep='\\t',header=0)\n positive_indices = {}\n for condition_indentifier, condition in conditions.items():\n if condition.startswith('#'):\n pass\n else:\n currIndices = metaData_frame\\\n [metaData_frame[condition_indentifier]==condition].index.values\n positive_indices[condition_indentifier] = set(currIndices.flatten())\n \n common_indices = list(set.intersection(*positive_indices.values()))\n \n data_to_select = pd.DataFrame()\n for variable in var_from_database_to_select:\n data_to_select[variable] = 
metaData_frame[variable].iloc[common_indices]\n \n \n \n \n return data_to_select", "def where(self, column, *args):\n\n operator, value = self._extract_operator_value(*args)\n\n if value is None:\n value = \"\"\n elif value is True:\n value = \"1\"\n elif value is False:\n value = \"0\"\n\n if inspect.isfunction(column):\n builder = column(self.new())\n self._wheres += (\n (QueryExpression(None, operator, SubGroupExpression(builder))),\n )\n elif isinstance(value, QueryBuilder):\n self._wheres += (\n (QueryExpression(column, operator, SubSelectExpression(value))),\n )\n else:\n self._wheres += ((QueryExpression(column, operator, value, \"value\")),)\n return self", "def select_column(\n self, cursor: sqlite3.Cursor, connector_value: Tuple[int, ...]\n ) -> Mapping[int, List[PrimitiveTypes]]:\n\n if not self.connector:\n raise Exception(f\"{self.model.table} has not been attached to a model\")\n\n where: Filters = dict(self.selectors)\n where[self.connector] = connector_value\n\n sql, params = self.where({}, where)\n sql = (\n f\"SELECT [{self.connector}], [{self.field}] FROM [{self.model.table}] WHERE \"\n + sql\n )\n\n _LOGGER.debug(sql)\n _LOGGER.debug(params)\n\n cursor.execute(sql, params)\n\n result: Dict[int, List[Any]] = dict(zip(connector_value, [[]] * len(connector_value)))\n\n for connected, value in cursor.fetchall():\n result[connected].append(value)\n\n return result", "def selection(self, clause):\n result = DBTable()\n result.columnNames = self.columnNames\n if clause.operator == '=':\n for rec in self.records:\n if rec[clause.operand1] == clause.operand2:\n result.records.append(rec)\n return result", "def _get_column(self, column_or_label):\n c = column_or_label\n if isinstance(c, collections.Hashable) and c in self.column_labels:\n return self[c]\n else:\n assert len(c) == self.num_rows, 'column length mismatch'\n return c", "def search_customer(login=\"\", name=\"\", phone=\"\", email=\"\", permission=\"\"):\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\n \"\"\"\n SELECT id_customer, login, customer_name, phone, email, perm\n FROM Customers\n WHERE login=? OR customer_name=? OR phone=? OR email=? 
or perm=?\n \"\"\",\n (login, name, phone, email, permission))\n return cursor.fetchall()", "def _select_data(\n self, db: str, table: str, column_filters: Dict[str, str]\n ) -> List[List]:\n pass", "def _get_criteria(self):\n for molecule in self.values():\n molecule.get_criterion()", "def get_column_information(self, column_name, table, verbose=True): \n \n assert(self.connected)\n try: \n assert(self.check_table(table, verbose=False)) \n except AssertionError: \n raise TableNotFoundError\n \n if (not self.check_column(column_name, table, verbose=False)):\n return\n \n GET_COLUMN_INFO_COMMAND = (\"SELECT COLUMN_TYPE, IS_NULLABLE, COLUMN_KEY, EXTRA \"\n \t\t\"FROM INFORMATION_SCHEMA.COLUMNS \"\n \t\t\"WHERE TABLE_NAME='{0}' and COLUMN_NAME = '{1}'\".format(table,column_name))\n \n self.cursor.execute(GET_COLUMN_INFO_COMMAND)\n \n for row in self.cursor:\n break\n \n info = {'type' : row[0],\n 'not_null' : row[1] != 'YES' , \n 'foreign_key' : row[2] == 'MUL',\n 'auto_incremenet' : row[3] == 'auto_increment'}\n \n if verbose: print(\"Column with label '{0}' found in table '{1}'\".format(column_name, table))\n \n \n return info", "def __contains__(self, column):\n if isinstance(column, orb.Column):\n return self.__model == column.schema().model() and self.__column == column.name()\n else:\n return column == self.__column", "def get_things(self):\n\n things = Thing.query.filter_by(user_id=self.user_id).all()\n column_names = [str(name).split(\".\")[1] for name in Thing.__table__.columns]\n\n return column_names, things", "def oreDbQuery():\n # TODO: Change TABLE name to the Crop Slection table that has not yet been Created :-(\n # TODO: Currently using the old DB for Crop Lookup table\n\n c.execute('SELECT DISTINCT Crop, GrpNo, GrpName, SubGrpNo, SubGrpName, Category FROM CCA')\n\n return c.fetchall()", "def query_b(geoType,keyCol,**valConstraint):\n query = \"SELECT \" + \"osm_id\"\n for a in keyCol: query+= \",\"+ a \n query += \" FROM \" + geoType + \" WHERE \"\n # If there are values in the dictionary, add constraint clauses\n if valConstraint: \n for a in [*valConstraint]:\n # For each value of the key, add the constraint\n for b in valConstraint[a]: query += a + b\n query+= \" AND \"\n # Always ensures the first key/col provided is not Null.\n query+= \"\"+str(keyCol[0]) +\" IS NOT NULL\" \n return query", "async def get_object_by_param(model, column, conn: Database, data):\n query = select([model]).where(column == data)\n return await get_objects(conn, query)", "def fetchColumnData():\n if request.method ==\"GET\": \n return {'data':db.get_columns() }\n return{}", "def select_columns(db, table, columns, condition=None):\n mycursor = db.cursor()\n if isinstance(columns, str):\n sql = \"SELECT \" + columns + \" FROM \" + table\n else: # columns is a list of columns\n sql = \"SELECT \" + parse_sql_param_from_array(columns) + \" FROM \" + table\n if condition:\n sql += \" WHERE \" + condition\n mycursor.execute(sql)\n result = mycursor.fetchall()\n return result", "def get_rows(column_to_search, value_to_match, table, db_file):\n \n try:\n conn, c = connect_to_db(db_file) \n c.execute('SELECT * FROM {t} WHERE {col}=\"{value}\"'.format(t=safe(table), \n col=safe(column_to_search), value=value_to_match))\n row = c.fetchall()\n conn.close()\n return row\n except Exception as e:\n print(\"Error when trying to get row in table\", table, \"in\", db_file)\n print(e)\n return None", "def get_result_from_db() -> list:\n global choice, confirmation, res, data, column_names, result, choice_row\n 
column_names = data[0]\n try:\n confirmation.after(1, confirmation.destroy)\n except AttributeError:\n pass\n choice_row = choice.get()\n res = place_for_enter.get()\n if choice_row in column_names:\n result = simple_search_from_db(data_base, table, choice_row, res)\n return result\n else:\n mistake_select_value()", "def getByField(database,field):\n correspondant=[]\n for key,usr in database.items():\n if field == usr.fieldStudy:\n correspondant.append(usr)\n return correspondant, False\n else:\n return correspondant,True", "def get_target_column(self, source, column):\n # Refactor as never called without column\n tbl_col = source + '.' + column\n mapping = self.mapping.get(tbl_col, None)\n # not found? We look for wildcards\n if mapping is None:\n # wildcard, we match the source\n\n # partial wildcard, we match only for the table\n partial_pattern = '%s.*' % source\n if partial_pattern in self.mapping:\n if self.mapping[partial_pattern]:\n # In this function we replace the star with the sought\n # column if autoforget is not enabled, or it is present\n # in the target table\n # if column not in self.explicit_columns.get(source, [column]):\n # LOG.warn('Source table %s contains column %s that '\n # 'isn\\'t present in target', (source, column))\n return {k.replace('*', column): v\n for k, v in self.mapping[partial_pattern].items()\n if column in self.explicit_columns.get(source, [column])}\n return {tbl_col: None}\n elif '.*' in self.mapping:\n return {tbl_col: None}\n return mapping", "def column(self):\n return self[\"column\"]", "def getCustomColumnDetails(self, column_name):\n column_info = {}\n con = self.getMetadataDatabaseConnection()\n found = False\n column_name = column_name.upper()\n \n # Init some variables\n description = ''\n extra_table_name = ''\n common_table_name = ''\n data_type = ''\n \n # Figure out if exists in \"Extra\" table\n statement = \"\"\"select table_name from all_tab_columns where column_name = '%s' and table_name like 'EXTRA_%%'\"\"\"\\\n % column_name\n try:\n results = con.cursor().execute(statement).fetchone()\n extra_table_name = results[0]\n found = True\n except:\n extra_table_name = None\n \n # Figure out if exists in a factored table\n statement = \"\"\"select table_name from all_tab_columns where column_name = '%s' and table_name like 'COMMON_EXTRA_%%'\"\"\"\\\n % column_name\n try:\n results = con.cursor().execute(statement).fetchone()\n common_table_name = results[0]\n found = True\n except:\n common_table_name = None\n \n if found:\n try:\n statement = \"\"\"select data_type, description from extra_column_metadata where upper(column_name) = '%s'\"\"\" % column_name\n results = con.cursor().execute(statement).fetchone()\n data_type = results[0]\n description = results[1]\n except:\n data_type = None\n description = None\n else:\n data_type = None\n \n column_info['description'] = description\n column_info['extra_table_name'] = extra_table_name\n column_info['common_table_name'] = common_table_name\n column_info['data_type'] = data_type\n \n return column_info", "def query_assignment_by_user(cls, user_key):\n return list(cls.query(cls.user == user_key))", "def get(self, field, table=None, **constraints):\n keys = constraints.keys()\n table = (table or\n (len(keys) == 1 and keys[0].endswith('_id') and keys[0][:-3]) or\n (field.endswith('_id') and field[:-3]))\n condition = ' and '.join(key + ' = %s' for key in keys)\n for row in self.iter(\n 'select %s from %s where %s' % (field, table, condition),\n *(constraints[key] for key in keys)):\n return 
row[0]", "def find_car_owner(database, user):\n try:\n # check if user is an officer\n c = database.cursor()\n c.execute('SELECT utype FROM users WHERE uid = ?', (user, ))\n user_type = c.fetchone()[0]\n\n # If user is an officer \n if user_type == 'o':\n print(pm.car_own)\n c = database.cursor()\n\n make = str(input(\"Make: \"))\n model = str(input(\"Model: \"))\n year = int(input(\"Year: \"))\n color = str(input(\"Color: \"))\n plate = str(input(\"Plate: \"))\n\n c.execute(\"\"\"SELECT DISTINCT p.fname, p.lname FROM persons p JOIN registrations r ON (r.fname, r.lname) = \n (p.fname, p.lname) JOIN vehicles v ON r.vin = v.vin WHERE v.make = ? OR v.model = ? OR v.year = ? OR v.color = ?\n OR r.plate = ?\"\"\", (make, model, year, color, plate))\n result = c.fetchall()\n\n if len(result) > 4:\n c.execute(\"\"\"SELECT DISTINCT r.fname, r.lname, v.make, v.model, v.year, v.color, r.plate FROM persons p JOIN registrations r\n ON (r.fname, r.lname) = (p.fname, p.lname) JOIN vehicles v ON r.vin = v.vin WHERE v.make = ? OR v.model = ? OR\n v.year = ? OR v.color = ? OR r.plate = ?\"\"\", (make, model, year, color, plate))\n result = c.fetchall()\n for values in result:\n print(\"\\n-----------------------------------------\")\n print(f\"Full Name: {values[0]} {values[1]}\")\n print(\"------------------------------------------\")\n print(f\"Make: {values[2]}\")\n print(f\"Model: {values[3]}\")\n print(f\"Year: {values[4]}\")\n print(f\"Color: {values[5]}\")\n print(f\"Plate: {values[6]}\")\n elif len(result) <= 4:\n c.execute(\"\"\"SELECT DISTINCT r.fname, r.lname, v.make, v.model, v.year, v.color, r.plate, r.regdate, r.expiry FROM persons p JOIN registrations r\n ON (r.fname, r.lname) = (p.fname, p.lname) JOIN vehicles v ON r.vin = v.vin WHERE v.make = ? OR v.model = ? OR\n v.year = ? OR v.color = ? 
OR r.plate = ?\"\"\", (make, model, year, color, plate))\n result = c.fetchall()\n for values in result:\n print(\"\\n-----------------------------------------\")\n print(f\"Full Name: {values[0]} {values[1]}\")\n print(\"------------------------------------------\")\n print(f\"Make: {values[2]}\")\n print(f\"Model: {values[3]}\")\n print(f\"Year: {values[4]}\")\n print(f\"Color: {values[5]}\")\n print(f\"Plate: {values[6]}\")\n print(f'Registration Date: {values[7]}')\n print(f\"Expiry: {values[8]}\")\n \n\n print(pm.all_done)\n else:\n print(pm.for_officers_only)\n sys.exit()\n except:\n print(pm.something_went_wrong)\n sys.exit()", "def get_column_def(self):\r\n return '{} {}'.format(self.cql, self.db_type)", "def _sql_where(self, cursor, table, prefix=None, aggregate=False):\n assert False, \"subclass responsibility\"", "def get_account_columns():\n return ar.get_columns()", "def get(table, field, val):\n\n # return session.query(table).filter(getattr(table, field).like(val)).all()\n return session.query(table).filter(getattr(table, field) == val).all()", "def search_user(user, conditions=[],fields=[], filters={}):\n return db((db.auth_user.first_name.like(user+'%')),*conditions).select(*fields,**filters)", "def get_where(self, **kwargs):\n assert len(kwargs) == 1\n match_k, match_v = list(kwargs.items())[0]\n query = {'orderBy': f'\"{match_k}\"', 'equalTo': f'\"{match_v}\"'}\n query.update(self.params)\n return self.firebase.get('users', None, params=query) or {}", "def query(self, columns, table, matchColumn=None, matchValue=None):\n if matchValue and matchColumn:\n sql = \"\"\"SELECT %s FROM %s WHERE %s='%s'\"\"\" % (','.join(columns), table, matchColumn, matchValue)\n print \"SQL Statement: \" + sql\n else:\n sql = \"\"\"SELECT %s FROM %s\"\"\" % (','.join(columns))\n\n self.db.query(sql)\n queryResult = self.db.store_result()\n return queryResult.fetch_row(maxrows=0)", "def get_columns(request):\n organization_id = request.GET.get('organization_id', '')\n is_project = request.GET.get('is_project', '')\n all_fields = request.GET.get('all_fields', '')\n is_project = True if is_project.lower() == 'true' else False\n all_fields = True if all_fields.lower() == 'true' else False\n return utils_get_columns(is_project, organization_id, all_fields)", "def _confound_strat(strategy, confound_raw):\n\n param = [\n col\n for col in confound_raw.columns\n for conf in confound_dict[strategy]\n if (conf in col)\n ]\n return param", "def get_search_criteria_values(**kwargs):\n result = {}\n for item in kwargs.items():\n search_criteria = SearchCriteria.query.filter_by(text=item[0]).first()\n if search_criteria:\n uctm = UserCreatedTextMapper.query.filter_by(\n user_text=item[1],\n search_criteria=search_criteria.id).first()\n if uctm:\n result[item[0]] = uctm.search_criteria_value\n else:\n return None\n else:\n app.logger.error('Wrong search criteria: {}'.format(search_criteria))\n return None\n return result", "def _search(self, *args, **kwargs): # should return Formulas obj\n # Find all Matches\n if kwargs:\n col = list(kwargs)[0]\n args = kwargs[col]\n if isinstance(args, str):\n args = (args, )\n else:\n col = self._formula_col\n match = self.data[col].str.contains('|'.join(args))\n return self.data[match]\n\n # creat a subset of data that is a Formulas obj", "def find_column(self, columns):\n for column in columns:\n if self.match(column):\n return column\n return None", "def where(**kwargs):\n return QueryBuilder(Card).where(**kwargs)", "def __call__(self, *columns: str, att=None, limit: 
int=0, order: str or None=None, desc: bool=False):\n args = tuple()\n if all(column in self.columns for column in columns): # check if all collumn given really exist in the refered table, otherwise raise NameError\n if len(columns) == 0: # if no column is given, query should return all entries. Note: empty tuples return True for the previous check\n cols = '*'\n elif len(columns) == 1:\n cols = columns[0]\n else:\n cols = ', '.join(columns)\n sql = \"SELECT {} FROM {}\".format(cols, self.name)\n if att:\n if not isinstance(att, dict): # check if there was just one att given (should be string) or a dict of columns-arguments pairs\n sql += \" WHERE {}=?\".format(self.priority)\n args = (att,) # args should be a tuple even with only one arg\n else:\n keys = tuple(att.keys()) # att should be a dict?\n if all(column in self.columns for column in keys):\n if len(keys) == 1:\n sql += ' WHERE ' + keys[0] + '=?'\n else:\n sql += ' WHERE ' + '=? AND '.join(keys) + '=?'\n args = tuple(att[key] for key in keys)\n else:\n raise NameError('Column for attribute does not exist in referred table.')\n if order:\n if order in self.columns:\n sql += ' ORDER BY {}'.format(order)\n else:\n raise NameError('ORDER column does not exist in referred table.')\n if desc:\n sql += ' DESC'\n self.main.queue.append(Operation(sql, args, limit, True, False))\n self.main.fetchable += 1\n return self.main.fetchable - 1\n else:\n raise NameError('Requested column query does not exist in referred table.')", "def lookup_sqlacolumn_for_field(cls, fieldid):\n for field in cls.fieldlist:\n if (field.id==fieldid):\n return field.get_sqlacolumn()\n raise Exception(\"Could not find field {0}\".format(fieldid))\n #return None", "def criteria(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Criterion]:", "def locate(self, column, cond_inp):\n\n try:\n return self.df_input.loc[self.df_input[column] == cond_inp]\n except Exception as e:\n print(e)", "def for_user(self, user):\n return self.get_query_set().filter(owner=user)", "def get(self, **args ):\n # Make sure its a valid argument\n for key in args.keys():\n if not key in self.schema:\n raise BadArgument(\"Key %s not a valid argument\" % key )\n\n query = STD.select('*')\n query = query.where( args )\n item = query.list()\n\n # If a list return make sure there is only one item\n if isinstance(item, collections.Iterable):\n if len(item) > 1:\n raise NotUnique(\"More than one items found\")\n if len(item) == 0:\n print \"No items found\"\n return None\n else:\n item = item[0]\n return item", "def get_sql_columns(self, request):\n cur = self.execute(request)\n col_name_list = [tuple[0] for tuple in cur.description]\n cur.close()\n return col_name_list", "def __query_db(self, druggable: bool = False):\n # TODO remove if not used again soon\n target = self.node_name.upper() # Target symbol upper case for humans\n if druggable:\n rels = EDGE_MAPPER['causal']\n\n elif self.edge in EDGE_MAPPER:\n rels = EDGE_MAPPER[self.edge]\n\n else:\n rels = [self.edge]\n\n table = Druggable if druggable else General\n\n query = session().query(table).filter_by(target_symbol=target, target_type=self.node_type)\n results = pd.read_sql(query.statement, query.session.bind)\n\n # Filter by edges and pmods\n filtered_df = results[results['relation_type'].isin(rels)]\n if self.pmods:\n filtered_df = filtered_df[filtered_df['pmod_type'].isin(self.pmods)]\n\n return filtered_df if not filtered_df.empty else None", "def searchColumn(self, table: Table, name: str) -> Column:\n 
if table:\n for col in table.columns:\n if col.name.lower() == name.lower():\n return col\n return None", "def getUserBycID(self, cID):\n\n cursor = self.conn.cursor()\n query = \"SELECT ufirstname, ulastname, udescription, urole, uclassification, email, pin \" \\\n \"FROM Users natural inner join Credential \" \\\n \"WHERE cID= %s;\"\n cursor.execute(query, (cID,))\n result = cursor.fetchone()\n return result", "def get_column_def(self):\r\n db_type = self.db_type.format(self.value_type.db_type)\r\n return '{} {}'.format(self.cql, db_type)", "def searchUser(database):\n print(\"How do you want to search for a user\\n1.name\\n2.field\\n3.year of study\\n4.areas of interest\\n5.Quit\")\n choice=int(input(\"Your choice :\"))\n if choice==1:\n searchByName(database)\n elif choice==2:\n searchByField(database)\n elif choice==3: \n searchByYear(database)\n elif choice==4:\n searchByInterest(database)\n elif choice==5:\n return", "def getFilter(self):\n col = self.filtercol.get()\n val = self.filtercolvalue.get()\n op = self.operator.get()\n booleanop = self.booleanop.get()\n return col, val, op, booleanop", "def searchByField(database):\n field=str(input(\"What is his field name :\"))\n usrs,find=getByField(database,field)\n for usr in usrs:\n print(usr)", "def commonValues( self, connection ):\n\t\tconstraint = self.schema.foreign()\n\t\tassert constraint\n\t\tfields = constraint.foreignFields[:]\n\t\tforeignTable = constraint.lookupName( constraint.foreignTable )\n\t\tif hasattr( foreignTable, 'friendlyNameField'):\n\t\t\tsql = \"\"\"SELECT\n\t\t\t\t%(friendlyNameField)s,%(fields)s\n\t\t\tFROM\n\t\t\t\t%(foreignTable)s\n\t\t\tORDER BY\n\t\t\t\tUPPER( %(friendlyNameField)s );\"\"\"\n\t\t\tfriendlyNameField = foreignTable.friendlyNameField\n\t\telse:\n\t\t\tsql = \"\"\"SELECT\n\t\t\t\t%(friendlyNameField)s,%(fields)s\n\t\t\tFROM\n\t\t\t\t%(foreignTable)s\n\t\t\tORDER BY\n\t\t\t\t%(friendlyNameField)s;\"\"\"\n\t\t\tfriendlyNameField = fields[0]\n\t\tfields = \",\".join(fields)\n\t\trecords = sqlquery.SQLQuery(\n\t\t\tsql=sql,\n\t\t\t#debug = 1,\n\t\t)( connection, fields=fields,\n\t\t\tforeignTable=constraint.foreignTable,\n\t\t\tfriendlyNameField=friendlyNameField,\n\t\t).fetchall()\n\t\treturn records", "def select_budget_analysts(\n df: pd.DataFrame, search_colname: Union[str, List] = \"simple_job_title\"\n) -> pd.DataFrame:\n if df.columns.isin(list(search_colname)).any():\n # do the budget filtering\n pass\n else:\n raise Exception(f\"{search_colname} column does not exist\")", "def _query_df(self, opt: dict):\n query = []\n for k in self.unique_keys:\n val = opt[k]\n if isinstance(val, str):\n query.append(f\"{k}=='{val}'\")\n else:\n query.append(f\"{k}=={val}\")\n return self.data.query(\" and \".join(query).strip())", "def user_columns(self):\n if self.columns[0] == 'rowid':\n return self.columns[1:]\n else:\n return self.columns", "def get_data_column ( self, object ):\n return getattr( object, self.name )", "def get_or_create_column_settings(self, column):\n found = None\n for gv in self.obj_payload[\"values\"]:\n if gv[\"column\"] == column:\n found = gv\n break\n if found is None:\n found = {\"column\" : column}\n self.obj_payload[\"values\"].append(found)\n return found", "def passes(self, attribute, key, dictionary):\n return (\n config(\"database.DB\")\n .connection(self.connection)\n .table(self.table)\n .where(self.column, attribute)\n .first()\n is None\n )", "def _get_join_tables_and_columns(clause):\n pattern = re.compile(r\"^(.*)\\.(.*?)\\s*=\\s*(.*)\\.(.*)$\")\n match = 
pattern.match(clause)\n assert match, clause\n return match.group(1), match.group(2), match.group(3), match.group(4)", "def __get__(self, instance, owner):\r\n try:\r\n return instance._values[self.column.column_name].getval()\r\n except AttributeError as e:\r\n return self.query_evaluator", "def get_table_col(colnames=\"pmid\", \n condition=\"\",\n table=\"test_articles_1\", \n database=\"tumba\", \n user=\"xui\", \n password=\"mmceez\", \n host=\"localhost\", \n port=\"5433\"):\n engine = create_engine(f\"postgresql://{user}:{password}@{host}:{port}/{database}\", echo = False) \n Query = f\"select {colnames} from {table} {condition};\"\n result = pd.read_sql(sqlalchemy.text(Query), engine)\n \n return(result)", "def column_selection(type1, cat):\n col_selection = []\n for col in cat.colnames:\n if col == \"_RAJ2000\":\n continue\n if col == \"_DEJ2000\":\n continue\n desc = cat[col].info.description\n f = any([(ban in desc) for ban in BANNED_KEYWORDS])\n if f is False:\n col_selection.append(col)\n return col_selection", "def _get_features(self, session, problem_name=\"\", user_name=\"\"):\n\n #TODO pivot metrics tables\n query = session.query(Feature, User.name)\n #query = session.query(Feature, User.name, Metric)\n\n if user_name:\n query = query.filter(User.name == user_name)\n\n if problem_name:\n query = query.filter(Feature.problem.name == problem_name)\n\n return query", "def get_rs_query(self, cols_list):\n rs_query = \"\"\"\n SELECT\n user_id\n \"\"\"\n\n for question in cols_list:\n if question in self.features:\n rs_query += \"\"\"\n ,COALESCE(CASE WHEN question = '{question}' THEN regexp_replace(response, '\\\\[|\\\\]|\"', '') END, 'unspecified') AS {question_cleaned}\n \"\"\".format(question=question,\n question_cleaned=question.replace(\" \", \"_\"))\n\n rs_query += \"\"\"\n FROM {schema}.{table}\n WHERE 1=1\n AND response_time > (current_timestamp - interval '7 day')\n AND user_id IS NOT NULL\n \"\"\".format(schema=self.rs_schema, table=self.rs_table)\n\n return rs_query", "def info(self):\r\n cur = self.db.cursor()\r\n cur.execute(\"select * from lic where idx='USER'\")\r\n info = cur.fetchone()\r\n cur.close()\r\n return info", "def get_column_info(config):\n columns = config.view.columns\n colnames = dict(zip(columns, list(s.replace(\"_\", \" \") for s in columns)))\n colnames.update(config.view.colnames)\n column = config.view.column\n return columns, colnames, column", "def searchAll(name, table, field, goal):\n connection, cursor = DBconnect(name)\n cursor.execute(\"SELECT * FROM \"+table+\" WHERE \"+field+\"=:Id\",{\"Id\": goal})\n result = cursor.fetchall()\n DBdisconnect(connection)\n return result", "def get_column_def(self):\r\n db_type = self.db_type.format(\r\n self.key_type.db_type,\r\n self.value_type.db_type\r\n )\r\n return '{} {}'.format(self.cql, db_type)", "def select(table,query_field='',query_value='',fields=''):\n query=None\n if query_field:\n query = db[table][query_field]==query_value\n if fields:\n fields=['%s.%s' % (table,f.strip()) for f in fields.split(',')]\n else:\n fields=None\n return crud.select(db[table],query=query,fields=fields,headers='fieldname:capitalize')", "def get_basic_query_cond(column: str, val: str, query_params: dict):\n if val is not None:\n query_params[column] = val\n return 'AHJ.' 
+ column + '=%(' + column + ')s AND '\n return ''", "def search_user_by_email(self,useremail, cursor):\n sql = \"SELECT * FROM users WHERE useremail=%s\"\n cursor.execute(sql,(useremail,))\n return cursor", "def where(self, column_or_label, value=None):\n column = self._get_column(column_or_label)\n if value is not None:\n column = column == value\n return self.take(np.nonzero(column)[0])", "def get_search_rec_data(self, collid, cf_columns, rec_search_name):\n\n sql_param = {}\n sql_param[str(len(sql_param) + 1)] = collid\n recurs, _ = self._get_collections_tree_query(\":%s\" % (len(sql_param)))\n recur_sql = GenericSQLSelect([COL_NAME_COLL_COLLID], False, [GEN_RECUR_NAME])\n tree_sql = str(SQLConcatExpr(recurs, recur_sql))\n cond1 = self._get_union_shared_collection_cond(tree_sql, sql_param)\n cmap_tbl = str(GenericSQLSelect([COL_NAME_COLLMAP_MEASID], True, [TABLE_NAME_COLLMAP], cond1))\n cond1 = SQLBinaryExpr(COL_NAME_FILES_MEASID, OP_IN, \"(%s)\" % (cmap_tbl))\n cond2 = SQLBinaryExpr(SQLFuncExpr(self.db_func_map[DB_FUNC_NAME_LOWER],\n COL_NAME_FILES_RECFILEID), OP_LIKE, \"(%s)\" % (\"'\"+OP_MOD+rec_search_name.lower()+OP_MOD+\"'\"))\n cond = SQLBinaryExpr(cond1, OP_AND, cond2)\n\n stmt = str(GenericSQLSelect(cf_columns, False, [TABLE_NAME_FILES], cond))\n return stmt", "def fetch_where(self, tablename, where):\n\n if type(where) != str:\n raise NotAStringError(\"please provide a valid where clause\")\n\n query = 'select * from ' + tablename + ' where ' + where\n\n try:\n self.__cur.execute(query)\n except Exception as e:\n self.__conn.rollback()\n raise e\n\n fetcheddata = self.__cur.fetchall()\n fetcheddata = self.__helper._functions__rowtodict(fetcheddata)\n return fetcheddata", "def query_predicate(self, p):\n self.setQuery(\"\"\"\n Select ?s ?o where {\n ?s %s ?o\n } ORDER BY (?s)\"\"\" % (p))\n\n try:\n rval = self.query()\n g = rval.convert()\n return [(x['s']['value'], x['o']['value']) for x in g['results']['bindings']]\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)", "def __init__(self, *column, **options):\n # initialized with (model, column)\n if len(column) == 2:\n self.__model, self.__column = column\n elif len(column) == 1:\n column = column[0]\n\n try:\n if issubclass(column, orb.Model):\n self.__model = column\n self.__column = column.schema().idColumn().name()\n except StandardError:\n if isinstance(column, orb.Column):\n self.__model = column.schema().model()\n self.__column = column.name()\n else:\n self.__model = None\n self.__column = column\n else:\n self.__model = None\n self.__column = None\n\n self.__op = options.get('op', Query.Op.Is)\n self.__caseSensitive = options.get('caseSensitive', False)\n self.__value = options.get('value', None)\n self.__inverted = options.get('inverted', False)\n self.__functions = options.get('functions', [])\n self.__math = options.get('math', [])", "def get(self, table, field, condition, *parameters, **kwparameters):\n data = self.select(table, field, condition, *parameters, **kwparameters)\n return data[0] if data else []", "def search_all_user(search_dict,cur):\n record=None\n if search_dict!={}:\n psql_where=\"\"\n \n for (key,value) in search_dict.items() :\n psql_where= f\"\"\"{key}='{value}' and \"\"\"+psql_where\n psql_base=f\"\"\" select distinct id,last_name,name,email,tel,user_name,user_type,\n (select count(*) from users where {psql_where[:-4]}) as nb\n from users \n where \"\"\"\n\n psql=psql_base+psql_where[:-4]\n record=cur.fetchall()\n cur.execute(psql)\n record=cur.fetchall()\n \n else:\n 
record=None\n\n \n return record", "def find_some(self,table,field_list,**query_dict):\n start_sql = 'SELECT '\n sql = ''\n query_sql = ''\n for field in field_list: start_sql += field + ',' \n start_sql = start_sql[0:-1] + ' FROM %s WHERE ' % (table)\n try:\n if query_dict:\n for index in query_dict:\n if not isinstance(query_dict[index],dict): query_sql += \" %s = '%s' and\" % (index,query_dict[index]) \n else: query_sql += \" %s %s '%s' and\" % (index,query_dict[index]['rule'],query_dict[index]['value'])\n sql = (start_sql + query_sql)[0:-3] \n info_list = self.db.query(sql)\n except Exception,e: self.treat_except(e) \n return info_list", "def query(self, **columns) -> pd.DataFrame:\n conditions = [\"{0} == '{1}'\".format(col, val) for col, val in columns.items()]\n results = self._table.query(\" & \".join(conditions))\n\n return results" ]
[ "0.5852314", "0.57501423", "0.5629169", "0.5598633", "0.5358296", "0.5331501", "0.5246509", "0.52190596", "0.5164159", "0.51409566", "0.50485057", "0.50430083", "0.50312936", "0.503079", "0.5022927", "0.49895406", "0.49535307", "0.49471778", "0.49386907", "0.49316448", "0.49198973", "0.49136564", "0.49001983", "0.48846945", "0.48818007", "0.48721248", "0.48594657", "0.48457488", "0.48370463", "0.48339376", "0.48264435", "0.48181325", "0.48150733", "0.48061886", "0.48023507", "0.48010427", "0.47974676", "0.47915453", "0.47904238", "0.47771016", "0.4774446", "0.4772312", "0.47720176", "0.47661567", "0.47647184", "0.47510034", "0.47497237", "0.47440416", "0.47340426", "0.47288144", "0.47282985", "0.47256494", "0.4721439", "0.4720588", "0.4700243", "0.46989295", "0.4690944", "0.468691", "0.46820801", "0.46810493", "0.46764454", "0.46610644", "0.46595216", "0.46557668", "0.4652559", "0.46444824", "0.46394306", "0.4636373", "0.46363038", "0.46356037", "0.46339726", "0.4631781", "0.46271133", "0.4625596", "0.46231246", "0.462113", "0.460918", "0.46073878", "0.46056536", "0.4601939", "0.46008795", "0.45999116", "0.4584201", "0.45821086", "0.45774263", "0.4572583", "0.4566787", "0.45534375", "0.45501408", "0.45499766", "0.45456406", "0.4542731", "0.4542133", "0.45368248", "0.45304817", "0.45287925", "0.45264128", "0.45189", "0.45067364", "0.45048574", "0.450452" ]
0.0
-1
Initialization of the tool. Make database and table and connect to the database
def __init__(self): self.cnx = mysql.connector.connect(user='root', password='', host='127.0.0.1', database='songstorage') # Connect to mySQL, username root, password none self.cursor = self.cnx.cursor() # Initialize the mySQL cursor self.cursor.execute("SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'songstorage'" " AND table_name = 'songs'") # Check the existence of the table result = self.cursor.fetchall() if result[0][0] == 0: self.cursor.execute( "CREATE TABLE songs (ID INT NOT NULL AUTO_INCREMENT, file_title VARCHAR(255), song_title VARCHAR(" "255), artist VARCHAR(255), form VARCHAR(255), data VARCHAR(255), tag VARCHAR(255), PRIMARY KEY (" "ID))") # Create the table if it doesn't already exist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()", "def create_db(self):", "def setdb():\n\n if not database_exists(DB_URL):\n print('Creating database.')\n create_database(DB_URL)\n\n print('Creating tables.')\n db.create_all()\n print('Shiny!')", "def initialize():\n\n db.connect() # Se conecta\n db.create_tables([Entry], safe=True) # Crea las tablas\n # safe=true evita crear modelos ya creados", "def setup_db(self) -> None:\n conn = mysql.connector.connect(\n user=self.app.config[\"DATABASE_USER\"], password=self.app.config[\"DATABASE_PASSWORD\"],\n host=self.app.config[\"DATABASE_HOST\"], port=self.app.config[\"DATABASE_PORT\"], raise_on_warnings=True\n )\n try:\n cursor = conn.cursor()\n cursor.execute(\n \"CREATE DATABASE IF NOT EXISTS {} CHARACTER SET utf8\".format(self.app.config[\"DATABASE_NAME\"])\n )\n conn.commit()\n except:\n raise\n else:\n with self.DBManager(self.app) as connection:\n for model in sorted(lib.get_subclasses(lib.models.Model), key=lambda x: x.index):\n model.setup_table(connection=connection)\n finally:\n conn.close()", "def setup_database(self):\n self.db.setup_database()", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User, Entry], safe=True)\n DATABASE.close()", "def init_db():\n # with current_app.open_resource(\"schema.sql\") as f:\n # db.executescript(f.read().decode(\"utf8\"))\n print(\"初始化数据库脚本文件!!!\")", "def initialize_database():\n # TODO: Refactor the funtime library\n this.db = Store(this.host).create_lib(this.store_name).get_store()", "def init(self):\n self.db.connect()\n try:\n self.db.create_tables([JambiModel], safe=True)\n JambiModel.create(ref='0')\n self.logger.info('Database initialized')\n except IntegrityError:\n self.logger.info('Database was already initialized')\n self.db.close()", "def _create_database(self):\n self._connect()\n cursor = self._connection.cursor()\n cursor.execute(make_table_creation_command(\"reviews\", FIELD_DESCRIPTIONS))\n self._connection.commit()", "def initialize():\n \n db.connect()\n db.create_tables([Product], safe=True)", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "def initialise_bdd(self):\n print(fr.FR[1])\n self.base.create_database(\"sql/p5.sql\")\n print(fr.FR[2])\n self.category_table.save_category()\n print(fr.FR[3])", "def create_database():\n with connection:\n connection.execute(CREATE_MOVIE_TABLE)\n connection.execute(CREATE_USER_TABLE)\n connection.execute(CREATE_WATCHED_TABLE)", "def __init__(self):\n\n self.connection = sqlite3.connect(self.dabatabase_name, uri=True)\n self.connection.cursor()\n self.connection.execute(self._create_table_stm)\n self.connection.commit()", "def main():\r\n\r\n # delete the database file if it already exists\r\n db_path = Path('../../data/db.sqlite')\r\n db_path.unlink(missing_ok=True)\r\n\r\n # create the database\r\n with sqlite3.connect(db_path) as connection:\r\n create_database(connection)", "def init_db_command() -> None:\n db.create_all()\n click.echo(\"Initialized database.\")", "def initDb(self) -> None:\n try: \n connection = None \n dump = open('db/db.sql')\n sql_str = dump.read() \n connection = self.connect()\n cursor = connection.cursor()\n cursor.executescript(sql_str)\n acs_ports = JsonSettings.parseJson('settings.json','AcsPorts')\n db_ports = JsonSettings.parseJson('settings.json','DbPorts')\n acs_port_names = 
JsonSettings.getKeys('acs_port_', acs_ports) \n db_port_names = JsonSettings.getKeys('db_port_', db_ports)\n for acs_port_name in acs_port_names:\n cursor.execute(f\"ALTER TABLE ports ADD COLUMN {acs_port_name} INTEGER\")\n for db_port_name in db_port_names:\n cursor.execute(f\"ALTER TABLE ports ADD COLUMN {db_port_name} INTEGER\") \n except Exception as e:\n logging.error(f'{self.cn} Error \\n{e}', exc_info=1)\n finally:\n connection.commit() \n tables = cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table' AND name != 'sqlite_sequence'\")\n logging.info(f'{self.cn} Database created with tables:\\n{tables.fetchall()}')\n if connection:\n connection.close()", "def make_db():\n\n db.create_all()", "def _create_db(self):\n self.db = easydms.dbcore.Database(\":memory:\")\n self.db.create_db()", "def init_database():\n database.init(DATABASE_NAME)\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON')\n if not database.table_exists([Customer]):\n database.create_tables([Customer])\n database.close()", "def init_db_command():\n students_db.create_all()\n click.echo('Initialized the database.')", "def init_db_command():\n db.create_all()\n Contact.as_unique(db.session, email='[email protected]', name='No Name')\n ac = Account.as_unique(db.session, email='[email protected]',\n nickname='no.name')\n db.session.flush()\n Thread.as_unique(db.session, account=ac, thread_id='No thread')\n db.session.commit()\n click.echo('Initialized the database.')", "def create():\n\tcreate_db()", "def create_db():\n db.create_all()\n print('Database structure created successfully')", "def setup(db_name = 'net.db', **extra_params):\n global db_run # Imports the DB from the simulator\n \n# # If the file already exists delete it\n if DEBUG: print \"[ pyNN ] : Opening DB\", os.path.abspath(db_name)\n if os.path.exists(db_name):\n if DEBUG: print \"[ pyNN ] : DB already initialized... cleaning up... 
removing file %s\" % db_name\n os.remove(db_name)\n db_run = db(db_name) # Creates the DB \n db_run.init_db() # Initializes the DB\n return(db_run)", "def main():\n cur, conn = create_database()\n \n drop_tables(cur, conn)\n create_tables(cur, conn)\n\n conn.close()", "def init_db():\n db.drop_all()\n db.create_all()\n\n print(\"Initialized Connect 4 Database.\")", "def init_db():\n # Open connection to the database\n conn = sqlite3.connect(DB_PATH)\n cursor = conn.cursor()\n\n # Open the schema file and execute its SQL code\n with current_app.open_resource('schema.sql') as db_schema:\n cursor.executescript(db_schema.read().decode('utf8'))\n\n # Save (commit) the changes\n conn.commit()\n\n # We can also close the connection if we are done with it.\n conn.close()", "def init_db():\n # We are setting the module variables here for the first time, so disable the warning\n global DB_USER_TABLE # pylint: disable=global-variable-undefined\n global DB_CUSTOMER_TABLE # pylint: disable=global-variable-undefined\n global DB_USER_CUSTOMER_RELS_TABLE # pylint: disable=global-variable-undefined\n global DB_TICKET_TABLE # pylint: disable=global-variable-undefined\n global DB_COMMENT_TABLE # pylint: disable=global-variable-undefined\n\n db = TinyDB(app.config['DB_NAME'])\n\n DB_USER_TABLE = db.table('users')\n DB_CUSTOMER_TABLE = db.table('customers')\n DB_USER_CUSTOMER_RELS_TABLE = db.table('user_customer_rels')\n DB_TICKET_TABLE = db.table('tickets')\n DB_COMMENT_TABLE = db.table('comments')", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User], safe=True)\n DATABASE.close()", "def init():\n\n # delete existing file\n if os.path.exists(DBFILE):\n os.remove(DBFILE)\n\n db = sqlite3.connect(DBFILE)\n # create tables\n create(db, PARAGRAPH, \"paragraph\")\n create(db, QUESTION, \"question\")\n create(db, ANSWER, \"answer\")\n\n return db", "def init_database(self):\n # init_database(self.engine)", "def init():\n print(\"Executing initialization\")\n print(db.dsn)\n cursor = yield momoko.Op(\n db.execute,\n \"\"\"\n DROP SCHEMA public CASCADE;\n CREATE SCHEMA public;\n CREATE TABLE game\n (\n game_id text PRIMARY KEY,\n players integer,\n state bytea,\n timestamp timestamp\n );\n CREATE UNIQUE INDEX ix_game_id\n ON game\n (game_id);\n CREATE INDEX ix_timestamp\n ON game\n (timestamp);\n \"\"\")\n try:\n print(cursor.fetchall())\n except psycopg2.ProgrammingError:\n pass\n io = ioloop.IOLoop.instance()\n io.stop()", "def setup(self):\n #print \"Creating test database...\"\n files = glob.glob(os.path.join(self.home_dir, 'sqlFiles', '*.sql'))\n for fls in files:\n loc = fls.rfind('/')\n #print(\" \" + fls.replace('.sql', '')[loc + 1:])\n flh = open(fls, 'r')\n curs = self.cursor()\n curs.executescript(flh.read())\n self.commit()\n curs.close()\n flh.close()\n for fls in ['INSERTS', 'TRIGGERS']:\n #print(fls)\n flh = open(os.path.join(self.home_dir, 'sqlFiles', fls), 'r')\n curs = self.cursor()\n curs.executescript(flh.read())\n self.commit()\n curs.close()\n flh.close()", "def init_db():\n db = get_db()\n Page.create_table(db)\n PageVersion.create_table(db)\n User.create_table(db)", "def create_database_and_tables():\n create_tables(\"gui_database.db\")\n tkinter.Button(m, text=\"Add Person\", width=25, state=tkinter.NORMAL, command=add_person).grid(row=1, column=0)\n tkinter.Button(m, text=\"Add Student\", width=25, state=tkinter.NORMAL, command=add_student).grid(row=2, column=0)", "def __init_database(self):\n from admin.database import init_db\n init_db()", "def __init__(self):\n 
self.__db = sqlite3.connect(DB_PATH)\n self.__cur = self.__db.cursor()\n self.__create_tables()", "def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()", "def initialize():\n DATABASE.connect()\n DATABASE.drop_tables([Journal], safe=True)\n DATABASE.create_tables([Journal], safe=True)\n DATABASE.close()", "def main():\n db = _db.Database(experiment.ORACLE_PATH)\n db.populate_kernel_names_table()\n db.commit()", "def init_db():\n\tdb.drop_all()\n\tdb.create_all()\n\n\tprint(\"Initialized Database.\")\n\treturn", "def CreateDB(self) :\r\n\t\ttry :\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateClassTable'])\r\n\t\t\tfor ii,classname in enumerate(self.SQLCMDs['ClassesList']) :\r\n\t\t\t\tself.DB_Cursor.execute(self.SQLCMDs['InsertClass'],(ii,classname))\r\n\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateSetTable'])\r\n\t\t\tfor ii,setname in enumerate(self.SQLCMDs['SetList']) :\r\n\t\t\t\tself.DB_Cursor.execute(self.SQLCMDs['InsertSet'],(ii,setname))\r\n\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateSampleTable'])\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateDictListTable'])\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateDictBuildTable'])\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateWordLists'])\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateFeatureTable'])\r\n\t\t\tself.DB_Connect.commit()\r\n\t\texcept Exception as detail:\r\n\t\t\tlogging.error(\"Failed to create the database: %s\"%detail)\r\n\t\t\tself.DB_Connect.rollback()\r\n\t\treturn", "def create_db():\n db.create_all()\n print ('Intialized....!')", "def main():\n cur, conn = create_database()\n \n drop_tables(cur, conn)\n create_tables(cur, conn)\n\n cur.close();\n conn.close()", "def __init__(self):\n self.db = Databank()\n self.db.connection()\n # self.db.cursor.execute('USE library')", "def setup():\n global connection\n connection = MySQLdb.connect(host=config.get('mysql.host'),\n user=config.get('mysql.user'),\n passwd=config.get('mysql.password'),\n db=config.get('mysql.db'),\n ssl={'ca' : config.get('mysql.cert')})\n init_model(connection)", "def setUpClass(cls):\n cls.database_connection = DatabaseHandler(database_path)\n cls.database_connection.connect()\n processing.create_table_if_not_exist(cls.database_connection, table_name)\n cls.database_connection.close()", "def setup(self):\n self.load_connection_info(self.ini_filename)\n if self.conn_info:\n self.logger.info('Load connection info of Postgres')\n\n psql_connection_info = f\"dbname={self.conn_info['dbname']} \" \\\n f\"user={self.conn_info['user']} \" \\\n f\"password={self.conn_info['password']} \" \\\n f\"port={self.conn_info['port']}\" \n \n check_db = self.create_db(psql_connection_info)\n\n connection = psycopg2.connect((\n f\"dbname=password_manager \" \\\n f\"user={self.conn_info['user']} \" \\\n f\"password={self.conn_info['password']} \" \\\n f\"port={self.conn_info['port']}\")) \n cursor = connection.cursor()\n\n if check_db:\n self.logger.info('Database has been created')\n\n check_tables = self.create_tables(connection, \n cursor, \n self.sql_query_table_person, \n self.sql_query_table_login_data)\n \n if check_tables:\n self.logger.info('Tables have been created')\n else:\n self.logger.info('Tables do not exist')\n else:\n self.logger.info('Database does not exist')\n \n connection.close()\n cursor.close()\n else:\n self.logger.info('Connection to Postgres could not esablished')", "def create_db(self):\n return None", "def init_db_command():\r\n 
init_db()\r\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo(\"Initialized the database.\")", "def init_db_command():\n init_db()\n click.echo(\"Initialized the database.\")", "def setup_database() -> sqlite3.Cursor:\n conn = sqlite3.connect(':memory:')\n cursor = conn.cursor()\n\n insert_books(cursor)\n insert_lookups(cursor)\n\n return cursor", "def dbinit( *args, **kwargs ):", "def create_database():\n create_db(app)", "def initdb_cmd():\n init_db()\n print(\"database initialized\")", "def init_db() -> None:\n conn = sqlite3.connect('../Utils/map_storage.db')\n cursor = conn.cursor()\n\n with conn:\n station_cmd = \"\"\"CREATE TABLE IF NOT EXISTS\n nodes(city TEXT, name TEXT, is_station TEXT, x INT, y INT, zone TEXT)\"\"\"\n\n cursor.execute(station_cmd)\n\n connection_cmd = \"\"\"CREATE TABLE IF NOT EXISTS\n connections(city TEXT, name_1 TEXT, name_2 TEXT, color TEXT)\"\"\"\n\n cursor.execute(connection_cmd)", "def init_database(self):\n engine = create_engine('sqlite:///todo.db?check_same_thread=False')\n self.Base.metadata.create_all(engine)\n self.session = sessionmaker(bind=engine)()", "def setup_db(self, dbfile):\n print \"db:\", dbfile\n # \"connect to the file\" (or create it)\n self.con = sqlite3.connect(dbfile)\n # create the tables if they don't exist\n self.con.execute(\n 'create table if not exists fc(feature,category,count)')\n self.con.execute('create table if not exists cc(category,count)')\n self.con.execute('create table if not exists ct(category,threshold)')", "def create_db():\n db.create_all()\n click.echo(\"Banco de dados criado\")", "def start_app() -> sqlite3.Connection:\n con = sqlite3.connect(db_name)\n cur = con.cursor()\n cur.execute('PRAGMA foreign_keys = ON')\n con.commit()\n if not check_table(cur, 'user'):\n _create_table_user(cur)\n if not check_table(cur, 'pushup'):\n _create_table_pushup(cur)\n if not check_table(cur, 'plank'):\n _create_table_plank(cur)\n return con", "def __init__(self, db_location = ':memory:'):\n self.connection = sqlite3.connect(db_location)\n self.cur = self.connection.cursor()\n self.create_table()", "def init_database(self):\n init_database(self.engine)", "def init_db_command():\n init_db()\n close_db()\n click.echo('Initialized the database.')", "def setup_db_conn():\n # TODO update so DB does not have to be hard coded\n # Currently DB is hardcoded", "def main():\n database = dbdir + \"pyflangun.db\"\n sql_create_airports_table = \"\"\"CREATE TABLE IF NOT EXISTS airports (\n id integer PRIMARY KEY,\n name text NOT NULL,\n icao text\n );\"\"\"\n sql_create_weather_table = \"\"\"CREATE TABLE IF NOT EXISTS weather (\n id integer PRIMARY KEY,\n icao text NOT NULL,\n temp_f integer NOT NULL,\n dew_pt_f integer NOT NULL,\n status integer NOT NULL,\n FOREIGN KEY (icao) REFERENCES airports (icao)\n );\"\"\"\n # create a database connection\n conn = create_connection(database)\n # create tables\n if conn is not None:\n create_table(conn, sql_create_airports_table)\n create_table(conn, sql_create_weather_table)\n else:\n print(\"Error! 
cannot create the database connection.\")", "def create_db():\n db.create_all()\n click.echo(\"DB criado com sucesso!\")", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def connectDB(self):\n dbFilePath=\"%s%s%s.sqlite\"%(self.settings.logDir,os.sep,self.settings.mainLogFileName)\n self.DBconnection=sqlite3.connect(dbFilePath, check_same_thread = False) # 026 check_same_thread needed in bow.\n self.DBcursor=self.DBconnection.cursor()\n self.createTableBOW(self.DBcursor)", "def create_db():\n db.create_all()\n print(\"DB Created\")", "def init_db_command():\n click.echo(\"Initializing the database.\")\n init_db()\n click.echo(\"Initialized the database.\")", "def initdb_command():\n db.drop_all()\n db.create_all()\n if LOAD_DUMMY_DATA:\n setup_dummy_data()\n\n print('Initialized the database.')", "def setup_database():\n from django.core.management import call_command\n from django import setup\n setup()\n call_command('migrate', verbosity=0, interactive=False)\n call_command('loaddata', data('initial_data.json'), verbosity=0, interactive=False)", "def init_db_command():\n db_init()\n click.echo('Initialized the database.')", "def _db_setup(self):\n self.get_connection()\n sql_file = open(db_config.DATABASE_TABLES_SETUP_FILE, 'r')\n with self.conn.cursor() as cur:\n cur.execute(sql_file.read())\n self.conn.commit()\n logger.info(f'The script {db_config.DATABASE_TABLES_SETUP_FILE} has run.')", "def init_db_command():\n init_db()\n click.echo('Initialized the database.')", "def __init__(self, database_name):\n self.conn = sqlite3.connect(\"output/%s.db\" % database_name)", "def setup_database():\n\n user = 'bard'\n password = 'STORY'\n database = 'story'\n DSN = f\"postgresql://{user}:{password}@postgres:5432/{database}\"\n engine = create_engine(DSN)\n register_tables(engine)\n return engine", "def init_db(self):\n print(\"Initializing database...\", end='')\n self.cursor.execute(\"DROP DATABASE %s\" % self.db.database)\n self.__init__(self.db_name)\n self.cursor.execute(\"USE %s\" % self.db.database)\n\n # Book\n self.cursor.execute(\n \"\"\"CREATE TABLE Book (\n ISBN VARCHAR(13),\n title VARCHAR(300) COLLATE utf8_general_ci,\n publisher VARCHAR(100) COLLATE utf8_general_ci,\n lang VARCHAR(40),\n publicationDate DATE,\n pageCount SMALLINT CHECK(pageCount >= 0),\n stock SMALLINT CHECK(stock >= 0),\n price DECIMAL(5,2),\n subject VARCHAR(100),\n avg_rating DECIMAL(4,2) CHECK(avg_rating <= 10.00),\n total_rating_score INT DEFAULT 0,\n num_ratings INT DEFAULT 0,\n PRIMARY KEY (ISBN))\"\"\")\n\n # Author\n self.cursor.execute(\n \"\"\"CREATE TABLE Author (\n ID INT AUTO_INCREMENT,\n name VARCHAR(200) COLLATE utf8_general_ci,\n lang VARCHAR(40),\n PRIMARY KEY (ID))\"\"\")\n\n # CustomerPersonal\n self.cursor.execute(\n \"\"\"CREATE TABLE CustomerPersonal (\n phone CHAR(10),\n address VARCHAR(300) NOT NULL,\n PRIMARY KEY (phone))\"\"\")\n\n # CustomerCredentials\n self.cursor.execute(\n \"\"\"CREATE TABLE 
CustomerCredentials (\n loginID VARCHAR(30),\n firstName VARCHAR(50) NOT NULL,\n lastName VARCHAR(50) NOT NULL,\n salt VARBINARY(32) NOT NULL,\n pass_key VARBINARY(32) NOT NULL,\n phone CHAR(10) NOT NULL,\n PRIMARY KEY (loginID),\n FOREIGN KEY (phone) REFERENCES CustomerPersonal(phone)\n ON UPDATE CASCADE ON DELETE RESTRICT)\"\"\")\n\n # ManagerPersonal\n self.cursor.execute(\n \"\"\"CREATE TABLE ManagerPersonal (\n phone CHAR(10),\n address VARCHAR(300) NOT NULL,\n PRIMARY KEY (phone))\"\"\")\n\n # ManagerCredentials\n self.cursor.execute(\n \"\"\"CREATE TABLE ManagerCredentials (\n loginID VARCHAR(30),\n managerID INT UNIQUE NOT NULL AUTO_INCREMENT,\n firstName VARCHAR(50),\n lastName VARCHAR(50),\n salt VARBINARY(32) NOT NULL,\n pass_key VARBINARY(32) NOT NULL,\n phone CHAR(10) NOT NULL,\n PRIMARY KEY (loginID),\n FOREIGN KEY (phone) REFERENCES ManagerPersonal(phone)\n ON UPDATE CASCADE ON DELETE RESTRICT)\"\"\")\n\n # Comment\n self.cursor.execute(\n \"\"\"CREATE TABLE Comment (\n commentID INT AUTO_INCREMENT,\n ISBN VARCHAR(13) NOT NULL,\n loginID VARCHAR(30) NOT NULL,\n score TINYINT NOT NULL,\n message TEXT,\n veryUseful INT DEFAULT 0,\n useful INT DEFAULT 0,\n useless INT DEFAULT 0,\n avg_usefulness DECIMAL (3,2),\n commentDate DATETIME,\n PRIMARY KEY (commentID),\n FOREIGN KEY (ISBN) REFERENCES Book(ISBN)\n ON UPDATE RESTRICT ON DELETE CASCADE,\n FOREIGN KEY (loginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE)\"\"\")\n\n # OrderLog\n self.cursor.execute(\n \"\"\"CREATE TABLE OrderLog (\n orderNumber INT AUTO_INCREMENT,\n loginID VARCHAR(30) NOT NULL,\n orderDate DATE,\n PRIMARY KEY (orderNumber),\n FOREIGN KEY (loginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE)\"\"\")\n\n # Return Request\n self.cursor.execute(\n \"\"\"CREATE TABLE ReturnRequest (\n requestID INT AUTO_INCREMENT,\n orderNumber INT NOT NULL,\n requestDate DATE,\n ISBN VARCHAR(13) NOT NULL,\n quantity SMALLINT,\n status VARCHAR(25) DEFAULT 'PENDING',\n PRIMARY KEY (requestID),\n FOREIGN KEY (orderNumber) REFERENCES OrderLog(orderNumber)\n ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\")\n\n # # HasKeyword\n # self.cursor.execute(\n # \"\"\"CREATE TABLE HasKeyword (\n # ISBN VARCHAR(13),\n # word VARCHAR(50) COLLATE utf8_general_ci,\n # PRIMARY KEY (ISBN, word),\n # FOREIGN KEY (ISBN) REFERENCES Book(ISBN)\n # ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\")\n\n # Wrote\n self.cursor.execute(\n \"\"\"CREATE TABLE Wrote (\n authorID INT,\n ISBN VARCHAR(13),\n PRIMARY KEY (authorID, ISBN),\n FOREIGN KEY (authorID) REFERENCES Author(ID)\n ON UPDATE RESTRICT ON DELETE RESTRICT,\n FOREIGN KEY (ISBN) REFERENCES Book(ISBN)\n ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\")\n\n # ProductOf\n self.cursor.execute(\n \"\"\"CREATE TABLE ProductOf (\n ISBN VARCHAR(13),\n orderNumber INT,\n quantity SMALLINT CHECK(quantity > 0),\n PRIMARY KEY (ISBN, orderNumber),\n FOREIGN KEY (ISBN) REFERENCES Book(ISBN)\n ON UPDATE RESTRICT ON DELETE CASCADE,\n FOREIGN KEY (orderNUmber) REFERENCES OrderLog(orderNumber)\n ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\")\n\n # Trusts\n self.cursor.execute(\n \"\"\"CREATE TABLE Trusts (\n loginID VARCHAR(30),\n otherLoginID VARCHAR(30) CHECK(loginID<>otherLoginID),\n trustStatus VARCHAR(9) CHECK(trustStatus = 'TRUSTED' OR trustStatus = 'UNTRUSTED'),\n PRIMARY KEY (loginID, otherLoginID),\n FOREIGN KEY (loginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE,\n FOREIGN KEY (otherLoginID) REFERENCES 
CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE)\"\"\")\n\n # Rates\n self.cursor.execute(\n \"\"\"CREATE TABLE Rates (\n loginID VARCHAR(30),\n commentID INT,\n rating VARCHAR(10) NOT NULL,\n PRIMARY KEY (loginID, commentID),\n FOREIGN KEY (loginID) REFERENCES CustomerCredentials(loginID)\n ON UPDATE CASCADE ON DELETE CASCADE,\n FOREIGN KEY (commentID) REFERENCES Comment(commentID)\n ON UPDATE RESTRICT ON DELETE CASCADE)\"\"\"\n )\n\n print(\"done\")", "def main() :\n\n global dbMan\n dbMan = DBManager( 'CMS_HCL_APPUSER_R/HCAL_Reader_55@localhost:1521/cms_omds_lb.cern.ch' )", "def os_start_db( self, ):\r\n pass", "def initialize():\n db.connect()\n db.create_tables([Expense], safe=True)", "def create_db(self, cnx):\n self.cursor = cnx.cursor()\n try:\n # Opening the file containing the SQL script\n sql_file = open(PATH_FILE, 'r')\n # Read file\n sql_text = sql_file.read()\n sql_stmts = sql_text.split(';')\n for s in sql_stmts:\n self.cursor.execute(s)\n # Graphical interface with Tkinter\n self.text_db_is_create = \"The database is created.\"\n # Mode console\n print(\"The database is created\")\n # Make sure db is committed\n self.cnx.commit()\n\n except connector.Error as err:\n # Graphical interface with Tkinter\n self.text_db_is_create = \"Failed creating database.\"\n # Mode console\n print(\"Failed creating database: {}\".format(err))\n exit(1)\n\n else:\n self.cursor.close()\n return cnx", "def init_db(self):\n\n # The user can provide a custom string\n if self.database is None:\n self.logger.error(\"You must provide a database url, exiting.\")\n sys.exit(1)\n\n self.engine = create_engine(self.database, convert_unicode=True)\n self.session = scoped_session(\n sessionmaker(autocommit=False, autoflush=False, bind=self.engine)\n )\n\n # Database Setup\n Base.query = self.session.query_property()\n\n # import all modules here that might define models so that\n # they will be registered properly on the metadata. 
Otherwise\n # you will have to import them first before calling init_db()\n import expfactory.database.models\n\n self.Base = Base\n self.Base.metadata.create_all(bind=self.engine)", "def init_new_db(args):\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)\n session = Session()\n session.add(Environment(name='normal', slickurl='http://slicker.homestead-corp.com/slickij', buildurl='?', filename='hs-tcrunij.tar.gz', tcrunijsubdir='hs-tcrunij/tcrunij'))\n session.add(Environment(name='dev', slickurl='http://octomom.homestead-corp.com/slickij', buildurl='?', filename='tcrunij.tar.gz', tcrunijsubdir='tcrunij/tcrunij'))\n session.commit()", "def __init__(self):\n try:\n self.con = connector.connect(host='localhost', port='3306',user='root',password='Ganesh@298',database='pythontest')\n query = 'create table if not exits user(userId int primary key,userName varchar(200),phone varchar(12))'\n cur = self.con.cursor()\n cur.execute(query)\n logger.info(\"created\")\n except Exception as e:\n logger.error('Failed to connect database', e)", "def setupDatabases(self):\n param = self.getDefaultDatabaseConnectionParameter()\n db = DatabaseFactory.getDatabase(self.defaultDriver(), {})\n db.createDatabase(param)\n db.connect(param)\n if db.isConnected():\n self.settingsDb = db\n db.createObservations()\n db.createSensors()\n else:\n return False\n # replace by settings validation method later\n return self.check()", "def setup_database():\n with sqlite3.connect(DB_STRING) as con:\n con.execute(\"CREATE TABLE data (code, message)\")", "def _initDb(self):\n CREATE_TOKEN_TABLE = '''create table token\n (token text, id int primary key)\n '''\n CREATE_DOCS_TABLE = '''create table docs\n (local_path text, resource_id text primary key, etag text, title text)\n '''\n \n try:\n self.db.execute(CREATE_TOKEN_TABLE)\n self.db.execute(CREATE_DOCS_TABLE)\n except sqlite3.OperationalError, error:\n pass", "def create_tables():\r\n db = connect_database()\r\n table_wait = \"waiting\"\r\n table_helped = \"helped\"\r\n table_help = \"help\"\r\n param_name = ['cus_num', 'name', 'username', 'ru_id', 'os_platform', 'description']\r\n param_type1 = ['INTEGER PRIMARY KEY AUTOINCREMENT', 'TEXT', 'TEXT', 'TEXT', 'TEXT', 'TEXT']\r\n param_type2 = ['INTEGER PRIMARY KEY', 'TEXT', 'TEXT', 'TEXT', 'TEXT', 'TEXT']\r\n with db:\r\n create_table(db, table_wait, param_name, param_type1)\r\n create_table(db, table_helped, param_name, param_type2)\r\n create_table(db, table_help, param_name, param_type2)\r\n db.close()" ]
[ "0.7486164", "0.7400551", "0.7391027", "0.73457515", "0.7313018", "0.726847", "0.7231708", "0.71860963", "0.7173379", "0.71718526", "0.7118039", "0.7113357", "0.7111005", "0.7111005", "0.7058268", "0.70503473", "0.70164067", "0.6996071", "0.6991648", "0.69865584", "0.6979697", "0.6977646", "0.69767714", "0.696909", "0.6966804", "0.69663465", "0.6957075", "0.6955296", "0.69519556", "0.69383013", "0.69338137", "0.6931848", "0.6924019", "0.6919347", "0.69108415", "0.6896852", "0.68908966", "0.68804646", "0.6871736", "0.6868855", "0.686734", "0.6865559", "0.6852465", "0.68499726", "0.68357575", "0.68327063", "0.68256885", "0.6823942", "0.682365", "0.6822247", "0.68102956", "0.68063897", "0.6793526", "0.67864233", "0.6783838", "0.6783838", "0.67822284", "0.6779421", "0.6776869", "0.677653", "0.6773636", "0.6768486", "0.6768311", "0.6755601", "0.675284", "0.6750776", "0.674716", "0.6746375", "0.67461044", "0.6744072", "0.6743814", "0.6740476", "0.6740476", "0.6740476", "0.6740476", "0.6740476", "0.6740476", "0.6740476", "0.6740476", "0.67388463", "0.6732661", "0.6726352", "0.67263204", "0.6720774", "0.67159", "0.6714074", "0.67134464", "0.67115355", "0.6705504", "0.670406", "0.6703812", "0.66984993", "0.6696253", "0.669393", "0.6690267", "0.6689581", "0.6679458", "0.6676343", "0.6674005", "0.66652924", "0.66619414" ]
0.0
-1
Waits for a command and calls the right function.
def start_tool(self): while True: com = input("Give a command. Type H for help...:\t ").lower() # Receive a command from the user if com == 'h': print('Available commands: H, Exit, Play, Stop, Pause, Add_song, Delete_song, Modify_data, ' 'Create_save_list, Search') elif com == 'exit': if self.isPlaying: # Check if there's any song playing or paused and stop it before exiting self.isPlaying = False self.playSong[0].stop() self.playSong.clear() print("Exiting...") break elif com == 'play': if not self.isPlaying: # Play the song if none is currently paused self.play_song() else: stop = input( "There is a song paused or currently playing. Do you want to stop it and play another song? [" "Y/N]\t").lower() while True: if stop == 'n': print("The song current song will be resumed...") self.playSong[0].play() # Resume the paused song break elif stop == 'y': self.stop_song() # Stop the paused song self.play_song() # Play another song break else: print("Unknown command. Please answer with Y or N...") elif com == 'stop': self.stop_song() elif com == 'pause': self.pause_song() elif com == 'add_song': print(self.add_song()) elif com == 'delete_song': self.delete_song() elif com == 'modify_data': self.modify_data() elif com == "create_save_list": self.create_save_list() elif com == "search": self.search() else: print("Unknown command. Try again...")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_command(command):\n send_command(command)\n # time.sleep(0.1) # may be required on slow machines\n response = get_response()\n print(\"Rcvd: <<< \" + response)\n return response", "def _execute(self):\n LOG.info(\"Waiting for a message...\")", "def execute_command_async(self, command):\n raise NotImplementedError", "def _do_command(self, cmd, unit, **kwargs):\n self._do_cmd_resp(cmd, unit, write_delay=INTER_CHARACTER_DELAY, **kwargs)", "def waitCmd(self, cmd):\n Hal.cmds.append(cmd)\n # we're not using cmd.wait() since that is not interruptable\n while cmd.poll() is None:\n self.busyWait()\n Hal.cmds.remove(cmd)", "def perform(self, command: Command[E]) -> E:\n self.wait.for_(command)\n return typing.cast(E, self)", "async def module_command_wait(self, ctx, parsed):\n factor = {\"ms\": 1e-3, \"s\": 1, \"m\": 60, \"h\": 3600}\n delay = parsed.args[\"delay\"]\n try:\n delay = float(delay)\n except ValueError:\n suffix = \"\".join(filter(str.isalpha, delay[-2:]))\n if suffix not in factor.keys():\n await ctx.reply_command_result(\n parsed,\n f\"Invalid delay suffix. Valid suffixes: {', '.join(factor.keys())}\",\n )\n delay = float(delay.replace(suffix, \"\")) * factor[suffix]\n cmd = parsed.args[\"command\"]\n args = \" \".join(parsed.args[\"args\"]) or None\n # XXX: Should have a proper way to do this in abc.Message\n parsed.msg.content = f\"{self.cmdprefix}{cmd}\"\n parsed.msg.content += f\" {args}\" if args else \"\"\n parsed.msg.clean_content = parsed.msg.content\n await self.module_commanded(parsed.msg, ctx, delay)", "async def run_command(device, command):\n print(\"Waiting for button presses ...\")\n async for event in device.async_read_loop():\n if EV_KEY == event.type:\n key_event = evdev.KeyEvent(event)\n if evdev.KeyEvent.key_down == key_event.keystate:\n os.system(command)", "async def _wait_execute(self, address, command, args, kw):\n conn = await self.acquire(command, args)\n try:\n return (await conn.execute(command, *args, **kw))\n finally:\n self.release(conn)", "def ask(self, command, query_delay=0):\n self.write(command)\n self.wait_for(query_delay)\n return self.read()", "async def _run_command(self, command, *args, **kwargs):\n pass", "def run_cmd(self, cmd, timeout,\n force_execution=False,\n wait_for_response=True,\n silent_mode=False):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def runCommand(self, command):\n self._expectingCommand = defer.Deferred()\n self.clearBuffer()\n if isinstance(command, unicode):\n command = command.encode(\"utf-8\")\n self.transport.write(command + b'\\n')\n return self._expectingCommand", "def issue(self, cmd):\n self.send([cmd])\n return self.read_until_prompt()[1:] # drop the echo", "async def menu(self, func, *args):\r\n self.waiting = True\r\n func2 = self.cfunc(func)\r\n while self.waiting:\r\n try:\r\n msg = await bot.wait_for_message(author=self.author, check=func2)\r\n if self.active and not self.ended:\r\n self.waiting = False\r\n return (func(msg, *args))\r\n elif self.ended:\r\n self.waiting = False\r\n raise CommandEndedError\r\n else:\r\n pass\r\n except discord.HTTPException:\r\n pass", "async def async_execute_command(self, command, notif):\n if command.startswith('MCU'):\n value = await self.async_call_linkplay_tcpuart(command)\n elif command == 'Reboot':\n value = await self.async_call_linkplay_httpapi(\"getStatus:ip:;reboot;\", None)\n elif command == 'PromptEnable':\n value = await self.async_call_linkplay_httpapi(\"PromptEnable\", None)\n elif command == 'PromptDisable':\n value = await 
self.async_call_linkplay_httpapi(\"PromptDisable\", None)\n elif command == 'RouterMultiroomEnable':\n value = await self.async_call_linkplay_httpapi(\"setMultiroomLogic:1\", None)\n elif command == 'SetRandomWifiKey':\n from random import choice\n from string import ascii_letters\n newkey = (''.join(choice(ascii_letters) for i in range(16)))\n value = await self.async_call_linkplay_httpapi(\"setNetwork:1:{0}\".format(newkey), None)\n if value == 'OK':\n value = value + \", key: \" + newkey\n else:\n value = \"key: \" + newkey\n elif command.startswith('SetApSSIDName:'):\n ssidnam = command.replace('SetApSSIDName:', '').strip()\n if ssidnam != '':\n value = await self.async_call_linkplay_httpapi(\"setSSID:{0}\".format(ssidnam), None)\n if value == 'OK':\n value = value + \", SoftAP SSID set to: \" + ssidnam\n else:\n value == \"SSID not specified correctly. You need 'SetApSSIDName: NewWifiName'\"\n elif command.startswith('WriteDeviceNameToUnit:'):\n devnam = command.replace('WriteDeviceNameToUnit:', '').strip()\n if devnam != '':\n value = await self.async_call_linkplay_httpapi(\"setDeviceName:{0}\".format(devnam), None)\n if value == 'OK':\n self._name = devnam\n value = value + \", name set to: \" + self._name\n else:\n value == \"Device name not specified correctly. You need 'WriteDeviceNameToUnit: My Device Name'\"\n elif command == 'TimeSync':\n import time\n tme = time.strftime('%Y%m%d%H%M%S')\n value = await self.async_call_linkplay_httpapi(\"timeSync:{0}\".format(tme), None)\n if value == 'OK':\n value = value + \", time: \" + tme\n elif command == 'Rescan':\n self._unav_throttle = False\n self._first_update = True\n # await self.async_schedule_update_ha_state(True)\n value = \"Scheduled to Rescan\"\n elif command == 'Update':\n # await self.async_schedule_update_ha_state(True)\n value = \"Scheduled to Update\"\n else:\n value = \"No such command implemented.\"\n _LOGGER.warning(\"Player %s command: %s, result: %s\", self.entity_id, command, value)\n\n _LOGGER.debug(\"Player %s executed command: %s, result: %s\", self.entity_id, command, value)\n\n if notif:\n self.hass.components.persistent_notification.async_create(\"<b>Executed command:</b><br>{0}<br><b>Result:</b><br>{1}\".format(command, value), title=self.entity_id)", "def wait():\n pass", "def issueCommand(self,command, timeout=3, message=None):\n p = self.spawnProc\n p.sendline(command)\n #self._checkCommandStatus() ", "def __send(self, cmd, delay=.1):\n\n self.__write(cmd)\n\n if delay is not None:\n print(\"wait: %d seconds\" % delay)\n time.sleep(delay)\n\n return self.__read()", "def do_command(command):\n send_command(command)\n response = get_response()\n print(\"Rcvd: <<< \\n\" + response)\n return response", "def _send_command(self, cmd, wait_msg=None):\n try:\n if not (self.sock_thr and self.sock_thr.is_active()):\n if cmd not in (TSMessage.STOP, TSMessage.SHUTDOWN):\n self._createThread()\n else:\n return\n # Waiting message if need\n log.d('>> \"{0}\"'.format(cmd))\n if wait_msg:\n with self.waiting.lock:\n log.d('wait message: {0}'.format(wait_msg))\n try:\n self.waiting.msg = wait_msg\n self.waiting.event.clear()\n self.waiting.abort.clear()\n self.sock.send(to_bytes(cmd + '\\r\\n'))\n for t in range(AcePlayer.TIMEOUT_FREEZE * 3): # @UnusedVariable\n log.d(\"waiting message {msg} ({t})\".format(msg=wait_msg, t=t))\n if not self.waiting.msg or self.sock_thr.error or defines.isCancel():\n raise ValueError('Abort waiting message: \"{0}\"'.format(wait_msg))\n if self.waiting.wait(1):\n return self.waiting.msg\n\n 
self.parent.showStatus(\"Ошибка ожидания. Операция прервана\")\n raise ValueError('AceEngine is freeze')\n\n except Exception as e:\n log.e('_wait_message error: {0}'.format(uni(e)))\n self.waiting.msg = None\n if not Flags.manual_stopped.is_set():\n self.autoStop()\n return\n\n else:\n self.sock.send(to_bytes(cmd + '\\r\\n'))\n return True\n\n except Exception as e:\n log.e('_send_command error: \"{0}\" cmd: \"{1}\"'.format(uni(e), cmd))\n\n if self.sock_thr and self.sock_thr.is_active():\n self.sock_thr.end()", "async def async_send_command(self, cmd_type, command=\"\"):\n data_cmd = None\n\n if cmd_type == \"setvolume\": # sets volume\n data_cmd = _command(COMMAND_SET_VOLUME, [int(command)])\n elif cmd_type == \"stepvolume\": # steps volume up or down\n if command == \"up\":\n data_cmd = _command(COMMAND_VOLUME_UP)\n elif command == \"down\":\n data_cmd = _command(COMMAND_VOLUME_DOWN)\n elif cmd_type == \"audiomute\": # mutes audio\n if command == \"on\":\n data_cmd = _command(COMMAND_MUTE)\n elif command == \"off\":\n data_cmd = _command(COMMAND_UNMUTE)\n elif cmd_type == \"selectchannel\": # changes channel\n data_cmd = _command(COMMAND_SET_CHANNEL, [command])\n elif cmd_type == \"stepchannel\": # steps channel up or down\n if command == \"up\":\n data_cmd = _command(COMMAND_CHANNEL_UP)\n elif command == \"down\":\n data_cmd = _command(COMMAND_CHANNEL_DOWN)\n else:\n return\n\n await self._async_send_command(data_cmd)", "def respond(cmd,t,p):\n\tt.write(cmd)\n\treturn wait(t,p)", "def __run_cmd(self, command, timeout, expected_result):\n if command.strip().lower().find(\"[sleep(\") != -1:\n command = command.strip().lower().replace(\"[sleep(\", \"\")\n command = command.replace(\")]\", \"\")\n sleep_time = float(command)\n time.sleep(sleep_time)\n status = Global.SUCCESS\n output = \"Success\"\n elif command.strip().lower().find(\"[usb_plug]\") != -1:\n if self._device is not None and self._io_card is not None:\n self._io_card.usb_host_pc_connector(True)\n self._device.connect_board()\n status = Global.SUCCESS\n output = \"Success\"\n else:\n self._logger.error(\"Cannot execute usb_plug, no io card configured.\")\n status = Global.FAILURE\n output = \"Cannot execute usb_plug, no io card configured.\"\n elif command.strip().lower().find(\"[usb_unplug]\") != -1:\n if self._device is not None and self._io_card is not None:\n self._device.disconnect_board()\n self._io_card.usb_host_pc_connector(False)\n status = Global.SUCCESS\n output = \"Success\"\n else:\n self._logger.error(\"Cannot execute usb_unplug, no io card configured.\")\n status = Global.FAILURE\n output = \"Cannot execute usb_unplug, no io card configured.\"\n elif command.strip().lower().find(\"[press_power_button(\") != -1:\n command = command.strip().lower().replace(\"[press_power_button(\", \"\")\n command = command.replace(\")]\", \"\")\n press_button_time = float(command)\n\n if self._io_card is not None:\n self._io_card.press_power_button(press_button_time)\n status = Global.SUCCESS\n output = \"Success\"\n else:\n self._logger.error(\"Cannot execute press_power_button, no io card configured.\")\n status = Global.FAILURE\n output = \"Cannot execute press_power_button, no io card configured.\"\n elif command.strip().lower().find(\"[control_relay(\") != -1:\n command = command.strip().lower().replace(\"[control_relay(\", \"\")\n command = command.replace(\")]\", \"\")\n relay_nbr = int(command.split(\",\")[0].strip())\n state = command.split(\",\")[1].strip().lower()\n\n if self._io_card is not None:\n if state == 
\"on\":\n self._io_card.enable_line(relay_nbr)\n elif state == \"off\":\n self._io_card.disable_line(relay_nbr)\n status = Global.SUCCESS\n output = \"Success\"\n else:\n self._logger.error(\"Cannot execute press_relay, no io card configured.\")\n status = Global.FAILURE\n output = \"Cannot execute press_relay, no io card configured.\"\n else:\n # Handle multi phone, if we issue adb command, add serial number if we have it\n if \"adb\" in command.lower():\n command = self._device.format_cmd(command)\n\n # If curlUtilities is called add the path to Campaign_report\n elif command.strip().lower().find(\"curlutilities\") != -1:\n # Add path to campaign report in CurlUtilities command\n report_tree = \\\n self._global_config.campaignConfig.get(\"campaignReportTree\")\n command += \\\n \" --output=%s\" % report_tree.get_report_path()\n\n if \"[MY_PATH]\" in command:\n command = command.replace(\"[MY_PATH]\",\n os.path.dirname(\n os.path.abspath(\n self._tc_parameters.get_file_path()))\n + os.sep)\n\n if \"[MY_DEVICE_MODEL]\" in command:\n command = command.replace(\"[MY_DEVICE_MODEL]\", self._device.get_phone_model())\n\n # We use the same python that ACS\n if \"python\" in command:\n command_list = command.split(\" \")\n # pyc replacement instead of py curently only works if RUN_FROM_TCDIRECTORY\n # is set to true\n if self._run_from_tc_directory:\n execution_path = os.path.join(self._execution_config_path,\n os.path.dirname(self._name))\n for index, command_element in enumerate(command_list):\n if command_element.endswith(\".py\"):\n if os.path.isfile(os.path.join(execution_path, command_element)) is False:\n pyc_cmd = command_element[:-2] + \"pyc\"\n if os.path.isfile(os.path.join(execution_path, pyc_cmd)):\n command_list[index] = pyc_cmd\n\n command = \" \".join(command_list)\n python_path = sys.executable\n command = command.replace(\"python\", python_path)\n self._logger.info(\"Using python: %s\" % python_path)\n\n if any(\"acs.py\" in cmd.lower() for cmd in command):\n # Put report into sub folder for analysis in case of error\n report_path = self._global_config.campaignConfig.get(\"campaignReportTree\").get_report_path()\n report_subfolder = os.path.join(report_path, os.path.basename(self._name))\n self._logger.info(\"Detailed results will be found at: {0}\".format(report_subfolder))\n command = \"{0} --report_folder={1}\".format(command, report_subfolder)\n\n status, _ = \\\n self.__internal_exec(command, timeout, expected_result)\n if status == Global.SUCCESS:\n output = \"Success\"\n else:\n output = \"Did not found expected result: {0}\".format(expected_result)\n\n else:\n status, stdout = \\\n self.__internal_exec(command, timeout, expected_result)\n output = \"output: {0}\".format(stdout.rstrip(\"\\r\\n\"))\n self._logger.info(output)\n\n # Remove special characters which could be stored in output message\n allowed_characters = '[^a-zA-Z0-9\\-\\+\\=\\'\\.\\:\\,\\;\\!\\?\\%\\(\\)\\#\\*\\@\\_\\n\\t]'\n parsed_output = re.sub(allowed_characters, ' ', output)\n\n return status, parsed_output", "async def module_commanded(self, cmd_msg: abc.Message, ctx: Context, delay: float = None):\n\n async def delay_wrapper(seconds: float, coro):\n try:\n await asyncio.sleep(seconds)\n await coro\n except asyncio.CancelledError:\n coro.close()\n\n invoker = cmd_msg.source\n dest = cmd_msg.destination\n cmd_str = cmd_msg.clean_content\n if not cmd_str.startswith(self.cmdprefix):\n raise NotACommand(f\"Not a command string: {cmd_str}\")\n try:\n name, *args = shellish_split(cmd_str)\n except 
ValueError:\n await self.module_send_event(\"invalid_command\", ctx, cmd_msg, CmdErrorType.BadSyntax)\n return\n name = name.lstrip(self.cmdprefix)\n if delay:\n self.logger.debug(\n f\"Received delayed command from '{invoker}' at '{dest}': \"\n f\"{name=}, {args=}. Executing in {delay} seconds.\"\n )\n else:\n self.logger.debug(f\"Received command from '{invoker}' at '{dest}': {name=}, {args=}\")\n try:\n cmd = self._commands[name]\n namespace = cmd.parse_args(args)\n except KeyError:\n await self.module_send_event(\"invalid_command\", ctx, cmd_msg, CmdErrorType.NotFound)\n return\n except (ArgumentError, ArgumentTypeError, CommandParseError):\n await self.module_send_event(\"invalid_command\", ctx, cmd_msg, CmdErrorType.BadSyntax)\n return\n method = getattr(cmd.module.handle, f\"module_command_{name}\", None)\n if callable(method):\n parsed = ParsedCommand(name, vars(namespace), cmd, cmd_msg)\n if delay:\n self._delayed_command_count += 1\n wait_id = self._delayed_command_count\n info = WaitingCmdInfo(wait_id, cmd_str, delay, invoker, dest, time.time())\n task = self.eventloop.create_task(\n delay_wrapper(delay, method(ctx, parsed)),\n name=f\"ZeroBot_Wait_Cmd_{wait_id}\",\n )\n self._delayed_commands[wait_id] = (task, info)\n await task\n del self._delayed_commands[wait_id]\n else:\n await method(ctx, parsed)", "def do_wait(self):\n pass", "def _sendingCommand(self): \n\n while True:\n self.tello.send_command('command') \n time.sleep(5)", "def run_and_wait():\n self.busy.put(True)\n action()\n self.busy.put(False)\n status._finished(success=True)", "def LaunchAndWait(cmd):\n call(cmd)", "def complete_cmd(self):\r\n if self.select_cmd is not None:\r\n self.do_cmd()", "def _execute(self, command):\n \"\"\"\n Confirm the command was correctly echoed back and then ask for\n its return code\n \"\"\"\n self.telnet_client.write((command + \"\\r\\n\").encode())\n resp = self.telnet_client.read_until((command + \"\\r\\n\").encode())\n while True:\n resp = self.telnet_client.read_until(self.prompt.encode())\n if resp is not None:\n break\n\n stdout = resp.decode()\n stderr = \"\"\n self.telnet_client.write(\"echo $?\\r\\n\".encode())\n _, match, _ = self.telnet_client.expect([re.compile(br'(\\d+)')],\n TelnetControl.TELNET_TIMEOUT)\n exit_code = int(match.group(1).decode())\n\n if exit_code != 0:\n stderr = resp.decode()\n return exit_code, stdout, stderr", "def command_callback(self, command):\n while not self.socket_available: # wait for socket to be available\n pass\n self.socket_available = False # block socket from being used in other processes\n if self.robot.is_in_error():\n self.robot.ResetError()\n self.robot.ResumeMotion()\n reply = self.robot.exchange_msg(command.data, decode=False)\n self.socket_available = True # Release socket so other processes can use it\n if reply is not None:\n self.reply_publisher.publish(reply)", "def async_send_command(self, command: TelnetCommand) -> Coroutine:\n _LOGGER.debug(\"queueing command: %s\", command.message)\n # Give command a unique sequence id and increment\n command.set_sequence(self._sequence)\n self._sequence += 1\n # Push command onto queue\n status, cancel = self._command_queue.push(command)\n # Determine the type of awaitable response to return\n if status == const.QUEUE_FAILED:\n _LOGGER.debug(\"Command not queued: %s\", command.message)\n return cancel.wait()\n if status == const.QUEUE_CANCEL:\n try:\n _LOGGER.debug(\"Command overwritten: %s\", command.message)\n self._expected_responses[cancel].overwrite_command(command)\n return 
self._expected_responses[cancel].wait()\n except KeyError:\n # Can happen when a query returns multiple responses to one query\n _LOGGER.debug(\"Command already resolved: %s\", command.message)\n return none()\n if status == const.QUEUE_NO_CANCEL:\n _LOGGER.debug(\"Command queued: %s\", command.message)\n self._expected_responses[command] = ExpectedResponse(command, self)\n return self._expected_responses[command].wait()", "async def _run_command(self, cmd, timeout=None, prompt_re=None):\n if not self._connected:\n raise RuntimeError(\n \"Not Connected\", \"status: %r\" % self.exit_status, self.key\n )\n\n # Ideally there should be no data on the stream. We will in any case\n # drain any stale data. This is mostly for debugging and making sure\n # that we are in sane state\n stale_data = await self._stream_reader.drain()\n if len(stale_data) != 0:\n self.logger.warning(\"Stale data on session: %s\", stale_data)\n\n output = []\n\n commands = cmd.splitlines()\n for command in commands:\n cmdinfo = self._devinfo.get_command_info(\n command, self._opts.get(\"command_prompts\")\n )\n\n self.logger.info(\"RUN: %r\", cmdinfo.cmd)\n\n # Send any precmd data (e.g. \\x15 to clear the commandline)\n if cmdinfo.precmd:\n self._stream_writer.write(cmdinfo.precmd)\n\n self._stream_writer.write(cmdinfo.cmd)\n\n try:\n prompt = prompt_re or cmdinfo.prompt_re\n\n resp = await asyncio.wait_for(\n self._wait_response(command, prompt),\n timeout or self._devinfo.vendor_data.cmd_timeout_sec,\n loop=self._loop,\n )\n output.append(self._format_output(command, resp))\n except asyncio.TimeoutError:\n self.logger.error(\"Timeout waiting for command response\")\n data = await self._stream_reader.drain()\n raise RuntimeError(\"Command Response Timeout\", data[-200:])\n\n return b\"\\n\".join(output).rstrip()", "def _wait_what(self, expected):\r\n \r\n self._msg_server(cb.WAITWHATSERVER % (expected))", "async def _run_cmd(self, cmd, timeout=5):\n try:\n self._flush_buffer()\n self.pexpect_child.sendline(cmd)\n ret = self.pexpect_child.expect_exact(\n [self.cmd_prompt, pexpect.TIMEOUT], timeout=timeout\n )\n stdout = self.parse_cmd_output(self.pexpect_child.before) if ret == 0 else \"\"\n self.pexpect_child.sendline(\"echo $?\")\n ret = self.pexpect_child.expect_exact(\n [self.cmd_prompt, pexpect.TIMEOUT], timeout=timeout\n )\n exit_status = self.parse_cmd_output(self.pexpect_child.before) if ret == 0 else -1\n try:\n exit_status = int(exit_status)\n except ValueError:\n exit_status = -1\n return exit_status, stdout\n except Exception as e:\n self.applog.exception(\"Exception occured --> _run_command\", exc_info=e)\n raise", "async def run_command(self, cmd: str) -> None:\n # pause logic\n if not self.running.is_set():\n self.add_to_output(\"Paused...\")\n await self.running.wait()\n result = None\n # get/create ssh connection to miner\n conn = await self.get_connection(\"root\", \"admin\")\n # send the command and store the result\n for i in range(3):\n try:\n result = await conn.run(cmd)\n except:\n if i == 3:\n self.add_to_output(f\"Unknown error when running the command {cmd}...\")\n return\n pass\n # let the user know the result of the command\n if result is not None:\n if result.stdout != \"\":\n self.add_to_output(result.stdout)\n if result.stderr != \"\":\n self.add_to_output(\"ERROR: \" + result.stderr)\n elif result.stderr != \"\":\n self.add_to_output(\"ERROR: \" + result.stderr)\n else:\n self.add_to_output(cmd)", "def command(self, cmd, recvSize=0, callback=None):\n if not self.isConnected:\n # If we're 
shutting down, break the chain of polling callbacks...\n return\n\n if callback and recvSize:\n self.lock.acquire()\n self.recv.queue.put((recvSize, callback))\n self.send.queue.put(cmd)\n self.lock.release()\n elif cmd:\n self.send.queue.put(cmd)", "async def run_cmd(self, cmd, timeout=5):\n try:\n if not await self.logged_in():\n self.applog.info(f\"Logging in to console\")\n await self._login()\n self.applog.info(f\"Executing command {cmd}\")\n return await self._run_cmd(cmd, timeout)\n except Exception as e:\n self.applog.exception(\"Exception occured --> run_command\", exc_info=e)\n raise", "def run_cli(self, command=None):\n if command is None:\n return \"FAILURE\"\n\n conn_expect = self.get_expect_connection()\n expect = ['bytes*', '>']\n response = conn_expect.request(\"read_until_prompt\",\n command,\n expect,\n None)\n if response is 'FAILURE':\n return 'FAILURE'\n\n return response", "async def wait_for(self, seqno, cmd, timeout=5):\n if seqno in self.listeners:\n raise Exception(f\"listener exists for {seqno}\")\n\n self.debug(\"Command %d waiting for seq. number %d\", cmd, seqno)\n self.listeners[seqno] = asyncio.Semaphore(0)\n try:\n await asyncio.wait_for(self.listeners[seqno].acquire(), timeout=timeout)\n except asyncio.TimeoutError:\n self.warning(\n \"Command %d timed out waiting for sequence number %d\", cmd, seqno\n )\n del self.listeners[seqno]\n raise\n\n return self.listeners.pop(seqno)", "def _execute(self, message):\n logging.info(__name__ + ' : Send the following command to the device: %s' % message)\n self.visa_handle.write('@%s%s' % (self._number, message))\n sleep(70e-3) # wait for the device to be able to respond\n result = self._read()\n if result.find('?') >= 0:\n print(\"Error: Command %s not recognized\" % message)\n else:\n return result", "def cmd(self, command, timeout = 60):\n retstr = \"\"\n\t#log.debug(\"%s\"%command)\n\tlog.cmd(command)\n try:\n\t self.ses.delaybeforesend = 0.5\n self.ses.sendline(command)\n self.ses.expect(ixia_prompt_regex, timeout)\n\t #log.info(\"before %s; after %s\" %(self.ses.before, self.ses.after))\n retstr += self.ses.before\n except TIMEOUT:\n misc.TestError(\"Timeout in Ixia.cmd for command %s\\n\" % command)\n return retstr.strip().splitlines()[-1]", "def performCommand(self, game, command):\r\n game.currentTurn.perform(command)", "def wait():\n time.sleep(1)", "def wait_for_command_execution(self, timeout=None, check_fun=None):\n if check_fun is None:\n def check_fun2(buf, whole_data):\n # TODO: expose via logging config entry\n if self.verbose_logger is not None:\n self.verbose_logger.debug(\"expecting '%s', got: '%s'\", self.shell_prompt, buf)\n\n return self.re_shell_prompt.search(whole_data)\n\n check_fun = check_fun2\n try:\n res = self.process_output(\n NetUtil.wait_for_socket_result(self.sock,\n check_fun,\n read_buf_size=SOCKET_READ_BUF_SIZE,\n timeout=timeout\n )\n )\n except NetUtil.Timeout as e:\n # netstat_uds = run_shell(\"netstat -ape -A unix\")\n # open_fds = run_shell('ls -l /proc/%s/fd/' % os.getpid())\n # lsof = run_shell('lsof -U')\n # debug:\n\n # Active Unix Domain Sockets:\n # %s.\n # Open file handles (Unix):\n # %s\n # lsof:\n # %s\n # % (netstat_uds, open_fds, lsof))\n # log exception to node log\n if self.brief_logger:\n self.brief_logger.exception(e)\n\n raise\n return res", "def wait(self):\n return self.bot_client.send_command(_Command.Wait)", "def wait_till_read_out():\n\n\trespond = send_command('waitreadout')", "def execute(self, cmd=\"\", msg=\"\", speak=False, duration=0):\n\n 
self.speak = speak\n\n if self.server or not self.testing:\n if self.speak:\n self.say(msg)\n try:\n subprocess.Popen([\"notify-send\", \"Dragonfire\", msg])\n except BaseException:\n pass\n if cmd != \"\":\n time.sleep(duration)\n try:\n subprocess.Popen(cmd, stdout=FNULL, stderr=FNULL)\n except BaseException:\n pass\n return msg", "def wait_for(self, expected=r'\\s\\$', timeout=60, shell='sh'):\n time.sleep(1)\n timeout -= 2\n tnh = self.handle\n time_int = 10\n time_out = 0\n got = ''\n timeout_occ = 0\n if isinstance(expected, list):\n if shell == 'csh':\n for _ele_i, _ele_j in enumerate(expected):\n expected[_ele_i] = re.sub(r'\\s$', r'(\\s|\\t)',\n expected[_ele_i])\n expected = '|'.join(expected)\n while True:\n start_time = time.time()\n _rd, _wr, _err = select([tnh], [], [], time_int)\n if _rd:\n data = tnh.recv(4096)\n data = data.decode(\"utf-8\")\n got = got + data\n end_time = time.time()\n if re.search(r'{0}\\s?$'.format(expected), got):\n break\n time_out += (end_time - start_time)\n if int(time_out) > timeout:\n timeout_occ = 1\n break\n if timeout_occ:\n return False, got\n return True, got", "def __wait_for(self, cmd_byte, rx_bytes, timeout_seconds=1.0):\n if not self.is_valid():\n return False\n t = time.time()\n remain = timeout_seconds\n while 1:\n #num_read = self.__usb_if.MPUSBRead(self.__handle_read, rx_bytes, int(remain*1000))\n #if (num_read > 0) and (rx_bytes[0]==cmd_byte):\n # return True\n rx = self.__usb_if.read(int(remain*1000))\n num_read = len(rx)\n if rx:\n rx_bytes[:] = rx\n if (num_read > 0) and (rx_bytes[0]==cmd_byte):\n return True\n remain = timeout_seconds - (time.time()-t)\n if remain <= 0:\n break\n time.sleep(0.001)\n #end 1 loop\n return False\n #end __wait_for()", "def runCommand(command):\n None", "async def execute(self, client, message, arg):\n\t\treturn", "def _command(self, command, close_delay=0.0):\n self._open()\n try:\n self.__write_command(self._serial_port, command)\n if command.startswith(\"reboot\"):\n return\n\n response = self.__get_response(self._serial_port)\n if response[0].startswith(\"*E\"):\n raise errors.DeviceError(\"Device {} command failed. 
\"\n \"Unable to write command: {} \"\n \"to serial port: {} Err: {!r}\".format(\n self.name, command, self._serial_port,\n response[0]))\n finally:\n if close_delay > 0.0:\n time.sleep(close_delay)\n self.close()\n\n # Discard the last line which is the prompt\n return response[:-1]", "def wait(self, timeoout=None, state=\"C-completed\"):", "async def _wait_response(self, cmd, prompt_re):\n self.logger.debug(\"Waiting for prompt\")\n resp = await self.wait_prompt(prompt_re)\n return resp", "async def command_interpreter(self, command: str) -> None:\n for cls in GlobalCommandRegistry:\n if not asyncio.iscoroutinefunction(GlobalCommandRegistry[cls].main):\n continue\n if command.startswith(tuple(GlobalCommandRegistry[cls].helper['name'])):\n result = await asyncio.gather(GlobalCommandRegistry[cls](command, self.print_queue).main())\n if result is False:\n print(\"Result is false?!\")\n raise KeyboardInterrupt", "def askForCommand(self,command): \n\n\t\tcurrentCommand = 'Simple 2F Gripper Controller\\n-----\\nCurrent command:'\n\t\tcurrentCommand += ' rACT = ' + str(command.rACT)\n\t\tcurrentCommand += ', rGTO = ' + str(command.rGTO)\n\t\tcurrentCommand += ', rATR = ' + str(command.rATR)\n\t\tcurrentCommand += ', rPR = ' + str(command.rPR )\n\t\tcurrentCommand += ', rSP = ' + str(command.rSP )\n\t\tcurrentCommand += ', rFR = ' + str(command.rFR )\n\n\n\t\tprint currentCommand\n\n\t\tstrAskForCommand = '-----\\nAvailable commands\\n\\n'\n\t\tstrAskForCommand += 'r: Reset\\n'\n\t\tstrAskForCommand += 'a: Activate\\n'\n\t\tstrAskForCommand += 'c: Close\\n'\n\t\tstrAskForCommand += 'o: Open\\n'\n\t\tstrAskForCommand += '(0-255): Go to that position\\n'\n\t\tstrAskForCommand += 'f: Faster\\n'\n\t\tstrAskForCommand += 'l: Slower\\n'\n\t\tstrAskForCommand += 'i: Increase force\\n'\n\t\tstrAskForCommand += 'd: Decrease force\\n'\n\t\t\n\t\tstrAskForCommand += '-->'\n\n\t\treturn raw_input(strAskForCommand)\n\t\t#return raw_input(strAskForCommand)", "def user_next_command(self, tracer):\n try:\n self.queue.get_nowait()(tracer)\n except Empty:\n return", "def execute_command(command):\r\n if 0 == len(command):\r\n return\r\n\r\n if command[0] in verbs[\"move\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"go where?\")\r\n else:\r\n execute_go(command[1])\r\n\r\n elif command[0] in verbs[\"take\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"Take what?\")\r\n else:\r\n item_id = get_multi_word_string(command, items)\r\n execute_take(item_id)\r\n\r\n elif command[0] in verbs[\"drop\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"Drop what?\")\r\n else:\r\n item_id = get_multi_word_string(command, items)\r\n execute_drop(item_id)\r\n\r\n elif command[0] in verbs[\"use\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"use what?\")\r\n else:\r\n item_id = get_multi_word_string(command, current_room[\"items\"])\r\n if item_id is False:\r\n item_id = get_multi_word_string(command, inventory)\r\n execute_use(item_id)\r\n\r\n elif command[0] in verbs[\"look\"]:\r\n if len(command) == 1:\r\n print_room(current_room)\r\n elif command[1] in nouns[\"inventory\"]:\r\n print_inventory_items(inventory)\r\n elif command[1] in nouns[\"self\"]:\r\n print_condition()\r\n else:\r\n item_id = get_multi_word_string(command, current_room[\"items\"])\r\n if item_id is False:\r\n item_id = get_multi_word_string(command, inventory)\r\n entity_name = get_multi_word_string(command, [entity[\"name\"] for entity in current_room[\"entities\"].values()])\r\n entity_id = entity_get_id_from_name(entity_name, 
current_room[\"entities\"].values())\r\n if item_id in inventory.keys():\r\n wrap_print(items[item_id][\"description\"])\r\n elif item_id in current_room[\"items\"].keys():\r\n wrap_print(items[item_id][\"description\"])\r\n elif entity_id in current_room[\"entities\"].keys():\r\n wrap_print(entities[entity_id][\"description\"])\r\n else:\r\n wrap_print(\"You can not view that.\")\r\n\r\n elif command[0] in verbs[\"attack\"]:\r\n if len(command) > 2:\r\n item_id = get_multi_word_string(command, items)\r\n entity_name = get_multi_word_string(command, [entity[\"name\"] for entity in current_room[\"entities\"].values()])\r\n entity_id = entity_get_id_from_name(entity_name, current_room[\"entities\"].values())\r\n if len(command) <= 1:\r\n wrap_print(\"attack what?\")\r\n elif entity_id not in current_room[\"entities\"].keys():\r\n wrap_print(\"You cannot attack that.\")\r\n elif len(command) <= 2:\r\n wrap_print(\"What with?\")\r\n elif item_id not in inventory.keys():\r\n wrap_print(\"You do not have a that item.\")\r\n elif items[item_id][\"damage\"] == False:\r\n wrap_print(\"You cannot attack using that item.\")\r\n else:\r\n execute_attack(entity_id, item_id)\r\n\r\n elif command[0] == \"help\":\r\n print(\"To move in a given direction type: go <DIRECTION>\")\r\n print(\"To pick up an item type: take <ITEM>\")\r\n print(\"To drop an item type: drop <ITEM>\")\r\n print(\"To use an item type: use <ITEM>\")\r\n print(\"To look at something of interest type: view <ITEM>\")\r\n print(\"to attack a character type: attack <CHARACTER> with <item>\")\r\n print(\"to : attack <CHARACTER> with <item>\")\r\n print(\"To quit the game type: quit\\n\")\r\n wrap_print(\"\"\"Verb variations are supported, so 'run south', or 'inspect item' are valid inputs.\"\"\")\r\n wrap_print(\"\"\"Items and characters with multiple words in their name are also supported like regular items.\"\"\")\r\n\r\n elif command[0] == \"quit\":\r\n if len(command) == 1:\r\n wrap_print(\"goodbye!\")\r\n global playing\r\n playing = False\r\n\r\n else:\r\n wrap_print(\"That makes no sense.\")", "def Wait(p_question: str):\n input(p_question)\n return", "async def async_run_command(\n self, hass: HomeAssistant, command_name: str | None\n ) -> None:\n try:\n await hass.async_add_executor_job(\n self._client.run_command, self._alias, command_name\n )\n except PyNUTError as err:\n raise HomeAssistantError(\n f\"Error running command {command_name}, {err}\"\n ) from err", "def answer_waiting_call(self) -> None:", "def send_command(self, command, timeout=5):\n self.log('> ' + command)\n self.sendline(command)\n\n index = self.expect([SHELL_PROMPT, SHELL_ERROR_PREFIX], timeout=timeout)\n if index == 1:\n try:\n self.expect(SHELL_PROMPT, timeout=SHELL_TIMEOUT_DEFAULT)\n except:\n pass\n\n raise CommandFailure(command)\n\n return self.before", "def do_cmd(cmd,sock):\n\n buffer = ''\n \n # Write the command and wait one second.\n print 'writing command '+cmd \n sock.send(cmd+SBE37_NEWLINE)\n time.sleep(1)\n \n # Block to receive all data.\n # Continue reading if the received data does not include a prompt.\n # Break out when the received data ends in a prompt.\n while True:\n try:\n data = ''\n data = sock.recv(1024)\n buffer += data\n except:\n raise\n else:\n #print 'received '+str(len(data))+' bytes' \n if buffer.endswith(SBE37Prompt.COMMAND):\n break\n elif buffer.endswith(SBE37Prompt.AUTOSAMPLE):\n break\n elif buffer.endswith(SBE37Prompt.BAD_COMMAND):\n break\n\n return buffer", "def run_wait(self, cmdline, msg='', interval=2.0, 
tick='.'):\n if len(msg) > 0:\n sys.stdout.write(\">> %s...\" % (msg))\n sys.stdout.flush()\n\n self.send(cmdline+\"\\n\")\n\n fullmsg = ''\n while True:\n if self.ready():\n resp = self.recv()\n fullmsg += resp\n if resp.endswith(\"$ \") or resp.endswith(\"# \"):\n break\n else:\n if tick:\n sys.stdout.write(tick)\n sys.stdout.flush()\n time.sleep(interval)\n\n return '\\n'.join(fullmsg.splitlines()[1:-1])", "def RunAsyncCommand(devices, command):\n for device in devices:\n device.AsyncCommand(command)\n for device in devices:\n device.WaitForTasks()", "def executor(command, *args, **kwargs):\n command = command(*args, **kwargs)\n command.execute()\n command.reply()", "async def _send_command(self, command):\n send_message = \"\"\n\n for i in command:\n send_message += chr(i)\n result = None\n for data in send_message:\n if self.serial_port is not None:\n try:\n result = self.write(data)\n except():\n logerr('Cannot send command')\n return result", "def execute_and_wait(account_id, auth_token, to_phone_number, from_phone_number, command):\n start = time.time()\n os.system(command)\n end = time.time()\n execution_time = end-start\n if execution_time<1000:\n time_string = \"less than a second\"\n else:\n time_string=(humanize.naturaldelta(datetime.timedelta((end - start))))\n\n client = Client(account_id, auth_token) # Find these values at https://twilio.com/user/account\n\n client.api.account.messages.create(\n to=to_phone_number,\n from_=from_phone_number,\n body=(\"Your command: \"+command+\" executed in \"+time_string))", "def waitUntilSuccess():", "def _send_command(self, cmd, fb_required=False, res_pattern=None):\n if not cmd.endswith(';'):\n raise RuntimeError(\"Missing a semicolon at the end of the\"\n \" command. Invalid command.\")\n self._arduino.write(bytearray(cmd, 'ascii'))\n if fb_required:\n if not res_pattern:\n raise RuntimeError(\"You can't use fb_required without\"\n \" providing a response pattern. Usage: \"\n \"_send_command(cmd, \"\n \" fb_required=True, \"\n \" res_pattern=<some_string>\")\n print(\"This is a blocking request. 
Waiting for feedback from the\"\n \" Arduino.\")\n time.sleep(0.1)\n res = self._arduino.readline().decode('utf-8')\n while res_pattern not in res:\n time.sleep(0.05)\n res = self._arduino.readline().decode('utf-8')\n return res", "def wait() -> None:\n\n process_input(input())", "def perform_command(self, command_str):\n print \"Received command: {}\".format(command_str)\n commands = command_str.split('#')\n if len(commands) == 2:\n data = None\n elif len(commands) > 2:\n data = commands[2:]\n elif command_str == '':\n # Gets received when the server socket is closed (should be -1)\n print \"Command suggests socket is being closed, ignore this command and close the socket\"\n self.stop()\n else:\n print \"Not a command, must be of form COMMAND#ACTION: {}\".format(command_str)\n\n if len(commands) >= 2:\n command = commands[0]\n action = commands[1]\n if command in COMMANDS and action in ACTIONS:\n self.request_queue.put((command, action, data))\n else:\n print \"Corrupt action\"", "def do_command(self, command, c, e):\n # get command type\n cmdtype = self.__resolveCommandType(command, e)\n\n # ensure the cmd is valid\n if self.__commandExists(command, cmdtype):\n try:\n # only if command is registered\n if self.__commandHandlers[cmdtype].has_key(command):\n # check for recovered db\n if EVENT_MCX_DATABASE_RECOVERED.isSet():\n self.__databaseAvailable = True\n\n # if database required but not available\n if self.__commandHandlers[cmdtype][command]['db'] == True and not self.__databaseAvailable:\n # tell the user\n self.__privMsg(c, e, DATABASE_SERVER_NOT_AVAILABLE)\n # otherwise execute command\n else:\n self.__commandHandlers[cmdtype][command]['func'](c, e)\n # command not registered, tell the user\n else:\n self.__privMsg(c, e, (COMMAND_NOT_FOUND % command))\n # database was set, but is not available anymore\n except NoDatabaseException, (error):\n self.__databaseAvailable = False\n self.__privMsg(c, e, DATABASE_CONNECTION_INTERRUPTED)\n # fire event\n if not EVENT_MCX_DATABASE_LOST.isSet():\n EVENT_MCX_DATABASE_LOST.set()\n # command does not exist\n else:\n self.__privMsg(c, e, (COMMAND_NOT_FOUND % command))", "async def _send_request(self, command, waitResponse=False):\n # Make sure we're connected.\n await self._perform_connect()\n \n while self._timeout > 0:\n self.logger.debug('waiting for previous command response')\n await asyncio.sleep(1)\n \n self.logger.debug(\"Sending command: %s\", command)\n await self._websocket.send(command)\n if waitResponse and self.connected:\n while (int(self._received_sequence) < int(self._sequence)) and self._timeout < 5:\n self._timeout += 1\n self.logger.debug('waiting for response sequence: %s, current sequence: %s' % (self._sequence,self._received_sequence))\n await asyncio.sleep(1)\n self._timeout = 0", "def executeCommand(command):\n time.sleep(1)\n #return os.system(command)\n subprocess.Popen(command, shell=True)", "async def command(self,ctx):\n await ctx.send(\"Yes this is a command.\")", "def _command(self, *cmd, handler=None):", "async def _async_send_command(self, data_cmd):\n device_id = self._device_id\n if not device_id:\n return\n if not data_cmd:\n return\n\n api_device = f\"{API_DEVICES}/{device_id}\"\n api_command = f\"{api_device}/commands\"\n\n async with self._session.post(\n api_command,\n headers=_headers(self._api_key),\n data=data_cmd,\n raise_for_status=True,\n ) as resp:\n await resp.json()\n\n await self._device_refresh()", "def send_command(client, device_label, device_command, device_hold_secs=0):\n device_id = 
devices[device_label]['id']\n func = client.send_command(device_id, device_command, device_hold_secs)\n run_in_loop_now('send_command', func)\n print(\"Sent: \" + device_command + \" to \" + device_label)\n return", "def run_at_command(self, cmd=\"AT\\r\", timeout=1000):\n self.__atresponse_received = False\n # Send command via serial\n if self._serport is None:\n raise StationException(\"Port \" + self.portname + \" is not open\")\n\n # Skip wireless packets\n self._atresponse = \"(\"\n # Send serial packet\n self._serport.send(cmd)\n \n # Wait for response from modem\n while len(self._atresponse) == 0 or self._atresponse[0] == '(':\n if not self._wait_for_response(timeout):\n return None\n # Return response received from gateway\n return self._atresponse", "def wait_for(func):\n \n while not func() and not rospy.is_shutdown():\n time.sleep(0.01)", "def wait(cls, quad):\n\t\twait_time = cls.get_address_value(quad.result)\n\t\ttime.sleep(wait_time/1000.0)", "def get(self, cmd, wait_sleep=0.3) -> bytes:\n logger.debug(f\"get(cmd={cmd}, wait_sleep={wait_sleep})\")\n self.write(cmd)\n result = self.read(wait_sleep=wait_sleep)\n return result", "def async_check(word):\n loop = asyncio.get_event_loop()\n loop.run_until_complete(async_cli(word))", "def wait(self):\n\t\twhile True:\n\t\t\tr1 = self.zaberSend(self.translation[\"hor\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tr2 = self.zaberSend(self.translation[\"ver\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tif r1[2] == 0 and r2[2] == 0:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ttime.sleep(.01)", "def _invoke_cmd(self, cmd):\n if cmd in self.COMMANDS:\n self.COMMANDS[cmd]()\n else:\n print(ERROR_UNKNOWN_COMMAND.format(cmd=cmd))", "def wait(delay=2):\n time.sleep(delay)", "async def _send_command(self, command):\n send_message = \"\"\n\n for i in command:\n send_message += chr(i)\n result = None\n for data in send_message:\n try:\n result = await self.write(data)\n except():\n if self.log_output:\n logging.exception('cannot send command')\n else:\n print('cannot send command')\n return result", "def _process_command_queue(self, command_queue):\n while True:\n if len(command_queue) > 0:\n command_tuple = command_queue.pop()\n func, kwargs = command_tuple[0], command_tuple[1]\n getattr(self, func)(**kwargs)\n time.sleep(.5)", "def do(self, line): \n self.interface.onecmd(line)", "def handle_command(command, event, bot):\n print('slack::cmd::{}'.format(command))\n\n cmd_list = command.split(' ')\n cmd = cmd_list[0].lower()\n args = cmd_list[1:] if len(cmd_list) else 0\n\n if cmd == 'help':\n response, success = handle_command_help()\n\n elif cmd == 'accounts':\n response, success = handle_command_accounts(args, event, bot)\n\n elif cmd == 'assets':\n response, success = handle_command_assets(args, event, bot)\n\n elif cmd == 'publish':\n response, success = handle_command_publish(args, event, bot)\n\n elif cmd == 'self':\n response, success = handle_command_self(args, event, bot)\n\n elif 'reaction_' in cmd:\n response, success = handle_command_reaction(args, event, bot)\n else:\n response, success = handle_command_help()\n\n print('slack::cmd::{}::success::{}'.format(command, success))\n return success, response", "def handle_command(command, channel):\n #Default respons is help text for the user\n default_response = \"This don't exist m8. 
Try *{}*.\".format(\"!price trx\")\n #Finds and executes the given command, filling in response\n response = None\n \n if command.lower() in name_id_map:\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/' + name_id_map[command.lower()] + '/')\n coin = req.json()\n text =format_coin_output(coin[0])\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command.lower() in symbol_id_map:\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/' + symbol_id_map[command.lower()] + '/')\n coin = req.json()\n text = format_coin_output(coin[0])\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!top':\n text = top_coins()\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!exit':\n text = \":wasssap3::wasssap3:ABANDON SHIP!!!:wasssap3::wasssap3:\\n :rotating_light:EXIT ALL MARKETS:rotating_light:\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!ping':\n text = \"Still scavaging the moon.\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n else:\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=default_response,\n )", "def _wait_lift_cmd(xbee):\n shared.status['command'] = 'STDBY'\n util.log_info(\"%s Standby, awaiting 'LIFT'.\" % shared.AGENT_ID)\n \n wait_count = 0\n while True:\n time.sleep(.1)\n wait_count = wait_count + 1\n \n if shared.status['command'] == 'LIFT':\n comm.xbee_broadcast(xbee, \"IFO,%s cleared for takeoff.\" % shared.AGENT_ID)\n util.log_info(\"'LIFT' received! Taking off!\")\n return True\n \n elif shared.status['command'] == 'EXIT':\n comm.xbee_broadcast(xbee, \"IFO,%s abort takeoff.\" % shared.AGENT_ID)\n util.log_info(\"'EXIT' received. Abort takeoff.\")\n return False\n \n elif wait_count >= 100:\n wait_count = 0\n comm.xbee_broadcast(xbee,\"IFO,%s standby. 
Alt: %.2f m.\" % (shared.AGENT_ID, shared.des_alt))", "def _run_cmd_until_condition(self, cmd, cond, retry_count=None,\r\n retry_count_interval=5):\r\n count = 0\r\n while True:\r\n try:\r\n std_out, std_err = self._execute(cmd)\r\n except Exception: # pylint: disable=broad-except\r\n LOG.debug(\"Command %r failed while waiting for condition\",\r\n cmd)\r\n count += 1\r\n if retry_count and count >= retry_count:\r\n raise exceptions.ArgusTimeoutError(\r\n \"Command {!r} failed too many times.\"\r\n .format(cmd))\r\n time.sleep(retry_count_interval)\r\n else:\r\n if std_err:\r\n raise exceptions.ArgusCLIError(\r\n \"Executing command {!r} failed with {!r}\"\r\n .format(cmd, std_err))\r\n elif cond(std_out):\r\n break\r\n else:\r\n time.sleep(retry_count_interval)", "def execute_command_with_timeout(self, **kwargs):\n args = kwargs\n key = args.get(\"key\", ComType.cmd_com)\n command = args.get(\"command\", None)\n case_type = args.get(\"case_type\", \"\")\n receiver = args.get(\"receiver\", None)\n timeout = args.get(\"timeout\", TIMEOUT)\n return self.com_dict.get(key).execute_command_with_timeout(\n command=command, case_type=case_type,\n timeout=timeout, receiver=receiver)", "def _exec_and_wait(command):\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n proc.wait()\n return proc.stdout.read()", "async def _wait_for_data(self, current_command, number_of_bytes):\n while number_of_bytes:\n next_command_byte = await self.read()\n current_command.append(next_command_byte)\n number_of_bytes -= 1\n return current_command", "def cmd_wait(con, run_cmd):\n # May take up to 5 minutes\n sleep(5)\n ret = False\n for _ in range(25):\n try:\n result = con.run(run_cmd, hide=True)\n if result.return_code == 0:\n ret = True\n break\n except (ConnectionError, NoValidConnectionsError):\n sleep(10)\n\n return ret", "def wait_until(self, check, timeout=None):\n self._wait_in_process_loop(lambda: (check(),None),timeout=timeout)" ]
[ "0.6850583", "0.6637179", "0.66318923", "0.66277975", "0.65436953", "0.64919645", "0.64890045", "0.6457957", "0.6421299", "0.63659066", "0.63242984", "0.62695634", "0.62560266", "0.6223955", "0.62138736", "0.61878145", "0.61755884", "0.6169835", "0.6162347", "0.6137532", "0.6127769", "0.61051035", "0.60903984", "0.6079448", "0.6074302", "0.60526353", "0.60338867", "0.6021543", "0.60105544", "0.6002351", "0.5986964", "0.59835833", "0.5948147", "0.59390134", "0.59154695", "0.59039325", "0.58951026", "0.5885997", "0.58786136", "0.5867711", "0.58635014", "0.5856676", "0.58424234", "0.58385456", "0.57897806", "0.5767408", "0.5767395", "0.5766974", "0.57645714", "0.57561123", "0.57513297", "0.574789", "0.5742409", "0.57324266", "0.57173014", "0.57050025", "0.57046217", "0.5702403", "0.5687952", "0.5678643", "0.5668928", "0.56656647", "0.5651563", "0.5636818", "0.5628613", "0.5615633", "0.5606241", "0.56040645", "0.5599842", "0.5598207", "0.5588292", "0.5586127", "0.5583352", "0.5580697", "0.5576821", "0.5576555", "0.5558237", "0.5553953", "0.5547481", "0.5544615", "0.55351764", "0.553003", "0.5528262", "0.5525871", "0.551489", "0.5511291", "0.55062157", "0.55046976", "0.5495679", "0.5494095", "0.54922485", "0.5490777", "0.548556", "0.5483742", "0.5480948", "0.54755044", "0.5470727", "0.5468841", "0.5467522", "0.5461767", "0.5458618" ]
0.0
-1
Play a song based on its path.
def play_song(self):

    path = input('Give path to wanted song: ')  # Request path to song
    path = path.replace('\\', '/')

    if not self.path_storage_re.match(path):  # Check if the wanted song is from the storage directory
        print("Give a valid path")
    else:
        p = vlc.MediaPlayer(path)  # Create VLC instance and play the song
        p.play()
        self.playSong.append(p)
        self.isPlaying = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def play(path):\n sound = AudioSegment.from_mp3(path)\n playback.play(sound)", "def play(self, songpos=None):\n # TODO: implement songpos !\n if songpos is None:\n resp = yield from self.command('play')\n return True", "def play(song):\n # Show the metadata\n if (verbose==True):\n for s in song.keys():\n print s, \":\", \n print song[s]\n else:\n print \"Title:\", song[\"title\"]\n print \"Artisit:\", song[\"artist\"]\n print \"Album:\", song[\"albumtitle\"]\n print \"Year\", song[\"public_time\"]\n print \"Company:\", song[\"company\"]\n print \"Length\", song[\"length\"]\n print \"Playing...\"\n mp3_url = song[\"url\"]\n song_length = song[\"length\"]\n p = subprocess.Popen([\"mplayer\", \"-msglevel\", \"all=0\", mp3_url])\n\n # At the same time, download the song:\n u = urllib2.urlopen(mp3_url)\n local_mp3 = open(song[\"title\"] + \"-\" + song[\"artist\"] + \".mp3\", \"w\")\n local_mp3.write(u.read())\n local_mp3.close()\n # time.sleep(song_length)\n i = 0\n while(True):\n time.sleep(1)\n i += 1\n if i == song_length:\n # Kill the process when the song is finished.\n p.terminate()\n print \"#\" * 80\n break", "def play(sound):\n if SOUNDDIR != \"\":\n call([\"aplay\", SOUNDDIR + sound])", "def _play_audio(self, path_or_location):\n url = path_or_location.replace('https', 'http')\n audi_commd = self._vlc_audio_command + [url]\n logger.info('VLC command: {}'.format(audi_commd))\n process = subprocess.Popen(audi_commd)\n self._player_pid = process.pid\n logger.info(\"vlc pid \" + str(process.pid))\n\n # add pid to child_pids\n self._child_pids[process.pid] = True", "def update(self, song: int) -> None:\n if 0 <= song < len(self.sounds):\n self.sounds[song].play()", "def play_async(path):\n p = Process(target=play, args=(path,))\n p.start()", "async def play(self, msg, *, song):\n if msg.guild.id in self.player:\n if msg.voice_client.is_playing() is True: # NOTE: SONG CURRENTLY PLAYING\n return await self.queue(msg, song)\n\n if self.player[msg.guild.id]['queue']:\n return await self.queue(msg, song)\n\n if msg.voice_client.is_playing() is False and not self.player[msg.guild.id]['queue']:\n return await self.start_song(msg, song)\n\n else:\n # IMPORTANT: THE ONLY PLACE WHERE NEW `self.player[msg.guild.id]={}` IS CREATED\n self.player[msg.guild.id] = {\n 'player': None,\n 'queue': [],\n 'author': msg,\n 'name': None,\n \"reset\": False,\n 'repeat': False,\n 'volume': 0.5\n }\n return await self.start_song(msg, song)", "def play_sound(self):\n # http://soundbible.com/2103-1-Person-Cheering.html\n my_path = os.path.dirname(__file__)\n sound_path = os.path.join(my_path, 'yay.mp3')\n sound = SoundLoader.load(sound_path)\n sound.play()", "def load(self, song):\n self.currentSongName = song\n self.currentSong = pygame.mixer.music.load(song)", "def play_music(self):\n song_index = -1\n if self.num_songs == 0:\n sys.stdout.write(\"No songs found\\n\")\n sys.exit(0)\n \n # FIXME: spacebar/pause is an mplayer-specific command\n sys.stdout.write(\"Press spacebar to pause songs\\n\")\n sys.stdout.write(\"Press ctrl+c once to skip a song\\n\")\n sys.stdout.write(\"Hold ctrl+c to exit\\n\")\n sys.stdout.write(\"%d files found.\\n\" % self.num_songs)\n while True:\n try:\n song_index = self._get_song_index(song_index)\n if song_index == None:\n sys.exit(0)\n song = self.songs[song_index]\n sys.stdout.write(\"%s\\n\" % song)\n \n # Disabled the following as it got pretty annoying seeing a \n # torrent of notifications for non-music files (mplayer \n # gracefully skips these). 
\n #try:\n # notify_cmd=\"notify-send -t 1000 '%s'\" % \\\n # song.split(\"/\")[-1]\n # subprocess.check_call(notify_cmd, shell=True)\n #except:\n # pass\n #FIXME: escape quotes in songs\n play_cmd = '\"%s\" \"%s\" > /dev/null 2>&1 ' % \\\n (self.music_client, song) \n subprocess.check_call(play_cmd, shell=True)\n except KeyboardInterrupt:\n try:\n # HACK to allow repeated ctrl+c to exit outright\n time.sleep(0.1) \n except KeyboardInterrupt:\n sys.stderr.write(\"\\nExiting...\\n\")\n sys.exit(0)", "async def play(self, ctx, *, filename: str):\r\n if not ctx.voice_client:\r\n await self.connect(ctx)\r\n if filename not in self.audio_files:\r\n await ctx.send(\"File {0} not found\".format(filename))\r\n await self.audiofiles(ctx)\r\n else:\r\n ctx.voice_client.play(discord.FFmpegPCMAudio(source=\"{0}{1}.mp3\".format(self.audio_base_dir, filename)))\r\n await ctx.message.delete()", "async def play(self, ctx, *, song: str):\n state = self.get_voice_state(ctx.message.server)\n opts = {\n 'default_search': 'ytsearch',\n 'quiet': True,\n }\n\n if state.voice is None:\n success = await ctx.invoke(self.summon)\n if not success:\n return\n if state.voice.channel != ctx.message.author.voice_channel:\n await self.bot.say('You can only modify the queue if you\\'re in the same channel as me!')\n return\n if len(state.songs._queue) >= 6:\n await self.bot.say('There can only be up to 6 items in queue!')\n return\n\n status = await self.bot.say('Loading... 🌚')\n pg_task = self.loop.create_task(self.progress(status, 'Loading'))\n state.voice.encoder_options(sample_rate=48000, channels=2)\n try:\n player = await state.voice.create_ytdl_player(song, ytdl_options=opts, after=state.toggle_next)\n except Exception as e:\n if type(e).__name__.endswith('DownloadError') or type(e).__name__.endswith('IndexError'):\n pg_task.cancel()\n await self.bot.delete_message(status)\n await self.bot.say('**That video couldn\\'t be found!**')\n return False\n else:\n raise e\n\n player.volume = 0.7\n entry = VoiceEntry(ctx.message, player, False)\n was_empty = state.songs.empty()\n await state.songs.put(entry)\n if state.current:\n await self.bot.say('Queued ' + str(entry))\n pg_task.cancel()\n await self.bot.delete_message(status)", "def play_local(self, music, device):\n # Look at all the files in the specified directory and add their URIs.\n mp3s = []\n try:\n files = os.listdir(music)\n except OSError, ex:\n logging.warning(\"OS Error: %s\", ex)\n return\n for filename in files:\n if filename.endswith(\".mp3\"):\n mp3s.append(os.path.join(self.webserver, music,\n urllib.pathname2url(filename)))\n\n device.play(sorted(mp3s))", "def play(filename):\n SoundClient(blocking=True).playWave(filename)", "def play_music(sid):\n # Get the parameters for the get_song_id request\n artist = None\n album = None\n title = None\n if not request.json:\n # If no JSON parameters were given, just resume playing the song\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n host = hosts.get_detail(db, sid)\n spotify.resume(host['ip'])\n return jsonify({})\n else:\n try:\n # Get the host data from the database\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n host = hosts.get_detail(db, sid)\n artist = None\n album = None\n track = None\n if request.json.has_key('track') and request.json.get('track'):\n track = request.json.get('track')\n elif request.json.has_key('album') and request.json.get('album'):\n album = request.json.get('album')\n elif request.json.has_key('artist') and request.json.get('artist'):\n artist = 
request.json.get('artist')\n else:\n spotify.resume(host['ip'])\n return jsonify({})\n spotify.compound_play(host['ip'], artist=artist, album=album, song=track)\n return jsonify({})\n except:\n abort(400)", "async def play(self, ctx, *, query):\n\n source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(query))\n ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)\n\n await ctx.send('Now playing: {}'.format(query))", "async def play(self, ctx, *, query):\n\n source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(query))\n ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)\n\n await ctx.send('Now playing: {}'.format(query))", "def load_song(self, path):\n self._menu_select('File->Open')\n self._open_file(path)\n try:\n # Get the annoying Comments window out of the way\n self._app.Comments.minimize()\n except MatchError:\n pass", "def play(self, position=None):\n if position!=None and (0 <= position < len(self.playlist)):\n self.playlist_id = position\n self.stop()\n if not self.is_playing:\n self.logger.debug(\"play: {}\".format(os.path.join(self.path_songs, self.playlist[self.playlist_id])))\n self.tstamp_play = time.time()\n self.is_playing = True\n self.thread = CommandPlayer(\n command=self.player_command.format(\n file=shlex.quote(\n os.path.join(self.path_songs, self.playlist[self.playlist_id])\n )\n ),\n callback=self._callback\n )\n self.thread.start()", "def playSound(self,sound):\n sound.play()", "def playsong(song, failcount=0, override=False):\n # pylint: disable=R0911,R0912\n if not Config.PLAYER.get or not has_exefile(Config.PLAYER.get):\n g.message = \"Player not configured! Enter %sset player <player_app> \"\\\n \"%s to set a player\" % (c.g, c.w)\n return\n\n if Config.NOTIFIER.get:\n subprocess.Popen(shlex.split(Config.NOTIFIER.get) + [song.title])\n\n # don't interrupt preloading:\n while song.ytid in g.preloading:\n writestatus(\"fetching item..\")\n time.sleep(0.1)\n\n try:\n streams.get(song, force=failcount, callback=writestatus)\n\n except (IOError, URLError, HTTPError, socket.timeout) as e:\n dbg(\"--ioerror in playsong call to streams.get %s\", str(e))\n\n if \"Youtube says\" in str(e):\n g.message = F('cant get track') % (song.title + \" \" + str(e))\n return\n\n elif failcount < g.max_retries:\n dbg(\"--ioerror - trying next stream\")\n failcount += 1\n return playsong(song, failcount=failcount, override=override)\n\n elif \"pafy\" in str(e):\n g.message = str(e) + \" - \" + song.ytid\n return\n\n except ValueError:\n g.message = F('track unresolved')\n dbg(\"----valueerror in playsong call to streams.get\")\n return\n\n try:\n cmd, songdata = generate_real_playerargs(song, override, failcount)\n\n except (HTTPError) as e:\n\n # Fix for invalid streams (gh-65)\n dbg(\"----htterror in playsong call to gen_real_args %s\", str(e))\n if failcount < g.max_retries:\n failcount += 1\n return playsong(song, failcount=failcount, override=override)\n else:\n g.message = str(e)\n return\n\n except IOError as e:\n # this may be cause by attempting to play a https stream with\n # mplayer\n # ====\n errmsg = e.message if hasattr(e, \"message\") else str(e)\n g.message = c.r + str(errmsg) + c.w\n return\n\n songdata = \"%s; %s; %s Mb\" % songdata\n writestatus(songdata)\n dbg(\"%splaying %s (%s)%s\", c.b, song.title, failcount, c.w)\n dbg(\"calling %s\", \" \".join(cmd))\n returncode = launch_player(song, songdata, cmd)\n failed = returncode not in (0, 42, 43)\n\n if failed and failcount 
< g.max_retries:\n dbg(c.r + \"stream failed to open\" + c.w)\n dbg(\"%strying again (attempt %s)%s\", c.r, (2 + failcount), c.w)\n writestatus(\"error: retrying\")\n time.sleep(1.2)\n failcount += 1\n return playsong(song, failcount=failcount, override=override)\n\n return returncode", "def play_sound():\r\n try:\r\n\r\n file_name = r\"sound.mp3\"\r\n playsound.playsound(file_name)\r\n except OSError:\r\n AudioSegment.converter=r\"/Users/russelllamb/Downloads/ffmpeg\"\r\n sound=AudioSegment.from_mp3(r\"sound.mp3\")\r\n play(sound)", "async def _play(self, bot, server_id):\n srv = self.get_server_dict(server_id)\n srv['song'] = self.dequeue(server_id)\n if not srv['song']:\n await self._finish_playback(bot, server_id)\n return\n try:\n srv['player'] = srv['voice'].create_ffmpeg_player(srv['song'][0], stderr=subprocess.PIPE, before_options=self._ffmpeg_options, after=lambda: self._after(bot, server_id))\n await bot.change_presence(game = Game(name=srv['song'][1]))\n except Exception as ex:\n #shit's fucked\n self.logger.exception(ex, exc_info=True)\n await self._finish_playback(bot, server_id)\n return\n srv['player'].volume = srv['volume']\n srv['player'].start()", "def play_sound(self, sound) -> None:\n pass", "def play_sound(file_path: Path) -> None:\n threading.Thread(target=playsound.playsound, args=(file_path,), daemon=True).start()", "async def play(self, ctx, *, query):\n source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(query))\n ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)", "def do_play(*_args):\n print(last_wav_path)\n if last_wav_path and last_wav_path.is_file():\n threading.Thread(\n target=lambda: subprocess.check_call(\n [\"aplay\", \"-q\", str(last_wav_path)]\n )\n ).start()", "def play_podcast(url, name):\n\n player.play(url, name)", "def on_play(self, event, song):\n\n if event.guild.id not in self.players:\n raise CommandFail('Not in a voice channel here.')\n\n playables = list(YoutubeDLPlayable.from_url(song))\n for playable in playables:\n self.players[event.guild.id].play(playable, event)", "def stream_song(self, path, e):\n if path == ERROR:\n self.send_streaming_message(INVALID_REQ)\n return\n # sends metadata\n sample_rate, channels, my_format = get_metadata(path)\n to_send = sample_rate + DOLLAR + channels + DOLLAR + my_format\n skip_amount = get_byte_num(path)\n print('to send', to_send)\n\n # sends a chunk each iteration of the loop\n self.send_streaming_message(to_send)\n with open(path, 'rb') as song:\n data = song.read(MSG_SIZE)\n self.send_streaming_message(data)\n while data != EMPTY_MSG:\n data = song.read(MSG_SIZE)\n\n # if the user wanted to pause the function will raise\n # a thread event and stop until instructed to continue\n # from outside the thread\n if self.pause:\n e.wait()\n\n # checks if the song should stop playing/ go backwards/forwards\n if not self.skip_q.empty():\n msg = self.skip_q.get()\n if msg == FORWARD_ACTION:\n song.read(int(skip_amount))\n elif msg == BACKWARD_ACTION:\n song.seek(-1 * int(skip_amount), ONE)\n elif msg == STOP:\n self.send_streaming_message(FINISH)\n return\n # sends the chunk and pauses\n self.send_streaming_message(data)\n time.sleep(MSG_SIZE * NO_LAG_MOD / int(sample_rate))\n self.send_streaming_message(FINISH)", "def play(self):\n if self.state == 'play':\n super().play()\n return\n if self.player is None:\n self.load()\n\n self.player.play()\n self.state = 'play'\n super().play()", "def play(self, play=True):\n 
spotify.Error.maybe_raise(lib.sp_session_player_play(\n self._session._sp_session, play))", "async def loop_song(self, msg):\n source = discord.PCMVolumeTransformer(\n discord.FFmpegPCMAudio(self.player[msg.guild.id]['name']))\n loop = asyncio.get_event_loop()\n try:\n msg.voice_client.play(\n source, after=lambda a: loop.create_task(self.done(msg)))\n msg.voice_client.source.volume = self.player[msg.guild.id]['volume']\n # if str(msg.guild.id) in self.music:\n # msg.voice_client.source.volume=self.music['vol']/100\n except Exception as Error:\n # Has no attribute play\n print(Error) # NOTE: output back the error for later debugging", "def play(name, volume=1.0):\n s = load(name)\n s.set_volume(volume)\n s.play()", "def play(self, loop):\n if self.playMusic:\n pygame.mixer.music.play(loop)", "def play(self):\n self.playing = True\n # FIXME?: Why is this not doing anything? Shouldn't it be calling into the player API?", "def play(self):\n\t\tprint(\"play args:\")\n\t\tprint(args)\n\t\tpyglet.clock.schedule_once( self.play_next,\n\t\t\t\t\t\t\t\t\tself._autonext_interval_msec)\n\t\t# instead of using interval schedules, it just callls the same\n\t\t# function repeated so if the system is backed up it won't create\n\t\t# additional problems\n\t\tself._playing = True", "async def play(self, ctx: commands.Context, *, query: t.Optional[str]) -> None:\n if query is None:\n # Maybe the user didn't know to pass in a query?\n embed = discord.Embed(colour=Colours.regular, timestamp=Embeds.now())\n embed.description = (\n \"No query passed in. Try passing in something: `$play arabic music`\"\n )\n embed.set_footer(\n text=\"See $help voice for more commands.\", icon_url=Icons.info\n )\n return await ctx.send(embed=embed)\n\n query = query.strip(\"<>\")\n search = not Regexes.url(query)\n if (result := await self.get_tracks(query, search, True)) is not None:\n # Ensure that we're connected before playing.\n player = self.get_player(ctx.guild)\n await ctx.invoke(self.connect, channel=None)\n if not player.is_connected:\n return\n\n embed = discord.Embed(\n title=\"Now queued.\" if player.is_playing else \"Now playing.\",\n description=f\"[{result.title}]({result.uri})\",\n colour=Colours.regular,\n timestamp=Embeds.now(),\n )\n\n m, s = self.get_formatted_length(result.length, False)\n embed.set_footer(\n text=f\"Track goes for {m} minutes and {s} seconds.\",\n icon_url=ctx.author.avatar_url,\n )\n if result.thumb is not None:\n embed.set_thumbnail(url=result.thumb)\n await ctx.send(embed=embed)\n\n player.queue.add_tracks(result)\n if not player.is_playing:\n await player.playback()\n else:\n fail = Embeds.status(success=False, desc=\"Failed to find any results.\")\n await ctx.send(embed=fail)", "def play(self, volume=100):\n # self.stdin_queue.put(\"play\")\n self._direct_stdin_writer(\"play\")\n self._media_volume = volume\n self._update_volume()", "def play_audio(self):\n if not self.voice.get_busy():\n self.voice.play(self.sound)\n else:\n pass", "def play_sound(self, sound):\n try:\n if self.se_volume != 0:\n self.sound_lib[sound].play()\n except:\n print \"Couldn't play the sound '\", sound, \"'!\"", "def play_sound(self, char):\n\n player = self._sounds.get(char)\n if player:\n player.play()", "def play_wakeup_music(self):\n list_of_music_files = []\n for track in os.listdir(project_path + '/music'):\n if track.endswith(\".mp3\"):\n list_of_music_files.append(str(project_path + '/music/' + str(track)))\n\n # figure out random track of the found mp3 files\n random_track = randint(0, 
len(list_of_music_files)-1)\n\n self.play_mp3_file(list_of_music_files[random_track])", "def playMusic(self, music = None, index=0):\n if music == None:\n music = self.battleMusic\n if isinstance(music, list):\n self.songIndex = index\n while self.songIndex > len(music)-1:\n self.songIndex -= len(music)\n pygame.mixer.music.load(music[self.songIndex])\n else:\n self.songIndex = 0\n pygame.mixer.music.load(music)\n self.currentMusic = music\n pygame.mixer.music.play()\n pygame.mixer.music.set_volume(self.musicVolume)\n pygame.mixer.music.set_endevent(pygame.constants.USEREVENT)", "async def _play(self, ctx: commands.Context, *, search: str):\n\n if not ctx.voice_state.voice:\n await ctx.invoke(self._join)\n\n async with ctx.typing():\n try:\n source = await YTDLSource.create_source(ctx, search, loop=self.bot.loop)\n except YTDLError as e:\n await ctx.send('An error occurred while processing this request: {}'.format(str(e)))\n else:\n song = Song(source)\n\n await ctx.voice_state.songs.put(song)\n await ctx.send('Enqueued {}'.format(str(source)))", "async def _play(self, ctx: commands.Context, *, search: str):\n\n if not ctx.voice_state.voice:\n await ctx.invoke(self._join)\n\n async with ctx.typing():\n try:\n source = await YTDLSource.create_source(ctx, search, loop=self.bot.loop)\n except YTDLError as e:\n await ctx.send('An error occurred while processing this request: {}'.format(str(e)))\n else:\n song = Song(source)\n\n await ctx.voice_state.songs.put(song)\n await ctx.send('Enqueued {}'.format(str(source)))", "async def play(self):\n if self.repeat and self.current is not None:\n self.queue.append(self.current)\n\n self.current = None\n self.position = 0\n self.paused = False\n\n if not self.queue:\n await self.stop()\n await self._bot.lavalink.client._trigger_event('QueueEndEvent', self.guild_id)\n else:\n if self.shuffle:\n track = self.queue.pop(randrange(len(self.queue)))\n else:\n track = self.queue.pop(0)\n\n self.current = track\n await self._bot.lavalink.ws.send(op='play', guildId=self.guild_id, track=track.track)\n await self._bot.lavalink.client._trigger_event('TrackStartEvent', self.guild_id)", "def play(self, play):\n\n self._play = play", "def get_next_song(self):\r\n if self.timestamp:\r\n delta = datetime.datetime.now() - self.timestamp\r\n if delta < timedelta(seconds=3):\r\n self.log.warning(u\"Song '%s' stopped playing after less than 3 seconds for some reason!\" % self.meta)\r\n time.sleep(3)\r\n self.timestamp = datetime.datetime.now()\r\n\r\n song = self.findQueued()\r\n\r\n self.meta = u\"%s - %s\" % (song.artist(), song.title)\r\n self.log.debug(\"Now playing \\\"%s\\\" [ID %s]\" % (song.title, song.id))\r\n self.song = song\r\n\r\n try:\r\n filepath = song.file.path.encode(self.fsenc)\r\n except:\r\n try:\r\n filepath = song.file.path.encode(self.sysenc)\r\n except:\r\n filepath = song.file.path\r\n self.log.debug(\"Returning path %s\" % filepath)\r\n return filepath", "def play(*args, forward: bool=True, playSound: bool=True, record: bool=True, sound:\n Union[AnyStr, bool]=\"\", state: bool=True, wait: bool=True, q=True, query=True,\n **kwargs)->Union[None, Any]:\n pass", "def play(self, filename, callback) :\n raise NotImplementedError(\"play not implemented\")", "def play(self):\n spotifyconnect.Error.maybe_raise(lib.SpPlaybackPlay())", "def playmusic(self, soundfile):\n clock = pygame.time.Clock()\n pygame.mixer.music.load(soundfile)\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy():\n clock.tick(FRAMERATE)", "def play(videoid):\n 
common.debug('Playing {}'.format(videoid))\n metadata = api.metadata(videoid)\n common.debug('Metadata is {}'.format(metadata))\n\n if not _verify_pin(metadata[0].get('requiresPin', False)):\n ui.show_notification(common.get_local_string(30106))\n xbmcplugin.endOfDirectory(g.PLUGIN_HANDLE, succeeded=False)\n return\n\n list_item = get_inputstream_listitem(videoid)\n infos, art = infolabels.add_info_for_playback(videoid, list_item)\n common.debug('Sending initialization signal')\n common.send_signal(common.Signals.PLAYBACK_INITIATED, {\n 'videoid': videoid.to_dict(),\n 'infos': infos,\n 'art': art,\n 'timeline_markers': get_timeline_markers(metadata[0]),\n 'upnext_info': get_upnext_info(videoid, (infos, art), metadata)})\n xbmcplugin.setResolvedUrl(\n handle=g.PLUGIN_HANDLE,\n succeeded=True,\n listitem=list_item)", "def play_music(music_file):\n music_file = stim(music_file)\n clock = pygame.time.Clock()\n try:\n pygame.mixer.music.load(music_file)\n print \"Music file %s loaded!\" % music_file\n except pygame.error:\n print \"File %s not found! (%s)\" % (music_file, pygame.get_error())\n return\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy():\n # check if playback has finished\n clock.tick(30)", "def song(self, value):\r\n self._song_id = value\r\n data = Song(value)\r\n self.songtitel = data.songtitel if data.found else \"\"", "def start_soundtrack(self):\n sources = screens['Combat']['music']\n self.source = choice(sources)\n Logger.info(\n 'Application: Chose \"{}\" as the combat music.'.format(self.source)\n )\n try:\n SoundManager.music[self.source]\n except KeyError:\n SoundManager.add_music(self.source, self)\n SoundManager.play_music(self.source)", "def playSound(self, filename, volume = 0.0, pan = 0.0, frequency = 44100, looping = False, play = True):\r\n m = re.match(self.soundRe, filename)\r\n if m:\r\n g = m.groups()[1]\r\n filename = filename.replace(g, str(int(int(g.strip('*')) * self.random.random()) + 1))\r\n try:\r\n s = stream.FileStream(file = filename)\r\n except BassError as e:\r\n raise BassError(e.code, 'Error playing file %s: %s.' 
% (filename, str(e)))\r\n v = self.baseVolume + volume\r\n if v < 0.0:\r\n v = 0.0\r\n elif v > 1.0:\r\n v = 1.0\r\n s.set_volume(v)\r\n p = 0.0 + pan\r\n if p > 1.0:\r\n p = 1.0\r\n elif p < -1.0:\r\n p = -1.0\r\n s.set_pan(p)\r\n s.set_frequency(frequency)\r\n s.set_looping(looping)\r\n if play:\r\n threading.Thread(name = 'Sound Player', target = s.play_blocking).start()\r\n return s", "def PlayMusic(self,protoId):\r\n app=GetApplication() \r\n if not protoId:\r\n app.MusicFadeOut(2000)\r\n else:\r\n resId= random.choice(self.prototypes[protoId].resources)\r\n music=app.resourcemanager.Load( resId )\r\n app.PlayMusic(music)", "def play_audio(file: str) -> None:\n pygame.mixer.init()\n pygame.mixer.music.load(file)\n pygame.mixer.music.play()\n\n while pygame.mixer.music.get_busy():\n continue", "async def play(client, message, voicePlayerList):\r\n #Check to ensure that a valid voice client was passed\r\n voiceConnectionExists = False\r\n voice = None\r\n for connection in client.voice_clients:\r\n if connection.server == message.server:\r\n voiceConnectionExists = True\r\n voice = connection\r\n # If there is a valid voice client, try to create an audio player\r\n if voiceConnectionExists:\r\n #Create a filepath with the users input\r\n playFilePath = 'audio/'\r\n messageContentList = message.content.split(' ')\r\n if not len(messageContentList) > 1:\r\n # This is not a valid command, notify the user\r\n playError = 'I\\'m not sure what you want me to do. '\r\n playError += 'Please use the format:\\n```\\n'\r\n playError += 'play {songtitle}\\n```'\r\n await client.send_message(message.channel, playError)\r\n return\r\n playFilePath += messageContentList[1] # Index 1 contains the song\r\n playFilePath += '.mp3'\r\n #Check if the file exists\r\n if os.path.isfile(playFilePath):\r\n #Create a list to be appended to the queue\r\n #List will contain ['local', local_mp3_id]\r\n #Will be used by songFinished to identify the type of player needed\r\n playerListAppend = []\r\n playerListAppend.append('local')\r\n playerListAppend.append(playFilePath)\r\n voicePlayerList.append(playerListAppend)\r\n if len(voicePlayerList) == 1:\r\n #There is nothing currently playing\r\n #Display a currently playing message first\r\n nowPlaying = 'Now Playing:```prolog\\n'\r\n nowPlaying += playFilePath\r\n nowPlaying += '\\n```'\r\n await client.send_message(message.channel, nowPlaying)\r\n #Start a new player\r\n mp3Player = voice.create_ffmpeg_player(playFilePath,\r\n options='-loglevel panic -hide_banner',\t\t\t\t\t\t\t\t after=lambda: songFinished(client, message, voice, voicePlayerList))\r\n #Before starting it, replace the 0 index of the queue\r\n #With the player so it can be stopped if needed\r\n voicePlayerList[0] = mp3Player\r\n mp3Player.start()\r\n else:\r\n #This has been added the queue\r\n #Send a notification\r\n notification = 'I\\'ve added '\r\n notification += playFilePath\r\n notification += ' to the queue!'\r\n await client.send_message(message.channel, notification)\r\n\r\n else: \r\n #No file was found, notify the user\r\n playError = 'I can\\'t find a song with the name \\''\r\n playError += messageContentList[1]\r\n playError += '\\'!'\r\n await client.send_message(message.channel, playError)\r\n return\r\n else:\r\n playError = 'I have to be connected to a voice channel to do that!\\n'\r\n playError += 'Use the \\'connect\\' command to summon me!'\r\n await client.send_message(message.channel, playError)\r\n return", "def start(self):\n\tglobal mode\n\tmode=\"./music/\"\n\tglobal 
message\n\tif message!=2:\n\t\tmessage=1\n\t\tbot.loop.create_task(play())", "def choose_song(my_name):\n my_name = my_name.split(ET)[ZERO]\n path = ''\n for filename in os.listdir(str(Path.cwd()) + '/songs'):\n name = filename.split('\\\\')[-1]\n name = name.split('.')[ZERO]\n name = name.split(ET)[ZERO]\n if filename.endswith(\".wav\") and my_name == name:\n path = str(Path.cwd()) + r'\\songs\\%s' % filename\n if os.path.exists(path):\n return path\n else:\n return ERROR", "def loadAudio(self,path):\r\n if self.vid:# Release video to access\r\n self.vid.release()\r\n # Check if has audio\r\n mixer.music.unload()\r\n command = \"ffprobe -i \\\"{0}\\\" -show_streams -select_streams a -loglevel error\".format(path)\r\n result = run(command,stdout=PIPE,stderr=PIPE,universal_newlines=True,shell=True)\r\n if result.stdout.startswith(\"[STREAM]\"):# Contains audio\r\n self.hasAudio = True\r\n else:\r\n self.hasAudio = False\r\n return\r\n print(\"Preparing Audio...\",end=\"\")\r\n filename = \"project_audio.mp3\"\r\n self.aud_path = filename\r\n t_start = time.time()\r\n # Extract audio using ffmpeg, always overwrite\r\n command = \"ffmpeg -y -i \\\"{0}\\\" \\\"{1}\\\"\".format(path,filename)\r\n result = run(command,stdout=PIPE,stderr=PIPE,universal_newlines=True,shell=True)\r\n## print(result.stderr)\r\n t_end = time.time()\r\n print(\"Done[{0}]\".format(int(t_end-t_start)))\r\n try:\r\n mixer.music.unload()\r\n mixer.music.load(filename)\r\n except:\r\n print(\"Error Loading Audio\")\r\n self.hasAudio = False\r\n self.vid = cv2.VideoCapture(self.vid_path)# Reload video component\r\n # Launch in GUI Thread\r", "def play_prog(self):\r\n\r\n serial_number = range(47845, 47869)\r\n chord_number = range(1, 25)\r\n for i in self.cnv:\r\n # Look for matching audio files and play them.\r\n try:\r\n filename = \"audio files/{}__{}.wav\".format(serial_number[i-1], chord_number[i-1])\r\n playsound.playsound(filename)\r\n except FileNotFoundError:\r\n print('Error: audio files not found.')", "def handle_play(self, message):\n self.audio_service.resume()", "async def async_play_track(self, track):\n if not len(self._trackq) > 0 or track is None:\n return\n\n track.hass = self.hass # render template\n trackn = track.async_render()\n\n if not self._slave_mode:\n try:\n index = [idx for idx, s in enumerate(self._trackq) if trackn in s][0]\n except (IndexError):\n return\n\n if not index > 0:\n return\n\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:playLocalList:{0}\".format(index), None)\n if value != \"OK\":\n _LOGGER.warning(\"Failed to play media track by name. Device: %s, Got response: %s\", self.entity_id, value)\n return False\n else:\n self._state = STATE_PLAYING\n self._playing_tts = False\n self._media_title = None\n self._media_artist = None\n self._media_album = None\n self._trackc = None\n self._icecast_name = None\n self._playhead_position = 0\n self._duration = 0\n self._position_updated_at = utcnow()\n self._media_image_url = None\n self._media_uri = None\n self._media_uri_final = None\n self._ice_skip_throt = False\n self._unav_throttle = False\n # await self.async_schedule_update_ha_state(True)\n return True\n else:\n await self._master.async_play_track(track)", "def play(self):\n\n try:\n if self.source is None:\n # If there is no source-file, write the data to a temporary WAV-file ...\n tmpFile = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)\n tmpFile.close()\n self.write_wav(tmpFile.name)\n \n # ... 
and play that file\n if sys.platform=='win32':\n winsound.PlaySound(tmpFile.name, winsound.SND_FILENAME)\n elif sys.platform == 'darwin':\n cmd = ['afplay', tmpFile.name]\n subprocess.run(cmd)\n else:\n pygame.init()\n pygame.mixer.music.load(tmpFile.name)\n pygame.mixer.music.play()\n time.sleep(self.duration)\n \n # If you want to use FFMPEG instead, use the following commands:\n #cmd = [self.ffmpeg_info.ffplay, '-autoexit', '-nodisp', '-i', tmpFile.name]\n #subprocess.run(cmd)\n \n elif os.path.exists(self.source):\n # If you have a given input file ...\n print('Playing ' + self.source)\n \n # ... then play that one\n if sys.platform == 'win32':\n winsound.PlaySound(str(self.source), winsound.SND_FILENAME)\n elif sys.platform == 'darwin':\n cmd = ['afplay', str(self.source)]\n subprocess.run(cmd)\n else:\n pygame.init()\n pygame.mixer.music.load(self.source)\n pygame.mixer.music.play()\n time.sleep(self.duration)\n \n # If you want to use FFMPEG instead, use the following commands:\n #cmd = [self.ffmpeg_info.ffplay, '-autoexit', '-nodisp', '-i', self.source]\n #subprocess.run(cmd)\n \n except SystemError:\n print('If you don''t have FFMPEG available, you can e.g. use installed audio-files. E.g.:')\n print('import subprocess')\n print('subprocess.run([r\"C:\\Program Files (x86)\\VideoLAN\\VLC\\vlc.exe\", r\"C:\\Music\\14_Streets_of_Philadelphia.mp3\"])')", "def record_and_play(self, song):\n ret = self.rpc.call(\"record_and_play\",\n params=dict(song=song, times=100, volume=2),\n marshaller=dict_marshaller,\n unmarshaller=numpy_unmarshaller)\n for block in ret:\n yield block", "def play_music1(music_file):\n clock = pygame.time.Clock()\n try:\n pygame.mixer.music.load(music_file)\n print (\"Music file %s loaded!\" % music_file)\n except pygame.error:\n print (\"File %s not found! (%s)\" % (music_file, pygame.get_error()))\n return\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy():\n # check if playback has finished\n clock.tick(30)", "def play(options):\n signal = audio.play(options.ipath)\n if options.plot:\n plotter.plot(**{options.opath: signal.data})", "def play( self, loop = 0 ):\n self.sound.set_volume( Sound.our_sound_volume )\n self.channel = self.sound.play( loop )", "def play(self, *args, **kwargs) -> None:\n raise NotImplementedError()", "def _stream_from_url(self):\n if self.songObj is None:\n self._extract_data()\n else:\n self._extract_songObj()\n\n logger.debug(self.title)\n\n # Now search the song locally\n if not self.dont_cache_search:\n match = search_locally(self.title)\n if match:\n # Update the URL cache. This is necessary for the old songs.\n update_URL_cache(self.title, self.URL)\n # Change the value to local path\n self.stream_url = match[1]\n else:\n self._dw()\n else:\n logger.info(\"Searching locally disabled.\")\n if self.stream_url == \"\":\n self._get_youtube_data_url()\n\n direct_to_play(self.stream_url, self.show_lyrics, self.title)", "async def play_url(self, url: str):\n await self._pytheos.api.browse.play_url(self.id, url)", "async def play(self, ctx, *, query: str):\n # Get the player for this guild from cache.\n player = self.bot.lavalink.player_manager.get(ctx.guild.id)\n\n # If player is paused - unpause, return\n if player.paused:\n return await player.set_pause(False)\n\n # Remove leading and trailing <>. <> may be used to suppress embedding links in Discord.\n query = query.strip('<>')\n\n # Check if the user input might be a URL. 
If it isn't, we can Lavalink do a YouTube search for it instead.\n # SoundCloud searching is possible by prefixing \"scsearch:\" instead.\n if not url_rx.match(query):\n query = f'ytsearch:{query}'\n\n # Get the results for the query from Lavalink.\n results = await player.node.get_tracks(query)\n\n # Results could be None if Lavalink returns an invalid response (non-JSON/non-200 (OK)).\n # ALternatively, resullts['tracks'] could be an empty array if the query yielded no tracks.\n if not results or not results['tracks']:\n return await ctx.send(embed=self.error_embed(f\"No results found for `{query}`\"))\n\n embed = discord.Embed(color=discord.Color.blurple())\n\n # Valid loadTypes are:\n # TRACK_LOADED - single video/direct URL)\n # PLAYLIST_LOADED - direct URL to playlist)\n # SEARCH_RESULT - query prefixed with either ytsearch: or scsearch:.\n # NO_MATCHES - query yielded no results\n # LOAD_FAILED - most likely, the video encountered an exception during loading.\n if results['loadType'] == 'PLAYLIST_LOADED':\n tracks = results['tracks']\n\n for track in tracks:\n # Add all of the tracks from the playlist to the queue.\n length = track[\"info\"][\"length\"]\n track = lavalink.models.AudioTrack(\n track, requester=ctx.author.id, recommended=True, length=length)\n player.add(requester=ctx.author.id, track=track)\n\n embed.title = ''\n embed.description = f'Queued **{results[\"playlistInfo\"][\"name\"]}** - {len(tracks)} tracks'\n else:\n track = results['tracks'][0]\n embed.title = \"\"\n embed.description = f'Queued [{track[\"info\"][\"title\"]}]({track[\"info\"][\"uri\"]}) [{ctx.message.author.mention}]'\n length = track[\"info\"][\"length\"]\n\n # You can attach additional information to audiotracks through kwargs, however this involves\n # constructing the AudioTrack class yourself.\n track = lavalink.models.AudioTrack(\n track, requester=ctx.author.id, recommended=True, length=length)\n player.add(requester=ctx.author.id, track=track)\n\n # Save text channel in which bot command was sent\n # for further reply\n self.preferred_channels[str(ctx.guild.id)] = ctx.message.channel.id\n\n await ctx.send(embed=embed)\n\n # We don't want to call .play() if the player is playing as that will effectively skip\n # the current track.\n if not player.is_playing:\n await player.play()", "def play(self, context=None):\n\n self.nowPlaying = True\n\n # Open file for reading\n wf = wave.open(self.path + '/' + self.name, 'rb')\n p = pyaudio.PyAudio()\n\n # Open stream for playback\n stream = p.open( format = p.get_format_from_width( wf.getsampwidth() ),\n channels = wf.getnchannels(),\n rate = wf.getframerate(), output = True)\n\n # Read file in chunks of 1024 bytes\n data = wf.readframes(1024)\n\n # Read while there is data left to read\n # If nowPlaying is False, user has clicked Stop\n while data != '' and self.nowPlaying:\n stream.write(data)\n data = wf.readframes(1024)\n\n stream.stop_stream()\n stream.close()\n\n p.terminate()\n\n self.nowPlaying = False\n\n # Callback to UI to signal that audio has finished playing\n if context is not None:\n context.stopAudio()", "def change_music(self, track):\n try:\n if self.bg_volume != 0:\n self.current = self.music_lib[track]\n pygame.mixer.music.load(self.current)\n pygame.mixer.music.play(-1)\n self.current = track\n else:\n pygame.mixer.music.stop()\n except:\n print \"Couldn't load track '\", track + \"'!\"", "async def async_media_play(self) -> None:\n await self._projector.send_command(PLAY)", "def media_play(self):\n self._state = STATE_PLAYING", 
"def media_play(self):\n self._state = STATE_PLAYING", "def import_song(self, song, playlist):\n\n try:\n song_uri = self.find_song_uri(song)\n except SongNotFoundError as e:\n print(f\"could not find song {song} to add to playlist '{playlist['name']}'\")\n else:\n self.add_song_to_playlist(song_uri, playlist[\"id\"])", "def play(self,event=None):\r\n # If play -> play, ignore or if no video data\r\n if self.isPlaying() or self.isEmpty():\r\n return\r\n # If stop -> play, restart clip\r\n elif self.isStopped():\r\n if self.hasAudio:\r\n mixer.music.play(loops=0)\r\n self.startTimestamp = time.time()\r\n # If pause -> play, set progress and resume\r\n elif self.isPaused():\r\n if self.hasAudio:\r\n mixer.music.unload()\r\n mixer.music.load(self.aud_path)\r\n self.seek(self.progress)\r\n return\r\n self.state = VideoPlayer.State.PLAYING\r\n self.root.after(0,self.stream)", "def quick_play(self, index=0):\n self.play(self.download(self.results[index]))", "def selectPlay(id):\n\tsong = music.song()\n\tsql = \"SELECT id, title, path, filename, hash, base FROM songs \" \\\n\t\t+ \"WHERE id = \" + str(id) + \";\"\n\tc, conn = connect()\n\tc.execute(sql)\n\tsinfo = c.fetchone()\n\t\n\tif sinfo[0]:\n\t\tsong.id = sinfo[0]\n\tif sinfo[1]:\n\t\tsong.name = sinfo[1]\n\tif sinfo[2]:\n\t\tsong.path = sinfo[2]\n\tif sinfo[3]:\n\t\tsong.filename = sinfo[3]\n\tif sinfo[4]:\n\t\tsong.hash = sinfo[4]\n\tif sinfo[5]:\n\t\tsong.base = sinfo[5]\n\t\n\treturn song", "async def play_sound(self) -> None:\n await self.core.loop.run_in_executor(None, self.device.play_sound)", "def play_media(self, item):\n self.play_media_event.clear()\n\n def app_launched_callback():\n try:\n self._send_start_play(item)\n finally:\n self.play_media_event.set()\n\n self.launch(app_launched_callback)", "async def playnow(self, ctx):\n server = ctx.message.server\n author = ctx.message.author\n voice_channel = author.voice_channel\n\n # Checking already connected, will join if not\n\n if not self.voice_connected(server):\n try:\n self.has_connect_perm(author, server)\n except AuthorNotConnected:\n await self.bot.say(\"You must join a voice channel before I can\"\n \" play anything.\")\n return\n except UnauthorizedConnect:\n await self.bot.say(\"I don't have permissions to join your\"\n \" voice channel.\")\n return\n except UnauthorizedSpeak:\n await self.bot.say(\"I don't have permissions to speak in your\"\n \" voice channel.\")\n return\n else:\n await self._join_voice_channel(voice_channel)\n else: # We are connected but not to the right channel\n if self.voice_client(server).channel != voice_channel:\n pass # TODO: Perms\n\n # Checking if playing in current server\n\n if self.is_playing(server):\n await self.bot.say(\"I'm already playing a song on this server!\")\n return # TODO: Possibly execute queue?\n\n # If not playing, spawn a downloader if it doesn't exist and begin\n # downloading the next song\n\n if self.currently_downloading(server):\n await self.bot.say(\"I'm already downloading a file!\")\n return\n\n lists = self._list_local_playlists()\n\n if not any(map(lambda l: os.path.split(l)[1] == name, lists)):\n await self.bot.say(\"Local playlist not found.\")\n return\n\n self._play_local_playlist(server, name)", "def wavplay(filename):\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\tprint(\"Input file does not exist. 
Make sure you computed the analysis/synthesis\")\n\telse:\n\t\tif sys.platform == \"linux\" or sys.platform == \"linux2\":\n\t\t # linux\n\t\t subprocess.call([\"aplay\", filename])\n\n\t\telif sys.platform == \"darwin\":\n\t\t\t# OS X\n\t\t\tsubprocess.call([\"afplay\", filename])\n\t\telse:\n\t\t\tprint(\"Platform not recognized\")", "def read_audio_from_path(path: str) ->Optional[TorchAudioTuple]:\n bytes_obj = get_bytes_obj_from_path(path)\n return read_audio_from_bytes_obj(bytes_obj)", "def singleMode(self):\n print(\"[b blue] Single Song Mode [/]\")\n\n pathForSong = qr.path(\n \"Select path for downloading songs:\",\n only_directories=True,\n qmark=\">>>\",\n ).ask()\n\n # os.chdir(pathForSong)\n sp.run(\n [\n \"cd\",\n f\"{pathForSong}\",\n ],\n shell=True,\n )", "async def play(self, ctx, *, query):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n query = query.strip('<>')\n if player.is_connected:\n if not ctx.author.voice or not ctx.author.voice.channel or player.connected_channel.id != ctx.author.voice.channel.id:\n return await ctx.send(\"You have to be in my voice channel to queue a song :no_entry:\")\n else:\n if not ctx.author.voice or not ctx.author.voice.channel:\n return await ctx.send(\"Join a voice channel :no_entry:\")\n else:\n player.store('sessionowner', ctx.author.id)\n player.store('channel', ctx.channel.id)\n await player.connect(ctx.author.voice.channel.id)\n if not url_re.match(query):\n query = \"ytsearch:{}\".format(query)\n results = await self.bot.lavalink.get_tracks(query)\n if not results or not results['tracks']:\n return await ctx.send(\"I could not find any songs matching that query :no_entry:\")\n s=discord.Embed()\n if results[\"loadType\"] == \"PLAYLIST_LOADED\":\n tracks = results[\"tracks\"]\n for track in tracks:\n player.add(requester=ctx.author.id, track=track)\n s.description = \"Enqueued {} with **{}** tracks <:done:403285928233402378>\".format(results['playlistInfo']['name'], len(tracks))\n await self.bot.get_channel(player.fetch('channel')).send(embed=s)\n else:\n track = results[\"tracks\"][0]\n player.add(requester=ctx.author.id, track=track)\n timetill = 0\n for x in player.queue:\n timetill += x.duration\n if player.current:\n timetill += player.current.duration - player.position\n else:\n timetill = 0 \n index = [x.track for x in player.queue].index(track[\"track\"]) + 1\n s.set_author(name=\"Added to Queue\", icon_url=ctx.author.avatar_url)\n s.set_thumbnail(url=\"https://img.youtube.com/vi/{}/default.jpg\".format(track[\"info\"][\"identifier\"]))\n s.add_field(name=\"Song\", value=\"[{}]({})\".format(track[\"info\"][\"title\"], track[\"info\"][\"uri\"]), inline=False)\n s.add_field(name=\"Duration\", value=self.format_time(track[\"info\"][\"length\"]), inline=True)\n s.add_field(name=\"Position in Queue\", value=index)\n if timetill != 0:\n s.add_field(name=\"Estimated time till playing\", value=self.format_time(timetill-track[\"info\"][\"length\"]))\n else:\n s.add_field(name=\"Estimated time till playing\", value=\"Next\")\n await self.bot.get_channel(player.fetch('channel')).send(embed=s)\n if not player.is_playing:\n await player.play()", "def get_audio(path):\n return send_from_directory('audio', path)", "def play_audio(filename):\n os.system(AUDIOPLAYER + ' ' + filename)", "def play_music(music_file):\n clock = pygame.time.Clock()\n #try-catch for playing audio from MIDI file\n try:\n pygame.mixer.music.load(music_file)\n print \"Music file %s loaded!\" % music_file\n self.update()\n except pygame.error:\n print \"File 
%s not found! (%s)\" % (music_file, pygame.get_error())\n return\n pygame.mixer.music.play() #plays MIDI file\n self.update() #updates frame", "def PlayItem(self, item=None, type=None):\n xbmc = Server(self.url('/jsonrpc', True))\n self.logger.debug(\"Playing a file from the type \" + type)\n\n if type == 'movie':\n return xbmc.Player.Open(item={'movieid': int(item)})\n if type == 'episode':\n return xbmc.Player.Open(item={'episodeid': int(item)})\n if type == 'artist':\n return xbmc.Player.Open(item={'artistid': int(item)})\n if type == 'album':\n return xbmc.Player.Open(item={'albumid': int(item)})\n if type == 'song':\n return xbmc.Player.Open(item={'songid': int(item)})\n if type == 'channel':\n return xbmc.Player.Open(item={'channelid': int(item)})\n\n return xbmc.Player.Open(item={'file': item})", "def play_music(self):\n if self.music is not None and not self.music_playing:\n self.music_playing = True\n if self.is_running:\n cocos.audio.music.control.play()", "def play_video(path):\n # Create a playable item with a path to play.\n play_item = xbmcgui.ListItem(path=path)\n vid_url = play_item.getfilename()\n if 'WatchOnlineMovies' not in vid_url:\n stream_url = resolve_url(vid_url)\n if stream_url:\n play_item.setPath(stream_url)\n # Pass the item to the Kodi player.\n xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)", "def music():\n pygame.mixer.init()\n pygame.mixer.music.load(\"1.wav\")\n pygame.mixer.music.play(100)" ]
[ "0.79737085", "0.68196875", "0.66903174", "0.66628766", "0.65618736", "0.6544999", "0.648962", "0.64856863", "0.6419521", "0.6384046", "0.6374167", "0.633997", "0.63393456", "0.63371986", "0.63166153", "0.6316028", "0.6295789", "0.6295789", "0.62778527", "0.6240952", "0.62402165", "0.6212399", "0.6185112", "0.61553085", "0.6151835", "0.613386", "0.6110401", "0.6086122", "0.6072462", "0.60707605", "0.60612947", "0.60355866", "0.60328186", "0.6022408", "0.6014971", "0.60031354", "0.5997123", "0.5995664", "0.597556", "0.5948039", "0.59386206", "0.5936748", "0.5936222", "0.59325457", "0.5906839", "0.5882114", "0.5882114", "0.58744293", "0.58675283", "0.58566374", "0.5852647", "0.5846506", "0.5846005", "0.5828515", "0.58087504", "0.5801738", "0.5784064", "0.5772883", "0.57684076", "0.57476825", "0.57447356", "0.57330835", "0.57329947", "0.57321024", "0.5729765", "0.5720124", "0.57198554", "0.5716872", "0.5714498", "0.5711866", "0.57079935", "0.57078505", "0.56861633", "0.56760556", "0.5672775", "0.56675345", "0.5666371", "0.5664485", "0.56585616", "0.5657684", "0.5639298", "0.5639298", "0.56304526", "0.5624295", "0.56231457", "0.5619904", "0.5607685", "0.5601629", "0.5598725", "0.5597104", "0.55943537", "0.55864346", "0.55830544", "0.5579835", "0.55753976", "0.55569154", "0.5555769", "0.5555456", "0.55396193", "0.5533482" ]
0.8189756
0
Stop the current playing/paused song.
def stop_song(self): if self.isPlaying: self.playSong[0].stop() self.playSong.clear() self.isPlaying = False print("Music stopped") else: print("Play a song first...")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _stop(self, ctx: commands.Context):\n ctx.voice_state.songs.clear()\n\n if ctx.voice_state.is_playing:\n ctx.voice_state.voice.stop()\n return await ctx.send(embed=embed_msg(description=\"🛑 Stopped the music\"))\n\n else:\n return await ctx.send('Cannot stop. Not playing any song right now.')", "def stop(self):\n if self.player and self.player.is_playing():\n self.player.pause()\n super().stop()", "def stop(self):\n if self.is_playing:\n self.is_playing = False\n self.tstamp_play = None\n self.thread.stop()", "def stop_soundtrack(self):\n SoundManager.remove_music(self.source, self)", "def stop_soundtrack(self):\n SoundManager.remove_music(self.source, self)", "async def _stop(self, ctx: commands.Context):\n\n ctx.voice_state.songs.clear()\n\n if not ctx.voice_state.is_playing:\n ctx.voice_state.voice.stop()\n await ctx.message.add_reaction('⏹')", "async def _stop(self, ctx: commands.Context):\n\n ctx.voice_state.songs.clear()\n\n if ctx.voice_state.is_playing:\n ctx.voice_state.voice.stop()\n await ctx.message.add_reaction('⏹')", "def stop():\n if playlist_box.size() > 0:\n # Displaying Status\n track_status.set(\"(Stopped)\")\n play_pause_btn.configure(image=play_img)\n track_pos_slider.configure(state=\"disabled\")\n reset_track()\n cancel_update_play_time_loop()\n cancel_track_end_event_loop()\n # Stopped Song\n # pygame.mixer.music.stop()\n # BUG Using rewind and pause instead of stop,\n # Reason: after stoping track and playing the same track,\n # an \"End of track\" event is generated, BUGGG???\n pygame.mixer.music.rewind()\n pygame.mixer.music.pause()", "def stop(self):\n self.stopped = True\n # FIXME?: Why is this not doing anything? Shouldn't it be calling into the player API?", "async def stop(self, ctx):\n server = ctx.message.server\n state = self.get_voice_state(server)\n\n if state.is_playing():\n player = state.player\n player.stop()\n\n try:\n state.speech_player.cancel()\n state.audio_player.cancel()\n del self.voice_states[server.id]\n await state.voice.disconnect()\n await self.bot.say('Stopped.')\n except:\n await self.bot.say('Couldn\\'t stop.')\n pass", "def stop_music(self):\n self.load_music(None)", "def _control_stop(self):\n self.player.stop()", "async def stop(self, ctx):\n if not await self.control_checks(ctx):\n return\n server_id = ctx.message.server.id\n srv = self.get_server_dict(server_id)\n srv['queue'].clear()\n if self.is_playing(server_id):\n srv['player'].stop()", "def stopping_sound(self):\n logger.warning('current sound play is being stopped')\n self.stop_sound = True", "async def stop_(client, event):\n player = get_player_or_abort(client, event)\n\n await player.stop()\n return 'Stopped playing'", "def stop(self):\n if logging.getLogger().getEffectiveLevel() != 10:\n try:\n self._player.terminate()\n except AttributeError as e: # Make things a bit more user friendly and allow a stop command even if not playing\n if str(e) == \"'Player' object has no attribute '_player'\":\n return\n else:\n raise AttributeError(str(e)) # Only catch the known error and raise any others to pass them through\n logging.debug(\"Stopping Playback\")", "def stop_video(self):\n\n # Enabling all the buttons, the speedCombo and the checkbox\n self.enable_btns()\n\n if self.mediaPlayer.state() == QMediaPlayer.PlayingState or self.mediaPlayer.state() == QMediaPlayer.PausedState:\n self.mediaPlayer.stop()\n else:\n pass", "def stop(self):\n self.set_state_null()\n self.player = None", "def toggle_play(self):\n\t\tif 
self._playing:\n\t\t\tself.stop()\n\t\t\tself._playing = False\n\t\telse:\n\t\t\tself.play()\n\t\t\tself._playing = True", "def stop_video(self):\n if self.now_playing_videoid:\n # remove the current video id from the record\n video_playing = self._video_library.get_video(self.now_playing_videoid)\n print(f\"Stopping video: {video_playing.title}\")\n self.now_playing_videoid = ''\n self.pause = False\n else: \n print(f\"Cannot stop video: No video is currently playing\")\n\n # print(\"stop_video needs implementation\")", "def stop_video(self):\n if self._current_video is None:\n print(\"Cannot stop video: No video is currently playing\")\n return\n print(f\"Stopping video: {self._current_video.title}\")\n self._current_video = None\n self._paused = False", "async def stop(self, ctx):\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n\n if voice:\n if voice.is_playing():\n voice.stop()\n await ctx.send(\"Playing stopped.\") \n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not playing anything right now.\")\n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not in a voice channel.\")", "def pause_song(self):\r\n if self.isPlaying:\r\n self.playSong[0].pause()\r\n print(\"Song paused. To continue type Play.\")\r\n else:\r\n print(\"Play a song first...\")", "def stop(self,event=None):\r\n # If no video data\r\n if self.isEmpty():\r\n return\r\n if self.hasAudio:\r\n mixer.music.stop()\r\n self.state = VideoPlayer.State.STOPPED\r\n self.progress = 0\r\n self.startTimestamp = time.time()", "def stop_state(self):\n\n QMetaObject.invokeMethod(self.video_player, \"stop\", Qt.QueuedConnection)\n self.video_playing = False", "async def stop(self, ctx: commands.Context) -> None:\n player = self.get_player(ctx.guild)\n\n if not player.queue.empty:\n await player.stop()\n player.queue.clear_queue()\n embed = Embeds.status(\n success=True, desc=\"Playback stopped and queue cleared.\"\n )\n await ctx.send(embed=embed)", "async def stop(self):\n _LOGGER.debug(f\"[Foobar2k] In Stop. 
Current state is [{self._state}]\")\n if (self._power == POWER_ON and self._state == STATE_PLAYING):\n await self.prep_fetch(HTTP_POST, POST_PLAYER_STOP, data=None)\n self._state = STATE_STOPPED\n _LOGGER.debug(f\"[Foobar2k] State now is [{self._state}]\")", "def stop_video(self):\n if self.is_playing:\n print(f\"Stopping video: {self.playing_now}\")\n self.is_playing = False\n else:\n print(\"Cannot stop video: No video is currently playing\")", "def StopTrack(self):\n handler = self.get_command_object(\"StopTrack\")\n handler()", "async def stop(self):\n await self._bot.lavalink.ws.send(op='stop', guildId=self.guild_id)\n self.current = None", "def stop_video(self):\n\n if self.current_video is None:\n print(\"Cannot stop video: No video is currently playing\")\n else:\n print(\"Stopping video:\", self.current_video.title)\n self.current_video = None", "def pause(self):\n if not self.paused:\n pygame.mixer.music.pause()\n self.paused = True\n else:\n pygame.mixer.music.unpause()\n self.paused = False", "def force_stop(self):\n self.timer.stop()\n QMetaObject.invokeMethod(self.video_player, \"stop\", Qt.QueuedConnection)\n self.video_playing = False\n self.stopped = True", "async def stop(self, msg):\n if msg.voice_client is None:\n return await msg.send(\"Bot is not connect to a voice channel\")\n\n if msg.author.voice is None:\n return await msg.send(\"You must be in the same voice channel as the bot\")\n\n if msg.author.voice is not None and msg.voice_client is not None:\n if msg.voice_client.is_playing() is True or self.player[msg.guild.id]['queue']:\n self.player[msg.guild.id]['queue'].clear()\n self.player[msg.guild.id]['repeat'] = False\n msg.voice_client.stop()\n return await msg.message.add_reaction(emoji='✅')\n\n return await msg.send(f\"**{msg.author.display_name}, there is no audio currently playing or songs in queue**\")", "def stop( self ):\n self.data_source.si.daqStop()\n self.timer.stop()\n \n #re-enable the play button\n self.play_button.setEnabled(True)\n self.stop_button.setEnabled(False)\n self.spinbox_timestep.setEnabled(True)", "def stop_current_episode(self):\n raise NotImplementedError", "async def set_stopped(self, value: bool):\n await self._pytheos.api.player.set_play_state(self.id, models.player.PlayState.Stopped if value else models.player.PlayState.Playing)", "def unpause(self):\n self.paused = False", "async def stop(self, ctx):\n\n await ctx.voice_client.disconnect()", "async def stop(self, ctx):\n\n await ctx.voice_client.disconnect()", "def pushbutton_stop_clicked(self):\n\n if self.frame_player.run_player:\n self.frame_player.run_player = False", "def stop_alarm_ringtone(self):\n mixer.stop()\n mixer.quit()", "async def stop(ctx):\r\n\r\n await ctx.voice_client.disconnect()", "async def stop(self):\n await self.node._send(op='stop', guildId=self.guild_id)\n self.current = None", "def media_play_pause(self) -> None:\n if self.state == MediaPlayerState.PLAYING:\n self.media_pause()\n else:\n self.media_play()", "async def _pause(self, ctx: commands.Context):\n try:\n if ctx.voice_state.voice.is_playing:\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')\n\n except AttributeError:\n await ctx.send(\"Can't pause. 
No song is being played!\")", "def stop(self, **kwargs):\n self.turn_off()", "def stop(self):\n # Set a flag to stop audio and clear the queue.\n self.stop_and_clear_queue = True\n # Unblock the _wait_for_player_to_complete()\n # in the \"run\" method.\n self.audio_finished_event.set()\n # It's possible that immediately after the audio\n # finished event is set, the event cleared in \"run\"\n # thread inside of the _do_send method. To synchronize\n # the shutdown, send an empty test string to the\n # queue and then set the audio finished event again.\n self.text_queue.put('')\n self.audio_finished_event.set()\n # Wait for the text queue to become empty.\n self.text_queue.join()\n # Enable text processing again.\n self.stop_and_clear_queue = False", "def stop(self, **kwargs):\n return self.client.api.stop(self.id, **kwargs)", "def pause_play(self):\n\n if self.estado == gst.STATE_PAUSED \\\n or self.estado == gst.STATE_NULL \\\n or self.estado == gst.STATE_READY:\n self.__play()\n\n elif self.estado == gst.STATE_PLAYING:\n self.__pause()", "def stop_recording():\n do_command('PlayStop')\n print('Stopped')", "async def stop(self, ctx):\n if ctx.voice_client != None:\n await ctx.voice_client.disconnect()", "def stop(self):\n self.stream.stop()\n self.running = False", "def stop(self):\n GameEngine().stop()\n self.on_stop()", "def cancel_stop(cls):\n cls._set_mode_running()", "def on_stop(self):\n self.songs.save_songs(FILE_NAME)", "def stop(self,event=None):\r\n if self.controlLock.locked():\r\n return\r\n self.controlLock.acquire()\r\n self.videoPlayer.stop()\r\n self.videoPlayer.updateDataplayers()\r\n self.controlLock.release()", "def stop_video(self):\n stopped_video = self.video_id\n if stopped_video == None:\n print(\"Cannot stop video: No video is currently playing\")\n else:\n print(f\"Stopping video: {stopped_video}\")", "async def _pause(self, ctx: commands.Context):\n\n if ctx.voice_state.is_playing and ctx.voice_state.voice.is_playing():\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')", "async def _pause(self, ctx: commands.Context):\n\n if not ctx.voice_state.is_playing and ctx.voice_state.voice.is_playing():\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')", "def remove_song(self):\n self.stop()\n self.listbox.delete(\"anchor\")\n pygame.mixer.music.stop()", "def force_unpause(self):\n self.timer.start(self.timer_interval)\n QMetaObject.invokeMethod(self.video_player, \"play\", Qt.QueuedConnection)\n self.paused = False\n\n # Re-enable video buttons\n self.update.workerUnpaused.emit()", "def remove_songs(self):\n self.stop()\n self.listbox.delete(0, \"end\")\n pygame.mixer.music.stop()", "def stop(self) -> None:\n self._running = False", "def stop(self):\n self._log.info(\"Stopping\")\n self._running.clear()", "def on_stop(self):\n self.song_list.save_songs()", "def stop(self):\n self._running = False", "def stop(self):\n self._running = False", "def stop(self):\n\n self.keep_running = False", "def stop(self):\n self._schedule(0, 0)\n self._started = False", "def media_play_pause(self):\n if self._state == STATE_PLAYING:\n self._state = STATE_PAUSED\n else:\n self._state = STATE_PLAYING", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stopSound(self):\r\n self._shipsound = None\r\n self._aliensound = None\r\n self._shipexplode = None\r\n self._alienexplode = None", "def 
stopGame(event):\n if event.action == sense_hat.ACTION_RELEASED:\n global playAgain, alive\n playAgain = False\n alive = False", "def stop(self):\n self.__running = False", "def stop(self):\r\n self.running = False", "def stop(self):\r\n self.running = False", "def _control_pause(self):\n self.player.pause()", "def stop(self):\n with self.stop_lock:\n self._stop_event.set()", "def set_stop(self, track, xclip, ident, value = None):\n if track in self.song().tracks:\n track.stop_all_clips()", "def pause(self,event=None):\r\n # If pause -> pause or stop -> pause, ignore, or if no video\r\n if not self.isPlaying():\r\n return\r\n # If play -> pause\r\n self.progress = time.time() - self.startTimestamp\r\n if self.hasAudio:\r\n mixer.music.pause()\r\n self.state = VideoPlayer.State.PAUSED", "def pause_play(self):\n\n try:\n if self.entrada:\n if self.estado == \"playing\": # pausa\n self.__pause()\n\n elif self.estado == \"paused\":\n self.__pause(True)\n self.estado = \"playing\"\n self.emit(\"estado\", \"playing\")\n\n else:\n #if self.uri: self.load(self.uri)\n pass\n\n except Exception, e:\n print \"HA OCURRIDO UN ERROR EN PAUSE_PLAY DEL REPRODUCTOR\", e", "async def _resume(self, ctx: commands.Context):\n try:\n if ctx.voice_state.voice.is_paused:\n ctx.voice_state.voice.resume()\n await ctx.message.add_reaction('⏯')\n\n else:\n await ctx.send(\"No music paused!\")\n\n except AttributeError:\n await ctx.send(\"Can't pause. No song is being played!\")", "def turn_off(self) -> None:\n self._media_title = None\n self._state = self._player.turn_off()", "def stop(self, /, noerror=False):\n\t\tif not self in _running:\n\t\t\tif noerror: return\n\t\t\traise RuntimeError('Not running')\n\t\t_running.remove(self)\n\t\t_anim_stopped(self)", "async def stop(self):\n await self.pause()\n return await self.send_command_and_read_reply(\n Protocol1Command(command=\"\", execution_command=\"V\")\n )", "def close_play_lock(self) : \n self.play_lock = True", "def nothing_playing(self):\n self.state.set_active_player(None)", "async def next(self, ctx):\n if not await self.control_checks(ctx):\n return\n server_id = ctx.message.server.id\n if self.is_playing(server_id):\n self.get_server_dict(server_id)['player'].stop()\n else:\n await self._play(ctx.bot, server_id)", "def stop(self):\n\n self.active = False", "def stop(self):\n self.scion_sh('stop')", "def _stop(self):\n self._pi.stop()", "def pause(self):\n self.paused = True\n # FIXME?: Why is this not doing anything? Shouldn't it be calling into the player API?", "def pause_music():\n from Functions import Menu\n if Menu.music == False:\n pygame.mixer.music.unpause()\n Menu.music = True\n else:\n pygame.mixer.music.pause()\n Menu.music = False", "def on_worker_unpaused(self):\n self.playing = True\n self.enable_video_buttons(False, True, True)\n self.unpausing = False", "def stop_media(self):\n self.stdin_queue.put(\"stop\")" ]
[ "0.7680431", "0.76113164", "0.759389", "0.75338066", "0.75338066", "0.7464192", "0.74512273", "0.7417875", "0.7278527", "0.71976995", "0.71916264", "0.7135208", "0.710452", "0.69504833", "0.6907551", "0.68905944", "0.6856153", "0.6830112", "0.68043816", "0.6791049", "0.67570907", "0.6738051", "0.6732128", "0.6717595", "0.6671323", "0.66347104", "0.6624268", "0.6592487", "0.65596366", "0.6428721", "0.64206827", "0.64063627", "0.63992524", "0.6392939", "0.63928175", "0.6373799", "0.6361728", "0.6337174", "0.6310268", "0.6310268", "0.62928295", "0.6285398", "0.62394667", "0.6233348", "0.6212076", "0.61594546", "0.6152093", "0.6151737", "0.61477286", "0.6146245", "0.6133663", "0.6105896", "0.6096718", "0.607096", "0.6061252", "0.60470325", "0.60443294", "0.60359716", "0.6016974", "0.6014777", "0.60032046", "0.5996048", "0.59546626", "0.5948665", "0.59432364", "0.59297293", "0.5927047", "0.5927047", "0.5926972", "0.5908634", "0.5907065", "0.59051347", "0.59051347", "0.59051347", "0.59051347", "0.59051347", "0.59017456", "0.5895516", "0.5889531", "0.588804", "0.588804", "0.5887901", "0.58843", "0.5875571", "0.5856109", "0.58508396", "0.5847207", "0.58394974", "0.58364666", "0.58241093", "0.5816452", "0.5811364", "0.58084124", "0.5805851", "0.5798702", "0.57961655", "0.57953155", "0.57916003", "0.578649", "0.57862854" ]
0.81801236
0
Pause the current playing song.
def pause_song(self): if self.isPlaying: self.playSong[0].pause() print("Song paused. To continue type Play.") else: print("Play a song first...")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pause(self):\n self.paused = True\n # FIXME?: Why is this not doing anything? Shouldn't it be calling into the player API?", "def pause(self):\n if not self.paused:\n pygame.mixer.music.pause()\n self.paused = True\n else:\n pygame.mixer.music.unpause()\n self.paused = False", "async def _pause(self, ctx: commands.Context):\n try:\n if ctx.voice_state.voice.is_playing:\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')\n\n except AttributeError:\n await ctx.send(\"Can't pause. No song is being played!\")", "def pause_play(self):\n\n if self.estado == gst.STATE_PAUSED \\\n or self.estado == gst.STATE_NULL \\\n or self.estado == gst.STATE_READY:\n self.__play()\n\n elif self.estado == gst.STATE_PLAYING:\n self.__pause()", "def media_play_pause(self) -> None:\n if self.state == MediaPlayerState.PLAYING:\n self.media_pause()\n else:\n self.media_play()", "def pause(self):\n if self.status()['state'] == \"playing\":\n self.toggle_pause()", "def pause(self):\n spotifyconnect.Error.maybe_raise(lib.SpPlaybackPause())", "def media_play_pause(self):\n if self._state == STATE_PLAYING:\n self._state = STATE_PAUSED\n else:\n self._state = STATE_PLAYING", "async def pause(self, ctx):\n state = self.get_voice_state(ctx.message.server)\n if state.is_playing():\n player = state.player\n player.pause()\n await self.bot.say('Paused.')", "def pause(self,event=None):\r\n # If pause -> pause or stop -> pause, ignore, or if no video\r\n if not self.isPlaying():\r\n return\r\n # If play -> pause\r\n self.progress = time.time() - self.startTimestamp\r\n if self.hasAudio:\r\n mixer.music.pause()\r\n self.state = VideoPlayer.State.PAUSED", "def _control_pause(self):\n self.player.pause()", "async def _pause(self, ctx: commands.Context):\n\n if ctx.voice_state.is_playing and ctx.voice_state.voice.is_playing():\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')", "async def _pause(self, ctx: commands.Context):\n\n if not ctx.voice_state.is_playing and ctx.voice_state.voice.is_playing():\n ctx.voice_state.voice.pause()\n await ctx.message.add_reaction('⏯')", "def pause(self):\n self.paused_time = time.time()\n self.paused = True", "def media_pause(self) -> None:\n self._attr_state = MediaPlayerState.PAUSED\n self._client.pause()", "def media_pause(self):\n self._state = STATE_PAUSED", "def media_pause(self):\n self._state = STATE_PAUSED", "async def async_media_pause(self):\n if not self._slave_mode:\n if self._playing_stream and not (self._playing_mediabrowser or self._playing_mass):\n # Pausing a live stream will cause a buffer overrun in hardware. Stop is the correct procedure in this case.\n # If the stream is configured as an input source, when pressing Play after this, it will be started again (using self._prev_source).\n await self.async_media_stop()\n return\n\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:pause\", None)\n if value == \"OK\":\n self._position_updated_at = utcnow()\n self._idletime_updated_at = self._position_updated_at\n if self._playing_spotify:\n self._spotify_paused_at = utcnow()\n self._state = STATE_PAUSED\n if self._slave_list is not None:\n for slave in self._slave_list:\n await slave.async_set_state(self._state)\n await slave.async_set_position_updated_at(self.media_position_updated_at)\n# self.async_schedule_update_ha_state(True)\n else:\n _LOGGER.warning(\"Failed to pause playback. 
Device: %s, Got response: %s\", self.entity_id, value)\n else:\n await self._master.async_media_pause()", "async def pause(self, ctx):\n if not await self.control_checks(ctx):\n return\n server_id = ctx.message.server.id\n srv = self.get_server_dict(server_id)\n if self.is_playing(server_id):\n srv['player'].pause()\n else:\n srv['player'].resume()", "def play_pause(self):\n return self._call_player_proxy('PlayPause', None)", "def __pause(self):\n\n self.set_state(gst.STATE_PAUSED)", "def pause_play(self):\n\n try:\n if self.entrada:\n if self.estado == \"playing\": # pausa\n self.__pause()\n\n elif self.estado == \"paused\":\n self.__pause(True)\n self.estado = \"playing\"\n self.emit(\"estado\", \"playing\")\n\n else:\n #if self.uri: self.load(self.uri)\n pass\n\n except Exception, e:\n print \"HA OCURRIDO UN ERROR EN PAUSE_PLAY DEL REPRODUCTOR\", e", "async def toggle_play_pause(self):\n _LOGGER.debug(\"[Foobar2k] In Play / Pause\")\n if (self._power == POWER_ON):\n if (self._state == STATE_STOPPED):\n await self.prep_fetch(HTTP_POST, POST_PLAYER_PLAY_PLAYLIST.format(self._current_playlist_id, self._current_index), data=None)\n else: \n await self.prep_fetch(HTTP_POST, POST_PLAYER_PAUSE_TOGGLE, data=None)", "async def pause(self, msg):\n if msg.author.voice is not None and msg.voice_client is not None:\n if msg.voice_client.is_paused() is True:\n return await msg.send(\"Song is already paused\")\n\n if msg.voice_client.is_paused() is False:\n msg.voice_client.pause()\n await msg.message.add_reaction(emoji='✅')", "def pause(self):\n self.block.mobile = not self.block.mobile\n if not self.paused:\n self.paused = True\n # Also print paused message\n self.screen.print(\"PAUSED\")\n else:\n self.paused = False\n self.screen.print(\"\")\n # Also reset tick time\n self.t = time.time()", "async def play_pause(self) -> None:\n return await self.relay(\"play_pause\")()", "async def pause(self, ctx):\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n\n if voice:\n if voice.is_playing():\n voice.pause()\n await ctx.send(\"Playing paused.\")\n else:\n await ctx.message.add_reaction('\\U0001F615');\n await ctx.send(\"Not playing anything right now.\")\n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not in a voice channel.\")", "def _toggle_paused(self, paused=None):\n #automatically start the first wave\n if self._wave == 0:\n self.next_wave()\n\n if paused is None:\n paused = not self._paused\n\n #Task 1.5 (Play Controls): Reconfigure the pause button here\n \n if paused:\n self.pause()\n self._play_button_text.set(\"play\")\n else:\n self.start()\n self._play_button_text.set(\"pause\")\n\n self._paused = paused", "async def set_paused(self, value: bool):\n await self._pytheos.api.player.set_play_state(self.id, models.player.PlayState.Paused if value else models.player.PlayState.Playing)", "def pause(self):\n self.sendCommand(\"pause\")", "async def pause(self, ctx):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n if not player.is_connected:\n return await ctx.send(\"I'm not connected to a voice channel :no_entry:\")\n if not player.is_playing:\n return await ctx.send(\"Nothing is currently playing :no_entry:\")\n if player.paused:\n await player.set_pause(False)\n await ctx.send(\"Resumed.\")\n else:\n await player.set_pause(True)\n await ctx.send(\"Paused.\")", "def pause(self):\n self._cleanup()\n self._paused = True", "def pause_video(self):\n if self.now_playing_videoid:\n video_playing = 
self._video_library.get_video(self.now_playing_videoid)\n if self.pause == True:\n print(f\"Video already paused: {video_playing.title}\")\n else:\n print(f\"Pausing video: {video_playing.title}\")\n self.pause = True\n \n else: \n print(f\"Cannot pause video: No video is currently playing\")\n\n # print(\"pause_video needs implementation\")", "def togglePause(self):\n self.model.paused = not self.model.paused\n self.proc.send_signal(signal.SIGUSR1)", "async def pause(self, ctx: commands.Context) -> Optional[bool]:\n\n if ctx.voice_client.is_paused():\n await self.call_event(\n \"on_music_error\", ctx, AlreadyPaused(\"Player is already paused.\")\n )\n return\n\n if self.type == ManagerType.LAVALINK:\n await ctx.voice_client.set_pause(pause=True)\n else:\n (await self.now_playing(ctx)).last_pause_timestamp = time.time()\n ctx.voice_client.pause()\n\n create_task(self.bot.loop, self.ensure_activity(ctx))\n return True", "def pause(self):\n return self.client.api.pause(self.id)", "def pause_video(self):\n\n if self.is_playing and self.is_paused is False:\n print(f\"Pausing video: {self.playing_now}\")\n self.is_paused = True\n elif self.is_paused:\n print(f\"Video already paused: {self.playing_now}\")\n elif self.is_playing is False:\n print(\"Cannot pause video: No video is currently playing\")", "def on_pause(self):\r\n store = get_store()\r\n store.put(\"pause\", value=self.sm.current)\r\n return True", "def pause_video(self):\n if self._paused:\n print(f\"Video already paused: {self._current_video.title}\")\n return\n elif self._current_video is None:\n print(\"Cannot pause video: No video is currently playing\")\n return\n print(f\"Pausing video: {self._current_video.title}\")\n self._paused = True", "def pause(self):\n while 1:\n if self.is_paused:\n time.sleep(1)\n else:\n break", "async def async_media_pause(self) -> None:\n await self._projector.send_command(PAUSE)", "def toggle_play(self):\n\t\tif self._playing:\n\t\t\tself.stop()\n\t\t\tself._playing = False\n\t\telse:\n\t\t\tself.play()\n\t\t\tself._playing = True", "def pause(self) :\n raise NotImplementedError(\"pause not implemented\")", "def pause(self, _):\n if not self.is_ended:\n self.canvas.create_text(self.game.width // 2,\n self.game.height // 2,\n text=\"Paused\",\n font=(Game.FONT, 50),\n fill=Game.TEXT_COLOUR,\n tag='pause_text')\n self.game.running = not self.game.running\n if self.game.running:\n self.canvas.delete('pause_text')", "def auto_play_pause(self):\r\n if ActiveCheck.not_active():\r\n return\r\n \r\n if not self.playing:\r\n return # Suppress activity\r\n\r\n player = self.get_player()\r\n if not player.auto:\r\n return\r\n self.auto_delay_waiting = True\r\n pause = player.pause\r\n if self.speed_step >= 0:\r\n pause = self.speed_step\r\n delay_ms = int(pause*1000)\r\n self.mw.after(delay_ms)\r\n return", "def pause(self):\n \n self.pause = True", "def pause_video(self):\n if self.current_video is None:\n print(\"Cannot pause video: No video is currently playing\")\n elif self.current_paused is False:\n print(\"Pausing video:\", self.current_video.title)\n self.current_paused = True\n elif self.current_paused:\n print(\"Video already paused:\", self.current_video.title)", "async def set_pause(self, pause: bool):\n await self._bot.lavalink.ws.send(op='pause', guildId=self.guild_id, pause=pause)\n self.paused = pause", "def update(self, song: int) -> None:\n if 0 <= song < len(self.sounds):\n self.sounds[song].play()", "def pause_video(self):\n if self.is_paused() is not None:\n print(\"Video already 
paused: {}\".format(self.is_paused()._title))\n elif self.is_playing() is not None:\n print(\"Pausing video: {}\".format(self.is_playing()._title))\n self.is_playing()._status = 2\n else:\n print(\"Cannot pause video: No video is currently playing\")", "async def _resume(self, ctx: commands.Context):\n try:\n if ctx.voice_state.voice.is_paused:\n ctx.voice_state.voice.resume()\n await ctx.message.add_reaction('⏯')\n\n else:\n await ctx.send(\"No music paused!\")\n\n except AttributeError:\n await ctx.send(\"Can't pause. No song is being played!\")", "def play(self):\n self.playing = True\n # FIXME?: Why is this not doing anything? Shouldn't it be calling into the player API?", "def pause_video(self):\n current_video = self._video_library.get_video()\n if self.title == current_video.title:\n print(f\"Pausing video: {self.title}\")\n elif current_video == None:\n print(f\"Cannot pause video: No video is currently playing\")\n else:\n print(f\"Video already paused: {self.title}\")", "def on_pause(self, event):\n self.pre_check(event)\n if not self.get_player(event.guild.id).paused:\n self.get_player(event.guild.id).pause()", "def pause(self):\r\n cmd = MsgHelper.createMessage(Messages.CMD_PAUSE)\r\n self.mailbox.push( cmd, high_priority = True )", "def force_pause(self):\n self.timer.stop()\n QMetaObject.invokeMethod(self.video_player, \"pause\", Qt.QueuedConnection)\n self.paused = True\n\n # Re-enable video buttons\n self.update.workerPaused.emit()", "async def play(self):\n if self.repeat and self.current is not None:\n self.queue.append(self.current)\n\n self.current = None\n self.position = 0\n self.paused = False\n\n if not self.queue:\n await self.stop()\n await self._bot.lavalink.client._trigger_event('QueueEndEvent', self.guild_id)\n else:\n if self.shuffle:\n track = self.queue.pop(randrange(len(self.queue)))\n else:\n track = self.queue.pop(0)\n\n self.current = track\n await self._bot.lavalink.ws.send(op='play', guildId=self.guild_id, track=track.track)\n await self._bot.lavalink.client._trigger_event('TrackStartEvent', self.guild_id)", "def __pause(self, reset=False):\n\n self.entrada.write('pause 0\\n')\n self.entrada.flush()\n self.__new_handle(reset)\n self.estado = \"paused\"\n self.emit(\"estado\", \"paused\")", "def pause(self) -> None:\n self.system.notify(\"Jarvis::Paused\")\n self.media.pause()", "def pause_music():\n from Functions import Menu\n if Menu.music == False:\n pygame.mixer.music.unpause()\n Menu.music = True\n else:\n pygame.mixer.music.pause()\n Menu.music = False", "def media_play_pause(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n\n hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE, data)", "def pause_music(sid):\n try:\n # Get the host data from the database\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n host = hosts.get_detail(db, sid)\n spotify.pause(host['ip'])\n return jsonify({})\n except:\n abort(400)", "def PAUSED(self):\n self.pause_state = self.get_state() # the state FSM was in before 'op-pause' was called\n self.continue_state = self.pause_state\n self.update_status(self.STATES.PAUSED)", "def _pause(self):\n data_paused = None\n while self.target.is_active and data_paused != '01':\n data_paused = self._mem_read(self.data['paused'][0], 1)\n time.sleep(self.pause_time)\n self.data['paused'][1] = data_paused\n return", "def pause(self, enabled=True):\n if self._dev_name is None:\n raise SpeakerError\n\n #self._device.pause(enabled) that statement throws an exception\n self._paused = enabled", 
"def pause(self):\n raise NotImplementedError()", "def pause(self):\n raise NotImplementedError()", "def play_music(self):\n song_index = -1\n if self.num_songs == 0:\n sys.stdout.write(\"No songs found\\n\")\n sys.exit(0)\n \n # FIXME: spacebar/pause is an mplayer-specific command\n sys.stdout.write(\"Press spacebar to pause songs\\n\")\n sys.stdout.write(\"Press ctrl+c once to skip a song\\n\")\n sys.stdout.write(\"Hold ctrl+c to exit\\n\")\n sys.stdout.write(\"%d files found.\\n\" % self.num_songs)\n while True:\n try:\n song_index = self._get_song_index(song_index)\n if song_index == None:\n sys.exit(0)\n song = self.songs[song_index]\n sys.stdout.write(\"%s\\n\" % song)\n \n # Disabled the following as it got pretty annoying seeing a \n # torrent of notifications for non-music files (mplayer \n # gracefully skips these). \n #try:\n # notify_cmd=\"notify-send -t 1000 '%s'\" % \\\n # song.split(\"/\")[-1]\n # subprocess.check_call(notify_cmd, shell=True)\n #except:\n # pass\n #FIXME: escape quotes in songs\n play_cmd = '\"%s\" \"%s\" > /dev/null 2>&1 ' % \\\n (self.music_client, song) \n subprocess.check_call(play_cmd, shell=True)\n except KeyboardInterrupt:\n try:\n # HACK to allow repeated ctrl+c to exit outright\n time.sleep(0.1) \n except KeyboardInterrupt:\n sys.stderr.write(\"\\nExiting...\\n\")\n sys.exit(0)", "def paused():\n pause_time = time()\n cache.set('paused', pause_time)\n socketio.emit('paused', pause_time)", "def pause(self):\n pass", "def pause(self):\n pass", "def pause(cls):\n\n cls._set_mode_paused()\n TimeDisplay.stop_time()\n for callback in cls.pause_callback:\n callback()", "def pause(self,event=None):\r\n if self.controlLock.locked():\r\n return\r\n self.controlLock.acquire()\r\n self.videoPlayer.pause()\r\n for dp in self.dataPlayers:\r\n dp.update(self.videoPlayer.startTimestamp)\r\n self.controlLock.release()", "def media_play(self):\n self._state = STATE_PLAYING", "def media_play(self):\n self._state = STATE_PLAYING", "def toggle_pause(self, sender):\n if self.pause:\n self.app.title = 'checking'\n sender.title = 'pause'\n self.pause = False\n else:\n self.app.title = 'paused'\n sender.title = 'start'\n self.pause = True", "def unpause(self):\n self.paused = False", "def pause(self) -> None:\n self._running.clear()", "def set_pause(self, pause):\n\n game_status = self.game.get_game_status();\n if(game_status == GameStatus.NotStarted or game_status == GameStatus.Finished):\n return;\n\n if(pause == True):\n self.game.set_game_status(GameStatus.Paused);\n self.bttn_pause.set_text(\"Reprendre la partie\");\n\n self.game.stop_timer();\n\n elif(pause == False):\n self.game.set_game_status(GameStatus.InProgress);\n self.bttn_pause.set_text(\"Mettre en pause\");\n\n self.game.start_timer();", "async def set_playing(self, value: bool):\n await self._pytheos.api.player.set_play_state(self.id, models.player.PlayState.Playing if value else models.player.PlayState.Stopped)", "def play_music(self):\n if self.music is not None and not self.music_playing:\n self.music_playing = True\n if self.is_running:\n cocos.audio.music.control.play()", "def stop_song(self):\r\n if self.isPlaying:\r\n self.playSong[0].stop()\r\n self.playSong.clear()\r\n self.isPlaying = False\r\n print(\"Music stopped\")\r\n else:\r\n print(\"Play a song first...\")", "async def async_media_pause(self) -> None:\n if self._state.get(\"trackType\") == \"webradio\":\n await self._volumio.stop()\n else:\n await self._volumio.pause()", "def pause(self):\n if self._pause:\n self._pause = False\n else:\n 
self._pause = True\n self.step() # trigger the next step", "def start_pause(self, **kwargs):\n if self.is_on:\n self.turn_off()\n else:\n self.turn_on()", "def play_audio(self):\n if not self.voice.get_busy():\n self.voice.play(self.sound)\n else:\n pass", "def pause(self):\n\t\tpass", "def toggle_next(self):\n self.bot.loop.call_soon_threadsafe(self.play_next_song.set)", "def music_playing(\n self, paused, artist, album, title, genre, total_time, position, **kwargs\n ):\n fields = {\n \"playback_state\": PlaybackState.Paused if paused else PlaybackState.Playing,\n \"playback_rate\": 0.0 if paused else 1.0,\n \"artist\": artist,\n \"album\": album,\n \"title\": title,\n \"genre\": genre,\n \"total_time\": total_time,\n \"position\": position,\n \"media_type\": protobuf.ContentItemMetadata.Music,\n }\n fields.update(kwargs)\n self.state.set_player_state(PLAYER_IDENTIFIER, PlayingState(**fields))\n self.state.set_active_player(PLAYER_IDENTIFIER)", "def pause(self):\n self._event.clear()", "def play(self):\n if self._get_state() == Gst.State.PAUSED:\n self.pipeline.set_state(Gst.State.PLAYING)\n self.playing = True", "def media_pause(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n\n hass.services.call(DOMAIN, SERVICE_MEDIA_PAUSE, data)", "def set_paused(self, progress=None):\n if progress is not None:\n assert 0 <= progress < 100\n self._progress = int(progress)\n self.logger.info(\"status: PAUSED %d%%\", self._progress)\n self._callback('on_paused', self._progress)\n return self.update_response(self.encoder.encode_paused(self._progress))", "def stop(self):\n if self.player and self.player.is_playing():\n self.player.pause()\n super().stop()", "def test_pause(self):\n source = procedural.WhiteNoise(0.5)\n player = media.Player()\n player.queue(source)\n player.play()\n start_time = time.time()\n\n stage = 0\n while player.source:\n if stage == 0 and time.time() - start_time > 0.25:\n player.pause()\n stage = 1\n if stage == 1 and time.time() - start_time > 0.75:\n player.play()\n stage = 2\n player.dispatch_events()", "def toggle_pause(self) -> None:\n self.keyboard.press(Key.space)\n self.keyboard.release(Key.space)", "def music_playing(self, paused, artist, album, title,\n total_time, position):\n self.device.responses['playing'] = PlayingResponse(\n paused=paused, title=title,\n artist=artist, album=album,\n total_time=total_time,\n position=position,\n mediakind=2)", "def pause_workflow(self):\n with self._driver.session() as session:\n session.write_transaction(tx.set_workflow_state, state='PAUSED')", "def pause(self, state):\n resp = yield from self.command('pause '+str(state))\n return True", "async def playing(self, ctx):\n\n player = self.player\n\n if player.current is None:\n await self.bot.say('Not playing anything.')\n else:\n await self.bot.say('Now playing {} :'.format(player.current))" ]
[ "0.7713319", "0.74910986", "0.74175787", "0.7405902", "0.7326798", "0.72035414", "0.71642816", "0.71472704", "0.7133736", "0.7114518", "0.70905143", "0.7087032", "0.7062801", "0.70473045", "0.70375997", "0.70167387", "0.70167387", "0.6942558", "0.6881331", "0.6881242", "0.68398225", "0.68370444", "0.6831393", "0.68272746", "0.6692693", "0.6680627", "0.66648364", "0.658427", "0.65313506", "0.6499516", "0.6473457", "0.6461974", "0.6439017", "0.6426666", "0.6425218", "0.63993394", "0.63904506", "0.6378717", "0.6352555", "0.63356346", "0.63192546", "0.62901723", "0.62292945", "0.62210995", "0.62178594", "0.62078744", "0.6188755", "0.61546814", "0.61376977", "0.61146694", "0.6098795", "0.6092021", "0.60839957", "0.6069915", "0.6051431", "0.6042442", "0.60409915", "0.60262257", "0.6024723", "0.6021841", "0.60211873", "0.599756", "0.5984016", "0.59837365", "0.5943365", "0.59379065", "0.59379065", "0.5925522", "0.59112936", "0.5903854", "0.5903854", "0.59008163", "0.5897093", "0.58891606", "0.58891606", "0.58586", "0.5830143", "0.58207417", "0.58194524", "0.5814641", "0.5808378", "0.5797429", "0.57964545", "0.57908493", "0.5777329", "0.57748896", "0.57493144", "0.57179844", "0.5715453", "0.5711878", "0.57110304", "0.56963223", "0.568626", "0.5671668", "0.56455165", "0.56405103", "0.5638523", "0.5637033", "0.56312084", "0.5623742" ]
0.82099915
0
Add song to the storage directory and to the database. Return ID of the new song / error message.
def add_song(self):
    path = input("Give file path:\t")  # Request file path
    path = path.replace('\\', '/')
    if self.path_song_re.match(path) and not self.path_storage_re.match(path):
        # Path leads to a song that is not already found in Storage
        copy(path, self.p_storage)  # Copy the song to the storage directory
        file_title, form = path.split("/")[-1].split(".")  # Save file title and format from the path
        sql = "SELECT COUNT(*) FROM songs WHERE file_title = %s AND form = %s"  # Check the existence of a song
        # with the same title and format in the database
        self.cursor.execute(sql, (file_title, form))
        r = self.cursor.fetchall()
        if r[0][0] != 0:
            return "A song with this file name and format already exists!"
        song_title = input("Song title:\t")
        artist = input("Artist:\t")
        data = input("Release date:\t")
        tags = input("Associated tags:\t")
        sql = "INSERT INTO songs (file_title, song_title, artist, form, data, tag) VALUES (%s, %s, %s, %s, %s, " \
              "%s) "  # Insert song into database
        columns = (file_title, song_title, artist, form, data, tags)
        self.cursor.execute(sql, columns)
        self.cnx.commit()
        self.cursor.execute("SELECT MAX(ID) FROM songs")
        result = self.cursor.fetchall()
        return "New song ID: " + str(result[0][0])
    else:
        return "Give valid path"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_song():\n options = queue.instantiate_options()\n raw_queue = queue.instantiate_queue()\n track_id = request.args.get('song')\n\n for song in raw_queue:\n if song['track_id'] == track_id[14:]:\n return json.dumps({'error': 'Cannot add a song already in the queue'})\n\n num_songs_added = 0\n for song in raw_queue:\n if song['added_by'] == session['id']:\n num_songs_added += 1\n\n if num_songs_added >= int(options['max_individual_songs']):\n print('user reached max songs')\n return json.dumps({'error': \"You are not allowed to add any more songs until one plays\"})\n\n song_obj = create_song(track_id, added_by=session['id'])\n queue.addSong(song_obj)\n queue_change()\n return json.dumps({'success': 'added ' + track_id})", "def insert_song(self, song_name: str, title: str, artist: str, file_hash: str, total_hashes: int) -> int:\n id = random.randint(1, 1000000000000)\n song = Songs(meta={'id': id}, song_name=song_name, song_title=title, artist=artist, file_sha1=file_hash, total_hashes=total_hashes)\n song.save()\n return id", "def add_song(_name_of_the_song, _duration_in_number_of_seconds):\r\n # creating an instance of our Song constructor\r\n new_song = Song(name_of_the_song=_name_of_the_song,\r\n duration_in_number_of_seconds=_duration_in_number_of_seconds)\r\n db.session.add(new_song) # add new song to database session\r\n db.session.commit() # commit changes to session\r", "def add_song(self, song):\n self.songs.append(song)", "def new_song():\n song_id = int(request.args['song_id'])\n track_info = shiva.get_tracks([song_id])[song_id]\n vlc.add_song(track_info['path'])\n return 'ok'", "def add_song_to_database(artist, name, db):\n if exists(db):\n f = open(db, 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, name);\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print str(current_entry) + \" already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open(db, 'w')\n song_list = [Song_data(artist, name)]\n f.seek(0,0)\n pickle.dump(song_list, f)", "def add_song(self, song: Song):\n self.playlist.append(song)", "def newsong(self, filename):\n datas = None\n try:\n unicode(filename)\n except UnicodeDecodeError:\n return\n\n cur = self.conn.cursor()\n\n try:\n datas = mutagen.File(filename, easy=True)\n except:\n query = \"\"\"INSERT INTO caro_logs (filename, message, date_import) VALUES (%s, 'ERROR 01', now());\"\"\"\n cur.execute(query, (filename,))\n\n\n if datas is not None:\n artist = None\n album = None\n title = None\n genre = None\n\n try:\n artist = datas['artist'][0]\n album = datas['album'][0]\n title = datas['title'][0]\n genre = datas['genre'][0]\n except KeyError as e:\n msg = str(sys.exc_type), \":\", \"%s is not in the list.\" % sys.exc_value\n\n query = \"\"\"INSERT INTO caro_logs (filename, message, date_import) VALUES (%s, %s, now());\"\"\"\n cur.execute(query, (filename, msg, ))\n\n if artist and album and genre and title:\n fsig = hashfile(filename)\n chk = self.checkfile(fsig)\n if chk == 0:\n self.insertfile([filename, artist, album, title, genre, fsig])\n else:\n self.update_path(filename, fsig)\n else:\n print \"Missing tag\"\n self.conn.commit()", "def add_song(name, duration):\n song = Song(\n name=name,\n duration=duration,\n )\n db.session.add(song)\n db.session.commit()\n\n return song", "def add_song(self, song, position=None):\n\n # Use find_object to see if the song exist already.\n song_found = find_object(song, self.tracks) # look 
for song.tracks to see if it exist in the list\n if song_found is None: # if song is not found\n song_found = Song(song, self.artist) # We create new song using \"Song\" function and assign it to song_found\n if position is None: # If there are no songs in this track\n self.tracks.append(song_found) # Add this song_found in the first position\n else: # else if there are already some songs in the track\n self.tracks.insert(position, song_found) # inserts the position and song in self.tracks list", "def add_song(self, song, position=None):\n\n song_found = find_object(song, self.tracks)\n if song_found is None:\n song_found = Song(song, self.artist)\n if position is None:\n self.tracks.append(song_found)\n else:\n self.tracks.insert(position, song_found)", "def insert_song(self, song_name: str, file_hash: str, total_hashes: int) -> int:\n try:\n record = {FIELD_SONGNAME:song_name,FIELD_FILE_SHA1:file_hash,FIELD_TOTAL_HASHES:total_hashes,FIELD_FINGERPRINTED:False}\n outcome = self.cursor.index(index=SONGS_INDEXNAME, body=record)\n except Exception as ex:\n print('Error indexing data')\n print(str(ex))\n return outcome['_id']", "def add_song(self, song: Song) -> None:\n\n self.songs.append(song)\n self.set_song_count(len(self.songs))", "def add_song(self):\n # Error check for blank inputs\n if \"\" in (self.root.ids.input_title.text, self.root.ids.input_artist.text, self.root.ids.input_year.text):\n self.root.ids.status_text.text = \"All fields must be completed\"\n return\n # Error check for negative numbers\n try:\n if int(self.root.ids.input_year.text) < 0:\n self.root.ids.status_text.text = \"Year must be >= 0\"\n return\n # Error check for invalid numbers\n except ValueError:\n self.root.ids.status_text.text = \"Please enter a valid number\"\n return\n # Song add, clear inputs, sort songlist\n song_to_add = Song(self.root.ids.input_title.text, self.root.ids.input_artist.text,\n int(self.root.ids.input_year.text))\n self.songs.add_song(song_to_add)\n SongsToLearnApp.clear_inputs(self)\n self.sort_songs(self.root.ids.sort_options.text)", "def add_song(self, song, position=None):\n if position:\n self.tracks.insert(position, song)\n else:\n self.tracks.append(song)", "def add_song(self, song, position=None):\n if position is None:\n self.tracks.append(song)\n else:\n self.tracks.insert(position, song)", "def add_music(request, music_id: int) -> HttpResponse:\n music_item = get_object_or_404(Music, id=music_id)\n\n if music_item in request.user.profile.playlist.all():\n return HttpResponse('Success')\n\n playpos = PlayPosition(\n position=music_item,\n plist=request.user.profile\n )\n playpos.add_order()\n playpos.save()\n\n return HttpResponse('Success')", "def import_song(self, song, playlist):\n\n try:\n song_uri = self.find_song_uri(song)\n except SongNotFoundError as e:\n print(f\"could not find song {song} to add to playlist '{playlist['name']}'\")\n else:\n self.add_song_to_playlist(song_uri, playlist[\"id\"])", "def add_song_to_playlist(self):\n #populate our songs dictionary\n self.get_liked_videos()\n\n #collect all of uri\n uris = []\n for song,info in self.all_song_info.items():\n uris.append(info[\"spotify_uri\"])\n\n #create a new playlist\n playlist_id = self.create_playlist()\n\n #add all songs into new playlist\n\n #Spotipy can only add 100 songs at a time to a playlist that is why this method is taken\n g = len(uris)\n if g > 100:\n s = 0\n e = 99\n while g > 100:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:e])\n g -= 100\n 
s = e + 1\n e += 100\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:])\n else:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris)", "def associate_song(self, song):\n self.songs.append(song)", "def add_new_song(self):\n return \"New Song Added\"", "def update_db(self):\n songs = self.db.get_all_songs()\n for song in songs:\n if choose_song(song) == ERROR:\n self.db.delete_song(song)\n files = []\n for song in glob.glob(\"songs\\*.wav\"):\n to_append = song.split('\\\\')[ONE][:-4]\n files.append(to_append)\n for song in files:\n if song not in songs:\n self.db.add_new_song(song)", "def add_lyrics_and_song_data_to_database(artist, song):\n if exists('song_database.txt'):\n f = open('song_database.txt', 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, song)\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print \"Song '\" + song + \"' already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open('song_database.txt', 'w')\n song_list = [Song_data(artist, song)]\n f.seek(0,0)\n pickle.dump(song_list, f)", "def __add_song(self, song, genius_api):\n\t\tentry = {\n\t\t\t'id' : int(song['id']),\n\t\t\t'title' : song['title'],\n\t\t\t'primary_artist' : {\n\t\t\t\t'id' : song['primary_artist']['id'],\n\t\t\t\t'name' : str(song['primary_artist']['name']).lower(),\n\t\t\t\t'url' : song['primary_artist']['url'],\n\t\t\t\t'is_verified' : song['primary_artist']['is_verified'],\n\t\t\t\t},\n\t\t\t'url' : song['url'],\n\t\t\t'lyrics' : genius_api.get_lyrics(song['id'], song['url'])\n\t\t\t}\n\t\tif song['album']:\n\t\t\tentry['album'] = {\n\t\t\t\t'id': song['album']['id'], \n\t\t\t\t'full_title': song['album']['full_title'], \n\t\t\t\t'name': song['album']['name'], \n\t\t\t\t'artist': song['album']['artist']['id']\n\t\t\t\t}\n\t\tif song['release_date']:\n\t\t\tentry['release_date'] = song['release_date']\n\t\tif len(song['featured_artists']) > 0:\n\t\t\tfeatured_artists = list()\n\t\t\tfor artist in song['featured_artists']:\n\t\t\t\tart = {\n\t\t\t\t\t'id' : artist['id'],\n\t\t\t\t\t'name' : artist['name'].lower()\n\t\t\t\t\t}\n\t\t\t\tfeatured_artists.append(art)\n\t\t\tentry['featured_artists'] = featured_artists\n\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\tself.db.songs.insert_one(entry)", "def add_entry():\n username = util.remove_commas_from_string(request.form[\"name\"])\n link = util.remove_commas_from_string(request.form[\"ytLink\"])\n song = util.remove_commas_from_string(request.form[\"songName\"])\n\n festive = CHRISTMAS_MODE and \"christmasSong\" in request.form\n\n with database.connect_to_database() as db:\n user_id = database.get_userid(db, username)\n database.add_song(db, link, song, user_id, month=12 if festive else None)\n\n return redirect(url_for('main'))", "def addSong(self, title, filename):\n #make sure that the filename is valid? 
or does this happen outside?\n self.__songDictionary[title]=filename\n return True", "def add_album(self, artist='', album='', totaltracks=0, totalseconds=0,\n altype='album', commit=True):\n self.app.curs.execute(\"\"\"insert into album(\n alartist, alalbum, totaltracks, totalseconds, altype\n ) values ( %s, %s, %s, %s, %s)\"\"\", (\n artist, album, totaltracks, totalseconds, altype\n ))\n if commit:\n self.app.db.commit()\n return self.app.curs.lastrowid", "def add_song(self):\n settings = dict(initialdir=pathlib.Path().absolute(), title=\"Choose songs\", filetypes=(\n (\"flac files\", \"*.flac\"),\n (\"mp3 files\", \"*.mp3\"),\n (\"all files\", \"*\")))\n\n song = filedialog.askopenfilename(**settings)\n\n self.update_playlist(song)\n self.listbox.insert(\"end\", self.song_list[-1]['name'])", "async def add(self, ctx, url_string : str):\n logger.info(\"add command issued by {0} with {1}\".format(ctx.message.author.name, url_string))\n if self.spotify_device is None:\n await ctx.send(\"No device playing\")\n elif ctx.voice_client is None:\n await ctx.send(\"No voice to skip\")\n else:\n try:\n url_parsed = urllib.parse.urlparse(url_string)\n except:\n await ctx.send(\"invalid spotify url\")\n return\n url_split = url_parsed.path\n url_split, url_id = os.path.split(url_split)\n url_split, url_type = os.path.split(url_split)\n logger.info(\"type is {0} and id is {1}\".format(url_type, url_id))\n if url_type == 'track':\n self.song_list.append(url_id)\n await ctx.send(\"Added song\")\n else:\n await ctx.send(\"Only single tracks for now\")", "def addSong(self, song):\n queue = self.instantiate_queue()\n history = self.instantiate_history()\n options = self.instantiate_options()\n\n queue = [song for song in queue if song['explicit']]\n queue.append(song.to_dict())\n\n if len(queue) < 5:\n self.addImplicit(queue, history, fallback_song=song.to_dict())\n \n queue = self.sortSongs(queue)\n self.cache.set('queue', queue)", "def appendSong(song):\n\tsql = []\n\tsql.append(\"INSERT INTO SONGS (filename, path, hash, length, track, \"\n\t\t+ \"genre, date, title, base) VALUES ('\" + song.filename + \"', '\" + song.path \n\t\t+ \"', '\" + str(song.hash) + \"', '\" + str(song.length) + \"', '\" \n\t\t+ '/'.join(song.track) + \"', '\" + '/'.join(song.genre) \n\t\t+ \"', '\" + str(song.year) + \"', '\" + '/'.join(song.title) + \"', '\"\n\t\t+ song.base + \"');\")\n\treturn sql", "def add_song(self, name, year, title):\n album_found = find_object(name, self.albums)\n if album_found is None:\n album_found = Album(name, year, self.name)\n self.add_album(album_found)\n album_found.add_song(title)", "def add_song_to_pl(self, song, pl):\n to_send = self.db.add_song_to_pl(song, pl)\n if not to_send:\n to_send = SUCCESS\n self.send_message(to_send)", "def add_song(self, name, year, title):\n\n # Here we check if album exist under artist.\n album_found = find_object(name, self.albums)\n if album_found is None: # If there is no album found\n print(name + \"not found\") # we print \"Album name not found\n album_found = Album(name, year, self.name) # Change_3: Pass \"self.name\" instead of \"self\"\n self.add_album(album_found) # We add new_album to song.\n else: # if we found an existing album with same name\n print(\"found album\" + name) # we print found album name\n\n # so we add song to album_found\n album_found.add_song(title)", "def add_songs(self, song, position=None):\n song_found = find_object(song, self.tracks)\n if song_found is None:\n song_found = Song(song, self.artist)\n if position is None:\n 
self.tracks.append(song_found)\n else:\n self.tracks.insert(position, song_found)", "def add_track():\n # Inserts the track information.\n t_id = db.track.insert(\n tracklength = request.vars.tracklength,\n bpm = request.vars.bpm,\n title = request.vars.title,\n num_plays = 0\n )\n # Then, updates the uploaded track to point to this track.\n db(db.track_data.id == request.vars.insertion_id).update(track_id=t_id)\n # Also, to clean up, remove tracks that do not belong to anyone.\n db(db.track_data.track_id == None).delete()\n # Returns the track info. Building the dict should likely be done in\n # a shared function, but oh well.\n return response.json(dict(track=dict(\n id = t_id,\n tracklength = request.vars.tracklength,\n bpm = request.vars.bpm,\n title = request.vars.title,\n num_plays = 0,\n track_url = build_track_url(t_id)\n )))", "def createsong(tags, sig, fpath, played=0):\n\n song = Song.objects.create(artist=tags['artist'],\n album=tags['album'],\n title=tags['title'],\n genre=tags['genre'],\n score=0,\n played=played,\n uniq=sig,\n global_score=0,\n filename=fpath)\n # lookup to fill cover\n picture.delay(song)\n \n if hasattr(song, 'title') and song.title != '':\n try:\n song.genre += ','.join(get_tags(song.artist, song.title))\n except:\n pass\n song.save()\n return \"[I] %s\\n\" % song.title", "def add_the_song_to_playlist(self):\n com_util.tap_on(self.driver, element['AddToPlaylist'])\n # com_util.send_to(self.driver, element['EnterThePlaylist'], 'My Songs')\n com_util.tap_on(self.driver, element['ClickMySongs'])\n # com_util.tap_on(self.driver, element['SaveBtn'])\n com_util.tap_on(self.driver, element['CancelBtn'])\n com_util.tap_on(self.driver, element['DownArrow'])", "def song_save(song_id):\r\n querystring = apiurl_musixmatch + \"track.lyrics.get?track_id=\" + urllib2.quote(\r\n song_id) + \"&apikey=\" + apikey_musixmatch + \"&format=plain\"\r\n try:\r\n request = urllib2.Request(querystring)\r\n # timeout set to 4 seconds; automatically retries\r\n response = urllib2.urlopen(request, timeout=4)\r\n # raw = response.read()\r\n print colored.green(\"Starting\", bold=12)\r\n all_data = ''\r\n while True:\r\n do_task()\r\n print '\\b.',\r\n sys.stdout.flush()\r\n data = response.read(2048)\r\n if not data:\r\n break\r\n all_data += data\r\n time.sleep(0.4)\r\n print \"\\n\"\r\n json_obj = json.loads(all_data.decode(\"utf-8\"))\r\n body = json_obj[\"message\"][\"body\"][\"lyrics\"][\"lyrics_body\"]\r\n if body == 0:\r\n print colored.red(\"No lyrics found\", bold=12)\r\n else:\r\n song_found = SongLyricsFinder(song_id, body)\r\n session.add(song_found)\r\n session.commit()\r\n print colored.green(\"Song saved successfully.\", bold=12)\r\n except socket.timeout:\r\n print \"Timeout raised and caught\"", "def song_already_exists(song, playlist_id):\n print('Song {title} already in playlist {playlist_id}, adding has been skipped.'\n .format(title=song.title,\n playlist_id=playlist_id))\n pass", "def add_song_to_playlist(self, song_uri, playlist_id):\n\n endpoint = f\"/playlists/{playlist_id}/tracks\"\n self._send(endpoint, \"POST\", params={\"uris\": song_uri})", "def songs_add_failed(self, songs, library=True, playlist=None):\n message = {\n \"timestamp\": self._get_time(),\n \"level\": \"ERROR\",\n \"type\": \"SONGS_ADD_FAILED\",\n \"songs\": songs,\n }\n\n if library:\n message[\"library\"] = library\n else:\n message[\"playlist\"] = playlist\n\n self._log_queue.put(json.dumps(message))", "def record_lyrics_result(self, track_id, songdata):\n self.lyrics.insert_one(\n {\n 
\"_id\": track_id,\n \"response_artist\": songdata.artist,\n \"response_title\": songdata.title,\n \"lyrics\": songdata.lyrics,\n }\n )", "async def queue(self, msg, song):\n title1 = await Downloader.get_info(self, url=song)\n title = title1[0]\n data = title1[1]\n # NOTE:needs fix here\n if data['queue']:\n await self.playlist(data, msg)\n # NOTE: needs to be embeded to make it better output\n return await msg.send(f\"Added playlist {data['title']} to queue\")\n self.player[msg.guild.id]['queue'].append(\n {'title': title, 'author': msg})\n return await msg.send(f\"**{title} added to queue**\".title())", "def _mpd_add_track(uri, position = None):\n \n if position != None:\n _mpd_client.addid(uri, position)\n else:\n _mpd_client.addid(uri)", "def add_music_from_search(request, music_id: int) -> HttpResponse:\n music_item = get_object_or_404(Music, id=music_id)\n\n if music_item in request.user.profile.playlist.all():\n return HttpResponse('Success')\n\n playpos = PlayPosition(\n position=music_item,\n plist=request.user.profile\n )\n\n playpos.add_order()\n playpos.save()\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))", "def delete_song(self):\r\n song_id = tuple(input(\"Give the melody id to be deleted:\\t\"))\r\n sql = \"SELECT file_title, form FROM songs WHERE id = %s\" # Check existence of song with given ID\r\n self.cursor.execute(sql, song_id)\r\n result = self.cursor.fetchall()\r\n if len(result) > 0:\r\n path = self.p_storage + \"/\" + result[0][0] + \".\" + result[0][\r\n 1] # Find path of song by appending the name and format to the storage directory path\r\n os.remove(path) # Remove song from directory\r\n sql = \"DELETE FROM songs WHERE id = %s\" # Delete song from database\r\n self.cursor.execute(sql, song_id)\r\n self.cnx.commit()\r\n print(self.cursor.rowcount, \"record(s) deleted\")\r\n else:\r\n print(\"Give a valid id...\")", "def upload_music(handler):\n user_id = handler.get_argument('user_id')\n music_path = handler.get_argument('path') #Having problems parsing this out\n sound_cloud_client = Petitions.instantiate_user(user_id)\n track = sound_cloud_client.post('/tracks', track={\n 'title': 'Testing Uploads',\n 'asset_data': open(music_path, 'rb')\n })\n\n return track.permalink_url # Improve messages. 
Change to Json", "def importsong(fpath):\n result = \"\"\n\n tags = checkid3(fpath)\n if tags is not None:\n sig = sigfile(fpath)\n exsong = Song.objects.filter(uniq=sig)\n\n if len(exsong) > 0:\n if exsong[0].filename != fpath:\n result = updatesong(exsong[0], fpath)\n else:\n result = \"[X] %s\" % exsong[0].title\n else:\n result = createsong(tags, sig, fpath, songminplay())\n else:\n logger.error('No tags found in [%s]' % fpath)\n\n return result", "def add_songs(self, name, year, title):\n\n album_found = find_object(name, self.album)\n if album_found is None:\n print(\"Not Found \" + name)\n album_found = Album(name, year, self.name)\n self.add_album(album_found)\n else:\n print(\"Found album \"+name)\n\n album_found.add_songs(title)", "def enterSong(song):\n\tc, conn = connect()\n\tsql = []\n\n\t# checks if the song is already in the database by hash\n\tif checkHash(song):\n\t\tsql2 = appendSong(song)\n\t\tsql += sql2\n\t\t\n\t\t# checks if the song has an artist\n\t\tif song.artist:\n\t\t\tsql2 = appendArtist(song)\n\t\t\tsql += sql2\n\t\n\t\t# checks if the song has an album\n\t\tif song.album:\n\t\t\tsql2 = appendAlbum(song)\n\t\t\tsql += sql2\n\t\n\t# execute all the queries\n\tfor query in sql:\n\t\tc.execute(query)\n\t\t\n\tconn.commit()\n\treturn sql", "def Store(self):\n\n if FLAGS.verbose or FLAGS.verbose_writes:\n print 'Writing track:'\n for key in sorted(self.persistant):\n print ' %s = %s' %(key, self.persistant[key])\n\n if not self.persistant:\n return\n \n try:\n self.db.WriteOneRow('tracks', 'id', self.persistant)\n except MySQLdb.Error, (errno, errstr):\n if errno != 1064:\n raise TrackException(self.db, 'Could not store track %s: %s \"%s\"'\n %(self.persistant['id'], errno, errstr))\n except sql.FormatException, e:\n raise e\n except Exception, e:\n raise TrackException(self.db, 'Could not store track: %s: \"%s\" (%s)'\n %(self.persistant['id'], e, type(e)))", "def save(self, media):\n if self.exists(media):\n self.update(media)\n else:\n self.insert(media)", "def test_adding_album_twice_forced(self):\n self.add_mp3(filename='1.mp3')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n\n self.add_mp3(filename='2.mp3')\n (added, status) = self.app.add_album(self.filenames, 'ep', force_update=True)\n self.assertEqual(added, True)\n self.assertIn('Updated to', status)\n self.assertEqual(self.get_album_count(), 1)\n\n album = Album.get_by_artist_album(self.app.curs, 'Artist', 'Album')\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'ep')\n self.assertEqual(album.totalseconds, 4)\n self.assertEqual(album.totaltracks, 2)", "def store(self, item):\n cursor = self.conn.cursor()\n # Store the item\n if item:\n cursor.execute(*self._build_insert(item, 'items'))\n for file_ in item.files:\n cursor.execute(\"\"\"insert into files (filename, item_id)\n values (?, ?)\"\"\", (file_, item.kg_id))\n self.conn.commit()\n self.logger.info(\"Succesfully stored item %d\" % item.kg_id)", "def add_song_to_playlist(self, song_uri, playlist_id, user=None):\n\n if song_uri[0] in self.list_pl_songs(playlist_id, user=None):\n logging.debug('Song already in playlist')\n else:\n if user:\n self.sp.user_playlist_add_tracks(user, playlist_id, song_uri)\n else:\n self.sp.user_playlist_add_tracks(\n self.user, playlist_id, song_uri)", "def songs_added(self, songs, library=True, playlist=None):\n message = {\n \"timestamp\": self._get_time(),\n 
\"level\": \"INFO\",\n \"type\": \"SONGS_ADDED\",\n \"songs\": songs,\n }\n\n if library:\n message[\"library\"] = library\n else:\n message[\"playlist\"] = playlist\n\n self._log_queue.put(json.dumps(message))", "def add_path(self):\n name = self._get_path(\"Music File or Directory: \")\n if name != None:\n self._clear_window()\n self.player.add(name)\n self.refresh_window()", "def test_single_track(self):\n self.add_mp3()\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n album = Album.get_by_artist_album(self.app.curs, 'Artist', 'Album')\n self.assertNotEqual(album, None)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 2)\n self.assertEqual(album.totaltracks, 1)", "def test_adding_album_twice(self):\n self.add_mp3(filename='1.mp3')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n\n self.add_mp3(filename='2.mp3')\n (added, status) = self.app.add_album(self.filenames, 'ep')\n self.assertEqual(added, False)\n self.assertIn('Would update to', status)\n self.assertEqual(self.get_album_count(), 1)\n\n album = Album.get_by_artist_album(self.app.curs, 'Artist', 'Album')\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 2)\n self.assertEqual(album.totaltracks, 1)", "def _do_move(self, artist, album, song):\n try:\n move_to = \"{0}{1}/{2}/\".format(self.dupe_dir, \n artist, album)\n if not os.path.exists(move_to):\n os.makedirs(move_to)\n \n shutil.move(song['path'], move_to)\n self.moved.append(song)\n return 1\n except:\n self.logger.error(\"Could not move file: {0}\".format(str(song['path'])))\n return 0", "def post(self, track_id=0):\n\t\tdb = getattr(g, 'db', None)\n\t\tobj = request.get_json()\n\t\tprint \"input\",obj\n\n\t\tif ('username' not in obj) or ('session' not in obj) or\\\n\t\t\t('action' not in obj):\n\t\t\treturn {'status':'MISSING_PARAMS'}\n\t\telif not authenticate(obj['username'], obj['session']):\n\t\t\treturn {'status':'AUTH_FAIL'}\n\t\telse:\n\t\t\tusername = obj['username']\n\t\t\tsession = obj['session']\n\t\t\taction = obj['action']\n\n\t\t\tif action == 'ADD':\n\t\t\t\tif ('title' not in obj) or ('playlist_id' not in obj) or ('path' not in obj):\n\t\t\t\t\treturn {'status':'MISSING_PARAMS'}\n\n\t\t\t\ttitle = obj['title']\n\t\t\t\tpath = obj['path']\n\t\t\t\tplaylist_id = obj['playlist_id']\n\n\t\t\t\tqry = \"INSERT INTO music VALUES (default, (SELECT id FROM profiles WHERE username=%s), %s, %s, (SELECT id FROM playlists WHERE id=%s));\"\n\t\t\t\twith db as cur:\n\t\t\t\t\tlines = cur.execute(qry, (username, title, path, playlist_id))\n\t\t\t\t\tif lines == 0:\n\t\t\t\t\t\treturn {'status':'ADD_FAILED'}\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn {'status':'ADD_SUCCESS'}\n\t\t\telif action == 'DELETE':\n\t\t\t\tif('track_id' not in obj):\n\t\t\t\t\treturn {'status':'MISSING_PARAMS'}\n\n\t\t\t\ttrack_id= obj['track_id']\n\t\t\t\ttry:\n\t\t\t\t\twith db as cur:\n\t\t\t\t\t\tqry=\"DELETE FROM music WHERE user_id = (SELECT id FROM profiles WHERE username=%s)\\\n\t\t\t\t\t\t\tand id = %s;\"\n\t\t\t\t\t\tlines = cur.execute(qry,(username,track_id))\n\t\t\t\t\t\tif lines == 0:\n\t\t\t\t\t\t\treturn {\"status\":\"NO_SUCH_TRACK\"}\n\n\t\t\t\t\t\tdb.commit()\n\t\t\t\t\treturn 
{\"status\":\"DELETION_SUCCESS\"}\n\t\t\t\texcept sql.Error as e:\n\t\t\t\t\tprint e\n\t\t\t\t\treturn {\"status\":\"DELETION_FAILED\"}\n\t\t\t\n\t\t\telif action == 'GET':\n\t\t\t\tif ('playlist_id' not in obj):\n\t\t\t\t\treturn {'status':'MISSING_PARAMS'}\n\t\t\t\tplaylist_id = obj['playlist_id']\n\n\t\t\t\tqry = None\n\t\t\t\twith db as cur:\n\t\t\t\t\tif playlist_id == '*':\n\t\t\t\t\t\tqry = \"SELECT id,title,path FROM music WHERE\\\n\t\t\t\t\t\tuser_id = (SELECT id FROM profiles where username=%s) ORDER BY(id) ASC;\"\n\t\t\t\t\t\tcur.execute(qry, (username,))\n\t\t\t\t\telse:\n\t\t\t\t\t\tqry = \"SELECT id,title,path FROM music WHERE\\\n\t\t\t\t\t\tplaylist_id = (SELECT id FROM playlists where id=%s) ORDER BY(id) ASC;\"\n\t\t\t\t\t\tcur.execute(qry, (playlist_id,))\n\t\t\t\t\treturn {'status':'MUSIC_LIST', 'tracks':cur.fetchall()}\n\t\t\t\treturn {'status':'INTERNAL_ERROR'}\n\t\t\t\t#user_id = (SELECT id FROM profiles WHERE username=%s;\"\n\t\t\telse:\n\t\t\t\treturn {'status':'INVALID_ACTION'}", "def add_songs(playlist_id, user_id, uris):\n\t# TODO: ensure duplicates not added or else they'll pop to the top of the playlist\n\t# Not going to do this right now. If you want the playlist to be a record of daily tracks, \n\t# doesn't make sense to get rid of duplicates.\n\n\tfor uri in uris:\n\t\tlogging.debug('Adding uri {0}'.format(uri))\n\ttoken = get_token()\n\theaders = {'Authorization': 'Bearer ' + token}\n\tbase_url = SPOTIFY_API_HOST + 'users/{0}/playlists/{1}/tracks?position=0&uris={2}'\n\n\tformatted_uris = [quote('spotify:track:{0}'.format(uri), safe='') for uri in uris if uri] # Probably shouldn't quote\n\turi_string = ','.join(formatted_uris)\n\n\turl = base_url.format(SPOTIFY_USER_ID, SPOTIFY_PLAYLIST_ID, uri_string)\n\tresponse = requests.post(url, headers=headers)\n\tlogging.debug('Called add url {0}'.format(url))\n\tlogging.debug('Got response {0}'.format(response.text))\n\tif response.status_code == 429:\n\t\tlogging.warning('!!!!!!!!!!!!!!!!!!!!!GOT STATUS CODE 429; RATE LIMITING FROM SPOTIFY!!!!!!!!!!!!!!!!!!')", "def insert_tracks(event=None):\n # playlist_items = playlist_box.get(0, len(playlist))\n # Fetching tracks\n tracks_items = filedialog.askopenfilenames(filetypes=[(\n \"Audio files\", ('*.flac', '*.wav', '*.mp3', '.ogg'))], title=\"Select tracks\")\n\n # Force insertion of at least one track\n # if (not tracks_list) and (not playlist_items):\n # insert_tracks()\n\n # Inserting into Playlist\n for track_path in tracks_items:\n # Extract file name from full path\n track = os.path.basename(track_path)\n if track not in playlist_box.get(0, len(playlist)): # Avoid duplicates\n playlist_box.insert(END, track)\n playlist.append(track_path)", "async def add(self, ctx, query):\n if ctx.guild is None:\n await ctx.reply(\"This command can only be used in a server, not in DMs.\")\n raise commands.CommandError(\"Invoker not in a guild.\")\n\n if ctx.voice_client is None or ctx.voice_client.channel is None:\n await ctx.reply(f\"I am not in a voice channel, invite me first with `{self.bot_config['prefix']}join`.\")\n raise commands.CommandError(\"Bot not connected to a voice channel.\")\n\n if ctx.author.voice is None or ctx.author.voice.channel is None:\n await ctx.reply(\"You need to be in a voice channel to use this command.\")\n raise commands.CommandError(\"Invoker not connected to a voice channel.\")\n\n if ctx.voice_client is not None and ctx.author.voice.channel != ctx.voice_client.channel:\n await ctx.reply(\"You need to be in the same voice channel as the bot to 
use this command.\")\n raise commands.CommandError(\"Invoker not in same voice channel as bot.\")\n\n if ctx.voice_client is not None and ctx.voice_client.channel is not None:\n controller = SpotifyController.get_instance(ctx.voice_client.channel.id)\n if controller is None:\n await ctx.reply(f\"I'm not playing anything at the moment.\")\n raise commands.CommandError(\"Bot not connected to active spotify session.\")\n else:\n await ctx.reply(f\"I am not in a voice channel, invite me first with `{self.bot_config['prefix']}join`.\")\n raise commands.CommandError(\"Bot not connected to a voice channel.\")\n\n print(f\"Adding {query} to playlist\")\n controller = SpotifyController.get_instance(ctx.voice_client.channel.id)\n sp = controller.get_playlist_api()\n\n uri = None\n item_info = None\n item_type = None\n\n # If link, queue by link\n if query.startswith(\"http://\") or query.startswith(\"https://\"):\n m = SPOTIFY_LINK_REGEX.match(query)\n if m:\n uri = f\"spotify:{m.group('type')}:{m.group('id')}\"\n item_type = m.group('type')\n if item_type == \"track\":\n try:\n item_info = sp.track(m.group('id'))\n except SpotifyException:\n await ctx.send(f\"Cannot add! Invalid track!\")\n return\n elif item_type == \"album\":\n try:\n item_info = sp.album(m.group('id'))\n except SpotifyException:\n await ctx.send(f\"Cannot add! Invalid album!\")\n return\n elif item_type == \"playlist\":\n try:\n item_info = sp.playlist(m.group('id'))\n except SpotifyException:\n await ctx.send(f\"Cannot add! Invalid or private playlist!\")\n return\n else:\n await ctx.send(f\"Type {item_type} not supported!\")\n return\n\n print(f\"Converted link to ID '{uri}'\")\n else:\n await ctx.send(f\"Only spotify links are supported!\")\n return\n\n # If spotify uri, queue by link\n if uri is None:\n m = SPOTIFY_URI_REGEX.match(query)\n if m:\n uri = f\"spotify:{m.group('type')}:{m.group('id')}\"\n item_type = m.group('type')\n if item_type == \"track\":\n try:\n item_info = sp.track(m.group('id'))\n except SpotifyException:\n await ctx.send(f\"Cannot add! Invalid track!\")\n return\n elif item_type == \"album\":\n try:\n item_info = sp.album(m.group('id'))\n except SpotifyException:\n await ctx.send(f\"Cannot add! Invalid album!\")\n return\n elif item_type == \"playlist\":\n try:\n item_info = sp.playlist(m.group('id'))\n except SpotifyException:\n await ctx.send(f\"Cannot add! Invalid or private playlist!\")\n return\n else:\n await ctx.send(f\"Type {item_type} not supported!\")\n return\n print(f\"Converted URI to ID '{uri}'\")\n\n # Else, try to search\n if uri is None:\n await ctx.send(f'Searching not supported yet.')\n return\n\n # Add URI\n if uri is not None:\n if item_type == \"track\":\n sp.playlist_add_items(controller.playlist[\"id\"], items=[uri])\n elif item_type == \"album\":\n album_tracks = controller.get_album_tracks(item_info['id'])\n i, max_tracks = 0, 50\n while i < len(album_tracks):\n block = [t['uri'] for t in album_tracks[i:i+max_tracks]]\n sp.playlist_add_items(controller.playlist[\"id\"], items=block)\n i += max_tracks\n elif item_type == \"playlist\":\n playlist_tracks = controller.get_playlist_tracks(item_info['id'])\n i, max_tracks = 0, 50\n while i < len(playlist_tracks):\n block = [t['uri'] for t in playlist_tracks[i:i+max_tracks]]\n sp.playlist_add_items(controller.playlist[\"id\"], items=block)\n i += max_tracks\n else:\n await ctx.send(f\"Cannot add! 
Type {item_type} not supported!\")\n return\n\n try:\n controller.update_playlist()\n except IndexError as e:\n print(e, file=sys.stderr)\n\n msg_embed = Embed()\n if item_type == \"track\":\n full_title = SpotifyController.format_full_title(item_info)\n try:\n thumbnail = item_info['album']['images'][0]['url']\n except IndexError:\n thumbnail = None\n msg_embed.description = f\"Added [{full_title}]({item_info['external_urls']['spotify']}) to queue!\"\n msg_embed.set_thumbnail(url=thumbnail)\n elif item_type == \"album\":\n full_title = SpotifyController.format_full_title(item_info)\n try:\n thumbnail = item_info['images'][0]['url']\n except IndexError:\n thumbnail = None\n num_tracks = item_info['tracks']['total']\n msg_embed.description = f\"Added album [{full_title}]({item_info['external_urls']['spotify']}) \" \\\n f\"({num_tracks} tracks) to queue!\"\n msg_embed.set_thumbnail(url=thumbnail)\n elif item_type == \"playlist\":\n title = item_info['name']\n try:\n thumbnail = item_info['images'][0]['url']\n except IndexError:\n thumbnail = None\n num_tracks = item_info['tracks']['total']\n msg_embed.description = f\"Added playlist [{title}]({item_info['external_urls']['spotify']}) \" \\\n f\"({num_tracks} tracks) to queue!\"\n msg_embed.set_thumbnail(url=thumbnail)\n else:\n # Shouldn't happen, but lets add a message anyway...\n msg_embed.description = f\"Unknown {item_type} item added to queue!\"\n await ctx.reply(embed=msg_embed)", "def AddPath(self, path):\n\n if not self.db.GetOneRow('select * from paths where path=\"%s\";'\n % path):\n self.db.ExecuteSql('insert into paths(path, track_id) values(\"%s\", %d);'\n %(path, self.persistant['id']))\n self.db.ExecuteSql('commit;')\n return True\n\n elif FLAGS.verbose:\n print 'Path already stored'\n\n return False", "def AddTag(self, tag):\n\n if not self.persistant:\n return\n\n self.db.ExecuteSql('insert into tags(tag, track_id) values(\"%s\", %d);'\n %(tag, self.persistant['id']))\n self.db.ExecuteSql('commit;')", "def add_album(self):\n item = self.clementine_albums.currentItem()\n albumname = item.text(0) if item else ''\n year = item.data(0, core.Qt.UserRole) if item else ''\n dlg = NewAlbumDialog(self, albumname, year).exec_()\n if dlg != qtw.QDialog.Accepted:\n return\n name, year, is_live = self.data\n if not item:\n result = self.clementine_albums.findItems(name,\n core.Qt.MatchFixedString, 0)\n if result:\n item = result[0]\n if not item:\n qtw.QMessageBox.information(self, self.appname, \"Album doesn't \"\n \"exist on the Clementine side\")\n return\n\n a_item = None\n results = self.albums_albums.findItems(name, core.Qt.MatchFixedString, 0)\n data = [build_album_name(x) for x in results]\n if results:\n selected, ok = qtw.QInputDialog.getItem(self, self.appname,\n 'Select Album', data,\n editable=False)\n if ok:\n a_item = results[data.index(selected)]\n if not a_item:\n a_item = qtw.QTreeWidgetItem([name, year, '0'])\n self.albums_albums.addTopLevelItem(a_item)\n tracklist = dmlc.list_tracks_for_album(dmlc.DB, self.c_artist,\n item.text(0))\n num = itertools.count(1)\n self.albums_to_save[self.c_artist].append(\n (name, year, 'X', is_live,\n [(next(num), x['title']) for x in tracklist if x['track'] > -1]))\n self.update_item(a_item, item)", "def __add_lyric(self, song, genius_api):\n\t\tentry = {\n\t\t\t'song_id' : int(song['id']),\n\t\t\t'song_title' : song['title'],\n\t\t\t'url' : song['url']\n\t\t\t}\n\t\ttry:\n\t\t\tentry['lyrics'] = genius_api.get_lyrics(song['id'], song['url'])\n\t\texcept:\n\t\t\tentry['lyrics'] = 
''\t\n\t\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\ttry:\n\t\t\tself.db.lyrics.insert_one(entry)\n\t\texcept errors.DuplicateKeyError:\n\t\t\tpass", "def song(self, value):\r\n self._song_id = value\r\n data = Song(value)\r\n self.songtitel = data.songtitel if data.found else \"\"", "def _add_artist(self, artist):\n\n insert_artist = 'INSERT INTO artists (name, email, artist_id) VALUES (?, ?, ?)'\n\n try:\n with sqlite3.connect(db_path) as conn:\n res = conn.execute(insert_artist, (artist.name, artist.email, artist.artist_id))\n new_id = res.lastrowid # Get the ID of the new row in the table\n artist.artist_id = new_id # Set this artist's ID\n conn.close()\n return True\n except sqlite3.IntegrityError:\n print(f'\\nError - Artist with that email is already in the database.\\n')\n return False", "def add_song(self):\n\n if self.root.ids.Title.text == \"\" or self.root.ids.Artist.text == \"\" or self.root.ids.Year.text == \"\":\n self.root.ids.statusLabel.text = \"All fields must be required\" #Displayed when user does not fill in all the field and press Add Song\n return\n try:\n YEAR = int(self.root.ids.Year.text) #Make sure the year text is a number\n self.song_list.AddSongToSongList(self.root.ids.Title.text,self.root.ids.Artist.text,self.root.ids.Year.text,\"n\")#Return to function from songlist\n temp_button = Button(text=\"{} by {} ({}) ({})\".format(self.root.ids.Title.text,self.root.ids.Artist.text,self.root.ids.Year.text,\"y\"))\n temp_button.bind(on_release=self.press_entry)\n temp_button.background_color = [1,0,0,2] #Append the new temp button with color pink\n self.root.ids.entriesBox.add_widget(temp_button)#Adding widget temp_button\n self.root.ids.Title.text = \"\"\n self.root.ids.Artist.text = \"\" #Empty the text boxes\n self.root.ids.Year.text = \"\"\n except ValueError:\n self.status_text2=\"Please enter a valid number\"#Display status label at the buttom", "def adds_new_songs_to_db_by_en_id(yt_playlist_query):\n # yt_playlist_query returned by gets_playlist_history(en_playlist), api_helper.py\n\n for item in yt_playlist_query:\n en_song_id = item['en_song_id']\n is_en_song_id_in_db = db.session.query(exists().where(Song.en_song_id == en_song_id)).scalar()\n if is_en_song_id_in_db == False:\n en_artist_id = item['en_artist_id']\n artist_id = db.session.query(Artist.artist_id).filter(Artist.en_artist_id == en_artist_id).one()\n song_info = Song(en_song_id=en_song_id,\n song_title=item['song_title'],\n artist_id=artist_id)\n db.session.add(song_info)\n db.session.flush", "def savePlaylist():\n\n # get user form info\n title = request.json.get('title')\n interval = request.json.get('interval')\n orig_playlist_id = request.json.get('playlist_id')\n\n # create a new playlist\n new_playlist = crud.createPlaylist(session, title)\n\n new_playlist_id = new_playlist['id']\n\n user_id = session['user_id']\n\n # store playlist in DB\n savedPlaylist = crud.storeSavedPlaylist(user_id, orig_playlist_id, \n new_playlist_id, interval, title)\n print(savedPlaylist)\n \n # copy over tracks in original playlist to the new playlist\n snapshot_id = crud.updatePlaylist(session, orig_playlist_id, new_playlist_id)\n\n return snapshot_id", "def delete_song(id):\n # check if the song exists, if not return a 404 with a helpful message\n song = session.query(models.Song).get(id)\n if not song:\n message = \"Could not find song with id {}\".format(id)\n data = json.dumps({\"message\": message})\n return Response(data, 404, mimetype=\"application/json\")\n \n session.delete(song)\n 
session.commit\n \n message = \"deleted song {}\".format(id)\n data = json.dumps({\"message\": message})\n return Response(data, 404, mimetype=\"application/json\")", "def song_not_found(self, song):\n message = {\n \"timestamp\": self._get_time(),\n \"level\": \"ERROR\",\n \"type\": \"SONG_NOT_FOUND\",\n \"song\": json.dumps(song.to_dict()),\n }\n\n self._log_queue.put(json.dumps(message))", "def __insert_song_data(cur, df):\n song_data = (\n df.song_id.values[0],\n df.title.values[0],\n df.artist_id.values[0],\n (df.year.values[0]).item(),\n (df.duration.values[0]).item()\n )\n cur.execute(song_table_insert, song_data)", "def post(self, request) -> redirect:\n form = UploadMusicForm(request.POST, request.FILES)\n if form.is_valid():\n music = form.save()\n playpos = PlayPosition(position=music,\n plist=request.user.profile)\n playpos.add_order()\n playpos.save()\n\n return redirect('/accounts/profile/{}/music/'.format(request.user.profile.custom_url))", "def add_song():\n return render_template('pong!')", "def test_adding_invalid_file(self):\n (added, status) = self.app.add_album(__file__)\n self.assertEqual(added, False)\n self.assertIn('Unable to load', status)\n self.assertEqual(self.get_album_count(), 0)", "def insert_file(sess, job_id, status):\n fs = File(job_id=job_id, filename=' ', file_status_id=status)\n sess.add(fs)\n sess.commit()\n return fs.file_id", "def get_next_song(self):\r\n if self.timestamp:\r\n delta = datetime.datetime.now() - self.timestamp\r\n if delta < timedelta(seconds=3):\r\n self.log.warning(u\"Song '%s' stopped playing after less than 3 seconds for some reason!\" % self.meta)\r\n time.sleep(3)\r\n self.timestamp = datetime.datetime.now()\r\n\r\n song = self.findQueued()\r\n\r\n self.meta = u\"%s - %s\" % (song.artist(), song.title)\r\n self.log.debug(\"Now playing \\\"%s\\\" [ID %s]\" % (song.title, song.id))\r\n self.song = song\r\n\r\n try:\r\n filepath = song.file.path.encode(self.fsenc)\r\n except:\r\n try:\r\n filepath = song.file.path.encode(self.sysenc)\r\n except:\r\n filepath = song.file.path\r\n self.log.debug(\"Returning path %s\" % filepath)\r\n return filepath", "def process_song_file(cur, filepath):\n # open song file\n df = get_file_df(filepath)\n\n # insert song record\n song_data = songs_data = [df.loc[0].song_id, df.loc[0].title, df.loc[0].artist_id, int(df.loc[0].year), int(df.loc[0].duration)]\n cur.execute(song_table_insert, song_data)\n \n # insert artist record\n artist_data = [df.loc[0].artist_id, df.loc[0].artist_name, df.loc[0].artist_location, df.loc[0].artist_latitude, df.loc[0].artist_longitude] \n\n cur.execute(artist_table_insert, artist_data)", "def store_add_to_backend(image_id, data, size, store, context=None,\n verifier=None):\n (location, size, checksum, metadata) = store.add(image_id,\n data,\n size,\n context=context,\n verifier=verifier)\n\n if metadata is not None:\n _check_metadata(store, metadata)\n\n return (location, size, checksum, metadata)", "def save_track(self, track: Track):\n if track.internal_id is not None:\n self.logging.info(f\"updated track: {track.track_id} for {track.title}\")\n self.sess.query(Track).filter(Track.track_id == track.track_id).update(track)\n elif self.sess.query(self.sess.query(exists().where(and_(Track.title == track.title,\n Track.artist == track.artist,\n Track.url == track.url)).exists()).scalar()) \\\n .scalar():\n\n self.logging.info(f\"updated track w/o id: {track.title} by {track.artist}\")\n self.sess.query(Track).filter(and_(Track.title == track.title,\n Track.artist == 
track.artist,\n Track.url == track.url)).update(track)\n else:\n self.logging.info(f\"added track: {track.title} by {track.artist}\")\n if track.category_list is not None:\n for category in track.category_list:\n cat = self.fetch_category(category)\n track.categories.append(cat)\n cat.tracks.append(track)\n if track.genre_list is not None:\n for genre in track.genre_list:\n gen = self.fetch_genre(genre)\n track.genres.append(gen)\n gen.tracks.append(track)\n track.internal_id = uuid4().hex\n self.sess.add(track)\n self.sess.commit()", "def insertfile(self, datas):\n query = \"\"\"INSERT INTO caro_song (score, filename, artist, album, title, genre, played, uniq, global_score, family) VALUES (0, %s, %s, %s, %s, %s, 0, %s, 0, 0);\"\"\"\n cur = self.conn.cursor()\n try:\n cur.execute(query, (datas[0],\n datas[1],\n datas[2],\n datas[3],\n datas[4],\n datas[5]\n ))\n except KeyError:\n query = \"\"\"INSERT INTO caro_logs (filename, message, date_import) VALUES (%s, 'ERROR 02', now());\"\"\"\n cur.execute(query, (datas[0],))", "def add_new_artwork():\n artist_name = get_artist_name()\n if not controls_utils.artist_already_in_db(artist_name):\n print('Artist not registered, creating new registration. ')\n email = get_artist_email()\n new_artist = Artist(artist_name, email)\n artwork_db.add_artist(new_artist)\n artwork_name = get_new_artwork_name()\n price = get_price()\n available = True\n new_artwork = Artwork(artist_name, artwork_name, price, available)\n artwork_db.add_artwork(new_artwork)", "def insert(self, media):\n insert_query = \"\"\"INSERT INTO %s VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n \"\"\" % MediaCollection.COLLECTIONS_TABLE\n self.cursor.execute(insert_query, media.totuple())\n self.connection.commit()", "def test_add():\n\n conn = psycopg2.connect(host=\"sculptor.stat.cmu.edu\", database=c.DB_USER,\n user=c.DB_USER, password=c.DB_PASSWORD)\n cur = conn.cursor()\n cur.execute(\"\"\" SELECT COUNT(*) FROM songs WHERE song_title = SHERlocked\n artist_name = unknown\"\"\")\n count = cur.fetchone()[0]\n assert count != 0", "def update_song(_id, _name_of_the_song, _duration_in_number_of_seconds):\r\n song_to_update = Song.query.filter_by(id=_id).first()\r\n song_to_update.name_of_the_song = _name_of_the_song\r\n song_to_update.duration_in_number_of_seconds = _duration_in_number_of_seconds\r\n db.session.commit()", "def add_mp3(self, filename='song.mp3', set_artist=False, artist=None,\n set_album=False, album=None):\n full_filename = os.path.join(self.mp3_dir, filename)\n shutil.copyfile(self.source_file, full_filename)\n self.assertEqual(os.path.exists(full_filename), True)\n\n if set_artist or set_album:\n tags = id3.ID3(full_filename)\n\n if set_artist:\n tags.delall('TPE1')\n if artist is not None:\n tags.add(id3.TPE1(encoding=3, text=artist))\n \n if set_album:\n tags.delall('TALB')\n if album is not None:\n tags.add(id3.TALB(encoding=3, text=album))\n\n tags.save()\n\n self.filenames.append(full_filename)\n return full_filename", "def _add_artwork(self, artwork):\n\n insert_artwork = 'INSERT INTO artworks (artwork, price, artist_id, for_sale) VALUES (?, ?, ?, ?)'\n\n try:\n with sqlite3.connect(db_path) as conn:\n conn.execute(insert_artwork, (artwork.artwork, artwork.price, artwork.artist_id, artwork.for_sale))\n # new_id = res.lastrowid # Get the ID of the new row in the table\n # artwork.artist_id = new_id # Set this artist's ID\n conn.close()\n return True\n except sqlite3.IntegrityError as e:\n print(f'\\nError - Artwork with that name is already in the database.\\n', e)\n 
return False", "def appendAlbum(song):\n\tsql = []\n\tsql.append(\"INSERT INTO ALBUM ('name') VALUES ('\" \n\t+ '/'.join(song.album) + \"');\")\n\t\n\tsql.append(\"INSERT INTO songs_album ('songs_id', 'album_id')\"\n\t+ \" VALUES ((select id from songs where hash = '\" + str(song.hash) + \"'), \"\n\t+ \"(select id from album where name = '\" + '/'.join(song.album) + \"'));\")\n\tsql.append(\"INSERT INTO artist_album ('artist_id', 'album_id')\"\n\t+ \" VALUES ((select id from songs where hash = '\" + str(song.hash) + \"'), \"\n\t+ \"(select id from album where name = '\" + '/'.join(song.album) + \"'));\")\n\t\n\treturn sql", "def markfile(self, song_id):\n cur = self.conn.cursor()\n query = \"\"\"UPDATE caro_song SET score = -1000 WHERE id=%s\"\"\"\n cur.execute(query, (song_id, ))\n\n self.memcache.delete(\":1:song_%d\" % song_id)\n\n query = \"\"\"DELETE FROM caro_playlistentry WHERE song_id=%s\"\"\"\n cur.execute(query, (song_id, ))", "def insert_playlist(self, playlist_contents):\n\n # Just make sure we don't overwrite an existing playlist! Silly python not having do-while..\n while True:\n playlist_uuid = str(uuid4())\n if playlist_uuid not in self.playlists:\n break\n\n try:\n playlist = Playlist(playlist_contents)\n except PlaylistValidationError as e:\n rsp = rsp_codes[8]\n rsp['trace'] = traceback.format_exc()\n return rsp\n\n self.playlists[playlist_uuid] = playlist\n\n rsp = rsp_codes[0]\n rsp['playlist_uuid'] = playlist_uuid\n return rsp", "def song_added(song, playlist_id):\n if song.added_by == 'cedmunds90':\n print('Ruhpushuh {song_id} ({title}) ruhpush a shuh {playlist_id} rhup {added_by}.'\n .format(song_id=song.id,\n title=song.title,\n playlist_id=playlist_id,\n added_by=song.added_by))\n pass\n else:\n print('Song {song_id} ({title}) added to playlist {playlist_id} by {added_by}.'\n .format(song_id=song.id,\n title=song.title,\n playlist_id=playlist_id,\n added_by=song.added_by))\n\n pass", "def add_artist_to_db(artist_id, session):\n # type: (six.stringtypes, Any) -> None\n logger.info('adding artist {} to db'.format(artist_id))\n with musicbrainz_lock:\n artist_info = musicbrainzngs.get_artist_by_id(artist_id)['artist']\n\n artist = Artist(name=artist_info['name'],\n musicbrainz_id=artist_id,\n status=Status.Wanted)\n session.add(artist)\n\n release_groups = get_release_groups_for_artist(artist.musicbrainz_id)\n\n for group_info in release_groups:\n logger.debug('found {type} {name}'.format(type=group_info['type'], name=ensure_unicode(group_info['title'])))\n album = Album(title=ensure_unicode(group_info['title']),\n musicbrainz_id=group_info['id'],\n type=group_info['type'],\n artist=artist,\n status=Status.Wanted\n )\n\n session.add(album)\n\n releases = get_releases_for_release_group(album.musicbrainz_id)\n for release_info in releases:\n add_album_and_tracks_to_db(album, release_info, session)\n\n # Chose oldest release (it's usually the original release)\n chosen_release = session.query(Release).join(Album).filter(Album.musicbrainz_id == group_info['id']).order_by(\n Release.release_date.asc()).first()\n if chosen_release:\n chosen_release.is_selected = True\n\n with write_lock:\n session.commit()", "def test_single_track_no_album(self):\n self.add_mp3(set_album=True)\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('has no album tag', status)\n self.assertEqual(self.get_album_count(), 0)", "def test_two_tracks_same_album(self):\n self.add_mp3(filename='1.mp3')\n self.add_mp3(filename='2.mp3')\n (added, 
status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n album = Album.get_by_artist_album(self.app.curs, 'Artist', 'Album')\n self.assertNotEqual(album, None)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 4)\n self.assertEqual(album.totaltracks, 2)", "def setup_local_storage(mod, media_type, media_id, id=None):\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n if mod == 'post':\n mod = 'posts'\n path = os.path.join(BASE_DIR, 'save', mod, str(media_id))\n if id:\n path = os.path.join(BASE_DIR, 'save', mod, str(id))\n name = media_type.lower()\n try:\n os.mkdir(path)\n except FileExistsError as e:\n timestamp = time.strftime('%Y%m%d-%H%M%S')\n name += f\"_{timestamp}\"\n except OSError as e:\n raise InvalidUsage(\"OSError in setup_local_storage. \", status_code=501, payload=e)\n filename = f\"{str(path)}/{name}\"\n return path, filename" ]
[ "0.72021145", "0.7178171", "0.68124354", "0.67616093", "0.6730884", "0.661579", "0.6544573", "0.6535995", "0.65308034", "0.6529165", "0.6515031", "0.64952904", "0.64827377", "0.6465418", "0.6454924", "0.64515936", "0.6450223", "0.64397067", "0.64342123", "0.63851374", "0.63463825", "0.6323187", "0.6295637", "0.6265829", "0.6242704", "0.6231726", "0.6189661", "0.6178191", "0.6153588", "0.6153416", "0.615185", "0.6070421", "0.60573184", "0.6033373", "0.6009425", "0.6002129", "0.59948117", "0.5989942", "0.5971573", "0.5939724", "0.589388", "0.58873343", "0.5835173", "0.58096117", "0.58081985", "0.5776529", "0.57599145", "0.57468307", "0.57365984", "0.57242346", "0.56983966", "0.5683002", "0.5678996", "0.56527364", "0.564178", "0.56414473", "0.56312305", "0.5612597", "0.5604488", "0.5585595", "0.5585282", "0.55819464", "0.5569453", "0.5552033", "0.5546841", "0.5539453", "0.55329984", "0.55197066", "0.5488367", "0.54820824", "0.54609114", "0.54554915", "0.54465437", "0.54461485", "0.5432493", "0.54222363", "0.541917", "0.54173815", "0.5414838", "0.5412782", "0.541264", "0.5396648", "0.5391492", "0.5390681", "0.539003", "0.53862226", "0.5383175", "0.5377439", "0.53579485", "0.53537995", "0.5339869", "0.5334496", "0.5331653", "0.5325155", "0.53219885", "0.53145164", "0.53106505", "0.5301857", "0.5293242", "0.52848345" ]
0.8322641
0
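A pattern that recurs in the snippets above is an INSERT guarded by a uniqueness check, with a duplicate signalled either as a DuplicateKeyError (MongoDB insert_one) or an IntegrityError (sqlite3). A minimal, self-contained sketch of that idea follows; it uses sqlite3, and the songs table, its columns, and the add_song name are illustrative assumptions, not taken from any of the entries above.

import sqlite3

def add_song(conn, file_title, song_title, artist, form):
    # Insert a song row, skipping duplicates on (file_title, form).
    # Assumed schema: CREATE TABLE songs (file_title TEXT, song_title TEXT,
    #                 artist TEXT, form TEXT, UNIQUE (file_title, form));
    sql = ("INSERT INTO songs (file_title, song_title, artist, form) "
           "VALUES (?, ?, ?, ?)")
    try:
        with conn:  # the connection context manager commits on success, rolls back on error
            conn.execute(sql, (file_title, song_title, artist, form))
        return True
    except sqlite3.IntegrityError:
        # A song with this file name and format is already stored.
        return False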
Remove song from database and from the storage directory based on ID
def delete_song(self): song_id = tuple(input("Give the melody id to be deleted:\t")) sql = "SELECT file_title, form FROM songs WHERE id = %s" # Check existence of song with given ID self.cursor.execute(sql, song_id) result = self.cursor.fetchall() if len(result) > 0: path = self.p_storage + "/" + result[0][0] + "." + result[0][ 1] # Find path of song by appending the name and format to the storage directory path os.remove(path) # Remove song from directory sql = "DELETE FROM songs WHERE id = %s" # Delete song from database self.cursor.execute(sql, song_id) self.cnx.commit() print(self.cursor.rowcount, "record(s) deleted") else: print("Give a valid id...")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_song(_id):\r\n Song.query.filter_by(id=_id).delete()\r\n # filter song by id and delete\r\n db.session.commit() # commiting the new change to our database\r", "def delete_music():\n track_id = request.vars.track_id\n if track_id is None:\n raise HTTP(500)\n db(db.track_data.track_id == track_id).delete()\n return \"ok\"", "def delete_song(song):\n logging.debug('{CRUD_operations} BEGIN function delete_song()')\n logging.debug('{CRUD_operations} Data received: song: %s', song)\n song.is_deleted = True\n logging.debug('{CRUD_operations} END function delete_song()')", "def markfile(self, song_id):\n cur = self.conn.cursor()\n query = \"\"\"UPDATE caro_song SET score = -1000 WHERE id=%s\"\"\"\n cur.execute(query, (song_id, ))\n\n self.memcache.delete(\":1:song_%d\" % song_id)\n\n query = \"\"\"DELETE FROM caro_playlistentry WHERE song_id=%s\"\"\"\n cur.execute(query, (song_id, ))", "def delete_song(id):\n # check if the song exists, if not return a 404 with a helpful message\n song = session.query(models.Song).get(id)\n if not song:\n message = \"Could not find song with id {}\".format(id)\n data = json.dumps({\"message\": message})\n return Response(data, 404, mimetype=\"application/json\")\n \n session.delete(song)\n session.commit\n \n message = \"deleted song {}\".format(id)\n data = json.dumps({\"message\": message})\n return Response(data, 404, mimetype=\"application/json\")", "def remove(request, music_id: int) -> HttpResponseRedirect:\n music_item = get_object_or_404(Music, id=music_id)\n request.user.profile.playlist.remove(music_item)\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))", "def disassociate_song(self, song):\n self.songs.remove(song)", "def Delete(self):\n\n self.db.ExecuteSql('delete from tracks where id=%d;'\n % self.persistant['id'])\n self.db.ExecuteSql('commit;')", "def delete(self, cls, id):\n\n del FileStorage.__objects[key(cls, id)]", "def remove_song(self, song):\n # code omitted\n self.playlist.remove(song)", "def DeletePlaylist(self):\n os.remove(self.path)", "def delete_file(file_id):\n file_obj = Data.objects.get(id=file_id)\n print(\"Removing file: \", file_obj.name)\n print(file_obj.file.path)\n file_dir = file_obj.file.path\n os.remove(file_dir)\n print(\"Done.\")", "def remove(id):\n upload = Upload.query.get_or_404(id)\n\n if upload.publisher == session['username']:\n db.session.delete(upload)\n db.session.commit()\n else:\n return 'you do not have right perms'\n\n return redirect(url_for('dashboard'))", "def delete(self, show_id):\r\n song = Shows.query.filter_by(ShowID=show_id).first_or_404()\r\n db.session.delete(song)\r\n db.session.commit()\r\n return make_response(\"\", 204)", "def remove_song(self):\n self.stop()\n self.listbox.delete(\"anchor\")\n pygame.mixer.music.stop()", "def delete_leftovers(self):\n for each_file, artist in self.past_songs_db_data:\n if os.path.isfile(each_file): \n os.remove(each_file)\n print \"Deleted \" + each_file\n\n for each_file in os.listdir(\".\"):\n if each_file.endswith(\".jpg\"):\n os.remove(each_file)", "async def clear_data(self, msg):\n name = self.player[msg.guild.id]['name']\n os.remove(name)\n self.player['audio_files'].remove(name)", "def current_remove(self):\n storage.close()", "def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if 
audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... Done.')", "def delete_video(self, video_ID): # WORKS\n try:\n self.cur.execute(\"DELETE FROM videos WHERE video_ID = \\\"{}\\\"\".format(video_ID))\n self.db.commit()\n os.remove('static/videos/' + str(video_ID) + '.mp4')\n os.remove('static/images/' + str(video_ID) + '.jpg')\n except:\n self.db.rollback()", "def delete():", "def delete(self, request, pk=None):\n song = get_object_or_404(Song, pk=pk)\n\n self.check_object_permissions(request, song.creator)\n\n song.delete()\n return Response({}, status.HTTP_204_NO_CONTENT)", "def remove(self, path):\n path = path.decode('utf8')\n cursor = self._dbcon.cursor()\n filename = os.path.basename(path)\n dirname = os.path.dirname(path)\n t = (dirname, filename)\n sql = u\"delete from books where path = ? and filename = ?\"\n cursor.execute(sql, t)\n self._dbcon.commit()\n cursor.close()", "def delete_audiobook(_id):\r\n Audiobook.query.filter_by(id=_id).delete()\r\n # filter audio book by id and delete\r\n db.session.commit() # commiting the new change to our database\r", "def unlink(self):\n album_id = self.albums_map[self.artist][self.c_album][1]\n # clear entry in self.albums_map[artist]\n self.albums_map[self.artist].pop(self.c_album)\n # remove Albums recording only if no more references to the album exist\n still_present = False\n for item in self.albums_map[self.artist].values():\n if item[1] == album_id:\n still_present = True\n if not still_present:\n dmla.unlink_album(self.a_album)\n self.modified = True\n self.refresh_screen(self.artists_list.currentIndex(),\n self.albums_list.currentIndex(), modifyoff=False)", "def remove(table, id_):\n\n # your code\n\n common.toremoveid(\"store/games.csv\",data_manager.get_table_from_file(\"store/games.csv\"),id_)", "def remove():\n\n db_remove()", "def update_db(self):\n songs = self.db.get_all_songs()\n for song in songs:\n if choose_song(song) == ERROR:\n self.db.delete_song(song)\n files = []\n for song in glob.glob(\"songs\\*.wav\"):\n to_append = song.split('\\\\')[ONE][:-4]\n files.append(to_append)\n for song in files:\n if song not in songs:\n self.db.add_new_song(song)", "def delete(self):\r\n delete_tracks(self.project, [self])", "def delete(self, file_id: str):\n file_path = self._path_to_file(file_id)\n os.remove(file_path)\n del self.index[file_id]", "def delete_item(dataobj_id):\n file = get_by_id(dataobj_id)\n remove_from_index(dataobj_id)\n if file:\n Path(file).unlink()", "def delete(self, _id):", "def clean_cache(user_id, song_id, ple_id):\n\n key = 'ple_{}_{}'.format(user_id, ple_id)\n song_key = 'song_{}'.format(song_id)\n\n cache.delete(key)\n cache.delete(song_key)\n return 0", "def delete_playlists_in(path):\n\n for f in [f for f in os.listdir(path) if f.endswith('.m3u')]:\n os.remove(os.path.join(path, f))", "def db_remove():\n\n db.session.close()\n db.drop_all()\n\n path = current_app.config['SNER_VAR']\n for file_object in os.listdir(path):\n file_object_path = os.path.join(path, file_object)\n if os.path.isdir(file_object_path):\n 
shutil.rmtree(file_object_path)\n else:\n os.unlink(file_object_path)", "def delete_pl_btn_push(self):\n try:\n pl_name = self.pl_line_edit.text().replace(\" \", \"_\")\n path = os.path.abspath(\"Playlists/\"+ pl_name+\".m3u\")\n to_keep = {}\n for row in range(self.model.rowCount()):\n if not self.model.item(row).checkState():\n title = str(self.model.data(self.model.index(row, 1)))\n artist = str(self.model.data(self.model.index(row, 2)))\n to_keep[title] = artist\n os.system(\"rm %s\" % (path))\n\n pl_file = open(path, \"w\")\n for mp3 in glob.glob(\"Fixed/*/*/*\"):\n data = mutagen.File(mp3, easy=True)\n if (data[\"title\"][0] in to_keep.keys() and\n to_keep[data[\"title\"][0]] == data[\"artist\"][0]):\n pl_file.write(mp3+\"\\n\")\n QMessageBox.about(self, \"Playlist Updated\",\n 'Playlist \"%s\" has been updated, please view again to see changes.'% (self.pl_line_edit.text()))\n except:\n QMessageBox.about(self, \"Playlist Not Updated\",\n 'Playlist \"%s\" could not be updated, please view again to see changes.'% (self.pl_line_edit.text()))", "def test_api_can_delete_music(self):\n music = Music.objects.get()\n response = self.client.delete(\n reverse('details', kwargs={'pk': music.id}),\n format = \"json\",\n follow = True\n )\n self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)", "def _http_delete(self, id: int):\n self._http_request(\"pl_delete&id=%i\" % id)\n self.get_playlist()", "def do_destroy(self, arg):\n args = shlex.split(arg)\n stored_objects = models.storage.all()\n\n if self.basic_errs(args):\n '''check if instance exists'''\n instance = self.check_instance(args[0], args[1], stored_objects)\n if instance:\n \"\"\"delete from FileStorage.__objects\"\"\"\n del stored_objects[instance]\n \"\"\"overwrite the new data to file.json\"\"\"\n models.storage.save()", "def remove_songs(self):\n self.stop()\n self.listbox.delete(0, \"end\")\n pygame.mixer.music.stop()", "def delete(self, name):\n if (self.model_dir / (str(name) + '.pkl')).exists():\n (self.model_dir / (str(name) + '.pkl')).unlink()", "def deleteFileRecordByID(file_id):\n session = Queries.createSession()\n try:\n file_db = session.query(FileTable).filter_by(id=file_id).first()\n servers = file_db.server_id[:]\n for server in servers:\n file_db.server_id.remove(server)\n session.commit()\n session.delete(file_db)\n session.commit()\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()", "def delete_db(self):\n import os.path\n os.remove(self.filepath)", "def track_del(self,posicion):\n self.tracks.pop(posicion)", "def exposed_delete_data(self, chunk_id):\n local_filename = self.chunk_filename(chunk_id)\n if not os.path.isfile(local_filename):\n return None\n os.remove(local_filename)", "def move(self):\n for artist in self.audio_dict:\n for album in self.audio_dict[artist]:\n for songlist in self.audio_dict[artist][album]:\n if len(self.audio_dict[artist][album][songlist]) > 1:\n \n # track the song that wont be deleted\n song_to_keep = {}\n # track bitrate through songlist\n highest_bitrate = 0\n # find the highest bitrate\n for song in self.audio_dict[artist][album][songlist]:\n if song['bitrate'] > highest_bitrate:\n highest_bitrate = song['bitrate']\n song_to_keep = song\n # flag files for deletion \n for song in self.audio_dict[artist][album][songlist]:\n if song != song_to_keep:\n self._do_move(artist, 
album, song)\n \n return self", "def remove(path):", "def __del__(self):\n if self.playlists_path is None:\n return\n else:\n # Clear out the directory ready for the next lot, gets around deleting easily.\n for filename in os.listdir(self.playlists_path):\n filepath = os.path.join(self.playlists_path, filename)\n if os.path.isfile(filepath):\n os.unlink(filepath)\n\n for uuid, playlist in self.playlists.iteritems():\n playlist_path = os.path.join(self.playlists_path, uuid)\n with open(playlist_path, 'w') as pl_file:\n json.dumps(str(playlist), pl_file)", "def delete_file(filename: str):\n\t\tif filename == \"ALL\":\n\t\t\tfor file in os.listdir(\"data/music/\"):\n\t\t\t\tdeleted = False\n\t\t\t\twhile not deleted:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.remove(f\"data/music/{file}\")\n\t\t\t\t\t\tdeleted = True\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint(\"Not removed, waiting 1 second...\")\n\t\t\t\t\t\tasyncio.sleep(1)\n\t\telse:\n\t\t\tprint(\"File--: \", filename)", "def remove_file(path, save):\n if not save:\n os.remove(path)\n print \"[crawler] removing audio file...\"", "def on_stop(self):\n self.song_list.save_songs()", "def remove_media(media, window=None, gui_instance=None):\r\n\r\n cursor = connection.cursor()\r\n\r\n if media.isnumeric(): # CLI-only: The user has attempted to delete the media file based on its ID in the database\r\n cursor.execute(\"SELECT full_path FROM media WHERE id = \" + media)\r\n\r\n full_path = cursor.fetchone()\r\n\r\n if full_path is None: # The system couldn't find the specified ID\r\n print(\"Error: The specified ID does not exist in the database.\")\r\n return\r\n\r\n # Attempting to remove the media file record from the database\r\n try:\r\n cursor.execute(\"DELETE FROM media WHERE id = \" + media) # Deleting the record from the database\r\n\r\n connection.commit() # Writing the changes to the database\r\n\r\n except Error: # Database is locked\r\n print(\"\\nError when trying to commit changes to database. Make sure another application is not using the \"\r\n \"database.\")\r\n\r\n return False\r\n\r\n cursor.close()\r\n\r\n # Attempting to re-order the keys after the deleted one\r\n if not resort_keys(media): # Fatal error: database is locked\r\n print(\"\\nERROR: DATABASE COULD NOT BE UPDATED. APPLICATION CANNOT WORK AS INTENDED. \"\r\n \"PLEASE MANUALLY REMOVE ALL MEDIA FILES FROM THE MEDIA FOLDER AND TRY ADDING THEM BACK.\")\r\n sys.exit() # Quitting; the application will malfunction until the user manually resets the media folder\r\n\r\n try:\r\n os.remove(full_path[0].replace(\"\\\\\", \"/\")) # Removes the media file from the media folder\r\n\r\n except FileNotFoundError:\r\n print(\"\\nError: Could not remove the file from the media folder: The file does not exist.\")\r\n return False\r\n\r\n except PermissionError:\r\n print(\"\\nError: Unable to remove file from the media folder. Make sure you haven't selected a \"\r\n \"write-protected folder. 
If the issue persists, try changing the media folder and manually removing\"\r\n \" the media file from the current media folder.\")\r\n return False\r\n\r\n print(\"\\nThe media file has been removed.\")\r\n\r\n else: # The user is either using the GUI or has provided the filename as parameter\r\n # Getting the full path of the file (using an app-level convention for slashes)\r\n full_path = os.path.join(media_folder, os.path.basename(media)).replace(\"\\\\\", \"/\")\r\n\r\n if path.exists(full_path): # (CLI-only) Checking if the provided filename exists\r\n\r\n # Getting the id of the media which will be removed in order to re-order the IDs of the database\r\n cursor.execute(\"SELECT id FROM media WHERE full_path = \" + \"\\\"\" + full_path + \"\\\"\")\r\n id_value = cursor.fetchone()\r\n\r\n # Attempting to remove the media file record from the database\r\n try:\r\n cursor.execute(\"DELETE FROM media WHERE full_path = \" + \"\\\"\" + full_path + \"\\\"\")\r\n\r\n connection.commit() # Writing the changes to the database\r\n\r\n except Error: # Database is locked\r\n # Application is running in GUI-mode\r\n if gui_instance is not None:\r\n messagebox.showerror(\"Database is locked\", \"Error when trying to commit changes to database. Make \"\r\n \"sure another application is not using the database.\")\r\n\r\n # Application is running in CLI or debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"1\" or config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nError when trying to commit changes to database. Make sure another application is not \"\r\n \"using the database.\")\r\n\r\n return False\r\n\r\n cursor.close()\r\n\r\n # Attempting to re-order the keys after the deleted one\r\n if not resort_keys(id_value[0]): # Fatal error: database is locked\r\n # Application is running in GUI-mode\r\n if gui_instance is not None:\r\n messagebox.showerror(\"Database error\", \"DATABASE COULD NOT BE UPDATED. APPLICATION CANNOT WORK AS \"\r\n \"INTENDED. PLEASE MANUALLY REMOVE ALL MEDIA FILES FROM THE MEDIA FOLDER AND \"\r\n \"TRY ADDING THEM BACK.\")\r\n # Quitting; the application will malfunction until the user manually resets the media folder\r\n sys.exit()\r\n\r\n # Application is running in CLI or debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"1\" or config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nERROR: DATABASE COULD NOT BE UPDATED. APPLICATION CANNOT WORK AS INTENDED. \"\r\n \"PLEASE MANUALLY REMOVE ALL MEDIA FILES FROM THE MEDIA FOLDER AND TRY ADDING THEM BACK.\")\r\n # Quitting; the application will malfunction until the user manually resets the media folder\r\n sys.exit()\r\n\r\n try:\r\n os.remove(full_path) # Removes the media file from the media folder\r\n\r\n except FileNotFoundError:\r\n # Application is running in GUI-mode\r\n if gui_instance is not None:\r\n messagebox.showerror(\"File not found\", \"The file does not exist.\")\r\n\r\n # Application is running in CLI or debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"1\" or config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nError: Could not remove the file from the media folder: The file does not exist.\")\r\n\r\n return False\r\n\r\n except PermissionError:\r\n # Application is running in GUI-mode\r\n if gui_instance is not None:\r\n messagebox.showerror(\"Unable to remove file\", \"Unable to remove file from the media folder. Make \"\r\n \"sure you haven't selected a write-protected folder. 
If the issue persists, \"\r\n \"try changing the media folder and manually removing the media file from the \"\r\n \"current media folder.\")\r\n\r\n # Application is running in CLI or debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"1\" or config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nError: Unable to remove file from the media folder. Make sure you haven't selected a \"\r\n \"write-protected folder. If the issue persists, try changing the media folder and manually \"\r\n \"removing the media file from the current media folder.\")\r\n\r\n return False\r\n\r\n if gui_instance is not None: # The method has been fired by a GUI widget\r\n window.destroy() # Closes the removal window\r\n\r\n # Reloading the media list of the root window\r\n gui_instance.library_items = []\r\n gui_instance.path_frame_parent.destroy()\r\n gui_instance.display_media()\r\n\r\n else: # The method has been fired by using CLI\r\n print(\"\\nThe media file has been removed.\")\r\n\r\n else: # (CLI-only) The user has provided an invalid filename\r\n print(\"\\nError: The specified media file does not exist.\")\r\n return False\r\n\r\n return True", "def on_stop(self):\n self.songs.save_songs(FILE_NAME)", "def delete_song_petition(request, id):\n # instance gets the id from the Song Petition selected\n instance = get_object_or_404(SongPetition, id=id)\n # delete method deletes the instance from the database\n instance.delete()\n # Feedbacj message telling that the petition was deleted \n messages.success(request, \"Petition succesfully deleted\")\n return redirect(\"petition:list\")", "def delete_image(self):\n Image.objects.get(id = self.id).delete()", "def delete(self, name):\n result = self.cm.find_name(name)\n path = result[0]['path']\n delete_path = Path(f'{path}/{name}')\n try:\n os.system(f\"rmdir {delete_path}\")\n result[0]['State'] = 'deleted'\n result = self.update_dict(result)\n except:\n Console.error(\"volume is either not empty or not exist\")\n return result", "def min_cleanup(self):\n self.past_songs_db.close()", "def _clear_audio_files(self):\n try:\n shutil.rmtree(self.audio_file_folder)\n except:\n print('Failure to clear audio files in {self.audio_file_folder}')", "def test_d_remove_database(self):\n\n if os.path.isfile(location):\n os.remove(location)\n\n assert(True)", "def remove(table, id_):\n\n common.toremoveid(\"inventory/inventory.csv\",data_manager.get_table_from_file(\"inventory/inventory.csv\"),id_)", "def delete(self, filename):\n pass", "def handle_delete(uuid):\n location = os.path.join(app.config['UPLOAD_DIRECTORY'], uuid)\n print(uuid)\n print(location)\n shutil.rmtree(location)", "def handle_delete(uuid):\n location = os.path.join(app.config['UPLOAD_DIRECTORY'], uuid)\n print(uuid)\n print(location)\n shutil.rmtree(location)", "def delete_path():\n #TODO delete path from database\n pass", "def unlink(self, link_id):", "async def delete(bot, message):\n reply = message.reply_to_message\n if reply and reply.media:\n msg = await message.reply(\"Processing...⏳\", quote=True)\n else:\n await message.reply('Reply to file with /delete which you want to delete', quote=True)\n return\n\n for file_type in (\"document\", \"video\", \"audio\"):\n media = getattr(reply, file_type, None)\n if media is not None:\n break\n else:\n await msg.edit('This is not supported file format')\n return\n\n result = await Media.collection.delete_one({\n 'file_name': media.file_name,\n 'file_size': media.file_size,\n 'mime_type': media.mime_type\n })\n if result.deleted_count:\n await 
msg.edit('File is successfully deleted from database')\n else:\n await msg.edit('File not found in database')", "def sorl_delete(**kwargs):\n from sorl.thumbnail import delete\n delete(kwargs['file'])", "def delete_rent(self, id):\n allR=self.__loadFromFile()\n\n poz=-1\n for index in range(len(allR)):\n if allR[index].get_id()==id:\n poz=index\n break\n if poz<0:\n raise RepositoryExceptionRent(\"\\n Id doesn't exist. \\n \".upper())\n\n del allR[poz]\n self.__storeToFile(allR)", "def delete(self, *args, **kwargs):\n self.file.storage.delete(self.file.name)\n super().delete(*args, **kwargs)", "def remove_previously_scanned(path):\n query = db_session.query(MediaFiles) \\\n .filter(MediaFiles.path.like(f'{path}%'))\n query.delete(synchronize_session='fetch')\n db_session.commit()\n return query.count()", "def do_destroy(self, arg):\n obj = self.verify(arg, 2)\n if obj:\n del storage.all()[obj]\n storage.save()", "def delete(self):\n os.remove(self.file_path)\n super(VideoFile, self).delete()", "def remove_mediafile(mediafiles_id):\n mediafile = MediaFiles.query.get(mediafiles_id)\n db_session.delete(mediafile)\n db_session.commit()\n return 'Removed MediaFile #%s \"%s\" from database.' % (mediafiles_id, mediafile.path), 'success'", "def delete(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n\n try:\n os.remove(file_path)\n except OSError:\n pass", "def delete(id):\n db = core.connect()\n permIds = [perm[\"_id\"] for perm in permission.permissionsForStream(id)]\n [permission.delete(permId) for permId in permIds]\n del db[id]", "def remove():", "def volume_delete_by_storage(context, storage_id):\n _volume_get_query(context).filter_by(storage_id=storage_id).delete()", "def delete_podcast(_id):\r\n Podcast.query.filter_by(id=_id).delete()\r\n # filter podcast by id and delete\r\n db.session.commit() # commiting the new change to our database\r", "def delete(self, id):\n raise NotImplementedError", "def delete(self, player_id):\n current_player = DBPlayer.query.get(player_id)\n if not current_player:\n return get_response(404, 'Not exists.')\n try:\n db.session.delete(current_player)\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n return get_response(400, \"{e}\".format(e=str(e)))\n return get_response(200, 'done!')", "def delete(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n shutil.rmtree(self.paths['root'])", "def remove_subs_from_store(subs_id, item, lang='en'):\r\n filename = subs_filename(subs_id, lang)\r\n Transcript.delete_asset(item.location, filename)", "def _delete_from_db(self):\r\n if not self._created:\r\n return\r\n\r\n tdb.del_thing(self._type_id, self._id)\r\n cache.delete(thing_prefix(self.__class__.__name__, self._id))", "def delete_item(id: str):\n db.delete(id, kind=endpoint_model)\n return {\"result\": \"ok\"}", "def delete(self):\n self.id = uuid4()\n DataStore.remove_instance(self)", "def delImg(img_name):\n img = Image.objects.raw({\"_id\": img_name}).first()\n img.delete()\n return", "def remove(self):\n self.remove_file()", "def delete(self):\r\n return self.connection.delete_volume(self.id)", "def delete_playlist(self, playlist_name):\n print(\"deletes_playlist needs implementation\")", "def __delitem__(self, userid):\r\n self.removePlayer(userid)", "def do_destroy(self, arg):\n args = shlex.split(arg)\n if len(args) == 0:\n print(\"** class name missing **\")\n elif args[0] in class_type:\n if len(args) > 1:\n key = args[0] + \".\" + args[1]\n if key in 
models.storage.all():\n models.storage.all().pop(key)\n models.storage.save()\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")", "def delete_upload(sender, **kwargs):\n instance = kwargs['instance']\n path_to_delete = '%s/%s.%s' % (instance.path,instance.uuid,instance.ext)\n if not os.path.isdir(path_to_delete):\n os.unlink(path_to_delete)", "def delete(self, filename):\n raise NotImplementedError", "def remove(self, _id):\n self.collection.remove({\"_id\": ObjectId(_id)})\n\n file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),\n \"assets\",\n \"action_selection\",\n str(_id)))\n if os.path.exists(file_path):\n shutil.rmtree(file_path)\n\n return", "def rm(path):\n abs_path = navigate.get_abs_path(path)\n parent, name = navigate.split_path(abs_path)\n access_token = db.get_access_to_file(parent, name)\n if access_token is not None:\n dbox_path = '/' + name\n client = dropbox.client.DropboxClient(access_token)\n client.file_delete(dbox_path)\n db.remove_file(access_token, parent, name)", "async def _remove(self, ctx: commands.Context, index: int):\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('Cannot remove song because the queue is empty.')\n\n ctx.voice_state.songs.remove(index - 1)\n await ctx.message.add_reaction('✅')", "def del_book(username, book_id):\n data = db_books.get_by_id(username, book_id)\n if data['front'] != None:\n try:\n remove(data['front'])\n except FileNotFoundError:\n print(\"No cover to delete\")\n db_books.delete_by_id(username, book_id)\n return 0", "async def on_guild_remove(self, guild):\n self.jfile.data.pop(str(guild.id), None)\n self.jfile.save\n log.guildinfo(\"Removed\", guild.id)", "def playlist_remove(name):\n if name.isdigit() or g.userpl.get(name):\n\n if name.isdigit():\n name = int(name) - 1\n name = sorted(g.userpl)[name]\n\n del g.userpl[name]\n g.message = \"Deleted playlist %s%s%s\" % (c.y, name, c.w)\n g.content = playlists_display()\n save_to_file()\n\n else:\n g.message = F('pl not found advise ls') % name\n g.content = playlists_display()", "def delete_album_by_slug(slug): \n album = get_album_by_slug(slug)\n [x.delete() for x in ContributorAlbum.scan({\"slug\": condition.EQ(album.slug)})]\n album.delete()" ]
[ "0.724472", "0.68608385", "0.68424237", "0.6805456", "0.66874766", "0.6629392", "0.65805244", "0.65743804", "0.6480073", "0.6455035", "0.64183515", "0.6390148", "0.63755953", "0.6350693", "0.63068855", "0.6271331", "0.62588185", "0.6248313", "0.62059045", "0.6190672", "0.61641735", "0.6159572", "0.6143351", "0.6108577", "0.60998833", "0.609651", "0.6076376", "0.6048415", "0.60313904", "0.60286045", "0.600257", "0.59974414", "0.59874266", "0.59834653", "0.597499", "0.5947701", "0.5932179", "0.592616", "0.59164464", "0.59046316", "0.5878242", "0.5871594", "0.586103", "0.58495355", "0.58324826", "0.5823397", "0.58145016", "0.58027023", "0.5801203", "0.5785957", "0.5785578", "0.5782466", "0.57816386", "0.5780961", "0.57784545", "0.5773277", "0.5760327", "0.5756001", "0.57545084", "0.57539874", "0.57452995", "0.5744832", "0.5744832", "0.57369864", "0.57327396", "0.5719225", "0.57175255", "0.5716824", "0.5714788", "0.5703683", "0.5703682", "0.5698582", "0.5697847", "0.56977695", "0.56966263", "0.5694854", "0.568098", "0.56567687", "0.564898", "0.564681", "0.56366014", "0.56276923", "0.5626231", "0.56242204", "0.56132925", "0.560829", "0.5603181", "0.5596601", "0.5593107", "0.55929726", "0.5591923", "0.5589533", "0.55891126", "0.5588589", "0.5587925", "0.5585702", "0.5576172", "0.55734247", "0.55728096", "0.55699676" ]
0.76542425
0
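The deletion example above follows a three-step shape: look up the stored file name by ID, remove the corresponding file from the storage directory, then delete the row with a parameterized query. A compact sketch of the same flow is given below; it assumes a sqlite3 connection and a songs table with id, file_title and form columns (the original uses a MySQL-style cursor with %s placeholders instead), so the names here are assumptions for illustration only.

import os
import sqlite3

def delete_song_by_id(conn, storage_dir, song_id):
    # Step 1: find the stored file name and format for this ID.
    row = conn.execute(
        "SELECT file_title, form FROM songs WHERE id = ?", (song_id,)
    ).fetchone()
    if row is None:
        return False  # no song with that ID
    # Step 2: remove the file from the storage directory.
    path = os.path.join(storage_dir, "{}.{}".format(row[0], row[1]))
    if os.path.exists(path):
        os.remove(path)
    # Step 3: delete the database row.
    with conn:  # commit on success
        conn.execute("DELETE FROM songs WHERE id = ?", (song_id,))
    return True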
Modifies song info in the database
def modify_data(self): song_id = tuple(input("Give the id of the song to be modified:\t")) # Request song ID sql = "SELECT song_title, artist, data, tag FROM songs WHERE id = %s" # Find song with given ID self.cursor.execute(sql, song_id) res = self.cursor.fetchall() if len(res) > 0: while True: sql = "SELECT song_title, artist, data, tag FROM songs WHERE id = %s" # Save current info self.cursor.execute(sql, song_id) result = self.cursor.fetchall() modify = input( "What do you want to modify? [title/artist/(release )date/tags/none]\t") # Request data to be # modified if modify == 'title': # Modify title print('Current title is ' + result[0][0]) new = (input('Give new title:\t'), song_id[0]) sql = "UPDATE songs SET song_title = %s WHERE id = %s" self.cursor.execute(sql, new) self.cnx.commit() print("New title assigned") if modify == 'artist': # Modify artist print('Current artist is ' + result[0][1]) new = (input('Give new artist:\t'), song_id[0]) sql = "UPDATE songs SET artist = %s WHERE id = %s" self.cursor.execute(sql, new) self.cnx.commit() print("New artist assigned") if modify == 'date': # Modify release date print('Current date is ' + result[0][2]) new = (input('Give new date:\t'), song_id[0]) sql = "UPDATE songs SET data = %s WHERE id = %s" self.cursor.execute(sql, new) self.cnx.commit() print("New date assigned") if modify == 'tags': # Modify tags print('Current tags are ' + result[0][3]) new = (input('Give new tags:\t'), song_id[0]) sql = "UPDATE songs SET tag = %s WHERE id = %s" self.cursor.execute(sql, new) self.cnx.commit() print("New tags assigned") if modify == 'none': # Do not modify anything, print the current song info sql = "SELECT song_title, artist, data, tag FROM songs WHERE id = %s" self.cursor.execute(sql, song_id) result = self.cursor.fetchall() print( "Current data for the song with id" + song_id[0] + "are:\ntitle:" + result[0][0] + "\nartist:" + result[0][1] + "\nrelease date:" + result[0][2] + "\ntags:" + result[0][3]) break else: print("Give a valid id...")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_db(self):\n songs = self.db.get_all_songs()\n for song in songs:\n if choose_song(song) == ERROR:\n self.db.delete_song(song)\n files = []\n for song in glob.glob(\"songs\\*.wav\"):\n to_append = song.split('\\\\')[ONE][:-4]\n files.append(to_append)\n for song in files:\n if song not in songs:\n self.db.add_new_song(song)", "def song(self, value):\r\n self._song_id = value\r\n data = Song(value)\r\n self.songtitel = data.songtitel if data.found else \"\"", "def updatesong(song, fpath):\n song.filename = fpath\n song.save()\n return \"[U] %s\\n\" % song.title", "def update_song(_id, _name_of_the_song, _duration_in_number_of_seconds):\r\n song_to_update = Song.query.filter_by(id=_id).first()\r\n song_to_update.name_of_the_song = _name_of_the_song\r\n song_to_update.duration_in_number_of_seconds = _duration_in_number_of_seconds\r\n db.session.commit()", "def update_title_song(title_song, mess_chat_id):\n connection = connection_to_db()\n cursor = connection.cursor()\n\n cursor.execute(\n \"UPDATE song_data SET title_song = %s \"\n \"WHERE user_id = %s;\", (title_song, mess_chat_id)\n )\n\n connection.commit()", "def add_lyrics_and_song_data_to_database(artist, song):\n if exists('song_database.txt'):\n f = open('song_database.txt', 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, song)\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print \"Song '\" + song + \"' already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open('song_database.txt', 'w')\n song_list = [Song_data(artist, song)]\n f.seek(0,0)\n pickle.dump(song_list, f)", "def update():\n\tglobal songList\n\tglobal songs\n\tsongList=os.listdir(\"./music/\")\n\tsongs=['```']\n\tfor song in songList:\n\t\tif len(songs[-1])>1800:\n\t\t\tsongs[-1]+='```'\n\t\t\tsongs.append('```')\n\t\tif '.mp3' in song:\n\t\t\tsongs[-1]+=song.replace('.mp3','')\n\t\t\tsongs[-1]+='\\n'\n\tsongs[-1]+='```'", "def markfile(self, song_id):\n cur = self.conn.cursor()\n query = \"\"\"UPDATE caro_song SET score = -1000 WHERE id=%s\"\"\"\n cur.execute(query, (song_id, ))\n\n self.memcache.delete(\":1:song_%d\" % song_id)\n\n query = \"\"\"DELETE FROM caro_playlistentry WHERE song_id=%s\"\"\"\n cur.execute(query, (song_id, ))", "def enterSong(song):\n\tc, conn = connect()\n\tsql = []\n\n\t# checks if the song is already in the database by hash\n\tif checkHash(song):\n\t\tsql2 = appendSong(song)\n\t\tsql += sql2\n\t\t\n\t\t# checks if the song has an artist\n\t\tif song.artist:\n\t\t\tsql2 = appendArtist(song)\n\t\t\tsql += sql2\n\t\n\t\t# checks if the song has an album\n\t\tif song.album:\n\t\t\tsql2 = appendAlbum(song)\n\t\t\tsql += sql2\n\t\n\t# execute all the queries\n\tfor query in sql:\n\t\tc.execute(query)\n\t\t\n\tconn.commit()\n\treturn sql", "def update(self, song: int) -> None:\n if 0 <= song < len(self.sounds):\n self.sounds[song].play()", "def songUpdate(song,cindex):\r\n if cindex == 0:\r\n song[MpMusic.SONGINDEX] = songGetAlbumIndex(song);\r\n return 0;", "def __insert_song_data(cur, df):\n song_data = (\n df.song_id.values[0],\n df.title.values[0],\n df.artist_id.values[0],\n (df.year.values[0]).item(),\n (df.duration.values[0]).item()\n )\n cur.execute(song_table_insert, song_data)", "def add_song_to_database(artist, name, db):\n if exists(db):\n f = open(db, 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, name);\n if current_entry.id in [previous_entry.id for previous_entry in 
song_list]:\n print str(current_entry) + \" already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open(db, 'w')\n song_list = [Song_data(artist, name)]\n f.seek(0,0)\n pickle.dump(song_list, f)", "def associate_song(self, song):\n self.songs.append(song)", "def add_song(self):\r\n path = input(\"Give file path:\\t\") # Request file path\r\n path = path.replace('\\\\', '/')\r\n if self.path_song_re.match(path) and not self.path_storage_re.match(\r\n path): # Check that the path leads to a song that is not already found in Storage\r\n copy(path, self.p_storage) # Copy the song to the storage directory\r\n file_title, form = path.split(\"/\")[-1].split(\".\") # Save file title and format from the path\r\n sql = \"SELECT COUNT(*) FROM songs WHERE file_title = %s AND form = %s\" # Check the existence of a song\r\n # with the same title and format in the database\r\n self.cursor.execute(sql, (file_title, form))\r\n r = self.cursor.fetchall()\r\n if r[0][0] != 0:\r\n return \"A song with this file name and format already exists!\"\r\n song_title = input(\"Song title:\\t\")\r\n artist = input(\"Artist:\\t\")\r\n data = input(\"Release date:\\t\")\r\n tags = input(\"Associated tags:\\t\")\r\n sql = \"INSERT INTO songs (file_title, song_title, artist, form, data, tag) VALUES (%s, %s, %s, %s, %s, \" \\\r\n \"%s) \" # Insert song into database\r\n columns = (file_title, song_title, artist, form, data, tags)\r\n self.cursor.execute(sql, columns)\r\n self.cnx.commit()\r\n self.cursor.execute(\r\n \"SELECT MAX(ID) FROM songs\")\r\n result = self.cursor.fetchall()\r\n return \"New song ID: \" + str(result[0][0])\r\n else:\r\n return \"Give valid path\"", "def appendSong(song):\n\tsql = []\n\tsql.append(\"INSERT INTO SONGS (filename, path, hash, length, track, \"\n\t\t+ \"genre, date, title, base) VALUES ('\" + song.filename + \"', '\" + song.path \n\t\t+ \"', '\" + str(song.hash) + \"', '\" + str(song.length) + \"', '\" \n\t\t+ '/'.join(song.track) + \"', '\" + '/'.join(song.genre) \n\t\t+ \"', '\" + str(song.year) + \"', '\" + '/'.join(song.title) + \"', '\"\n\t\t+ song.base + \"');\")\n\treturn sql", "def __add_song(self, song, genius_api):\n\t\tentry = {\n\t\t\t'id' : int(song['id']),\n\t\t\t'title' : song['title'],\n\t\t\t'primary_artist' : {\n\t\t\t\t'id' : song['primary_artist']['id'],\n\t\t\t\t'name' : str(song['primary_artist']['name']).lower(),\n\t\t\t\t'url' : song['primary_artist']['url'],\n\t\t\t\t'is_verified' : song['primary_artist']['is_verified'],\n\t\t\t\t},\n\t\t\t'url' : song['url'],\n\t\t\t'lyrics' : genius_api.get_lyrics(song['id'], song['url'])\n\t\t\t}\n\t\tif song['album']:\n\t\t\tentry['album'] = {\n\t\t\t\t'id': song['album']['id'], \n\t\t\t\t'full_title': song['album']['full_title'], \n\t\t\t\t'name': song['album']['name'], \n\t\t\t\t'artist': song['album']['artist']['id']\n\t\t\t\t}\n\t\tif song['release_date']:\n\t\t\tentry['release_date'] = song['release_date']\n\t\tif len(song['featured_artists']) > 0:\n\t\t\tfeatured_artists = list()\n\t\t\tfor artist in song['featured_artists']:\n\t\t\t\tart = {\n\t\t\t\t\t'id' : artist['id'],\n\t\t\t\t\t'name' : artist['name'].lower()\n\t\t\t\t\t}\n\t\t\t\tfeatured_artists.append(art)\n\t\t\tentry['featured_artists'] = featured_artists\n\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\tself.db.songs.insert_one(entry)", "def set_artist_song_entry(self, artist, song):\n self.artist_name.set_text(artist)\n self.song_name.set_text(song)", "def 
process_song_file(cur, filepath):\n # open song file\n df = pd.read_json(filepath,lines=True)\n\n # insert song record\n __insert_song_data(cur, df)\n \n # insert artist record\n __insert_artist_data(cur, df)", "def test_update_song():\n\n target_song = {\n \"artist\": \"Heng\",\n \"song\": \"I can do all things\",\n \"genre\": \"Hip-Hop\",\n \"lyrics\": \"Like Steph said I can do all things In...\",\n \"year\": get_timestamp_year(),\n \"timestamp\": get_timestamp()\n }\n\n update_song(target_song)\n\n updated_song_lyrics = \"Like Steph said I can do all things In...\"\n\n given_artist = \"Heng\"\n given_song_title = \"I can do all things\"\n\n song_data = get_one_song(given_artist, given_song_title)\n\n print(\"updated_song_data['lyrics']: \", song_data['lyrics'])\n\n assert song_data['lyrics'] == updated_song_lyrics", "def song_changed(self, song):\n if song == NOTPLAYING:\n print(\"Not playing\")\n else:\n print(\"Changed to: {} - {}\". format(song.get('artist', 'Unknown artist'), song.get('title', 'Unknown title')))\n self._publish({TAGS[tag]: value for (tag, value) in song.items() if tag in TAGS})", "def process_song_file(cur, filepath):\n # open song file\n df = get_file_df(filepath)\n\n # insert song record\n song_data = songs_data = [df.loc[0].song_id, df.loc[0].title, df.loc[0].artist_id, int(df.loc[0].year), int(df.loc[0].duration)]\n cur.execute(song_table_insert, song_data)\n \n # insert artist record\n artist_data = [df.loc[0].artist_id, df.loc[0].artist_name, df.loc[0].artist_location, df.loc[0].artist_latitude, df.loc[0].artist_longitude] \n\n cur.execute(artist_table_insert, artist_data)", "def update_author_song(author_song, mess_chat_id):\n connection = connection_to_db()\n cursor = connection.cursor()\n\n cursor.execute(\n \"UPDATE song_data SET author_song = %s \"\n \"WHERE user_id = %s;\", (author_song, mess_chat_id)\n )\n\n connection.commit()", "def add_song(self, song):\n self.songs.append(song)", "def set_artists(audio: EasyID3, artists):\r\n audio['artist'] = artists\r\n audio.save()", "def process_song_file(cur, filepath):\r\n\r\n \"\"\" open song file\r\n drop duplicates\r\n set NAs to Zero \"\"\"\r\n df = pd.read_json(filepath, lines=True)\r\n df.drop_duplicates(subset=['song_id','artist_id'], keep = 'first')\r\n df['artist_latitude'] = df['artist_latitude'].fillna(0)\r\n df['artist_longitude'] = df['artist_longitude'].fillna(0)\r\n\r\n\r\n \"\"\" Extract columns for dataframe for song table\r\n drop duplicates before performing insert\r\n convert dataframe to a list for insert \"\"\"\r\n\r\n song_data = (df[['song_id','title','artist_id','year','duration']])\r\n song_data.drop_duplicates(subset='song_id',keep ='first',inplace = True)\r\n song_data = (song_data.values).tolist()\r\n song_data = song_data[0]\r\n # insert song record\r\n cur.execute(song_table_insert,song_data)\r\n\r\n \"\"\" Extract columns for dataframe for artist table,\r\n drop duplicates before performing insert\r\n convert dataframe to a list for insert \"\"\"\r\n\r\n artist_data = (df[['artist_id','artist_name','artist_location','artist_latitude','artist_longitude']])\r\n artist_data.drop_duplicates(subset='artist_id',keep ='first',inplace = True)\r\n artist_data = (artist_data.values).tolist()\r\n artist_data = artist_data[0]\r\n # insert artist record\r\n cur.execute(artist_table_insert, artist_data)", "def process_song_file(cur, filepath):\n \n # open song file\n \n df = pd.read_json(filepath,lines=True)\n \n # insert song record\n song_data = df[['song_id', 'title', 
'artist_id','year',\n 'duration']].values[0].tolist()\n cur.execute(song_table_insert, song_data)\n \n # insert artist record\n artist_data = df[['artist_id','artist_name',\n 'artist_location', 'artist_latitude',\n 'artist_longitude']].values[0].tolist()\n cur.execute(artist_table_insert, artist_data)", "def process_song_file(cur, filepath):\n # open song file\n data_frame = pd.read_json(filepath, lines=True)\n\n # insert song record\n song_data = list(data_frame[['song_id', 'title', 'artist_id', 'year', 'duration']].values[0])\n cur.execute(song_table_insert, song_data)\n\n # insert artist record\n artist_data = list(\n data_frame[['artist_id', 'artist_name', 'artist_location',\n 'artist_latitude', 'artist_longitude']].values[0])\n cur.execute(artist_table_insert, artist_data)", "def set_nowplaying_metadata(self, track, album, artist):\n\n\t\tparts = [artist[:30], album[:30], track[:30]]\n\t\tself._send_message(\"MUSIC_CONTROL\", self._pack_message_data(16, parts))", "def set_nowplaying_metadata(self, track, album, artist):\n\n\t\tparts = [artist[:30], album[:30], track[:30]]\n\t\tself._send_message(\"MUSIC_CONTROL\", self._pack_message_data(16, parts))", "def update_path(self, fname, sig):\n query = \"\"\"UPDATE caro_song SET filename=%s WHERE uniq=%s\"\"\"\n cur = self.conn.cursor()\n try:\n cur.execute(query, (fname, sig,))\n except:\n pass", "def update_metadata(self, item, feat_part, drop_feat):\n # In all cases, update the artist fields.\n self._log.info('artist: {0} -> {1}', item.artist, item.albumartist)\n item.artist = item.albumartist\n if item.artist_sort:\n # Just strip the featured artist from the sort name.\n item.artist_sort, _ = split_on_feat(item.artist_sort)\n\n # Only update the title if it does not already contain a featured\n # artist and if we do not drop featuring information.\n if not drop_feat and not contains_feat(item.title):\n feat_format = self.config['format'].as_str()\n new_format = feat_format.format(feat_part)\n new_title = f\"{item.title} {new_format}\"\n self._log.info('title: {0} -> {1}', item.title, new_title)\n item.title = new_title", "def update_analysis(pk):\n\n track = Track.query.get(pk)\n if track is None:\n # TODO: Log This\n return False\n\n # Call Echo Nest\n try:\n analysis = get_track_analysis(track.spotify_uri)\n except EchoNestError:\n # TODO: Log This\n return False\n\n # commit analysis data to Track model\n track.audio_summary = analysis\n\n db.session.add(track)\n db.session.commit()\n\n return True", "def metadata_updated(self, session):\n\t\tself.playlist_container = session.playlist_container()\n\t\tself.metadata_updated_callback(self)", "def addSong(self, title, filename):\n #make sure that the filename is valid? 
or does this happen outside?\n self.__songDictionary[title]=filename\n return True", "def process_song_file(cur, filepath):\n\n # open song file\n df = pd.read_json(filepath, lines=True)\n\n # insert song record\n song_data = list(\n df[['song_id', 'artist_id', 'title', 'year', 'duration']].values[0])\n cur.execute(song_table_insert, song_data)\n\n # insert artist record\n artist_data = list(df[['artist_id', 'artist_name', 'artist_location',\n 'artist_latitude', 'artist_longitude']].values[0])\n cur.execute(artist_table_insert, artist_data)", "def test_create_update_Song():\n\n target_song = {\n \"artist\": \"Heng\",\n \"song\": \"I can do all things (Remix)\",\n \"genre\": \"Hip-Hop\",\n \"lyrics\": \"Like Steph said I can do all things...\",\n \"year\": get_timestamp_year(),\n \"timestamp\": get_timestamp()\n }\n\n given_artist = \"Heng\"\n given_song_title = \"I can do all things (Remix)\"\n\n create_update_song(target_song)\n\n song_data = get_one_song(given_artist, given_song_title)\n\n assert song_data['artist'] == given_artist\n assert song_data['song'] == given_song_title", "def test_api_can_update_music(self):\n change_music = {'name': 'Music2'}\n res = self.client.put(\n reverse('details', kwargs={'pk':music.id}),\n change_music,\n format = 'json'\n )\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def add_songs(self, name, year, title):\n\n album_found = find_object(name, self.album)\n if album_found is None:\n print(\"Not Found \" + name)\n album_found = Album(name, year, self.name)\n self.add_album(album_found)\n else:\n print(\"Found album \"+name)\n\n album_found.add_songs(title)", "def change_metadata(self, **kwargs):\n metadata = self.state.get_player_state(PLAYER_IDENTIFIER)\n\n # Update saved metadata\n for key, value in kwargs.items():\n setattr(metadata, key, value)\n\n # Create a temporary metadata instance with requested parameters\n change = PlayingState(**kwargs)\n self.state.item_update(change, PLAYER_IDENTIFIER)", "def construct_metadata(song):\n print(song) #temp", "def write_to_db(self, df):\n #query for the history data\n query = \"INSERT IGNORE INTO spotify_history (Time, Song_Name, Spotify_ID, Spotify_URI, Popularity, Object_Type) VALUES (%s, %s, %s, %s, %s, %s)\"\n \n val = []\n for index, row in df.iterrows():\n #some songs don't have milisecond, so the dateformat needs to be adapted\n try:\n timestamp = datetime.strptime(row[\"timestamp\"], '%Y-%m-%dT%H:%M:%S.%fZ')\n except:\n datetime.strptime(row[\"timestamp\"], '%Y-%m-%dT%H:%M:%SZ')\n finally:\n val.append((timestamp, \n row[\"name\"], \n row[\"id\"], \n row[\"uri\"], \n row[\"popularity\"], \n row[\"object_type\"]))\n\n self.cursor.executemany(query, val)\n print(\"New Songs in the History {}\".format(self.cursor.rowcount))\n \n #query for the song properties\n query = \"INSERT IGNORE INTO song_data (Spotify_ID, Spotify_URI, Artist, Album, Duration, Acousticness, Danceability, Energy, Instrumentalness, key_spotify, Liveness, Loudness, Mode, Speechiness, Tempo, Time_Signature, Valence) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n val = []\n for index, row in df.iterrows():\n val.append((row[\"id\"], \n row[\"uri\"], \n row[\"artist\"], \n row[\"album\"], \n row[\"duration_ms\"], \n row[\"acousticness\"],\n row[\"danceability\"],\n row[\"energy\"],\n row[\"instrumentalness\"],\n row[\"key\"],\n row[\"liveness\"],\n row[\"loudness\"],\n row[\"mode\"],\n row[\"speechiness\"],\n row[\"tempo\"],\n row[\"time_signature\"],\n row[\"valence\"])\n )\n \n print(\"New Songs in the 
database: {}\".format(self.cursor.rowcount))\n self.cursor.executemany(query, val)\n \n self.connection.commit()", "def update_records(self, something):\n print(\"Some logic (not shown) to update database of units\")", "def song(song_id):\n return process_input(song_id) #jsonify(recomendations)", "def save_track(self, track: Track):\n if track.internal_id is not None:\n self.logging.info(f\"updated track: {track.track_id} for {track.title}\")\n self.sess.query(Track).filter(Track.track_id == track.track_id).update(track)\n elif self.sess.query(self.sess.query(exists().where(and_(Track.title == track.title,\n Track.artist == track.artist,\n Track.url == track.url)).exists()).scalar()) \\\n .scalar():\n\n self.logging.info(f\"updated track w/o id: {track.title} by {track.artist}\")\n self.sess.query(Track).filter(and_(Track.title == track.title,\n Track.artist == track.artist,\n Track.url == track.url)).update(track)\n else:\n self.logging.info(f\"added track: {track.title} by {track.artist}\")\n if track.category_list is not None:\n for category in track.category_list:\n cat = self.fetch_category(category)\n track.categories.append(cat)\n cat.tracks.append(track)\n if track.genre_list is not None:\n for genre in track.genre_list:\n gen = self.fetch_genre(genre)\n track.genres.append(gen)\n gen.tracks.append(track)\n track.internal_id = uuid4().hex\n self.sess.add(track)\n self.sess.commit()", "def add_song_to_playlist(self):\n #populate our songs dictionary\n self.get_liked_videos()\n\n #collect all of uri\n uris = []\n for song,info in self.all_song_info.items():\n uris.append(info[\"spotify_uri\"])\n\n #create a new playlist\n playlist_id = self.create_playlist()\n\n #add all songs into new playlist\n\n #Spotipy can only add 100 songs at a time to a playlist that is why this method is taken\n g = len(uris)\n if g > 100:\n s = 0\n e = 99\n while g > 100:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:e])\n g -= 100\n s = e + 1\n e += 100\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:])\n else:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris)", "def update_info(self):\n\n with Player.client as client:\n db = client.game_db\n user = db.players.find_one({\"id\": self.id})\n db.players.update({\"_id\": user[\"_id\"]}, {\"$inc\": {\"games_num\": 1},\n \"$set\": {\"rating\": self.rating}})", "def load(self, song):\n self.currentSongName = song\n self.currentSong = pygame.mixer.music.load(song)", "def set_album_artist(audio: EasyID3, album_artist):\r\n audio['albumartist'] = album_artist\r\n audio.save()", "def set_album(audio: EasyID3, album):\r\n audio['album'] = album\r\n audio.save()", "def process_song_file(cur, filepath):\n\n df = pd.read_json(filepath, lines=True)\n\n song_data = df[['song_id', 'title',\n 'artist_id', 'year', 'duration']].values[0]\n cur.execute(song_table_insert, song_data)\n\n artist_data = df[['artist_id', 'artist_name', 'artist_location',\n 'artist_latitude', 'artist_longitude']].values[0]\n cur.execute(artist_table_insert, artist_data)", "def update(self, media):\n update_query = \"\"\"UPDATE %s\n SET\n filename=?,\n uploader=?,\n width=?,\n height=?,\n size=?,\n quality_image=?,\n featured_picture=?,\n valued_image=?,\n timestamp=?\n WHERE pageid=?\"\"\" % MediaCollection.COLLECTIONS_TABLE\n fields = list(media.totuple())\n fields.append(fields[0])\n fields.remove(fields[0])\n fields = tuple(fields) # generate tuple with 
pageid as last element\n self.cursor.execute(update_query, fields)\n self.connection.commit()", "def process_song_file(cur, filepath):\n # open song file\n\n inputData = pd.read_json(filepath, lines=True)\n song_df = pd.DataFrame(data=inputData)\n song_df.head()\n \n\n # insert song record\n song_data = song_df[['song_id', 'title', 'artist_id','year','duration']].values\n for i, row in song_df.iterrows():\n cur.execute(song_table_insert, song_data[i])\n \n \n # insert artist record\n \n artist_data = song_df[['artist_id', 'artist_name', 'artist_location','artist_latitude','artist_longitude']].values\n for i, row in song_df.iterrows():\n cur.execute(artist_table_insert, artist_data[i])", "def process_song_file(cur, filepath):\n df = pd.read_json(filepath, lines=True)\n \n # insert artist record\n artist_data = df[['artist_id', 'artist_name', 'artist_location',\n 'artist_latitude', 'artist_longitude']].values[0].tolist()\n cur.execute(artist_table_insert, artist_data)\n \n # insert song record\n song_data = df[['song_id', 'title', 'artist_id',\n 'year', 'duration']].values[0].tolist()\n cur.execute(song_table_insert, song_data)", "def process_song_file(cur, filepath: str) -> None:\n # open song file\n df = pd.read_json(filepath, lines=True)\n\n # insert song record\n for song_record in df[\n [\n \"song_id\",\n \"title\",\n \"artist_id\",\n \"year\",\n \"duration\",\n ]\n ].values:\n cur.execute(sql_queries.song_table_insert, song_record)\n\n # insert artist record\n for artist_record in df[\n [\n \"artist_id\",\n \"artist_name\",\n \"artist_location\",\n \"artist_latitude\",\n \"artist_longitude\",\n ]\n ].values:\n cur.execute(sql_queries.artist_table_insert, artist_record)", "async def async_update_track_info(self):\n track_info = await self._raumfeld.async_get_track_info(self._rooms)\n if track_info:\n self._media_duration = track_info[\"duration\"]\n self._media_image_url = track_info[\"image_uri\"]\n self._media_title = track_info[\"title\"]\n self._media_artist = track_info[\"artist\"]\n self._media_album_name = track_info[\"album\"]\n self._media_album_artist = track_info[\"artist\"]\n self._media_track = track_info[\"number\"]\n self._media_position = track_info[\"position\"]\n self._media_position_updated_at = utcnow()", "def add_song(self, name, year, title):\n album_found = find_object(name, self.albums)\n if album_found is None:\n album_found = Album(name, year, self.name)\n self.add_album(album_found)\n album_found.add_song(title)", "def set_song_fingerprinted(self, song_id):\n print(\"song_id to set fingerprinted: \",song_id)\n record = {\n \"doc\": {\n FIELD_FINGERPRINTED: True\n },\n \"doc_as_upsert\": True \n }\n self.cursor.update(index=SONGS_INDEXNAME, id=song_id, body=record)", "def process_song_file(cur, filepath):\n df = pd.read_json(filepath, typ='series')\n\n columns = ['song_id', 'title', 'artist_id', 'year', 'duration']\n song_data = df[[*columns]]\n cur.execute(song_table_insert, song_data)\n\n columns = ['artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude']\n artist_data = df[[*columns]]\n cur.execute(artist_table_insert, artist_data)", "def adds_new_songs_to_db_by_en_id(yt_playlist_query):\n # yt_playlist_query returned by gets_playlist_history(en_playlist), api_helper.py\n\n for item in yt_playlist_query:\n en_song_id = item['en_song_id']\n is_en_song_id_in_db = db.session.query(exists().where(Song.en_song_id == en_song_id)).scalar()\n if is_en_song_id_in_db == False:\n en_artist_id = item['en_artist_id']\n artist_id = 
db.session.query(Artist.artist_id).filter(Artist.en_artist_id == en_artist_id).one()\n song_info = Song(en_song_id=en_song_id,\n song_title=item['song_title'],\n artist_id=artist_id)\n db.session.add(song_info)\n db.session.flush", "def currently_playing_change(song):\n socketio.emit('currently_playing_changed', song.to_dict())", "def FromMeta(self, artist, album, track_number, song):\n\n # Cleanup input\n artist = artist.rstrip()\n album = album.rstrip()\n song = song.rstrip()\n\n self.persistant = self.db.GetOneRow('select * from tracks where '\n 'artist=%s and album=%s '\n 'and number=%d and song=%s;'\n %(sql.FormatSqlValue('artist',\n artist),\n sql.FormatSqlValue('album',\n album),\n track_number,\n sql.FormatSqlValue('song',\n song)))\n\n if not self.persistant:\n self.persistant = {}\n self.persistant['artist'] = artist\n self.persistant['album'] = album\n self.persistant['number'] = track_number\n self.persistant['song'] = song\n\n self.persistant['plays'] = 0\n self.persistant['skips'] = 0\n self.persistant['creation_time'] = datetime.datetime.now()", "def _set_music_id(cls, data):\n document_id = data.get(\"_id\")\n if document_id:\n data[\"music_id\"] = document_id\n return data", "def add_song(self, song: Song) -> None:\n\n self.songs.append(song)\n self.set_song_count(len(self.songs))", "def delete_song(song):\n logging.debug('{CRUD_operations} BEGIN function delete_song()')\n logging.debug('{CRUD_operations} Data received: song: %s', song)\n song.is_deleted = True\n logging.debug('{CRUD_operations} END function delete_song()')", "def add_lyrics(self):\n\n conn = self.conn\n conn.text_factory = str\n c = conn.cursor()\n\n c.execute(\"SELECT songs.id, artist, title, url FROM songs LEFT JOIN lyrics ON songs.id = lyrics.song_id WHERE lyrics.song_id IS NULL\")\n all_songs_to_scrape = c.fetchall()\n for song in all_songs_to_scrape:\n song_id = song[0]\n song_artist = song[1]\n song_title = song[2]\n song_url = song[3]\n print(\"Looking for lyrics for \" + song_title + \" by \" + song_artist)\n try:\n lyrics = pygenius_songs.searchURL(song_url, 'lyrics')\n for lyric in lyrics:\n for line in lyric.split('\\n'):\n c.execute('INSERT INTO lyrics(song_id, line) VALUES (?,?)', (song_id, line))\n conn.commit()\n except Exception as e:\n print(e)\n print song_url\n print(\"Exception caught! ... 
continuing.\")\n pass", "def __add_lyric(self, song, genius_api):\n\t\tentry = {\n\t\t\t'song_id' : int(song['id']),\n\t\t\t'song_title' : song['title'],\n\t\t\t'url' : song['url']\n\t\t\t}\n\t\ttry:\n\t\t\tentry['lyrics'] = genius_api.get_lyrics(song['id'], song['url'])\n\t\texcept:\n\t\t\tentry['lyrics'] = ''\t\n\t\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\ttry:\n\t\t\tself.db.lyrics.insert_one(entry)\n\t\texcept errors.DuplicateKeyError:\n\t\t\tpass", "def process_song_file(cur, filepath):\r\n\r\n\r\n\r\n\r\n df=pd.read_json(filepath,lines=True)\r\n for j,row in df.iterrows():\r\n n, artist_id, artist_latitude, artist_longitude, artist_location, artist_name, song_id, title, duration, year =row\r\n cur.execute(song_table_insert,[song_id,title,artist_id,year,duration])\r\n\r\n cur.execute(artist_table_insert, [artist_id, artist_name, artist_location,artist_latitude,artist_longitude])", "def update_mod_database():\r\n\tmydb = database()\r\n\tcursor = mydb.cursor()\r\n\tmod_path = \"data/stats.json\"\r\n\tinfo = dict_from_json_file(mod_path)\r\n\tcursor.execute(\"DELETE FROM poe.mod\")\t# Clear table\r\n\tfor mod_type in info[\"result\"]:\r\n\t\tfor mod in mod_type[\"entries\"]:\r\n\t\t\tmod_id = mod[\"id\"]\r\n\t\t\tmod_text = mod[\"text\"]\r\n\t\t\tmod_type = mod[\"type\"]\r\n\t\t\t# If the mod has options we need to add these to the options table\r\n\t\t\tif \"option\" in mod:\r\n\t\t\t\tquery = \"INSERT INTO poe.mod (id, text, type, options) VALUES (%s, %s, %s, %s);\"\r\n\t\t\t\tval = (mod_id, mod_text, mod_type, 1)\r\n\t\t\t\tcursor.execute(query, val)\r\n\t\t\t\tfor option_mod in mod[\"option\"][\"options\"]:\r\n\t\t\t\t\toption_mod_id = option_mod[\"id\"]\r\n\t\t\t\t\toption_mod_text = option_mod[\"text\"]\r\n\t\t\t\t\tmod_query \t= \"INSERT INTO poe.options (mod_id, id, text) VALUES (%s, %s, %s)\"\r\n\t\t\t\t\tmod_val \t= (mod_id, option_mod_id, option_mod_text)\r\n\t\t\t\t\tcursor.execute(mod_query, mod_val)\r\n\t\t\t# If there are no mods, simply add the mod to the table\r\n\t\t\telse:\r\n\t\t\t\tquery = \"INSERT INTO poe.mod (id, text, type) VALUES (%s, %s, %s);\"\r\n\t\t\t\tval = (mod_id, mod_text, mod_type)\r\n\t\t\t\tcursor.execute(query, val)\r\n\tmydb.commit()", "def incorporate_song_chords_from_external_source(source_path):\r\n number_of_songs_read = 0\r\n number_of_songs_with_existing_chords = 0\r\n songs_to_chord_symbols = pickle.load(open(source_path,'r'))\r\n start_time = time.time()\r\n #'\\r' endings result from windows and linux handling line breaks differently (external source from linux mahcine)\r\n songs_to_chord_symbols = {(key[0],key[1].replace('\\r','')):value for key,value in songs_to_chord_symbols.items()}\r\n# songs_to_chord_symbols = {(title,artist):value for (title,artist),value in songs_to_chord_symbols.items() if \\\r\n# Song.objects.filter(title=title,artist=artist,chords__isnull=True).exists()}#skip already chorded\r\n print len(songs_to_chord_symbols), \"songs to update with chords from file\", source_path.split(\"/\")[-1]\r\n #create a mapping from chord symbols to database chords, for less db access in the iterations\r\n symbols_to_db_chords = dict()\r\n for symbol_list in songs_to_chord_symbols.values():\r\n for symbol in symbol_list: \r\n if not symbol in symbols_to_db_chords: \r\n try:\r\n root, notes = decode(symbol)\r\n except Exception as e:\r\n# print e\r\n break #skip this symbol list, it belongs to a song that will not get saved eventually\r\n symbols_to_db_chords[symbol]=Chord.objects.get_or_create(root=root, 
notes=notes,symbol=symbol)[0]\r\n for title,artist in songs_to_chord_symbols.keys():\r\n try:\r\n song = Song.objects.get(title=title, artist=artist) \r\n if song.chords.exists():\r\n continue\r\n chord_symbols_vector = songs_to_chord_symbols[title,artist]\r\n chord_vector = [symbols_to_db_chords[symbol] for symbol in chord_symbols_vector]\r\n with transaction.commit_on_success():\r\n for index,chord in enumerate(chord_vector):\r\n Song_chord_index.objects.create(song=song, chord=chord, index=index) \r\n number_of_songs_read += 1\r\n if not (number_of_songs_read % 100): print \"saved chords for\", \\\r\n number_of_songs_read, \"songs.\"\r\n except KeyError as e:\r\n song.previously_failed_chords = True\r\n song.save()\r\n continue\r\n except Exception as e:#assume exception was before chord linking phase.\r\n song.previously_failed_chords = True\r\n song.save()\r\n print e\r\n continue\r\n print \"done updating\", number_of_songs_read, ' successfully,', number_of_songs_with_existing_chords, \\\r\n \" of them already updated. Time taken\", time_elapsed_minutes(start_time)", "def process_song_file(cur, filepath):\n # open song file\n df = pd.read_json(filepath, lines=True)\n\n # insert song record\n song_data = list(df[[\"song_id\", \"title\", \"artist_id\", \"year\", \"duration\"]].values[0])\n try:\n cur.execute(song_table_insert, song_data)\n except psycopg2.Error as e:\n print(\"Error: Unable to insert record in songs table\")\n print(e)\n\n # insert artist record\n artist_data = list(df[[\"artist_id\", \"artist_name\", \"artist_location\", \"artist_latitude\", \"artist_longitude\"]].values[0])\n try:\n cur.execute(artist_table_insert, artist_data)\n except psycopg2.Error as e:\n print(\"Error: Unable to insert record in artists table\")\n print(e)", "def put(self):\n if not self.other_raw_names:\n self.other_raw_names = []\n if self.raw_movie_name not in self.other_raw_names:\n self.other_raw_names.append(self.raw_movie_name)\n \n if self.movie:\n self.genres = self.movie.genres\n \n return db.Model.put(self)", "def save_changes(price, form, new=False):\n # Get data from form and assign it to the correct attributes\n # of the SQLAlchemy table object\n SKU = SKU()\n SKU.item_number = form.SKU.data\n\n price.item_number = item_number\n price.list_price = form.list_price.data\n price.price_effective_date = form.price_effective_date.data\n #album.publisher = form.publisher.data\n #album.media_type = form.media_type.data\n\n if new:\n # Add the new album to the database\n db_session.add(album)\n\n # commit the data to the database\n db_session.commit()", "def set_track_info(self, payload):\n self.raw_trackname = payload['currentTrack'].get('title', \"\")\n self.artist = payload['currentTrack'].get('artist', \"\")\n self.album = payload['currentTrack'].get('album', \"\")\n self.station = payload['currentTrack'].get('stationName', \"\")\n\n if sonos_settings.artist_and_album_newlook :\n if self.raw_trackname.startswith(\"x-sonosapi-\") :\n self.raw_trackname = self.station\n\n if self.artist == self.station and self.type == \"radio\" :\n if self.raw_trackname.count(\"~\") : c = \"~\"\n elif self.raw_trackname.count(\"˗\") : c = \"˗\"\n elif self.raw_trackname.count(\"*\") : c = \"*\"\n elif self.raw_trackname.count(\"|\") : c = \"|\"\n elif self.raw_trackname.count(\" - \") : c = \" - \"\n elif self.raw_trackname.count(\" / \") : c = \" / \"\n else : c = \"\"\n\n if c :\n oldstr=self.raw_trackname.casefold()\n splitstr = oldstr.split(c)\n self.artist = ' '.join(word[0].upper() + word[1:] for 
word in splitstr[0].split())\n self.raw_trackname = ' '.join(word[0].upper() + word[1:] for word in splitstr[1].split())\n if c == \"~\" :\n self.album = ' '.join(word[0].upper() + word[1:] for word in splitstr[2].split())\n else :\n self.album = \"\"\n# self.album = self.station\n\n # Abort update if all data is empty\n if not any([self.album, self.artist, self.duration, self.station, self.raw_trackname]):\n _LOGGER.debug(\"No data returned by the API, skipping update\")\n return None\n\n if self.type == \"radio\" and not self.station:\n # if not then try to look it up (usually because its played from Alexa)\n self.station = find_unknown_radio_station_name(self.raw_trackname)\n\n # Clear uninteresting tracknames\n if self.raw_trackname.startswith(\"x-sonosapi-\") or self.raw_trackname.endswith(\".m3u8\"):\n self.trackname = \"\"\n else:\n self.trackname = self.raw_trackname\n\n\n track_id = self.artist\n if self.trackname:\n track_id += f\" - {self.trackname}\"\n if self.album:\n track_id += f\" ({self.album})\"\n if self.duration:\n track_id += f\" - {timedelta(seconds=self.duration)}\"\n if self.station:\n track_id += f\" [{self.station}]\"\n\n return track_id", "def view_songs_push(self):\n #clear all data first\n self.model.removeRows(0, self.model.rowCount())\n songs = glob.glob(\"Fixed/*/*/*\")\n for song in songs:\n data = mutagen.File(song, easy=True)\n track = get_track(data['title'][0], data['artist'][0])\n self.add_track_to_box(track)", "def set_meta_mp3(file):\n\n list_str_prop_mp3 = ['album', 'artist', 'title']\n list_other_prop_mp3 = ['comment', 'genre', 'year']\n dict_file_mp3 = {}\n # For each string properties into the tag\n for prop in list_str_prop_mp3:\n # If the tag exist (i.e it's not empty for the music file)\n if file.tag.d.has_key(prop.upper()):\n # We delete spe char and we format it\n dict_file_mp3[prop] = delete_spe_char_and_format(file.tag[prop.upper()])\n else:\n # Or we define it's value as 'Unknow ' + prop\n # For instance 'Unknow Artist'\n dict_file_mp3[prop] = 'Unknow ' + prop.capitalize()\n # For each other properties\n for prop in list_other_prop_mp3:\n if file.tag.d.has_key(prop.upper()):\n # We just copy them\n dict_file_mp3[prop] = file.tag[prop.upper()]\n else:\n dict_file_mp3[prop] = ''\n # To try to find the tracknumber, we need 'title'\n if dict_file_mp3.has_key('title'): \n # But before, we delete the duplicate\n list_duplicate = [dict_file_mp3['artist'], dict_file_mp3['album']]\n # Now we delete the duplicates\n dict_file_mp3['title'] = delete_duplicate(dict_file_mp3['title'], list_duplicate)\n # So we are able to find the tracknumber\n number = ''\n # If ID3 already find it\n if file.tag.d.has_key(\"TRACKNUMBER\"):\n number = file.tag[\"TRACKNUMBER\"]\n # Else we try to find by ourself\n else:\n number = find_tracknumber(dict_file_mp3['title'])\n # If we found a tracknumber, we delete it from 'title'\n if number:\n dict_file_mp3['title'] = delete_duplicate(dict_file_mp3['title'], [number])\n dict_file_mp3['tracknumber'] = number\n # And we format the new title\n dict_file_mp3['title'] = build_track_name(dict_file_mp3['title'], number)\n dict_file_mp3['name'] = dict_file_mp3['title'] + '.mp3'\n dict_file_mp3['path'] = build_path([dict_file_mp3['artist'], dict_file_mp3['album']])\n return dict_file_mp3", "def test_update(self):\n track = Track(artist='Artist', album='Album', title='Title',\n ensemble='Ensemble', conductor='Conductor', composer='Composer')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n 
self.assertNotEqual(pk, None)\n self.assertNotEqual(pk, 0)\n self.assertEqual(self.get_track_count(), 1)\n track_row = self.get_track_by_id(pk)\n self.assertEqual(track_row['artist'], 'Artist')\n self.assertEqual(track_row['album'], 'Album')\n self.assertEqual(track_row['title'], 'Title')\n self.assertEqual(track_row['ensemble'], 'Ensemble')\n self.assertEqual(track_row['composer'], 'Composer')\n self.assertEqual(track_row['conductor'], 'Conductor')\n\n # Now update the object and save out, and test.\n track.artist = 'Artist 2'\n track.album = 'Album 2'\n track.title = 'Title 2'\n track.ensemble = 'Ensemble 2'\n track.composer = 'Composer 2'\n track.conductor = 'Conductor 2'\n track.update(self.app.db, self.app.curs)\n self.assertEqual(self.get_track_count(), 1)\n track_row = self.get_track_by_id(pk)\n self.assertEqual(track_row['artist'], 'Artist 2')\n self.assertEqual(track_row['album'], 'Album 2')\n self.assertEqual(track_row['title'], 'Title 2')\n self.assertEqual(track_row['ensemble'], 'Ensemble 2')\n self.assertEqual(track_row['composer'], 'Composer 2')\n self.assertEqual(track_row['conductor'], 'Conductor 2')", "def save_all(self):\n data = []\n for key, albums in self.albums_to_update.items():\n self.albums_to_save[key] += albums\n with wait_cursor(self._parent):\n for artist, albumdata in self.albums_to_save.items():\n if not albumdata:\n continue\n artistid = self.artist_map[artist]\n data = []\n for name, year, key, is_live, tracks in albumdata:\n if key == 'X':\n key = 0\n data.append((key, name, year, is_live, tracks))\n albums = dmla.update_albums_by_artist(artistid, data)\n albums_map_lookup = {build_album_name(x): x.id for x in albums}\n for c_name, value in self.albums_map[artist].items():\n a_name, id = value\n try:\n test = albums_map_lookup[a_name]\n except KeyError:\n continue\n if id != test:\n self.albums_map[artist][c_name] = (a_name, test)\n self.albums_to_save.clear()\n self.albums_to_update.clear()\n self._parent.albums_map = self.albums_map\n self._parent.albums_map.update({x: {} for x, y in self.albums_map.items()\n if not y})\n ## self.last_handled = None\n save_appdata([self._parent.artist_map, self._parent.albums_map])\n self.refresh_screen(self.artist_list.currentIndex())", "def onair(self, song, score):\n for key in ['title', 'artist', 'score', 'full']:\n self.memcache.delete(\"%s:1:onair_%s\" % (self.prefix, key))\n\n try:\n datas = mutagen.File(song, easy=True)\n except:\n pass\n\n try:\n self.memcache.set(\"%s:1:onair_title\" % self.prefix, datas[\"title\"][0])\n self.memcache.set(\"%s:1:onair_artist\" % self.prefix, datas[\"artist\"][0])\n self.memcache.set(\"%s:1:onair_score\" % self.prefix, score)\n except:\n pass\n\n try:\n self.memcache.set(\"%s:1:onair_full\" % self.prefix,\n \"%s - %s\" % (datas[\"artist\"][0],\n datas[\"title\"][0]))\n except:\n pass", "def update_item(self, table, item):", "def set_song_fingerprinted(self, song_id):\n song = Songs.get(id=song_id)\n song.fingerprinted = True\n song.save()", "def add_the_song_to_playlist(self):\n com_util.tap_on(self.driver, element['AddToPlaylist'])\n # com_util.send_to(self.driver, element['EnterThePlaylist'], 'My Songs')\n com_util.tap_on(self.driver, element['ClickMySongs'])\n # com_util.tap_on(self.driver, element['SaveBtn'])\n com_util.tap_on(self.driver, element['CancelBtn'])\n com_util.tap_on(self.driver, element['DownArrow'])", "def populate_music_entity_tables():\n\tsession = get_session()\n\tclient = SpotifyAuthAPI()\n\n\tplaylists = 
get_playlists_to_process(session)\n\n\tplaylist_count = playlists.count()\n\n\tfor i, playlist in enumerate(playlists):\n\t\tprint \"populating playlist {} of {}\".format(i + 1, playlist_count)\n\n\t\tplaylist_id = playlist.playlist_id\n\t\towner_id = playlist.owner_id\n\n\t\ttry:\n\t\t\ts_playlist = client.get('users/{}/playlists/{}/tracks'.format(owner_id, playlist_id))\n\t\texcept SpotifyException:\n\t\t\tcontinue\n\t\texcept UnicodeEncodeError:\n\t\t\tprint owner_id.encode('utf-8'), playlist_id.encode('utf-8')\n\t\t\tcontinue\n\n\t\tif len(s_playlist) > TOO_MANY_SONGS_TO_STORE:\n\t\t\tprint \"skipping playlist {} because it has {} songs\".format(playlist.name.encode('utf-8'), len(s_playlist))\n\n\t\t\tplaylist.too_big = True\n\t\t\tsession.add(playlist)\n\t\t\tsession.commit()\n\n\t\t\tcontinue\n\n\t\ts_playlist = filter(None, [t['track'] for t in s_playlist])\n\n\t\tfor k, s_track in enumerate(s_playlist):\n\t\t\tif k % 25 == 0:\n\t\t\t\tprint 'creating track {} of {}'.format(k + 1, len(s_playlist))\n\n\t\t\tif not s_track.get('id'):\n\t\t\t\tprint 'skipping track name: {} because no track id'.format(s_track.get('name').encode('utf-8'))\n\t\t\t\tcontinue\n\t\t\ttrack = session.query(Track).get(s_track['id'])\n\n\t\t\tif track is None:\n\t\t\t\ttrack = Track(\n\t\t\t\t\ttrack_id=s_track['id'],\n\t\t\t\t\tname=s_track['name'],\n\t\t\t\t\talbum_id=s_track['album']['id'],\n\t\t\t\t\talbum_name=s_track['album']['name'],\n\t\t\t\t)\n\n\t\t\ts_artists = s_track['artists']\n\n\t\t\tfor s_artist in s_artists:\n\t\t\t\tif not s_artist.get('id'):\n\t\t\t\t\tprint 'skipping artist name: {} because no artist id'.format(s_artist.get('name'))\n\t\t\t\t\tcontinue\n\t\t\t\tartist = session.query(Artist).get(s_artist['id'])\n\n\t\t\t\tif artist is None:\n\t\t\t\t\tartist = Artist(\n\t\t\t\t\t\tartist_id=s_artist['id'],\n\t\t\t\t\t\tname=s_artist['name'],\n\t\t\t\t\t)\n\n\t\t\t\ttrack.artists.append(artist)\n\t\t\tplaylist.tracks.append(track)\n\n\t\t\tsession.add(playlist)\n\t\t\tsession.commit()", "def add_song(self, name, year, title):\n\n # Here we check if album exist under artist.\n album_found = find_object(name, self.albums)\n if album_found is None: # If there is no album found\n print(name + \"not found\") # we print \"Album name not found\n album_found = Album(name, year, self.name) # Change_3: Pass \"self.name\" instead of \"self\"\n self.add_album(album_found) # We add new_album to song.\n else: # if we found an existing album with same name\n print(\"found album\" + name) # we print found album name\n\n # so we add song to album_found\n album_found.add_song(title)", "def test_update(self):\n album = Album(artist='Artist', album='Album', album_type='ep',\n totaltracks=1, totalseconds=120)\n pk = album.insert(self.app.db, self.app.curs)\n self.assertNotEqual(pk, None)\n self.assertNotEqual(pk, 0)\n self.assertEqual(self.get_album_count(), 1)\n album_row = self.get_album_by_id(pk)\n self.assertEqual(album_row['alartist'], 'Artist')\n self.assertEqual(album_row['alalbum'], 'Album')\n self.assertEqual(album_row['altype'], 'ep')\n self.assertEqual(album_row['totaltracks'], 1)\n self.assertEqual(album_row['totalseconds'], 120)\n\n # Now update the object and save out, and test.\n album.artist = 'Artist 2'\n album.album = 'Album 2'\n album.album_type = 'live'\n album.totaltracks = 2\n album.totalseconds = 240\n album.update(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 1)\n album_row = self.get_album_by_id(pk)\n self.assertEqual(album_row['alartist'], 'Artist 2')\n 
self.assertEqual(album_row['alalbum'], 'Album 2')\n self.assertEqual(album_row['altype'], 'live')\n self.assertEqual(album_row['totaltracks'], 2)\n self.assertEqual(album_row['totalseconds'], 240)", "def __insert_songplay_data(cur, df):\n # for each songplay event, described by a row in the dataframe\n for index, row in df.iterrows():\n \n # get songid and artistid from song and artist tables\n cur.execute(song_select, (row.song, row.artist, row.length))\n results = cur.fetchone()\n\n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert songplay record\n songplay_data = (pd.to_datetime(row.ts, unit='ms'), row.userId, row.level, songid, artistid, row.sessionId, row.location, row.userAgent)\n cur.execute(songplay_table_insert, songplay_data)", "def process_song_file(cursor, filepath):\n # open song file\n df = pd.read_json(filepath, lines=True)\n\n # insert artist record\n artist_columns = ['artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude']\n artist_data = df[artist_columns].values[0].tolist()\n cursor.execute(artist_table_insert, artist_data)\n\n # insert song record\n song_columns = ['song_id', 'title', 'artist_id', 'year', 'duration']\n song_data = df[song_columns].values[0].tolist()\n cursor.execute(song_table_insert, song_data)", "def appendArtist(song):\n\tsql = []\n\t\n\tsql.append(\"INSERT INTO ARTIST ('name') VALUES ('\" \n\t+ '/'.join(song.artist) + \"');\")\n\t\n\tsql.append(\"INSERT INTO songs_artist ('songs_id', 'artist_id')\"\n\t+ \" VALUES ((select id from songs where hash = '\" + str(song.hash) + \"'), \"\n\t+ \"(select id from artist where name = '\" + '/'.join(song.artist) + \"'));\")\n\t\n\treturn sql", "def update(self):\n \n for track in self.tracks:\n track.update()", "def add_song(self, song: Song):\n self.playlist.append(song)", "def save_song(self):\n if self.is_stream:\n self.save_song_from_stream()\n else:\n self.save_song_from_file()", "def assign_position(self, song, pos=None):\n if not pos:\n next_order = self.__get_max_pos() + 1\n else:\n next_order = pos\n\n update = text(\n \"update setlist set song_position = :order where show_id = :show_id and song_id = :song_id\")\n update = update.bindparams(order=next_order, show_id=self.id, song_id=song.id)\n\n db.engine.execute(update.execution_options(autocommit=True))", "def update_audiobook(_id, _title_of_the_audiobook, _author_of_the_title, _narrator,\r\n _duration_in_number_of_seconds):\r\n audiobook_to_update = Audiobook.query.filter_by(id=_id).first()\r\n audiobook_to_update.title_of_the_audiobook = _title_of_the_audiobook\r\n audiobook_to_update.author_of_the_title = _author_of_the_title\r\n audiobook_to_update.narrator = _narrator\r\n audiobook_to_update.duration_in_number_of_seconds = _duration_in_number_of_seconds\r\n db.session.commit()", "def record_lyrics_result(self, track_id, songdata):\n self.lyrics.insert_one(\n {\n \"_id\": track_id,\n \"response_artist\": songdata.artist,\n \"response_title\": songdata.title,\n \"lyrics\": songdata.lyrics,\n }\n )", "def pl_btn_push(self):\n try:\n pl_name = self.pl_line_edit.text().replace(\" \", \"_\")\n path = os.path.abspath(\"Playlists/\"+pl_name+\".m3u\")\n pl_file = open(path, 'a')\n\n songs = glob.glob(\"Fixed/*/*/*\")\n for row in range(self.model.rowCount()):\n if self.model.item(row).checkState():\n index = self.model.index(row, 4)\n for song in songs:\n data = mutagen.File(song, easy=True)\n track = get_track(data['title'][0], data['artist'][0])\n if int(track.track_id) == 
int(self.model.data(index)):\n mp3_path = os.path.abspath(song)\n pl_file.write(mp3_path+\"\\n\")\n QMessageBox.about(self, \"Playlist Updated\",\n 'Playlist \"%s\" has been updated.'%(self.pl_line_edit.text()))\n pl_file.close()\n except:\n QMessageBox.about(self, \"Playlist Not Updated\",\n 'Playlist \"%s\" could not be updated.'%(self.pl_line_edit.text()))", "def update(self):\n self.getDbRecord().update()", "def update(self):\n db.session.commit()", "def update(self):\n db.session.commit()", "def add_new_song(self):\n return \"New Song Added\"", "def update(self, stock_record):\n self._records[stock_record.symbol] = stock_record" ]
[ "0.7413717", "0.6995219", "0.68380946", "0.6823178", "0.66802067", "0.6608994", "0.6446211", "0.6343561", "0.6318853", "0.6218813", "0.6192342", "0.6175601", "0.6141071", "0.61293864", "0.61132926", "0.60554576", "0.6035266", "0.6034275", "0.5926394", "0.592424", "0.591346", "0.5883479", "0.5878672", "0.5841953", "0.5828499", "0.5824835", "0.580802", "0.5783843", "0.57731354", "0.57731354", "0.5764232", "0.57612556", "0.57471263", "0.5743129", "0.5735491", "0.5721421", "0.5710091", "0.56911975", "0.56890243", "0.56736416", "0.5657241", "0.5656785", "0.5655713", "0.5655608", "0.5652286", "0.5649418", "0.5645688", "0.56298035", "0.56294733", "0.5627077", "0.56267446", "0.56234926", "0.5618751", "0.5618619", "0.5614558", "0.5594182", "0.5591713", "0.55916816", "0.55749214", "0.5564479", "0.556007", "0.5559589", "0.5553217", "0.5544543", "0.55433613", "0.5539932", "0.55244255", "0.5519112", "0.5511795", "0.55102795", "0.5505528", "0.5501127", "0.54928225", "0.5492454", "0.54899585", "0.54894394", "0.5484502", "0.54816514", "0.5467592", "0.54675794", "0.54644185", "0.5451311", "0.54433125", "0.5443281", "0.54390806", "0.5437897", "0.5437841", "0.54372287", "0.54339457", "0.5433374", "0.54321235", "0.54317147", "0.5429027", "0.54256743", "0.5421707", "0.54217", "0.5419182", "0.5419182", "0.54156554", "0.54082847" ]
0.70110184
1
Create a Batch from an existing batch id. Notes
def from_batch_id(batch_id: int, *args, **kwargs): b = Batch(*args, **kwargs) assert isinstance(b._backend, _backend.ServiceBackend) b._batch_handle = b._backend._batch_client.get_batch(batch_id) return b
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_batch(self, batch_name, priority = 0, comments = '', notifications = []):\n\n url = self._base_url + urlConfig.URLS['Project'] + '/' + self._project_id + '/batch'\n batch = {\n \"batch_name\": batch_name,\n \"priority\": priority,\n \"comments\": comments,\n \"notifications\": [\n ]\n }\n data = json.dumps(batch)\n response = apiCall.post(self._get_token(), url,self._proxy,data, 30)\n logging.debug(response['id'])\n return response['id']", "def create_batch(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"create_batch\"), kwargs)", "def create_batch(self, batch_index, *args, **kwargs):\n batch = self.dataset.create_batch(batch_index, *args, **kwargs)\n batch_res = self._exec(batch)\n return batch_res", "def batch_id(self, batch_id):\n\n self._batch_id = batch_id", "def add_to_batch(self, created_job_ids):\n batch_folder = BatchFolder(path=self.current_dir)\n if batch_folder.has_batch():\n batch: JobBatch = batch_folder.load()\n else:\n batch = JobBatch(job_ids=[], server=self.get_active_server())\n if batch.server.url != self.get_active_server().url:\n logger.info(\n \"A batch exists in this folder, but for a different server. \"\n \"Not saving job ids in batch\"\n )\n else:\n logger.info(\"Saving job ids in batch in current folder\")\n batch.job_ids = sorted(\n list(set(batch.job_ids) | set(created_job_ids))\n ) # add only unique new ids\n batch_folder.save(batch)", "def test_batch(self):\n batch = batch_test_utils.create_batch()\n self.job1.batch_id = batch.id\n self.job1.save()\n\n url = '/%s/jobs/?batch_id=%d' % (self.api, batch.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['id'], self.job1.id)", "def get_batch(self, batch_id):\n #fmt = lambda x: join(self.path, self.simulation_paths[x])\n fmt = lambda x: self.simulation_paths[x]\n simulation_paths = [fmt(i) for i in self.batch_indices[batch_id]]\n return Batch(simulation_paths, root=self.path)", "def sfdcCreateBatch(query, chunk_size=10000, **kwargs):\n api_ver = kwargs.get('api_ver', '')\n session_id = kwargs.get('session_id', '')\n instance = kwargs.get('instance', '')\n job_id = kwargs.get('job_id', '')\n sfdcXml = kwargs.get('sfdcXml', {})\n\n bodyXml = sfdcXml.get('batch', {}).get('body')\n url = sfdcXml.get('batch', {}).get('url')\n headers = sfdcXml.get('batch', {}).get('headers')\n\n bodyXml = unicode(query, \"UTF-8\")\n url = url.format(instance=instance, api_ver=api_ver,\\\n job_id=job_id)\n headers['Content-Type'] = headers.get('Content-Type', '')\\\n .format(chunk_size=chunk_size)\n headers['X-SFDC-Session'] = session_id\n\n resp = requests.post(url=url, headers=headers, data=bodyXml)\n dictResp = xmltodict.parse(resp.text)\n batch_id = str(dictResp['batchInfo']['id'])\n\n return batch_id", "def nextBatch(self, batch_to_get_id=None):\n\n # batch id to get\n if batch_to_get_id is None:\n batch_to_get_id = self.current_batch_id\n\n # batch to get\n batch_to_get = self.batches[batch_to_get_id]\n\n # check if batch is available in memory / disk\n if batch_to_get.is_stored:\n # get batch data\n X_data, y_data = batch_to_get.getBatchData()\n # return X np array, label array\n return X_data, y_data\n\n # get data of current batch\n urls = list()\n\n for key in batch_to_get.ids:\n value = self.data_dict.data_dict[key]\n batch_to_get.batch_subjects[key] = value\n 
batch_to_get.y_data.append(value['label'])\n urls.append(value['path'])\n\n # get images using Image Loader class\n binary_images = self.imageLoader.getImages(urls)\n\n # convert images to array\n X_data = self._listOfImagesToNumpy(images=binary_images)\n y_data = np.array(batch_to_get.y_data)\n\n # decide where to store batch\n system_memory_usage_percent = psutil.virtual_memory()[2]\n if (system_memory_usage_percent < 90):\n save_to = \"memory\"\n elif self.disk_scratch is not None:\n save_to = \"disk\"\n elif self.disk_scratch is not None:\n save_to = \"disk_raw\"\n else:\n save_to = \"none\"\n\n # store batch\n batch_to_get.storeBatch(storage=save_to, X_data=X_data,\n y_data=y_data)\n\n # increment current batch\n if self.current_batch_id < (self.n_batches-1):\n self.current_batch_id += 1\n else:\n self.current_batch_id = 0\n\n # return X np array, label array\n return X_data, y_data", "def post(self, batch):\n num_jobs = len(batch)\n plural = \"\" if num_jobs == 1 else \"s\"\n log.info(\"> Sending batch request with %s job%s\", num_jobs, plural)\n data = []\n for i, job in enumerate(batch):\n if job.finished:\n raise Finished(job)\n else:\n job.finished = True\n log.info(\"> {%s} %s\", i, job)\n data.append(dict(job, id=i))\n response = self.resource.post(data)\n log.info(\"< Received batch response for %s job%s\", num_jobs, plural)\n return response", "def add_plant_batch(db_path: str, plant_batch: PlantBatch) -> None:\n plant, location, tray = parse_plant_location_tray_to_dict(plant_batch)\n\n query = f'INSERT INTO batches (Plant, Location, Tray, n_trays, planting_time) VALUES (\"{plant}\", \"{location}\", \"{tray}\", {plant_batch.n_tray}, \"{plant_batch.planting_time.isoformat()}\")'\n\n conn: Connection = sqlite3.connect(path.join(db_path, 'batches.db'))\n curr: Cursor = conn.cursor()\n try:\n curr.execute(query)\n except sqlite3.IntegrityError:\n raise ValueError(\"Error occured\")\n\n conn.commit()\n curr.close()\n conn.close()", "def get_batch(self, name):\n batches = self._meta['sets'].get('batches', {})\n if batches.get(name):\n b = name\n elif batches.get(name):\n b = name\n else:\n raise KeyError('No Batch found named {}.'.format(name))\n return qp.Batch(self, b)", "def submit_job(self, batch_id):\n\n job_name = self.bot_id + \"_\" + batch_id\n job_queue = self.jobQueueName\n job_definition = self.job_def\n command = self.bot_cmd\n\n kwargs = {'jobName': job_name,\n 'jobQueue': job_queue,\n 'jobDefinition': job_definition,\n 'containerOverrides': {'command': [command]}}\n print(\">>> Going to create job: \" + str(kwargs))\n submit_job_response = self.batch_client.submit_job(jobName=job_name,\n jobQueue=job_queue,\n jobDefinition=job_definition,\n # containerOverrides={'command': [command]}\n )\n\n print(\">>> submit job response is :\" + str(submit_job_response))\n job_id = submit_job_response['jobId']\n print('Submitted job [%s - %s] to the job queue [%s]' % (job_name, job_id, job_queue))", "def add(\n self,\n batch: RolloutBatchProtocol,\n buffer_ids: Optional[Union[np.ndarray, list[int]]] = None,\n ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n # preprocess batch\n new_batch = Batch()\n for key in set(self._reserved_keys).intersection(batch.keys()):\n new_batch.__dict__[key] = batch[key]\n batch = new_batch\n batch.__dict__[\"done\"] = np.logical_or(batch.terminated, batch.truncated)\n assert {\"obs\", \"act\", \"rew\", \"terminated\", \"truncated\", \"done\"}.issubset(batch.keys())\n if self._save_only_last_obs:\n batch.obs = batch.obs[:, -1]\n if not 
self._save_obs_next:\n batch.pop(\"obs_next\", None)\n elif self._save_only_last_obs:\n batch.obs_next = batch.obs_next[:, -1]\n # get index\n if buffer_ids is None:\n buffer_ids = np.arange(self.buffer_num)\n ptrs, ep_lens, ep_rews, ep_idxs = [], [], [], []\n for batch_idx, buffer_id in enumerate(buffer_ids):\n ptr, ep_rew, ep_len, ep_idx = self.buffers[buffer_id]._add_index(\n batch.rew[batch_idx],\n batch.done[batch_idx],\n )\n ptrs.append(ptr + self._offset[buffer_id])\n ep_lens.append(ep_len)\n ep_rews.append(ep_rew)\n ep_idxs.append(ep_idx + self._offset[buffer_id])\n self.last_index[buffer_id] = ptr + self._offset[buffer_id]\n self._lengths[buffer_id] = len(self.buffers[buffer_id])\n ptrs = np.array(ptrs)\n try:\n self._meta[ptrs] = batch\n except ValueError:\n batch.rew = batch.rew.astype(float)\n batch.done = batch.done.astype(bool)\n batch.terminated = batch.terminated.astype(bool)\n batch.truncated = batch.truncated.astype(bool)\n if self._meta.is_empty():\n self._meta = create_value(batch, self.maxsize, stack=False) # type: ignore\n else: # dynamic key pops up in batch\n alloc_by_keys_diff(self._meta, batch, self.maxsize, False)\n self._set_batch_for_children()\n self._meta[ptrs] = batch\n return ptrs, np.array(ep_rews), np.array(ep_lens), np.array(ep_idxs)", "def helper_create_batch_item(\n *,\n amount,\n bank_code,\n bank_account_name,\n bank_account_number,\n description,\n external_id,\n email_to=None,\n email_cc=None,\n email_bcc=None,\n **kwargs,\n ):\n params = locals()\n del params[\"kwargs\"]\n\n return BatchDisbursementItem.Query(**params)", "def test_adding_a_batch(created_job, bulk_request):\n bulk_request.reset_mock()\n bulk_request.return_value = '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <batchInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <id>BATCHONE</id>\n <jobId>THEJOBID</jobId>\n <state>Queued</state>\n </batchInfo>\n '''\n\n fake_data = [('1', '2'), ('3', '4')]\n created_job.add_batch(['Id', 'Name'], iter(fake_data))\n\n assert created_job.pending_batches == ['BATCHONE']\n\n bulk_request.assert_called_once_with(\n 'post',\n 'https://salesforce/services/async/34.0/job/THEJOBID/batch',\n content_type='text/csv; charset=UTF-8',\n data=mock.ANY\n )\n\n data = bulk_request.call_args[1]['data']\n assert b''.join(data) == b'Id,Name\\r\\n1,2\\r\\n3,4\\r\\n'", "def make_batch_request(self, batch):\n args = {}\n args['access_token'] = self.access_token\n args['batch'] = json.dumps(batch)\n args = {k.encode('utf-8'): unicode(v).encode('utf-8')\n for k, v in args.items()}\n logger.info('Making a batched request with %s' % args)\n try:\n f = urllib2.urlopen(self.api_root, urllib.urlencode(args))\n data = json.load(f)\n # For debugging\n self.data = data\n for idx, val in enumerate(data):\n data[idx] = json.loads(val['body'])\n return data\n except urllib2.HTTPError as e:\n logger.info('%s' % e)\n return json.load(e)\n except urllib2.URLError as e:\n logger.warn('URLError: %s' % e.reason)", "def create_batch(count):\n\n if count < 1:\n raise click.BadParameter('count needs to be > 0')\n\n factory = V2ProfileFactory()\n output = factory.create_batch(count, export_json=True)\n click.echo(output)", "def add_delete_batch() -> str:\r\n batches = app.config[\"batches\"]\r\n # Contains HTML form data inputted by the user submitted using POST.\r\n response = request.form\r\n batch_id_input = response.get(\"id_input\")\r\n batch_volume_input = response.get(\"volume_input\")\r\n batch_beer_type_input = response.get(\"beer_type_input\")\r\n delete_batch_input 
= response.get(\"delete_batch_input\")\r\n # True if user submits batch id, volume, and beer_type (part of one form).\r\n if batch_id_input is not None:\r\n # Removes blanks (whitespace characters) from batch id.\r\n batch_id_input = batch_id_input.replace(\" \", \"\")\r\n # Volume is always a number as str; HTML is set to only allow num input\r\n batch_volume_input = int(batch_volume_input)\r\n # Creates and adds Batch to batches dict. if ID isn't in batches dict.\r\n if batches.get(batch_id_input) is None:\r\n handle = {\"inventory\": app.config[\"inventory\"],\r\n \"tanks\": app.config[\"tanks\"]}\r\n batches[batch_id_input] = Batch(batch_id_input,\r\n batch_beer_type_input,\r\n batch_volume_input,\r\n handle)\r\n log_message = (\"Batch {} with beer type {} and {} L volume \"\r\n + \"was added.\").format(batch_id_input,\r\n batch_beer_type_input,\r\n batch_volume_input)\r\n app.config[\"logger\"].info(log_message)\r\n # Elif user wants to del. a batch and batch id exists, deletes this batch.\r\n elif delete_batch_input is not None and delete_batch_input in batches:\r\n del batches[delete_batch_input]\r\n log_message = \"Batch {} was deleted.\".format(delete_batch_input)\r\n app.config[\"logger\"].info(log_message)\r\n # Creates HTML table containing all batches.\r\n html_batch_table = update_batch_table(batches)\r\n return \"\"\"<style>\r\n h1, h2, h3 {\r\n font-family: arial, sans-serif;\r\n }\r\n table {\r\n font-family: arial, sans-serif;\r\n border-collapse: collapse;\r\n width: 100%;\r\n }\r\n td, th {\r\n border: 1px solid #dddddd;\r\n text-align: left;\r\n padding: 8px;\r\n }\r\n tr:nth-child(even) {\r\n background-color: #dddddd;\r\n }\r\n </style>\r\n <h2>Add batch</h2>\r\n <form action=\"/add_delete_batch\" method=\"POST\">\r\n Batch ID:<br>\r\n <input type=\"text\" name=\"id_input\" required=\"required\">\r\n <br>\r\n Volume (in litres):<br>\r\n <input type=\"number\" name=\"volume_input\" min=\"0\"\r\n required=\"required\">\r\n <br>\r\n Beer type:<br>\r\n <select name=\"beer_type_input\">\r\n <option value=\"dunkers\">Dunkers</option>\r\n <option value=\"pilsner\">Pilsner</option>\r\n <option value=\"red_helles\">Red Helles</option>\r\n </select>\r\n <br><br>\r\n <input type=\"submit\" value=\"Add batch\">\r\n </form>\r\n <h2>Delete batch</h2>\r\n <form action=\"/add_delete_batch\" method=\"POST\">\r\n Batch ID:<br>\r\n <input type=\"text\" name=\"delete_batch_input\" \r\n required=\"required\">\r\n <br><br>\r\n <input type=\"submit\" value=\"Delete batch\">\r\n </form>\r\n <form action=\"/\" method=\"POST\">\r\n <input type=\"hidden\">\r\n <br>\r\n <input type=\"submit\" value=\"Go back to tracking screen\">\r\n </form>\r\n <h2>Batches</h2>\r\n <table>\r\n <tr>\r\n <th>Batch ID</th>\r\n <th>Beer type</th>\r\n <th>Volume (L)</th>\r\n <th>Current production phase</th>\r\n <th>Current tank</th>\r\n <th>Current phase finishes</th>\r\n <th>Last completed phase</th>\r\n <th>Bottles put in inventory</th>\r\n </tr>\"\"\" + html_batch_table + \"</table>\"", "def _defineBatches(self):\n # extract all ids\n all_keys = list(self.data_dict.unique_ids)\n\n # randomly shuffle keys\n if self.random_shuffle_batches:\n random.shuffle(all_keys)\n\n # create batches based on number of batches\n if self.n_big_batches is not None:\n self.n_big_batches += 1\n # define cuts for batches\n cuts = np.linspace(0, self.n_observations,\n self.n_big_batches).round()\n # create batches based on batch size\n elif self.batch_size is not None:\n cuts = [x for x in range(0, self.n_observations,\n 
int(self.batch_size))]\n if cuts[-1] < self.n_observations:\n cuts.append(self.n_observations)\n\n # convert batch sizes to integers\n cuts = [int(x) for x in cuts]\n\n # save batches into dictionary\n batches = dict()\n for i in range(0, (len(cuts) - 1)):\n # create DataBatch object\n current_batch = DataBatch(ids=all_keys[cuts[i]:cuts[i+1]],\n batch_id=i)\n current_batch.setDiskStoragePath(self.disk_scratch)\n batches[i] = current_batch\n\n # save batches\n self.n_batches = len(batches.keys())\n self.batches = batches", "def post(self, request, work_batch_id):\n\n from sentry.models.workbatch import WorkBatch\n\n try:\n work_batch = WorkBatch.objects.get(pk=int(work_batch_id))\n except WorkBatch.DoesNotExist:\n raise ResourceDoesNotExist\n\n logger = logging.getLogger('clims.files')\n logger.info('workbatchfile.start')\n\n if 'file' not in request.data:\n return Response({'detail': 'Missing uploaded file'}, status=400)\n\n fileobj = request.data['file']\n\n full_name = request.data.get('name', fileobj.name)\n if not full_name or full_name == 'file':\n return Response({'detail': 'File name must be specified'}, status=400)\n\n name = full_name.rsplit('/', 1)[-1]\n\n if _filename_re.search(name):\n return Response(\n {\n 'detail': 'File name must not contain special whitespace characters'\n }, status=400\n )\n\n headers = {\n 'Content-Type': fileobj.content_type,\n }\n for headerval in request.data.getlist('header') or ():\n try:\n k, v = headerval.split(':', 1)\n except ValueError:\n return Response({'detail': 'header value was not formatted correctly'}, status=400)\n else:\n if _filename_re.search(v):\n return Response(\n {\n 'detail': 'header value must not contain special whitespace characters'\n },\n status=400\n )\n headers[k] = v.strip()\n\n file = File.objects.create(\n name=name,\n type='work_batch.file',\n headers=headers,\n )\n file.putfile(fileobj, logger=logger)\n\n try:\n with transaction.atomic():\n # TODO: Remove the organization id from the user task file\n work_batch_file = WorkBatchFile.objects.create(\n organization_id=work_batch.organization_id,\n file=file,\n name=full_name,\n work_batch_id=work_batch.id\n )\n except IOError:\n file.delete()\n return Response({'detail': ERR_FILE_EXISTS}, status=409)\n\n return Response(serialize(work_batch_file, request.user), status=201)", "def created_job(new_job, bulk_request):\n bulk_request.return_value = '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <jobInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <id>THEJOBID</id>\n <operation>update</operation>\n <object>Lead</object>\n </jobInfo>\n '''\n new_job.create()\n return new_job", "def create(self, batch_outs):\n raise NotImplementedError('Must be implemented in subclasses.')", "def batch(self, batch):\n\n self._batch = batch", "def insert_job(sess, filetype, status, type_id, submission, job_id=None, filename=None,\n file_size=None, num_rows=None):\n job = Job(\n file_type_id=filetype,\n job_status_id=status,\n job_type_id=type_id,\n submission_id=submission,\n original_filename=filename,\n file_size=file_size,\n number_of_rows=num_rows\n )\n if job_id:\n job.job_id = job_id\n sess.add(job)\n sess.commit()\n return job", "def get_batch(self, batch_kwargs, batch_parameters=None) -> None:\n raise NotImplementedError", "def insert(self, json_data, batch=None):\n headers = {'Content-type': 'application/json;charset=UTF-8'}\n url = '/'.join([self.url, self.db])\n time.sleep(0.01)\n if batch:\n r = requests.post(url, data=json_data, headers=headers, params={'batch': 
'ok'})\n else:\n r = requests.post(url, data=json_data, headers=headers) \n time.sleep(0.01)\n if not r.status_code in (201, 202):\n raise Exception(\"HTTP \" + str(r.status_code))", "def add_batch(self, batch, env_ids=None):\n batch_size = get_nest_batch_size(batch, tf.int32)\n with tf.device(self._device):\n if env_ids is None:\n env_ids = tf.range(self._num_envs)\n\n assert len(\n env_ids.shape.as_list()) == 1, \"env_ids should be an 1D tensor\"\n tf.Assert(batch_size == tf.shape(env_ids)[0], [\n \"batch and env_ids do not have same length\", batch_size, \"vs.\",\n tf.shape(env_ids)[0]\n ])\n\n # Make sure that there is no duplicate in `env_id`\n _, _, env_id_count = tf.unique_with_counts(tf.sort(env_ids))\n tf.Assert(\n tf.reduce_max(env_id_count) == 1,\n [\"There are duplicated ids in env_ids\", env_ids])\n current_pos = tf.gather(self._current_pos, env_ids, axis=0)\n indices = tf.concat([\n tf.cast(tf.expand_dims(env_ids, -1), tf.int64),\n tf.expand_dims(current_pos, -1)\n ],\n axis=-1)\n\n tf.nest.map_structure(\n lambda buf, bat: buf.scatter_nd_update(indices, bat),\n self._buffer, batch)\n\n self._current_pos.scatter_nd_update(\n tf.expand_dims(env_ids, -1),\n (current_pos + 1) % self._max_length)\n current_size = tf.gather(self._current_size, env_ids, axis=0)\n self._current_size.scatter_nd_update(\n tf.expand_dims(env_ids, -1),\n tf.minimum(current_size + 1, self._max_length))", "def BatchCreate(self, request, global_params=None):\n config = self.GetMethodConfig('BatchCreate')\n return self._RunMethod(\n config, request, global_params=global_params)", "def GetBatchJob(client, batch_job_id):\n batch_job_service = client.GetService('BatchJobService')\n\n selector = {\n 'fields': ['Id', 'Status', 'DownloadUrl'],\n 'predicates': [\n {\n 'field': 'Id',\n 'operator': 'EQUALS',\n 'values': [batch_job_id]\n }\n ]\n }\n\n return batch_job_service.get(selector)['entries'][0]", "def link_with_batch(self, batch, node1, node2, edge_type, **attr):\n batch[:] = getattr(batch, edge_type).create(node1, node2, **attr)", "def batch_start(self, batch_idx, batch_data):\n self.batch = batch_idx", "def create(self, request, *args, **kwargs):\n file_obj = request.data['file']\n max_trip_distance = request.data.get('max_trip_distance')\n\n client = boto3.client('s3', config=BotocoreClientConfig(signature_version='s3v4'))\n\n organization = request.user.organization\n file_name = '{}.zip'.format(str(uuid4()))\n key = get_batch_shapefile_upload_path(organization.name, file_name).lstrip('/')\n\n response = client.upload_fileobj(file_obj, settings.AWS_STORAGE_BUCKET_NAME, key)\n print(response)\n url = client.generate_presigned_url(\n ClientMethod='get_object',\n Params={'Bucket': settings.AWS_STORAGE_BUCKET_NAME, 'Key': key}\n )\n async_task('pfb_analysis.tasks.create_batch_from_remote_shapefile',\n url,\n max_trip_distance=max_trip_distance,\n group='create_analysis_batch',\n ack_failure=True)\n\n return Response({\n 'shapefile_url': url,\n 'status': 'STARTED'\n }, status=status.HTTP_200_OK)", "def _get_batch(batch, ctx):\n if isinstance(batch, mx.io.DataBatch):\n data = batch.data[0]\n label = batch.label[0]\n else:\n data, label = batch\n return (gluon.utils.split_and_load(data, ctx),\n gluon.utils.split_and_load(label, ctx),\n data.shape[0])", "def test_create_batch_child():\n class ChildSet(DatasetIndex):\n # pylint: disable=too-few-public-methods\n pass\n dsi = ChildSet(5)\n assert isinstance(dsi.create_batch(range(5)), ChildSet)", "def append(self, batch: Batch):", "def action(self):\n\n batch_name 
= super().find_batch(self.__arguments, '')\n butch = self.__batch_data.get_batch(batch_name)\n cmd = CMDfromArray(butch)\n cmd.run()", "def batch_input(self, type_shape, batch_idx):\n try:\n ts_idx = self._loom._type_shape_to_idx[type_shape]\n except KeyError:\n raise TypeError('Constant is not of a recognized TypeShape: %s' %\n str(type_shape))\n batch_input = self._weaver.BatchInput(ts_idx, batch_idx)\n if batch_input == -1:\n raise AssertionError('Weaver Batch Input creation failed: %s' %\n self._weaver.error_string())\n return batch_input", "def AddBatchJob(client):\n # Initialize appropriate service.\n batch_job_service = client.GetService('BatchJobService', version='v201509')\n # Create a BatchJob.\n batch_job_operations = [{\n 'operand': {},\n 'operator': 'ADD'\n }]\n return batch_job_service.mutate(batch_job_operations)['value'][0]", "def _create_certificate_batch(self, request):\n with transaction.atomic():\n batch = CertificateBatch.objects.create(\n creator=request.user, **self.recipient_form.cleaned_data)\n\n # Create certificates\n for key in self.forms:\n Certificate.objects.create(batch=batch, **self.forms[key].cleaned_data)\n\n return batch", "def get_batch(self) -> CBInput:\n sample_batch_idx, batch_arms = self.gen_arms_per_batch()\n context_arm_features = self.gen_features_batch(batch_idx=sample_batch_idx)\n assert context_arm_features.ndim == 3\n rewards_all_arms = self.features_to_rewards(\n inp_feature=context_arm_features, sample_batch_idx=sample_batch_idx\n )\n batch = CBInput(\n context_arm_features=context_arm_features,\n arms=batch_arms, # ads of batch_size campaigns\n rewards_all_arms=rewards_all_arms,\n )\n return batch", "def createDeleteSystemJob(self, dataSetId: str = None, batchId: str = None) -> dict:\n if self.loggingEnabled:\n self.logger.debug(f\"Starting createDeleteSystemJob\")\n path = \"/system/jobs\"\n if dataSetId is not None:\n obj = {\"dataSetId\": dataSetId}\n res = self.connector.postData(self.endpoint + path, data=obj)\n return res\n elif batchId is not None:\n obj = {\"batchId\": batchId}\n res = self.connector.postData(self.endpoint + path, data=obj)\n return res\n else:\n raise ValueError(\"Require a dataSetId or a batchId\")", "def batch(self, batch_obj):\r\n if batch_obj is not None and not isinstance(batch_obj, BatchQuery):\r\n raise CQLEngineException('batch_obj must be a BatchQuery instance or None')\r\n clone = copy.deepcopy(self)\r\n clone._batch = batch_obj\r\n return clone", "def build_batch_spec(self, batch_definition: BatchDefinition) -> BatchSpec:\n batch_spec_params: dict = (\n self._generate_batch_spec_parameters_from_batch_definition(\n batch_definition=batch_definition\n )\n )\n # batch_spec_passthrough via Data Connector config\n batch_spec_passthrough: dict = deepcopy(self.batch_spec_passthrough)\n\n # batch_spec_passthrough from batch_definition supersedes batch_spec_passthrough from Data Connector config\n if isinstance(batch_definition.batch_spec_passthrough, dict):\n batch_spec_passthrough.update(batch_definition.batch_spec_passthrough)\n\n batch_spec_params.update(batch_spec_passthrough)\n batch_spec = BatchSpec(**batch_spec_params)\n return batch_spec", "def batch(data, batch_type=\"static\", batch_size=16, max_frames_in_batch=12000):\n if batch_type == \"static\":\n return static_batch(data, batch_size)\n elif batch_type == \"dynamic\":\n return dynamic_batch(data, max_frames_in_batch)\n else:\n logging.fatal(\"Unsupported batch type {}\".format(batch_type))", "def 
test_post_cve_id_invalid_batch_type(reg_user_headers):\n res = requests.post(\n f'{env.AWG_BASE_URL}{CVE_ID_URL}',\n headers=reg_user_headers,\n params={\n 'amount': '10',\n 'batch_type': '---',\n 'cve_year': f'{utils.CURRENT_YEAR}',\n 'short_name': reg_user_headers['CVE-API-ORG']\n }\n )\n assert res.status_code == 400\n response_contains_json(res, 'error', 'INVALID_BATCH_TYPE')", "def test_batch_default_arg_pos(self):\n\n b = br.Read((\"test\", \"demo\", 1), ops=None)\n\n bwr = br.BatchRecords()\n\n bwr.batch_records.append(b)\n\n assert len(bwr.batch_records) == 1\n\n bwr = br.BatchRecords()\n\n assert len(bwr.batch_records) == 0", "def instantiate_batch(self, inputs):\n return inputs", "def grow(self, batch_ids, **combo_runner_opts):\n if isinstance(batch_ids, int):\n batch_ids = (batch_ids,)\n\n combo_runner_core(\n grow,\n combos=((\"batch_number\", batch_ids),),\n constants={\"verbosity\": 0, \"crop\": self},\n **combo_runner_opts,\n )", "def instantiate_batch(self, inputs):\n _ = inputs\n raise NotImplementedError(\n 'LoomOp needs a definition for instantiate_batch.')", "def create_batch(request, due_date):\n\n template_name = 'batch_screen.html'\n\n context = {\n 'records': [],\n 'amount': '',\n 'b_amount': '',\n 'count': '',\n 'b_count': '',\n 'due_date': '',\n 'batch_ref': '',\n 'remove_success': request.GET.get('remove_success')\n }\n\n call_date = request.GET.get('call_date') or due_date\n\n if request.method == 'POST':\n pass\n\n else:\n\n try:\n datetime.datetime.strptime(due_date, '%Y-%m-%d')\n datetime.datetime.strptime(call_date, '%Y-%m-%d')\n except:\n raise Http404()\n\n try:\n\n count = 0\n amount = 0\n\n days = daysbeforecalldd()\n if datetime.datetime.strptime(call_date, \"%Y-%m-%d\") < datetime.datetime.now() + datetime.timedelta(days=days):\n raise Exception(\"Due date must be at least 7 days in the future.\")\n\n try:\n batches = sync_drawdown_table(due_date, user=request.user, call_date=call_date)\n except Exception as e:\n context['error'] = '{} {}'.format(e, traceback.format_exc())\n get_batches(request, context)\n return render(request, 'batch_list.html', context)\n\n new_batches = batches.get('new_batches', [])\n existing_batches = batches.get('existing_batches', [])\n\n # New batches but none existing\n if len(new_batches) and not len(existing_batches):\n if len(new_batches) == 1:\n return redirect('core_dd_drawdowns:view_batch', id=new_batches[0])\n else:\n url = reverse('core_dd_drawdowns:view_batches') + '?due_date={}&created={}'.format(due_date,\n len(new_batches))\n return redirect(url)\n\n # No new batches but existing batches\n if not len(new_batches) and len(existing_batches):\n if len(existing_batches) == 1:\n return redirect('core_dd_drawdowns:view_batch', id=existing_batches[0])\n else:\n url = reverse('core_dd_drawdowns:view_batches') + '?due_date={}&open={}'.format(due_date,\n len(existing_batches))\n return redirect(url)\n\n # New batches and existing batches\n elif len(new_batches) and len(existing_batches):\n url = reverse('core_dd_drawdowns:view_batches') + '?due_date={}&created={}'.format(due_date,\n len(new_batches))\n return redirect(url)\n\n context['count'] = count\n context['amount'] = '{0:0,.2f}'.format(amount or 0)\n\n context['due_date'] = due_date\n context['uk_due_date'] = datetime.datetime.strftime(datetime.datetime.strptime(due_date, '%Y-%m-%d'), '%d/%m/%Y')\n if type(call_date) is str:\n context['uk_call_date'] = datetime.datetime.strftime(datetime.datetime.strptime(call_date, '%Y-%m-%d'), '%d/%m/%Y')\n else:\n 
context['uk_call_date'] = call_date.strftime('%d/%m/%Y')\n\n except Exception as e:\n context['error'] = '{} {}'.format(e, traceback.format_exc())\n\n if not context['batch_ref'] and not context.get('error'):\n url = reverse('core_dd_drawdowns:view_batches') + '?invalid_due_date=1'\n return redirect(url)\n\n return render(request, template_name, context)", "def make_batch(self, batch_size):\n filenames = self.get_filenames()\n\n if os.path.isdir(filenames):\n num_records = len(os.listdir(filenames))\n print(\"Loading from directory. \" + str(num_records) + \" tfRecords found.\")\n files = tf.data.Dataset.list_files(filenames + \"/\" + \"*.tfrecord\").shuffle(num_records)\n dataset = files.apply(\n tf.contrib.data.parallel_interleave(\n lambda x: tf.data.TFRecordDataset(x, num_parallel_reads=256, buffer_size=8*1024*1024),\n cycle_length=32, sloppy=True)\n )\n else:\n print(\"Loading from single tfRecord...\")\n dataset = tf.data.TFRecordDataset(filenames + \".tfrecord\").repeat()\n \n dataset = dataset.map(self.parser, num_parallel_calls=128)\n \n if self.subset == 'train':\n min_queue_examples = int(\n Cifar10DataSet.num_examples_per_epoch(self.subset) * 0.4)\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)\n \n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n dataset = dataset.prefetch(10)\n \n iterator = dataset.make_one_shot_iterator()\n seq_batch, input_batch, map_batch, transformation_batch = iterator.get_next()\n\n return seq_batch, input_batch, map_batch, transformation_batch", "def upload_bulk_sms_file(batch_id, file_path):\n batch = Batch.objects.get(id=batch_id)\n batch.add_messages(read_messages_from_file(file_path))\n batch.status = Batch.PENDING\n batch.save()", "def batched_drawing(self, batch):\n self.label_object = Label(self.text, font_name=self.font_name, font_size=self.font_size,\n x=self.pos.x, y=self.pos.y, anchor_x=self.ANCHOR_X, anchor_y=self.ANCHOR_Y,\n batch=batch)\n\n # Rectangle objects are added to the batch \"automatically\" since it's a subclass of GraphicsObject.\n self.rectangle_object = Rectangle(self.width, self.height, pos=self.pos, hidden=True)", "def pack_data_into_batches(self, ids):\n\n # create buckets sorted by the number of src tokens\n # each bucket is also sorted by the number of tgt tokens\n buckets = {}\n for i, line_ids in enumerate(ids):\n len_ = len(line_ids)\n if len_ not in buckets:\n buckets[len_] = [i]\n else:\n buckets[len_].append(i)\n\n for b_idx in buckets:\n buckets[b_idx] = sorted(buckets[b_idx])\n\n buckets = OrderedDict(sorted(buckets.items()))\n\n batches = []\n batch_elem_lengths = []\n curr_batch = []\n len_of_longest_sent = 0\n for sent_len, bucket in buckets.items():\n for sent_i in bucket:\n if sent_len * (len(curr_batch) + 1) > self.tokens_in_batch:\n if not curr_batch:\n raise ValueError(\n f\"The limitation on number of tokens in batch {self.tokens_in_batch} is too strong.\"\n f\"Several sentences contain {sent_len} tokens.\"\n )\n batches.append(curr_batch)\n batch_elem_lengths.append(sent_len)\n curr_batch = []\n curr_batch.append(sent_i)\n len_of_longest_sent = sent_len\n if curr_batch:\n batches.append(curr_batch)\n batch_elem_lengths.append(len_of_longest_sent)\n return batches, batch_elem_lengths", "def init_batch(self):\n pass", "def process_batch_group(batch_name_list, instance_to_create):\n try:\n proc_inst = instance_to_create\n proc_inst.process(batch_name_list)\n 
except Exception, ex:\n print traceback.format_exc()\n raise ex", "def create(self, validated_data):\n return Job.objects.create(**validated_data)", "def generate_batch(\n batch: Tuple[Dict[str, Sequence[int]], List[Sequence[int]]]\n ) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:\n input_ids = torch.tensor([b[0][\"input_ids\"] for b in batch])\n attention_mask = torch.tensor([b[0][\"attention_mask\"] for b in batch])\n token_type_ids = torch.tensor([b[0][\"token_type_ids\"] for b in batch])\n labels = torch.tensor([b[1] for b in batch])\n features = {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n return features, labels", "def init_batch(self, src):\n batch, bos = src.size(1), self.src_dict.get_bos()\n return src.data.new(1, batch).fill_(bos)", "def embed_batch(self, batch: batches.TFBatch) -> tf.Tensor:\n data = batches.batch_to_components(batch, self._config.context_features,\n self._config.sequential_features)\n return self.embed_data(data)", "def test_invalid_batch_info_id(self):\n batch_info_id = '12121212121'\n self.batch_data['batch_id'] = batch_info_id\n resp = self.query_with_token(\n self.access_token_master,\n update_batch_info.format(**self.batch_data))\n\n self.assertIn('data', resp)\n self.assertIn(PRODUCTS_ERROR_RESPONSES[\n \"inexistent_batchinfo\"].format(batch_info_id),\n resp['errors'][0]['message'])", "def new_job(salesforce_session):\n return SalesforceBulkJob('update', 'Lead')", "def __CreateNewBlipData(self, wave_id, wavelet_id):\n blip_data = model.BlipData()\n blip_data.wave_id = wave_id\n blip_data.wavelet_id = wavelet_id\n blip_data.blip_id = 'TBD_' + str(random.random()).split('.')[1]\n return blip_data", "def batch_with_http_info(self, batch, **kwargs):\n\n all_params = ['batch']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method batch\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'batch' is set\n if ('batch' not in params) or (params['batch'] is None):\n raise ValueError(\"Missing the required parameter `batch` when calling `batch`\")\n\n\n collection_formats = {}\n\n resource_path = '/batch'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'batch' in params:\n body_params = params['batch']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse200',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def 
prepare_new_batch(self, batch_index):\n t = self._get_acquisition_index(batch_index)\n\n # Check if we still should take initial points from the prior\n if t < 0:\n return\n\n # Take the next batch from the acquisition_batch\n acquisition = self.state['acquisition']\n if len(acquisition) == 0:\n acquisition = self.acquisition_method.acquire(\n self.acq_batch_size, t=t)\n\n batch = arr2d_to_batch(\n acquisition[:self.batch_size], self.target_model.parameter_names)\n self.state['acquisition'] = acquisition[self.batch_size:]\n\n return batch", "def batch(self, batch, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.batch_with_http_info(batch, **kwargs)\n else:\n (data) = self.batch_with_http_info(batch, **kwargs)\n return data", "def wrapper(*args, **kwargs):\n batch_ref = kwargs.pop('batch_reference', None)\n r = func(*args, **kwargs)\n if batch_ref:\n for key in r.keys():\n r[key]['BatchReferenceName'] = batch_ref\n return r", "def upload_data_new_batch(tsca_id, latest_tsca_id, paths_to_batches_info, namespace, workspace, google_bucket_id):\n paths_to_batches_info_df = pd.read_excel(paths_to_batches_info, index_col=0)\n path_to_samples_info = paths_to_batches_info_df.loc[tsca_id, 'path_to_samples_info']\n\n # DF of remote [sample < > sample set ]\n remote_sample_sets = pd.read_table('remote_files/sample_set_membership_%s.tsv'%latest_tsca_id)\n # DF of remote [pair < > pair set]\n remote_pair_sets = pd.read_table('remote_files/pair_set_membership_%s.tsv'%latest_tsca_id)\n\n all_samples = get_samples(paths_to_batches_info, google_bucket_id)\n # Add cohorts for older batches\n all_samples = add_cohort_to_old_batches(all_samples)\n\n ##### Remove blacklisted samples ##\n # Blacklisted samples\n blacklisted = pd.read_table(\"samples_blacklist.txt\", header=None, names=[\"entity:sample_id\"])\n blacklisted_sample_ids = blacklisted[\"entity:sample_id\"].values.tolist()\n all_samples = all_samples[~all_samples[\"entity:sample_id\"].isin(blacklisted_sample_ids)]\n\n ########## Participants ##########\n print(\"Uploading participants...\") \n participants = prepare_participants_for_metadata_export(path_to_samples_info, tsca_id)\n r1 = save_and_upload_participants(participants, namespace, workspace, tsca_id)\n ##################################\n\n ########## Samples ############\n print(\"Uploading samples...\")\n batch_samples = prepare_batch_samples_for_metadata_export(path_to_samples_info, tsca_id, google_bucket_id)\n r2 = save_and_upload_samples(batch_samples, namespace, workspace, tsca_id)\n #################################\n\n ########## Pairs #############\n print(\"Uploading pairs...\")\n pairs = create_pairs_list(all_samples)\n r3 = save_and_upload_pairs(namespace, workspace, pairs)\n #################################\n\n ########## Sample Sets #########\n print(\"Uploading sample sets...\")\n batch_sample_set, batch_tumor_set, batch_normal_set = prepare_batch_sample_set_for_metadata_export(path_to_samples_info, tsca_id)\n # Remove the samples that have already been uploaded \n uploaded_sample_ids = remote_sample_sets['sample'].tolist()\n batch_sample_set_clean = batch_sample_set[~batch_sample_set['sample_id'].isin(uploaded_sample_ids)]\n batch_tumor_set_clean = batch_tumor_set[~batch_tumor_set['sample_id'].isin(uploaded_sample_ids)]\n batch_normal_set_clean = batch_normal_set[~batch_normal_set['sample_id'].isin(uploaded_sample_ids)]\n r4a, r4b, r4c = save_and_upload_batch_sample_sets(batch_sample_set_clean, batch_tumor_set_clean, 
batch_normal_set_clean, tsca_id, namespace, workspace)\n #################################\n\n ########## PoNs ###############\n print(\"Uploading PoNs...\")\n \n # Number of latest tsca id\n latest_tsca_id_int = int(re.findall('\\d+', latest_tsca_id )[0])\n # Array with list of all previous TSCA ids\n previous_tsca_ids = [\"TSCA%s\"%i for i in np.arange(14, latest_tsca_id_int+1)]\n previous_tsca_ids.insert(0, \"TSCA1213\")\n\n pon, name = create_panel_of_normals_advanced(tsca_id, all_samples,\\\n num_normals_per_cohort_involved = 3, \\\n batches_to_pick_from = previous_tsca_ids)\n\n # Only upload PoN if it hasn't been uploaded already\n if not name in remote_sample_sets['membership:sample_set_id'].unique().tolist():\n r5 = upload_pon(pon, name, namespace, workspace) \n else: \n print(\"PoN already exists...\")\n r5 = {}\n #################################\n \n ########## Pair Set ###########\n print(\"Uploading pair sets...\")\n # Upload cumulative pair sets\n tn_cum_pairsets, tp_cum_pairsets = prepare_cumulative_pairsets_for_metadata_export(pairs, tsca_id)\n r6 = upload_pairsets(namespace, workspace, tn_cum_pairsets, \"TN\")\n r7 = upload_pairsets(namespace, workspace, tp_cum_pairsets, \"TP\")\n\n # Batch pair sets\n tn_pairsets, tp_pairsets = prepare_batch_pairsets_for_metadata_export(all_samples, pairs, tsca_id)\n uploaded_pair_ids = remote_pair_sets['pair'].tolist()\n tn_pairsets_clean = tn_pairsets[~tn_pairsets['pair_id'].isin(uploaded_pair_ids)]\n tp_pairsets_clean = tp_pairsets[~tp_pairsets['pair_id'].isin(uploaded_pair_ids)]\n\n r8 = upload_pairsets(namespace, workspace, tn_pairsets_clean, \"TN\")\n r9 = upload_pairsets(namespace, workspace, tp_pairsets_clean, \"TP\")\n #################################\n\n return (r1, r2, r3, r4a, r4b, r4c, r5, r6, r7, r8, r9)", "def pad_batches(self, ids):\n\n batches = []\n for batch_elem_len, batch_sent_ids in zip(self.batch_elem_lengths, self.batch_sent_ids):\n batch = self.tokenizer.pad_id * np.ones((len(batch_sent_ids), batch_elem_len), dtype=np.int)\n for i, sentence_idx in enumerate(batch_sent_ids):\n batch[i][: len(ids[sentence_idx])] = ids[sentence_idx]\n batches.append(batch)\n return batches", "def prepare_batches(self, pairs, batch_size):\n\t\treturn MATHBatch.create_from_items(pairs, batch_size)", "def get_shipments_by_batch(auth, batch_id, base_url='https://api.cratejoy.com/v1/'):\n \n shipment_endpoint = '{}shipments/?batch_id={}'.format(base_url, batch_id)\n\n resp = requests.get(\n shipment_endpoint,\n auth=auth\n )\n\n print('GET request to {} responded with status '\n 'code: {}'.format(shipment_endpoint,\n resp.status_code))\n print(resp.content)", "def batchadd(batch_file_name):\n click.echo(\"Processing batch file...\")\n dio_dir: DioDir = DioDir()\n with open(batch_file_name, \"r\") as batch_file:\n reader = csv.DictReader(batch_file, fieldnames=[\"name\"])\n for row in reader:\n # if you don't do this they all have the same salt\n new_rand: str = str(random.randint(int(1e30), int(9e30)))\n new_peep: Person = Person(name=row[\"name\"], salt=new_rand)\n new_peep.save(dio_dir)\n click.echo(\"Finished processing batch file...\")", "def trigger_batch_job(parent_batch_id, job_input, job_params):\n job_name = job_params[\"jobName\"]\n job_modality = job_params[\"jobModality\"]\n\n batch_id = f\"{parent_batch_id}-{job_name}\"\n\n output_path = (\n f\"s3://{batch_processing_bucket_name}/batch_manifests/{job_modality}/{batch_id}/output\"\n )\n\n # If a label category file wasn't provided as API input, use the previous\n # job's 
label category file.\n label_category_config_uri = job_input.label_category_s3_uri\n if \"labelCategoryConfigS3Uri\" in job_params:\n label_category_config_uri = job_params[\"labelCategoryConfigS3Uri\"]\n\n # batch_job_input_data = event[\"batch_job_input\"]\n labeling_job_request = construct_labeling_job_input(\n parent_batch_id=parent_batch_id,\n input_manifest_url=job_input.input_manifest_s3_uri,\n audit_label_attribute_name=job_input.label_attribute_name,\n label_category_config_uri=label_category_config_uri,\n job_params=job_params,\n output_path=output_path,\n )\n\n sagemaker.create_labeling_job(**labeling_job_request)\n s3_output_path = f\"{output_path}/{job_name}/manifests/output/output.manifest\"\n\n db.insert_job_level_metadata(\n parent_batch_id=parent_batch_id,\n batch_id=batch_id,\n batch_status=BatchStatus.WAIT_FOR_SMGT_RESPONSE,\n labeling_job_name=job_name,\n label_attribute_name=labeling_job_request[\"LabelAttributeName\"],\n label_category_s3_uri=labeling_job_request[\"LabelCategoryConfigS3Uri\"],\n job_input_s3_uri=labeling_job_request[\"InputConfig\"][\"DataSource\"][\"S3DataSource\"][\n \"ManifestS3Uri\"\n ],\n job_output_s3_uri=s3_output_path,\n )", "def create_batch(client, generator: DataGenerator):\n try:\n event_data_batch = client.create_batch()\n for device in generator.devices:\n # event_data_batch.add(EventData(gen.generate_payload(device)))\n event_data_batch.add(EventData(generator.generate_payload(device)))\n return event_data_batch\n except Exception as e:\n print(str(e))", "def get_batch_list_from_batch_request(\n self, batch_request: Union[BatchRequest, RuntimeBatchRequest]\n ) -> List[Batch]:\n self._validate_batch_request(batch_request=batch_request)\n\n data_connector: DataConnector = self.data_connectors[\n batch_request.data_connector_name\n ]\n\n batch_definition_list: List[\n BatchDefinition\n ] = data_connector.get_batch_definition_list_from_batch_request(\n batch_request=batch_request\n )\n\n if isinstance(batch_request, RuntimeBatchRequest):\n # This is a runtime batch_request\n\n if len(batch_definition_list) != 1:\n raise ValueError(\n \"RuntimeBatchRequests must specify exactly one corresponding BatchDefinition\"\n )\n\n batch_definition = batch_definition_list[0]\n runtime_parameters = batch_request.runtime_parameters\n\n # noinspection PyArgumentList\n (\n batch_data,\n batch_spec,\n batch_markers,\n ) = data_connector.get_batch_data_and_metadata( # type: ignore[call-arg]\n batch_definition=batch_definition,\n runtime_parameters=runtime_parameters,\n )\n\n new_batch = Batch(\n data=batch_data,\n batch_request=batch_request,\n batch_definition=batch_definition,\n batch_spec=batch_spec,\n batch_markers=batch_markers,\n )\n\n return [new_batch]\n else:\n batches: List[Batch] = []\n for batch_definition in batch_definition_list:\n batch_definition.batch_spec_passthrough = (\n batch_request.batch_spec_passthrough\n )\n batch_data: Any # type: ignore[no-redef]\n batch_spec: PathBatchSpec # type: ignore[no-redef]\n batch_markers: BatchMarkers # type: ignore[no-redef]\n (\n batch_data,\n batch_spec,\n batch_markers,\n ) = data_connector.get_batch_data_and_metadata(\n batch_definition=batch_definition\n )\n new_batch = Batch(\n data=batch_data,\n batch_request=batch_request,\n batch_definition=batch_definition,\n batch_spec=batch_spec,\n batch_markers=batch_markers,\n )\n batches.append(new_batch)\n return batches", "def batch_info():\n return BatchInfo(\"UFG Hackathon\")", "def do_create(self, arg):\n if not arg:\n print(\"** class name missing 
**\")\n return\n if arg not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n obj = eval(arg + \"()\")\n obj.save()\n print(obj.id)", "def create_batches(self, X, y, batch_size, step):\n index_start = batch_size * step\n index_end = index_start + batch_size\n\n if index_end < X.shape[0]:\n batch_indices = np.arange(index_start, index_end)\n X_batch = X[batch_indices]\n y_batch = y[batch_indices]\n else:\n batch_indices = np.arange(index_start, X.shape[0])\n X_batch = X[batch_indices]\n y_batch = y[batch_indices]\n\n return X_batch, y_batch", "def post(self):\n try:\n body = json.loads(request.data.decode(\"utf-8\"))\n trainer = TrainerService.get_trainer_by_id(body[\"trainerId\"])\n trainer.role = body[\"trainerRole\"]\n register = TrainerService.assign_trainer_to_batch(\n trainer, body[\"batchId\"])\n return {\"result\": True}, 201\n except ValueError:\n return INVALID_ID_ERROR, 400\n except (KeyError, TypeError):\n return \"Invalid JSON body\", 400\n except ResourceNotFound as r:\n return r.message, 404", "def closed_job(created_job, bulk_request):\n bulk_request.side_effect = [\n '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <batchInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <id>BATCHONE</id>\n </batchInfo>\n ''',\n '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <batchInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <id>BATCHTWO</id>\n </batchInfo>\n ''',\n '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <batchInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n </batchInfo>\n ''',\n ]\n created_job.add_batch(['Id', 'Description'], [[1, 2], [3, 4]])\n created_job.add_batch(['Id', 'Description'], [[5, 6], [7, 8]])\n created_job.close()\n return created_job", "def rebatch(pad_idx, batch):\n return Batch(batch.src, batch.trg, pad_idx)", "def pack_batch(self, batch, device=None):\n return pack_batch(self.label_encoder, batch, device or self.device)", "def _send_batch(self, base_url, endpoint, batch, dataset_id=None, dataset_version=None, retries=0):\n try:\n params = {'data': base64.b64encode(json.dumps(batch).encode()).decode()}\n if dataset_id:\n params['dataset_id'] = dataset_id\n params['token'] = self.token\n if dataset_version:\n params['dataset_version'] = dataset_version\n response = self.request(base_url, [endpoint], params, 'POST')\n msg = \"Sent \" + str(len(batch)) + \" items on \" + time.strftime(\"%Y-%m-%d %H:%M:%S\") + \"!\"\n Mixpanel.LOGGER.debug(msg)\n return response\n except BaseException as be:\n Mixpanel.LOGGER.debug('Exception in _send_batch')\n Mixpanel.LOGGER.debug(be)\n Mixpanel.LOGGER.warning(\"Failed to import batch, dumping to file import_backup.txt\")\n with open('import_backup.txt', 'a+') as backup:\n json.dump(batch, backup)\n backup.write('\\n')", "def _create_batch_list(self, transactions):\r\n transaction_signatures = [t.header_signature for t in transactions]\r\n\r\n header = BatchHeader(\r\n signer_public_key=self._signer.get_public_key().as_hex(),\r\n transaction_ids=transaction_signatures\r\n ).SerializeToString()\r\n\r\n signature = self._signer.sign(header)\r\n\r\n batch = Batch(\r\n header=header,\r\n transactions=transactions,\r\n header_signature=signature)\r\n\r\n return BatchList(batches=[batch])", "def add_batch(self, batch):\n x, e, f = batch\n\n if self.is_empty:\n self.x = x\n self.e = e\n self.f = f\n\n else:\n self.x = ch.cat([self.x, x], dim=0)\n self.e = ch.cat([self.e, e], dim=0)\n self.f = ch.cat([self.f, f], dim=0)", "async def submit_batch(\n self,\n 
content_length: int,\n body: IO,\n timeout: Optional[int] = None,\n request_id_parameter: Optional[str] = None,\n **kwargs: Any\n ) -> AsyncIterator[bytes]:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = case_insensitive_dict(kwargs.pop(\"headers\", {}) or {})\n _params = case_insensitive_dict(kwargs.pop(\"params\", {}) or {})\n\n restype: Literal[\"container\"] = kwargs.pop(\"restype\", _params.pop(\"restype\", \"container\"))\n comp: Literal[\"batch\"] = kwargs.pop(\"comp\", _params.pop(\"comp\", \"batch\"))\n multipart_content_type: str = kwargs.pop(\n \"multipart_content_type\", _headers.pop(\"Content-Type\", \"application/xml\")\n )\n cls: ClsType[AsyncIterator[bytes]] = kwargs.pop(\"cls\", None)\n\n _content = body\n\n request = build_submit_batch_request(\n url=self._config.url,\n content_length=content_length,\n timeout=timeout,\n request_id_parameter=request_id_parameter,\n restype=restype,\n comp=comp,\n multipart_content_type=multipart_content_type,\n version=self._config.version,\n content=_content,\n template_url=self.submit_batch.metadata[\"url\"],\n headers=_headers,\n params=_params,\n )\n request = _convert_request(request)\n request.url = self._client.format_url(request.url)\n\n _stream = True\n pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [202]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n\n response_headers = {}\n response_headers[\"Content-Type\"] = self._deserialize(\"str\", response.headers.get(\"Content-Type\"))\n response_headers[\"x-ms-request-id\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-request-id\"))\n response_headers[\"x-ms-version\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-version\"))\n\n deserialized = response.stream_download(self._client._pipeline)\n\n if cls:\n return cls(pipeline_response, deserialized, response_headers) # type: ignore\n\n return deserialized # type: ignore", "def run(self, batch):\n response = self.post(batch)\n log.info(\"< Discarding batch response\")\n response.close()", "def _create_batch_file_handle_copy_request(\n file_handle_ids, obj_types, obj_ids, new_con_types, new_file_names\n):\n copy_file_handle_request = {\"copyRequests\": []}\n for (\n file_handle_id,\n obj_type,\n obj_id,\n new_con_type,\n new_file_name,\n ) in itertools.zip_longest(\n file_handle_ids, obj_types, obj_ids, new_con_types, new_file_names\n ):\n # construct JSON object for REST call\n curr_dict = {\n \"originalFile\": {\n \"fileHandleId\": file_handle_id,\n \"associateObjectId\": obj_id,\n \"associateObjectType\": obj_type,\n },\n \"newContentType\": new_con_type,\n \"newFileName\": new_file_name,\n }\n\n # add copy request to list of requests\n copy_file_handle_request[\"copyRequests\"].append(curr_dict)\n return copy_file_handle_request", "def create_node_batch(\n self,\n request: dds_20151201_models.CreateNodeBatchRequest,\n ) -> dds_20151201_models.CreateNodeBatchResponse:\n runtime = util_models.RuntimeOptions()\n return self.create_node_batch_with_options(request, runtime)", "def 
create(self):\n datagrid_json = self.__as_json()\n response = GsSession.current._post(f'{API}', datagrid_json, request_headers=DATAGRID_HEADERS)\n self.id_ = response['id']\n return response['id']", "def add_batch_job(self, name, job, cleanup=None, min_runtime=0):\n if name in self.jobs or name in self.batch_jobs:\n raise ValueError(\"job {} already exists\".format(name))\n self.batch_jobs[name]=(job,cleanup,min_runtime)", "def make_batch(filenames, batch_size):\n # Repeat infinitely.\n dataset = tf.data.TFRecordDataset(filenames).repeat()\n\n # Parse records.\n dataset = dataset.map(single_example_parser, num_parallel_calls=1)\n\n # Batch it up.\n dataset = dataset.batch(batch_size, drop_remainder=True)\n iterator = dataset.make_one_shot_iterator()\n\n image_batch, label_batch = iterator.get_next()\n return image_batch, label_batch", "def batch(self, batch_size=None):\n if self.gen_batches:\n assert batch_size is None, 'Cannot enforce a batch size if `func()` returns batches!'\n batch = self._queue.dequeue()\n for name, pl in self.placeholders.items():\n shape = pl.get_shape()\n if shape.ndims is not None:\n batch[name].set_shape(shape.as_list())\n\n else:\n batch = self._queue.dequeue_many(batch_size)\n\n return Struct.make(batch)", "def GroundExcelAddId(builder, Id):\n return AddId(builder, Id)", "def post_bill(self, bill: Bill, bill_lineitems: List[BillLineitem]):\n bills_payload = self.__construct_bill(bill, bill_lineitems)\n created_bill = self.connection.bills.post(bills_payload)\n return created_bill", "def _get_input_batch(\n factory: Optional[TriplesFactory] = None,\n # exactly one of them is None\n head: Union[None, int, str] = None,\n relation: Union[None, int, str] = None,\n tail: Union[None, int, str] = None,\n) -> Tuple[Target, torch.LongTensor, Tuple[int, int]]:\n # create input batch\n batch_ids: List[int] = []\n target: Optional[Target] = None\n if head is None:\n target = LABEL_HEAD\n else:\n if not isinstance(head, int):\n if factory is None:\n raise ValueError(\"If head is not given as index, a triples factory must be passed.\")\n head = factory.entity_to_id[head]\n batch_ids.append(head)\n if relation is None:\n target = LABEL_RELATION\n else:\n if not isinstance(relation, int):\n if factory is None:\n raise ValueError(\"If relation is not given as index, a triples factory must be passed.\")\n relation = factory.relation_to_id[relation]\n batch_ids.append(relation)\n if tail is None:\n target = LABEL_TAIL\n else:\n if not isinstance(tail, int):\n if factory is None:\n raise ValueError(\"If tail is not given as index, a triples factory must be passed.\")\n tail = factory.entity_to_id[tail]\n batch_ids.append(tail)\n if target is None or len(batch_ids) != 2:\n raise ValueError(\n f\"Exactly one of {{head, relation, tail}} must be None, but got {head}, {relation}, {tail}\",\n )\n\n batch = cast(torch.LongTensor, torch.as_tensor([batch_ids], dtype=torch.long))\n return target, batch, (batch_ids[0], batch_ids[1])", "def _get_batch(self):\n url = self._base_url + urlConfig.URLS['Project'] + '/' + self._project_id + '/batch'\n response = apiCall.get(self._get_token(), url,self._proxy, {}, 10)\n logging.debug(response)\n return response", "def batch_creator(batch_size, dataset_length, dataset_name):\n # batch_size = 128\n # dataset_length = 6000\n batch_mask = rng.choice(dataset_length, batch_size)\n\n batch_x = eval('x_' + dataset_name)[[batch_mask]].reshape(-1, input_num_units)\n batch_x = preproc(batch_x)\n\n if dataset_name == 'train':\n batch_y = eval('y_' + 
dataset_name)[[batch_mask]]\n batch_y = dense_to_one_hot(batch_y)\n\n return batch_x, batch_y", "def batched_drawing(self, batch):\n Label(self.text, font_name=self.font_name, font_size=self.font_size,\n x=self.pos.x, y=self.pos.y, anchor_x=self.ANCHOR_X, anchor_y=self.ANCHOR_Y,\n batch=batch)" ]
[ "0.69767964", "0.69643956", "0.6652114", "0.6631825", "0.61280215", "0.6110446", "0.60883904", "0.60016143", "0.59871805", "0.5886255", "0.5873244", "0.58249927", "0.5809669", "0.57384014", "0.5699515", "0.56257725", "0.56246674", "0.5616465", "0.551616", "0.53761625", "0.537489", "0.5363888", "0.53520834", "0.5332634", "0.53145397", "0.53142214", "0.53026015", "0.5297402", "0.52970856", "0.5290663", "0.5290247", "0.5272419", "0.5206693", "0.52052855", "0.51627433", "0.51509553", "0.51485455", "0.5139469", "0.51377463", "0.51345426", "0.5099114", "0.50856704", "0.5085249", "0.50810266", "0.5056461", "0.50377053", "0.5036115", "0.5027716", "0.50273013", "0.5025793", "0.50202835", "0.49959224", "0.49898556", "0.49824625", "0.4959361", "0.49548665", "0.49535763", "0.49509448", "0.49504757", "0.49488342", "0.4942484", "0.49416688", "0.4932001", "0.49286884", "0.49240318", "0.49161962", "0.49021393", "0.48927653", "0.4880752", "0.48719633", "0.4865065", "0.4861297", "0.48563942", "0.4852083", "0.4851219", "0.48490942", "0.48469177", "0.4843677", "0.4841361", "0.48389778", "0.48187593", "0.48139596", "0.4794263", "0.47907913", "0.47895193", "0.47781748", "0.47777453", "0.4777548", "0.47764626", "0.47725424", "0.4766648", "0.4764181", "0.4754354", "0.47501186", "0.47479397", "0.47449175", "0.47416848", "0.47370368", "0.47351313", "0.47322702" ]
0.7599396
0
Create a new input resource file object representing a single file.
def read_input(self, path: str) -> _resource.InputResourceFile:
    irf = self._new_input_resource_file(path)
    return irf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_file_object(inputfile=None):\n if type(inputfile) == str:\n return open(inputfile, 'r')\n return inputfile", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n content_type: Optional[pulumi.Input[Union[str, 'FileImportContentType']]] = None,\n file_import_id: Optional[pulumi.Input[str]] = None,\n import_file: Optional[pulumi.Input[pulumi.InputType['FileMetadataArgs']]] = None,\n ingestion_mode: Optional[pulumi.Input[Union[str, 'IngestionMode']]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n source: Optional[pulumi.Input[str]] = None,\n workspace_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def from_file(cls, path_src):\n cp_cond = [os.path.exists(path_src), os.path.isfile(path_src),\n len(path_new) != 0]\n content = \"\"\n\n # read input from file\n if cp_cond[0] and cp_cond[1]:\n with open(path_src) as f:\n content = f.read()\n\n # connect object with file content\n return cls(path_src, inp_string=content, to_file=False)", "def __init__(__self__,\n resource_name: str,\n args: FileImportArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def file(self):\n file = BytesIO(b\"the_file\")\n file.name = \"the file name\"\n return File(file)", "def file(self):\n file = BytesIO(b\"the_file\")\n file.name = self.name\n return File(file)", "def __init__(__self__, *,\n content_type: pulumi.Input[Union[str, 'FileImportContentType']],\n import_file: pulumi.Input['FileMetadataArgs'],\n ingestion_mode: pulumi.Input[Union[str, 'IngestionMode']],\n resource_group_name: pulumi.Input[str],\n source: pulumi.Input[str],\n workspace_name: pulumi.Input[str],\n file_import_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"content_type\", content_type)\n pulumi.set(__self__, \"import_file\", import_file)\n pulumi.set(__self__, \"ingestion_mode\", ingestion_mode)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"source\", source)\n pulumi.set(__self__, \"workspace_name\", workspace_name)\n if file_import_id is not None:\n pulumi.set(__self__, \"file_import_id\", file_import_id)", "def input_file(ddir, file_prefix):\n name = autofile.name.input_file(file_prefix)\n return factory.DataFile(ddir=ddir, name=name)", "def make_instance(self, include_optional):\n # model = yapily.models.resource.Resource() # noqa: E501\n if include_optional :\n return Resource(\n description = '0', \n file = yapily.models.file.File(\n absolute = True, \n absolute_file = yapily.models.file.File(\n absolute = True, \n absolute_path = '0', \n canonical_file = yapily.models.file.File(\n absolute = True, \n absolute_path = '0', \n canonical_path = '0', \n directory = True, \n file = True, \n free_space = 56, \n hidden = True, \n name = '0', \n parent = '0', \n parent_file = yapily.models.file.File(\n absolute = True, \n absolute_path = '0', \n canonical_path = '0', \n directory = True, \n file = True, \n free_space = 56, \n hidden = True, \n name = '0', \n parent = '0', \n path = '0', \n total_space = 56, \n usable_space = 56, ), \n path = '0', \n total_space = 56, \n usable_space = 56, ), \n canonical_path = '0', \n directory = True, \n file = True, \n free_space = 56, \n hidden = True, \n name = '0', \n parent = '0', \n parent_file = yapily.models.file.File(\n absolute = True, \n absolute_path = '0', \n canonical_path = '0', \n directory = True, \n file = True, \n free_space = 56, \n hidden = True, \n name = '0', \n parent = '0', \n path = '0', \n total_space = 56, 
\n usable_space = 56, ), \n path = '0', \n total_space = 56, \n usable_space = 56, ), \n absolute_path = '0', \n canonical_file = yapily.models.file.File(\n absolute = True, \n absolute_path = '0', \n canonical_path = '0', \n directory = True, \n file = True, \n free_space = 56, \n hidden = True, \n name = '0', \n parent = '0', \n path = '0', \n total_space = 56, \n usable_space = 56, ), \n canonical_path = '0', \n directory = True, \n file = True, \n free_space = 56, \n hidden = True, \n name = '0', \n parent = '0', \n parent_file = yapily.models.file.File(\n absolute = True, \n absolute_path = '0', \n canonical_path = '0', \n directory = True, \n file = True, \n free_space = 56, \n hidden = True, \n name = '0', \n parent = '0', \n path = '0', \n total_space = 56, \n usable_space = 56, ), \n path = '0', \n total_space = 56, \n usable_space = 56, ), \n filename = '0', \n input_stream = None, \n open = True, \n readable = True, \n uri = yapily.models.uri.URI(\n absolute = True, \n authority = '0', \n fragment = '0', \n host = '0', \n opaque = True, \n path = '0', \n port = 56, \n query = '0', \n raw_authority = '0', \n raw_fragment = '0', \n raw_path = '0', \n raw_query = '0', \n raw_scheme_specific_part = '0', \n raw_user_info = '0', \n scheme = '0', \n scheme_specific_part = '0', \n user_info = '0', ), \n url = yapily.models.url.URL(\n authority = '0', \n content = yapily.models.content.content(), \n default_port = 56, \n file = '0', \n host = '0', \n path = '0', \n port = 56, \n protocol = '0', \n query = '0', \n ref = '0', \n user_info = '0', )\n )\n else :\n return Resource(\n )", "def create_initialised_input(self):\n input_creator = InputCreator(self.times, self.inputs,\n params=self.params, outputs=self.outputs)\n f_out = input_creator.initialised_creation(self.burn_in)\n\n if self.debug:\n print(f_out.getvalue(), file=sys.stderr)\n f_out.seek(0)\n\n self.input_file = f_out.getvalue()\n pprint.pprint(self.input_file)\n return self.input_file", "def __init__(self, input_filename='input.txt', output_filename='output.txt'):\n self._input = input_filename\n self._output = output_filename\n self._fin = open(self._input, 'r')\n self._fout = open(self._output, 'w')", "def _get_file_object(infilename):\n\n _, extension = os.path.splitext(infilename)\n if extension.lower() == '.spe':\n return parsers.SpeFile(infilename)\n elif extension.lower() == '.spc':\n return parsers.SpcFile(infilename)\n elif extension.lower() == '.cnf':\n return parsers.CnfFile(infilename)\n else:\n raise NotImplementedError(\n 'File type {} can not be read'.format(extension))", "def makeFileResource(path):\n\n path = unwrapStr(path)\n segments = [segment.encode(\"utf-8\") for segment in path.split(u'/')]\n if not path.startswith(u'/'):\n # Relative path.\n segments = os.getcwd().split('/') + segments\n log.log([\"fs\"], u\"makeFileResource.run/1: Relative path '%s'\" % path)\n return FileResource(segments)", "def __init__(self, file: IO, mode: str = \"single\"):\n self.file = file\n super().__init__(mode=mode)", "def getResource(self, file_name):\n path = os.path.join(os.path.dirname(__file__), \"resource\", file_name)\n return open(path)", "def create_from_file(cls, path):\n\n with open(path, 'r') as file:\n # Possible FileNotFound.\n text = file.read()\n return cls.create_from_string(text)", "def inputfile(self, path):\n if isinstance(path, Path):\n path = str(path)\n if self.default_remote_provider is not None:\n path = self.modifier.modify_path(path)\n return IOFile(path)", "def __init__(self, owner, 
resourceFile):\n log.debug(u\"init resourceFile=%s\" % resourceFile)\n self._storageName = self._fn2ascii(resourceFile)\n self._userName = resourceFile.encode('utf-8')\n self._originalFile = resourceFile\n try:\n self.checksum = resourceFile.md5\n from exe.engine.idevice import Idevice\n if isinstance(owner, Idevice):\n self._idevice = owner\n if owner.parentNode:\n self.package = owner.parentNode.package\n else:\n self.package = None\n else:\n self._idevice = None\n self.package = owner\n finally:\n del self._originalFile", "def create(self):\n self.file = open(self.filename, \"xb\", buffering=self.bufferSize)", "def __new__(cls, name, bases, attrs):\n new_class = super(DasFileMetaclass, cls).__new__(cls, name, bases,\n attrs)\n opts = getattr(new_class, 'Meta', None)\n new_class._meta = DasResourceOptions(opts)\n # Note that ResourceOptions and DasResourceOptions both get called.\n filename = getattr(new_class._meta, \"filename\")\n filetype = getattr(new_class._meta, \"filetype\", None)\n\n if not filetype or filetype == '' and name != 'DasResource':\n global FILETYPES\n try:\n extension = filename.split(\".\")[1]\n if extension in FILETYPES:\n filetype = extension\n setattr(new_class._meta, \"filetype\", filetype)\n else:\n raise KeyError(\"Bleg No extension of filename found\")\n\n except IndexError:\n raise KeyError(\"No extension of filename found\")\n else:\n # Check if it is a valid filetype\n pass\n return new_class", "def __init__(self, file: str):\n self._file = file", "def __init__(self, file_name: Optional[str] = None):\n self.entries = OrderedDict() # Dict[str, PathElement]\n self.file_name = file_name # input file for logging\n self.jsonf = None # json image of input file\n self.namespaces = Namespaces('http://hl7.org/fhir/StructureDefinition/')\n self.path_map = {} # map from path to name (Dict[str, str])", "def __init__(self, input_file=None, output_file=None, gt_file=None):\n\n self.input_file = input_file\n self.output_file = output_file\n self.gt_file = gt_file\n\n #print('Created object: ', musicxml_file)", "def from_file(cls, f, **kwargs):\n if isinstance(f, string_types):\n with open(f, 'rb') as f:\n return cls(value=f.read(), **kwargs)\n else:\n if 'format' not in kwargs:\n ext = os.path.splitext(f)[1]\n if ext:\n kwargs['format'] = ext[1:] # remove the .\n return cls(value=f.read(), **kwargs)", "def _constructInstance(self, container, id, *args, **kw):\n file, title = None, ''\n id = container.manage_addProduct['OFSP'].manage_addFile(id, file, title)\n return container.get(id, None)", "def create_default_input(self):\n input_creator = InputCreator(self.times, self.inputs)\n self.input_file = input_creator.default_creation().getvalue()\n\n return self.input_file", "def from_file(cls, slots, fileobj, offset = 0):\n return cls.from_fileno(slots, fileobj.fileno(), offset)", "def __init__(self, filename):\n self.filename = filename\n self.file = open(filename, \"w\")", "def test_single_file_resource(self):\n year = random.randint(2001, 2020)\n name = \"eia923-%d.zip\" % year\n size = random.randint(500000, 800000)\n\n md5_hash = random.choice([\n \"4bd7e1025c91c00b50b6cef87cb9bfad\",\n \"883895453cb3144b97d0095472f6136e\",\n \"c271dfc0ca452b6582f0e592f57351ef\"])\n\n url = \"https://zenodo.org/api/deposit/depositions/%d/files/%s\" % (\n random.randint(10000, 99999), uuid.uuid4())\n\n fake_resource = {\n \"filename\": name,\n \"links\": {\"download\": url},\n \"filesize\": size,\n \"checksum\": md5_hash\n }\n\n package = eia923_raw.datapackager([fake_resource])\n res = 
package[\"resources\"][0]\n\n assert(res[\"name\"] == name)\n assert(res[\"title\"] == \"eia923-%d\" % year)\n assert(res[\"path\"] == url)\n assert(res[\"parts\"][\"year\"] == year)\n assert(res[\"remote_url\"] == url)\n\n assert(res[\"mediatype\"] == \"application/zip\")\n assert(res[\"format\"] == \"zip\")\n\n assert(res[\"bytes\"] == size)\n assert(res[\"hash\"] == md5_hash)", "def from_file(self, path, **kwargs):\n\t\twith codecs.open(path, 'r', encoding='utf-8') as file_h:\n\t\t\tsource = file_h.read()\n\t\treturn self.from_string(source, **kwargs)", "def file_factory(test_workspace):\n\n return FileCreator(test_workspace)", "def __enter__(self):\n self.fileobject = open(self.filename, 'r')\n return self", "def __init__(self, input_file):\n self.file_name = input_file\n # Import the excel file:\n self.xlfile = ExcelFile(self.file_name) # to retrieve & work w/ input", "def file(self, path: str) -> File:\n _args = [\n Arg(\"path\", path),\n ]\n _ctx = self._select(\"file\", _args)\n return File(_ctx)", "def file(self, path: str) -> \"File\":\n _args = [\n Arg(\"path\", path),\n ]\n _ctx = self._select(\"file\", _args)\n return File(_ctx)", "def file(self, path: str) -> \"File\":\n _args = [\n Arg(\"path\", path),\n ]\n _ctx = self._select(\"file\", _args)\n return File(_ctx)", "def create_input_file(self, polymer_identifier, format, outpath):\n\n\t\tsmiles = self.get_smiles_from_identifier(polymer_identifier)\n\t\t\n\t\tresult = generate_input_files(smiles, format)\n\t\twith open(outpath, 'w+') as f:\n\t\t\tf.write(result)", "def __init__(self, filename, binary_file=None):\n BaseRawIO.__init__(self)\n self.filename = filename\n self.binary_file = binary_file", "def Input(day):\n filename = 'input/input{}.txt'.format(day)\n try:\n return open(filename)\n except FileNotFoundError:\n raise AssertionError(\"Input file not found.\")", "def __init__(self, file):\n self._file = file", "def __init__(self,file_reader):\n self.file_reader = file_reader", "def make_input(args, stdin=sys.stdin, url_or_filename=None):\n\n if url_or_filename is None:\n url_or_filename = args.infile\n\n # JSONPath selector\n selector = args.selector\n\n return hxl.input.make_input(\n url_or_filename or stdin,\n make_input_options(args),\n )", "def _get_file_data_object(self, file_path, spec_header):\n logger.DLOG(\"Creating FVaRFileData object from file '%s'\", file_path)\n return acm.Risk().CreateScenarioFileData(file_path,\n spec_header.DelimiterChar(), spec_header.CommentChar())", "def __init__(self, filename, validate=True):\n pass", "def get_file(self, filename):\r\n\r\n return File.from_name(self, filename)", "def __init__(self,file):\n self.file = file", "def from_file(cls, file_obj):\n kdm = cls(file_obj.read())\n return kdm", "def from_filename(cls, filename, query_sacl=False, flags=DEFAULT_SECURITY_INFORMATION):\n return cls._from_name_and_type(filename, gdef.SE_FILE_OBJECT, flags=flags, query_sacl=query_sacl)", "def getFile(self, resource):\n resource = self.parseUrl(resource, 'files')\n\n res = self.getRequest(resource)\n fObj = vsdModels.File(**res)\n return fObj", "def from_file(cls, path):\n raise NotImplementedError", "def open_resource(self, resource):\n # type: (Text) -> BinaryIO\n # This deliberately raises FileNotFoundError instead of\n # NotImplementedError so that if this method is accidentally called,\n # it'll still do the right thing.\n raise FileNotFoundError", "def node_file_create(ctx, filename, content, from_stdin):\n if from_stdin:\n content = 
click.get_text_stream('stdin').read(8196)\n\n try:\n ctx.obj['node'].create_file(filename, content=content)\n except TimeoutError as e:\n logger.error('Error: %s' % e)\n exit(1)", "def __init__(self, path_to_the_file):", "def __init__(self, path_to_the_file):", "def __init__(self, file, name):\n if isinstance(file, ContentFile):\n image_data = file\n else:\n image_data = ContentFile(file.read(), name=name)\n self.image_data = image_data\n file.close()", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'FileImport':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = FileImportArgs.__new__(FileImportArgs)\n\n __props__.__dict__[\"content_type\"] = None\n __props__.__dict__[\"created_time_utc\"] = None\n __props__.__dict__[\"error_file\"] = None\n __props__.__dict__[\"errors_preview\"] = None\n __props__.__dict__[\"files_valid_until_time_utc\"] = None\n __props__.__dict__[\"import_file\"] = None\n __props__.__dict__[\"import_valid_until_time_utc\"] = None\n __props__.__dict__[\"ingested_record_count\"] = None\n __props__.__dict__[\"ingestion_mode\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"source\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"total_record_count\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"valid_record_count\"] = None\n return FileImport(resource_name, opts=opts, __props__=__props__)", "def __init__(self, \n save_data_folder: str,\n reader:FileReader = None,\n input_file:str = None,\n *args, **kwargs):\n \n if reader:\n self.files, self.attr_names = reader.read_file(input_file, *args, **kwargs)\n \n self.save_data_folder = Path(save_data_folder)\n self.save_data_folder.mkdir(parents=True, exist_ok=True)\n BaseProcess.set_logger('generator.log')", "def open_from(self, f: BinaryIO):\n raise NotImplementedError", "def __init__(self, filename=None, **kwargs):\n self.filename = filename\n if filename:\n self.read(**kwargs)", "def __init__(self, filename, offset):\r\n self.__input__ = open(filename, 'rb')\r\n self.__input__.seek(offset, FROM_START)", "def createInputFile(self):\r\n\r\n input_variables = []\r\n\r\n for variable in self.modelDescription.modelVariables:\r\n if variable.causality == 'input':\r\n input_variables.append(variable)\r\n\r\n if len(input_variables) == 0:\r\n QMessageBox.warning(self,\r\n \"Cannot create input file\",\r\n \"The input file cannot be created because the model has no input variables\")\r\n return\r\n\r\n filename, _ = os.path.splitext(self.filename)\r\n\r\n filename, _ = QFileDialog.getSaveFileName(parent=self,\r\n caption=\"Save Input File\",\r\n directory=filename + '_in.csv',\r\n filter=\"Comma Separated Values (*.csv);;All Files (*.*)\")\r\n\r\n if not filename:\r\n return\r\n\r\n with open(filename, 'w') as f:\r\n\r\n # column names\r\n f.write('\"time\"')\r\n for variable in input_variables:\r\n f.write(',\"%s\"' % variable.name)\r\n f.write('\\n')\r\n\r\n # example data\r\n f.write(','.join(['0'] * (len(input_variables) + 1)) + '\\n')\r\n\r\n self.ui.inputFilenameLineEdit.setText(filename)", "def __init__(self, filename_or_data, mode='r'):\n\n if mode[0] in ['r', 'a', 'w']:\n if mode == 'r':\n # force universal read mode\n mode = 'rU'\n self.__fobj = open(filename_or_data, mode)\n elif mode == 'f':\n self.__fobj = filename_or_data\n elif mode == 's':\n self.__fobj = StringIO.StringIO(filename_or_data)\n else:\n msg = \"mode 
string must start with 'r', 'a', 'w', 'f' or 's', \\\n not '%s'\" % mode[0]\n raise ValueError(msg)\n self.__mode = mode", "def __init__(self, resource):\n if resource:\n self.__resource = str(os.path.realpath(str(resource)));\n else:\n self.__resource = '';", "def from_file(\n filename: str, *, formatter: Optional[ModelFormatter] = None\n ) -> \"Model\":\n # change the cwd to the the directory containing the file\n filename = os.path.abspath(filename)\n cwd = os.getcwd()\n dir, _ = os.path.split(filename)\n os.chdir(dir)\n\n # parse the file\n with open(filename, \"r\") as file:\n component = Model.from_string(file.read(), formatter=formatter)\n file.close()\n\n # restore the cwd\n os.chdir(cwd)\n\n return component", "def test_adaptToResource(self):\n res = IResource(self.testObject)\n self.assertIsInstance(res, File)\n self.assertEquals(res.fp, self.testObject.content)\n self.assertEquals(res.type, 'application/octet-stream')\n self.assertEquals(res.encoding, None)", "def __init__(self, file_path: str):\n\n super().__init__(file_path)\n self.reader = None", "def _read_input_file(self):\n pass", "def __init__(self, file_name):\n self.file_name = file_name", "def __init__(self, file=None, data=None, inputastext=None,\n outputastext=None):\n self.file, self.data = file, data\n self.inputastext, self.outputastext = inputastext, outputastext", "def fromFile(cls, filepath):\r\n return cls(values=foamFileFromFile(filepath, cls.__name__))", "def create_object_from_file(self, bucket_name, object_name, filename):\n\n return h3lib.create_object_from_file(self._handle, bucket_name, object_name, filename, self._user_id)", "def openInputFile(infile, *args):\n if infile is None:\n logging.info(\"Reading input from STDIN\")\n return sys.stdin\n\n if isinstance(infile, str):\n if urlRE.match(infile):\n import urllib2\n return urllib2.urlopen(infile)\n if len(infile)>3 and infile[-3:]=='.gz':\n import gzip\n return gzip.GzipFile(infile,'rb')\n elif len(infile)>4 and infile[-4:]=='.bz2':\n import bz2\n return bz2.BZ2File(infile,'rb')\n else:\n return open(infile,'rt')\n else:\n return infile", "def __init__(self, filename):\r\n self.__output__ = open(format(filename, '08X') + '.gen', 'wb')", "def read(cls, filename):\n return cls(filename)", "def open(self):\n self.f = open(self.join(self.fname), 'rb')", "def __init__(self, filename):\r\n\r\n self.filename = filename", "def _ReadRecipeFromFileObject(\n self, file_object: Union[StringIO, TextIOWrapper, TextIO]) -> Recipe:\n json_dict = json.load(file_object)\n\n description = json_dict['description']\n del json_dict['description']\n\n args = []\n for arg_list in json_dict['args']:\n args.append(RecipeArgs(*arg_list))\n del json_dict['args']\n\n return resources.Recipe(description, json_dict, args)", "def new_file(self, *args, **kwargs):\n super().new_file(*args, **kwargs)\n self.file = TemporaryUploadedFile(self.file_name, self.content_type, 0, self.charset, self.content_type_extra)", "def __init__(self, filename):\n self.filename = filename", "def make_source(args, stdin=STDIN):\n\n infile = args.infile\n if infile is None:\n infile = stdin\n\n return hxl.input.data(infile, make_input_options(args))", "def _create_file_entry(self, path: str, file_id: str, comp_matrix: str or None,\n compensate: bool, catch_standardisation_errors: bool,\n control: bool = False) -> File or None:\n try:\n fcs = FCSFile(path, comp_matrix=comp_matrix)\n except ValueError as e:\n print(f'Unable to load data from {path}; encountered the following exception: {e}')\n return None\n 
new_file = File()\n new_file.file_id = file_id\n if compensate:\n fcs.compensate()\n new_file.compensated = True\n if control:\n new_file.file_type = 'control'\n data = fcs.dataframe\n column_mappings = self.panel.standardise(data, catch_standardisation_errors)\n if column_mappings is None:\n print(f'Error: invalid channel/marker mappings for {file_id}, at path {path}, aborting.')\n return None\n new_file.put(data.values)\n new_file.channel_mappings = [ChannelMap(channel=c, marker=m) for c, m in column_mappings]\n return new_file", "def create_resource(self, **kwargs):\n results = self.api.action.resource_create(**kwargs)\n # TODO: use `results` rather than re-download, using an isolation layer to standardize the re-structure\n self.get_ckan_metadata(True)\n if 'id' in results:\n self._import_resource_to_cache(kwargs['upload'], results['id'])\n return results", "def __init__(self, file_path: str, mode: str = \"single\"):\n self.file_path = file_path\n super().__init__(mode=mode)", "def __init__(self, path, input_type='f'):\n if input_type == 'f':\n file = open(path, 'r')\n elif input_type == 's':\n file = path\n else:\n raise exceptions.BadInputError(f\"invalid input type {input_type}\")\n\n pdl = yaml.safe_load(file)\n\n self.type_checks = {\n 'typedef': self.validate_typedef,\n 'component': self.validate_component,\n 'graph': self.validate_graph,\n }\n\n self.imports = []\n if 'import' in pdl:\n self.imports = pdl['import']\n\n self.namespace = pdl['name']\n self.body = pdl['body']\n self.typedefs = {}\n self.components = []\n self.graphs = []\n self.validate()", "def from_file(cls, filepath):\n fp = open(filepath, 'rb')\n return cls(fp)", "def test_custom_local_input_file() -> None:\n with tempfile.TemporaryDirectory() as tmpdirname:\n file_location = os.path.join(tmpdirname, \"foo.txt\")\n with open(file_location, \"wb\") as write_file:\n write_file.write(b\"foo\")\n\n # Confirm that the file initially exists\n assert os.path.exists(file_location)\n\n # Instantiate the input file\n absolute_file_location = os.path.abspath(file_location)\n input_file = PyArrowFileIO().new_input(location=f\"{absolute_file_location}\")\n\n # Test opening and reading the file\n f = input_file.open()\n data = f.read()\n assert data == b\"foo\"\n assert len(input_file) == 3", "def __init__(self, fileformat='POSCAR', filename=None, \\\n lattice=None, atom_type=None, composition=None, coordinate=None):\n if fileformat == 'POSCAR':\n self.from_POSCAR(filename)\n elif fileformat == 'cif':\n self.from_cif(filename)\n else:\n self.from_dict(lattice, atom_type, composition, coordinate)", "def open_input(name=None):\n return Input(name)", "def __init__(self, output_file):\n self.file = open(output_file, \"w\")", "def __init__(self, filename=None):\n self.content = dict()\n if filename and os.path.exists(filename):\n self.parse(filename)\n elif filename:\n self.new(filename)", "def __init__(self, file_pattern, validate=True, **nucleus_kwargs):\n\n super(ReadGenomicsFile, self).__init__()\n self._source = self._source_class(\n file_pattern, validate=validate, **nucleus_kwargs)", "def create_from_file(\n cls,\n model_file_path: str,\n index_file_path: Optional[str] = None) -> \"ImageSearcher\":\n options = ImageSearcherOptions(\n base_options=_BaseOptions(file_name=model_file_path),\n search_options=_SearchOptions(index_file_name=index_file_path))\n return cls.create_from_options(options)", "def fromFile(cls, filePath):\n with open(filePath, 'rb') as f:\n return cls(f.read())", "def file(self, id: FileID) -> File:\n 
_args = [\n Arg(\"id\", id),\n ]\n _ctx = self._select(\"file\", _args)\n return File(_ctx)", "def _create_from_template(self):\n template_file = self._helper._get_template_file_path()\n self._engine.open_file_by_path(template_file)\n self._save_current_as_new()", "def __init__(self, file_name=None, file_object=None, pdb_code=None):\n self.line_number = 0\n if file_name is not None:\n assert file_object is None\n assert pdb_code is None\n self.file_object = open(file_name)\n elif file_object is not None:\n assert pdb_code is None\n self.file_object = file_object\n elif pdb_code is not None:\n self.file_object = mmcif_files.getFile(pdb_code)\n else:\n raise ValueError(\"No input file given\")", "def __init__(self, file_format, location):\n Reader.__init__(self, file_format, location)", "def open_any(filename):\n if filename == '-':\n fh = sys.stdin\n elif filename[-3:] == '.gz':\n fh = GzipFile(filename, 'r')\n elif filename[-4:] == '.bz2':\n fh = BZ2File(filename, 'r')\n else:\n fh = open(filename, 'r')\n\n return fh", "def __init__(self, owner, resourceFile):\n self.checksum = Path(resourceFile).md5 # Just use the path name as a unique ID\n _Resource.__init__(self, owner, resourceFile)\n if self._idevice:\n self._idevice.userResources.append(self)", "def from_text_file(cls, filename):\n raise NotImplementedError()" ]
[ "0.6874475", "0.6812216", "0.65446967", "0.65262955", "0.6509462", "0.6438844", "0.6272451", "0.62469476", "0.61195093", "0.60929656", "0.6091407", "0.60815406", "0.60800767", "0.6025725", "0.5954721", "0.59426826", "0.59278876", "0.59035605", "0.5878973", "0.5866811", "0.58572775", "0.58512354", "0.5825164", "0.58176243", "0.58088", "0.5808197", "0.57923186", "0.5787447", "0.57869726", "0.5781285", "0.5761263", "0.5751185", "0.5747586", "0.57463723", "0.5740563", "0.5740563", "0.573237", "0.5727571", "0.5725648", "0.5721482", "0.57091165", "0.56717986", "0.56664795", "0.56600636", "0.5658057", "0.5652436", "0.56501937", "0.5635885", "0.56350607", "0.56263727", "0.5619574", "0.5611", "0.5609427", "0.5609427", "0.5608252", "0.5600325", "0.5593788", "0.5586023", "0.5585399", "0.5583643", "0.5567117", "0.5559535", "0.55568135", "0.55553913", "0.554939", "0.55454016", "0.55371374", "0.55370164", "0.5534543", "0.55343187", "0.55251145", "0.55245256", "0.5524043", "0.5497069", "0.5492893", "0.54911345", "0.54803735", "0.5477495", "0.5477434", "0.5473509", "0.54700845", "0.5457344", "0.5457255", "0.5454551", "0.544711", "0.5436324", "0.543568", "0.5433726", "0.54326874", "0.5428433", "0.5426726", "0.54254436", "0.5422964", "0.5422959", "0.5420098", "0.5418153", "0.5418118", "0.54154694", "0.5414523", "0.5407321" ]
0.70438915
0
Create a new resource group representing a mapping of identifier to input resource files.
def read_input_group(self, **kwargs: str) -> _resource.ResourceGroup: root = secret_alnum_string(5) new_resources = {name: self._new_input_resource_file(file, root) for name, file in kwargs.items()} rg = _resource.ResourceGroup(None, root, **new_resources) self._resource_map.update({rg._uid: rg}) return rg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_resource_group(self):\n pass", "def load(self):\n self.suite.load()\n self.resource_map = {}\n dirlist = os.listdir(self.resources)\n for resource_name in (name for name in dirlist\n if os.path.isfile(os.path.join(self.resources,name)) and\n os.path.splitext(name)[1].lower() == '.fbr'):\n try:\n f = open(os.path.join(self.resources,resource_name),'rU')\n expr = f.read()\n d = eval(expr)\n resource_id = os.path.splitext(resource_name)[0].lower()\n d['id'] = resource_id\n kind = d['kind']\n del d['kind']\n self.resource_map[resource_id] = Resource.create(kind,**d)\n finally:\n f.close()", "def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info['use_it']:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n if not is_mc:\n continue\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable, process_name))\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n key_dir = getKey(process_name)\n\n outputFile = os.path.join(\n self.dirs[key_dir][DKEY_HISTO], \"%s.root\" % process_name\n )\n self.outputFiles[process_name] = {\n 'inputFiles' : [],\n 'outputFile' : outputFile,\n }\n if os.path.isfile(outputFile) and tools_is_file_ok(outputFile, min_file_size = 2000):\n logging.info('File {} already exists --> skipping job'.format(outputFile))\n continue\n\n for jobId in inputFileList.keys():\n\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = inputFileList[jobId]\n if len(self.inputFiles[key_file]) == 0:\n logging.warning(\n \"'%s' = %s --> skipping job !!\" % (key_file, self.inputFiles[key_file])\n )\n continue\n\n self.cfgFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.txt\" % (process_name, jobId)\n )\n self.outputFiles_tmp[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_HISTO_TMP], \"histogram_%i.root\" % jobId\n )\n self.logFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"project_%s_%i.log\" % (process_name, jobId)\n )\n self.scriptFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.sh\" % (process_name, jobId)\n )\n projection_module = self.projection_module\n if projection_module == \"count\":\n projection_module = \"countHistogramAll\"\n if sample_name.startswith('/TTTo'):\n projection_module += \"CompTopRwgt\"\n elif sample_info['sample_category'].startswith('ttH'):\n projection_module += \"CompHTXS\"\n elif isSplitByNlheJet(process_name):\n projection_module += \"SplitByLHENjet\"\n elif isSplitByNlheHT(process_name):\n projection_module += \"SplitByLHEHT\"\n elif isSplitByNlheJetHT(process_name, sample_name):\n projection_module += \"SplitByLHENjetHT\"\n self.jobOptions_sbatch[key_file] = {\n 'histName' : process_name,\n 'inputFiles' : self.inputFiles[key_file],\n 'cfgFile_path' : self.cfgFiles_projection[key_file],\n 'outputFile' : self.outputFiles_tmp[key_file],\n 'logFile' : self.logFiles_projection[key_file],\n 'scriptFile' : self.scriptFiles_projection[key_file],\n 'projection_module' : projection_module,\n }\n if self.projection_module != 'puHist':\n self.jobOptions_sbatch[key_file]['ref_genWeight'] = 
self.ref_genWeights[process_name]\n if process_name not in self.ref_genWeights:\n raise RuntimeError(\"Unable to find reference LHE weight for process %s\" % process_name)\n self.createCfg_project(self.jobOptions_sbatch[key_file])\n self.outputFiles[process_name]['inputFiles'].append(self.outputFiles_tmp[key_file])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable)\n self.num_jobs['project'] += self.createScript_sbatch(\n self.executable, self.sbatchFile_projection, self.jobOptions_sbatch\n )\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n self.addToMakefile_project(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n if self.plot:\n self.addToMakefile_plot(lines_makefile)\n self.addToMakefile_finalHadd(lines_makefile)\n self.createMakefile(lines_makefile)\n logging.info(\"Done\")\n\n return self.num_jobs", "def _prepare_files(self, grouping_by):\n self.post_conf_dict = {}\n self.pre_conf_dict = {}\n main_folder = self.main_folder\n\n file_path = 'devlab/tests/groups_example.yaml'\n exmpl_file_path = os.path.join(main_folder, file_path)\n pre_conf = open(exmpl_file_path, 'r')\n self.pre_conf_dict = yaml.load(pre_conf)\n\n inst_id_list = []\n inst_3 = None\n for key in self.pre_conf_dict.keys():\n if key == 'user_defined_group_1':\n for val in self.pre_conf_dict[key]:\n for inst in self.src_vms:\n if inst['name'] == val:\n inst_id_list.append(inst['id'])\n elif key == 'user_defined_group_2':\n for inst in self.src_vms:\n if inst['name'] == self.pre_conf_dict[key][0]:\n inst_3 = inst['id']\n self.pre_conf_dict['group_by'] = [unicode(grouping_by)]\n self.pre_conf_dict['user_defined_group_1'] = inst_id_list\n self.pre_conf_dict['user_defined_group_2'] = [inst_3]\n self.new_file_name = 'test_file.yaml'\n file_to_write_into = os.path.join(os.getcwd(), self.new_file_name)\n with open(file_to_write_into, 'w') as stream:\n yaml.dump(self.pre_conf_dict, stream, default_flow_style=False)\n fab_path = os.path.join('devlab/tests', self.new_file_name)\n _cmd = 'cd {cf_folder} && fab get_groups:{config_ini},{new_file}'\n cmd = _cmd.format(cf_folder=main_folder, new_file=fab_path,\n config_ini='devlab/tests/configuration.ini')\n os.system(cmd)\n post_file_path = os.path.join(main_folder, 'vm_groups.yaml')\n post_conf = file(post_file_path, 'r')\n self.post_conf_dict = yaml.load(post_conf)", "def MakeResource(resource_list, output_list=None):\n content = {'resources': resource_list}\n if output_list:\n content['outputs'] = output_list\n return yaml.dump(content)", "def createGroup(root, group, fileList):\n topGroupElem = ElementTree.SubElement(root, ELEM_GROUP, {ATTR_NAME: group})\n headerGroupElem = None\n sourceGroupElem = None\n pathElem = None\n for fl in fileList:\n if fl.endswith(\".h\"):\n if headerGroupElem == None:\n headerGroupElem = ElementTree.SubElement(topGroupElem, ELEM_GROUP, {ATTR_NAME: GRP_HEADER})\n pathElem = ElementTree.SubElement(headerGroupElem, ELEM_PATH)\n else:\n if sourceGroupElem == None:\n sourceGroupElem = ElementTree.SubElement(topGroupElem, ELEM_GROUP, {ATTR_NAME: GRP_SRC})\n pathElem = ElementTree.SubElement(sourceGroupElem, ELEM_PATH)\n pathElem.text = fl", "def create_raster_resources(self, file_path):\n extension = os.path.splitext(os.path.normpath(file_path))[1]\n fomart_x = extension[1:]\n file_name = os.path.basename(file_path)\n base = os.path.splitext(file_name)[0]\n resource_pk = []\n if os.path.isfile(file_path) and fomart_x in self.pk_formats:\n sub_dataset_name = 
file_path\n src_ds = self.get_source(sub_dataset_name)\n if not self.name:\n self.name = os.path.basename(src_ds.GetDescription())\n self.set_global(src_ds)\n\n resource_pk = []\n for band_num in range(1, src_ds.RasterCount + 1):\n bands = OrderedDict()\n srcband = src_ds.GetRasterBand(band_num)\n bands[\"extensions\"] = [fomart_x]\n bands[\"other_paths\"] = \"\"\n bands[\"format\"] = \"raster\"\n bands[\"name\"] = clean_table_name(base)\n bands[\"path\"] = os.path.basename(src_ds.GetDescription())\n bands[\"band_name\"] = base + \"_\" + str(band_num)\n bands[\"no_data_value\"] = srcband.GetNoDataValue()\n bands[\"scale\"] = srcband.GetScale()\n bands[\"color_table\"] = (None\n if not srcband.GetRasterColorTable() else True)\n bands[\"url\"] = None\n bands[\"statistics\"] = OrderedDict(\n zip(\n [\"minimum\", \"maximum\", \"mean\", \"stddev\"],\n srcband.GetStatistics(True, False),\n ))\n resource_pk.append(bands)\n return resource_pk[0]", "def build_groupings(idir: str) -> dict:\n bkg_group = {key: [ifile for ifile in glob(f'{idir}/*_{key}_*.root')] for key in bkgs}\n pw_group = {key: [ifile for ifile in glob(f'{idir}/{key}*.root')] for key in powhegs}\n wh_pw_group = [ifile for name in wh_powhegs for ifile in glob(f'{idir}/{name}*.root')]\n ungrouped = [ifile for ifile in glob(f'{idir}/*.root') if 'madgraph' in ifile or 'JHU' in ifile]\n\n group = {}\n for key, files in bkg_group.items():\n if len(files) > 0:\n group[key] = files\n\n for key, files in pw_group.items():\n if len(files) > 0:\n group[key] = files\n\n for ifile in ungrouped:\n name = ifile.split('/')[-1].replace('.root', '')\n name = name.split('_SYST')[0].replace('-', '_')\n name = name.replace('_ggH125', '').replace('_VBF125', '').replace('_WH125', '').replace('_ZH125', '')\n group[name] = [ifile]\n\n if len(wh_pw_group) > 0:\n group['wh125_powheg'] = wh_pw_group\n\n return group", "def gen_inventory(self):\n if isinstance(self.resource, list):\n self.my_add_group(self.resource, 'default_group')\n elif isinstance(self.resource, dict):\n for groupname, hosts_and_vars in self.resource.iteritems():\n self.my_add_group(hosts_and_vars.get(\"hosts\"), groupname, hosts_and_vars.get(\"vars\"))", "def make_group(self, qid, name='', path='', attrs={}, link='', abort=True):\n gqid = qid + \"/\"\n sdef = self.get_sdef(gqid, self.default_ns, \"referenced in make_group\")\n id = sdef['id']\n ns = sdef['ns']\n path = self.deduce_path(id, ns, path)\n if not abort:\n id_noslash = id.rstrip('/') # could be different from gqid if namespace present\n grp = self.get_existing_group(path, id_noslash, name)\n if grp:\n # found already existing group\n return grp \n link_info = self.extract_link_info(name, link, Group)\n # create the group\n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent, link_info)\n return grp", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n entry_group_id: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n __props__=None,\n __name__=None,\n __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n 
opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = _utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['description'] = description\n __props__['display_name'] = display_name\n if entry_group_id is None:\n raise TypeError(\"Missing required property 'entry_group_id'\")\n __props__['entry_group_id'] = entry_group_id\n __props__['project'] = project\n __props__['region'] = region\n __props__['name'] = None\n super(EntryGroup, __self__).__init__(\n 'gcp:datacatalog/entryGroup:EntryGroup',\n resource_name,\n __props__,\n opts)", "def __init__(__self__,\n resource_name: str,\n args: GroupArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def make_group(self, id, name='', attrs={}, link='', abort=True ): \n gid = id + \"/\"\n sgd = self.get_sgd(gid, name)\n path = self.full_path\n link_info = self.file.extract_link_info(name, link, Group)\n if not abort:\n # id = sgd['id'].rstrip('/') # not sure if need this\n grp = self.file.get_existing_group(path, id, name)\n if grp:\n return grp\n grp = Group(self.file, sgd, name, path, attrs, self, link_info)\n # self.mstats[gid]['created'].append(grp)\n return grp", "def create_rg_dict(self, bam_in, rg_dict_out_files, rg_prefix=False):\n if rg_prefix:\n sampleID = os.path.basename(bam_in).rstrip(\".input.bam\")\n prefix_string = \"--prefix %s\" % (sampleID)\n else:\n prefix_string = \"\"\n\n self.cmd(\"python {readgroup_mover} create\\\n {prefix_string} \\\n --input {bam_in}\\\n --output {dict_out}\"\n .format(\n readgroup_mover=self.cmds[\"readgroup_mover\"],\n prefix_string=prefix_string,\n bam_in=bam_in,\n dict_out=rg_dict_out_files[0]\n ),\n on_error=lambda: self.create_error_file(rg_dict_out_files[0]),\n shell=True)\n\n self.checkpoint(rg_dict_out_files[0])\n self.checkpoint(rg_dict_out_files[1])\n self.checkpoint(rg_dict_out_files[2])", "def from_structure(self, structure):\n session = meta.Session()\n \n try:\n for resource_s in structure['resources']:\n log.debug(\"Importing: {0!r}\".format(resource_s))\n \n # First build up a list of group_ids for this resource that will correspond to groups\n # in *this* database.\n group_ids = []\n for gname in resource_s['groups']:\n group = groups.get_by_name(gname, assert_exists=False)\n if not group:\n group = groups.create(gname)\n log.info(\"Created group: {0!r}\".format(group))\n else:\n log.info(\"Found existing group: {0!r}\".format(group))\n \n group_ids.append(group.id)\n \n # First we should see if there is a match for the id and name; we can't rely on name alone since\n # there is no guarantee of name uniqueness (even with a group)\n resource = None\n resource_candidate = resources.get(resource_s['id'], assert_exists=False)\n if resource_candidate and resource_candidate.name == resource_s['name']:\n resource = resource_candidate \n else:\n # If we find a matching resource (by name) and there is only one then we'll use that.\n try:\n resource = resources.get_by_name(resource_s['name'], assert_single=True, assert_exists=True)\n except MultipleResultsFound:\n log.info(\"Multiple resource matched name {0!r}, will create a new one.\".format(resource_s['name']))\n except exc.NoSuchEntity:\n log.debug(\"No resource found 
matching name: {0!r}\".format(resource_s['name']))\n pass\n \n resource_attribs = ('name', 'addr', 'description', 'notes', 'tags')\n resource_attribs_update = dict([(k,v) for (k,v) in resource_s.items() if k in resource_attribs])\n \n if resource:\n (resource, modified) = resources.modify(resource.id, group_ids=group_ids, **resource_attribs_update)\n # (yes, we are overwriting 'resource' var with new copy returned from this method)\n log.info(\"Updating existing resource: {0!r} (modified: {1!r})\".format(resource, modified))\n if modified and modified != ['group_ids']:\n if not self.force:\n raise RuntimeError(\"Refusing to modify existing resource attributes {0!r} on {1!r} (use 'force' to override this).\".format(modified, resource))\n else:\n log.warning(\"Overwriting resource attributes {0!r} on {1!r}\".format(modified, resource))\n else:\n # We will just assume that we need to create the resource. Yes, it's possible it'll match an existing\n # one, but better to build a merge tool than end up silently merging things that are not the same.\n resource = resources.create(group_ids=group_ids, **resource_attribs_update)\n log.info(\"Created new resource: {0!r}\".format(resource))\n \n # Add the passwords\n for password_s in resource_s['passwords']:\n \n password_attribs = ('username', 'description', 'password', 'tags')\n password_attribs_update = dict([(k,v) for (k,v) in password_s.items() if k in password_attribs])\n \n # Look for a matching password. We do know that this is unique.\n password = passwords.get_for_resource(password_s['username'], password_s['resource_id'], assert_exists=False)\n if password:\n (password, modified) = passwords.modify(password_id=password.id, **password_attribs_update)\n # (Yeah, we overwrite password object.)\n log.info(\"Updating existing password: {0!r} (modified: {1!r})\".format(password, modified))\n \n non_pw_modified = set(modified) - set(['password'])\n if not modified:\n log.debug(\"Password row not modified.\")\n else:\n log.debug(\"Password modified: {0!r}\".format(modified))\n \n # If anything changed other than password, we need to ensure that force=true\n if non_pw_modified:\n if not self.force:\n raise RuntimeError(\"Refusing to modify existing password attributes {0!r} on {1!r} (use 'force' to override this).\".format(non_pw_modified, password))\n else:\n log.warning(\"Overwriting password attributes {0!r} on {1!r}\".format(non_pw_modified, password))\n else:\n password = passwords.create(resource_id=resource.id, **password_attribs_update)\n log.info(\"Creating new password: {0!r}\".format(password))\n \n \n # This probably isn't necessary as all the DAO methods should also flush session, but might as well.\n session.flush()\n \n except:\n session.rollback()\n raise", "def create_input_files(self, datasets_dict):\n ifname = self.keywords['inputfile']\n dirstem = os.path.dirname(ifname)\n basename = os.path.basename(ifname).split('.')[0]\n createdfiles=list()\n if dirstem == \"\":\n dirstem = os.getcwd()\n dkeys = datasets_dict.keys()\n dkeys.sort()\n dct=1\n for didx in dkeys:\n newfile = MASTFile()\n newfile.data = list(datasets_dict[didx])\n newname=\"%s/loop_%s_%s.inp\" % (dirstem, basename, str(dct).zfill(2))\n newfile.to_file(newname)\n #createdfiles.append(os.path.basename(newname))\n createdfiles.append(newname)\n dct=dct+1\n return createdfiles", "def create(self):\n\n if len(self.filenames) != len(self.download_links):\n print(\"Must have the same amount off file names than download links\", file=sys.stderr)\n return None\n\n 
resources = []\n\n #Creating the resource dict\n for i in range(len(self.filenames)):\n resources.append(\n {\n \"id\": self.ids[i],\n \"description\":\"\",\n \"filename\":self.filenames[i],\n \"download_link\":self.download_links[i]\n }\n )\n\n\n #The JSON\n data = {\n \"dataset\":{\n \"project\":self.project,\n \"version\":self.version,\n \"description\":self.description,\n \"project_link\":self.project_link,\n \"data_path\": self.data_path,\n \"metadata\": self.metadata,\n \"files_type\":self.file_type,\n \"protocole\":self.protocole,\n \"resources\":resources,\n \"data_representation\":self.data_representation\n }\n }\n with open(self.dataset_path, \"w\") as json_file:\n json_file.write(json.dumps(data))", "def assemble(metadata_file):\n\n def read(file):\n with open(file) as yaml:\n return load(yaml.read())\n\n def add_name(info):\n info['name'] = slugify(info['title'], separator='_')\n return info\n\n def get_files(filetype):\n filename = metadata_file.replace('metadata', filetype)\n folder = dirname(metadata_file)\n schema_files_pattern = join(folder, filename)\n return glob(schema_files_pattern)\n\n descriptor = add_name(read(metadata_file))\n resources = [add_name(read(file)) for file in get_files('resource')]\n model = get_files('model')\n\n descriptor['resources'] = resources\n if model and len(model) == 1:\n descriptor['model'] = model.pop()\n\n return DataPackage(descriptor)", "def _build_ec2_mapping_from_resources(resource_to_analyse, result_dict, session):\n for instance, security_group in _generate_ec2_instance_and_sg(resource_to_analyse):\n resource_dict = _check_if_in_list(result_dict, instance.id, \"resource_id\")\n if resource_dict is not None:\n resource_dict[\"sg_attached\"].append({\n \"sg_id\": security_group[\"GroupId\"],\n \"sg_name\": security_group[\"GroupName\"]\n })\n else:\n result_dict.append({\n \"resource_id\": instance.id,\n \"resource_type\": \"ec2\",\n \"resource_name\": \"\" if _check_if_in_list(instance.tags, \"Name\", \"Key\") is None else _check_if_in_list(instance.tags, \"Name\", \"Key\").get(\"Value\", \"\"),\n \"sg_attached\": [{\n \"sg_id\": security_group[\"GroupId\"],\n \"sg_name\": security_group[\"GroupName\"]\n }]\n })\n return result_dict", "def _create_layout(root_dir, subsets):\n _create_folder(os.path.join(root_dir, \"images\"))\n _create_folder(os.path.join(root_dir, \"labels\"))\n\n for subset in subsets:\n _create_folder(os.path.join(root_dir, \"images\", subset))\n _create_folder(os.path.join(root_dir, \"labels\", subset))", "def create_entry_group(self, location_id, entry_group_id):\n entry_group = self.__datacatalog.create_entry_group(\n parent=f'projects/{self.__project_id}/locations/{location_id}',\n entry_group_id=entry_group_id,\n entry_group=datacatalog.EntryGroup())\n logging.info('Entry Group created: %s', entry_group.name)\n return entry_group", "def create_input_file(self, polymer_identifier, format, outpath):\n\n\t\tsmiles = self.get_smiles_from_identifier(polymer_identifier)\n\t\t\n\t\tresult = generate_input_files(smiles, format)\n\t\twith open(outpath, 'w+') as f:\n\t\t\tf.write(result)", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def load_ids_to_groups(self):\n self.groups = set([])\n self.h_group_ids = defaultdict(lambda: set([]))\n self.h_id_to_group = defaultdict(lambda: set([]))\n for i,g in zip(self.df.sample_id, self.df.group_name):\n self.h_group_ids[g].add(i)\n self.h_id_to_group[i] = g\n self.groups.add(g)", "def createGroup(self, name):\n new_group = 
ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)", "def pre_security_group_create(self, resource_dict):\n pass", "def createDictionaryFromFile(inputfile):\n logger.info('loading file: %s' % inputfile)\n dic = {}\n with open(inputfile) as fin:\n for n, line in enumerate(fin, start=1):\n arr = line.strip().split()\n path = arr[0]\n\n labels = []\n for label in arr[1:]:\n labels.append(ast.literal_eval(label))\n\n cpath = path.split('/')\n id_img = int(cpath[-1].replace('.jpg', ''))\n size_img = cpath[-2]\n activity = cpath[-3]\n id_data = int((cpath[-4])[-1])\n home = '/'.join(cpath[:-4])\n\n if dic.has_key(id_data):\n if dic[id_data].has_key(activity):\n if dic[id_data][activity].has_key(size_img):\n dic[id_data][activity][size_img][id_img] = labels\n else:\n dic[id_data][activity][size_img] = {id_img: labels}\n else:\n dic[id_data][activity] = {size_img: {id_img: labels}}\n else:\n dic[id_data] = {activity: {size_img: {id_img: labels}}}\n return n, home, dic", "def __make_group_by_res(self, group_name, name_list):\r\n if group_name not in self.groups:\r\n res_group = self.group['Residue'].getChildGrps()\r\n groups = [ res for res in res_groups if res.name in name_list ]\r\n new_group = Group(parent=[], id=-1, type=group_name, childs=groups)\r\n self.groups[group_name] = new_group", "def __init__(self):\n groups = [\n os.path.splitext(f)[0] for f in os.listdir(data_dir) if f.endswith(\".json\")\n ]\n\n self._data = {\n group: IndicatorGroup.parse_file(os.path.join(data_dir, f\"{group}.json\"))\n for group in groups\n }", "def __init__(__self__, resource_name, opts=None, attributes=None, name=None, parent_id=None, realm_id=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['attributes'] = attributes\n __props__['name'] = name\n __props__['parent_id'] = parent_id\n if realm_id is None:\n raise TypeError(\"Missing required property 'realm_id'\")\n __props__['realm_id'] = realm_id\n __props__['path'] = None\n super(Group, __self__).__init__(\n 'keycloak:index/group:Group',\n resource_name,\n __props__,\n opts)", "def prepare_io(filename, input_dataset, output_dataset):\n file_id = filename[1:] if filename.startswith(os.sep) else filename\n file_in = os.path.join(input_dataset.path, 'files', file_id)\n file_out = os.path.join(output_dataset.path, 'files', file_id)\n ensure_path(os.path.dirname(file_out))\n return file_in, file_out", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def addRG(in_files,args):\n #define readgroup header lines by combining the 
following\n\n \"\"\"\n -\n read group\n ID*\n Unique read group identifier. The value of the ID field is used in the RG tags of alignment records.\n SM*\n Sample (use pool name where a pool is being sequenced)\n LB\n Library\n DS\n Description\n PU\n Platform unit (e.g. lane for Illumina or slide for SOLiD); should be a full, unambiguous identifier\n PI\n Predicted median insert size (maybe different from the actual median insert size)\n CN\n Name of sequencing center producing the read.\n DT\n Date the run was produced (ISO 8601 date or date/time).\n PL\n Platform/technology used to produce the read.\"\"\"\n\n with open(args.barcodes,'r') as barcodes:\n sam_out= open(in_files['header'],'a')\n header = barcodes.readline().split('\\t')\n for line in barcodes:\n RG = ['@RG']\n split_line = line.split('\\t')\n if args.species and 'Species' in header:\n if split_line[(header.index('Species'))] != args.species:\n continue\n fc = split_line[(header.index('Flowcell'))]\n lane = split_line[(header.index('Lane'))]\n sample = split_line[(header.index('Sample'))]\n RG.append('ID:%s_%s_%s'%(fc,lane,sample))\n RG.append('SM:%s'%(sample))\n RG.append('LB:%s_%s'%(fc,sample))\n RG.append('PL:ILLUMINA\\n')\n sam_out.write('\\t'.join(RG))\n sam_out.close()\n return in_files", "def make_custom_group(self, qid, name='', path='', attrs={}):\n gslash = \"/\"\n sdef, name, path = self.get_custom_node_info(qid, gslash, name, path) \n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent)\n return grp", "def __init__(\n self, rule_id, parent_id, group_actions, input, output, local=False, **kwargs\n ):\n self.rule_id = rule_id\n self.parent_id = parent_id\n self.input = input\n self.output = output\n self.local = local\n self.params = kwargs\n\n self.groupped = True\n\n # load sub-actions\n self.actions = OrderedDict()\n\n for action in group_actions:\n # get action name\n action_name = action[\"action_name\"]\n del action[\"action_name\"]\n\n # determine template filepath\n action_type = action_name.split(\"_\")[0]\n template = \"actions/{}/{}.snakefile\".format(action_type, action_name)\n\n # create new SnakemakeRule instance\n self.actions[action_name] = ActionRule(\n rule_id=None,\n parent_id=None,\n input=None,\n output=None,\n template=template,\n **action\n )", "def create_sagemaker_resource(\n resource_plural, resource_name, spec_file, replacements, namespace=\"default\"\n):\n\n reference, spec, resource = k8s.load_and_create_resource(\n resource_directory,\n CRD_GROUP,\n CRD_VERSION,\n resource_plural,\n resource_name,\n spec_file,\n replacements,\n namespace,\n )\n\n return reference, spec, resource", "def make_custom_group(self, qid, name='', path='', attrs={}):\n gslash = \"/\"\n parent = self\n sdef, name, path = self.file.get_custom_node_info(qid, gslash, name, path, parent) \n grp = Group(self.file, sdef, name, path, attrs, parent)\n return grp", "def generate_groups(ctx):\n asyncio.run(generate_groups_impl(ctx.obj[\"config\"]))", "def make_grp(self):\n try:\n self.base['grp']\n except:\n self.base['grp'] = np.zeros(len(self.base),dtype='i')\n\n for halo in self._halos.values():\n halo[name][:] = halo._halo_id\n\n if config['verbose']: print \"writing %s\"%(self._base().filename+'.grp')\n self._base().write_array('grp',overwrite=True,binary=False)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n content_type: Optional[pulumi.Input[Union[str, 'FileImportContentType']]] = 
None,\n file_import_id: Optional[pulumi.Input[str]] = None,\n import_file: Optional[pulumi.Input[pulumi.InputType['FileMetadataArgs']]] = None,\n ingestion_mode: Optional[pulumi.Input[Union[str, 'IngestionMode']]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n source: Optional[pulumi.Input[str]] = None,\n workspace_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def post_security_group_create(self, resource_dict):\n pass", "def _create_ID_files(self):\n for file, IDs in [(self._trn_IDs_file, self._trn_IDs), (self._val_IDs_file,\n self._val_IDs), (self._tst_IDs_file, self._tst_IDs)]:\n with open(file, 'w') as f:\n f.write('\\n'.join('{}###{}###{}'.format(ID[0], ID[1], ID[2]) for ID in IDs))", "def create(self):\n\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n process_name = sample_info[\"process_name_specific\"]\n sample_category = sample_info[\"sample_category\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n logging.info(\"Building dictionaries for sample %s...\" % process_name)\n for charge_selection in self.charge_selections:\n central_or_shift_extensions = [\"\", \"hadd\", \"addBackgrounds\"]\n central_or_shifts_extended = central_or_shift_extensions + self.central_or_shifts\n for central_or_shift_or_dummy in central_or_shifts_extended:\n process_name_extended = [ process_name, \"hadd\" ]\n for process_name_or_dummy in process_name_extended:\n if central_or_shift_or_dummy in [ \"hadd\" ] and process_name_or_dummy in [ \"hadd\" ]:\n continue\n if central_or_shift_or_dummy != \"central\" and central_or_shift_or_dummy not in central_or_shift_extensions:\n if not is_mc:\n continue\n if not self.accept_central_or_shift(central_or_shift_or_dummy, sample_info):\n continue\n\n key_dir = getKey(process_name_or_dummy, charge_selection, central_or_shift_or_dummy)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_RLES ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name_or_dummy, central_or_shift_or_dummy)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name_or_dummy)\n for subdirectory in [ \"comp_jetToTauFakeRate\", \"makePlots\" ]:\n key_dir = getKey(subdirectory)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel, subdirectory)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel, subdirectory)\n for dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n initDict(self.dirs, [ dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n self.dirs[dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel)\n else:\n self.dirs[dir_type] = os.path.join(self.outputDir, dir_type, self.channel)\n\n numDirectories = 0\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n numDirectories += len(self.dirs[key])\n else:\n numDirectories += 1\n logging.info(\"Creating directory structure (numDirectories = %i)\" % numDirectories)\n 
numDirectories_created = 0;\n frac = 1\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n numDirectories_created += len(self.dirs[key])\n else:\n create_if_not_exists(self.dirs[key])\n numDirectories_created = numDirectories_created + 1\n while 100*numDirectories_created >= frac*numDirectories:\n logging.info(\" %i%% completed\" % frac)\n frac = frac + 1\n logging.info(\"Done.\")\n\n inputFileLists = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n logging.info(\"Checking input files for sample %s\" % sample_info[\"process_name_specific\"])\n inputFileLists[sample_name] = generateInputFileList(sample_info, self.max_files_per_job)\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n inputFileList = inputFileLists[sample_name]\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_analyze, process_name))\n\n is_mc = (sample_info[\"type\"] == \"mc\")\n sample_category = sample_info[\"sample_category\"]\n\n for charge_selection in self.charge_selections:\n for central_or_shift in self.central_or_shifts:\n \n if central_or_shift != \"central\" and not is_mc:\n continue\n if not self.accept_central_or_shift(central_or_shift, sample_info):\n continue\n\n # build config files for executing analysis code\n key_analyze_dir = getKey(process_name, charge_selection, central_or_shift)\n\n for jobId in inputFileList.keys():\n\n analyze_job_tuple = (process_name, charge_selection, central_or_shift, jobId)\n key_analyze_job = getKey(*analyze_job_tuple)\n ntupleFiles = inputFileList[jobId]\n if len(ntupleFiles) == 0:\n logging.warning(\"No input ntuples for %s --> skipping job !!\" % (key_analyze_job))\n continue\n\n cfgFile_modified_path = os.path.join(self.dirs[key_analyze_dir][DKEY_CFGS], \"analyze_%s_%s_%s_%i_cfg.py\" % analyze_job_tuple)\n logFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_LOGS], \"analyze_%s_%s_%s_%i.log\" % analyze_job_tuple)\n histogramFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_HIST], \"analyze_%s_%s_%s_%i.root\" % analyze_job_tuple)\n rleOutputFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_RLES], \"rle_%s_%s_%s_%i.txt\" % analyze_job_tuple) \\\n if self.select_rle_output else \"\"\n\n self.jobOptions_analyze[key_analyze_job] = {\n 'ntupleFiles' : ntupleFiles,\n 'cfgFile_modified' : cfgFile_modified_path,\n 'histogramFile' : histogramFile_path,\n 'logFile' : logFile_path,\n 'chargeSelection' : charge_selection,\n 'jet_minPt' : self.jet_minPt,\n 'jet_maxPt' : self.jet_maxPt,\n 'jet_minAbsEta' : self.jet_minAbsEta,\n 'jet_maxAbsEta' : self.jet_maxAbsEta,\n 'hadTau_selection_tight' : self.hadTau_selection_tight,\n 'hadTauSelection_denominator' : self.hadTau_selection_denominator,\n 'hadTauSelections_numerator' : self.hadTau_selections_numerator,\n 'trigMatchingOptions' : self.trigMatchingOptions,\n 'selEventsFileName_output' : rleOutputFile_path,\n 'absEtaBins' : self.absEtaBins,\n 'decayModes' : self.decayModes,\n 'central_or_shift' : central_or_shift,\n 'central_or_shifts_local' : [],\n 'apply_hlt_filter' : self.hlt_filter,\n }\n self.createCfg_analyze(self.jobOptions_analyze[key_analyze_job], sample_info)\n\n # initialize input and output file names for hadd_stage1\n key_hadd_stage1_dir = getKey(process_name, 
charge_selection)\n hadd_stage1_job_tuple = (process_name, charge_selection)\n key_hadd_stage1_job = getKey(*hadd_stage1_job_tuple)\n if not key_hadd_stage1_job in self.inputFiles_hadd_stage1:\n self.inputFiles_hadd_stage1[key_hadd_stage1_job] = []\n self.inputFiles_hadd_stage1[key_hadd_stage1_job].append(self.jobOptions_analyze[key_analyze_job]['histogramFile'])\n self.outputFile_hadd_stage1[key_hadd_stage1_job] = os.path.join(self.dirs[key_hadd_stage1_dir][DKEY_HIST],\n \"hadd_stage1_%s_%s.root\" % hadd_stage1_job_tuple)\n\n # initialize input and output file names for hadd_stage2\n key_hadd_stage1_job = getKey(process_name, charge_selection)\n key_hadd_stage2_dir = getKey(\"hadd\", charge_selection)\n key_hadd_stage2_job = getKey(charge_selection)\n if not key_hadd_stage2_job in self.inputFiles_hadd_stage2:\n self.inputFiles_hadd_stage2[key_hadd_stage2_job] = []\n self.inputFiles_hadd_stage2[key_hadd_stage2_job].append(self.outputFile_hadd_stage1[key_hadd_stage1_job])\n self.outputFile_hadd_stage2[key_hadd_stage2_job] = os.path.join(self.dirs[key_hadd_stage2_dir][DKEY_HIST],\n \"hadd_stage2_%s.root\" % charge_selection)\n\n logging.info(\"Creating configuration files for executing 'comp_jetToTauFakeRate'\")\n for charge_selection in self.charge_selections:\n charge_key = \"comp_%s\" % charge_selection\n self.comp_input_files[charge_key] = []\n for trigMatchingOption in self.trigMatchingOptions:\n key_hadd_stage2_job = getKey(charge_selection)\n key_comp_jetToTauFakeRate_dir = getKey(\"comp_jetToTauFakeRate\")\n key_comp_jetToTauFakeRate_job = getKey(charge_selection, trigMatchingOption)\n self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job] = {\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"comp_jetToTauFakeRate_%s_%s_cfg.py\" % (charge_selection, trigMatchingOption)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s_%s.root\" % (charge_selection, trigMatchingOption)),\n 'logFile' : os.path.join(\n self.dirs[DKEY_LOGS], \"comp_jetToTauFakeRate_%s_%s.log\" % (charge_selection, trigMatchingOption)),\n 'looseRegion' : \"jetToTauFakeRate_%s_%s/denominator/\" % (charge_selection, trigMatchingOption),\n 'tightRegion' : \"jetToTauFakeRate_%s_%s/numerator/\" % (charge_selection, trigMatchingOption),\n 'absEtaBins' : self.absEtaBins,\n 'ptBins' : self.ptBins,\n 'decayModes' : self.decayModes,\n 'hadTauSelections' : self.hadTau_selections_numerator,\n 'trigMatchingOption' : trigMatchingOption,\n 'plots_outputFileName' : os.path.join(self.dirs[key_comp_jetToTauFakeRate_dir][DKEY_PLOT], \"comp_jetToTauFakeRate_%s.png\" % trigMatchingOption)\n }\n self.createCfg_comp_jetToTauFakeRate(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job])\n comp_output = self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job]['outputFile']\n self.targets.append(comp_output)\n self.comp_input_files[charge_key].append(comp_output)\n self.comp_output_files[charge_key] = os.path.join(self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s.root\" % charge_selection)\n\n logging.info(\"Creating configuration files to run 'makePlots'\")\n for charge_selection in self.charge_selections:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_dir = getKey(\"makePlots\")\n key_makePlots_job = getKey(charge_selection) \n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : 
self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_cfg.py\" % self.channel),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s.png\" % self.channel),\n 'histogramDir' : \"jetToTauFakeRate_%s\" % charge_selection,\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for trigMatchingOption in self.trigMatchingOptions:\n self.cfgFile_make_plots = self.cfgFile_make_plots_denominator\n for absEtaBin in [ \"absEtaLt1_5\", \"absEta1_5to9_9\" ]:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_job = getKey(charge_selection, trigMatchingOption, absEtaBin, \"denominator\") \n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_%s_%s_denominator_%s_cfg.py\" % \\\n (self.channel, charge_selection, trigMatchingOption, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s_%s_%s_denominator_%s.png\" % (self.channel, charge_selection, trigMatchingOption, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s_%s/denominator/%s\" % (charge_selection, trigMatchingOption, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for hadTau_selection_numerator in self.hadTau_selections_numerator:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_job = getKey(charge_selection, trigMatchingOption, absEtaBin, \"numerator\", hadTau_selection_numerator)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_%s_%s_numerator_%s_%s_cfg.py\" % \\\n (self.channel, charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s_%s_%s_numerator_%s_%s.png\" % \\\n (self.channel, charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s_%s/numerator/%s/%s\" % (charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n\n self.sbatchFile_analyze = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_analyze_%s.py\" % self.channel)\n self.sbatchFile_comp_jetToTauFakeRate = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_comp_jetToTauFakeRate.py\")\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_analyze)\n self.createScript_sbatch_analyze(self.executable_analyze, self.sbatchFile_analyze, self.jobOptions_analyze)\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_comp_jetToTauFakeRate)\n self.createScript_sbatch(self.executable_comp_jetToTauFakeRate, self.sbatchFile_comp_jetToTauFakeRate, self.jobOptions_comp_jetToTauFakeRate)\n\n lines_makefile = []\n 
self.addToMakefile_analyze(lines_makefile)\n self.addToMakefile_hadd_stage1(lines_makefile)\n self.addToMakefile_hadd_stage2(lines_makefile, make_dependency = \"phony_hadd_stage1\", max_mem = '4096M')\n self.addToMakefile_comp_jetToTauFakeRate(lines_makefile)\n self.addToMakefile_comp_hadd(lines_makefile)\n self.addToMakefile_make_plots(lines_makefile)\n self.createMakefile(lines_makefile)\n\n logging.info(\"Done.\")\n\n return self.num_jobs", "def add_resource(client, api_id, parent_resource, sub_path):\n response = client.create_resource(\n restApiId=api_id,\n parentId=parent_resource['id'],\n pathPart=sub_path)\n file_name = \"{0}_resource.pickle\".format(sub_path)\n pickle_dictionary_to_file(response, file_name)", "def resource_map(self):", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n destination_region_id: Optional[pulumi.Input[str]] = None,\n destination_zone_id: Optional[pulumi.Input[str]] = None,\n group_name: Optional[pulumi.Input[str]] = None,\n rpo: Optional[pulumi.Input[int]] = None,\n source_region_id: Optional[pulumi.Input[str]] = None,\n source_zone_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def _create_flattened_csv_from_resources(mapping):\n data = [{\n \"resource_id\": resource[\"resource_id\"],\n \"resource_type\": resource[\"resource_type\"],\n \"resource_name\": resource.get(\"resource_name\", \"\"),\n \"sg_id\": security_group[\"sg_id\"],\n \"sg_name\": security_group[\"sg_name\"]}\n for resource in mapping\n for security_group in resource[\"sg_attached\"]]\n header = [\"resource_id\", \"resource_type\", \"resource_name\", \"sg_id\", \"sg_name\"]\n print _generate_csv(data, header)[:-1]", "def create_resource(self, **kwargs):\n results = self.api.action.resource_create(**kwargs)\n # TODO: use `results` rather than re-download, using an isolation layer to standardize the re-structure\n self.get_ckan_metadata(True)\n if 'id' in results:\n self._import_resource_to_cache(kwargs['upload'], results['id'])\n return results", "def create_label_map(app_path, file_pattern):\n resource_loader = ResourceLoader.create_resource_loader(app_path)\n query_tree = resource_loader.get_labeled_queries(label_set=file_pattern)\n return LabelMap(query_tree)", "def create_samfile(self):", "def test_create(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.require_dataset('foo', (10, 3), 'f')\n assert isinstance(dset, Dataset)\n assert dset.shape == (10, 3)\n\n dset2 = grp.require_dataset('bar', data=(3, 10))\n dset3 = grp.require_dataset('bar', data=(4, 11))\n assert isinstance(dset2, Dataset)\n assert np.all(dset2[:] == (3, 10))\n assert np.all(dset3[:] == (3, 10))\n assert dset2 == dset3", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n target: Optional[pulumi.Input[pulumi.InputType['TargetGroupAttachmentTargetArgs']]] = None,\n target_group_identifier: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def _make_group(self, _rk, _group_hint):\n\n if isinstance(_group_hint, dict):\n # _group_hint is a single key/value pair\n g = _group_hint[list(_group_hint)[0]]\n\n r_type = g.get(\"type\", \"none\")\n if r_type != \"OS::Nova::ServerGroup\":\n return \"support only ServerGroup resource\"\n\n properties = g.get(\"properties\", {})\n if len(properties) == 0:\n return \"no properties\"\n\n group_name = properties.get(\"name\", None)\n if group_name is None:\n 
return \"no group name\"\n group_name = group_name.strip()\n\n policies = properties.get(\"policies\", [])\n if len(policies) == 0:\n return \"no policy of the group\"\n\n if len(policies) > 1:\n return \"multiple policies\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if group_name in self.groups.keys():\n group = self.groups[group_name]\n else:\n group = Group(group_name)\n\n policy = policies[0].strip()\n if policy == \"anti-affinity\":\n group_type = \"diversity\"\n else:\n group_type = policy\n\n group.group_type = group_type\n group.factory = \"server-group\"\n group.level = \"host\"\n\n self.groups[group_name] = group\n else:\n # group hint is uuid string.\n rg = self.resource.get_group_by_uuid(_group_hint)\n if rg is None:\n return \"unknown group found while making group\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if rg.name in self.groups.keys():\n group = self.groups[rg.name]\n else:\n group = Group(rg.name)\n\n group.group_type = rg.group_type\n group.factory = rg.factory\n group.level = \"host\"\n\n self.groups[rg.name] = group\n\n if group is not None:\n group.server_list.append(self.app_name + \":\" + _rk)\n\n return \"ok\"", "def new_dataset(args):\n if not args.args:\n raise ParserError('you must specify an existing directory')\n outdir = Path(args.args.pop(0))\n if not outdir.exists():\n raise ParserError('you must specify an existing directory')\n\n id_pattern = re.compile('[a-z_0-9]+$')\n md = {}\n if args.args:\n md['id'] = args.args.pop(0)\n else:\n md['id'] = input('Dataset ID: ')\n\n while not id_pattern.match(md['id']):\n print('dataset id must only consist of lowercase ascii letters, digits and _ (underscore)!')\n md['id'] = input('Dataset ID: ')\n\n outdir = outdir / md['id']\n if not outdir.exists():\n outdir.mkdir()\n\n for key in ['title', 'url', 'license', 'conceptlist', 'citation']:\n md[key] = input('Dataset {0}: '.format(key))\n\n # check license!\n # check conceptlist!\n\n for path in Path(pylexibank.__file__).parent.joinpath('dataset_template').iterdir():\n if path.is_file():\n if path.suffix in ['.pyc']:\n continue # pragma: no cover\n target = path.name\n content = read_text(path)\n if '+' in path.name:\n target = re.sub(\n '\\+([a-z]+)\\+',\n lambda m: '{' + m.groups()[0] + '}',\n path.name\n ).format(**md)\n if target.endswith('_tmpl'):\n target = target[:-5]\n content = content.format(**md)\n write_text(outdir / target, content)\n else:\n target = outdir / path.name\n if target.exists():\n shutil.rmtree(str(target))\n shutil.copytree(str(path), str(target))\n del md['id']\n jsonlib.dump(md, outdir / 'metadata.json', indent=4)", "def _create_input_file_dict(self, job_data):\n\n files_dict = {}\n\n for input_name, file_ids in job_data.get_input_file_ids_by_input().items():\n file_list = []\n file_names = set()\n for file_id in file_ids:\n scale_file_model = self._input_files[file_id]\n input_file = InputFile(scale_file_model)\n # Check for file name collision and use Scale file ID to ensure names are unique\n file_name = scale_file_model.file_name\n\n if file_name in file_names:\n file_name = '%d.%s' % (scale_file_model.id, file_name)\n input_file.local_file_name = file_name\n file_names.add(file_name)\n file_list.append(input_file)\n files_dict[input_name] = file_list\n\n return files_dict", "def _build_pcollection(self, pipeline, folder, split):\n beam = tfds.core.lazy_imports.apache_beam\n\n split_type = self.builder_config.split_type\n filename = os.path.join(folder, \"{}.tar.gz\".format(split_type))\n\n def 
_extract_data(inputs):\n \"\"\"Extracts files from the tar archives.\"\"\"\n filename, split = inputs\n for name, fobj in tfds.download.iter_archive(\n filename, tfds.download.ExtractMethod.TAR_STREAM\n ):\n split_name = name.split(\"_\")\n if len(split_name) > 2 and split_name[2] == split:\n yield [name, fobj.read()]\n\n def _process_example(inputs):\n filename, data_string = inputs\n buf = six.BytesIO(data_string)\n buf.seek(0)\n data = np.load(buf)\n # Extract the images and convert to uint8. The reshape is required, see\n # https://github.com/deepmind/abstract-reasoning-matrices.\n all_images = np.uint8(data[\"image\"].reshape(16, 160, 160, 1))\n return filename, {\n \"relation_structure_encoded\": data[\"relation_structure_encoded\"],\n \"target\": data[\"target\"],\n \"meta_target\": data[\"meta_target\"],\n \"context\": all_images[:8],\n \"answers\": all_images[8:],\n \"filename\": filename,\n }\n\n # Beam might fuse together the _extract_data and _process_example which\n # defeats the purpose of parallel processing. As a result, we reshard by\n # doing a GroupByKey on random keys, and then flattening again.\n def _add_random_keys(inputs):\n key = str(random.randrange(10**10))\n return key, inputs\n\n def _remove_keys(inputs):\n _, rows = inputs\n for row in rows:\n yield row\n\n return (\n pipeline\n | beam.Create([(filename, split)])\n | beam.FlatMap(_extract_data)\n | beam.Map(_add_random_keys)\n | beam.GroupByKey()\n | beam.FlatMap(_remove_keys)\n | beam.Map(_process_example)\n )", "def create_participant_group_mapping(\n self,\n qualification_name: str,\n requester_id: str,\n prolific_project_id: str,\n prolific_participant_group_name: str,\n prolific_participant_group_id: str,\n ) -> None:\n try:\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n INSERT INTO participant_groups(\n qualification_name,\n requester_id,\n prolific_project_id,\n prolific_participant_group_name,\n prolific_participant_group_id\n ) VALUES (?, ?, ?, ?, ?);\n \"\"\",\n (\n qualification_name,\n requester_id,\n prolific_project_id,\n prolific_participant_group_name,\n prolific_participant_group_id,\n ),\n )\n return None\n\n except sqlite3.IntegrityError as e:\n if is_unique_failure(e):\n # Ignore attempt to add another mapping for an existing key\n db_qualification = self.get_qualification_mapping(qualification_name)\n\n logger.debug(\n f\"Multiple Prolific mapping creations \"\n f'for qualification \"{qualification_name}\". '\n f\"Found existing one: {db_qualification}. 
\"\n )\n assert (\n db_qualification is not None\n ), \"Cannot be none given is_unique_failure on insert\"\n\n db_requester_id = db_qualification[\"requester_id\"]\n db_prolific_qualification_name = db_qualification[\"prolific_participant_group_name\"]\n\n if db_requester_id != requester_id:\n logger.warning(\n f\"Prolific Qualification mapping create for {qualification_name} \"\n f\"under requester {requester_id}, already exists under {db_requester_id}.\"\n )\n\n if db_prolific_qualification_name != prolific_participant_group_name:\n logger.warning(\n f\"Prolific Qualification mapping create for {qualification_name} \"\n f\"with Prolific name {prolific_participant_group_name}, \"\n f\"already exists under {db_prolific_qualification_name}.\"\n )\n\n return None\n else:\n raise e", "def create_and_fill_bucket(self):\n EmrProcessing.bucket = \\\n self.s3_handle.create_bucket(EmrProcessing.bucket_name)\n key = EmrProcessing.bucket.new_key('input/test.csv')\n input_file_path = '../data/test.csv'\n key.set_contents_from_filename(input_file_path)\n key.set_acl('public-read')\n\n key = EmrProcessing.bucket.new_key('mapper/mapper.py')\n input_file_path = '../src/mapper/mapper.py'\n key.set_contents_from_filename(input_file_path)\n key.set_acl('public-read')", "def collect_resources(namespace, output_dir, api_resources, k8s_cli_input=\"\", selector=\"\"):\n set_file_logger(output_dir)\n k8s_cli = detect_k8s_cli(k8s_cli_input)\n ns_output_dir = os.path.join(output_dir, namespace)\n make_dir(ns_output_dir)\n collect_api_resources(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_api_resources_description(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_pods_logs(namespace, ns_output_dir, k8s_cli, logs_from_all_pods=True)", "def gen_group(group_name=None, group_vars={}):\n group = Group(name=group_name)\n for key, value in group_vars.iteritems():\n group.set_variable(key, value)\n return group", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n account_id: Optional[pulumi.Input[str]] = None,\n group: Optional[pulumi.Input[str]] = None,\n image_id: Optional[pulumi.Input[str]] = None,\n organization_arn: Optional[pulumi.Input[str]] = None,\n organizational_unit_arn: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "async def create(\n self,\n resource_group_name: str,\n project_name: str,\n group_name: str,\n group: Optional[\"models.Group\"] = None,\n **kwargs\n ) -> \"models.Group\":\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.Group\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2018-06-01\"\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.create.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\\w\\._\\(\\)]+$'),\n 'projectName': self._serialize.url(\"project_name\", project_name, 'str'),\n 'groupName': self._serialize.url(\"group_name\", group_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n 
query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n if group is not None:\n body_content = self._serialize.body(group, 'Group')\n else:\n body_content = None\n body_content_kwargs['content'] = body_content\n request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n response_headers = {}\n if response.status_code == 200:\n response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))\n deserialized = self._deserialize('Group', pipeline_response)\n\n if response.status_code == 201:\n response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))\n deserialized = self._deserialize('Group', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, response_headers)\n\n return deserialized", "def pre_process_resource(self):\n\n # get list of resource files\n files = [os.path.join(self.resource_folder, f) for f in os.listdir(self.resource_folder)]\n files = [f for f in files if os.path.isfile(f)]\n\n # raise error if no files found\n if not files:\n raise FileNotFoundError(\"No resource files found...\")\n\n # create preprocessed folder if it doesn't exist\n if not os.path.exists(self.pre_processed_folder):\n os.mkdir(self.pre_processed_folder)\n\n # record number of chunks from previous files\n chunk_hist = 0\n\n # open valid files\n for filename in files:\n\n self.file_size = os.path.getsize(filename)\n\n # check chunk size\n if self.file_size / self.chunk_size > 500:\n print(\"Warning, this will create {} partitions.\".format(int(self.file_size / self.chunk_size)))\n rmtree(self.pre_processed_folder)\n raise ValueError(\"This file will create more than 500 partitions, consider increasing the chunk size...\")\n\n # process\n pool = Pool(self.configs[\"n_processors\"])\n for _ in tqdm.tqdm(pool.imap(self.pre_process_chunk, self.generate_chunk(filename)), total=int(self.file_size / self.chunk_size)):\n pass", "def pre_project_create(self, resource_dict):\n pass", "def build(self):\n writer = None\n out_complete = ''\n\n reader = self.module_loader.get_reader()()\n reader.set_configs(self.configs)\n reader.verify_parameters()\n input_path = self.configs.get_resolved('parameters', 'input', 'path')\n output_path = self.configs.get_resolved('parameters', 'output', 'path')\n pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)\n \n files = reader.fetch_input_files(input_path)\n\n for i, group in enumerate(files):\n\n obs = self.params('output_block_size') if self.configs.exists('parameters', 'output_block_size') else 1\n if obs is None:\n obs = len(files)\n\n first_of_batch = (i % obs == 0)\n\n if first_of_batch:\n output_name = reader.output_filename(group['id'])\n writer = self.module_loader.get_writer()(output_path, output_name)\n out_complete = writer.file_path()\n\n 
Logger.log('started_r_files', group['files'])\n\n with writer.appending(not first_of_batch) as dataset:\n Logger.log('writing_file', out_complete, '' if first_of_batch else '(appending)')\n \n self.read_attributes(dataset)\n self.read_variables(dataset)\n \n if reader.data_grouping:\n complete_path = tuple([path.join(input_path, f) for f in group['files']])\n else:\n complete_path = path.join(input_path, group['files'])\n reader.read_to(dataset, complete_path, self.configs, not first_of_batch)\n\n Logger.info('done')", "def create_inputs_recipe():\n module_name, _ = os.path.splitext(os.path.basename(__file__))\n path = os.path.join(CREATED_INPUTS_PATH_FOR_TESTS, module_name)\n os.makedirs(path, exist_ok=True)\n os.chdir(path)\n os.makedirs(\"inputs/\", exist_ok=True)\n print('Current working directory:\\n {:s}'.format(os.getcwd()))\n\n for filename, _ in input_pars:\n print('Downloading files...')\n basename = filename.split(\"_\")[0] + \".fits\"\n sci_path = download_from_archive(basename)\n sci_ad = astrodata.open(sci_path)\n data_label = sci_ad.data_label()\n\n print('Reducing pre-processed data:')\n logutils.config(file_name='log_{}.txt'.format(data_label))\n p = GNIRSLongslit([sci_ad])\n p.prepare(bad_wcs=\"fix\")\n p.addDQ()\n p.addVAR(read_noise=True)\n p.ADUToElectrons()\n p.addVAR(poisson_noise=True)\n # p.flatCorrect()\n p.makeIRAFCompatible()\n\n os.chdir(\"inputs/\")\n processed_ad = p.writeOutputs().pop()\n os.chdir(\"../\")\n print('Wrote pre-processed file to:\\n'\n ' {:s}'.format(processed_ad.filename))", "def group_by_filenames(self):\n package = self.container.config.output.package\n class_map = collections.group_by(self.container, key=get_location)\n groups = self.group_common_paths(class_map.keys())\n\n for keys in groups:\n if len(keys) == 1:\n common_path = os.path.dirname(keys[0])\n else:\n common_path = os.path.commonpath(keys)\n\n for key in keys:\n items = class_map[key]\n suffix = \".\".join(Path(key).parent.relative_to(common_path).parts)\n\n package_name = f\"{package}.{suffix}\" if suffix else package\n self.assign(items, package_name, module_name(key))", "def pre_namespace_create(self, resource_dict):\n pass", "def create_group(self, identifier: str, group_name: str) -> Group:\n\n # APM-137701 - Namespace for custom device calculation should not be set\n group_id = get_group_id(\"\", identifier)\n if group_id in self._groups:\n raise ValueError(\"Group \" + group_name + \" already exist, id: \" + str(group_id))\n else:\n group = Group(group_id, group_name, self._technologies, self._results_builder)\n\n self._groups[group_id] = group\n return group", "def make_grp(self, name='grp'):\n self.base[name] = self.get_group_array()", "def build_resource(self, *args, **kwargs):\r\n r = {}\r\n for current_resource in self.resources:\r\n item = self._get_resource(\r\n repo=self.current_repo, owner=self.owner, \r\n resource=current_resource, **kwargs\r\n )\r\n if not item: continue\r\n r[current_resource] = item\r\n\r\n return r", "def create_group(self, name) -> \"GroupBase\":\n ancestor, group_names, last_name = self._descend(name)\n parent = ancestor._require_descendant_groups(*group_names)\n if last_name in parent:\n raise FileExistsError(f\"Group or dataset found at '{name}'\")\n return parent._create_child_group(last_name)", "def __init__(__self__, *,\n group: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n namespace: Optional[pulumi.Input[str]] = None,\n resource: Optional[pulumi.Input[str]] = None,\n subresource: 
Optional[pulumi.Input[str]] = None,\n verb: Optional[pulumi.Input[str]] = None,\n version: Optional[pulumi.Input[str]] = None):\n if group is not None:\n pulumi.set(__self__, \"group\", group)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if namespace is not None:\n pulumi.set(__self__, \"namespace\", namespace)\n if resource is not None:\n pulumi.set(__self__, \"resource\", resource)\n if subresource is not None:\n pulumi.set(__self__, \"subresource\", subresource)\n if verb is not None:\n pulumi.set(__self__, \"verb\", verb)\n if version is not None:\n pulumi.set(__self__, \"version\", version)", "def resource_create(resource_id, resource_type, resource_options=None, cibfile=None):\n return item_create(\n item=\"resource\",\n item_id=resource_id,\n item_type=resource_type,\n extra_args=resource_options,\n cibfile=cibfile,\n )", "def __init__(__self__, *,\n group: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n namespace: Optional[pulumi.Input[str]] = None,\n resource: Optional[pulumi.Input[str]] = None,\n version: Optional[pulumi.Input[str]] = None):\n if group is not None:\n pulumi.set(__self__, \"group\", group)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if namespace is not None:\n pulumi.set(__self__, \"namespace\", namespace)\n if resource is not None:\n pulumi.set(__self__, \"resource\", resource)\n if version is not None:\n pulumi.set(__self__, \"version\", version)", "def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1", "def get_new_config_group(self):\n filename = \"%(config_dir)s/%(group)s.%(time)s\" % \\\n { \"config_dir\": self.config_dir(),\n \"group\": self.group_name(),\n \"time\": common.time_suffix(),}\n common.write_file(\"w\", 0o644, filename, self.get_match_criteria())", "def _init_run_continual_record_grp(self, run_idx, run_record_key, fields):\n\n # create the group\n run_grp = self.run(run_idx)\n record_grp = run_grp.create_group(run_record_key)\n\n # for each field simply create the dataset\n for field_name, field_shape, field_dtype in fields:\n\n self._init_run_records_field(run_idx, run_record_key,\n field_name, field_shape, field_dtype)\n\n return record_grp", "def load_groups(files):\n groups = defaultdict(list)\n for f in files:\n d = np.load(f, allow_pickle=True)\n gkey = to_group_key(d['args'].item()._get_kwargs())\n groups[gkey].append((f, d))\n return groups", "def _build_elbv2_mapping_from_resources(resource_to_analyse, result_dict, session):\n for elb_instance, security_group_id, security_group_name in _generate_elb_instances_and_sg(resource_to_analyse, session):\n resource_dict = _check_if_in_list(result_dict, elb_instance[\"LoadBalancerName\"], \"resource_id\")\n if resource_dict is not None:\n resource_dict[\"sg_attached\"].append({\n \"sg_id\": security_group_id,\n \"sg_name\": security_group_name\n })\n else:\n result_dict.append({\n \"resource_id\": elb_instance[\"LoadBalancerName\"],\n \"resource_type\": \"elb\",\n \"sg_attached\": [{\n \"sg_id\": security_group_id,\n \"sg_name\": security_group_name\n }]\n })\n return result_dict", "def create_default_groups():\n from flaskbb.fixtures.groups import fixture\n result = []\n for key, value in fixture.items():\n group = Group(name=key)\n\n for k, v in value.items():\n setattr(group, k, v)\n\n group.save()\n result.append(group)\n return result", "def GenerateResourceScript(input_filename, output_filename, payload_filename,\n 
manifest_filename, resource_filename):\n f_int = open(input_filename, 'r')\n f_out = open(output_filename, 'w')\n\n for line in f_int.readlines():\n f_out.write(re.sub(r'__RESOURCE_FILENAME__', resource_filename,\n re.sub(r'__MANIFEST_FILENAME__', manifest_filename,\n re.sub(r'__PAYLOAD_FILENAME__', payload_filename, line))))", "def _generate_inventory(self, datapath):\n \n files = [file for file in listdir(datapath) if '.nc' in file and not 'xyz' in file]\n # file_prefixes = list(set([ file.split('_')[0] for file in files ]))\n # file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n if self.extra_pref:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2] + [self.extra_pref]) for file in files ]))\n else:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n \n inventory = {}\n for file_prefix in file_prefixes:\n fname = path.join(datapath,f'{file_prefix}{self.first_suffix}')\n if not self.metafile:\n self.metafile = fname\n vars = [ var for var in list(Dataset(fname).variables) if var not in self.skip_vars ]\n for var in vars:\n inventory[var] = {'files': sorted([path.join(datapath,file) \n for file in listdir(datapath) if file_prefix in file])}\n return inventory", "def _build_resources_template(self, output_filename=\"{}_r.json\"):\n\n template = self._base_troposphere_template()\n\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n resource_cls.register_type_resources_template(self, template)\n for r in self.get_resources(resource_type):\n r.register_resources_template(template)\n\n template = utils.fix_troposphere_references(template)\n\n if template and template.resources:\n output_filename = output_filename.format(self._get_next_build_sequence_id())\n self.puts(colored.cyan(output_filename))\n with open(os.path.join(self.build_path, output_filename), 'w') as f:\n f.write(template.to_json())", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n entry_group_id: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None) -> 'EntryGroup':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"description\"] = description\n __props__[\"display_name\"] = display_name\n __props__[\"entry_group_id\"] = entry_group_id\n __props__[\"name\"] = name\n __props__[\"project\"] = project\n __props__[\"region\"] = region\n return EntryGroup(resource_name, opts=opts, __props__=__props__)", "def test_create_existing(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n\n dset = grp.require_dataset('foo', (10, 3), 'float32')\n dset2 = grp.require_dataset('foo', (10, 3), 'float32')\n\n assert dset == dset2", "def test_create_extended(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (63,))\n assert dset.shape == (63,)\n assert dset.size == 63\n\n dset = f.create_dataset('bar', (6, 10))\n assert dset.shape == (6, 10)\n assert dset.size == (60)", "def resource_mapping():\n return {\n 'OS::Heat::ResourceChain': ResourceChain,\n }", "def set_read_group( in_bam_path, out_bam_path, id:str, pl:str, lb:str, sm:str, pu:str, threads=4 ):\n\n \"\"\"\n read_groups(set/dict) : set or dictionary which 
contains read groups. The dictionary should have the format { read_group_id (str)\n { 'ID': ID, 'LB':library,\n 'PL':platform,\n 'SM':sampleLib,\n 'PU':readGroup }\n \"\"\"\n\n read_groups = {id:{ 'ID': id, 'LB':lb,\n 'PL':pl,\n 'SM':sm,\n 'PU':pu }}\n\n with pysam.AlignmentFile(in_bam_path, threads = threads) as input_bam:\n\n input_header = input_bam.header.as_dict()\n\n # Write provenance information to BAM header\n write_program_tag(\n input_header,\n program_name='bamReadGroupFormat',\n command_line=\" \".join(\n sys.argv),\n version=singlecellmultiomics.__version__,\n description=f'SingleCellMultiOmics read group formatting, executed at {datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")}')\n\n with sorted_bam_file(out_bam_path, header=input_header, read_groups=read_groups, input_is_sorted=True) as out:\n print('Started writing')\n for read in input_bam:\n rg_id = id\n read.set_tag('RG',rg_id)\n out.write(read)", "def create_bag(resource):\n dest_prefix = getattr(settings, 'BAGIT_TEMP_LOCATION', '/tmp/hydroshare/')\n bagit_path = os.path.join(dest_prefix, resource.short_id, arrow.get(resource.updated).format(\"YYYY.MM.DD.HH.mm.ss\"))\n visualization_path = os.path.join(bagit_path, 'visualization')\n contents_path = os.path.join(bagit_path, 'contents')\n\n for d in (dest_prefix, bagit_path, visualization_path, contents_path):\n try:\n os.makedirs(d)\n except:\n shutil.rmtree(d)\n os.makedirs(d)\n\n for f in resource.files.all():\n shutil.copy2(f.resource_file.path, contents_path)\n\n with open(bagit_path + '/resourcemetadata.json', 'w') as out:\n tastypie_module = resource._meta.app_label + '.api' # the module name should follow this convention\n tastypie_name = resource._meta.object_name + 'Resource' # the classname of the Resource seralizer\n tastypie_api = importlib.import_module(tastypie_module) # import the module\n serializer = getattr(tastypie_api, tastypie_name)() # make an instance of the tastypie resource\n bundle = serializer.build_bundle(obj=resource) # build a serializable bundle out of the resource\n out.write(serializer.serialize(None, serializer.full_dehydrate(bundle), 'application/json'))\n\n bagit.make_bag(bagit_path, checksum=['md5'], bag_info={\n 'title': resource.title,\n 'author': resource.owners.all()[0].username,\n 'author_email': resource.owners.all()[0].email,\n 'version': arrow.get(resource.updated).format(\"YYYY.MM.DD.HH.mm.ss\"),\n 'resource_type': '.'.join((resource._meta.app_label, resource._meta.object_name)),\n 'hydroshare_version': getattr(settings, 'HYDROSHARE_VERSION', \"R1 development\"),\n 'shortkey': resource.short_id,\n 'slug': resource.slug\n })\n\n zf = os.path.join(dest_prefix, resource.short_id) + \".zip\"\n make_zipfile(output_filename=zf, source_dir=bagit_path)\n b = Bags.objects.create(\n content_object=resource,\n bag=File(open(zf)),\n timestamp=resource.updated\n )\n\n os.unlink(zf)\n shutil.rmtree(bagit_path)\n\n return b", "def construct_dicts(self, path):\n module_dicts = self.read_dict(path, use_superpkg=True)\n\n id_dict = dict()\n name_dict = dict()\n\n for cmd_dict in module_dicts:\n # Create a cmd template object\n cmd_temp = cmd_template.CmdTemplate(\n cmd_dict[self.OP_CODE_FIELD],\n cmd_dict[self.MNEMONIC_FIELD],\n cmd_dict[self.COMPONENT_FIELD],\n cmd_dict[self.ARGS_FIELD],\n cmd_dict[self.DESC_FIELD],\n )\n\n id_dict[cmd_dict[self.OP_CODE_FIELD]] = cmd_temp\n name_dict[cmd_dict[self.MNEMONIC_FIELD]] = cmd_temp\n\n return (id_dict, name_dict)", "def _build_pre_resources_template(self, output_filename=\"{}_pr_r.json\"):\n template 
= actions.ActionsTemplate()\n\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n resource_cls.register_type_pre_resources_template(self, template)\n for r in self.get_resources(resource_type):\n r.register_pre_resources_template(template)\n\n if template:\n output_filename = output_filename.format(self._get_next_build_sequence_id())\n self.puts(colored.cyan(output_filename))\n with open(os.path.join(self.build_path, output_filename), 'w') as f:\n f.write(template.to_json(indent=4))", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n file_shares: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FileShareConfigArgs']]]]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n kms_key_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkConfigArgs']]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input['InstanceTier']] = None,\n __props__=None):\n ...", "def _create_resource_set_file(self, slots, uid, sandbox):\n\n # if `cpu_index_using: physical` is set to run at Lassen@LLNL,\n # then it returns an error \"error in ptssup_mkcltsock_afunix()\"\n if slots['nodes'][0]['name'].lower().startswith('lassen'):\n rs_str = ''\n else:\n rs_str = 'cpu_index_using: physical\\n'\n rank = 0\n for node in slots['nodes']:\n\n gpu_maps = list(node['gpu_map'])\n for map_set in node['core_map']:\n cores = ','.join(str(core) for core in map_set)\n rs_str += 'rank: %d: {' % rank\n rs_str += ' host: %s;' % str(node['uid'])\n rs_str += ' cpu: {%s}' % cores\n if gpu_maps:\n gpus = ','.join(str(gpu) for gpu in gpu_maps.pop(0))\n rs_str += '; gpu: {%s}' % gpus\n rs_str += '}\\n'\n rank += 1\n\n rs_name = '%s/%s.rs' % (sandbox, uid)\n with open(rs_name, 'w') as fout:\n fout.write(rs_str)\n\n return rs_name", "def make(input_filepath, output_filepath) -> None:\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def _create_resource_provider(context, uuid, name,\n parent_provider_uuid=None):\n return {\n 'uuid': uuid,\n 'name': name,\n 'generation': 0,\n 'parent_provider_uuid': parent_provider_uuid\n }", "def create_groups(**kwargs):\n for gname in SEC_GROUP_NAMES.itervalues():\n Group.objects.get_or_create(name=gname)", "def generate_config(context):\n\n resources = []\n properties = context.properties\n project_id = properties.get('project', context.env['project'])\n name = properties.get('name', context.env['name'])\n\n resource = {\n 'name': context.env['name'],\n # https://cloud.google.com/filestore/docs/reference/rest/v1beta1/projects.locations.instances/create\n 'type': 'gcp-types/file-v1beta1:projects.locations.instances',\n 'properties': {\n 'parent': 'projects/{}/locations/{}'.format(project_id, properties['location']),\n 'instanceId': name,\n }\n }\n\n optional_props = [\n 'description',\n 'tier',\n 'labels',\n 'fileShares',\n 'networks',\n ]\n\n for prop in optional_props:\n if prop in properties:\n resource['properties'][prop] = properties[prop]\n\n resources.append(resource)\n\n return {\n 'resources':\n resources,\n 'outputs':\n [\n {\n 'name': 'name',\n 'value': name\n },\n {\n 'name': 'fileShares',\n 'value': 
'$(ref.{}.fileShares)'.format(context.env['name'])\n },\n {\n 'name': 'networks',\n 'value': '$(ref.{}.networks)'.format(context.env['name'])\n }\n ]\n }", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def load_resources(resource_filename):" ]
[ "0.58438635", "0.5761084", "0.55505747", "0.5540623", "0.5514604", "0.55128634", "0.5483182", "0.54395956", "0.5409617", "0.5397805", "0.5392991", "0.53768927", "0.5376017", "0.5359928", "0.5356735", "0.5327738", "0.52933335", "0.52924156", "0.5286034", "0.52811337", "0.52422285", "0.5239233", "0.5233111", "0.52098596", "0.520034", "0.5199278", "0.5168087", "0.5167269", "0.51671344", "0.51649696", "0.51642823", "0.51049656", "0.5095727", "0.5089666", "0.50896126", "0.5082233", "0.5064525", "0.5063923", "0.50531816", "0.5032474", "0.50319237", "0.50313264", "0.5031313", "0.50280386", "0.5021194", "0.50112385", "0.5010435", "0.5003017", "0.49933437", "0.49872774", "0.49864683", "0.4972209", "0.496704", "0.4944811", "0.49375647", "0.49325803", "0.49318364", "0.49088207", "0.49060428", "0.49013352", "0.48999092", "0.48989815", "0.48870656", "0.48839423", "0.48819754", "0.48807323", "0.48807132", "0.4877739", "0.4876369", "0.4870625", "0.48680592", "0.48651502", "0.48512903", "0.484722", "0.4845119", "0.4844688", "0.484311", "0.4842499", "0.4836301", "0.48302463", "0.48186374", "0.48159504", "0.4812476", "0.48100722", "0.48096475", "0.4801761", "0.47988176", "0.47965613", "0.47912207", "0.47851652", "0.4768947", "0.47648752", "0.47634953", "0.47634065", "0.47550493", "0.47530818", "0.4750253", "0.47476864", "0.4745372", "0.47433394" ]
0.6814466
0
Write resource file or resource file group to an output destination. Examples
def write_output(self, resource: _resource.Resource, dest: str):
    if not isinstance(resource, _resource.Resource):
        raise BatchException(f"'write_output' only accepts Resource inputs. Found '{type(resource)}'.")
    if (isinstance(resource, _resource.JobResourceFile)
            and isinstance(resource._source, job.BashJob)
            and resource not in resource._source._mentioned):
        name = resource._source._resources_inverse[resource]
        raise BatchException(f"undefined resource '{name}'\n"
                             f"Hint: resources must be defined within the "
                             f"job methods 'command' or 'declare_resource_group'")
    if (isinstance(resource, _resource.PythonResult)
            and isinstance(resource._source, job.PythonJob)
            and resource not in resource._source._mentioned):
        name = resource._source._resources_inverse[resource]
        raise BatchException(f"undefined resource '{name}'\n"
                             f"Hint: resources must be bound as a result "
                             f"using the PythonJob 'call' method")
    if isinstance(self._backend, _backend.LocalBackend):
        dest_scheme = url_scheme(dest)
        if dest_scheme == '':
            dest = os.path.abspath(os.path.expanduser(dest))
    resource._add_output_path(dest)
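For context, the positive document above appears to be the `write_output` method of a `Batch` class from the `hailtop.batch` API (the `_resource`, `job`, and `_backend` names it references look like Hail Batch internals). The following is only a minimal usage sketch under that assumption; the job name and destination path are hypothetical, not taken from the dataset:

    import hailtop.batch as hb

    b = hb.Batch(backend=hb.LocalBackend(), name='demo')
    j = b.new_job(name='make-greeting')
    j.command(f'echo hello > {j.ofile}')         # j.ofile becomes a defined output resource by being mentioned here
    b.write_output(j.ofile, 'output/hello.txt')  # copy the job's output resource to a local destination path
    b.run()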
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, output, resources, **kw):\n\n raise NotImplementedError()", "def write_resources(self, resources):\n for filename, data in list(resources.get('outputs', {}).items()):\n # Determine where to write the file to\n dest = os.path.join(self.output_dir, filename)\n path = os.path.dirname(dest)\n if path and not os.path.isdir(path):\n os.makedirs(path)\n\n # Write file\n with open(dest, 'wb') as f:\n f.write(data)", "def WriteDataPack(resources, output_file, encoding):\n content = WriteDataPackToString(resources, encoding)\n with open(output_file, \"wb\") as file:\n file.write(content)", "def WriteDataPack(resources, output_file, encoding):\n content = WriteDataPackToString(resources, encoding)\n with open(output_file, 'wb') as file:\n file.write(content)", "def WriteToFile(output_file, security_policy, file_format):\n resource_printer.Print(\n security_policy, print_format=file_format, out=output_file)", "def WriteToFile(output_file, security_policy, file_format):\n resource_printer.Print(\n security_policy, print_format=file_format, out=output_file)", "def testWriteResourceFiles(self):\n resource_files = ['test.rc']\n\n file_writer = writers.VS2010ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer._WriteResourceFiles(resource_files)\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n expected_output_data = (\n b' <ItemGroup>\\r\\n'\n b' <ResourceCompile Include=\"test.rc\" />\\r\\n'\n b' </ItemGroup>\\r\\n')\n self.assertEqual(output_data, expected_output_data)", "def write(name, keyword, domain, citation, author, description, species, version, contact, license, values, output):\n write_namespace(\n name, keyword, domain, author, citation, values,\n namespace_description=description,\n namespace_species=species,\n namespace_version=version,\n author_contact=contact,\n author_copyright=license,\n file=output,\n )", "def write(self, destination):\n if not self._errors and not self._warnings:\n return\n destination.write('%s :\\n' % self._asset_name)\n for error in self._errors:\n destination.write(' * ERROR : %s\\n' % error)\n for warning in self._warnings:\n destination.write(' * WARNING : %s\\n' % warning)\n\n destination.write('\\n')", "def writeAPI(res_api, dst_file):\n with open(dst_file, 'w') as api_dst:\n api_dst.write(res_api)", "def write(name, keyword, domain, citation, author, description, species, version, contact, license, values,\n functions, output, value_prefix):\n write_namespace(\n name, keyword, domain, author, citation, values,\n namespace_description=description,\n namespace_species=species,\n namespace_version=version,\n author_contact=contact,\n author_copyright=license,\n functions=functions,\n file=output,\n value_prefix=value_prefix\n )", "def collect_helper(output_dir, cmd, file_name, resource_name, namespace=None):\n return_code, out = run_shell_command(cmd)\n if return_code:\n logger.warning(\"Error when running %s: %s\", cmd, out)\n return\n path = os.path.join(output_dir, file_name)\n with open(path, \"w+\", encoding='UTF-8') as file_handle:\n file_handle.write(out)\n logger.info(\"Namespace '%s': Collected %s\", namespace, resource_name)", "def testWriteResourceFiles(self):\n resource_files = ['test.rc']\n\n file_writer = writers.VS2008ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer._WriteResourceFiles(resource_files)\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n expected_output_data = (\n b'\\t\\t<Filter\\r\\n'\n 
b'\\t\\t\\tName=\"Resource Files\"\\r\\n'\n b'\\t\\t\\tFilter=\"rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;'\n b'resx;tiff;tif;png;wav\"\\r\\n'\n b'\\t\\t\\tUniqueIdentifier=\"{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}\"\\r\\n'\n b'\\t\\t\\t>\\r\\n'\n b'\\t\\t\\t<File\\r\\n'\n b'\\t\\t\\t\\tRelativePath=\"test.rc\"\\r\\n'\n b'\\t\\t\\t\\t>\\r\\n'\n b'\\t\\t\\t</File>\\r\\n'\n b'\\t\\t</Filter>\\r\\n')\n self.assertEqual(output_data, expected_output_data)", "def writeToFile(self, targetFolder):\r\n pass", "def testWriteFiles(self):\n header_files = ['test.h']\n resource_files = ['test.rc']\n source_files = ['test.c']\n\n file_writer = writers.VS2010ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer.WriteFiles(source_files, header_files, resource_files)\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n self.assertTrue(output_data.startswith(b' <ItemGroup>\\r\\n'))\n self.assertTrue(output_data.endswith(b' </ItemGroup>\\r\\n'))", "def write(self, outfile, rebasings=None):\r\n raise NotImplementedError()", "def write(task_spec: TaskSpec, destination_dir: Path, force: bool = False):\n\n file_path: Path = destination_dir / task_spec.filename\n file_path.touch(exist_ok=force)\n\n writable_task_spec: Dict = clean(task_spec)\n\n yaml.dump(writable_task_spec, file_path)", "def write_to_file(inventory):\n env = Environment(loader=FileSystemLoader('templates'), trim_blocks=True)\n output_template = env.get_template('output.j2')\n # create and clean an 'outputs' folder\n path = \"./outputs\"\n try:\n shutil.rmtree(path, ignore_errors = True, onerror = None)\n except:\n print('Error while deleting directory')\n os.mkdir(path)\n os.chdir(path)\n for node, node_data in inventory.items():\n if 'outputs' in node_data:\n os.mkdir(node)\n for command, output in node_data['outputs'].items():\n # when creating filenames based on command, swap 'spaces' with 'underscores':\n command = re.sub(r\"\\s\", r\"_\", command)\n open(f\"{node}/{command}.txt\", 'a').write(\n output_template.render(node=node, data=output))\n print(\"\\n\" + f\"Job complete. 
If data gathering was successful, see 'outputs' directory.\")\n return inventory", "def write_to_files(section, csv_path, srt_path):\n write_to_csv(section, csv_path)\n write_to_srt(section, srt_path)", "def write(self, data, dst, label=None, mode='wb'):\n\n self._tag(dst, label)\n self._mkdir_for(dst)\n with open(os.path.join(self.chroot, dst), mode) as wp:\n wp.write(data)", "def write(self, data, dst, label=None, mode='wb'):\r\n\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n with open(os.path.join(self.chroot, dst), mode) as wp:\r\n wp.write(data)", "def write(self, data, dst, label=None, mode='wb'):\r\n\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n with open(os.path.join(self.chroot, dst), mode) as wp:\r\n wp.write(data)", "def _write_file(template, localcontext, output_path, name):\n output = template.render(localcontext)\n filename = os.sep.join((output_path, name))\n try:\n os.makedirs(os.path.dirname(filename))\n except Exception:\n pass\n with open(filename, 'w', encoding='utf-8') as f:\n f.write(output)\n print u' [ok] writing %s' % filename", "def write_output(content, dir_to_file):\n\n if not dir_to_file:\n dir_to_file = '{0}output-{1}'.format(dir_to_file, uuid.uuid4())\n\n f = open(dir_to_file, 'a')\n f.write(content)\n f.close()\n\n log.info('function: {} dir_to_file: {}'.format('write_output', dir_to_file))\n\n return dir_to_file", "async def dump(self, resource: ResourceType, location: PhysicalResourceLocation):\n try:\n # Serialize the resource.\n raw = self.serializer(resource)\n # Make sure the target directory exists.\n location.path.parent.mkdir(parents=True, exist_ok=True)\n # Dump the raw data to file.\n await self._dump_raw(raw, location)\n except ResourceDumperError:\n raise\n except Exception as ex:\n raise FailedToDumpResourceError(location.path) from ex", "def write(self, iface, dest_folder, feedback=None):\n return WriterResult()", "def write_file(rel_path, text, *args, **kwargs):\n path = os.path.join(os.path.dirname(__file__), \"resources\", rel_path)\n with open(path, 'w+', *args, **kwargs) as _file:\n _file.write(text)", "def write(self):\n self.output_directory.mkdir(parents=True, exist_ok=True)\n parameter_set_files = [pathlib.Path(set_name) for set_name in\n self.parameter_study.coords[_set_coordinate_key].values]\n if self.write_meta and self.provided_output_file_template:\n self._write_meta(parameter_set_files)\n if self.output_file_type == 'h5':\n self._write_dataset()\n elif self.output_file_type == 'yaml':\n self._write_yaml(parameter_set_files)\n else:\n raise ValueError(f\"Unsupported output file type '{self.output_file_type}'\")", "def _write_output_file(output: str, file_name: str):\n\tfile1 = open(file_name, 'w')\n\tfile1.write(output)\n\tfile1.close()", "def collect_resources(namespace, output_dir, api_resources, k8s_cli_input=\"\", selector=\"\"):\n set_file_logger(output_dir)\n k8s_cli = detect_k8s_cli(k8s_cli_input)\n ns_output_dir = os.path.join(output_dir, namespace)\n make_dir(ns_output_dir)\n collect_api_resources(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_api_resources_description(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_pods_logs(namespace, ns_output_dir, k8s_cli, logs_from_all_pods=True)", "def writeOutput(self, output):", "def write(self, fname, group=None, write_mode='w'):\n with h5py.File(fname, write_mode) as f:\n # write to group if group is given\n if group is not None:\n fobj = f.create_group(group)\n else:\n fobj = f\n for chan, ts in zip(self.channels, 
self.data):\n dset = fobj.create_dataset(chan, data=ts, compression='gzip')\n dset.attrs['sample_rate'] = self.fs\n dset.attrs['t0'] = self.t0\n dset.attrs['channel'] = str(chan)\n dset.attrs['name'] = str(chan)", "def MakeResource(resource_list, output_list=None):\n content = {'resources': resource_list}\n if output_list:\n content['outputs'] = output_list\n return yaml.dump(content)", "def write(filename, output, file_format=None, **kwargs):\n if not isinstance(filename, str):\n raise TypeError()\n\n output = [output] if isinstance(output, Output) else output\n if not (\n isinstance(output, (list, tuple))\n and all(isinstance(out, Output) for out in output)\n ):\n raise TypeError()\n\n fmt = (\n file_format\n if file_format\n else filetype_from_filename(filename, _extension_to_filetype)\n )\n\n return _writer_map[fmt](filename, output, **kwargs)", "def _export_resources(self, use_raster_region=False):\n for resource in self.resource_export_list:\n\n # print(\"Check for termination %i\"%self.resource_logger.get_termination(self.user_id, self.resource_id))\n\n # Check for termination requests between the exports\n if bool(self.resource_logger.get_termination(self.user_id, self.resource_id)) is True:\n raise AsyncProcessTermination(\"Resource export was terminated by user request\")\n\n # Raster export\n if resource[\"export\"][\"type\"] in [\"raster\", \"vector\", \"file\"]:\n\n output_type = resource[\"export\"][\"type\"]\n output_path = None\n\n # Legacy code\n if \"name\" in resource:\n file_name = resource[\"name\"]\n if \"value\" in resource:\n file_name = resource[\"value\"]\n\n if output_type == \"raster\":\n message = \"Export raster layer <%s> with format %s\"%(file_name, resource[\"export\"][\"format\"])\n self._send_resource_update(message)\n output_name, output_path = self._export_raster(raster_name=file_name,\n format=resource[\"export\"][\"format\"],\n use_raster_region=use_raster_region)\n elif output_type == \"vector\":\n if \"PostgreSQL\" in resource[\"export\"][\"format\"]:\n dbstring = resource[\"export\"][\"dbstring\"]\n output_layer = None\n if \"output_layer\" in resource[\"export\"]:\n output_layer = resource[\"export\"][\"output_layer\"]\n\n message = \"Export vector layer <%s> to PostgreSQL database\"%(file_name)\n self._send_resource_update(message)\n self._export_postgis(vector_name=file_name, dbstring=dbstring, output_layer=output_layer)\n # continue\n else:\n message = \"Export vector layer <%s> with format %s\"%(file_name, resource[\"export\"][\"format\"])\n self._send_resource_update(message)\n output_name, output_path = self._export_vector(vector_name=file_name,\n format=resource[\"export\"][\"format\"])\n elif output_type == \"file\":\n file_name = resource[\"file_name\"]\n tmp_file = resource[\"tmp_file\"]\n output_name, output_path = self._export_file(tmp_file=tmp_file, file_name=file_name)\n else:\n raise AsyncProcessTermination(\"Unknown export format %s\"%output_type)\n\n message = \"Moving generated resources to final destination\"\n self._send_resource_update(message)\n\n # Store the temporary file in the resource storage\n # and receive the resource URL\n if output_path is not None:\n resource_url = self.storage_interface.store_resource(output_path)\n self.resource_url_list.append(resource_url)", "def write_resource(config: Config) -> Config:\n destination_file = os.path.join(\n config.destination,\n \"res/description\",\n \"{}.res\".format(config.name)\n )\n\n contents = render_resource(reduce_resource(config.root))\n\n contents = 
\"\\n\".join([COMMENT_C + PREFIX, contents])\n\n assert_directories(destination_file, True)\n\n with open(destination_file, \"w\") as f:\n f.write(contents)\n\n return config", "def write_output_shifts_to_file(self, shift_output):\n pass", "def write_actual_output(self, output):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n with open(actual_output_file, \"w\") as f:\n f.write(output)", "def make_output(args, stdout=sys.stdout):\n if args.outfile:\n return FileOutput(args.outfile)\n else:\n return StreamOutput(stdout)", "def write_file(*args, **kwargs): # real signature unknown\n pass", "def GenerateResourceScript(input_filename, output_filename, payload_filename,\n manifest_filename, resource_filename):\n f_int = open(input_filename, 'r')\n f_out = open(output_filename, 'w')\n\n for line in f_int.readlines():\n f_out.write(re.sub(r'__RESOURCE_FILENAME__', resource_filename,\n re.sub(r'__MANIFEST_FILENAME__', manifest_filename,\n re.sub(r'__PAYLOAD_FILENAME__', payload_filename, line))))", "def _out(self, *args):\n suffix = '_'.join(map(str, args))\n return os.path.join(self._out_folder, suffix )", "def write(self, outputfile):\n outfile = open(outputfile, 'w')\n if (outputfile.lower().endswith('.po')):\n self.write_po(outfile)\n elif (outputfile.lower().endswith('.json')):\n self.write_json(outfile)\n elif (outputfile.lower().endswith('.xml')):\n self.write_properties(outfile)\n outfile.close()", "def _write_dataset(self):\n if self.output_file:\n if self.dryrun:\n sys.stdout.write(f\"{self.output_file.resolve()}\\n{self.parameter_study}\\n\")\n else:\n self.output_file.parent.mkdir(parents=True, exist_ok=True)\n self._conditionally_write_dataset(self.output_file, self.parameter_study)\n else:\n for parameter_set_file, parameter_set in self.parameter_study.groupby(_set_coordinate_key):\n parameter_set_file = pathlib.Path(parameter_set_file)\n # If no output file template is provided, print to stdout\n if not self.provided_output_file_template:\n sys.stdout.write(f\"{parameter_set_file.name}\\n{parameter_set}\")\n sys.stdout.write(\"\\n\")\n # If overwrite is specified or if file doesn't exist\n elif self.overwrite or not parameter_set_file.is_file():\n # If dry run is specified, print the files that would have been written to stdout\n if self.dryrun:\n sys.stdout.write(f\"{parameter_set_file.resolve()}:\\n{parameter_set}\")\n sys.stdout.write(\"\\n\")\n else:\n self._conditionally_write_dataset(parameter_set_file, parameter_set)", "def writetif(self,outputname,):\n pass", "def _write_file(template, localcontext, output_path, name, override):\n # set localsiteurl for context so that Contents can adjust links\n if localcontext[\"localsiteurl\"]:\n context[\"localsiteurl\"] = localcontext[\"localsiteurl\"]\n output = template.render(localcontext)\n path = sanitised_join(output_path, name)\n\n try:\n os.makedirs(os.path.dirname(path))\n except Exception:\n pass\n\n with self._open_w(path, \"utf-8\", override=override) as f:\n f.write(output)\n logger.info(\"Writing %s\", path)\n\n # Send a signal to say we're writing a file with some specific\n # local context.\n signals.content_written.send(path, context=localcontext)", "def file_write(self, name: str, output: str) -> None:\n\n self._event_loop.call_soon_threadsafe(\n tpartial(self._file_write_in_thread, name, output)\n )", "def write(self, file_obj, file_format):\n if ( file_format.upper() == 'FASTA' ):\n write_func = write_fasta\n #elif ( file_format.upper() == 'NEXUS' ):\n # write_func = write_nexus\n #elif ( 
file_format.upper() == 'PHYLIP' ):\n # write_func = write_phylip\n #elif ( file_format.upper() == 'COMPACT' ):\n # write_func = write_compact \n #elif ( file_format.upper() == 'COMPACT2' ):\n # write_func = write_compact2 \n #elif ( file_format.upper() == 'COMPACT3' ):\n # write_func = write_compact3\n else:\n write_func = write_fasta\n write_func(self, file_obj)", "def to_output_file(self, content):\n self.__log(f'Starting to write response content to output file.')\n if self.output_file_exists() and not self.config['FORCE_OVERWRITE']:\n self.__log(f'Cannot write to file. Selected output file exists and FORCE_OVERWRITE is disabled.', 'error')\n raise FileExistsError\n file = self.config['OUT_FOLDER'] + '/' + self.config['OUTPUT_FOLDER'] + '/' + self.output_filename + '.' \\\n + self.options['image_format'].lower()\n with open(file, 'w') as f:\n f.writelines(content)\n self.__log(f'Successfully wrote response content to \"{file}\".', 'success')", "def writegrp(self, grpoutfile=False):\n snapshot = self[1].ancestor\n try:\n snapshot['grp']\n except:\n self.make_grp()\n if not grpoutfile:\n grpoutfile = snapshot.filename + '.grp'\n logger.info(\"Writing grp file to %s\" % grpoutfile)\n fpout = open(grpoutfile, \"w\")\n print >> fpout, len(snapshot['grp'])\n\n # writing 1st to a string sacrifices memory for speed.\n # but this is much faster than numpy.savetxt (could make an option).\n # it is assumed that max halo id <= nhalos (i.e.length of string is set\n # len(str(nhalos))\n stringarray = snapshot['grp'].astype(\n '|S' + str(len(str(self._nhalos))))\n outstring = \"\\n\".join(stringarray)\n print >> fpout, outstring\n fpout.close()", "def write_to_file(output, test_case_name, path):\n path_to_store = OutputWrite.make_test_dir(path, test_case_name)\n time_stamp = OutputWrite.get_time_stamp()\n try:\n LOG.debug('Changing the dir to {0}'.format(path_to_store))\n os.chdir(path_to_store)\n except Exception as _ex_:\n LOG.exception('Error :{0}'.format(_ex_))\n else:\n file_name = os.path.join(path_to_store, test_case_name +\n time_stamp)\n LOG.debug('The file name after joining = {0}'.format(file_name))\n try:\n LOG.debug('Writing Test case output to the file')\n with open(file_name, 'w') as file_obj:\n file_obj.write(output)\n except FileNotFoundError as _ex_:\n LOG.exception('Error : {0}'.format(_ex_))", "def setup_output_path(self):\n self.logger.info('setting up output path')\n try:\n self.output_path.mkdir()\n except FileExistsError:\n pass\n try:\n (self.output_path / 'simple').mkdir()\n except FileExistsError:\n pass\n for filename in resource_listdir(__name__, 'static'):\n if filename == 'index.html':\n # Skip template\n continue\n with (self.output_path / filename).open('wb') as f:\n source = resource_stream(__name__, 'static/' + filename)\n f.write(source.read())\n source.close()", "def _write_file(output_path: str, file_content: Iterable[str]) -> None:\n with open(output_path, \"w+\", encoding=\"utf-8\") as f:\n f.writelines(file_content)\n\n logging.info(f\"wrote to '{output_path}'\")", "def write(result, basename):\n filename = make_fullname(basename, type(result))\n write_funcs[type(result)](filename, result)", "def writeToFile(ruleset, className, classValue, fp):\n size = len(ruleset)\n if (size != 0):\n for i in range(size):\n j=0\n while(j<len(ruleset[i])-1):\n fp.write(str(ruleset[i][j]).replace(\"'\", \"\")+' & ')\n j = j+1\n fp.write(str(ruleset[i][j]).replace(\"'\", \"\")+\" -> (\"+className+\", \"+classValue+\") \\n\")\n fp.close()", "def _write_file(inobject, prefix=None, 
clobber=False):\n local_write = 'writeto'\n local_write_func = getattr(inobject, local_write)\n filename = os.path.basename(inobject.filename())\n if prefix:\n filename = ''.join([prefix, filename])\n local_write_func(filename, clobber=clobber)\n return", "def testWriteFiles(self):\n header_files = ['test.h']\n resource_files = ['test.rc']\n source_files = ['test.c']\n\n file_writer = writers.VS2008ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer.WriteFiles(source_files, header_files, resource_files)\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n self.assertTrue(output_data.startswith(b'\\t<Files>\\r\\n'))\n self.assertTrue(output_data.endswith(\n b'\\t</Files>\\r\\n'\n b'\\t<Globals>\\r\\n'\n b'\\t</Globals>\\r\\n'))", "def writefile(fname, obj_id, suffix):\n \n # check input is sensible\n if obj_id not in flocs.keys(): raise ValueError(\n 'obj_id:{} not in {}'.format(obj_id, flocs.keys()))\n \n fname = os.path.splitext(fname)[0] + suffix\n fname = os.path.split(fname)[-1]\n path = os.path.join(DATA_LOC, obj_id, fname + flocs[obj_id])\n hook = codecs.open(path, 'w+', **wparams)\n return path, hook", "def write_output(self):", "def write_output_files(self, file_type, output, expected):\n actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)\n expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)\n\n self._write_file(actual_filename, output)\n self._write_file(expected_filename, expected)", "def write_to_file(unit, fobj):\n\n _write_all_headers(unit, fobj)\n _write_all_sections(unit, fobj)", "def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()", "def write_to_output(output):\n try:\n # changing current directory to script directory\n OutputWrite.change_to_script_directory(__file__)\n # writing the output a file\n timestamp_in_secs = time.time()\n time_stamp_readable = datetime.datetime.fromtimestamp(\n timestamp_in_secs).strftime(\"%Y_%m_%d-%Ih_%Mm_%Ss_%p\")\n try:\n if not os.path.isdir('../results'):\n os.chdir('..')\n print('Current directory {0}'.format(os.getcwd()))\n os.mkdir('./results')\n OutputWrite.change_to_script_directory(__file__)\n except OSError as _ex_:\n print(\"Unable to create results directory {0}\".format(_ex_))\n abspath = os.path.abspath('..')\n print('abspath of ..', abspath)\n path = OutputWrite.create_dir_structure()\n file_name = os.path.join(path, 'output_' +\n time_stamp_readable)\n print('The file name after joining', file_name)\n with open(file_name, 'w') as file_obj:\n file_obj.write(output)\n\n except FileNotFoundError as err:\n print('Unable write the test results into the file {0}'.\n format(err))", "def write(self, filename=None, as_type='json'):\n if not filename:\n filename = self.uri\n self.create_output_dir(filename)\n if as_type == 'json':\n with open(filename, 'w') as outfile:\n outfile.write(self.transform_data(outformat=formats.JSON))\n elif as_type == 'shapefile':\n self.data.to_file(filename)\n else:\n raise NotImplementedError('{} not a valid type'.format(as_type))\n return self.uri", "def _write_file(template, localcontext, output_path, name, override):\r\n old_locale = locale.setlocale(locale.LC_ALL)\r\n locale.setlocale(locale.LC_ALL, str('C'))\r\n try:\r\n output = template.render(localcontext)\r\n finally:\r\n locale.setlocale(locale.LC_ALL, old_locale)\r\n path = 
os.path.join(output_path, name)\r\n try:\r\n os.makedirs(os.path.dirname(path))\r\n except Exception:\r\n pass\r\n\r\n with self._open_w(path, 'utf-8', override=override) as f:\r\n f.write(output)\r\n logger.info('writing {}'.format(path))\r\n\r\n # Send a signal to say we're writing a file with some specific\r\n # local context.\r\n signals.content_written.send(path, context=localcontext)", "def writeDomainFile():\n writeTemplate(localTemplate)", "def add_out(psr, psr_verify=lambda args: args):\n def verify(args):\n out_dir = args.out_dir\n if not path.exists(out_dir):\n os.makedirs(out_dir)\n return args\n\n psr.add_argument(\n \"--out-dir\", default=\".\",\n help=\"The directory in which to store output files.\", type=str)\n return psr, lambda args: verify(psr_verify(args))", "def save_resource(resource, data):\n if resource not in RES_PATHS:\n raise Exception(f\"Unknown resource: {resource}\")\n\n with open(RES_PATHS[resource], 'w', encoding='utf8') as f:\n json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))", "def process_output_file_write(output_file, response):\n\n with open(output_file, \"w\") as output_file:\n output_file.write(response)", "def write(self, output: Any) -> None:\n self._original.write(output)\n self._handler.file_write(self._name, output)", "def write_templated(output_file, results, version, template_loc):\n template = get_template(template_loc)\n\n for template_chunk in generate_output(results, version, template):\n assert isinstance(template_chunk, str)\n try:\n output_file.write(template_chunk)\n except Exception:\n import traceback\n msg = 'ERROR: Failed to write output for: ' + repr(template_chunk)\n msg += '\\n' + traceback.format_exc()\n raise Exception(msg)", "def write_output(output_q, outfile):\n with open(outfile, \"w\") as fh:\n while True:\n output = output_q.get()\n if output == None:\n break\n \n fname, mtime = output\n fh.write(\"fname={f}|mtime={m}\\n\".format(f=fname, m=mtime))\n \n output_q.task_done()", "def write_output(output_dir, df_out):\n # Make stage output dir\n output_dir = os.path.join(output_dir, 'transform')\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\n # Write dfs to files\n for model_cls_name, df in df_out.items():\n fp = os.path.join(output_dir, model_cls_name + '.tsv')\n df.to_csv(fp, sep='\\t')", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def write(self, output):\n logging.info('write %s' % output)\n make_folder(output)\n f = open(output, 'w')\n code = self.get_code()\n if code:\n f.write(code)\n f.close()", "def save_resources(self, save_directory):\n for name, file_name in self.resource_files_names.items():\n save_path = os.path.join(save_directory, file_name)\n shutil.copyfile(getattr(self, \"_%s\" % name), save_path)", "def write(self, args, file_dat):\n assert self.checker_(file_dat)\n file_path = self.path(args)\n file_str = self.writer_(file_dat)\n autofile.write_file(file_path, file_str)", "def __export_file(self, filename, output):\n outfile = open(filename, \"w\")\n outfile.write(output)\n outfile.close\n print(\"Output written to file: \" + filename + \"\\n\")", "def write_to_files():\n\t# Create output files\n\toutput = [None, \\\n\t\t open(\"priority-1.txt\", \"w\"), \\\n\t\t open(\"priority-2.txt\", \"w\"), \\\n\t\t open(\"priority-3.txt\", \"w\"), \\\n\t\t open(\"priority-4.txt\", \"w\"), \\\n\t\t open(\"priority-5.txt\", \"w\"), ]\n\n\t# Loop over all fields and write them to the correct file\n\tfor field in sorted(reportlog.keys()):\n\t\tpriority = 
reportlog[field]['priority']\n\t\tlabel = reportlog[field]['label']\n\n\t\toutput[priority].write(\"intphas_%s\\t%s\\n\" % (field, label))\n\t\toutput[priority].flush()\n\n\t# Close files\n\tfor i in [1,2,3,4,5]:\n\t\toutput[i].close()", "def writeto(self,output_file,**kwargs):\n dump_pkl(self._properties,output_file,**kwargs)\n return", "def writeFile( str_, *args ):\n filePath = path.join( *args )\n with open( filePath, 'w' ) as fd:\n fd.write(str_)", "def write_file(country, season, final, var):\n if var=='label':\n path='../results/kmeans/'\n elif var=='cluster':\n path='../results/sequence_analysis/'\n country_ = country.lower()\n season_ = season.replace('-','_')\n file_name=country_+\"_\"+season_\n newpath=path+file_name+'/'\n if not os.path.exists(newpath):\n os.makedirs(newpath)\n f = open(newpath+file_name+\".txt\",\"w\") \n f.write(final)\n f.close()", "def test_writer_with_file():\n outputfile = \"testfile.txt\"\n GCMT(write=outputfile)\n assert os.path.exists(outputfile)\n os.remove(outputfile)", "def destage_output(output_dict):\n for _, desc in output_dict.items():\n if isinstance(desc, dict):\n if desc['class'] == 'File':\n location = urlparse(desc['location'])\n dest_path = os.path.join(os.getcwd(), os.path.basename(location.path))\n shutil.move(location.path, dest_path)\n desc['location'] = 'file://' + dest_path\n\n return output_dict", "def write_file(output_data, output_file):\n output_file = utils._get_absolute_path(output_file)\n with open(output_file, 'w') as json_file:\n json.dump(output_data, json_file)\n\n return output_file", "def _writer(path: str, sink: Queue):\n writer = Write2File(path)\n logger = settings.LOGGER\n count = 0\n while True:\n article = sink.get()\n if article == 'EXIT':\n logger.info(f'All {count} articles saved to {path}.')\n return\n writer(article)\n count += 1\n if count % 10000 == 0:\n logger.info(f'{count} articles processed.')", "def testWriteOutIntDirPropertyGroups(self):\n project_configurations = resources.VSConfigurations()\n\n file_writer = writers.VS2012ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer._WriteOutIntDirPropertyGroups(project_configurations)\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n expected_output_data = (\n b' <PropertyGroup>\\r\\n'\n b' <_ProjectFileVersion>11.0.61030.0</_ProjectFileVersion>\\r\\n'\n b' </PropertyGroup>\\r\\n')\n self.assertEqual(output_data, expected_output_data)", "def task_output_block_groups():\n for dept in Department.list():\n yield {\n 'name': dept.name,\n 'file_dep': [dept.block_groups_path],\n 'targets': [dept.block_groups_output],\n 'actions': ['cp %(dependencies)s %(targets)s'],\n 'clean': True,\n }", "def createOutputFile(dataList, maxClusterNum, labelConverter, filePrefix):\n outputFileList = []\n for i in range(maxClusterNum):\n outputFileList.append(open(filePrefix + \"_cluster_\" + str(i) + \".txt\", 'w'))\n\n for pt in dataList:\n matchingCluster = labelConverter[pt.label]\n if matchingCluster == -1:\n continue\n outputFileList[matchingCluster].write(str(pt.id) + '\\n')\n\n for i in range(maxClusterNum):\n outputFileList[i].close()", "def write(self, op, assembler):\n if isinstance(op, str):\n fd = open(op, \"w\")\n for ii in self.__sections:\n ii.write(fd)\n fd.close()\n if is_verbose():\n print(\"Wrote assembler source file '%s'.\" % (op))\n else:\n prefix = assembler.format_block_comment(\"Program\")\n op.write(prefix)\n for ii in self.__sections:\n ii.write(op)", "def outfile(self):\n\n return 
f\"{self.name}.run.out\"", "def write(self, *args):\n\n self._write(self._out, *args)", "def write_to(self, io):\n out = io\n io, path = tempfile.mkstemp()\n fnlen = Erf.filename_length(self.fversion)\n lstr_iter = iter(sorted(self.localized_strings.items()))\n locstr = []\n for k, v in lstr_iter:\n locstr.append(struct.pack(\"<L L %ds x\" % len(v), k, len(v)+1, v.encode(get_encoding())))\n locstr = b''.join(locstr)\n\n keylist = []\n for i, co in enumerate(self.content):\n pad = 0\n max = len(co.resref)\n if len(co.resref) > fnlen:\n print(\"truncating filename %s, longer than %d\" % (co.resref, fnlen), file=sys.stderr)\n max = fnlen\n else:\n pad = fnlen - len(co.resref)\n\n keylist.append(struct.pack(\"<%ds %dx L h h\" % (len(co.resref), pad),\n co.resref.encode(get_encoding()),\n i, co.res_type, 0))\n keylist = b''.join(keylist)\n\n offset = 160 + len(locstr) + len(keylist) + 8 * len(self.content)\n\n reslist = []\n for co in self.content:\n reslist.append(struct.pack(\"< L L\", offset, co.size))\n offset += co.size\n\n reslist = b''.join(reslist)\n\n offset_to_locstr = 160\n offset_to_keylist = offset_to_locstr + len(locstr)\n offset_to_resourcelist = offset_to_keylist + len(keylist)\n\n header = struct.pack(\"8s LL LL LL LL L 116x\",\n (self.ftype+' '+self.fversion).encode(get_encoding()),\n len(self.localized_strings),\n len(locstr), len(self.content), offset_to_locstr, offset_to_keylist,\n offset_to_resourcelist, self.year, self.day_of_year, self.desc_strref)\n\n os.write(io, header)\n os.write(io, locstr)\n os.write(io, keylist)\n os.write(io, reslist)\n\n for co in self.content:\n os.write(io, co.get())\n\n os.close(io)\n shutil.copy(path, out)\n os.remove(path)", "def write(self, target):\n mpath = path.join(self._working_dir, 'manifest.json')\n with open(mpath, 'w') as mani:\n json.dump(self.item, mani)\n\n directory = path.abspath(self._working_dir)\n with zipfile.ZipFile(target, 'w', allowZip64=True) as zip:\n for root, dirs, files in walk(directory):\n for f in files:\n abspath = path.join(root, f)\n relpath = path.relpath(abspath, directory)\n zip.write(abspath, relpath)\n return target", "def write_processor(args, processor_path, templates):\n now_str = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n processor_code = templates.format(author=args.author,\n email_addr=args.email,\n name=args.name,\n now=now_str,\n purpose=args.purpose)\n with open(processor_path, 'w') as f_obj:\n f_obj.writelines(processor_code)", "def write(self, file_obj, file_format):\n if ( file_format.upper() == 'FASTA' ):\n write_func = write_compact_to_fasta \n #elif ( file_format.upper() == 'COMPACT' ):\n # write_func = write_compact_to_compact\n #elif ( file_format.upper() == 'COMPACT3' ):\n # write_func = write_compact_to_compact3 \n #elif ( file_format.upper() == 'PHYLIP' ):\n # write_func = write_compact_to_phylip \n else:\n write_func = write_compact_to_fasta\n write_func(self, file_obj)", "def render_and_write(template_name, context, output_name, output_dir):\n\n template = templates_env.get_template(template_name)\n f = open(path.join(output_dir, output_name), \"w\")\n f.write(template.render(**context).encode('utf-8'))\n f.close()", "def write_result(file_name, name, entries, extra_includes, src_file_names):\r\n\r\n with open(file_name, 'w', newline='\\n') as f:\r\n f.write('// Generated by %s\\n' % os.path.basename(__file__))\r\n f.write('// Based on %s: %s\\n' %\r\n ((\"this file\" if len(src_file_names) < 2 else\r\n \"these files\"), \", \".join(src_file_names)))\r\n methods = 
entries[0]\r\n if len(methods) != 0:\r\n f.write(to_PyMethodDef(name, methods, extra_includes))\r\n f.write('\\n')\r\n\r\n properties = entries[1]\r\n if len(properties) != 0:\r\n f.write('\\n')\r\n f.write(to_PyGetSetDef(name, properties))", "def render_and_write(template_name, context, output_name, output_dir,templates_env):\n\ttemplate = templates_env.get_template(template_name)\n\tf = open(path.join(output_dir, output_name), \"w\")\n\tf.write(template.render(**context).encode('utf-8'))\n\tf.close()" ]
[ "0.7189583", "0.67922026", "0.6485851", "0.6471249", "0.5966388", "0.5966388", "0.5919293", "0.58600813", "0.5827604", "0.5729985", "0.5707116", "0.5665977", "0.55832523", "0.557501", "0.55459183", "0.54772556", "0.5467842", "0.5445758", "0.54371053", "0.54174685", "0.5403804", "0.5403804", "0.5396148", "0.5392162", "0.5386541", "0.5378357", "0.5377417", "0.535219", "0.5340851", "0.52905977", "0.52787286", "0.52543354", "0.5246355", "0.52392775", "0.5233375", "0.52286434", "0.5220048", "0.5195824", "0.51857316", "0.51787585", "0.5177428", "0.5175927", "0.5138824", "0.5135812", "0.5105462", "0.509502", "0.50901514", "0.5073143", "0.50674456", "0.5066062", "0.50535077", "0.50429296", "0.5034999", "0.5034246", "0.503378", "0.50303656", "0.5030261", "0.5029216", "0.5013113", "0.5007943", "0.50055975", "0.49933112", "0.49834368", "0.49719426", "0.4963294", "0.49451014", "0.49377933", "0.49360728", "0.4927549", "0.49259737", "0.49233398", "0.49162242", "0.49080327", "0.49010032", "0.49010032", "0.4895449", "0.48899624", "0.48880115", "0.48854932", "0.4883487", "0.4880984", "0.4875455", "0.4871932", "0.48710206", "0.48680004", "0.4867353", "0.48640278", "0.48574486", "0.48506227", "0.4849352", "0.4848212", "0.48474112", "0.4840161", "0.48356023", "0.48327833", "0.48287466", "0.4827178", "0.48271102", "0.48214313", "0.4817857" ]
0.68054146
1
Select all jobs in the batch whose name matches `pattern`. Examples
def select_jobs(self, pattern: str) -> List[job.Job]:
    return [job for job in self._jobs
            if job.name is not None and re.match(pattern, job.name) is not None]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_jobs(jobs, keyword):\n for job in jobs:\n if keyword == \"all\":\n yield job\n elif job[\"name\"].find(keyword) != -1:\n yield job", "def search_by_pattern(self, tl):\n print(\"Search by regex pattern\")\n pattern = input(\"Please enter search pattern: \")\n return tl.findall_pattern(pattern)", "def do_builds(self, pattern):\n if not pattern:\n print('\\n'.join(self._qm.get_available_stc_builds()))\n return\n\n for build in self._qm.get_available_stc_builds():\n if fnmatch.fnmatch(build, pattern):\n print(build)", "def _get_wild_tasks(self, pattern):\n wild_list = []\n for t_name in self._def_order:\n if fnmatch.fnmatch(t_name, pattern):\n wild_list.append(t_name)\n return wild_list", "def grep(requestContext, seriesList, pattern):\n regex = re.compile(pattern)\n return [s for s in seriesList if regex.search(s.name)]", "def name_search(self, search):\n if isinstance(search, str):\n name_re = re.compile(search)\n else:\n name_re = search\n matches = [\n entry\n for entry in self\n if entry is not None and name_re.search(entry.name)\n ]\n return matches", "def all_by_actor(actor) -> co.Parallel:\n df = _load_data()\n titles = df[df.cast.str.contains(actor) | False].title\n\n output = co.Parallel()\n for title in titles:\n output[title] = co.Exec(\n f\"python pipeline.py for_title {repr(title)}\"\n )\n return output", "def WhereMatches(self, pattern):\n regex = re.compile(match_util.ExpandRegexIdentifierPlaceholder(pattern))\n return self.Filter(lambda s: (\n regex.search(s.source_path) or\n regex.search(s.object_path) or\n regex.search(s.full_name) or\n s.full_name is not s.template_name and regex.search(s.template_name) or\n s.full_name is not s.name and regex.search(s.name)))", "def search(self, pattern):\n raise NotImplementedError()", "def do_ls(self, pattern=\"\"):\n if pattern:\n print \"The available jobs with substring %s are:\" % pattern\n else:\n print \"The available jobs are:\"\n \n app_order = self.router.app_order\n app_path = self.router.app_path\n n = len(self.router.app_order)\n j = 0\n for i in range(n):\n path = app_order[i]\n if path.find(pattern) != -1:\n j += 1\n app, type = app_path[path]\n if type == \"func\":\n print \" %d. %-12s [%4s] --> %s\" % (i, path, type, Job.get_func_help(app))\n elif type in (\"Job\", \"MJob\", \"PJob\"):\n print \" %d. %-12s [%4s] --> %s\" % (i, path, type, app.get_line_help())\n else:\n raise Exception(\"unknown Object type = %s of %s\" % (type, app) )\n if pattern:\n print \"There are %d/%d including '%s'\" % (j, n, pattern)", "def search(self, pattern=\"*\", mode=\"both\"):\n pattern = self._glob_to_sql(pattern)\n\n COND = \"(keyword.name like ? 
OR keyword.doc like ?)\"\n args = [pattern, pattern]\n if mode == \"name\":\n COND = \"(keyword.name like ?)\"\n args = [pattern,]\n\n sql = \"\"\"SELECT collection.collection_id, collection.name, keyword.name, keyword.doc\n FROM collection_table as collection\n JOIN keyword_table as keyword\n WHERE collection.collection_id == keyword.collection_id\n AND %s\n ORDER by collection.collection_id, collection.name, keyword.name\n \"\"\" % COND\n\n cursor = self._execute(sql, args)\n result = [(row[0], row[1], row[2], row[3].strip().split(\"\\n\")[0])\n for row in cursor.fetchall()]\n return list(set(result))", "def tasks(self, pattern, **kwargs):\n\n config = self.config.make(**kwargs)\n # yield a task for every py file in selection\n base = Path(config['base_dir'])\n excluded = set([base.joinpath(e) for e in config['exclude_paths']])\n for src in base.glob(pattern):\n if src in excluded:\n continue\n for exclude_pattern in config['exclude_patterns']:\n if src.match(exclude_pattern):\n break\n else:\n yield self(str(src))", "def list(self, pattern='*'):\n if self._group_dict is None:\n self._group_dict = collections.OrderedDict(\n (group.name, group) for group in self._client.list_groups())\n\n return [group for group in self._group_dict.values()\n if fnmatch.fnmatch(group.display_name, pattern)]", "def match_name(pattern, rows):\n matching = []\n for row in rows:\n # Use regex matching to check whether first name or last name contains the pattern\n if re.search(r'%s' % pattern.lower(), row[0].lower()) != None or re.search(r'%s' % pattern.lower(), row[1].lower()) != None:\n matching.append(row)\n\n # print the matched records\n print_records(matching)", "def tok_by_reg(pattern, list_of_toks):\n try:\n comped = re.compile(pattern)\n except:\n import traceback\n import sys\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lst = traceback.format_exception(exc_type, exc_value,\n exc_traceback)\n error_message = lst[-1]\n thetime = strftime(\"%H:%M:%S\", localtime())\n print '%s: Query %s' % (thetime, error_message)\n return 'Bad query'\n\n matches = [m for m in list_of_toks if re.search(comped, m)]\n\n return matches", "def pattern_filter(patterns, name):\n return [pat for pat in patterns if fnmatch.fnmatchcase(name, pat)]", "def _matching_jobs(buildername, all_jobs):\n LOG.debug(\"Find jobs matching '%s'\" % buildername)\n matching_jobs = []\n for j in all_jobs:\n if j[\"buildername\"] == buildername:\n matching_jobs.append(j)\n\n LOG.debug(\"We have found %d job(s) of '%s'.\" %\n (len(matching_jobs), buildername))\n return matching_jobs", "def getitemsbypattern(self, pattern):\n\n return True", "def tok_by_list(pattern, list_of_toks):\n if type(pattern) == str:\n pattern = [pattern]\n result = []\n matches = [m for m in list_of_toks if m in pattern]\n for m in matches:\n result.append(m)\n return result", "def find(pattern):\n files = config.index.files(path_glob=\"*%s*\" % pattern)\n print_files(files)", "def list_command_filter(self, testsuite_pattern, testcase_pattern):\n return None # Hobbes-test does not support listing by filter", "def grep(pattern, *files_or_paths):\n matches = []\n\n for fop in files_or_paths:\n with fileobj(fop) as fo:\n matches.extend((line for line in fo if re.match(pattern, line)))\n\n return matches", "def match(\n self,\n pattern: str\n ) -> Iterable[AbstractPage]:\n paths: List[str] = [page.path for page in self._page_table.keys()]\n for match in sorted(fn_filter(paths, pattern)):\n yield self[match]", "def search_by_pattern(self, pattern, key=lambda data: 
data['meta']):\n result = []\n for node, data in self.traverse():\n if re.search(pattern, key(data), flags=re.VERBOSE):\n result.append([node, data])\n return result", "def search_leaf_nodes_by_pattern(self, pattern, scope_pattern=False):\n is_match = lambda x, y: x.lower().startswith(y) if scope_pattern else y in x.lower()\n if pattern is not None:\n pattern = pattern.lower()\n searched_nodes = [\n node for name, node in self._leaf_nodes.items()\n if is_match(name, pattern)\n ]\n else:\n searched_nodes = [node for node in self._leaf_nodes.values()]\n return searched_nodes", "def search_jobs(self, bill_id: int = 0, limit: int = 0) -> List[Job]:\n pass", "def fetch_s3_keys_by_regex_pattern(s3_bucket, s3_directory, pattern):\n bucket_contents = s3_bucket.list(s3_directory)\n return [key for key in bucket_contents if pattern.search(key.name)]", "def SearchRe(context, pattern, arg=None):\n if not arg:\n arg = context.node\n arg = Conversions.StringValue(arg)\n matches = re.findall(pattern, arg)\n proc = context.processor\n matches_nodeset = []\n for groups in matches:\n proc.pushResult()\n proc.writers[-1].startElement('Match', EMPTY_NAMESPACE)\n if type(groups) != type(()):\n groups = (groups,)\n for group in groups:\n proc.writers[-1].startElement('Group', EMPTY_NAMESPACE)\n proc.writers[-1].text(group)\n proc.writers[-1].endElement('Group')\n proc.writers[-1].endElement('Match')\n frag = proc.popResult()\n context.rtfs.append(frag)\n matches_nodeset.append(frag.childNodes[0])\n return matches_nodeset", "def find_batch(self, arguments, start_with):\n if len(arguments) < 1:\n raise Exception(\"error, not valid number of arguments, need 2\")\n if not arguments[0].startswith(start_with):\n raise Exception(f\"error, tha name of batch need to start with {start_with}\")\n return arguments[0]", "def search_nodes_by_pattern(self, pattern):\n searched_nodes = []\n if pattern and pattern != '/':\n pattern = pattern.lower()\n for name, node in self._normal_node_map.items():\n name = name.lower()\n pattern_index = name.rfind(pattern)\n if pattern_index >= 0 and name.find('/', pattern_index + len(pattern)) == -1:\n searched_nodes.append(node)\n return searched_nodes", "def advanced_search(self, pattern):\n pass", "def search(self, pattern):\n return self.simple_search(pattern)\n # try:\n # return self.advanced_search(pattern)\n # except Exception, e:\n # return self.simple_search(pattern)", "def grep(self, pattern, flags=0, lazy=False):\n\n def iterator():\n for filename in self.walk(lazy=lazy):\n if re.search(pattern, filename, flags):\n yield self.__class__(filename)\n\n return lazy and iterator() or list(iterator())", "def listfiles(pattern):\n pattern = os.path.normpath(pattern)\n first_wildcard = re.search(\"{[^{]\", pattern)\n if first_wildcard:\n dirname = os.path.dirname(pattern[:first_wildcard.start()])\n if not dirname:\n dirname = \".\"\n else:\n dirname = os.path.dirname(pattern)\n pattern = re.compile(snakemake.io.regex(pattern))\n for dirpath, dirnames, filenames in os.walk(dirname, followlinks=True):\n for f in itertools.chain(filenames, dirnames):\n if dirpath != \".\":\n f = os.path.normpath(os.path.join(dirpath, f))\n match = re.match(pattern, f)\n if match:\n wildcards = snakemake.io.Namedlist(fromdict=match.groupdict())\n yield f, wildcards", "def get_matches_commandline(self, match_pattern):\n\n matches = []\n for _process in self.processes:\n if re.search(match_pattern, _process[\"cmd\"]):\n matches.append(_process[\"pid\"])\n return matches", "def search(self, pattern):\n result = 
set()\n\n if re.search(pattern, 'any'):\n result |= set([rule for rule in self.object.get_rules() if\n not rule.ip_source or not rule.ip_dest or not rule.port_source or not rule.port_dest])\n if re.search(pattern, 'ip'):\n result |= set([rule for rule in self.object.get_rules() if not rule.protocol])\n result |= set([rule for rule in self.object.get_rules() if rule.search(pattern)])\n self.model.clear()\n self.add_rules(list(result))", "def SearchRePy20(context, pattern, arg=None):\n if not arg:\n arg = context.node\n arg = Conversions.StringValue(arg)\n proc = context.processor\n matches_nodeset = []\n _re =re.compile(pattern)\n _match =_re.search(arg)\n while _match:\n proc.pushResult()\n proc.writers[-1].startElement('Match', EMPTY_NAMESPACE)\n _groups =_match.groups()\n # .groups() return empty tuple when the pattern did not do grouping\n if not _groups: _groups =tuple(_match.group())\n for group in _groups:\n proc.writers[-1].startElement('Group', EMPTY_NAMESPACE)\n # MatchObject groups return None if unmatched\n # unlike .findall() returning empty strings\n proc.writers[-1].text(group or '')\n proc.writers[-1].endElement('Group')\n proc.writers[-1].endElement('Match')\n frag = proc.popResult()\n context.rtfs.append(frag)\n matches_nodeset.append(frag.childNodes[0])\n _match =_re.search(arg, _match.end())\n return matches_nodeset", "def get_keywords(self, pattern=\"*\"):\n\n sql = \"\"\"SELECT collection.collection_id, collection.name,\n keyword.name, keyword.doc, keyword.args\n FROM collection_table as collection\n JOIN keyword_table as keyword\n WHERE collection.collection_id == keyword.collection_id\n AND keyword.name like ?\n ORDER by collection.name, keyword.name\n \"\"\"\n pattern = self._glob_to_sql(pattern)\n cursor = self._execute(sql, (pattern,))\n result = [(row[0], row[1], row[2], row[3], row[4])\n for row in cursor.fetchall()]\n return list(sorted(set(result), key=itemgetter(2)))", "def lookup(self, pattern):\n with self.connect() as c:\n # so we can access results via dictionary\n c.row_factory = sqlite3.Row\n cur = c.cursor()\n for res in cur.execute(self.create_query(\"SELECT *\", pattern)).fetchall():\n yield res", "def simple_search(self, pattern):\n query = Q()\n for ptn in pattern.split():\n for field in SEARCH_FIELDS:\n query |= Q(**{'%s__icontains' % field: ptn})\n return self.get_queryset().filter(query)", "async def findall(self, ctx, pattern, string, flags=None):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Regexes.findall: \\\"' + '\\\" \\\"'.join((pattern, string, flags)) + '\\\"', extra={'invoker': ctx.message.author.name})\r\n if flags is not None:\r\n exp = '(?' 
+ flags.lower().replace('l', 'L') + ')(?:' + pattern + ')'\r\n else:\r\n exp = pattern\r\n gen = re.finditer(exp, string)\r\n result = '```\\nResults:\\n'\r\n try:\r\n for m in gen:\r\n result += m.group(0) + ('\\t' if len(m.groups()) > 0 else '') + '\\t'.join(m.groups()) + '\\n'\r\n except Exception:\r\n result += 'Error in flags or expression.\\n'\r\n if result == '```\\nResults:\\n':\r\n result += 'No results :(\\n'\r\n if '\\t' in result:\r\n ms = re.finditer('([^\\t\\n]*\\t)+[^\\t\\n]*\\n?', result)\r\n ms = [len(m.group(0).strip().split('\\t')) for m in ms]\r\n ms = max(ms)\r\n result = result[:13] \\\r\n + '\\t'.join(['Gp{}'.format(i) for i in range(ms-1)]) \\\r\n + '\\n' + result[13:]\r\n result += '```'\r\n await ctx.send(result)", "def _find_by_name_pattern(search_directory, name_pattern, is_file=True):\n if not os.listdir(search_directory):\n return None\n # filter check list\n choose_list = []\n if is_file:\n choose_list += filter(lambda x: os.path.isfile(x), [os.path.join(search_directory, y) for y in os.listdir(search_directory)])\n else:\n choose_list += filter(lambda x: os.path.isdir(x), [os.path.join(search_directory, y) for y in os.listdir(search_directory)])\n # check\n for sub_name in choose_list:\n if re.match(name_pattern, os.path.basename(sub_name), re.IGNORECASE):\n return sub_name\n return None", "def wildcard(pattern):\n wildcards = pattern.count('?')\n alphabet = ['0', '1']\n\n def xcombinations(items, length):\n if length == 0:\n yield []\n else:\n for i in xrange(len(items)):\n for sc in xcombinations(items, length - 1):\n yield [items[i]] + sc\n\n for combination in xcombinations(alphabet, wildcards):\n buff = ''\n for c in pattern:\n if c == '?':\n buff += combination.pop()\n else:\n buff += c\n yield buff", "def glob(self, pattern, lazy=False):\n def iterator():\n for filename in self.walk(lazy=lazy):\n if fnmatch(filename, pattern):\n yield self.__class__(filename)\n\n return lazy and iterator() or list(iterator())", "def get_matches_commandline_with_children(self, match_pattern):\n\n matched_pids = self.get_matches_commandline(match_pattern)\n for matched_pid in matched_pids:\n matched_pids.extend(self.get_child_processes(matched_pid))\n return list(set(matched_pids))", "def _internal_match(self, pattern):\n compiled_re = re.compile(pattern)\n for word in self.words:\n if compiled_re.fullmatch(word) is not None:\n yield word", "def find_all_entries(\n yaml_file: str, pattern: Pattern, pattern_keyword: str) -> List[str]:\n matches = []\n with open(yaml_file, 'r') as data:\n lines = data.readlines()\n\n for idx, line in enumerate(lines):\n match = re.search(pattern, line)\n if match is not None:\n matches.append(match.group(pattern_keyword))\n return matches", "def search_file_all(pattern, filename):\n if not os.path.exists(filename):\n raise Exception(\"Can't open file for reading! \" + filename)\n\n matches = []\n fh = open(filename, \"r\")\n for line in fh:\n allmatch = re.findall(pattern, line)\n if allmatch:\n matches += allmatch\n\n fh.close()\n return matches", "def find_all_by_name(folder, name):\n # return all entities by running the generator to it's end\n return list(find_by(folder, lambda e: e.name == name))", "async def search(self, ctx, pattern, string, flags=None):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Regexes.search: \\\"' + '\\\" \\\"'.join((pattern, string, flags)) + '\\\"', extra={'invoker': ctx.message.author.name})\r\n if flags is not None:\r\n exp = '(?' 
+ flags.lower().replace('l', 'L') + ')(?:' + pattern + ')'\r\n else:\r\n exp = pattern\r\n try:\r\n m = re.search(exp, string)\r\n except Exception:\r\n m = False\r\n if m:\r\n result = '```\\nGroups:\\n' + m.group(0) + '\\n'\r\n for group in m.groups():\r\n result += (group or '') + '\\n'\r\n result += '```'\r\n elif m is False:\r\n result = '```\\nError in flags or expression.\\n```'\r\n else:\r\n result = '```\\nNo match :(\\n```'\r\n await ctx.send(result)", "def select(wildcards, output):\n return ceil(int(wildcards.n_proc) / PPN)", "def reg_csv_search(arg):\n results_list = []\n with open('work_log.csv', 'r') as file:\n for row in file:\n if re.search(arg, str(row)):\n results_list.append(row)\n results_sort(results_list)", "def find_business_based_on_name_partial():\n while True:\n print()\n business_name = input(\n 'Please enter partial business name or type \"back\" or \"quit\": ')\n print()\n if business_name == \"quit\":\n print(\"Goodbye!\")\n sys.exit()\n if business_name == \"back\":\n return\n\n business_objects = business_col.find(\n {\"$text\": {\"$search\": business_name}}).limit(10)\n if business_objects is None:\n print(\"No business found with given name.\")\n continue\n\n for business_object in business_objects:\n print_business(business_object)", "def test_command_filter(self, testsuite_pattern=\"*\", testcase_pattern=\"*\"):\n cmd = self.test_command()\n\n # TODO: although Hobbes-test can select tests to run by \"--tests\"\n # command line option, but can not select them during listing.\n # May need to implement this feature if needed, now we just run\n # the test as a whole, even only one suite is requested run.\n\n # if testsuite_pattern not in (\"*\", self._VERIFICATION_SUITE_NAME):\n # cmd.extend([\"--tests\", testsuite_pattern])\n\n # At the beginning no testcase exists in test suite\n if testcase_pattern not in (\"*\", self._VERIFICATION_TESTCASE_NAME):\n self.logger.user_info(\n 'Should run testcases in pattern \"%s\", but cannot run'\n \" individual testcases thus will run the whole test suite\",\n testcase_pattern,\n )\n\n return cmd", "def check_all_jobs(self, phase=None, regex=None, sortby=None):\n checkalljobs = self._request('GET', CosmoSim.QUERY_URL,\n auth=(self.username, self.password),\n params={'print': 'b'}, cache=False)\n\n self.job_dict = {}\n soup = BeautifulSoup(checkalljobs.content, \"lxml\")\n\n for tag in soup.find_all({\"uws:jobref\"}):\n tag_phase = str(tag.find('uws:phase').string)\n if tag_phase in ['COMPLETED', 'EXECUTING', 'ABORTED', 'ERROR']:\n self.job_dict['{0}'.format(tag.get('xlink:href')\n .split('/')[-1])] = tag_phase\n else:\n self.job_dict[str(tag.get('id'))] = tag_phase\n\n if phase:\n phase = [phase[i].upper() for i in range(len(phase))]\n\n if regex:\n pattern = re.compile(\"{}\".format(regex))\n try:\n groups = [pattern.match(self.table_dict.values()[i]).group()\n for i in range(len(self.table_dict.values()))\n if pattern.match(self.table_dict.values()[i])\n is not None]\n matching_tables = [groups[i]\n for i in range(len(groups))\n if groups[i] in self.table_dict.values()]\n except AttributeError:\n warnings.warn('No tables matching the regular expression '\n '`{0}` were found.'.format(regex))\n matching_tables = self.table_dict.values()\n\n if phase:\n if \"COMPLETED\" not in phase:\n warnings.warn(\"No jobs with phase `{0}` matching \"\n \"the regular expression `{1}` were found.\\n\"\n \"Matching regular expression `{1}` to all \"\n \"jobs with phase `COMPLETED` instead \"\n \"(unsorted):\".format(phase, regex))\n 
else:\n matching_tables = [[self.table_dict[i]\n for i in self.table_dict.keys()\n if self.table_dict[i] == miter\n and self.job_dict[i] in phase][0]\n for miter in matching_tables]\n self._existing_tables() # creates a fresh up-to-date table_dict\n\n self._starttime_dict()\n\n if not sortby:\n if regex:\n matching = zip(*[[(i, self.job_dict[i], self.starttime_dict[i])\n for i in self.table_dict.keys()\n if self.table_dict[i] == miter][0]\n for miter in matching_tables])\n (matching_jobids, matching_phases,\n matching_starttimes) = matching\n if sortby:\n if sortby.upper() == \"TABLENAME\":\n if 'matching_tables' not in locals():\n matching_tables = sorted(self.table_dict.values())\n else:\n matching_tables = sorted(matching_tables)\n matching = zip(*[[(i, self.job_dict[i], self.starttime_dict[i])\n for i in self.table_dict.keys()\n if self.table_dict[i] == miter][0]\n for miter in matching_tables])\n (matching_jobids, matching_phases,\n matching_starttimes) = matching\n\n elif sortby.upper() == 'STARTTIME':\n if 'matching_tables' not in locals():\n matching_starttimes = sorted(self.starttime_dict.values())\n matching = zip(*[[(i, self.job_dict[i], self.table_dict[i])\n for i in self.starttime_dict.keys()\n if self.starttime_dict[i] == miter][0]\n for miter in matching_starttimes])\n (matching_jobids, matching_phases,\n matching_tables) = matching\n else:\n matching_starttimes = [[self.starttime_dict[i]\n for i in self.table_dict.keys()\n if self.table_dict[i] == miter][0]\n for miter in matching_tables]\n matching = zip(*[[(i, self.job_dict[i], self.table_dict[i])\n for i in self.starttime_dict.keys()\n if self.starttime_dict[i] == miter][0]\n for miter in matching_starttimes])\n (matching_jobids, matching_phases,\n matching_tables) = matching\n\n frame = sys._getframe(1)\n\n # list of methods which use check_all_jobs() for which I would not\n # like job_dict to be printed to the terminal\n do_not_print_job_dict = ['completed_job_info', 'general_job_info',\n 'delete_all_jobs', '_existing_tables',\n 'delete_job', 'download']\n\n if frame.f_code.co_name in do_not_print_job_dict:\n return checkalljobs\n else:\n if not phase and not regex:\n if not sortby:\n t = Table()\n t['JobID'] = list(self.job_dict.keys())\n t['Phase'] = list(self.job_dict.values())\n t.pprint()\n else:\n if sortby.upper() == 'TABLENAME':\n t = Table()\n t['Tablename'] = matching_tables\n t['Starttime'] = matching_starttimes\n t['JobID'] = matching_jobids\n t['Phase'] = matching_phases\n t.pprint()\n if sortby.upper() == 'STARTTIME':\n t = Table()\n t['Starttime'] = matching_starttimes\n t['Tablename'] = matching_tables\n t['JobID'] = matching_jobids\n t['Phase'] = matching_phases\n t.pprint()\n\n elif not phase and regex:\n t = Table()\n if sortby:\n if sortby.upper() == 'STARTTIME':\n t['Starttime'] = matching_starttimes\n t['Tablename'] = matching_tables\n if sortby.upper() == 'TABLENAME':\n t['Tablename'] = matching_tables\n t['Starttime'] = matching_starttimes\n if not sortby:\n t['Tablename'] = matching_tables\n t['Starttime'] = matching_starttimes\n t['JobID'] = matching_jobids\n t['Phase'] = matching_phases\n t.pprint()\n\n if phase and not regex:\n if len(phase) == 1 and \"COMPLETED\" in phase:\n if not sortby:\n matching_jobids = [key\n for key in self.job_dict.keys()\n if self.job_dict[key] in phase]\n matching = zip(*[[(self.table_dict[i],\n self.job_dict[i],\n self.starttime_dict[i])\n for i in self.table_dict.keys()\n if i == miter][0]\n for miter in matching_jobids])\n (matching_tables, matching_phases,\n 
matching_starttimes) = matching\n\n t = Table()\n t['JobID'] = matching_jobids\n t['Phase'] = matching_phases\n t['Tablename'] = matching_tables\n t['Starttime'] = matching_starttimes\n\n if sortby:\n if sortby.upper() == 'TABLENAME':\n t['Tablename',\n 'Starttime', 'JobID', 'Phase'].pprint()\n if sortby.upper() == 'STARTTIME':\n t['Starttime',\n 'Tablename', 'JobID', 'Phase'].pprint()\n else:\n t.pprint()\n\n else:\n if sortby:\n warnings.warn('Sorting can only be applied to jobs '\n 'with phase `COMPLETED`.')\n if not sortby:\n matching_jobids = [key\n for key in self.job_dict.keys()\n if self.job_dict[key] in phase]\n matching_phases = [self.job_dict[key]\n for key in self.job_dict.keys()\n if self.job_dict[key] in phase]\n t = Table()\n t['JobID'] = matching_jobids\n t['Phase'] = matching_phases\n t.pprint()\n\n if phase and regex:\n t = Table()\n t['Tablename'] = matching_tables\n t['Starttime'] = matching_starttimes\n t['JobID'] = matching_jobids\n t['Phase'] = matching_phases\n\n if sortby:\n if sortby.upper() == 'TABLENAME':\n t.pprint()\n if sortby.upper() == 'STARTTIME':\n t['Starttime', 'Tablename', 'JobID', 'Phase'].pprint()\n else:\n t.pprint()\n\n return checkalljobs", "def _matchPart(self, part):\r\n return [{**{key.name:p[key.name] for key in self.groups},\r\n **({#Call recursively on nested subpattern\r\n self.name:self.nestedPattern._matchPart(\r\n #and match\r\n p[0])}\r\n #only if subpattern exists\r\n if self.nestedPattern is not None else {})}\r\n for p in re.finditer(self.regex, part)\r\n #discard any record in ignored\r\n if not any([p[key.name] in self.ignored[key]\r\n for key in self.ignored])]", "def matchPattern(category):\n settings = settingsLoader()\n categoryPattern = (settings['categoriesDictSettings']\n [category]\n ['matches']\n ['matchExpression'])\n logging.debug(\"SORT: matchPattern: using %s\" % categoryPattern)\n for EachPattern in categoryPattern:\n logging.debug(\"SORT: matchPattern: searching for %s\" % EachPattern)\n for EachFile in listOfFiles:\n logging.debug(\"SORT: matchPattern: searching for %s in %s\" %\n (EachPattern, EachFile))\n if fnmatch.fnmatchcase(EachFile, EachPattern):\n return True\n return False", "def find_all_by_name ( self, name, **kw ):\n return self.find_all (\n lambda s, n: s.name == n, c_args=( name, ), **kw\n )", "def batch_mover(pattern, directory=None):\n if directory is None:\n directory = Path().cwd()\n\n for i in os.scandir(directory):\n if file_check(pattern, i.name):\n pass\n # shutil.move(i.name, yeah we gotta change a lot here", "def list_match(names: List[str], expression: str) -> List[str]:\n exprs_unix_shell_style = parse_exp2unix_shell_style(expression)\n filter_results = []\n for expr in exprs_unix_shell_style:\n filter_results.extend(fnmatch.filter(names, expr))\n return list(set(filter_results))", "def search(pattern='*',sub='',vol=default,mirror=False):\n base = path(sub=sub,vol=vol,mirror=mirror)\n return sorted([f.relative_to(base) for f in base.glob(pattern)])", "def filter_resources(self, pattern):\n if isinstance(pattern, basestring):\n is_match = lambda x: pattern == x\n elif isinstance(pattern, _pattern_type):\n is_match = lambda x: pattern.match(x)\n elif hasattr(pattern, '__call__'):\n is_match = pattern\n else:\n raise TypeError, 'pattern must be one of str,re.compile,callable'\n return filter(lambda x: is_match(x.request_url), self.http_resources)[:]", "async def find_pattern_in_tags(pattern: str, tags: ResultSet) -> list[str]:\n matches: list[str] = []\n\n for tag in tags:\n url: str = 
str(tag.get('href'))\n matches.append(url) if re.search(pattern=pattern, string=url) else None\n\n return matches", "def lst_and_pattern (filer_lst, pattern):\r\n new_filter_lst=[]\r\n for word in filer_lst:\r\n if word_and_pattern(word,pattern):\r\n new_filter_lst.append(word)\r\n return new_filter_lst", "def search_task(var,username):\n cursor = conn.cursor()\n cursor.execute(\"SELECT category,task_name, deadline from tasks where (task_name=%s OR category=%s OR deadline=%s) AND username=%s;\", (var,var,var,username))\n rows = cursor.fetchall()\n return rows", "def egrep(self, pattern):\n pattern = re.compile(pattern)\n result = []\n for line in self.contents:\n if pattern.search(line):\n result.append(line)\n if result:\n return result\n return False", "def find_item_by_name(list_, namegetter, name):\n matching_items = [i for i in list_ if namegetter(i) == name]\n if len(matching_items) == 0:\n prog = re.compile(re.escape(name) + '$', re.IGNORECASE)\n matching_items = [i for i in list_ if prog.match(namegetter(i))]\n if len(matching_items) == 0:\n prog = re.compile(re.escape(name))\n matching_items = [i for i in list_ if prog.match(namegetter(i))]\n if len(matching_items) == 0:\n prog = re.compile(re.escape(name), re.IGNORECASE)\n matching_items = [i for i in list_ if prog.match(namegetter(i))]\n if len(matching_items) == 0:\n prog = re.compile(re.escape(name))\n matching_items = [i for i in list_ if prog.search(namegetter(i))]\n if len(matching_items) == 0:\n prog = re.compile(re.escape(name), re.IGNORECASE)\n matching_items = [i for i in list_ if prog.search(namegetter(i))]\n return matching_items", "def search(wiki, pattern):\n wiki.search_tags(pattern)", "def glob(patterns: list[str]) -> Table:\n for val in _ensure_list(patterns):\n fol, _, pat = val.partition(\"/*\")\n folder = Path(fol)\n for file in folder.glob(\"*\" + pat):\n yield {\"file\": str(file)}", "def grep(pattern, arr):\n\n assert isinstance(pattern, basestring)\n assert isinstance(arr, list)\n\n ret = []\n\n for el in arr:\n if re.search(pattern, el):\n ret.append(el)\n\n return ret", "def multifilter(names, patterns):\n for name in names:\n if isinstance(name, collections.Mapping):\n for key in name.iterkeys():\n for pattern in patterns:\n if fnmatch.fnmatch(key, pattern):\n yield key\n break\n else:\n for pattern in patterns:\n if fnmatch.fnmatch(name, pattern):\n yield name\n break", "def compile(self, name, pattern):\n try:\n return self.get_pattern(name)\n except KeyError:\n return self.store_pattern(name, re.compile(pattern))", "def topic_pattern_match(pattern):\n client = AdminClient({\"bootstrap.servers\": \"PLAINTEXT://localhost:9092\"})\n topic_metadata = client.list_topics()\n topics = topic_metadata.topics\n filtered_topics = {key: value for key, value in topics.items() if contains_substring(key, pattern)}\n return len(filtered_topics) > 0", "def globfiles(path, pattern):\n return Path(path).glob(pattern)", "def __selectJobsHandle(self, value = None):\n\n if not value:\n value = str(self._selectJobsEditBox.text())\n\n if value:\n self.__monitorCue.clearSelection()\n\n # Allow simple substring searching as well as full regex\n if not re.search('[^A-Za-z0-9_.|-]', value):\n value = \".*%s.*\" % value.replace(\"|\", \".*|.*\")\n\n items = self.__monitorCue.findItems(value,\n QtCore.Qt.MatchRegExp |\n QtCore.Qt.MatchWrap |\n QtCore.Qt.MatchRecursive,\n 0)\n\n if items:\n # Select and show all found items\n for item in items:\n item.setSelected(True)\n if not item.isExpanded():\n parent = item.parent()\n while 
parent:\n parent.setExpanded(True)\n parent = parent.parent()\n\n # Scroll to the first item\n self.__monitorCue.scrollToItem(items[0],\n QtWidgets.QAbstractItemView.PositionAtTop)", "def glob_patterns(separator=os.path.sep, **kwargs):\n terms = [kwargs.pop(field, '*')\n for field in NormalizedSceneId.tuple_type._fields]\n assert not kwargs, 'Unrecognized field names: {}'.format(kwargs)\n\n # terms which are not str are assumed to contain choices (list of str)\n choice_inds = [i for i, val in enumerate(terms) if not isinstance(val, str)]\n val_lists = [terms[i][:] for i in choice_inds]\n\n patterns = []\n for values in it.product(*val_lists):\n for i, ind in enumerate(choice_inds):\n terms[ind] = values[i]\n patterns.append(separator.join(terms))\n return patterns", "def pattern_search(pattern, dataset, column):\n # Filter\n dataset = dataset[dataset[column].str.contains(pattern, regex=True)]\n # Reset index\n dataset = dataset.reset_index(drop=True)\n # Return\n return dataset", "def search_by_string(self):\n print(\"*** String Search ***\\n\")\n print(\"Enter a search string.\\n\")\n print(\"- NAME and NOTE will be searched for all tasks -\")\n print(\"- Searching IS case-sensitive, but partial matches will be returned -\\n\")\n while True:\n try:\n search_string = input(\">>> \")\n results = self.regex_entry_search(search_string)\n except re.error:\n print(\"Couldn't parse search query. Please try again.\")\n else:\n clear_screen()\n print(f\"Found {len(results)} matches for string \\\"{search_string}\\\"...\\n\")\n self.print_selected_entries(results)\n break", "def search(pattern, string):\n result = []\n if re.search(pattern, string):\n result.append(string)\n return result", "def find(self, name=None, pattern=None, extension=None, **kwargs):\n elements = []\n context = getContext()\n for path in self.findPaths(name=name, pattern=pattern, extension=extension):\n if os.path.exists(path):\n e = elementFromPath(path, context=context, name=name, **kwargs)\n if e is not None:\n elements.append(e)\n return elements", "def find_all_by_name_begin ( self, prefix, **kw ):\n return self.find_all (\n lambda s, pre: s.name.startswith ( pre ), c_args=( prefix, ), **kw\n )", "def gen_grep(pattern, lines):\n pat = re.compile(pattern)\n for line in lines:\n if pat.search(line):\n yield line", "def matching_programs(pattern):\n programs = get_programs()\n search_results = []\n\n # For every program, check if it matches by title or by filename.\n for program, data in programs.items():\n title_result = fuzzy_search(pattern, data['Name'])\n filename_result = fuzzy_search(pattern, program)\n title_match = True\n for result in title_result, filename_result:\n if result in (WHOLE_PATTERN_FIRST,\n WHOLE_PATTERN_IN_STRING,\n PATTERN_IN_STRING):\n search_results.append((program, data, title_match, result))\n break\n title_match = False\n\n # Sort the programs by how well they matched and yield them.\n sorted_ = sorted(search_results, key=lambda r: r[3], reverse=True)\n for i, item in enumerate(sorted_):\n program, data, title_match, _ = item\n yield (program, data, title_match)", "def selectWork(self, args, pooltable = 'tp_threadpool'):\n # this query takes place in a locked section so\n # we do not have to worry about multiple slaves\n # getting the same work.\n result = ''\n if pooltable in ['tp_threadpool', 'tp_threadpool_buffer_in', \\\n 'tp_threadpool_buffer_out']:\n sqlStr = \"\"\"\nSELECT min(id) FROM %s WHERE component = :component AND\nthread_pool_id = :thread_pool_id AND state='queued'\n \"\"\" % 
(pooltable)\n result = self.execute(sqlStr, args)\n else:\n sqlStr = \"\"\"\nSELECT min(id) FROM %s WHERE state='queued'\n \"\"\" % (pooltable)\n result = self.execute(sqlStr, {})\n\n return self.formatOne(result)", "def _search_multiprocessing(self):\n pool = multiprocessing.Pool(self._main_args_.n_jobs)\n _cand_list = pool.map(self._search, self._main_args_._n_process_range)\n\n return _cand_list", "def get_jobs(self, expression):\n return list(parse_job_list(expression, context=self.cc))", "def name_search(self, name, args=None, operator='ilike', limit=100):\n args = args or []\n if name and operator in ('=', 'ilike', '=ilike', 'like', '=like'):\n tickets = []\n if name.isdigit():\n number = int(name)\n tickets = self.search([('number', '=', number)] + args,\n limit=limit)\n else:\n tickets = self.search([('name', operator, name)] + args,\n limit=limit)\n if len(tickets) > 0:\n return tickets.name_get()\n return super(Ticket, self.browse()).name_search()", "def glob(pattern: str) -> List[str]:\n\n path = clean_path(pattern)\n result = Stat._cache.get(path)\n\n if isinstance(result, BaseException):\n return []\n\n if result is None:\n paths = glob_files(pattern, recursive=True)\n if paths != [pattern]:\n return [clean_path(path) for path in paths]\n result = Stat._result(pattern, throw=False)\n assert not isinstance(result, BaseException)\n\n return [pattern]", "def get_jobs(self, label_selector, namespace):\n return self.batch_client.list_namespaced_job(namespace, label_selector=label_selector, watch=False)", "def search_jobs(self, bill_id: int = 0, limit: int = 0) -> List[Job]:\n res = []\n query = QSqlQuery()\n q = \"select id, hours, price, job from jobs\"\n if bill_id > 0:\n q += \" where b_id=?\"\n q += \" order by id desc\"\n if limit > 0:\n q += \" limit ?\"\n query.prepare(q)\n if bill_id > 0:\n query.addBindValue(bill_id)\n if limit > 0:\n query.addBindValue(limit)\n query.exec_()\n while query.next():\n res.append(_extract_job(query))\n return res", "def _scan_hosts(self):\n results = []\n for item in glob.glob(self._pattern):\n results.append(item)\n return results", "def glob_fmt(pattern: str, *templates: Strings) -> List[str]:\n results: List[str] = []\n for wildcards in glob_extract(pattern):\n for template in each_string(*templates):\n results.append(copy_annotations(template, template.format(**wildcards)))\n return results", "def GetInputFilenameGlob(pattern):\n if not indir:\n return glob.glob(fname)\n files = []\n for dirname in indir:\n pathname = os.path.join(dirname, pattern)\n files += glob.glob(pathname)\n return sorted(files)", "def search_term():\n search = input(\"Enter term or string: \")\n entries = select_entries()\n entries = entries.where(\n (Entry.task_name.contains(search)) |\n (Entry.notes.contains(search)))\n view_entries(entries)\n return entries", "def search(self, text, location=\"\"):\n return [\n obj.name[len(location) + 1 : -3] for obj in self._file_model.search(text) if obj.name.startswith(location)\n ]", "def findMatchingNames(regname, map):\n list = []\n regname += \"$\"\n\n # Find the existing items that match this string\n\n for name in map:\n regexp = re.compile(regname).match(name)\n if regexp:\n list.append(regexp)\n\n return list", "def search_service(self, name_filter):\n rs=search_service(name_filter)\n for el in rs:\n print(el)", "def add_command_wildcard(self, pattern):\n self._command_wildcards.append(pattern)", "def scan(self, pat):\n re_pat = re.compile(pat)\n for infilename in self.file_names:\n infile = open(infilename, 'r')\n 
for line in infile:\n line = line.rstrip()\n mo = re_pat.search(line)\n if mo is not None:\n print '%s:%s' % (infilename, line, )", "def regex_entry_search(self, expression):\n return [entry for entry in self.entries \n if re.search(expression, entry.name)\n or re.search(expression, entry.note)]" ]
[ "0.6330027", "0.57241905", "0.5717007", "0.5662955", "0.5661944", "0.5415436", "0.5401124", "0.53878415", "0.5367021", "0.5349991", "0.53413725", "0.53162223", "0.5308199", "0.5304073", "0.5292824", "0.5277535", "0.5271562", "0.52473783", "0.52251303", "0.5207287", "0.5199997", "0.5077761", "0.5067722", "0.50676364", "0.50552154", "0.50485784", "0.5037991", "0.50257653", "0.5004253", "0.4994167", "0.4983213", "0.49431965", "0.4935373", "0.49168554", "0.4915093", "0.4903381", "0.49032587", "0.48827", "0.48825642", "0.48800036", "0.48742306", "0.4868486", "0.48318186", "0.48182398", "0.48069555", "0.48048678", "0.4789572", "0.47791138", "0.47741684", "0.47697723", "0.4768891", "0.47530323", "0.47377506", "0.4723521", "0.47172984", "0.47077087", "0.46965033", "0.4693903", "0.4689358", "0.4686365", "0.46814013", "0.46749955", "0.4671631", "0.46704656", "0.4653824", "0.4638475", "0.46370727", "0.46370715", "0.46254858", "0.4623866", "0.46186167", "0.46182013", "0.46122745", "0.4611688", "0.46075976", "0.46074128", "0.46067524", "0.46059063", "0.46018723", "0.46002653", "0.4590766", "0.459063", "0.45834783", "0.4576156", "0.45694286", "0.45681223", "0.456002", "0.4559936", "0.4559685", "0.45525748", "0.45454934", "0.45423457", "0.45416602", "0.45369014", "0.45295393", "0.45238784", "0.45191386", "0.4514331", "0.45118764", "0.45086524" ]
0.7555124
0
Execute a batch. Examples
def run(self,
        dry_run: bool = False,
        verbose: bool = False,
        delete_scratch_on_exit: bool = True,
        **backend_kwargs: Any) -> Optional[_bc.Batch]:
    seen = set()
    ordered_jobs = []

    def schedule_job(j):
        if j in seen:
            return
        seen.add(j)
        for p in j._dependencies:
            schedule_job(p)
        ordered_jobs.append(j)

    for j in self._jobs:
        schedule_job(j)

    assert len(seen) == len(self._jobs)

    job_index = {j: i for i, j in enumerate(ordered_jobs, start=1)}
    for j in ordered_jobs:
        i = job_index[j]
        j._job_id = i
        for d in j._dependencies:
            if job_index[d] >= i:
                raise BatchException("cycle detected in dependency graph")

    self._jobs = ordered_jobs
    run_result = self._backend._run(self, dry_run, verbose, delete_scratch_on_exit, **backend_kwargs)  # pylint: disable=assignment-from-no-return

    if self._DEPRECATED_fs is not None:
        # best effort only because this is deprecated
        async_to_blocking(self._DEPRECATED_fs.close())
        self._DEPRECATED_fs = None

    return run_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_batch(self, batch_x, batch_y):\n raise NotImplementedError()", "def ExecuteBatch(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def execute(self):\n return self._batch.execute()", "def BeginExecuteBatch(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def ExecuteBatch(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def action(self):\n\n batch_name = super().find_batch(self.__arguments, '')\n butch = self.__batch_data.get_batch(batch_name)\n cmd = CMDfromArray(butch)\n cmd.run()", "def eval_batch(self, outputs, target):\n raise NotImplementedError", "def execute_batch(self, commands=()):\n if not hasattr(commands, \"__iter__\"):\n raise InvalidCommandValue(\"commands must be an iterable\")\n if self._execute_batch_fn is None:\n raise MissingFunctionDefinition(\"execute_batch method is not mapped\")\n return self._execute_batch_fn(commands)", "def run_batch(cpu, dtypes, itypes, nt, datafile, N):\n for ((dt, size), it) in itertools.product(dtypes, itypes):\n base = 10**(1/10)\n n = base \n m = 2000000\n while n*size < N:\n if nt == 1:\n cmd = 'numactl -C {} ./main {} {} {} {} >> {}'\n cmd = cmd.format(cpu, dt, it, int(n), m, datafile)\n else:\n cmd = './main-mt {} {} {} {} {} >> {}'\n cmd = cmd.format(dt, it, int(n), m, nt, datafile)\n print cmd\n if os.system(cmd) != 0:\n errmsg = \"Error while executing '{}': Aborting!\\n\".format(cmd)\n sys.stderr.write(errmsg)\n sys.exit(-1)\n while int(n*base) == int(n):\n n *= base\n n *= base", "def test_batch(self):\n pass", "def executemany(self, operation, seq_of_parameters):\n self._check_closed()\n\n p_statement = self._statement_cache.get_prepared_statement(operation)\n self.session.execute_batch_prepared_statement(p_statement, seq_of_parameters)", "def run(self, batch):\n response = self.post(batch)\n log.info(\"< Discarding batch response\")\n response.close()", "def batch_execute(self, conn):\n def batches(data, batch_size) -> list:\n \"\"\"Return batches of length `batch_size` from any object that\n supports iteration without knowing length.\"\"\"\n rv = []\n for idx, line in enumerate(data):\n if idx != 0 and idx % batch_size == 0:\n yield rv\n rv = []\n rv.append(line)\n yield rv\n\n columns = ColumnCollection(self.columns)\n if self.header:\n self.columns = [columns.get(h) for h in next(self.data)]\n columns = ColumnCollection(self.columns)\n\n total = 0\n query = BulkInsertQuery(self.table, columns)\n for batch in batches(self.data, self.batch_size):\n total += query.execute(conn, batch) or 0\n yield total", "def batch(self):\n return self._client.batch()", "def ExecuteBatch(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ExecuteBatch(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def execute_batch(self, authorization: str, response: dict):\n\n batch_id = str(response['data']['executeBatch']['batch']['id'])\n container_name = f'mobydq-batch-{batch_id}'\n client = docker.from_env()\n client.containers.run(\n name=container_name,\n image='mobydq-scripts',\n network='mobydq_network',\n command=['python', 'run.py', authorization, 'execute_batch', batch_id],\n remove=True,\n detach=True\n )\n\n # Return original response as 
container is executed in background\n return response", "def ExecuteBatch(self, batch_feed, url=None, spreadsheet_key=None, \n worksheet_id=None,\n converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString):\n\n if url is None:\n url = self._GenerateCellsBatchUrl(spreadsheet_key, worksheet_id)\n return self.Post(batch_feed, url, converter=converter)", "def execute(targets, lines):", "def _run_batch(self, opts, operation, placeholder, feed,\n placeholder2=None, feed2=None):\n assert len(feed.shape) > 0, 'Empry feed.'\n num_points = feed.shape[0]\n batch_size = opts['tf_run_batch_size']\n batches_num = int(np.ceil((num_points + 0.) / batch_size))\n result = []\n for idx in xrange(batches_num):\n if idx == batches_num - 1:\n if feed2 is None:\n res = self._session.run(\n operation,\n feed_dict={placeholder: feed[idx * batch_size:]})\n else:\n res = self._session.run(\n operation,\n feed_dict={placeholder: feed[idx * batch_size:],\n placeholder2: feed2})\n else:\n if feed2 is None:\n res = self._session.run(\n operation,\n feed_dict={placeholder: feed[idx * batch_size:\n (idx + 1) * batch_size]})\n else:\n res = self._session.run(\n operation,\n feed_dict={placeholder: feed[idx * batch_size:\n (idx + 1) * batch_size],\n placeholder2: feed2})\n\n if len(res.shape) == 1:\n # convert (n,) vector to (n,1) array\n res = np.reshape(res, [-1, 1])\n result.append(res)\n result = np.vstack(result)\n assert len(result) == num_points\n return result", "def BeginExecuteBatch(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def post(self, batch):\n num_jobs = len(batch)\n plural = \"\" if num_jobs == 1 else \"s\"\n log.info(\"> Sending batch request with %s job%s\", num_jobs, plural)\n data = []\n for i, job in enumerate(batch):\n if job.finished:\n raise Finished(job)\n else:\n job.finished = True\n log.info(\"> {%s} %s\", i, job)\n data.append(dict(job, id=i))\n response = self.resource.post(data)\n log.info(\"< Received batch response for %s job%s\", num_jobs, plural)\n return response", "def issueBatchJob(self, command, memory, cores, disk):\n raise NotImplementedError('Abstract method: issueBatchJob')", "def simulate_batch():\n this_run = op_util.current_run()\n util.ensure_dir(this_run.guild_path(\"proto\"))", "def execute(self, targets):", "def do_bulk(self, args):\n pass", "def test_batch_execute_parallel(mock_run_batch):\n mock_run_batch.return_value = TASK_BATCH\n dev = _aws_device(wires=4, foo=\"bar\", parallel=True)\n assert dev.parallel is True\n\n with QuantumTape() as circuit:\n qml.Hadamard(wires=0)\n qml.CNOT(wires=[0, 1])\n qml.probs(wires=[0])\n qml.expval(qml.PauliX(1))\n qml.var(qml.PauliY(2))\n qml.sample(qml.PauliZ(3))\n\n circuits = [circuit, circuit]\n batch_results = dev.batch_execute(circuits)\n for results in batch_results:\n assert np.allclose(\n results[0], RESULT.get_value_by_result_type(result_types.Probability(target=[0]))\n )\n assert np.allclose(\n results[1],\n RESULT.get_value_by_result_type(\n result_types.Expectation(observable=Observable.X(), target=1)\n ),\n )\n assert np.allclose(\n results[2],\n RESULT.get_value_by_result_type(\n result_types.Variance(observable=Observable.Y(), target=2)\n ),\n )\n assert np.allclose(\n results[3],\n RESULT.get_value_by_result_type(\n result_types.Sample(observable=Observable.Z(), target=3)\n ),\n )\n\n mock_run_batch.assert_called_with(\n [CIRCUIT, CIRCUIT],\n s3_destination_folder=(\"foo\", \"bar\"),\n shots=SHOTS,\n max_parallel=None,\n 
max_connections=AwsQuantumTaskBatch.MAX_CONNECTIONS_DEFAULT,\n poll_timeout_seconds=AwsQuantumTask.DEFAULT_RESULTS_POLL_TIMEOUT,\n poll_interval_seconds=AwsQuantumTask.DEFAULT_RESULTS_POLL_INTERVAL,\n foo=\"bar\",\n )", "def execute(self, targets):\r\n raise TaskError('execute() not implemented')", "def run_testing_batch(self, session, batch):\n feed_dict = self.batch_to_feed(batch)\n feed_dict[self.use_dropout_placeholder] = 0.0\n fetches = [self.loss, self.predictions]\n loss, probabilities = session.run(fetches, feed_dict=feed_dict)\n return loss, probabilities", "def execute():", "def execute(*args):", "def run_bulk_process(\n row_handler, rows, progress_label=None, stats=None, raise_global_error=True\n):\n processor = _create_bulk_processor(\n row_handler,\n rows,\n progress_label,\n stats=stats,\n raise_global_error=raise_global_error,\n )\n return processor.run()", "def applyBatch(self, authority, operations):\n pass", "def batch(self, batch):\n\n self._batch = batch", "def evaluate_batch(self, pipelines):", "def _batching_call(self, *args, **kw):\n b_start = kw.pop('b_start', None)\n b_size = kw.pop('b_size', None)\n results = list(self._original_call(*args, **kw))\n\n if b_start is None:\n return results\n\n if b_size is None:\n b_size = len(results)\n\n return results[b_start:b_start+b_size]", "def execute(\n cls, datasets: list[DatasetBase], runner: Callable, nprocs: int | None = None\n ) -> Self:\n if nprocs is None:\n nprocs = max(os.cpu_count() - 1, 1)\n\n results: list[ExecutionResponse] = []\n if nprocs == 1:\n # run without a ProcessPoolExecutor; useful for debugging\n for dataset in tqdm(datasets, desc=\"Executing...\"):\n results.append(runner(dataset))\n else:\n # adapted from https://gist.github.com/alexeygrigorev/79c97c1e9dd854562df9bbeea76fc5de\n with ProcessPoolExecutor(max_workers=nprocs) as executor:\n with tqdm(total=len(datasets), desc=\"Executing...\") as progress:\n futures = []\n for dataset in datasets:\n future = executor.submit(runner, dataset)\n future.add_done_callback(lambda p: progress.update())\n futures.append(future)\n\n for future in futures:\n results.append(future.result())\n\n batch = cls()\n for result in tqdm(results, desc=\"Building batch...\"):\n if result.success:\n if isinstance(result.content, list):\n for item in result.content:\n batch.sessions.append(BmdsSession.from_serialized(item))\n else:\n batch.sessions.append(BmdsSession.from_serialized(result.content))\n else:\n batch.errors.append(result.content)\n\n return batch", "def _send(self, batch):\n return self.agent.emitBatch(batch)", "def execute(self, operation, args=None, stream=None):\n\t\toperations = (op.strip() for op in operation.split(';') if len(op.strip()) > 0)\n\t\tfor op in operations:\n\t\t\tself._cursor.execute(op, args, stream)", "def flush_batch(self, batch: Sequence[TResult]) -> None:\n pass", "def batch(ctx, config, job_number, total_jobs, executor, force_overwrite):\n # Imports inside CLI for speed\n from yatsm.utils import distribute_jobs\n\n # TODO: remove when not debugging\n import dask\n dask.set_options(get=dask.async.get_sync)\n\n\n # TODO: Better define how authoritative reader when using multiple datasets\n # and choosing block shape (in config?)\n # TODO: Allow user to specify block shape in config (?)\n block_windows = config.primary_reader.block_windows\n job_idx = distribute_jobs(job_number, total_jobs, len(block_windows))\n\n logger.debug('Working on {} of {} block windows'\n .format(len(job_idx), len(block_windows)))\n\n block_windows = 
[block_windows[i] for i in job_idx]\n\n force_overwrite = (force_overwrite or\n config['pipeline'].get('overwrite', False))\n\n # TODO: iterate over block_windows assigned to ``job_id``\n futures = {}\n for idx, window in block_windows:\n future = executor.submit(batch_block,\n config=config,\n readers=config.readers,\n window=window,\n overwrite=force_overwrite)\n futures[future] = window\n\n n_good, n_skip, n_fail = 0, 0, 0\n for future in executor.as_completed(futures):\n window = futures[future]\n try:\n result = future.result()\n if isinstance(result, str):\n logger.info(\"Wrote to: %s\" % result)\n n_good += 1\n else:\n n_skip += 1\n time.sleep(1)\n except KeyboardInterrupt:\n logger.critical('Interrupting and shutting down')\n executor.shutdown()\n raise click.Abort()\n except Exception:\n logger.exception(\"Exception for window: {}\".format(window))\n n_fail += 1\n raise # TODO: remove and log?\n\n logger.info('Complete: %s' % n_good)\n logger.info('Skipped: %s' % n_skip)\n logger.info('Failed: %s' % n_fail)", "def BeginExecuteBatch(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def BeginExecuteBatch(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def update_batch(self, *args, **kwargs):\n pass", "def RunBatchJob(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def batch(\n arguments: List[BatchArgument],\n *,\n uniform_mime_type: str,\n include_output: bool = True,\n drive_service: Optional[discovery.Resource] = None,\n) -> List[drive_api.ResourceID]:\n\n # callback will append resulting IDs in order\n result: List[drive_api.ResourceID] = []\n\n def batch_response(request_id, response, exception) -> None:\n nonlocal result\n result.append(response.get(\"id\"))\n\n requests = [\n request(\n name=argument.name,\n mime_type=uniform_mime_type,\n parent_folder_id=argument.parent_folder_id,\n drive_service=drive_service,\n )\n for argument in arguments\n ]\n kwargs = {\"requests\": requests, \"drive_service\": drive_service}\n if include_output:\n kwargs[\"callback\"] = batch_response\n drive_api.batch_command(**kwargs)\n return result", "def test_fixture_batch_run(tmp_sample_project):\n config_dir = tmp_sample_project\n output = subprocess.run([\"smif\", \"run\", \"-v\", \"-b\", \"-d\", config_dir,\n os.path.join(config_dir, \"batchfile\")],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n print(output.stdout.decode(\"utf-8\"))\n print(output.stderr.decode(\"utf-8\"), file=sys.stderr)\n assert \"Running energy_water_cp_cr\" in str(output.stderr)\n assert \"Model run 'energy_water_cp_cr' complete\" in str(output.stdout)\n assert \"Running energy_central\" in str(output.stderr)\n assert \"Model run 'energy_central' complete\" in str(output.stdout)", "def executeAll(lines):", "def execute(self, args):", "def main():\n args = get_arguments()\n print(args.batch_size)", "def batch(self, batch, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.batch_with_http_info(batch, **kwargs)\n else:\n (data) = self.batch_with_http_info(batch, **kwargs)\n return data", "def batch_executor(creds, cmds):\n def handler(rid, resp, ex, responses):\n \"Callback 
invoked by Google API to handled message data.\"\n def ex_is_error(ex, code):\n \"Check if exception is error code 'code'.\"\n return (isinstance(ex, googleapiclient.errors.HttpError) and\n ex.resp.status == code)\n if ex is not None:\n if ex_is_error(ex, 404):\n # message could not be found this is probably a\n # deleted message, spam or draft message since these\n # are not included in the messages.get() query by\n # default.\n print(\"remote: could not find remote message: %s!\" % rid)\n return\n\n elif ex_is_error(ex, 400):\n # message id invalid, probably caused by stray files\n # in the mail repo\n print(\"remote: message id: %s is invalid! \" % rid)\n return\n\n elif ex_is_error(ex, 403) or ex_is_error(ex, 429):\n #import pdb; pdb.set_trace()\n raise Gmail.UserRateException(ex)\n elif ex_is_error(ex, 500):\n raise Gmail.GenericException(ex)\n else:\n raise Gmail.BatchException(ex)\n responses.append(resp)\n\n http = creds.authorize(Http(timeout=30))\n service = build('gmail', 'v1', http=http)\n batch = service.new_batch_http_request()\n responses = []\n for gid, cmd in cmds:\n batch.add(cmd, callback=lambda a, b, c: handler(a, b, c,\n responses),\n request_id=gid)\n batch.execute(http=http)\n return responses", "def make_batch_request(self, batch):\n args = {}\n args['access_token'] = self.access_token\n args['batch'] = json.dumps(batch)\n args = {k.encode('utf-8'): unicode(v).encode('utf-8')\n for k, v in args.items()}\n logger.info('Making a batched request with %s' % args)\n try:\n f = urllib2.urlopen(self.api_root, urllib.urlencode(args))\n data = json.load(f)\n # For debugging\n self.data = data\n for idx, val in enumerate(data):\n data[idx] = json.loads(val['body'])\n return data\n except urllib2.HTTPError as e:\n logger.info('%s' % e)\n return json.load(e)\n except urllib2.URLError as e:\n logger.warn('URLError: %s' % e.reason)", "def stream(self, batch):\n response = self.post(batch)\n try:\n for i, result_data in grouped(response):\n result = JobResult.hydrate(assembled(result_data), batch)\n log.info(\"< %s\", result)\n yield result\n finally:\n response.close()", "def execute(self, *args, **kwargs):", "def execute(self, *args, **kwargs):", "def is_batch():\n\n pass", "def process(self, data_batch: Any, data_samples: Sequence[dict]) -> None:", "def run_batch(self):\n\n print(\"Running experiment with batch_size {}...\".format(self.batch_size))\n\n errors = self.W.new_tensor([])\n energies = self.W.new_tensor([])\n\n x = (2 * self.W.new(self.N, self.batch_size).random_(2) - 1).float() # Initialize the x vector\n rands = self.W.new(self.t_max, self.batch_size).uniform_() # The random values which will be compared to the acceptance probabilities\n idxs = self.W.new(self.t_max, self.batch_size).random_(self.N).long() # The indices which will be flipped in x at each iteration\n\n energy, wx = utils.compute_energy_batch(x, self.W, self.Y) # Compute the initial value of the energy\n\n for iteration in range(self.t_max):\n self.beta_scheduler.step(energies) # Update the value of beta according to the cooling strategy\n\n x, energy, wx = self.chain.step_batch(x, self.W, self.Y, self.beta_scheduler.beta, energy, wx, idxs[iteration], rands[iteration])\n energies = torch.cat((energies, energy.unsqueeze(0)))\n\n e = utils.compute_reconstruction_error_batch(x, self.X) # Compute the current reconstruction error\n errors = torch.cat((errors, e.unsqueeze(0)))\n\n return errors, energies, x", "def execute(self, inputs=None, output=None, load_targets=False):\n if self == output:\n if 
os.path.exists(self._dump_dirname):\n shutil.rmtree(self._dump_dirname)\n if os.path.exists(self._target_filename):\n os.remove(self._target_filename)\n os.makedirs(self._dump_dirname)\n\n if inputs is None:\n inputs = []\n\n if not self.has_result():\n if self in inputs or (load_targets and self.target):\n logging.info('Loading\\n%s' % util.indent(str(self)))\n self.load()\n else:\n for i in self.inputs:\n i.execute(inputs=inputs, output=output,\n load_targets=load_targets)\n\n args, kwargs = self.map_inputs()\n logging.info('Running\\n%s' % util.indent(str(self)))\n self.set_result(self.run(*args, **kwargs))\n\n if self == output:\n logging.info('Dumping\\n%s' % util.indent(str(self)))\n self.dump()\n util.touch(self._target_filename)", "def my_run_batch_train(args, model, batch):\n input_ids, lm_labels, mc_labels = [x.to(args.device) for x in batch if isinstance(x, torch.Tensor)]\n\n lm_loss, lm_logits, mc_loss, mc_logits, *_ = model(\n input_ids=input_ids,\n lm_labels=lm_labels.view(-1, lm_labels.size(-1)),\n mc_labels=mc_labels)\n\n loss = mc_loss * args.mc_coefficient + lm_loss\n return loss, lm_logits, mc_logits, mc_labels", "def execute(self, args):\r\n pass", "def batch(self, batch_identifier, begin_operation):\n logger.debug('Starting transaction...')\n self.publish(\n batch_operation=BatchOperation(\n batch_identifier=batch_identifier,\n begin_operation=begin_operation,\n ),\n )\n\n def mutation(mutation_operation):\n return self.publish(\n batch_operation=BatchOperation(\n batch_identifier=batch_identifier,\n mutation_operation=mutation_operation,\n ),\n )\n\n try:\n yield mutation\n except Exception:\n logger.debug('Attempting to publish rollback of in progress transaction...')\n self.publish(\n batch_operation=BatchOperation(\n batch_identifier=batch_identifier,\n rollback_operation=RollbackOperation(),\n ),\n )\n logger.debug('Published rollback.')\n raise\n else:\n logger.debug('Attempting to publish commit of in progress transaction...')\n self.publish(\n batch_operation=BatchOperation(\n batch_identifier=batch_identifier,\n commit_operation=CommitOperation(),\n ),\n )\n logger.debug('Published commit.')", "def run_multiple(self, sql, it):\n self.database.executemany(sql, it)", "def evaluate_batch(self, batch: TorchData, model: nn.Module) -> Dict[str, Any]:\n pass", "def test_identity_multiple_batched(self, dev):\n qml.enable_tape()\n dev = qml.device(dev, wires=2)\n\n with qml.tape.QuantumTape() as tape1:\n qml.expval(qml.Identity(wires=[0]))\n qml.expval(qml.Identity(wires=[1]))\n\n res = dev.batch_execute([tape1])\n assert len(res) == 1\n assert np.allclose(res[0], np.array([1, 1]))\n qml.disable_tape()", "def executemany(self, operation, seq_of_parameters):\r\n if self._closed:\r\n raise Error('The cursor has been closed.')\r\n if self.connection._closed:\r\n raise Error('The connection to the database has been closed.')\r\n for parameters in seq_of_parameters:\r\n self.execute(operation, parameters)", "async def executemany(self, operation, seq_of_parameters):\n\n count = 0\n for parameters in seq_of_parameters:\n count += await self.execute(operation, parameters)\n self.rowcount = count\n return count", "def process_batch_group(batch_name_list, instance_to_create):\n try:\n proc_inst = instance_to_create\n proc_inst.process(batch_name_list)\n except Exception, ex:\n print traceback.format_exc()\n raise ex", "def batch_process(self, message_list, action, userId='me'):\n\n list_of_ids = []\n\n for key, value in message_list.items():\n list_of_ids.append(value)\n\n 
chunks = [list_of_ids[x:x+1000] for x in range(0, len(list_of_ids), 1000)]\n\n for page in range(0, len(chunks)):\n if action.lower() == 'archive':\n resource = getattr(self.connection.users().messages(), 'batchModify')\n body = { \n \"ids\": chunks[page],\n \"removeLabelIds\": [\"INBOX\"],\n }\n else:\n resource = getattr(self.connection.users().messages(), 'batchDelete')\n body = { \n \"ids\": chunks[page],\n }\n\n dynamic_request = resource(userId=userId, body=body)\n response = dynamic_request.execute()\n print(f'[√] Bulk Action: SUCCESS {len(chunks[page])} Messages have been {action}d! - {page}')\n print(f'[√] Bulk Action: SUCCESS Total Number of Processed Messages: {len(list_of_ids)}')\n return True", "def command():\n return BatchCommand(\n config=BatchConfig(\n stack_name='my-unique-stack-name',\n code_dir=os.path.abspath(os.path.join(\n __file__, '../lambda'\n )),\n description='Demo batch processing',\n argparse_callback=argparse_callback,\n env_callback=env_callback,\n webpack=True\n )\n )", "def run(self, *args, **kwargs):\n if kwargs.pop('lazy', False):\n self._lazy_run = args, kwargs\n else:\n if len(args) == 0 and len(kwargs) == 0:\n args, kwargs = self._lazy_run\n for _ in self.gen_batch(*args, **kwargs):\n pass\n return self", "def test_batch(self):\n batch = batch_test_utils.create_batch()\n self.job1.batch_id = batch.id\n self.job1.save()\n\n url = '/%s/jobs/?batch_id=%d' % (self.api, batch.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['id'], self.job1.id)", "def execute(self, *args, **kwargs):\n pass", "def batch(self, sql):\n return _Batch(self.conn, sql)", "def execute(self, context, obj):\n \n # if there are results, then processthem, otherwise stop the machine\n if obj.get(CONTINUATION_RESULTS_KEY):\n \n entities, data = obj[CONTINUATION_RESULTS_KEY], []\n \n # process each entity in turn\n for entity in entities:\n \n # extracting entity specific data\n csvData = self.generateCsvData(context, obj, entity)\n if csvData:\n data.append(csvData)\n \n if data:\n context[DATA_PARAM] = data\n \n # and then extracting data from a batch of entities (ie. 
sums, averages etc.)\n aggData = self.generateAggregatedCsvData(context, obj, entities)\n if aggData:\n context[AGG_DATA_PARAM] = aggData\n \n # this stores the total number of entities processed by this Task\n context[NUM_ENTITIES_PARAM] = len(entities)\n \n # keep going in all cases, even when the continuation is done\n return OK_EVENT", "def execute():\n pass", "def run(self):\n self.assign_inputs()\n self.execute()\n self.collect_outputs()", "def batch_execute(global_step, thunks, batch_size, name=None):\n\n def true_fn(thunk):\n \"\"\"Ensures thunk is executed and returns an Op (not a Tensor).\"\"\"\n\n def result():\n with tf.control_dependencies([thunk()]):\n return tf.no_op()\n\n return result\n\n def false_fn(_):\n \"\"\"Executes a no-op.\"\"\"\n\n def result():\n return tf.no_op()\n\n return result\n\n with tf.name_scope(name, \"batch_execute\"):\n true_fns = [true_fn(thunk) for thunk in thunks]\n false_fns = [false_fn(thunk) for thunk in thunks]\n num_thunks = len(thunks)\n conditions = [\n tf.less(\n tf.mod(batch_size - 1 + global_step * batch_size - j, num_thunks),\n batch_size) for j in range(num_thunks)\n ]\n result = [\n tf.cond(condition, true_fn, false_fn)\n for (condition, true_fn,\n false_fn) in zip(conditions, true_fns, false_fns)\n ]\n return result", "def test_batch_exec(self, keep, tmpdir, monkeypatch, test_batch_result):\n qml.enable_tape()\n\n dev = qml.device(\"orquestra.forest\", wires=3, keep_files=keep)\n\n with qml.tape.QuantumTape() as tape1:\n qml.expval(qml.PauliZ(wires=[0]))\n\n with qml.tape.QuantumTape() as tape2:\n qml.RX(0.432, wires=0)\n qml.RY(0.543, wires=0)\n qml.expval(qml.PauliZ(wires=[0]))\n\n with qml.tape.QuantumTape() as tape3:\n qml.RX(0.432, wires=0)\n qml.expval(qml.PauliZ(wires=[0]))\n\n circuits = [tape1, tape2, tape3]\n\n test_uuid = \"1234\"\n assert not os.path.exists(tmpdir.join(f\"expval-{test_uuid}-0.yaml\"))\n\n with monkeypatch.context() as m:\n m.setattr(pennylane_orquestra.cli_actions, \"user_data_dir\", lambda *args: tmpdir)\n\n # Disable submitting to the Orquestra platform by mocking Popen\n m.setattr(subprocess, \"Popen\", lambda *args, **kwargs: MockPopen())\n m.setattr(\n pennylane_orquestra.orquestra_device,\n \"loop_until_finished\",\n lambda *args, **kwargs: test_batch_result,\n )\n\n # Disable random uuid generation\n m.setattr(uuid, \"uuid4\", lambda *args: test_uuid)\n\n res = dev.batch_execute(circuits)\n\n # Correct order of results is expected\n assert np.allclose(res[0], test_batch_res0)\n assert np.allclose(res[1], test_batch_res1)\n assert np.allclose(res[2], test_batch_res2)\n file_kept = os.path.exists(tmpdir.join(f\"expval-{test_uuid}-0.yaml\"))\n\n assert file_kept if keep else not file_kept\n\n qml.disable_tape()", "def index_batch(self,batch):\n pass", "def ExecuteBatchQueue(self):\n\t\tself.client.ExecuteBatch(self.batch_queue, 'https://www.google.com/m8/feeds/contacts/default/full/batch')\n\t\tself.ClearBatchQueue();", "def run(self, batch_size=20):\n logging.info('%s: Starting.'% (self.__class__.__name__))\n deferred.defer(self._continue, None, batch_size, _queue=self.QUEUE)", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, batch):\n raise NotImplementedError", "def submit(self, batch):\n response = self.post(batch)\n try:\n results = []\n for result_data in response.content:\n result = JobResult.hydrate(result_data, batch)\n log.info(\"< %s\", result)\n results.append(result)\n return results\n except ValueError:\n # Here, we're looking to gracefully handle a Neo4j server 
bug\n # whereby a response is received with no content and\n # 'Content-Type: application/json'. Given that correct JSON\n # technically needs to contain {} at minimum, the JSON\n # parser fails with a ValueError.\n if response.content_length == 0:\n from sys import exc_info\n from traceback import extract_tb\n type_, value, traceback = exc_info()\n for filename, line_number, function_name, text in extract_tb(traceback):\n if \"json\" in filename and \"decode\" in function_name:\n return []\n raise\n finally:\n response.close()", "def test_execute(self):\n context = dict()\n cmd = pycell.python_cell(\n source='print(2+2)',\n validate=True\n )\n controller = FakeWorkflowController()\n self.backend.execute_async(\n task=TaskHandle(\n task_id='000',\n project_id=self.PROJECT_ID,\n controller=controller\n ),\n command=cmd,\n artifacts=context\n )\n time.sleep(3)\n self.assertEqual(controller.task_id, '000')\n self.assertEqual(controller.state, 'SUCCESS')\n self.assertEqual(controller.outputs.stdout[0].value, '4')", "def run(self):\n\n input_args = {}\n self._execute(input_args, self.args)", "def _dispatch_batches(self, base_url, endpoint, item_list, prep_args, dataset_id=None, dataset_version=None):\n pool = ThreadPool(processes=self.pool_size)\n batch = []\n\n # Decide which _prep function to use based on the endpoint\n if endpoint == 'import' or endpoint == 'import-events':\n prep_function = Mixpanel._prep_event_for_import\n elif endpoint == 'engage' or endpoint == 'import-people':\n prep_function = Mixpanel._prep_params_for_profile\n else:\n Mixpanel.LOGGER.warning(\n 'endpoint must be \"import\", \"engage\", \"import-events\" or \"import-people\", found: ' + str(endpoint))\n return\n\n if base_url == self.BETA_IMPORT_API:\n batch_size = 1000\n else:\n batch_size = 50\n\n for item in item_list:\n if prep_args is not None:\n # Insert the given item as the first argument to be passed to the _prep function determined above\n prep_args[0] = item\n params = prep_function(*prep_args)\n if params:\n batch.append(params)\n else:\n batch.append(item)\n\n if len(batch) == batch_size:\n # Add an asynchronous call to _send_batch to the thread pool\n pool.apply_async(self._send_batch, args=(base_url, endpoint, batch, dataset_id, dataset_version),\n callback=Mixpanel._response_handler_callback)\n batch = []\n\n # If there are fewer than batch_size updates left ensure one last call is made\n if len(batch):\n # Add an asynchronous call to _send_batch to the thread pool\n pool.apply_async(self._send_batch, args=(base_url, endpoint, batch, dataset_id, dataset_version),\n callback=Mixpanel._response_handler_callback)\n pool.close()\n pool.join()", "def node_execute_multiple(self, ipaddr, username, password, commands):\n for cmd in commands:\n rc, output, error = self.node_execute_command(ipaddr, username, password, cmd)\n if rc is False:\n print(\"error running: [%s] %s\" % (ipaddr, cmd))", "def test_command(self):\n output, _error = self.executor.command(['echo', 'hello']).batch()\n self.assertEqual(output, 'hello\\n')", "def handle(self, context: DurableEntityContext, batch: List[Dict[str, Any]]) -> str:\n response = EntityState(results=[], signals=[])\n for operation_data in batch:\n result: Any = None\n is_error: bool = False\n start_time: datetime = datetime.now()\n\n try:\n # populate context\n operation = operation_data[\"name\"]\n if operation is None:\n message = \"Durable Functions Internal Error:\"\\\n \"Entity operation was missing a name field\"\n raise InternalEntityException(message)\n 
context._operation = operation\n context._input = operation_data[\"input\"]\n self.fn(context)\n result = context._result\n\n except InternalEntityException as e:\n raise e\n\n except Exception as e:\n is_error = True\n result = str(e)\n\n duration: int = self._elapsed_milliseconds_since(start_time)\n operation_result = OperationResult(\n is_error=is_error,\n duration=duration,\n result=result\n )\n response.results.append(operation_result)\n\n response.state = context._state\n response.entity_exists = context._exists\n return response.to_json_string()", "def batch(_func):\n def batch_wrap(\n _lst, num_threads=25, suppress_err_msg=False, raise_exception=False\n ):\n def worker():\n while True:\n item = q.get()\n try:\n _func(*item)\n except Exception as err:\n if not suppress_err_msg:\n log.error('Error: {}'.format(err))\n if raise_exception:\n raise Exception(err)\n q.task_done()\n\n q = queue.Queue()\n\n for _i in range(num_threads):\n t = threading.Thread(target=worker)\n t.daemon = True\n t.start()\n\n for _item in _lst:\n if not isinstance(_item, tuple):\n q.put((_item,))\n else:\n q.put(_item)\n\n q.join() # Wait for all operations to complete\n\n return batch_wrap", "def test_batch(self):\n req = '''[{\"foo\": \"boo\"},\n {\"jsonrpc\": \"2.0\", \"method\": \"notify_hello\", \"params\": [7]},\n {\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42,23], \"id\": \"2\"},\n {\"jsonrpc\": \"2.0\", \"method\": \"foo.get\", \"params\": {\"name\": \"myself\"}, \"id\": \"5\"}\n ]'''\n\n resp = '''[{\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32600, \"message\": \"InvalidRequestError: Invalid members in request object\"}, \"id\": null},\n {\"jsonrpc\": \"2.0\", \"result\": 19, \"id\": \"2\"},\n {\"jsonrpc\": \"2.0\", \"id\": \"5\", \"error\": {\"message\": \"MethodNotFoundError: Method foo.get not found\", \"code\": -32601}}\n ]'''\n\n status = 200\n r_status, r_resp = self.exec_handler(req)\n self.assertEqual(r_status, status)\n self.assertEqual(simplejson.loads(r_resp), simplejson.loads(resp))", "def step_batch(self, x_batch, W, Y, beta, energy_batch, wx, idx_batch, rand_batch):\n raise NotImplementedError()", "def execute_many(self, sql, args=None):\r\n args = args or None\r\n with Executer(self) as cursor:\r\n rows = cursor.executemany(sql, args)\r\n return rows", "async def test_delete_batch_valid(database, valid_data):\n await test_valid_insert_batch(database,valid_data)\n database = await Database.connect_pool()\n N = 10\n batch_id = 1\n for idx in range(N):\n await database.delete_batch(batch_id=batch_id,user_id=str(idx))\n await database.close_pool()", "def execute(self, devices, command_bytes):", "def instantiate_batch(self, inputs):\n return inputs", "def run_all(operations=ops):\n for operation in operations:\n run(operation)", "def execute_jobs(job_statuses:list, verbose:bool=False):\n\tBaseModel._meta.database.close()\n\tBaseModel._meta.database = get_db()\n\tfor j in tqdm(\n\t\tjob_statuses\n\t\t, desc = \"🔮 Training Models 🔮\"\n\t\t, ncols = 100\n\t):\n\t\tif (j['result_id'] is None):\n\t\t\tJob.run(id=j['job_id'], verbose=verbose, repeat_index=j['repeat_index'])" ]
[ "0.7048897", "0.6716209", "0.66513216", "0.6633743", "0.65684175", "0.6428148", "0.6417398", "0.6358319", "0.63045424", "0.6275959", "0.6252536", "0.62219197", "0.6158494", "0.61560374", "0.61498094", "0.61498094", "0.60457814", "0.60279816", "0.6023647", "0.6015122", "0.6008182", "0.5992778", "0.59863687", "0.5983878", "0.59705126", "0.5965271", "0.5950781", "0.5926105", "0.59076166", "0.58861434", "0.5878019", "0.5876642", "0.5872827", "0.5866366", "0.5857038", "0.58348095", "0.58272547", "0.5816093", "0.580663", "0.58009684", "0.57988554", "0.5772918", "0.5772918", "0.57657325", "0.5746666", "0.5717928", "0.5715291", "0.57100576", "0.5696025", "0.56744397", "0.5671222", "0.56560564", "0.5643328", "0.56309485", "0.56214106", "0.56214106", "0.5578695", "0.55766076", "0.5572923", "0.5560586", "0.5550657", "0.55473423", "0.5539627", "0.5539368", "0.55388933", "0.55374557", "0.55333763", "0.5532937", "0.552617", "0.5526119", "0.5522", "0.5519673", "0.55074984", "0.54910105", "0.54854566", "0.5475327", "0.54716486", "0.5470199", "0.5458924", "0.5457773", "0.54546124", "0.5450167", "0.54433", "0.543534", "0.543534", "0.5424715", "0.54203635", "0.54187095", "0.54127383", "0.5405317", "0.53995603", "0.5385289", "0.5377451", "0.5375692", "0.5359658", "0.5355341", "0.53516275", "0.5351525", "0.5343895", "0.53424215", "0.5341714" ]
0.0
-1
Initializes querysets for keyword and headlinekeyword
def __init__(self): self.keyword_queryset = Keyword.objects.all() self.headlinekeyword_queryset = Headlinekeyword.objects.all() self.headline_queryset = Headline.objects.all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def keyword_headlines(self):\r\n\t\td = {}\r\n\r\n\t\tfor q in self.keyword_queryset:\r\n\t\t\td[q.content] = self.headlinekeyword_queryset.filter(keywordid = q.id)\r\n\r\n\t\treturn d", "def get_queryset(self):\r\n return Keyword.objects.all()", "def setup_eager_loading(cls, queryset):\n queryset = queryset.prefetch_related('keywords_str')\n queryset = queryset.prefetch_related('tags_str')\n # queryset = queryset.prefetch_related('keywords')\n # queryset = queryset.prefetch_related('tags')\n return queryset", "def SetupKeywords(self):\n kwlist = u\" \".join(self._keywords)\n self.SetKeyWords(0, kwlist)", "def __init__(self, orKeywords=True, baseurl=None, version=\"1.0\"):\n if not baseurl: baseurl = RegistryService._STSCI_REGISTRY_BASEURL\n dalquery.DALQuery.__init__(self, baseurl, \"vaoreg\", version)\n self._kw = [] # list of individual keyword phrases\n self._preds = [] # list of SQL predicates\n self._svctype = None\n self._band = None\n self._orKw = orKeywords\n self._doSort = True\n self._dalonly = False", "def set_keywords(self):\n\n if len(self.get_keywords()) == 0 and len(self.get_files()) > 0:\n self.keywords = self.files[0].get_parent()[\"title\"].split(\" \")\n for keyword in self.keywords:\n if str(keyword) in str(self.text):\n self.keywords = []", "def init_by_keys(cls, **query):\n raise NotImplementedError()", "def fetchRelatedkeywords(self, keyword, meta_keyword):\n prefix = [\"how\", \"which\", \"why\", \"where\", \"who\", \"when\", \"are\", \"what\"]\n suffix = [\"\", \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\",\n \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\" \"x\", \"y\", \"z\"]\n suffix_arr = list(map(lambda x: keyword+\" \"+x, suffix))\n prefix_arr = list(map(lambda x: x+\" \"+keyword, prefix))\n suffix_arr.extend(prefix_arr)\n # removes duplicates for a seed keyword\n duplicates = set()\n for word in suffix_arr:\n suggestion = self.fetchSuggestion(word, keyword, meta_keyword)\n if suggestion == False:\n return False\n self.api_rate_limit+=1\n for query in suggestion:\n if query['keyword'] not in duplicates:\n duplicates.add(query['keyword'])\n # allows same keywords with multiple keywords\n # self.results.append(query)\n if query['keyword'] not in self.already_fetched:\n # does not allow same keyword with multiple keywords\n # this line is temporary need to remove after fetching 10 categories\n self.results.append(query)\n self.queue.add(query['keyword']) \n self.keywords_count += len(self.results)", "def __prepare_query(self, query, stopwords=[], stemming_func=None):\n pass", "def get_queryset(self):\n\n # Get the keyword URL parameter value. 
Return empty string if the keyword is empty.\n # Filter the queryset based on the value of keyword and the queryset object's title.\n keyword = self.request.query_params.get('keyword', '')\n queryset = self.queryset.filter(title__icontains=keyword)\n\n if isinstance(queryset, QuerySet):\n # Ensure queryset is re-evaluated on each request.\n queryset = queryset.all()\n\n #return queryset\n return queryset.order_by('-first_published_at')", "def __init__(self, tags=None, keyphrases=None, links=None):\n\n self.categories = set()\n\n if keyphrases:\n keyphrases = [keyphrase for keyphrase in keyphrases]\n self.keyphrases = keyphrases\n\n if tags:\n self.set_tags(tags)\n\n if links:\n self.set_links(links)", "def init_queryset(self, qs):\n nsize = qs.count() # we assume each will be a line\n self._npages = nsize // self.height + (0 if nsize % self.height == 0 else 1)\n self._data = qs", "def pre_search(self, qs):\n return qs", "def headwords ():\n\n q = request.args.get ('q')\n fulltext = request.args.get ('fulltext')\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n where = ''\n\n if (not q) and (not fulltext):\n # Retrieve full list of headwords\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n if q:\n q = q.replace ('-', '')\n q = q.replace ('%', '')\n q = q.replace ('?', '_')\n q = q.replace ('*', '%')\n where = \"(keyword LIKE :q) AND\"\n\n if not fulltext:\n # easy out\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE keyword LIKE :q\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'q' : q, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT DISTINCT\n k.id,\n k.webkeyword COLLATE utf8mb4_bin AS webkeyword,\n k.no\n FROM keyword k,\n article a\n WHERE {where} (MATCH (a.idxtext) AGAINST (:fulltext IN BOOLEAN MODE))\n AND a.no = k.no\n ORDER BY k.sortkeyword, k.n, k.no\n LIMIT :limit\n OFFSET :offset\n \"\"\".format (where = where), { 'q' : q, 'fulltext' : fulltext,\n 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)", "def _build_queryset(self, request, term):\n context = {}\n self.graphs_count = 0\n\n undefined = u''\n\n scribe = request.GET.get('scribe', undefined)\n # alternative names are for backward compatibility with old-style graph\n # search page\n script = request.GET.get('script', undefined)\n chartype = request.GET.get('chartype', undefined)\n character = request.GET.get('character', undefined)\n allograph = request.GET.get('allograph', undefined)\n component = request.GET.get('component', undefined)\n feature = request.GET.get('feature', undefined)\n repository = request.GET.get('repository', undefined)\n index = request.GET.get('index', undefined)\n\n excluded_images = None\n from digipal.utils import is_staff\n if not is_staff(request):\n excluded_images = Image.filter_permissions(\n Image.objects.all(), [MediaPermission.PERM_PRIVATE])\n\n none = u'-1'\n one_or_more = u'-2'\n\n from datetime import datetime\n\n t0 = datetime.now()\n t4 = datetime.now()\n\n wheres = []\n\n if 
self.search_hands:\n graphs = Graph.objects.filter(\n hand__id__in=self.search_hands.queryset)\n else:\n\n # .order_by('item_part__current_item__repository__name', 'item_part__current_item__shelfmark', 'descriptions__description','id')\n # Although we are listing hands on the front-end, we search for graphs and not for hand.\n # Two reasons:\n # searching for character and allograh at the same time through a Hand model would generate two separate joins to graph\n # this would bring potentially invalid results and it is also much slower\n # it is faster than excluding all the hands without a graph (yet another expensive join)\n #\n if term:\n term = term.replace('\"', '')\n graphs = Graph.objects.filter(\n Q(hand__descriptions__description__icontains=term) |\n Q(hand__scribe__name__icontains=term) |\n Q(hand__assigned_place__name__icontains=term) |\n Q(hand__assigned_date__date__icontains=term) |\n Q(hand__item_part__current_item__shelfmark__icontains=term) |\n Q(hand__item_part__current_item__repository__name__icontains=term) |\n Q(hand__item_part__current_item__repository__place__name__icontains=term) |\n Q(hand__item_part__historical_items__catalogue_number__icontains=term) | \\\n # JIRA 423\n Q(hand__item_part__historical_items__name__icontains=term) | \\\n Q(hand__item_part__group__historical_items__name__icontains=term) | \\\n Q(hand__item_part__display_label__icontains=term) | \\\n Q(hand__item_part__group__display_label__icontains=term)\n )\n else:\n graphs = Graph.objects.all()\n\n t1 = datetime.now()\n\n if index:\n graphs = graphs.filter(\n hand__item_part__historical_items__catalogue_number__iexact=index)\n if repository:\n matches = re.match(ur'^([^,]+?),([^,]+)$', repository)\n if matches:\n graphs = graphs.filter(Q(hand__item_part__current_item__repository__place__name__iexact=matches.group(\n 1).strip()) & Q(hand__item_part__current_item__repository__name__iexact=matches.group(2).strip()))\n if scribe:\n graphs = graphs.filter(hand__scribe__name__icontains=scribe)\n if script:\n graphs = graphs.filter(hand__script__name=script)\n\n if chartype:\n graphs = graphs.filter(\n idiograph__allograph__character__ontograph__ontograph_type__name=chartype)\n if character:\n graphs = graphs.filter(\n idiograph__allograph__character__name=character)\n if allograph:\n graphs = graphs.filter(idiograph__allograph__name=allograph)\n\n # we discard freak graph records (i.e. 
without annotation) to prevent\n # errors further down the line.\n graphs = graphs.filter(annotation__isnull=False)\n\n # if the user is not logged in we exclude graphs where the allograph is\n # hidden\n from digipal.models import has_edit_permission\n if not has_edit_permission(request, self.get_model()):\n graphs = graphs.exclude(idiograph__allograph__hidden=True)\n\n # exclude private images\n if excluded_images and excluded_images.count():\n graphs = graphs.exclude(annotation__image__in=excluded_images)\n\n # condition on component\n if component:\n component_where = Q(graph_components__component__name=component)\n if feature in [undefined, none]:\n # If no feature is specified we find all the graph which are supposed to have a component\n # according to their idiograph\n component_where = component_where | Q(\n idiograph__allograph__allograph_components__component__name=component)\n wheres.append(component_where)\n\n # condition on feature\n if feature not in [undefined, none, one_or_more]:\n wheres.append(Q(graph_components__features__name=feature))\n if feature in [one_or_more]:\n wheres.append(Q(graph_components__features__id__isnull=False))\n\n # ANDs all the Q() where clauses together\n if wheres:\n where_and = wheres.pop(0)\n for where in wheres:\n where_and = where_and & where\n\n graphs = graphs.filter(where_and)\n\n # Treat the feature=none case\n if feature == none:\n excluded_q = Q(graph_components__features__id__isnull=False)\n if component:\n excluded_q = excluded_q & Q(\n graph_components__component__name=component)\n excluded_graphs = Graph.objects.filter(excluded_q)\n graphs = graphs.exclude(\n id__in=excluded_graphs.values_list('id', flat=True))\n\n from digipal.utils import set_left_joins_in_queryset, get_str_from_queryset\n set_left_joins_in_queryset(graphs)\n # print get_str_from_queryset(graphs)\n\n t2 = datetime.now()\n\n # Get the graphs then id of all the related Hands\n # We use values_list because it is much faster, we don't need to fetch all the Hands at this stage\n # That will be done after pagination in the template\n # Distinct is needed here.\n #graphs = graphs.distinct().order_by('hand__scribe__name', 'hand__id', 'idiograph__allograph__character__ontograph__sort_order')\n chrono('graph filter:')\n graphs = graphs.distinct().order_by('hand__scribe__name', 'hand__id')\n chrono(':graph filter')\n\n # print graphs.query\n chrono('graph values_list:')\n graph_ids = graphs.values_list('id', 'hand_id')\n chrono(':graph values_list')\n\n# chrono('len:')\n# l = len(graph_ids)\n# print graph_ids.query\n# chrono(':len')\n\n # Build a structure that groups all the graph ids by hand id\n # context['hand_ids'] = [[1, 101, 102], [2, 103, 104]]\n # In the above we have two hands: 1 and 2. 
For hand 1 we have Graph 101\n # and 102.\n chrono('hand_ids:')\n context['hand_ids'] = [[0]]\n last = 0\n for g in graph_ids:\n if g[1] != context['hand_ids'][-1][0]:\n context['hand_ids'].append([g[1]])\n context['hand_ids'][-1].append(g[0])\n del(context['hand_ids'][0])\n chrono(':hand_ids')\n\n t3 = datetime.now()\n\n self.graphs_count = len(graph_ids)\n\n t4 = datetime.now()\n\n # print 'search %s; hands query: %s + graph count: %s' % (t4 - t0, t3 -\n # t2, t4 - t3)\n\n t5 = datetime.now()\n self._queryset = context['hand_ids']\n\n return self._queryset", "def __init__(self, *args, **kwargs):\r\n\r\n super(DynamicMixin, self).__init__(*args, **kwargs)\r\n\r\n if not self._meta.queryset is None:\r\n self._meta.queryset = self._meta.queryset.all()", "def init():\n execute(query=_query['cr_tweet'])\n execute(query=_query['cr_sentiment'])", "def keywords(self, keywords):\n self._keywords = keywords", "def get_queryset(self):\n queryset = Article.objects.all()\n username = self.request.query_params.get('username', None)\n if username is not None:\n queryset = queryset.filter(author__username__iexact=username)\n tag = self.request.query_params.get('tag', None)\n if tag is not None:\n queryset = queryset.filter(tags__tag_name__iexact=tag)\n search = self.request.query_params.get('search', None)\n if search is not None:\n queryset = queryset.filter(\n Q(title__icontains=search) |\n Q(slug__icontains=search) |\n Q(description__icontains=search) |\n Q(body__contains=search)\n )\n\n return queryset", "def keyword_list(request):\n if request.method == 'GET':\n keywords = get_list_or_404(Keyword, is_active=True)\n if request.GET.get('pagination'):\n pagination = request.GET.get('pagination')\n if pagination == 'true':\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(keywords, request)\n serializer = KeywordSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n else:\n serializer = KeywordSerializer(keywords, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def keywords(self, keywords):\n\n self._keywords = keywords", "def defaultKeywords(self, kwSet):\n return QsciLexerJava.keywords(self, kwSet)", "def set_queries(self, **kwargs):\n for k, v in kwargs.items():\n self._query_dict[k] = v", "def initialise():\n _initialiseGlobals()\n for pop in AnadPartOfPerspectiveDb.Iterator():\n _addToKnowledge(pop)\n return", "def keywords(self):\n from hubspot3.keywords import KeywordsClient\n\n return KeywordsClient(**self.auth, **self.options)", "def clearkeywords(self):\n self._kw = []", "def __init__(self, query, title, link, subtext, searchterms, scripts):\n self.search_query = query\n self.title = title\n self.link = link\n self.subtext = subtext\n self.searchterms = searchterms\n self.link_scripts = scripts", "def __init__(self):\n\t\tself.relevances = None", "def initialize_survey(self, **kwargs):", "def make_complex_query_set(self):\n\n query = self.request.GET.get(\"q\")\n category = self.request.GET.get(\"category\")\n title = self.request.GET.get(\"title\")\n\n q_object = Q()\n\n if query:\n q_object.add((\n Q(category__category_name__icontains=query) |\n Q(title__icontains=query) \n ), Q.OR)\n\n else:\n if category:\n q_object.add(\n Q(category=category),\n Q.AND)\n if title:\n q_object.add(\n Q(title__icontains=title),\n Q.AND)\n\n return q_object", "def set_keyword_map(self):\n \n ret = defaultdict(list)\n for idx, doc in enumerate(self.docs):\n 
for token in doc:\n if token in self.dictionary.token2id:\n ret[token].append(idx)\n \n self.keyword_map = ret\n return ret", "def getKeywords(self):\n return", "def test_queryKeywordFlag(self):\n self._keywordFilteringTest(\"keyword\")", "def __init__(self, **keywords):\n # Set cord with the normal method if it was passed. Set atoms if\n # atomlist was passed. init() if both were passed.\n if keywords.has_key(\"cord\"):\n self.setcord(keywords[\"cord\"])\n if keywords.has_key(\"weights\"):\n self.setweights(keywords[\"weights\"])\n if keywords.has_key(\"cord\") and keywords.has_key(\"weights\"):\n self.init()", "def populate_keywords(kwds, pkg_id):\n if not kwds:\n return\n for word in kwds:\n # @todo(Check data and use the special character-list\n # variable in the constants' file.)\n word = word.strip(\".:;=-,\\\"'\\n $_%{}()[]^*?& +#`\").lower()\n if len(word) <= 1 or (word in constants.STOP_WORDS) or \\\n has_special_chars(word):\n continue\n insert_keyword(word, pkg_id)", "def keywords(self, **kwargs):\n\n path = self._get_movie_id_path('keywords')\n resp = self._get_method(path, kwargs)\n return resp", "def _get_keywords(self, title: str):\n # Prepare data\n keywords = set()\n stops = set(nltk.corpus.stopwords.words(\"english\"))\n stemmer = nltk.stem.SnowballStemmer(\"english\")\n ent_types = [\n \"PERSON\", \"ORGANIZATION\", \"FACILITY\", \"LOCATION\", \"DATE\",\n \"TIME\", \"GPE\", \"MONEY\",\n ]\n excluded_word_types = [\"RB\", \"IN\", \"PRP\"]\n\n # Tokenize and chunk words using NLTK\n tokens = nltk.tokenize.word_tokenize(title)\n positions = nltk.pos_tag(tokens)\n chunk = nltk.ne_chunk(positions)\n\n # Make a word list of keywords we want to add, that\n # are not part of our excluded word types.\n words = set()\n for pos in positions:\n word, word_type = pos\n if word.isalnum() and word_type not in excluded_word_types:\n words.add(word)\n\n # Add all entities to keyword list and remove them from\n # our remaining word set so they don't get added again\n # and stemmed later.\n for subtree in chunk.subtrees(filter=lambda t: t.label() in ent_types):\n for leaf in subtree.leaves():\n keywords.add(leaf[0])\n if leaf[0] in words:\n words.remove(leaf[0])\n\n # Add remaining words in list and stem them to base form,\n # stemming means we change words from e.g. 
\"eating\" to \"eat\".\n for word in words:\n if word not in stops:\n keywords.add(stemmer.stem(word))\n\n return sorted([keyword.lower() for keyword in keywords])", "def Keywords(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('keywords', default)\n return [HEP.KeywordObject(i) for i in tmp]", "def _load_keywords(self, collection_id, path=None, libdoc=None):\n if libdoc is None and path is None:\n raise(Exception(\"You must provide either a path or libdoc argument\"))\n\n if libdoc is None:\n libdoc = LibraryDocumentation(path)\n\n if len(libdoc.keywords) > 0:\n for keyword in libdoc.keywords:\n self._add_keyword(collection_id, keyword.name, keyword.doc, keyword.args)", "def initialize_indexer_backend(self):\n pass", "def get_queryset(self):\n queryset = Article.objects.all().order_by('-id')\n title = self.request.query_params.get('title', None)\n limit = self.request.query_params.get('limit', None)\n random = self.request.query_params.get('random', None)\n if title is not None:\n queryset = queryset.filter(title__icontains=title).order_by('-id')\n elif limit is not None:\n queryset = queryset[:limit]\n elif random is not None:\n queryset = queryset[COUNT:COUNT_MAX]\n return queryset", "def setup(self, query: str, docache=False, recache=False) -> None:\n self.q = Query(query, docache, recache)", "def prepare(self,docs,topics):\n \n self.docs, self.dictionary, self.corpus = self.clean_docs(docs)\n \n # Create keyword map\n self.set_keyword_map()\n \n # Create keyword map with their relatives\n self.set_keyword_map_rel()\n \n self.topic_map = {topic: set(self.get_related_keywords(topic,self.keyword_map_rel,_score=False)) \n for topic in topics}", "def get_headlines_with_keyword(self, kw):\r\n\t\tkey_head = self.keyword_headlines()\r\n\r\n\t\theadlines = set()\r\n\r\n\t\tfor headlinekw in key_head[kw]:\r\n\t\t\tcontent = headlinekw.headlineid.content\r\n\t\t\theadlines.add(content)\r\n\r\n\t\treturn list(headlines)", "def prepare_queries(self):\n self.log.info(\"preparing queries ...\")\n self.prepared_queries = {}\n self.prepared_query_to_str = {}\n initNs = {\"rdfs\": RDFApi.RDFS}\n get_classes = \"\"\"\n SELECT ?class\n WHERE {\n ?class rdf:type rdfs:Class .\n }\n \"\"\"\n self.add_prepared_query(\"get_classes\", get_classes, initNs)\n\n get_properties = \"\"\"\n SELECT ?property\n WHERE {\n ?property rdf:type rdf:Property .\n }\n \"\"\"\n self.add_prepared_query(\"get_properties\", get_properties, None)\n\n get_term_to_label = \"\"\"\n SELECT ?term ?label\n WHERE {\n ?term rdfs:label ?label\n }\n \"\"\"\n self.add_prepared_query(\"get_term_to_label\", get_term_to_label, initNs)\n\n get_term_to_desc = \"\"\"\n SELECT ?term ?desc\n WHERE {\n ?term rdfs:comment ?desc\n }\n \"\"\"\n self.add_prepared_query(\"get_term_to_desc\", get_term_to_desc, initNs)\n\n get_ancestors = \"\"\"\n SELECT ?class\n WHERE {\n ?subject rdfs:subClassOf* ?mid .\n ?mid rdfs:subClassOf* ?class .\n }\n group by ?class\n order by count(?mid)\n \"\"\"\n self.add_prepared_query(\"get_ancestors\", get_ancestors, initNs)", "def index(request):\r\n #context = RequestContext(request)\r\n\r\n category_list = Keyword.objects.all().count()\r\n context_dict = {'categories': category_list}\r\n\r\n \r\n # Generate counts of some of the main objects\r\n num_books = Book.objects.all().count()\r\n num_keywords = Keyword.objects.all().count()\r\n \r\n\r\n context = {\r\n 'num_books': num_books,\r\n 'num_keywords': num_keywords,\r\n 'categories': category_list,\r\n }\r\n\r\n\r\n # Render the HTML template index.html 
with the data in the context variable\r\n return render(request, 'index.html',context)", "def get_qs(self):\n if self.qs == None:\n # Get the Lemma PKs\n qs = self.get_queryset()\n else:\n qs = self.qs\n return qs", "def get_qs(self):\n if self.qs == None:\n # Get the Lemma PKs\n qs = self.get_queryset()\n else:\n qs = self.qs\n return qs", "def start_requests(self):\n keyword_list = ['西瓜']\n request_list = []\n for keyword in keyword_list:\n url = self.url.format(keyword)\n url_request = scrapy.Request(url=url, headers=self.search_header, callback=self.parse_search_page,\n meta={\"dont_merge_cookies\": True, \"category\": keyword})\n request_list.append(url_request)\n return request_list", "def get_keywords(self):\r\n\t\treturn list(self.keyword_headlines().keys())", "def load_all_queryset(self):\n return self.get_model()._default_manager.all()", "def load_searchfields():\n \n corpus = SpecialMeta()\n corpus.title = \"corpus\"\n corpus.order = 1\n \n author = SpecialMeta()\n author.title = \"author\"\n author.order = 2\n\n ms_name = SpecialMeta()\n ms_name.title = \"msName\"\n ms_name.order = 3\n\n annotation = SpecialMeta()\n annotation.title = \"annotation\"\n annotation.order = 4\n annotation.splittable = True\n\n translation = SpecialMeta()\n translation.title = \"translation\"\n translation.order = 5\n translation.splittable = True\n \n for field in [corpus, author, ms_name, annotation, translation]:\n try:\n SpecialMeta.objects.get(name=field.name)\n except SpecialMeta.DoesNotExist:\n field.save()", "def search_by_keywords(self, keywords, operator='or'):\n if operator == 'or' and self.indexes:\n articles_indexes = []\n for keyword in keywords:\n if keyword in self.indexes.keys():\n articles_indexes += self.indexes[keyword]\n articles_indexes = list(set(articles_indexes))[:self.no_returned_articles]\n return retrieve_articles(articles_indexes)\n\n if operator == 'and' and self.indexes:\n if keywords[0] in self.indexes.keys():\n article_indexes = self.indexes[keywords[0]]\n for keyword in keywords:\n article_indexes = list(set(article_indexes) & set(self.indexes[keyword]))\n article_indexes = article_indexes[:self.no_returned_articles]\n return retrieve_articles(article_indexes)\n\n return None", "def __iter__(self):\n for keyword in self.meta.findall(CN('meta:keyword')):\n yield keyword.text", "def add(self, keyword, definitions):\r\n for x_temp in definitions:\r\n self.query(term1='kd',term2=keyword,term3=x_temp.strip(),action='set')", "def with_keyword(self, keyword):\n return (self\n .filter(keyword=keyword)\n .select_related('keyword')\n .prefetch_related('donor', 'keyword__content_object'))", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n\n self.init_db()\n \n self._query_names_['totals'] = 'report_campaign_totals'\n self._query_names_['times'] = 'report_campaign_times'\n self._query_names_[FDH._TESTTYPE_BANNER_] = 'report_campaign_banners'\n self._query_names_[FDH._TESTTYPE_LP_] = 'report_campaign_lps'\n self._query_names_[FDH._TESTTYPE_BANNER_LP_] = 'report_campaign_bannerlps'\n \n self._query_type_ = kwargs['query_type']", "def recommend_by_keywords(self, key_words_list=None):\n pass", "def __init__(self, query_set, per_page_limit, optional_count_query_set=None,\n allow_empty_first_page=True):\n self.query_set = query_set\n self.per_page_limit = per_page_limit\n self.optional_count_query_set = optional_count_query_set\n self.allow_empty_first_page = allow_empty_first_page\n self.__total_pages = self.__count = None\n self.__iter_page = 1", "def 
test_keyword(self):\n\n url = '/%s/job-types/?keyword=%s' % (self.api, self.job_type1.name)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['name'], self.job_type1.name)\n\n url = '/%s/job-types/?keyword=%s' % (self.api, 'job-type')\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 6)\n\n url = '/%s/job-types/?keyword=%s' % (self.api, 'job-type-for-view-test')\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 3)\n\n url = '/%s/job-types/?keyword=%s&keyword=%s' % (self.api, 'job-type-for-view-test', self.job_type1.name)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 4)", "def get_queryset(self):\n tags = self.request.query_params.get('tags')\n categories = self.request.query_params.get('categories')\n user = self.request.query_params.get('user')\n queryset = self.queryset\n\n if tags:\n tags_title = self._params(tags)\n queryset = queryset.filter(tags__title__in=tags_title)\n\n if categories:\n categories_title = self._params(categories)\n queryset = queryset.filter(categories__title__in=categories_title)\n\n if user:\n user_id = self._params_to_ints(user)\n queryset = queryset.filter(user__id__in=user_id)\n return queryset", "def _update_key_set(self):\n self._key_set = set([item.keyword for item in self._metadata])", "def __init__(self, q):\n\n self.inverted_lists = {}\n self.q = q\n self.records = []\n self.original_records = []\n self.city_names = []", "def get_queryset(self):\n\n qs = SynonymList.objects.all()\n\n terms = self.request.query_params.get(\"q\", \"\")\n\n if terms:\n terms = self.request.query_params.get(\"q\", \"\")\n query = parse_query(remove_accents(terms))\n qs = qs.filter(search_vector_keywords_list=query).annotate(\n rank=SearchRank(F(\"search_vector_keywords_list\"), query)\n )\n\n qs = qs.order_by(\"name\").distinct()\n\n return qs", "def setup(query):\n\n results = []\n return results", "def setup(query):\n\n results = []\n return results", "def setup(query):\n\n results = []\n return results", "def setup(query):\n\n results = []\n return results", "def get_queryset(self, request):\n qs = super().get_queryset(request)\n qs = qs.prefetch_related('work__writers')\n qs = qs.prefetch_related('artist')\n qs = qs.prefetch_related('record_label')\n return qs", "def init(self):\n objects = self.load()\n self._pkcache = {obj.pk: True for obj in objects}\n for obj in objects:\n for ctype in obj._content_types:\n self._typecache[ctype][obj.pk] = True", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n \"\"\" Use _query_names_ to store a single query name \"\"\"\n self._query_names_ = 'report_donor_dollar_breakdown' # embed the query name in the class itself\n self._query_type_ = kwargs['query_type']", "def set_keywords(self, **kwargs):\n keywords = dict()\n\n for key, value in self.allowed_keys.items():\n keywords[key] = value[1]\n\n for key, value in kwargs.items():\n if key not in 
self.allowed_keys:\n error = 'Keyword %s for %s object not found' % \\\n (key, self.__class__.__name__)\n MASTError(self.__class__.__name__, error)\n\n# raise RuntimeError('Keyword %s for %s object not found' % \\\n# (key, self.__class__.__name__))\n\n if isinstance(value, self.allowed_keys[key][0]):\n keywords[key] = value\n else:\n error = 'Keyword %s value %s invalid; expected type %s, got type %s' % (key, str(value), self.allowed_keys[key][0], type(value))\n MASTError(self.__class__.__name__, error)\n# raise RuntimeError('Keyword %s value invalid' % key)\n\n return keywords", "def start_requests(self):\n\n with open(os.path.join(os.path.dirname(__file__), \"../resources/mapemall_categories.csv\")) as categories:\n for category in csv.DictReader(categories):\n category_text=category[\"category\"]\n url=str(MapemallCrawlerSpider.start_urls[0])+category_text\n # The meta is used to send our search text into the parser as metadata\n yield scrapy.Request(url, callback = self.parse, meta = {\"category_text\": category_text})", "def load_keywords():\n keywords = set()\n with open(os.path.join(BASE, \"data/keywords.txt\")) as fp:\n for line in fp:\n keywords.add(line.strip().lower())\n return keywords", "def get_headlines(self, kw = None):\r\n\t\tif kw:\r\n\t\t\treturn self.get_headlines_with_keyword(kw)\r\n\t\telse:\r\n\t\t\treturn self.get_all_headlines()", "def initialise(self, **kwargs):\n pass", "def _init_dataset(self):\n chars = set()\n with open(self.file_path + \"/words.txt\", 'r') as input_file:\n for line in input_file:\n line_split = line.strip().split('\\t')\n file_name = self.file_path+\"/words/\"+line_split[1]\n gt_text = line_split[0]\n chars = chars.union(set(list(gt_text)))\n self.samples.append((file_name, gt_text))\n input_file.close()\n\n self.char_set = sorted(list(chars))", "def init_models(self) -> None:\n self.models = AKSManagedClusterModels(self.cmd, self.resource_type)", "def init_models(self) -> None:\n self.models = AKSManagedClusterModels(self.cmd, self.resource_type)", "def _set_kb_attrs(flush_all=True):\n\tdebug_msg = \"initializing the knowledge base\"\n\tlogger.debug(debug_msg)\n\n\tkb.apk = AttribDict()\n\tkb.apk.md5 = None\n\tkb.apk.file_size = None\n\tkb.apk.display_perm = []\n\tkb.apk.providers = []\n\tkb.apk.receivers = []\n\tkb.apk.services = []\n\tkb.apk.activities = []\n\tkb.apk.actions = []\n\tkb.apk.manifest = None\n\t\n\tif flush_all:\n\t\tkb.cache = AttribDict()\n\t\tkb.cache.regex = {}\n\t\tkb.cache.files = {}\n\t\tkb.targets = set()\n\t\tkb.heartbeat = None\n\t\tkb.storage = None\n\t\tkb.plugins = AttribDict()\n\t\tkb.plugins.handle = []", "def get_queryset(self):\n qs = super().get_queryset()\n search_value = self.request.GET.get('search_box')\n\n if search_value is not None:\n qs = qs.search_by(search_value)\n\n return qs", "def init_fields(self):\n result = self.connection.execute('pragma table_info(files)')\n rows = result.fetchall()\n self.fields = [Field(row) for row in rows[4:]]\n result = self.connection.execute('select _keyword from keywords')\n rows = result.fetchall()\n self.keywords = [row[0] for row in rows]", "def __init__(self, query, datastoreio_stub, label=None):\n super(GetModels, self).__init__(label=label)\n self.datastoreio = datastoreio_stub\n self.query = query", "def __init__(self):\n self.recipeset = {}\n self.hardcoded()", "def _init(self, **kwargs):\n self.queries: FoldersQueries = FoldersQueries(auth=self.auth, **kwargs)\n self.enforcements: FoldersEnforcements = FoldersEnforcements(auth=self.auth, **kwargs)", "def 
test_set_keywords_1(self):\n data_dict = {\"type\":\"ADD\",\n \"cluster\":\"RETRIEVE\",\n \"subcluster\": \"NONE\",\n \"host_genus\": \"PARSE\",\n \"retrieve_record\": \"RETAIN\"}\n keywords = set([\"retrieve\", \"retain\"])\n tickets.set_keywords(data_dict, self.keywords)\n with self.subTest():\n self.assertEqual(data_dict[\"type\"], \"ADD\")\n with self.subTest():\n self.assertEqual(data_dict[\"cluster\"], \"retrieve\")\n with self.subTest():\n self.assertEqual(data_dict[\"subcluster\"], \"none\")\n with self.subTest():\n self.assertEqual(data_dict[\"host_genus\"], \"parse\")\n with self.subTest():\n self.assertEqual(data_dict[\"retrieve_record\"], \"retain\")", "def test_keywords(self):\n\n test_cases = (\n makeTestCase('adele 21',\n AlbumResultMatcher(title=Equals('21'), artist=Equals('adele')),\n ArtistResultMatcher(title=Equals('adele'))),\n makeTestCase('kanye power',\n TrackResultMatcher(title=Equals('power', artist=Equals('kanye west'))),\n ArtistResultMatcher(title=Equals('kanye west')),\n AlbumResultMatcher(title=Equals('my beautiful dark twisted fantasy'))),\n makeTestCase('ratat party with children',\n TrackResultMatcher(title=Equals('party with children', artist=Equals('ratatat'))),\n ArtistResultMatcher(title=Equals('ratatat'))),\n makeTestCase('flobot fight with tools handlebars',\n TrackResultMatcher(title=Equals('handlebars')),\n ArtistResultMatcher(title=Equals('flobots')),\n AlbumResultMatcher(title=Equals('fight with tools')))\n )\n\n self._run_tests(tests, {})", "def __init__(self, *terms, **kwargs):\n self.missing = kwargs.pop('_key_missing_', False)\n if terms and kwargs:\n raise ValueError(\"You must specify terms or kwargs, not both\")\n self.terms = []\n for t in terms:\n self.add_term(t)\n self.add_term(kwargs)", "def get_queryset(self):\n\n\t\t# Initially set the returned objects to be all sentences\n\t\tqueryset = Sentence.objects.all()\n\n\t\t# Access the request params\n\t\tplayername = self.request.query_params.get('playername', None)\n\n\t\t# If a player name is specified ---> Set the filter\n\t\tif playername is not None:\n\t\t\tqueryset = queryset.filter(player_name=playername)\n\n\t\t# Return the appropriate queryset\n\t\treturn queryset", "def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.bigramCounts = collections.defaultdict(lambda: 0)\n self.trigramCounts = collections.defaultdict(lambda: 0)\n self.followingWords = collections.defaultdict(lambda: set())\n self.precedingWords = collections.defaultdict(lambda: set())\n self.total = 0\n self.discount = 0.75\n self.train(corpus)", "def setUp(self):\n self.twitter = Twitter(CUR_DIR + \"/test_crossfit.tweets\", CUR_DIR + \"/test_stop_words.txt\")\n self.twitter.load_tweets_and_build_index()\n\n self.searcher = Searcher(self.twitter.tweets, self.twitter.stop_words)", "def test_keyword_extractor(self):\n data = [{\"Header\": \"This is a Header\", \"Paragraph\": \"This is a Paragraph\", \"slide\": 10}]\n keywords = keyword_extractor(data)\n data[0][\"Header_keywords\"] = [\"header\"]\n data[0][\"Paragraph_keywords\"] = [\"paragraph\"]\n self.assertEqual(keywords, data)", "def start_requests(self):\n\n base_url = \"https://www.f3motorauctions.com.au/search_results.aspx?sitekey=F3A&make=All%20Makes&model=All%20Models&keyword=&fromyear%20=From%20Any&toyear=To%20Any&body=All%20Body%20Types\"\n self.init_data()\n yield scrapy.Request(\n url = base_url, callback = self.parse_all_cars_within_page)", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n 
self._query_names_[FDH._QTYPE_BANNER_] = 'report_banner_metrics_minutely'\n self._query_names_[FDH._QTYPE_LP_] = 'report_LP_metrics_minutely'\n self._query_names_[FDH._QTYPE_BANNER_LP_] = 'report_bannerLP_metrics_minutely'\n self._query_names_['campaign'] = 'report_campaign_metrics_minutely'\n self._query_names_['campaign_total'] = 'report_campaign_metrics_minutely_total'\n \n self._query_names_[FDH._QTYPE_BANNER_ + FDH._QTYPE_TIME_] = 'report_banner_metrics_minutely_all'\n self._query_names_[FDH._QTYPE_LP_ + FDH._QTYPE_TIME_] = 'report_lp_metrics_minutely_all'\n self._query_names_[FDH._QTYPE_CAMPAIGN_ + FDH._QTYPE_TIME_] = 'report_campaign_metrics_minutely_all'\n \n self._query_type_ = kwargs['query_type']\n \n \"\"\" hardcode the data handler for now \"\"\"\n self._data_handler_ = FDH\n \n self._summary_data_ = None", "def init_query():\n file_path = Config.config['init_files']['query_file']\n with open(file_path) as file:\n for line in file:\n # ignore empty line\n if line == '\\n':\n continue\n yield Query(line)", "def initialize(self, **kwargs):", "def populate_clean_keywords():\n try:\n print 'populate clean keywords table'\n sql = 'insert into clean_keywords(word, count) SELECT name,COUNT(*) as count FROM keywords GROUP BY name'\n print sql\n util.executeSQL(conn, sql)\n except Exception as e:\n print e", "def run_query(self):\n query_dictionary_file_lines = self.get_dictionary_file_lines_for_keywords()\n result_postings_list = merge_lists([result.postings_list for result in query_dictionary_file_lines])\n self.result = result_postings_list\n print(\"Found {} matching documents\".format(len(result_postings_list)))", "def get_queryset(self):\n search_str = self.request.GET.get('search', None)\n col_nm = self.request.GET.get('sort_by', 'title')\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', 'ASC')\n self.sort_ordr=sort_order\n if search_str:\n search_str = self.request.GET.get('search', None)\n a = Q(title__icontains = search_str)\n b = Q(description__icontains = search_str)\n objects = Designation.objects.filter(a | b).distinct()\n else: # SORTING BY COL_NM\n objects = Designation.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n return objects", "def test_get_queryset(self):\n category = CategoryFactory.create()\n image_banner1, image_banner2 = ImageBannerFactory.create_batch(2, category=category)\n text_banner1, text_banner2 = TextBannerFactory.create_batch(2, category=category)\n self.view.category = category\n\n eq_(set(self.view.get_queryset()),\n set([image_banner1, image_banner2, text_banner1, text_banner2]))" ]
[ "0.62303746", "0.62133765", "0.5979311", "0.59030783", "0.5808249", "0.5781533", "0.5741475", "0.57359666", "0.5595646", "0.55684435", "0.5490495", "0.54874605", "0.54597276", "0.5437996", "0.5390633", "0.5304389", "0.5293679", "0.5290758", "0.5277677", "0.5255377", "0.52434176", "0.52301097", "0.52124393", "0.5198709", "0.51841795", "0.5179254", "0.51700854", "0.51626796", "0.51400644", "0.5125439", "0.51184475", "0.5109111", "0.50941575", "0.5090587", "0.5077902", "0.50661576", "0.50477725", "0.50435543", "0.50359136", "0.5019046", "0.4998874", "0.4997074", "0.4993441", "0.4991925", "0.4988176", "0.49772346", "0.4958917", "0.4958917", "0.4928677", "0.49169514", "0.49047172", "0.49024808", "0.49015927", "0.48965153", "0.4895013", "0.4889824", "0.48852307", "0.487201", "0.48696607", "0.48663622", "0.48589113", "0.48516956", "0.48508132", "0.48481727", "0.48401555", "0.48401555", "0.48401555", "0.48401555", "0.48232728", "0.48225236", "0.48087582", "0.4802591", "0.47927684", "0.47926766", "0.4789578", "0.47844496", "0.47801033", "0.47495845", "0.47495845", "0.4725551", "0.47203428", "0.4718678", "0.47141147", "0.4705754", "0.47018808", "0.46993497", "0.46964693", "0.4687751", "0.4687332", "0.46843413", "0.4672292", "0.46693766", "0.46645468", "0.46575075", "0.465675", "0.4653954", "0.4653206", "0.46445027", "0.46427828", "0.4635278" ]
0.7972529
0
Returns a dictionary of the keywords and the list of corresponding headlines (ids only)
def keyword_headlines(self):
    d = {}
    for q in self.keyword_queryset:
        d[q.content] = self.headlinekeyword_queryset.filter(keywordid = q.id)
    return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_keywords(self):\r\n\t\treturn list(self.keyword_headlines().keys())", "def get_headlines_with_keyword(self, kw):\r\n\t\tkey_head = self.keyword_headlines()\r\n\r\n\t\theadlines = set()\r\n\r\n\t\tfor headlinekw in key_head[kw]:\r\n\t\t\tcontent = headlinekw.headlineid.content\r\n\t\t\theadlines.add(content)\r\n\r\n\t\treturn list(headlines)", "def get_all_headlines(self):\r\n\t\tlist_vals = list(self.keyword_headlines().values())\r\n\t\tuniq_headlines = set()\r\n\t\tfor list_val in list_vals:\r\n\t\t\tfor headlineobj in list_val:\r\n\t\t\t\tuniq_headlines.add(headlineobj.headlineid.content)\r\n\r\n\t\treturn list(uniq_headlines)", "def get_headlines(self, kw = None):\r\n\t\tif kw:\r\n\t\t\treturn self.get_headlines_with_keyword(kw)\r\n\t\telse:\r\n\t\t\treturn self.get_all_headlines()", "def extract_keywords(self):\n keywords = [] \n for keyword in self.watsonLanguageModel['keywords'][:self.entitySizeLimit]: \n keywords.append(keyword['text'])\n return keywords", "def Keywords(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('keywords', default)\n return [HEP.KeywordObject(i) for i in tmp]", "def get_keywords(self, sectioned_text):\n \n keywords = []\n \n if 'full text' in list(sectioned_text.keys()):\n \n for word in self.keyword_list:\n if word in sectioned_text['full text']:\n keywords.append(word)\n \n else: \n fulltext = self.restitch_text(sectioned_text)\n for word in self.keyword_list:\n if word in fulltext:\n keywords.append(word)\n \n return keywords", "def keywords(self):\n return {\n \"unary\": {\n k: v[0] for k, v in self.unary_commands.items()\n },\n \"terminal\": {\n k: v[0] for k, v in self.terminal_commands.items()\n },\n \"binary\": {\n k: v[0] for k, v in self.binary_commands.items()\n },\n }", "def keyword_extraction(file_content):\n\n # [question, question....]\n for key, value in file_content.items():\n seg, hidden = ltp.seg([key])\n # ner: [[('Nh', 2, 2)]]\n ner = ltp.ner(hidden)\n # keywords: [('PERSON', \"吴轩\")], tuple_item: ('Nh', 2, 2)\n keywords = [(tag_to_name[tuple_item[0]], to_string(seg[0][tuple_item[1]: tuple_item[2]+1])) for tuple_item in ner[0]]\n file_content[key].keywords = keywords\n\n return file_content", "def getKeywords(self):\n return", "def chunk(keywords, lines):\n chunks = dict()\n chunk = []\n \n # Create an empty dictionary using all the keywords\n for keyword in keywords:\n chunks[keyword] = []\n \n # Populate dictionary with lists of chunks associated\n # with the keywords in the list \n for line in lines:\n if line.strip():\n token = line.split()[0]\n if token in keywords:\n chunk = [line] \n chunks[token].append(chunk) \n else:\n chunk.append(line)\n\n return chunks", "def parse_keywords(medline):\n keyword_list = medline.find(\"KeywordList\")\n keywords = list()\n if keyword_list is not None:\n for k in keyword_list.findall(\"Keyword\"):\n if k.text is not None:\n keywords.append(k.text)\n keywords = \"; \".join(keywords)\n else:\n keywords = \"\"\n return keywords", "def extract_keywords(raw_text,id):\n\n print(\"Extracting keywords for \"+id)\n\n stemmer = nltk.PorterStemmer()\n\n # Construct text\n\n # Tokens\n tokens = nltk.word_tokenize(raw_text)\n # filter undesirable words and format\n words = [w.replace('\\'','') for w in tokens if len(w)>=3]\n text = nltk.Text(words)\n\n tagged_text = nltk.pos_tag(text)\n #nouns = [tg[0] for tg in tagged_text if tg[1]=='NN' or tg[1]=='NNP' ]\n #print(nouns)\n\n # multi-term\n multiterms = set()\n stem_dico = {}\n for i in range(len(tagged_text)) :\n # max length 4 for 
multi-terms ==> 3\n for l in range(1,4) :\n if i+l < len(tagged_text) :\n tags = [tagged_text[k] for k in range(i,i+l)]\n if potential_multi_term(tags) :\n multistemlist = [str.lower(stemmer.stem(tagged_text[k][0])) for k in range(i,i+l)]\n #multistem.sort(key=str.lower)\n\t\t #python 3 : remove .encode('ascii','ignore')\n multistem = functools.reduce(lambda s1,s2 : s1+' '+s2,multistemlist)\n rawtext = functools.reduce(lambda s1,s2 : s1+' '+s2,[str.lower(tagged_text[k][0]) for k in range(i,i+l)])\n multiterms.add(multistem)\n if multistem in stem_dico :\n stem_dico[multistem].add(rawtext)\n else :\n stem_dico[multistem] = set([rawtext])\n\n return [list(multiterms),stem_dico]", "def determine_keywords(self):\n\n split = dict()\n split['email_cc'] = re.compile(\"^\\s*CC[-_]?MAIL[:=]\\s*(.*)\")\n split['email_cc2'] = re.compile(\"^\\s*C[Cc][:=]\\s*(.*)\")\n split['fixed_in'] = re.compile(\"^\\s*FIXED[-_]?IN[:=]\\s*(.*)\")\n\n numeric = dict()\n numeric['bug_fixed'] = re.compile(\"^\\s*(?:BUGS?|FEATURE)[:=]\\s*(.+)\")\n numeric['bug_cc'] = re.compile(\"^\\s*CCBUGS?[:=]\\s*(.+)\")\n\n presence = dict()\n presence['email_gui'] = re.compile(\"^\\s*GUI:\")\n presence['silent'] = re.compile(\"(?:CVS|SVN|GIT|SCM).?SILENT\")\n presence['notes'] = re.compile(\"(?:Notes added by 'git notes add'|Notes removed by 'git notes remove')\")\n\n results = defaultdict(list)\n for line in self.commit.message.split(\"\\n\"):\n # If our line starts with Summary: (as it does when using Arcanist's default template) then strip this off\n # This allows for people to fill keywords in the Differential Summary and have this work smoothly for them\n line = re.sub(\"^Summary: (.+)\", \"\\g<1>\", line)\n\n # Start processing our keywords...\n for (name, regex) in split.iteritems():\n match = re.match( regex, line )\n if match:\n results[name] += [result.strip() for result in match.group(1).split(\",\")]\n\n for (name, regex) in numeric.iteritems():\n match = re.match( regex, line )\n if match:\n results[name] += re.findall(\"(\\d{1,10})\", match.group(1))\n\n for (name, regex) in presence.iteritems():\n if re.match( regex, line ):\n results[name] = True\n\n self.keywords = results", "def get_meta_keywords(self):\n return self.get_meta_content(self.article.doc, \"meta[name=keywords]\")", "def get_keywords(self):\n keys = []\n for post in self:\n keys.extend(post.Keywords)\n return list(sorted(set(keys)))", "def add_keywords(self, response: Response) -> list:\n return response.xpath(\"//ul[@class='term']/li/a/text()\").getall()", "def get_entities_dict(p_str):\n nlp = en_core_web_sm.load()\n doc = nlp(p_str)\n entities = {}\n relevant_keywords = []\n list_of_types = ['NORP', 'ORG', 'GPE', 'LAW', 'LANGUAGE']\n for X in doc.ents:\n if not(X.label_ in entities):\n entities[X.label_] = []\n entities[X.label_].append(X.text)\n if X.label_ in list_of_types:\n relevant_keywords.append(X.text)\n print(entities)\n print(\"HERE\")\n print(relevant_keywords)\n return entities, relevant_keywords", "def articles_id_headwords (_id):\n\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE no = :id\n ORDER BY sortkeyword\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'id' : _id, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)", "def _get_keywords(self, title: str):\n # Prepare data\n 
keywords = set()\n stops = set(nltk.corpus.stopwords.words(\"english\"))\n stemmer = nltk.stem.SnowballStemmer(\"english\")\n ent_types = [\n \"PERSON\", \"ORGANIZATION\", \"FACILITY\", \"LOCATION\", \"DATE\",\n \"TIME\", \"GPE\", \"MONEY\",\n ]\n excluded_word_types = [\"RB\", \"IN\", \"PRP\"]\n\n # Tokenize and chunk words using NLTK\n tokens = nltk.tokenize.word_tokenize(title)\n positions = nltk.pos_tag(tokens)\n chunk = nltk.ne_chunk(positions)\n\n # Make a word list of keywords we want to add, that\n # are not part of our excluded word types.\n words = set()\n for pos in positions:\n word, word_type = pos\n if word.isalnum() and word_type not in excluded_word_types:\n words.add(word)\n\n # Add all entities to keyword list and remove them from\n # our remaining word set so they don't get added again\n # and stemmed later.\n for subtree in chunk.subtrees(filter=lambda t: t.label() in ent_types):\n for leaf in subtree.leaves():\n keywords.add(leaf[0])\n if leaf[0] in words:\n words.remove(leaf[0])\n\n # Add remaining words in list and stem them to base form,\n # stemming means we change words from e.g. \"eating\" to \"eat\".\n for word in words:\n if word not in stops:\n keywords.add(stemmer.stem(word))\n\n return sorted([keyword.lower() for keyword in keywords])", "def get_dictionary_file_lines_for_keywords(self):\n keywords_iter = iter(self.keywords)\n next_keyword = keywords_iter.next()\n print(\"Searching for keyword {}\".format(next_keyword))\n\n self.dictionary_file.open_handle()\n result_lines = list()\n while next_keyword:\n line = self.dictionary_file.read_line_to_obj()\n if not line:\n print(\"Reached end of dictionary file\")\n break\n\n if line.term < next_keyword:\n continue\n elif line.term == next_keyword:\n print(\"Found postings list for term {}\".format(next_keyword))\n result_lines.append(line)\n\n try:\n next_keyword = keywords_iter.next()\n print(\"Searching for keyword {}\".format(next_keyword))\n except StopIteration:\n print(\"Finished searching for all keywords\")\n break\n\n return result_lines", "def headwords_id_context (_id):\n\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, \"\"\"\n SELECT keyword, sortkeyword\n FROM keyword\n WHERE id = :id\n \"\"\", { 'id' : _id })\n keyword, sortkeyword = res.fetchone ()\n\n res1 = execute (conn, \"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE sortkeyword < :sortkeyword\n ORDER BY sortkeyword DESC, n DESC, no DESC\n LIMIT :limit\n \"\"\", { 'sortkeyword' : sortkeyword, 'limit' : limit })\n\n res2 = execute (conn, \"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE sortkeyword >= :sortkeyword\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n \"\"\", { 'sortkeyword' : sortkeyword, 'limit' : limit + 1 })\n\n res = []\n\n for row in reversed (res1.fetchall ()):\n res.append (row[:3])\n for row in res2:\n res.append (row[:3])\n\n return make_headwords_response (res, limit)", "def get_full_data(self):\n to_return = {}\n keywords = Keywords()\n for word in self.keywords:\n to_return[word] = keywords[word].get_info()\n return to_return", "def find_keywords(anchor, keywords=['']):\n rel_keywords = []\n href, content = parse_anchor(anchor)\n \n for keyword in keywords:\n kw = keyword.lower()\n if kw in href.lower() or kw in content.lower():\n rel_keywords.append(keyword)\n \n return rel_keywords", "def getMetaKeywords(self, article):\n return self.getMetaContent(article.doc, \"meta[name=keywords]\")", "def 
get_all_keywords(resource):\n keywords = []\n resource.populate()\n for res in [i for i in resource.imports.data if isinstance(i, robot.parsing.settings.Resource)]:\n keyword_file = os.path.abspath('{}/{}'.format(res.directory, res.name))\n if keyword_file not in processed:\n res_obj = ResourceFile(keyword_file)\n processed[keyword_file] = res_obj\n keywords += get_all_keywords(res_obj)\n for keyword in resource.keywords:\n print(keyword.name)\n keywords.append(tuple((keyword.source, keyword.name, keyword.args.value if keyword.args.value else [])))\n return keywords", "def keywords(self):\n return self.__keywords", "def get_article_keywords(article,\n keywords,\n preprocess_type=PreprocessWordType.LEMMATIZE):\n matches = set()\n for word in article.words:\n preprocessed_word = query_utils.preprocess_word(word,\n preprocess_type)\n if preprocessed_word in keywords:\n matches.add(preprocessed_word)\n return sorted(list(matches))", "def get_required_keywords_from_original():\n required_keywords = {}\n f = open('required_keywords.txt', 'r')\n curr_instrument = \"\"\n for line in f:\n if line[-2:] == \":\\n\":\n instrument = line[:-2]\n curr_instrument = instrument\n if instrument not in required_keywords.keys():\n required_keywords[instrument] = {}\n #print (line[:-2])\n elif line == \"\\n\":\n pass\n else:\n line = re.sub('[(),\\'|]', '', line)\n line = re.sub('\\.', ' ', line)\n new_line = line.split(' ')\n final_line = []\n final_line.append(new_line[0])\n for l in range(1,len(new_line)):\n temp_word = str(new_line[l][:8])\n temp_word = re.sub('\\n','',temp_word)\n if temp_word not in final_line:\n final_line.append(temp_word)\n required_keywords[curr_instrument][final_line[0]] = final_line[1:]\n more_required = ['REFTYPE', 'DESCRIP', 'AUTHOR', 'PEDIGREE', 'HISTORY']\n for k,v in required_keywords.iteritems():\n path = 'required_keywords/' + k + '_required_keywords.csv'\n with open(path, 'wb') as csvfile:\n keywriter = csv.writer(csvfile, delimiter=' ', quotechar='|',quoting=csv.QUOTE_MINIMAL)\n for key,value in v.iteritems():\n keywriter.writerow([key]+value + more_required)", "def GetKeywords(self):\n if wx.VERSION >= (2, 9, 0, 0, ''):\n return [(0, R_KEYWORDS), (1, R_KEYWORDS2), (2, R_KEYWORDS3)]\n else:\n return [(1, KEYWORDS)]", "def get_url_keywords(self, webpage):\n\n return {'url': self.get_url(webpage), 'keywords': self.get_keywords(webpage), 'name': webpage}", "def keywords(self):\n return self._keywords", "def keywords(self):\n return self._keywords", "def get_paper_keywords(tree):\n\tpath = '//table/tr/th[text() = \"Keywords:\"]/following-sibling::td/text()'\n\tkeywords = tree.xpath(path)\n\t# xpath returns a list with the keywords as a single string element separated by new lines, commas or semi-colons\n\t# Make this into a list of keywords\n\tif keywords:\n\t\t# Split on new lines, commas and semi-colons\n\t\tkeywords = re.split('[\\\\n,;]', keywords[0])\n\t\t# Remove trailing white space and empty strings\n\t\tkeywords = [kw.strip() for kw in keywords if kw]\n\n\treturn keywords", "def headwords ():\n\n q = request.args.get ('q')\n fulltext = request.args.get ('fulltext')\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n where = ''\n\n if (not q) and (not fulltext):\n # Retrieve full list of headwords\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n 
\"\"\", { 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n if q:\n q = q.replace ('-', '')\n q = q.replace ('%', '')\n q = q.replace ('?', '_')\n q = q.replace ('*', '%')\n where = \"(keyword LIKE :q) AND\"\n\n if not fulltext:\n # easy out\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE keyword LIKE :q\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'q' : q, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT DISTINCT\n k.id,\n k.webkeyword COLLATE utf8mb4_bin AS webkeyword,\n k.no\n FROM keyword k,\n article a\n WHERE {where} (MATCH (a.idxtext) AGAINST (:fulltext IN BOOLEAN MODE))\n AND a.no = k.no\n ORDER BY k.sortkeyword, k.n, k.no\n LIMIT :limit\n OFFSET :offset\n \"\"\".format (where = where), { 'q' : q, 'fulltext' : fulltext,\n 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)", "def load_keywords():\n keywords = set()\n with open(os.path.join(BASE, \"data/keywords.txt\")) as fp:\n for line in fp:\n keywords.add(line.strip().lower())\n return keywords", "def get_meta_keywords(self, article):\r\n return self.get_meta_content(article.doc, \"meta[name=keywords]\")", "def get_keywords(seq):\r\n if len(seq) = 0:\r\n return None\r\n freqs = {}\r\n for w in seq: \r\n if w not in freqs:\r\n\t freqs[w] = 1\r\n\telse\r\n\t freqs[w] += 1\r\n num_keys = len(freqs)\r\n res = []\r\n \r\n return res", "def extract_keywords(query):\n nlp_server_response = __post_request_nlpserver(extract_special_characters(query))\n keywords = []\n\n for sentence in nlp_server_response['sentences']:\n for token in sentence['tokens']:\n if token['pos'] in {'NN', 'JJ', 'NNP', 'NNS', 'NNPS', 'VB', 'VBN', 'VBZ', 'VBP', 'VBG'}:\n if not token[\"lemma\"].lower() in english_stopwords:\n if not token['lemma'] in {'be', 'have'}:\n keywords.append(token['lemma'])\n return keywords", "def gather_headlines(urls):\n pass", "def get_headlines(id):\n get_headlines_url = secondary_url.format(id,api_key)\n\n with urllib.request.urlopen(get_headlines_url) as url:\n get_headlines_data = url.read()\n get_headlines_response= json.loads(get_headlines_data)\n\n headlines_results = None\n\n if get_headlines_response['articles']:\n headlines_results_list = get_headlines_response['articles']\n headlines_results = process_headlines(headlines_results_list)\n\n return headlines_results", "def get_keywords(self):\n all_keywords = []\n z_index = 0\n for zettel in self.lemma_tokens:\n keywords = []\n w_index = 0\n cur_zettel_dict = {}\n for word in zettel:\n cur_zettel_dict.setdefault(word[0], 0)\n cur_word_total_score = self.all_scores[z_index][w_index]\n if cur_zettel_dict[word[0]] > cur_word_total_score:\n w_index += 1\n continue\n else:\n cur_zettel_dict[word[0]] = cur_word_total_score\n w_index += 1\n cur_sorted = sorted(cur_zettel_dict.items(), key=lambda kv: kv[1], reverse=True)\n for i in range(self.keyword_n):\n keywords.append(str(cur_sorted[i]))\n z_index += 1\n all_keywords.append(keywords)\n return all_keywords", "def get_headline_search(query):\n query = query.replace(' ',\"\")\n category=\"\"\n get_headlines_url = 'https://newsapi.org/v2/top-headlines?category={}&query={}&language=en&apiKey={}'.format(category,query,api_key)\n headlines_results = []\n with urllib.request.urlopen(get_headlines_url) as url:\n 
get_headlines_data = url.read()\n get_headlines_response = json.loads(get_headlines_data)\n if get_headlines_response['articles']:\n headlines_result_list=get_headlines_response['articles']\n for headline in headlines_result_list:\n headlines_results.append(headline)\n return headlines_results", "def get_meta_keywords(self):\n mk = self.meta_keywords.replace(\"<title>\", self.title)\n return mk.replace(\"<short-text>\", self.short_text)", "def all_headlines_from(url):\n pass", "def get_anchors(self):\n self.anchors_dic = {}\n meta = self.get_metadata()\n lines = meta.split(\"|\")\n for line in lines:\n data= line.split()\n anchor_name = data[0]\n # appending anchor in dictionary with its coordinates \n self.anchors_dic[anchor_name] = (data[1], data[2], data[3])", "def set_keyword_map(self):\n \n ret = defaultdict(list)\n for idx, doc in enumerate(self.docs):\n for token in doc:\n if token in self.dictionary.token2id:\n ret[token].append(idx)\n \n self.keyword_map = ret\n return ret", "def findall(ctx):\n _check_for_commands(ctx.obj[\"keep_path\"])\n keep = ctx.obj[\"keep\"]\n results = {}\n for kw, command_ids in keep[\"keyword2Ids\"].items():\n results[kw] = []\n for command_id in command_ids:\n command = keep[\"id2Command\"][str(command_id)]\n explanation = keep[\"id2Explanation\"][str(command_id)]\n results[kw].append({ \n \"id\": command_id,\n \"command\": command,\n \"explanation\": explanation\n })\n _show_results(results)", "def keywords(self):\n return list(self._kw)", "def keywords_pattern():\n with open(\"keywords.txt\", 'r') as f:\n lines = [line.strip() for line in f if line.strip()]\n return set(lines)", "def get_keywords_for_movie(url):\n pass", "def parse_keyword_in_comment(comment):\n debug(\"into keyword.\")\n debug(comment)\n comment_str = ('\\n'.join(comment)).lower()\n comment_dict = {}\n\n more_keywords = vim.eval(\"g:BHKeywords\")\n keywords = copy(KEYWORDS)\n keywords.update(more_keywords)\n\n keywords = sorted(\n [keyword for keyword in keywords if comment_str.find(keyword.lower()) != -1],\n lambda x, y: cmp(comment_str.index(x.lower()), comment_str.index(y.lower()))\n )\n debug(\"Found keywords in the original header: %s\" % keywords)\n if not keywords:\n return {}, comment\n\n def get_content(comment, keyword, next_keyword=None):\n debug(\"into get content\")\n for index, line in enumerate(comment):\n if line.lower().find(keyword.lower()) != -1:\n break\n\n if next_keyword:\n for jndex, line in enumerate(comment):\n if line.lower().find(next_keyword.lower()) != -1:\n break\n else:\n jndex = len(comment)\n # content in comment[index:jndex-1], further purify it.\n re.I = True\n line_re = re.compile(r\"(?P<key>%s)?:?\\s*(?P<value>.*)\" % keyword)\n re.I = False\n\n first_line = line_re.match(comment[index]).group('value')\n return '\\n'.join([first_line] + comment[index+1:jndex-1])\n\n for i, keyword in enumerate(keywords):\n if i == len(keywords) - 1:\n comment_dict[keyword] = get_content(comment, keyword)\n else:\n comment_dict[keyword] = get_content(comment, keyword, keywords[i+1])\n return comment_dict, comment", "def keywords(text):\r\n from operator import itemgetter # for sorting\r\n text = split_words(text)\r\n numWords = len(text) # of words before removing blacklist words\r\n text = [x for x in text if x not in stopWords]\r\n freq = Counter()\r\n for word in text:\r\n freq[word] += 1\r\n\r\n minSize = min(10, len(freq))\r\n keywords = tuple(freq.most_common(minSize)) # get first 10\r\n keywords = dict((x, y) for x, y in keywords) # recreate a dict\r\n\r\n 
for k in keywords:\r\n articleScore = keywords[k]*1.0 / numWords\r\n keywords[k] = articleScore * 1.5 + 1\r\n\r\n keywords = sorted(keywords.iteritems(), key=itemgetter(1))\r\n keywords.reverse()\r\n return dict(keywords)", "def keywords(self, **kwargs):\n\n path = self._get_movie_id_path('keywords')\n resp = self._get_method(path, kwargs)\n return resp", "def dictionary(cleaned_data,threshold):\n news = []\n for date in cleaned_data:\n for headlines in cleaned_data[date]:\n news.append(headlines)\n\n word_freq = nltk.FreqDist(itertools.chain(*news))\n id_to_word = ['<pad>'] + [word for word, cnt in word_freq.items() if cnt >= threshold] + ['<unk>']\n word_to_id = {word:idx for idx, word in enumerate(id_to_word)}\n \n return id_to_word, word_to_id", "def get_keywords(self, number=10):\n keyword = []\n node_weight = OrderedDict(sorted(self.node_weight.items(), key=lambda t: t[1], reverse=True))\n for i, (key, value) in enumerate(node_weight.items()):\n # print(key + ' - ' + str(value))\n keyword.append(key)\n if i > number:\n break\n return keyword", "def get_meta_keywords(self, language=None, fallback=True, version_id=None, force_reload=False):\n return self.get_title_obj_attribute(\"meta_keywords\", language, fallback, version_id, force_reload)", "def lookup_keywords(filename):\n keywords = []\n start_of_table = r'\\*+\\s+'\n start_of_kw_table = r'\\*+\\s+Keyword'\n in_kw_table = False\n f = open(filename, \"r\")\n for line in f.readlines():\n line = line.rstrip()\n if len(line) == 0 or line.startswith(\"#\"):\n continue # skip comments and blanks\n if re.match(start_of_kw_table, line):\n in_kw_table = True # table started\n continue\n if re.match(start_of_table, line) and not re.match(start_of_kw_table, line):\n in_kw_table = False # table ended\n continue\n if line.startswith(' '):\n continue # skip content rows\n if in_kw_table:\n keywords.append(line)\n f.close()\n return keywords", "def get_hypernyms(word):\n syn = wn.synsets(word)\n hnyms = []\n for h in syn[0].hypernyms():\n hnyms.append({\n \"lemmas\": h.lemma_names(),\n \"d\": h.definition(),\n \"pos\": h.pos(),\n \"id\": h.name()\n })\n return hnyms", "def __get_keywords(self, text_list):\r\n specialKW = [\r\n 'run keyword',\r\n 'run keyword and continue on failure',\r\n 'run keyword and expect error',\r\n 'run keyword and ignore error',\r\n 'run keyword and return'\r\n 'run keyword and return if',\r\n 'run keyword and return status',\r\n 'run keyword if',\r\n 'run keyword if all critical tests passed',\r\n 'run keyword if all tests passed',\r\n 'run keyword if any critical tests failed',\r\n 'run keyword if any tests failed',\r\n 'run keyword if test failed',\r\n 'run keyword if test passed',\r\n 'run keyword if timeout occurred',\r\n 'run keyword unless',\r\n 'run keywords',\r\n 'wait until keyword succeeds',\r\n 'repeat keyword',\r\n 'else'\r\n ]\r\n specialSettings = [\r\n '[Arguments]',\r\n '[Documentation]'\r\n ]\r\n L = []\r\n if text_list[0] in specialSettings:\r\n return L\r\n for item in text_list:\r\n if self.__is_keyword(item):\r\n L.append(item)\r\n if not item.replace('_', ' ').replace('-', ' ').lower() in specialKW:\r\n break\r\n return L", "def dehydrate_keywords(self, bundle):\n return map(str, bundle.obj.keywords.all())", "def candidate_keywords(self):\n candidate_phrases = list()\n for sentence in self.sentences():\n candidate_sentence = re.sub(self.stopwords_regex(), '|', sentence)\n phrases = candidate_sentence.split(\"|\")\n for phrase in phrases:\n phrase = remove_white_spaces(phrase).lower()\n if 
phrase:\n candidate_phrases.append(phrase)\n return candidate_phrases", "def get_headlines(driver,site,URL_exclusions):\r\n links = get_all_links(driver,site,URL_exclusions)\r\n headlines = []\r\n n=0\r\n for link in links:\r\n driver = make_driver_obj() #get_all_links quits driver when finished.\r\n try:\r\n while True:\r\n try:\r\n driver.get(link) #No need to accept cookies to don't need return_search\r\n break\r\n except:\r\n continue\r\n except: #If we can't open the URL for any reason.\r\n driver.quit()\r\n continue\r\n n += 1\r\n headline = get_headline(driver)\r\n if headline != '':\r\n headlines.append(headline) #Only append if able to identify headline text\r\n #print(n)\r\n #print(headline)\r\n #print()\r\n driver.quit()\r\n return headlines", "def _get_term_dictionaries(self):\n\t\tforward_dict = {}\n\t\treverse_dict = defaultdict(list)\n\t\tfor term in self.terms():\n\t\t\tif (term.name is not None) and (\"obsolete\" not in term.name): \n\t\t\t\twords = [term.name]\n\t\t\t\twords.extend([x.description for x in list(term.synonyms)])\t# Add all the synonyms\n\t\t\t\twords = [re.sub(r\" ?\\([^)]+\\)\", \"\", x) for x in words]\t\t# Replace parenthetical text.\n\t\t\t\tforward_dict[term.id] = words\n\t\t\t\tfor word in words:\n\t\t\t\t\treverse_dict[word].append(term.id)\n\t\treturn(forward_dict, reverse_dict)", "def get_top_keywords(entries):\n # Extract text for processing\n\n raw_text = [] # raw text in sentences\n for entry in entries:\n # Its a post\n if 'title' in entry:\n raw_text.append(entry['title'])\n raw_text += tokenize.sent_tokenize(entry['selftext'])\n else:\n raw_text += tokenize.sent_tokenize(entry['body'])\n \n # Tokenize\n tokens = tokenize_posts_keywords(raw_text)\n\n # 1-gram\n fdist_1 = FreqDist(tokens)\n top_keywords_1 = fdist_1.most_common(100)\n \n # 2-gram\n bigrams = ngrams(tokens, 2)\n fdist_2 = FreqDist(bigrams)\n top_keywords_2 = fdist_2.most_common(100)\n top_keywords_2 = [(f'{keywords[0]} {keywords[1]}', mentions) for keywords, mentions in top_keywords_2]\n\n # 3-gram\n trigrams = ngrams(tokens, 3)\n fdist_3 = FreqDist(trigrams)\n top_keywords_3 = fdist_3.most_common(100)\n top_keywords_3 = [(f'{keywords[0]} {keywords[1]} {keywords[2]}', mentions) for keywords, mentions in top_keywords_3]\n\n top_keywords = top_keywords_1 + top_keywords_2 + top_keywords_3\n return [{ 'keyword' : keyword, 'mentions' : mentions } for keyword, mentions in top_keywords]", "def alchemy_keywords(text):\n if text:\n # TODO Alchemy API breaks if overview text is greater than 150 kbytes\n # First step skip these. 
If time look at truncating, splitting, or combining\n # by first skipping, it will be easier to update later\n if sys.getsizeof(text) > 150000:\n return {}\n\n # Create an AlchemyAPI object.\n alchemy_obj = AlchemyAPI.AlchemyAPI()\n\n # Load the API key from disk.\n alchemy_obj.loadAPIKey(\"api_key.txt\")\n\n # Extract topic keywords from a text string.\n result = alchemy_obj.TextGetRankedKeywords(text)\n\n # Use xml.etree.ElementTree to process xml returned from AlchemyAPI\n # extract keyword and relevance\n root = ET.fromstring(result)\n\n keyword_dictionary = {}\n\n for node in root.iter(\"keyword\"):\n keyword = node.find(\"text\").text.encode(\"utf-8\")\n relevance = float(node.find(\"relevance\").text)\n keyword_dictionary[keyword] = relevance\n\n return keyword_dictionary\n else:\n print \"No text to analyze\"\n return {}", "def keys(self):\n return ['title', 'keywords', 'description', 'url', 'content_file',\n 'language', 'phone', 'email']", "def get_webpage_keywords(self, response):\n tags = response.xpath('//*/meta[@property=\"keywords\"]/@content').extract_first()\n tags1 = response.xpath('//*/meta[@name=\"keywords\"]/@content').extract_first()\n if tags:\n return tags.strip()\n elif tags1:\n return tags1.strip()\n else:\n tags = response.xpath('//*/meta[@itemprop=\"keywords\"]/@content').extract_first()\n if tags:\n return tags.strip()\n else:\n return \"\"", "def list_keywords(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params=kwargs)", "def _get_identifiers_from_kbs(self) -> dict:\n id_mapping_dict = defaultdict(set)\n\n for kb in self.kbs:\n sys.stdout.write('\\n%s \\n' % kb.name)\n for p in tqdm.tqdm(kb.pathways, total=len(kb.pathways)):\n for ent in p.entities:\n id_set = list(set(ent.xrefs))\n if len(id_set) == 1:\n id_mapping_dict[id_set.pop()] = set([])\n for p, q in itertools.combinations(id_set, 2):\n id_mapping_dict[p].add(q)\n id_mapping_dict[q].add(p)\n\n return id_mapping_dict", "def all_headlines(html_root_node):\n pass", "def keywords(self):\n from hubspot3.keywords import KeywordsClient\n\n return KeywordsClient(**self.auth, **self.options)", "def GetKeywords(self):\n return self._code['keywords']", "def keywords(self) -> Set[str]:\n return self._keywords", "def getmetakeywords(allcontent, corpus):\n for i in range(0, len(allcontent)):\n words = re.split(\"[, ]+\", allcontent[i])\n if words[0] == \"Meta\":\n for j in range(3, len(words)):\n if len(processword(words[j])) > 0:\n corpus.append(processword(words[j]))", "def _generate_keywords(self):\n _keywords = [*self._lookup_opcodes_dir.keys(), *self._registers_list.keys()]\n for key in _keywords:\n self._keywords.extend(key.split(\" \"))\n return", "def get_blog_keywords(id):\n key = KEY_BLOG_KEYWORDS_PREFIX + str(id)\n keywords = RedisHelper.get_cache(key)\n if RedisHelper.is_cache_exist(key) is False:\n keywords = list(BlogHelper.get_blog(id).tags.all())\n RedisHelper.create_cache(key, keywords, RedisTimeOut.REDIS_TIMEOUT_1_DAYS)\n return keywords", "def get_keywords(text):\n tokens = [word.lower() for word in word_tokenize(text)]\n\n # tag words as verb, noun etc\n tagged_words = pos_tag(tokens)\n\n # retrieve list of boring words from file\n stopwords_file = os.path.join(BASE_DIR, 'data', 'stopwords.txt')\n with open(stopwords_file, 'r', encoding='utf-8') as f:\n stopwords = [line.rstrip(linesep) for line in f]\n \n #We don't want keywords to contain anything in this list\n forbidden = 
['.',',',';',':','?','!','+',')','(','[',']','/','<','>','\"','©','1','2','3','4','5','6','7','8','9','0']\n\n # NLTK Chunking - detects noun phrases and phrases of form verb noun or adj noun\n patterns = \"\"\"NP: {<JJ>*<NN><NNS>}\n {<JJR><NNS>}\n {<JJ>*<NNS>}\n {<NN><NNS>} \n {<JJ><NNS>}\n {<JJ>*<NN>*}\n {<NN>*}\n {<NNS>*}\"\"\"\n chunker = RegexpParser(patterns)\n chunks = chunker.parse(tagged_words)\n\n #these are the phrases we want, as lists within a list\n validphrases = []\n for t in chunks.subtrees():\n if t.label() == 'NP':\n validphrases.append([x for x,y in t.leaves()])\n\n #turning lists within lists into actual noun phrases i.e [[radiation], [breast,cancer]] becomes [radiation, breast cancer]\n #sorry for my horrible code\n #trees suck\n lemmatizables = []\n for sublist in validphrases:\n lemmatizables.append(' '.join(sublist))\n\n lemmatizer = WordNetLemmatizer()\n lems = [lemmatizer.lemmatize(x) for x in lemmatizables]\n\n #removing stopwords after lemmatizinga, then removing anything containing punctuation or a number\n lems = filter(lambda lem: lem not in stopwords, lems)\n lems = filter(lambda lem: not any(char in lem for char in forbidden), lems)\n\n return tuple(lems)", "def get_keywords(self, webpage):\n \n try:\n return self.urls_keywords_dict[webpage]['keywords']\n except:\n return None", "def lemma_headwords(self):\n new_var = 'lemma_headword'\n lemma_heads = [clx._lemmas[i]['Head'] for i in xrange(len(clx._lemmas))]\n has_item = self.compare_items(lemma_heads)\n new_column = []\n if False in has_item:\n self._warning_msg('lemma_headword', lemma_heads)\n for record, exists in zip(self._dict, has_item):\n if exists:\n lemma_id = clx.wordform_lookup(record)[0].IdNumLemma\n lemma_head = clx.lemma_by_id(lemma_id).Head\n else:\n lemma_head = None\n new_column.append(lemma_head)\n self._append_column(new_column, new_var)", "def keywords(self):\n defined_keywords = [\n ('allowempty_map', 'allowempty_map'),\n ('assertion', 'assertion'),\n ('default', 'default'),\n ('class', 'class'),\n ('desc', 'desc'),\n ('enum', 'enum'),\n ('example', 'example'),\n ('extensions', 'extensions'),\n ('format', 'format'),\n ('func', 'func'),\n ('ident', 'ident'),\n ('include_name', 'include'),\n ('length', 'length'),\n ('map_regex_rule', 'map_regex_rule'),\n ('mapping', 'mapping'),\n ('matching', 'matching'),\n ('matching_rule', 'matching_rule'),\n ('name', 'name'),\n ('nullable', 'nullable'),\n ('parent', 'parent'),\n ('pattern', 'pattern'),\n ('pattern_regexp', 'pattern_regexp'),\n ('range', 'range'),\n ('regex_mappings', 'regex_mappings'),\n ('required', 'required'),\n ('schema', 'schema'),\n ('schema_str', 'schema_str'),\n ('sequence', 'sequence'),\n ('type', 'type'),\n ('type_class', 'type_class'),\n ('unique', 'unique'),\n ('version', 'version'),\n ]\n found_keywords = []\n\n for var_name, keyword_name in defined_keywords:\n if getattr(self, var_name, None):\n found_keywords.append(keyword_name)\n\n return found_keywords", "def get_keywords(self):\n\n if str(self.keywords) == \"unset\": return []\n # if self.keywords: return self.keywords\n if len(self.keywords) > 0: return self.keywords\n # retrieve from args and return if exists\n keywords = Settings.get_keywords() or []\n if len(keywords) > 0: return keywords\n if not Settings.prompt(\"keywords\"):\n self.keywords = \"unset\" # used to skip prompting for value in future\n return []\n question = {\n 'type': 'input',\n 'name': 'keywords',\n 'message': 'Keywords:',\n 'validate': ListValidator\n }\n keywords = 
prompt(question)[\"keywords\"]\n keywords = [n.strip() for n in keywords.split(\",\")]\n # confirm keywords\n if not Settings.confirm(keywords): return self.get_keywords()\n self.keywords = keywords\n return self.keywords", "def text_to_keywords(text):\n response = alchemyapi.keywords('text', text, {'sentiment': 1})\n ret_keys = []\n\n if response['status'] == 'OK':\n keywords = response['keywords']\n for keyw_chunk in keywords[:2]:\n # if len(keywords) > 0:\n # keyw_chunk = keywords[0]\n top_keyword = keyw_chunk['text'].encode('utf-8')\n # else:\n # top_keyword = ''\n ret_keys.append(top_keyword)\n\n return ret_keys # top_keyword\n\n # for keyword in response['keywords']:\n # print('text: ', keyword['text'].encode('utf-8'))\n # print('relevance: ', keyword['relevance'])\n # print('sentiment: ', keyword['sentiment']['type'])\n # if 'score' in keyword['sentiment']:\n # print('sentiment score: ' + keyword['sentiment']['score'])\n # print('')\n else:\n print('Error in keyword extaction call: ', response['statusInfo'])\n return [] # ''", "def test_guided():\n top_n = 5\n seed_keywords = [\"time\", \"night\", \"day\", \"moment\"]\n keywords = model.extract_keywords(doc_one,\n min_df=1,\n top_n=top_n,\n seed_keywords=seed_keywords)\n\n assert isinstance(keywords, list)\n assert isinstance(keywords[0], tuple)\n assert isinstance(keywords[0][0], str)\n assert isinstance(keywords[0][1], float)\n assert len(keywords) == top_n", "def GetKeywords(self):\n kwlist = [CSS1_KEYWORDS , CSS_PSUEDO_CLASS]\n # 2.9 supports CSS3 so for 2.8 just add CSS3 keywords to the css2 list \n if wx.VERSION < (2, 9, 0, 0, ''):\n css2_kw = (CSS2_KEYWORDS[0], \" \".join((CSS2_KEYWORDS[1], CSS3_KEYWORDS[1])))\n kwlist.append(css2_kw)\n else:\n kwlist.append(CSS2_KEYWORDS)\n kwlist.append(CSS3_KEYWORDS)\n kwlist.append(PSEUDO_ELEMENTS)\n return kwlist", "def get_keywords(source_or_file):\n tree = get_ast(source_or_file)\n lister = KeywordLister().visit(tree)\n return lister.data", "def keywords_map(articles_map):\n logging.debug('============== IN keywords_map: ================')\n keywords_map = {}\n for article in articles_map:\n if len(articles_map[article]['pub_keywords']) != 0:\n keyword_list = articles_map[article]['pub_keywords'][\n 0] # for each element in articles_map[article]['pub_keywords'] list get keyword\n logging.debug(keyword_list)\n for keyword in keyword_list:\n if not keyword.lower() in keywords_map:\n keywords_map[keyword.encode('ascii', 'ignore').decode().lower()] = 1\n else:\n keywords_map[keyword.encode('ascii', 'ignore').decode().lower(\n )] += 1\n return keywords_map", "def onHeadlineClick(self, tag, keywords):\n self.handleEvent(\"headclick1\", tag, keywords)", "def list(self):\n\t\treturn self.link_words", "def getKeywords(tmdbKeywords):\n \n words = []\n if \"keywords\" in tmdbKeywords:\n for keyword in tmdbKeywords[\"keywords\"]:\n words += _format(keyword[\"name\"]).split()\n else:\n raise AttributeError(\"%s instance has no attribute keywords\" % tmdbKeywords) \n return words", "def main():\n records = get_block_of_records({\"keyword\": \"food\"})\n print (\"returned items: {}\".format(len(records)))\n\n processed_records = {}\n for item in records:\n meta = item[\"meta\"]\n umm = item[\"umm\"]\n cid = meta[\"concept-id\"]\n short_name = umm[\"ShortName\"]\n processed_records[cid] = short_name\n\n print (\"uniq keys: {}\".format(len(processed_records.keys())))", "def get_keywords(keyword_list: List[Tuple[str, str]], keyword_type: str) -> List[str]:\n keywords = [x[0] for x in keyword_list if 
x[1].startswith(keyword_type)]\n\n return keywords", "def search(self, keywords: List[str], _type: str = \"\") -> List[Dict]:\n url = self.construct_url(keywords, _type)\n source = self.get(url)\n links = self.extract_links(source)\n return [{\"url\": url} for url in links]", "def hot_phrases(self):\r\n return self._get('hot_phrases', {})", "def extract_keywords(df):\n df[\"key_words\"] = \"\"\n\n for index, row in df.iterrows():\n plot = row[\"Plot\"]\n\n rake = Rake()\n\n rake.extract_keywords_from_text(plot)\n\n key_words_dict_scores = rake.get_word_degrees()\n\n row[\"key_words\"] = list(key_words_dict_scores.keys())\n\n df.drop(columns=[\"Plot\"], inplace=True)\n df.set_index(\"Title\", inplace=True)\n\n return df", "def get_all_headline_data():\n\twebsites = database.get_website_URLs()\n\tall_headlines_arr = []\n\tfor curr_elt in websites:\n\t\tcurr_website = curr_elt[0]\n\t\tsource = curr_elt[1]\n\t\tcurr_headline_arr = get_headline_data(curr_website, source)\n\t\tall_headlines_arr.append(curr_headline_arr)\n\treturn all_headlines_arr", "def primary_header_keys(self):\n return [d for d in self.primary_header]", "def check_for_keywords(anchors, keywords, keywords_ignore=[\"\"]):\n\n # Format keywords to minimize discrepencies\n for ii, keyword in enumerate(keywords):\n keyword = keyword.lower()\n keywords[ii] = keyword\n\n # Parse all anchors and check if any contain keywords\n # Append to rel_anchors if a match\n hrefs, contents = parse_anchors(anchors)\n\n rel_anchors = []\n rel_keywords_all = []\n for anchor, href, content in zip(anchors, hrefs, contents):\n if href == None:\n continue\n\n href = href.lower()\n content = content.lower()\n\n # Check if href or content contain any of the keywords\n href_kw = any(map(href.__contains__, keywords))\n content_kw = any(map(content.__contains__, keywords))\n\n # Can combine with above?\n href_gen = map(href.__contains__, keywords)\n content_gen = map(content.__contains__, keywords)\n\n # Check if any of the \"ignore keywords\" exist\n href_kwi = any(map(href.__contains__, keywords_ignore))\n content_kwi = any(map(href.__contains__, keywords_ignore))\n\n rel_keywords = []\n if href_kw or content_kw:\n if href_kwi or content_kwi:\n print(\"Relevant keywords found but not added due to ignore keywords.\")\n\n else:\n rel_anchors.append(anchor)\n\n for keyword in keywords:\n if next(href_gen) or next(content_gen):\n rel_keywords.append(keyword)\n\n print(f\"Match because of following keywords: {rel_keywords}\")\n\n if not rel_keywords:\n rel_keywords = [\"\"]\n\n rel_keywords_all.append(rel_keywords)\n\n return rel_anchors, rel_keywords_all", "def match_keywords_descriptions(input_company):\n company = input_company[\"name\"]\n key_list = input_company[\"key_list\"]\n\n if key_list == []:\n # if there's no key words, then just pull apart the description.\n # kill any words that are generic amongst all descriptions. 
we want to\n # get something close to being a key word without having key words\n stop = stopwords.words(\"english\")\n too_generic = [\"developer\", \"provider\", \"operator\", \"owner\", \n \"manufacturer\", \"manufactures\", \"company\"]\n\n key_list = [word for word in input_company[\"desc\"].lower().strip()\\\n .split(\" \") if (word not in stop) and \\\n (word not in too_generic)]\n\n syns_list = []\n\n for word in key_list:\n syns = get_synonyms(word)\n\n if syns:\n for s in syns:\n syns_list.append(s)\n\n keyword_matches = company_data[(company_data.key_list.map(lambda x: \\\n [word in x for word in key_list] != [False for word in key_list])) \\\n & (company_data.name != company)]\n\n # add columns for keyword and synonym metrics\n keyword_matches[\"match_fraction\"] = \\\n keyword_matches.key_list.map(lambda x: \\\n sum([word in x for word in key_list])/float(len(key_list)))\n\n keyword_matches[\"syn_match_frac\"] = \\\n keyword_matches.key_list.map(lambda x: \\\n sum([word in x for word in syns_list])/float(len(syns_list)))\n\n return keyword_matches", "def readLinking(goldStdFile):\n linking = dict()\n for line in open(goldStdFile):\n d = re.split(\"\\s+\", line.strip())\n mention = d[0].upper()\n kb_id = d[1].upper()\n\n if kb_id in linking.keys():\n linking[kb_id].add(mention)\n else:\n linking[kb_id] = set([mention])\n return linking" ]
[ "0.763168", "0.7235824", "0.71703315", "0.65968394", "0.61900103", "0.6189536", "0.609543", "0.60951686", "0.60745686", "0.6041486", "0.6003403", "0.5991974", "0.5966871", "0.5924465", "0.5924083", "0.5896881", "0.5850133", "0.58487964", "0.58361", "0.58255136", "0.5808301", "0.58039963", "0.57934415", "0.5756343", "0.5741688", "0.57205516", "0.5694092", "0.5693882", "0.5686357", "0.568364", "0.5664595", "0.5662306", "0.5662306", "0.5656715", "0.56545407", "0.5644772", "0.56303984", "0.5625923", "0.5621512", "0.5598575", "0.55917907", "0.5584937", "0.5581943", "0.5574477", "0.5563384", "0.553524", "0.55131626", "0.551104", "0.55056095", "0.54865026", "0.54789585", "0.5472628", "0.5456944", "0.54564065", "0.5449083", "0.54462564", "0.5429331", "0.54227114", "0.54185784", "0.54099137", "0.54092574", "0.54024667", "0.5395055", "0.5392956", "0.5392585", "0.5388421", "0.5374107", "0.53724575", "0.5369283", "0.5363462", "0.5357806", "0.5346532", "0.534058", "0.53332764", "0.53256613", "0.53183603", "0.53153783", "0.52794474", "0.5269263", "0.5268734", "0.52659345", "0.5260532", "0.5245693", "0.5239595", "0.52291226", "0.5226687", "0.52199894", "0.52180594", "0.5202232", "0.5198985", "0.5190915", "0.51790583", "0.5174673", "0.5174373", "0.51718175", "0.5171233", "0.5160101", "0.5150821", "0.51455134", "0.51382184" ]
0.81225014
0
Returns a list of keywords
def get_keywords(self):
    return list(self.keyword_headlines().keys())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def keywords(self):\n return list(self._kw)", "def keywords(self):\n return self._keywords", "def keywords(self):\n return self._keywords", "def getKeywords(self):\n return", "def keywords(self):\n return self.__keywords", "def extract_keywords(self):\n keywords = [] \n for keyword in self.watsonLanguageModel['keywords'][:self.entitySizeLimit]: \n keywords.append(keyword['text'])\n return keywords", "def get_keywords(self):\n keys = []\n for post in self:\n keys.extend(post.Keywords)\n return list(sorted(set(keys)))", "def GetKeywords(self):\n return self._code['keywords']", "def keywords(self) -> Set[str]:\n return self._keywords", "def GetKeywords(self):\n if wx.VERSION >= (2, 9, 0, 0, ''):\n return [(0, R_KEYWORDS), (1, R_KEYWORDS2), (2, R_KEYWORDS3)]\n else:\n return [(1, KEYWORDS)]", "def get_keywords(self):\n\n if str(self.keywords) == \"unset\": return []\n # if self.keywords: return self.keywords\n if len(self.keywords) > 0: return self.keywords\n # retrieve from args and return if exists\n keywords = Settings.get_keywords() or []\n if len(keywords) > 0: return keywords\n if not Settings.prompt(\"keywords\"):\n self.keywords = \"unset\" # used to skip prompting for value in future\n return []\n question = {\n 'type': 'input',\n 'name': 'keywords',\n 'message': 'Keywords:',\n 'validate': ListValidator\n }\n keywords = prompt(question)[\"keywords\"]\n keywords = [n.strip() for n in keywords.split(\",\")]\n # confirm keywords\n if not Settings.confirm(keywords): return self.get_keywords()\n self.keywords = keywords\n return self.keywords", "def __get_keywords(self, text_list):\r\n specialKW = [\r\n 'run keyword',\r\n 'run keyword and continue on failure',\r\n 'run keyword and expect error',\r\n 'run keyword and ignore error',\r\n 'run keyword and return'\r\n 'run keyword and return if',\r\n 'run keyword and return status',\r\n 'run keyword if',\r\n 'run keyword if all critical tests passed',\r\n 'run keyword if all tests passed',\r\n 'run keyword if any critical tests failed',\r\n 'run keyword if any tests failed',\r\n 'run keyword if test failed',\r\n 'run keyword if test passed',\r\n 'run keyword if timeout occurred',\r\n 'run keyword unless',\r\n 'run keywords',\r\n 'wait until keyword succeeds',\r\n 'repeat keyword',\r\n 'else'\r\n ]\r\n specialSettings = [\r\n '[Arguments]',\r\n '[Documentation]'\r\n ]\r\n L = []\r\n if text_list[0] in specialSettings:\r\n return L\r\n for item in text_list:\r\n if self.__is_keyword(item):\r\n L.append(item)\r\n if not item.replace('_', ' ').replace('-', ' ').lower() in specialKW:\r\n break\r\n return L", "def GetKeywords(self):\n return [FS_COMMANDS, FS_STDLIB, FS_FUNC, FS_CLASS]", "def get_keywords(keyword_list: List[Tuple[str, str]], keyword_type: str) -> List[str]:\n keywords = [x[0] for x in keyword_list if x[1].startswith(keyword_type)]\n\n return keywords", "def Keywords(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('keywords', default)\n return [HEP.KeywordObject(i) for i in tmp]", "def extract_keywords(query):\n nlp_server_response = __post_request_nlpserver(extract_special_characters(query))\n keywords = []\n\n for sentence in nlp_server_response['sentences']:\n for token in sentence['tokens']:\n if token['pos'] in {'NN', 'JJ', 'NNP', 'NNS', 'NNPS', 'VB', 'VBN', 'VBZ', 'VBP', 'VBG'}:\n if not token[\"lemma\"].lower() in english_stopwords:\n if not token['lemma'] in {'be', 'have'}:\n keywords.append(token['lemma'])\n return keywords", "def keywords(text:str) -> list:\n return sorted(set(text.split(' ')), 
key=frequency, reverse=True)[0:5]", "def keywords(self, **kwargs):\n\n path = self._get_movie_id_path('keywords')\n resp = self._get_method(path, kwargs)\n return resp", "def get_keywords(self, sectioned_text):\n \n keywords = []\n \n if 'full text' in list(sectioned_text.keys()):\n \n for word in self.keyword_list:\n if word in sectioned_text['full text']:\n keywords.append(word)\n \n else: \n fulltext = self.restitch_text(sectioned_text)\n for word in self.keyword_list:\n if word in fulltext:\n keywords.append(word)\n \n return keywords", "def getKeywords(tmdbKeywords):\n \n words = []\n if \"keywords\" in tmdbKeywords:\n for keyword in tmdbKeywords[\"keywords\"]:\n words += _format(keyword[\"name\"]).split()\n else:\n raise AttributeError(\"%s instance has no attribute keywords\" % tmdbKeywords) \n return words", "def keywords(self):\n defined_keywords = [\n ('allowempty_map', 'allowempty_map'),\n ('assertion', 'assertion'),\n ('default', 'default'),\n ('class', 'class'),\n ('desc', 'desc'),\n ('enum', 'enum'),\n ('example', 'example'),\n ('extensions', 'extensions'),\n ('format', 'format'),\n ('func', 'func'),\n ('ident', 'ident'),\n ('include_name', 'include'),\n ('length', 'length'),\n ('map_regex_rule', 'map_regex_rule'),\n ('mapping', 'mapping'),\n ('matching', 'matching'),\n ('matching_rule', 'matching_rule'),\n ('name', 'name'),\n ('nullable', 'nullable'),\n ('parent', 'parent'),\n ('pattern', 'pattern'),\n ('pattern_regexp', 'pattern_regexp'),\n ('range', 'range'),\n ('regex_mappings', 'regex_mappings'),\n ('required', 'required'),\n ('schema', 'schema'),\n ('schema_str', 'schema_str'),\n ('sequence', 'sequence'),\n ('type', 'type'),\n ('type_class', 'type_class'),\n ('unique', 'unique'),\n ('version', 'version'),\n ]\n found_keywords = []\n\n for var_name, keyword_name in defined_keywords:\n if getattr(self, var_name, None):\n found_keywords.append(keyword_name)\n\n return found_keywords", "def get_keywords(source_or_file):\n tree = get_ast(source_or_file)\n lister = KeywordLister().visit(tree)\n return lister.data", "def add_keywords(self, response: Response) -> list:\n return response.xpath(\"//ul[@class='term']/li/a/text()\").getall()", "def get_all_keywords(resource):\n keywords = []\n resource.populate()\n for res in [i for i in resource.imports.data if isinstance(i, robot.parsing.settings.Resource)]:\n keyword_file = os.path.abspath('{}/{}'.format(res.directory, res.name))\n if keyword_file not in processed:\n res_obj = ResourceFile(keyword_file)\n processed[keyword_file] = res_obj\n keywords += get_all_keywords(res_obj)\n for keyword in resource.keywords:\n print(keyword.name)\n keywords.append(tuple((keyword.source, keyword.name, keyword.args.value if keyword.args.value else [])))\n return keywords", "def get_meta_keywords(self):\n return self.get_meta_content(self.article.doc, \"meta[name=keywords]\")", "def get_keywords(self):\n all_keywords = []\n z_index = 0\n for zettel in self.lemma_tokens:\n keywords = []\n w_index = 0\n cur_zettel_dict = {}\n for word in zettel:\n cur_zettel_dict.setdefault(word[0], 0)\n cur_word_total_score = self.all_scores[z_index][w_index]\n if cur_zettel_dict[word[0]] > cur_word_total_score:\n w_index += 1\n continue\n else:\n cur_zettel_dict[word[0]] = cur_word_total_score\n w_index += 1\n cur_sorted = sorted(cur_zettel_dict.items(), key=lambda kv: kv[1], reverse=True)\n for i in range(self.keyword_n):\n keywords.append(str(cur_sorted[i]))\n z_index += 1\n all_keywords.append(keywords)\n return all_keywords", "def get_keywords(self, 
pattern=\"*\"):\n\n sql = \"\"\"SELECT collection.collection_id, collection.name,\n keyword.name, keyword.doc, keyword.args\n FROM collection_table as collection\n JOIN keyword_table as keyword\n WHERE collection.collection_id == keyword.collection_id\n AND keyword.name like ?\n ORDER by collection.name, keyword.name\n \"\"\"\n pattern = self._glob_to_sql(pattern)\n cursor = self._execute(sql, (pattern,))\n result = [(row[0], row[1], row[2], row[3], row[4])\n for row in cursor.fetchall()]\n return list(sorted(set(result), key=itemgetter(2)))", "def target_words(self) -> List[str]:\n return list(map(\n lambda w: self.spaces[w.lower()] \n if w.lower() in self.spaces else w.lower(), \n self.keywords\n ))", "def get_paper_keywords(tree):\n\tpath = '//table/tr/th[text() = \"Keywords:\"]/following-sibling::td/text()'\n\tkeywords = tree.xpath(path)\n\t# xpath returns a list with the keywords as a single string element separated by new lines, commas or semi-colons\n\t# Make this into a list of keywords\n\tif keywords:\n\t\t# Split on new lines, commas and semi-colons\n\t\tkeywords = re.split('[\\\\n,;]', keywords[0])\n\t\t# Remove trailing white space and empty strings\n\t\tkeywords = [kw.strip() for kw in keywords if kw]\n\n\treturn keywords", "def GetKeywords(self):\n kwlist = [CSS1_KEYWORDS , CSS_PSUEDO_CLASS]\n # 2.9 supports CSS3 so for 2.8 just add CSS3 keywords to the css2 list \n if wx.VERSION < (2, 9, 0, 0, ''):\n css2_kw = (CSS2_KEYWORDS[0], \" \".join((CSS2_KEYWORDS[1], CSS3_KEYWORDS[1])))\n kwlist.append(css2_kw)\n else:\n kwlist.append(CSS2_KEYWORDS)\n kwlist.append(CSS3_KEYWORDS)\n kwlist.append(PSEUDO_ELEMENTS)\n return kwlist", "def collect_keywords(kw_file: str\n ) -> List[str]:\n try:\n keywords_df = pd.read_excel(kw_file, sheet_name=0)\n keywords = list(keywords_df['Keywords'])\n except FileNotFoundError:\n raise Exception(\"[ERROR] The keywords file was not found! This is \"\n \"required for the program to run.\")\n except KeyError:\n raise Exception(\"[ERROR] The keywords file does not contain a \"\n \"'Keywords' column! 
The file must contain this column \"\n \"followed by a list of keywords.\")\n return keywords", "def list_keywords(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params=kwargs)", "def candidate_keywords(self):\n candidate_phrases = list()\n for sentence in self.sentences():\n candidate_sentence = re.sub(self.stopwords_regex(), '|', sentence)\n phrases = candidate_sentence.split(\"|\")\n for phrase in phrases:\n phrase = remove_white_spaces(phrase).lower()\n if phrase:\n candidate_phrases.append(phrase)\n return candidate_phrases", "def get_keywords(seq):\r\n if len(seq) = 0:\r\n return None\r\n freqs = {}\r\n for w in seq: \r\n if w not in freqs:\r\n\t freqs[w] = 1\r\n\telse\r\n\t freqs[w] += 1\r\n num_keys = len(freqs)\r\n res = []\r\n \r\n return res", "def keywords(self):\n from hubspot3.keywords import KeywordsClient\n\n return KeywordsClient(**self.auth, **self.options)", "def load_keywords():\n keywords = set()\n with open(os.path.join(BASE, \"data/keywords.txt\")) as fp:\n for line in fp:\n keywords.add(line.strip().lower())\n return keywords", "def parse_keywords(medline):\n keyword_list = medline.find(\"KeywordList\")\n keywords = list()\n if keyword_list is not None:\n for k in keyword_list.findall(\"Keyword\"):\n if k.text is not None:\n keywords.append(k.text)\n keywords = \"; \".join(keywords)\n else:\n keywords = \"\"\n return keywords", "def find_keywords(anchor, keywords=['']):\n rel_keywords = []\n href, content = parse_anchor(anchor)\n \n for keyword in keywords:\n kw = keyword.lower()\n if kw in href.lower() or kw in content.lower():\n rel_keywords.append(keyword)\n \n return rel_keywords", "def _generate_keywords(self):\n _keywords = [*self._lookup_opcodes_dir.keys(), *self._registers_list.keys()]\n for key in _keywords:\n self._keywords.extend(key.split(\" \"))\n return", "def get_keywords(self, webpage):\n \n try:\n return self.urls_keywords_dict[webpage]['keywords']\n except:\n return None", "def filter_keywords(self, keywords):\n\t\tself.keywords += self._coerce_list(keywords)", "def _get_keywords(self, title: str):\n # Prepare data\n keywords = set()\n stops = set(nltk.corpus.stopwords.words(\"english\"))\n stemmer = nltk.stem.SnowballStemmer(\"english\")\n ent_types = [\n \"PERSON\", \"ORGANIZATION\", \"FACILITY\", \"LOCATION\", \"DATE\",\n \"TIME\", \"GPE\", \"MONEY\",\n ]\n excluded_word_types = [\"RB\", \"IN\", \"PRP\"]\n\n # Tokenize and chunk words using NLTK\n tokens = nltk.tokenize.word_tokenize(title)\n positions = nltk.pos_tag(tokens)\n chunk = nltk.ne_chunk(positions)\n\n # Make a word list of keywords we want to add, that\n # are not part of our excluded word types.\n words = set()\n for pos in positions:\n word, word_type = pos\n if word.isalnum() and word_type not in excluded_word_types:\n words.add(word)\n\n # Add all entities to keyword list and remove them from\n # our remaining word set so they don't get added again\n # and stemmed later.\n for subtree in chunk.subtrees(filter=lambda t: t.label() in ent_types):\n for leaf in subtree.leaves():\n keywords.add(leaf[0])\n if leaf[0] in words:\n words.remove(leaf[0])\n\n # Add remaining words in list and stem them to base form,\n # stemming means we change words from e.g. 
\"eating\" to \"eat\".\n for word in words:\n if word not in stops:\n keywords.add(stemmer.stem(word))\n\n return sorted([keyword.lower() for keyword in keywords])", "def keywords_pattern():\n with open(\"keywords.txt\", 'r') as f:\n lines = [line.strip() for line in f if line.strip()]\n return set(lines)", "def getMetaKeywords(self, article):\n return self.getMetaContent(article.doc, \"meta[name=keywords]\")", "def get_meta_keywords(self, article):\r\n return self.get_meta_content(article.doc, \"meta[name=keywords]\")", "def get_keywords_for_movie(url):\n pass", "def get_keywords(prefix, file_path, blacklist):\n file_path = str(file_path).replace(prefix, '') # remove base_dir from file_path\n file_path = os.path.splitext(file_path)[0] # Only keep the part without extension\n file_path = str(file_path).lower()\n for bad_keyword in blacklist:\n file_path = file_path.replace(bad_keyword, ' ')\n file_path = re.sub(r'\\s+', ' ', file_path) # Replace multiple spaces to single one\n keywords = file_path.split(' ')\n keywords = [k for k in keywords if k]\n\n return keywords", "def get_article_keywords(article,\n keywords,\n preprocess_type=PreprocessWordType.LEMMATIZE):\n matches = set()\n for word in article.words:\n preprocessed_word = query_utils.preprocess_word(word,\n preprocess_type)\n if preprocessed_word in keywords:\n matches.add(preprocessed_word)\n return sorted(list(matches))", "def get_keywords(self, number=10):\n keyword = []\n node_weight = OrderedDict(sorted(self.node_weight.items(), key=lambda t: t[1], reverse=True))\n for i, (key, value) in enumerate(node_weight.items()):\n # print(key + ' - ' + str(value))\n keyword.append(key)\n if i > number:\n break\n return keyword", "def lookup_keywords(filename):\n keywords = []\n start_of_table = r'\\*+\\s+'\n start_of_kw_table = r'\\*+\\s+Keyword'\n in_kw_table = False\n f = open(filename, \"r\")\n for line in f.readlines():\n line = line.rstrip()\n if len(line) == 0 or line.startswith(\"#\"):\n continue # skip comments and blanks\n if re.match(start_of_kw_table, line):\n in_kw_table = True # table started\n continue\n if re.match(start_of_table, line) and not re.match(start_of_kw_table, line):\n in_kw_table = False # table ended\n continue\n if line.startswith(' '):\n continue # skip content rows\n if in_kw_table:\n keywords.append(line)\n f.close()\n return keywords", "def dehydrate_keywords(self, bundle):\n return map(str, bundle.obj.keywords.all())", "def keywords(self):\n return {\n \"unary\": {\n k: v[0] for k, v in self.unary_commands.items()\n },\n \"terminal\": {\n k: v[0] for k, v in self.terminal_commands.items()\n },\n \"binary\": {\n k: v[0] for k, v in self.binary_commands.items()\n },\n }", "def keywords(self, keywords):\n self._keywords = keywords", "def get_queryset(self):\r\n return Keyword.objects.all()", "def split_keywords(keywords):\n try:\n keywords = keywords.replace(u'\\u201c', '\"').replace(u'\\u201d', '\"')\\\n .replace(\"-\", \" \")\n\n except AttributeError:\n # In the event that keywords = nan\n return []\n\n if '\"' in keywords:\n # for handling key phrases\n final_set = []\n imperfect_set = map(lambda x: x.split(' \"'), keywords.split('\" '))\n # imperfect_set will contain a list of lists. 
Must break down\n\n for sublist in imperfect_set:\n for item in sublist:\n # clear out remaining quotations\n item = item.replace('\"', '').lower() \n # only add if not already there\n if item not in final_set: \n final_set.append(item)\n\n # we may still want individual components of key phrases\n # and permutations of words in those phrases\n if \" \" in item: \n phrase = item.split(\" \")\n if len(phrase) > 2:\n for ii in range(len(phrase) - 1):\n for jj in range(ii + 1, len(phrase)):\n word = \" \".join([phrase[ii], phrase[jj]])\n if word not in final_set:\n final_set.append(word)\n\n else:\n for word in phrase: \n # again, only if not already there\n if word not in final_set:\n final_set.append(word)\n\n else:\n final_set = keywords.split(\" \")\n\n return final_set", "def interestingWords(self):\n words = set([])\n for token in self.importantTokenList():\n if token.isStopWord() == False:\n words.add(token.text.lower())\n return words", "def get_corpus(keywords):\n\n if type(keywords) in [str,unicode]:\n keywords = [keywords]\n\n corpus = []\n for kw in keywords :\n corpus += get_wikipedia_text(kw,'en',summary=False)\n corpus += get_wikipedia_text(kw,'simple',summary=False)\n \n return corpus", "def get_keywords(text):\n tokens = [word.lower() for word in word_tokenize(text)]\n\n # tag words as verb, noun etc\n tagged_words = pos_tag(tokens)\n\n # retrieve list of boring words from file\n stopwords_file = os.path.join(BASE_DIR, 'data', 'stopwords.txt')\n with open(stopwords_file, 'r', encoding='utf-8') as f:\n stopwords = [line.rstrip(linesep) for line in f]\n \n #We don't want keywords to contain anything in this list\n forbidden = ['.',',',';',':','?','!','+',')','(','[',']','/','<','>','\"','©','1','2','3','4','5','6','7','8','9','0']\n\n # NLTK Chunking - detects noun phrases and phrases of form verb noun or adj noun\n patterns = \"\"\"NP: {<JJ>*<NN><NNS>}\n {<JJR><NNS>}\n {<JJ>*<NNS>}\n {<NN><NNS>} \n {<JJ><NNS>}\n {<JJ>*<NN>*}\n {<NN>*}\n {<NNS>*}\"\"\"\n chunker = RegexpParser(patterns)\n chunks = chunker.parse(tagged_words)\n\n #these are the phrases we want, as lists within a list\n validphrases = []\n for t in chunks.subtrees():\n if t.label() == 'NP':\n validphrases.append([x for x,y in t.leaves()])\n\n #turning lists within lists into actual noun phrases i.e [[radiation], [breast,cancer]] becomes [radiation, breast cancer]\n #sorry for my horrible code\n #trees suck\n lemmatizables = []\n for sublist in validphrases:\n lemmatizables.append(' '.join(sublist))\n\n lemmatizer = WordNetLemmatizer()\n lems = [lemmatizer.lemmatize(x) for x in lemmatizables]\n\n #removing stopwords after lemmatizinga, then removing anything containing punctuation or a number\n lems = filter(lambda lem: lem not in stopwords, lems)\n lems = filter(lambda lem: not any(char in lem for char in forbidden), lems)\n\n return tuple(lems)", "def extract_keywords(text, max_keywords=10):\n keywords = rake.apply(text)\n return \" ; \".join([item[0] for item in keywords[:max_keywords]]).strip()", "def keywords_rege(keywords):\n searches = {}\n for kw in keywords:\n searches[kw] = re.compile(r'\\b' + kw + r'\\b', re.IGNORECASE)\n return searches", "def _get_keywords_from_tasks(self, tasks):\n keywords = []\n for task in tasks:\n keywords.extend(tasks[task][\"categories\"])\n return tuple(sorted(set(keywords)))", "def set_keywords(self):\n\n if len(self.get_keywords()) == 0 and len(self.get_files()) > 0:\n self.keywords = self.files[0].get_parent()[\"title\"].split(\" \")\n for keyword in self.keywords:\n if 
str(keyword) in str(self.text):\n self.keywords = []", "def get_meta_keywords(self, language=None, fallback=True, version_id=None, force_reload=False):\n return self.get_title_obj_attribute(\"meta_keywords\", language, fallback, version_id, force_reload)", "def get_meta_keywords(self):\n mk = self.meta_keywords.replace(\"<title>\", self.title)\n return mk.replace(\"<short-text>\", self.short_text)", "def test_filtered_instrument_keywords():\n kw = []\n for ins in JWST_INSTRUMENTS:\n kw.append(mm.instrument_keywords(ins, caom=False)['keyword'].tolist())\n\n assert kw[0] != kw[1] != kw[2] != kw[3] != kw[4]", "def importantWords(self, ignoreSemanticTagList=[]):\n words = set([])\n for token in self.importantTokenList(ignoreSemanticTagList=ignoreSemanticTagList):\n words.add(token.text.lower())\n return words", "def get_blog_keywords(id):\n key = KEY_BLOG_KEYWORDS_PREFIX + str(id)\n keywords = RedisHelper.get_cache(key)\n if RedisHelper.is_cache_exist(key) is False:\n keywords = list(BlogHelper.get_blog(id).tags.all())\n RedisHelper.create_cache(key, keywords, RedisTimeOut.REDIS_TIMEOUT_1_DAYS)\n return keywords", "def get_search_terms(self):\n params = self.request.QUERY_PARAMS.get(\"search\", \"\")\n return params.replace(\",\", \" \").split()", "def keywords(self, keywords):\n\n self._keywords = keywords", "def searchGlossary(self,keyword):\n\t\twords = []\n\n\t\tfor letter in glossary:\n\t\t\tfor word in glossary[letter]:\n\t\t\t\tprint word.keys()[0]\n\t\t\t\tif keyword.lower() in word.keys()[0].lower():\n\t\t\t\t\twords.append(word)\n\n\t\treturn words", "def words(self) -> List[str]:\n return pulumi.get(self, \"words\")", "def words(self) -> List[str]:\n return pulumi.get(self, \"words\")", "def getwords(self):\n return self._aggroWords.keys()", "def keyword_extraction(file_content):\n\n # [question, question....]\n for key, value in file_content.items():\n seg, hidden = ltp.seg([key])\n # ner: [[('Nh', 2, 2)]]\n ner = ltp.ner(hidden)\n # keywords: [('PERSON', \"吴轩\")], tuple_item: ('Nh', 2, 2)\n keywords = [(tag_to_name[tuple_item[0]], to_string(seg[0][tuple_item[1]: tuple_item[2]+1])) for tuple_item in ner[0]]\n file_content[key].keywords = keywords\n\n return file_content", "def extract_keywords(text):\n gcloud_response = gcloud_syntax_extraction(text)\n logging.info(\"gcloud syntax response: %s\", gcloud_response)\n\n tokens_shortened = []\n for token in gcloud_response.tokens:\n part_of_speech_tag = enums.PartOfSpeech.Tag(token.part_of_speech.tag).name\n if part_of_speech_tag in constants.KEY_PARTS_OF_SPEECH:\n token_data = {'word': token.text.content, 'lemma': token.lemma,\n 'part_of_speech': part_of_speech_tag}\n tokens_shortened.append(token_data)\n\n response = {'lan': gcloud_response.language, 'tokens': tokens_shortened}\n\n return response", "def get_keywords_for_component(component, user_defined_keywords):\n output_keywords = []\n input_keywords = user_defined_keywords # initialize with the user defined keywords\n input_keywords += component.split('/') # split the component if there are multiple terms involved\n for input_keyword in input_keywords:\n output_keywords.append(input_keyword)\n word_list_split_by_space = input_keyword.split(' ')\n for word in extract_words_from_word_list_split_by_space(word_list_split_by_space):\n output_keywords.append(word)\n output_keywords += get_synonyms(word)\n output_keywords = list(set(output_keywords))\n return output_keywords", "def defaultKeywords(self, kwSet):\n return QsciLexerJava.keywords(self, kwSet)", "def 
get_webpage_keywords(self, response):\n tags = response.xpath('//*/meta[@property=\"keywords\"]/@content').extract_first()\n tags1 = response.xpath('//*/meta[@name=\"keywords\"]/@content').extract_first()\n if tags:\n return tags.strip()\n elif tags1:\n return tags1.strip()\n else:\n tags = response.xpath('//*/meta[@itemprop=\"keywords\"]/@content').extract_first()\n if tags:\n return tags.strip()\n else:\n return \"\"", "def test_caom_instrument_keywords():\n kw = []\n for ins in JWST_INSTRUMENTS:\n kw.append(mm.instrument_keywords(ins, caom=True)['keyword'].tolist())\n\n assert kw[0] == kw[1] == kw[2] == kw[3] == kw[4]", "def all_words( corpus, key, ignore_words = Ignore_words ) :\n return list(set(chain.from_iterable( (words(c,key,ignore_words) for c in corpus ) ) ) )", "def generate_keywords(self, target_text):\n logging.debug(\"Start generate keywords\")\n sentences = text_to_sentences(target_text)\n\n base_candidates = self._generate_base_candidates(target_text)\n\n temp_result = []\n for item in zip(base_candidates, base_candidates[1:]):\n temp_result += self._upward_grouping(*item)\n\n # convert str into objects\n temp_result = [Keyword(text=item) for item in temp_result]\n temp_result = self._mark_keyword_attributes(sentences, temp_result)\n\n # check overlapping, could be combined into one word\n return self._merge_keywords(sentences, temp_result)", "def extract_keywords(article_list, n=10):\n vectorizer = TfidfVectorizer()\n tfidf = vectorizer.fit_transform(article_list)\n words = vectorizer.get_feature_names()\n # check N > total_words_length or not\n maxn = tfidf.shape[1] if tfidf.shape[1] < n else n\n weights = tfidf.toarray()\n # sort by decrease order\n indices = map(lambda w: np.argsort(-w)[:maxn], weights)\n keywords = [list(map(lambda i: words[i], indy)) for indy in indices]\n return keywords", "def SetupKeywords(self):\n kwlist = u\" \".join(self._keywords)\n self.SetKeyWords(0, kwlist)", "def keywords(text):\r\n from operator import itemgetter # for sorting\r\n text = split_words(text)\r\n numWords = len(text) # of words before removing blacklist words\r\n text = [x for x in text if x not in stopWords]\r\n freq = Counter()\r\n for word in text:\r\n freq[word] += 1\r\n\r\n minSize = min(10, len(freq))\r\n keywords = tuple(freq.most_common(minSize)) # get first 10\r\n keywords = dict((x, y) for x, y in keywords) # recreate a dict\r\n\r\n for k in keywords:\r\n articleScore = keywords[k]*1.0 / numWords\r\n keywords[k] = articleScore * 1.5 + 1\r\n\r\n keywords = sorted(keywords.iteritems(), key=itemgetter(1))\r\n keywords.reverse()\r\n return dict(keywords)", "def keyword_headlines(self):\r\n\t\td = {}\r\n\r\n\t\tfor q in self.keyword_queryset:\r\n\t\t\td[q.content] = self.headlinekeyword_queryset.filter(keywordid = q.id)\r\n\r\n\t\treturn d", "def __iter__(self):\n for keyword in self.meta.findall(CN('meta:keyword')):\n yield keyword.text", "def get_dictionary_file_lines_for_keywords(self):\n keywords_iter = iter(self.keywords)\n next_keyword = keywords_iter.next()\n print(\"Searching for keyword {}\".format(next_keyword))\n\n self.dictionary_file.open_handle()\n result_lines = list()\n while next_keyword:\n line = self.dictionary_file.read_line_to_obj()\n if not line:\n print(\"Reached end of dictionary file\")\n break\n\n if line.term < next_keyword:\n continue\n elif line.term == next_keyword:\n print(\"Found postings list for term {}\".format(next_keyword))\n result_lines.append(line)\n\n try:\n next_keyword = keywords_iter.next()\n print(\"Searching for keyword 
{}\".format(next_keyword))\n except StopIteration:\n print(\"Finished searching for all keywords\")\n break\n\n return result_lines", "def keyword_list(request):\n if request.method == 'GET':\n keywords = get_list_or_404(Keyword, is_active=True)\n if request.GET.get('pagination'):\n pagination = request.GET.get('pagination')\n if pagination == 'true':\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(keywords, request)\n serializer = KeywordSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)\n else:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n else:\n serializer = KeywordSerializer(keywords, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def addkeywords(self, keywords):\n if isinstance(keywords, str):\n keywords = [keywords]\n self._kw.extend(keywords)", "def keyword_frequencies(self, limit = None):\r\n\t\tkey_head = self.keyword_headlines()\r\n\r\n\t\tfreq_list = []\r\n\t\tfor keyword in key_head:\r\n\t\t\tnumHeadlines = len(key_head[keyword])\r\n\t\t\tif limit:\r\n\t\t\t\tif numHeadlines > limit:\r\n\t\t\t\t\tnumHeadlines = limit\r\n\t\t\tfreq_list.append([keyword, numHeadlines])\r\n\r\n\t\treturn freq_list", "def get_search_keywords(testcase):\n crash_state_lines = testcase.crash_state.splitlines()\n # Use top 2 frames for searching.\n return crash_state_lines[:2]", "def populate_keywords(kwds, pkg_id):\n if not kwds:\n return\n for word in kwds:\n # @todo(Check data and use the special character-list\n # variable in the constants' file.)\n word = word.strip(\".:;=-,\\\"'\\n $_%{}()[]^*?& +#`\").lower()\n if len(word) <= 1 or (word in constants.STOP_WORDS) or \\\n has_special_chars(word):\n continue\n insert_keyword(word, pkg_id)", "def search(self, keywords: List[str], _type: str = \"\") -> List[Dict]:\n url = self.construct_url(keywords, _type)\n source = self.get(url)\n links = self.extract_links(source)\n return [{\"url\": url} for url in links]", "def get_headlines_with_keyword(self, kw):\r\n\t\tkey_head = self.keyword_headlines()\r\n\r\n\t\theadlines = set()\r\n\r\n\t\tfor headlinekw in key_head[kw]:\r\n\t\t\tcontent = headlinekw.headlineid.content\r\n\t\t\theadlines.add(content)\r\n\r\n\t\treturn list(headlines)", "def read_file(cls, filename):\n keywords = []\n with open(filename, newline='') as csvfile:\n for row in csv.reader(csvfile, skipinitialspace=True):\n for string in row:\n keywords.append(string)\n csvfile.close()\n\n return keywords", "def text_to_keywords(text):\n response = alchemyapi.keywords('text', text, {'sentiment': 1})\n ret_keys = []\n\n if response['status'] == 'OK':\n keywords = response['keywords']\n for keyw_chunk in keywords[:2]:\n # if len(keywords) > 0:\n # keyw_chunk = keywords[0]\n top_keyword = keyw_chunk['text'].encode('utf-8')\n # else:\n # top_keyword = ''\n ret_keys.append(top_keyword)\n\n return ret_keys # top_keyword\n\n # for keyword in response['keywords']:\n # print('text: ', keyword['text'].encode('utf-8'))\n # print('relevance: ', keyword['relevance'])\n # print('sentiment: ', keyword['sentiment']['type'])\n # if 'score' in keyword['sentiment']:\n # print('sentiment score: ' + keyword['sentiment']['score'])\n # print('')\n else:\n print('Error in keyword extaction call: ', response['statusInfo'])\n return [] # ''", "def get_keywords():\n \n #get all movies from db\n movies_df = movie_helper.get_movies_df() \n \n with tqdm(total=len(movies_df)) as pbar:\n for index, row in movies_df.iterrows(): \n \n #if imbdid exists use it to look up 
the API\n if (row['imdbId']):\n \n #get list of keywords and created delimted string\n movie = ia.get_movie(str(row['imdbId']), info='keywords')\n try:\n keywords = \",\".join(movie['keywords'])\n except:\n keywords = None\n \n #update the movies table in the db\n database_helper.update_data(\"movies\", update_params = {\"keywords\" : keywords}, select_params = {\"movieId\" : row[\"movieId\"]})\n pbar.update(1)", "def determine_keywords(self):\n\n split = dict()\n split['email_cc'] = re.compile(\"^\\s*CC[-_]?MAIL[:=]\\s*(.*)\")\n split['email_cc2'] = re.compile(\"^\\s*C[Cc][:=]\\s*(.*)\")\n split['fixed_in'] = re.compile(\"^\\s*FIXED[-_]?IN[:=]\\s*(.*)\")\n\n numeric = dict()\n numeric['bug_fixed'] = re.compile(\"^\\s*(?:BUGS?|FEATURE)[:=]\\s*(.+)\")\n numeric['bug_cc'] = re.compile(\"^\\s*CCBUGS?[:=]\\s*(.+)\")\n\n presence = dict()\n presence['email_gui'] = re.compile(\"^\\s*GUI:\")\n presence['silent'] = re.compile(\"(?:CVS|SVN|GIT|SCM).?SILENT\")\n presence['notes'] = re.compile(\"(?:Notes added by 'git notes add'|Notes removed by 'git notes remove')\")\n\n results = defaultdict(list)\n for line in self.commit.message.split(\"\\n\"):\n # If our line starts with Summary: (as it does when using Arcanist's default template) then strip this off\n # This allows for people to fill keywords in the Differential Summary and have this work smoothly for them\n line = re.sub(\"^Summary: (.+)\", \"\\g<1>\", line)\n\n # Start processing our keywords...\n for (name, regex) in split.iteritems():\n match = re.match( regex, line )\n if match:\n results[name] += [result.strip() for result in match.group(1).split(\",\")]\n\n for (name, regex) in numeric.iteritems():\n match = re.match( regex, line )\n if match:\n results[name] += re.findall(\"(\\d{1,10})\", match.group(1))\n\n for (name, regex) in presence.iteritems():\n if re.match( regex, line ):\n results[name] = True\n\n self.keywords = results", "def keys(self) -> List[str]:\n raise NotImplementedError", "def get_url_keywords(self, webpage):\n\n return {'url': self.get_url(webpage), 'keywords': self.get_keywords(webpage), 'name': webpage}" ]
[ "0.82068", "0.80030465", "0.80030465", "0.79815817", "0.7965522", "0.795927", "0.79124683", "0.7827855", "0.7802334", "0.7794272", "0.77290803", "0.7597001", "0.7496878", "0.74123514", "0.7399484", "0.73888963", "0.737007", "0.7306008", "0.7305014", "0.73011196", "0.7273646", "0.72724897", "0.7268493", "0.72314644", "0.7217067", "0.7145265", "0.7125484", "0.7044258", "0.701768", "0.7009621", "0.7008436", "0.7008121", "0.70002943", "0.6988243", "0.696225", "0.6943372", "0.6938068", "0.6888988", "0.68654823", "0.68163913", "0.67977595", "0.67763746", "0.6746251", "0.6744308", "0.6740857", "0.6726023", "0.6703974", "0.6673692", "0.6660761", "0.6633595", "0.6626641", "0.65508467", "0.6529824", "0.6516249", "0.65065414", "0.6465195", "0.64449006", "0.6435031", "0.6422645", "0.64039624", "0.6394115", "0.638084", "0.6349027", "0.6321653", "0.63107145", "0.6294171", "0.6280715", "0.62740964", "0.6269976", "0.62654227", "0.625701", "0.625701", "0.6242344", "0.623378", "0.61881864", "0.6184901", "0.6172304", "0.6164216", "0.61529", "0.6147865", "0.6138522", "0.6137641", "0.61208284", "0.6096497", "0.6090301", "0.60501474", "0.604741", "0.6034622", "0.6019145", "0.59750444", "0.59661233", "0.59582376", "0.59543395", "0.5948926", "0.5945303", "0.5943221", "0.5942725", "0.59401804", "0.59383625", "0.5918228" ]
0.8675625
0
Returns a list of lists [word, number of headlines]
def keyword_frequencies(self, limit = None):
    key_head = self.keyword_headlines()

    freq_list = []
    for keyword in key_head:
        numHeadlines = len(key_head[keyword])
        if limit:
            if numHeadlines > limit:
                numHeadlines = limit
        freq_list.append([keyword, numHeadlines])

    return freq_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_headlines(self):\n headlines = re.findall(r\"^\\.\\.\\.(.*?)\\.\\.\\.[ ]?\\n\\n\", self.unixtext,\n re.M | re.S)\n headlines = [\" \".join(h.replace(\"...\",\n \", \").replace(\"\\n\", \" \").split())\n for h in headlines]\n return headlines", "def get_tokens_with_heads(self, snlp_doc):\n tokens = []\n heads = []\n offset = 0\n for sentence in snlp_doc.sentences:\n for token in sentence.tokens:\n for word in token.words:\n # Here, we're calculating the absolute token index in the doc,\n # then the *relative* index of the head, -1 for zero-indexed\n # and if the governor is 0 (root), we leave it at 0\n if word.head:\n head = word.head + offset - len(tokens) - 1\n else:\n head = 0\n heads.append(head)\n tokens.append(word)\n offset += sum(len(token.words) for token in sentence.tokens)\n return tokens, heads", "def get_headlines(self, kw = None):\r\n\t\tif kw:\r\n\t\t\treturn self.get_headlines_with_keyword(kw)\r\n\t\telse:\r\n\t\t\treturn self.get_all_headlines()", "def words(self, min_word_length=0):\n\n word_tokenizer = nltk.RegexpTokenizer(r'\\b[^\\s]+\\b')\n headline_string = self.headline_string.lower().replace(\"’\", \"'\")\n return [word for word in word_tokenizer.tokenize(headline_string) if len(word) >= min_word_length]", "def all_words(self, min_word_length=0):\n return [word for headline in self.headlines for word in\n headline.words(min_word_length=min_word_length)]", "def get_headlines(driver,site,URL_exclusions):\r\n links = get_all_links(driver,site,URL_exclusions)\r\n headlines = []\r\n n=0\r\n for link in links:\r\n driver = make_driver_obj() #get_all_links quits driver when finished.\r\n try:\r\n while True:\r\n try:\r\n driver.get(link) #No need to accept cookies to don't need return_search\r\n break\r\n except:\r\n continue\r\n except: #If we can't open the URL for any reason.\r\n driver.quit()\r\n continue\r\n n += 1\r\n headline = get_headline(driver)\r\n if headline != '':\r\n headlines.append(headline) #Only append if able to identify headline text\r\n #print(n)\r\n #print(headline)\r\n #print()\r\n driver.quit()\r\n return headlines", "def count_words(all_lines):\n words = {}\n for line in all_lines:\n for word in line:\n if word in words:\n words[word] += 1\n else:\n words[word] = 1\n\n new_words = sorted(words.items(), key=operator.itemgetter(1), reverse=True)\n\n return [item[0] for item in new_words]", "def lemma_headwords(self):\n new_var = 'lemma_headword'\n lemma_heads = [clx._lemmas[i]['Head'] for i in xrange(len(clx._lemmas))]\n has_item = self.compare_items(lemma_heads)\n new_column = []\n if False in has_item:\n self._warning_msg('lemma_headword', lemma_heads)\n for record, exists in zip(self._dict, has_item):\n if exists:\n lemma_id = clx.wordform_lookup(record)[0].IdNumLemma\n lemma_head = clx.lemma_by_id(lemma_id).Head\n else:\n lemma_head = None\n new_column.append(lemma_head)\n self._append_column(new_column, new_var)", "def get_all_headlines(self):\r\n\t\tlist_vals = list(self.keyword_headlines().values())\r\n\t\tuniq_headlines = set()\r\n\t\tfor list_val in list_vals:\r\n\t\t\tfor headlineobj in list_val:\r\n\t\t\t\tuniq_headlines.add(headlineobj.headlineid.content)\r\n\r\n\t\treturn list(uniq_headlines)", "def readTotitle(fh):\n preLines = []\n while True:\n l = fh.readline().strip()\n if l.startswith('>'):\n return (preLines,l)\n elif l == '':\n return preLines,None\n else:\n preLines.append(l)", "def harfbuzz_width(lines, hb_font):\n\tout = []\n\tbuf = hb.buffer_create ()\n\tfor text in lines:\n\t\thb.buffer_clear_contents 
(buf)\n\t\thb.buffer_add_utf8 (buf, tobytes(text), 0, -1)\n\t\thb.buffer_guess_segment_properties (buf)\n\n\t\thb.shape (hb_font, buf, [])\n\n\t\tpositions = hb.buffer_get_glyph_positions (buf)\n\t\tw = sum(pos.x_advance for pos in positions)\n\t\tout.append(w)\n\treturn out", "def num_of_words(line, context):\n return [('num_of_word', len(line.txt.split()))]", "def wordize(lines):\n parser = Parser()\n tokenizer = Tokenizer()\n word_ctr = WordCounter()\n words = []\n for l in lines :\n if (l.rstrip()) :\n statement = parser.parseSentence(l, int(word_ctr))\n token_lists = tokenizer.tokenizeStatement(statement, int(word_ctr))\n for l in token_lists :\n if len(l) > 0 :\n words.append(l)\n word_ctr += 1\n return words", "def get_headlines_with_keyword(self, kw):\r\n\t\tkey_head = self.keyword_headlines()\r\n\r\n\t\theadlines = set()\r\n\r\n\t\tfor headlinekw in key_head[kw]:\r\n\t\t\tcontent = headlinekw.headlineid.content\r\n\t\t\theadlines.add(content)\r\n\r\n\t\treturn list(headlines)", "def top_words(name):\n row = wiki[wiki['name'] == name]\n word_count_table = row[['word_count']].stack('word_count', new_column_name=['word','count'])\n return word_count_table.sort('count', ascending = False)", "def get_word_counts(slides) -> List[int]:\n word_count = []\n for slide in slides:\n # print(f\"========== slide {len(text_count)+1} ========== [{slide.slide_layout.name}]\")\n words = 0\n # find all text\n for shape in slide.shapes:\n if not shape.has_text_frame:\n continue\n # print(shape.name)\n for paragraph in shape.text_frame.paragraphs:\n for run in paragraph.runs:\n # print(\" \" + run.text)\n words += len(run.text.split())\n word_count.append(words)\n return word_count", "def test_headlines_predecessors(self):\n headline_str = \"* One\\n** Two\\n*** Three\\n** Two\\n*** Three\\n* One\"\n\n doc = parser.parse(headline_str)\n self.assertEqual(len(doc.children()), 2)\n\n h1_1 = doc.children()[0]\n h1_2 = doc.children()[1]\n\n self.assertEqual(len(h1_1.children), 2)\n self.assertEqual(len(h1_2.children), 0)\n\n h2_1 = h1_1.children[0]\n h2_2 = h1_1.children[1]\n\n self.assertEqual(len(h2_1.children), 1)\n self.assertEqual(len(h2_2.children), 1)", "def gather_headlines(urls):\n pass", "def first_words_func():\n return_list = []\n for lyric in lyrics:\n for line in lyric.split(\"\\n\"):\n return_list.append(line.split(\" \")[0])\n return (return_list)", "def get_negative_headlines(headlines,words):\r\n negative_headlines = []\r\n for headline in headlines:\r\n for word in words:\r\n if headline.lower().find(word) != -1: #If particular word is found in lowercased headline.\r\n negative_headlines.append(headline)\r\n break #Stop iterating through words when we have found one negative word.\r\n return negative_headlines", "def get_doc_title_list(clean_corpus):\n doc_title_list = []\n for content in clean_corpus:\n doc_title_list.append(content[0])\n num_docs = len(doc_title_list)\n return doc_title_list , num_docs", "def _counter(title_list):\n t = Tokenizer()\n words_count = defaultdict(int)\n words = []\n for title in title_list:\n tokens = t.tokenize(title)\n for token in tokens:\n pos = token.part_of_speech.split(',')[0]\n if pos == '名詞':\n words_count[token.base_form] += 1\n words.append(token.base_form)\n return words_count, words", "def word_count(self):\n print(self.words())\n return len(self.words())\n #count = 0\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # count += len(wordslst)\n #return count\n #joined_string = ''.join(self.lines)\n 
#for word in joined_string:\n # if word != ' ' and word != '\\n' and word != '\\t':\n # count += 1\n #print('READ ME ––––––––––', self.lines)\n #print(joined_string)\n #print(line)\n #print(wordslst)\n #print(count)", "def test_headlines_successors(self):\n headline_str = \"* First level\\n** Second level\\n*** Third level\"\n doc = parser.parse(headline_str)\n self.assertEqual(len(doc.children()), 1)\n\n h1 = doc.children()[0]\n self.assertEqual(len(h1.children), 1)\n\n h2 = h1.children[0]\n self.assertEqual(len(h2.children), 1)\n\n h3 = h2.children[0]\n self.assertEqual(len(h3.children), 0)", "def wordCount( aList ):\n return len( aList )", "def get_hypernyms(word):\n syn = wn.synsets(word)\n hnyms = []\n for h in syn[0].hypernyms():\n hnyms.append({\n \"lemmas\": h.lemma_names(),\n \"d\": h.definition(),\n \"pos\": h.pos(),\n \"id\": h.name()\n })\n return hnyms", "def printResults(listWords):\n width = 0\n for word in listWords:\n if len(word.name) > width:\n width = len(word.name)\n for word in listWords:\n lstring = str(word.listOfLines).replace('[','').replace(']','')\n print '%s: %d times, lines: %s' % (word.name.rjust(width), \n word.occurence, lstring)", "def reddit_headlines(reddit):\n\n # Set metadata to make request:\n url = \"https://www.reddit.com/r/{}/.json?limit=10\".format(reddit)\n headers = {'User-Agent': '{} Reddit headlines'.format(reddit)}\n\n # Consume Reddit's API to gather info:\n html = requests.get(url, headers=headers)\n\n # If status code is OK:\n if html.status_code == requests.codes.ok:\n # Parse resonse:\n info = json.loads(html.content.decode('utf-8'))\n # pprint(info)\n\n # Get relevant info:\n child = info['data']['children']\n titles = [unidecode(elem['data']['title']) for elem in child]\n titles = \"... \".join([title for title in titles])\n else:\n titles = None\n\n return titles", "def all_headlines(html_root_node):\n pass", "def readTotitle(fh, titleChar):\n preLines = []\n while True:\n l = fh.readline().strip()\n if l.startswith(titleChar):\n return (preLines,l)\n elif l == '':\n return preLines,None\n else:\n preLines.append(l)", "def wcount(lines, topn):\n word = ''\n for i in lines:\n if 65<=ord(i) and ord(i)<=90:\n word = word + i \n elif 97<=ord(i) and ord(i)<=122:\n word = word + i\n else:\n word = word + ' ' \n word = word.split()\n #提取不重复的单词\n alreadyknown = []\n for m in word:\n if m not in alreadyknown:\n alreadyknown.append(m)\n #分别数数,排序,建构字典\n empty = []\n final = {}\n final2 = {}\n for j in alreadyknown:\n number = icount(word,j)\n final[j]=number\n final2[str(number)]=j\n empty.append(number)\n empty.sort()\n empty.reverse()\n last_step = empty[:10]\n #通过数字找到对应word\n last_str = ''\n for y in last_step:\n z = final2[str(y)]\n last_str += z + \"\\t\" + str(y) + \"\\n\"\n return last_str", "def test_headlines_samelevel(self):\n headline_str = \"* One\\n** Two\\n** Two\\n**\\\n Another one\"\n doc = parser.parse(headline_str)\n self.assertEqual(len(doc.children()), 1)\n\n h1 = doc.children()[0]\n self.assertEqual(len(h1.children), 3)", "def wcount(lines, topn=10):\n words=lines.lower()\n words=words.replace('.', '')\n words=words.replace(',', ' ')\n words=words.replace('!', ' ')\n words=words.replace('?', ' ')\n words=words.replace(':', ' ')\n words=words.replace('_', ' ')\n words=words.replace('\"', ' ')\n words=words.replace(\"'\", ' ')\n words=words.replace('(', ' ')\n words=words.replace(')', ' ')\n words=words.replace('[', ' ')\n words=words.replace(']', ' ')\n words=words.replace('-', ' ')\n words=words.replace(';', ' ')\n 
words=words.replace('\"', ' ')\n words=words.replace('*', ' ')\n lst=words.split(' ')\n lst2=list(set(lst))\n lst2.remove('')\n dic={}\n for i in lst2:\n dic[i]=lst.count(i)\n wds=list(dic.keys())\n numbers=list(dic.values())\n numbers2=sorted(numbers, reverse=True)\n for k in range(topn):\n m=numbers.index(numbers2[k])\n print(\"%-15s%-5d\"%(wds[m],numbers2[k]))", "def column_headlines(self):\n elements = self._selenium.find_elements_by_xpath(\n '//div[@id=\"content\"]/table/thead/tr/th/a')\n return [x.text for x in elements]", "def title_words(self):\n\n if self._title_words == []:\n for s in self.title():\n for w in s.split():\n self._title_words.append(w)\n\n return self._title_words", "def all_headlines_from(url):\n pass", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500, hash_function_2)\n\n # This block of code will read a file one word at a time and\n # put the word in `w`. It should be left as starter code.\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n # convert word to lowercase to avoid inconsistent hash values\n # due to different cases of the same word.\n w = w.lower()\n\n # check if the current word already exists as a key\n if w in keys:\n current_count = ht.get(w) # fetch the current count for that word\n current_count += 1 # increment count by one\n ht.put(w, current_count) # update value for the key\n else:\n # word does not exist in hash map\n keys.add(w) # add current word to keys set\n ht.put(w, 1) # insert key into hash map with value of 1\n\n # fetch unsorted list of tuples from parsed data\n word_count_list = compile_list(ht, keys)\n\n # sort word count tuple list\n word_count_list = word_count_sort(word_count_list)\n\n # initialize and fill final word list\n final_list = []\n\n for index in range(0, number):\n final_list.append(word_count_list[index])\n\n return final_list", "def getHeadParts(self):\n return self.headParts", "def GenomeHeadItems(genome_class):\n\tgc = genome_class()\n\treturn (len(gc.headers().split(',')),gc.genome_length())", "def getTWordList(text):\n\ttmpwordlist = string.split(text)\n\twordlist = [ [] ]\n\tpos = 0\n\tfor i in range(len(tmpwordlist)):\n\t\tword = getBrownWords(tmpwordlist[i])\n\t\tword[0] = puncTrim(word[0])\n\t\tif len(word[0]) > 0:\n\t\t\twordlist[pos].append(word)\n\t\telse:\n\t\t\tpos += 1\n\t\t\twordlist.append([])\n\treturn wordlist", "def get_slide_analytics_new(slides) -> List[int]:\n word_count = []\n for slide in slides:\n print(slide)\n words = 0\n for shape in slide.shapes:\n if not shape.has_text_frame:\n continue\n print(shape.name)\n for paragraph in shape.text_frame.paragraphs:\n for run in paragraph.runs:\n print(\" \" + run.text)\n words += len(run.text.split())\n word_count.append(words)\n return word_count", "def get_word_list(file_name, n):\n f = open(file_name, 'r')\n text = f.read()\n words = re.compile('\\w+').findall(text)\n return get_top_n_words(words, n)", "def wcount(lines, topn=10):\n '''a=[]\n for line in lines:\n word = line.strip()\n a.append(word)\n def histogram(s):\n d = dict()\n for i in s:\n if i in d:\n d[i]+=1\n else:\n d[i]=1\n return d'''\n def process_line(lines,diction):\n lines = lines.replace('-',' ')\n for word in lines.split():\n word=word.strip(string.punctuation+string.whitespace)\n word.lower()\n diction[word]=diction.get(word,0)+1\n\n def process_file(lines):\n diction = {}\n process_line(lines,diction)\n return diction\n diction=process_file(lines)\n x=list(diction.values())\n x.sort()\n x.reverse()\n count = 
0\n for i in range(topn):\n for key in list(diction.keys()):\n if diction[key]==x[i] and count<topn:\n print(\"%s %d\"%(key,diction[key]))\n count +=1\n del diction[key]\n pass", "def wcount(lines, topn = 10):\n global worddict\n worddict = {}\n # record words each line by each\n linestr = lines.readline().decode() \n while linestr:\n record(linestr)\n linestr = lines.readline().decode()\n \n # sort the worddict to construct a wordlist\n wordlist = sorted(worddict.items(),\\\n key=lambda x:x[1],reverse = True)\n \n # get all words if lenth is less than number\n print(' '*3+'Word'.ljust(30),'Times'.center(10))\n for num in range(min(len(wordlist),topn)):\n print(' '*3+wordlist[num][0].ljust(30),\\\n str(wordlist[num][1]).center(10))", "def getEntryNames(self,lines):\n lines = self.stripText(lines)\n #check if the first line is a count, ignore if it is\n try:\n linecount = parseNum(lines[0])\n lines.pop(0)\n except ValueError:\n pass\n return lines", "def get_head_dependents(sentence: str) -> List[str]:\n sentence = re.sub(r'\\s+', ' ', sentence)\n doc = nlp(sentence)\n dep = [token.dep_ for token in doc]\n\n # Get list of compounds in doc\n compounds = [token for token in doc if token.dep_ == 'compound']\n\n # Identifies roots and direct objects\n for token in compounds:\n if token.head.dep_ == 'dobj':\n dep[token.i] = 'dobj'\n elif token.head.dep_ == 'ROOT':\n dep[token.i] = 'ROOT'\n\n return [token.text for token in doc if dep[token.i] in ('ROOT', 'dobj')]", "def get_all_headline_data():\n\twebsites = database.get_website_URLs()\n\tall_headlines_arr = []\n\tfor curr_elt in websites:\n\t\tcurr_website = curr_elt[0]\n\t\tsource = curr_elt[1]\n\t\tcurr_headline_arr = get_headline_data(curr_website, source)\n\t\tall_headlines_arr.append(curr_headline_arr)\n\treturn all_headlines_arr", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500,hash_function_2)\n\n # This block of code will read a file one word as a time and\n # put the word in `w`. 
It should be left as starter code.\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n # set up index for hash map\n key = w.lower()\n hash = ht._hash_function(key)\n hash_index = hash % ht.capacity\n cur_bucket = ht._buckets[hash_index]\n new_node = cur_bucket.head\n # if key already exists in hash map, find and increment value\n if ht.contains_key(key):\n while new_node is not None:\n if new_node.key == key:\n new_node.value = new_node.value + 1\n new_node = new_node.next\n # else, add key to hashmap with value of 1\n else:\n cur_bucket.add_front(key, 1)\n # make empty list\n list = []\n # add all buckets to list as tuples\n for i in range(ht.capacity):\n bucket = ht._buckets[i]\n if bucket.head is not None:\n new_node = bucket.head\n while new_node is not None:\n list.append((new_node.key, new_node.value))\n new_node = new_node.next\n # Sort list in reverse by key value (word count)\n # Source: https://www.geeksforgeeks.org/python-program-to-sort-a-list-of-tuples-by-second-item/\n list.sort(key = lambda x: x[1], reverse=True)\n # Return list from 0 to user number\n return(list[0:number])", "def keyword_headlines(self):\r\n\t\td = {}\r\n\r\n\t\tfor q in self.keyword_queryset:\r\n\t\t\td[q.content] = self.headlinekeyword_queryset.filter(keywordid = q.id)\r\n\r\n\t\treturn d", "def heading_count(self, phrase,char='~'):\r\n count = 0\r\n for x in phrase:\r\n if x != char:\r\n break\r\n count+=1\r\n return count,phrase[count:]", "def get_keywords(self):\r\n\t\treturn list(self.keyword_headlines().keys())", "def main ():\n fio = FileIo(\"../input2.txt\")\n text = fio.getInput()\n p = re.compile(r'#?\\d[\\s\\.]?[\\s]?')\n out = filter(None, p.split(text))\n #print out[2]\n #print len(out)\n wc = 0\n\n for s in out:\n text = nltk.word_tokenize(s)\n wc += wordCount( text )\n print wc", "def phrase_extent_for_head(self, tokens, head_index):\n begin = tokens[head_index].text_begin\n end = begin + len(tokens[head_index].text_content)\n for child in self.dependents(tokens, head_index):\n child_begin, child_end = self.phrase_extent_for_head(tokens, child)\n begin = min(begin, child_begin)\n end = max(end, child_end)\n return (begin, end)", "def hypernym(self, sense=None):\n s = self._synset(self.text)\n\n if not s:\n return []\n\n hyper = s.hypernyms()\n\n results = list()\n for h in hyper:\n results.append(h.lemma_names())\n\n if not sense:\n return results\n\n return results[:sense]", "def get_number_of_synonyms() -> List[int]:\n\n return [len(syns) for syns in wiki_data[\"synonyms\"]]", "def make_word_list(start, lines, excluded):\r\n words = []\r\n for line in lines:\r\n word = line.rstrip()\r\n if len(word) == len(start):\r\n if (word == start) or (word not in excluded):\r\n words.append(word)\r\n return words", "def getNumberTerms(content): \n return Counter(getTerms(content))", "def words(self):\n punctuation = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n lst = []\n for lines in self.lines:\n words = lines.split(' ')\n for word in words:\n no_punc = ''\n for c in word:\n if c not in punctuation:\n no_punc += c.lower()\n if no_punc != '' and no_punc != '\\n':\n lst.append(no_punc.strip('\\n'))\n return lst\n #no_punc += word.lower()\n #for word in no_punc.split(' ')[:-1]:\n #for word in no_punc:\n # lst.append(word)\n #line = lines.strip(os.linesep) # strips away spaces, \\t (tabs), and \\n (new-lines/enter)\n #print(no_punc)\n #print(lst)", "def findLegHeaders(words, header, how='match'):\n locs = []\n for i, line in enumerate(words):\n match = 
header.match(line)\n if match is not None:\n locs.append(i)\n\n return locs", "def get_headings(self):\n return self.headings", "def test_headlines_predefined(self) -> None:\n for headline in self.report.headlines:\n if not self.rules.get_headline_rules(headline.name):\n headlines = [headline.name for headline in self.rules.headlines]\n suggestion, _ = process.extractOne(\n headline.name, headlines, scorer=fuzz.partial_ratio\n )\n self.add_error(\n f\"{headline.name} är inte en valid rubrik. \"\n f\"Rättningsförlsag: {suggestion}.\",\n headline=headline,\n )\n elif re.search(\"\\\\W{1,}\", headline.name, re.I):\n self.add_error(\n f\"Rubriken {headline.name} innehåller tecken som inte är \"\n \"alfanumeriska vilket inte är tillåtet för en rubrik.\",\n headline=headline,\n )", "def get_headline_search(query):\n query = query.replace(' ',\"\")\n category=\"\"\n get_headlines_url = 'https://newsapi.org/v2/top-headlines?category={}&query={}&language=en&apiKey={}'.format(category,query,api_key)\n headlines_results = []\n with urllib.request.urlopen(get_headlines_url) as url:\n get_headlines_data = url.read()\n get_headlines_response = json.loads(get_headlines_data)\n if get_headlines_response['articles']:\n headlines_result_list=get_headlines_response['articles']\n for headline in headlines_result_list:\n headlines_results.append(headline)\n return headlines_results", "def word_intersection() -> list:\r\n\r\n def count_words(title_pair: np.array) -> float:\r\n \"\"\"Function calculates number of unique words present\r\n in both titles and ratio of that number to the union of unique words.\r\n :param title_pair: Array containing two titles\r\n :return: Ratio of intersecting words to union of unique words\r\n \"\"\"\r\n title_1, title_2 = title_pair\r\n # Transform into sets of words\r\n title_1 = set(title_1.split())\r\n title_2 = set(title_2.split())\r\n # Divide length of intersection by length of union\r\n ratio = len(title_1.intersection(title_2)) / len(title_1.union(title_2))\r\n return ratio\r\n\r\n # Find titles for each pair in the current chunk by their indexes\r\n tmp_df = pd.DataFrame()\r\n tmp_df['title_1'] = data.loc[pairs.loc[:, 'idx_1'].values, 'title'].values\r\n tmp_df['title_2'] = data.loc[pairs.loc[:, 'idx_2'].values, 'title'].values\r\n\r\n # Process title pairs in current chunk and add results to the list\r\n scores = [result for result in map(count_words, tmp_df[['title_1', 'title_2']].values)]\r\n\r\n return scores", "def count_words(subreddit, word_list):\n word_list = [str.lower() for str in word_list]\n\n my_list = get_hot_list(subreddit)\n my_dict = {}\n\n for word in word_list:\n my_dict[word] = 0\n try:\n for title in my_list:\n title_split = title.split(\" \")\n\n for iter in title_split:\n for iter_split in word_list:\n if iter.lower() == iter_split.lower():\n my_dict[iter_split] += 1\n\n for key, val in sorted(my_dict.items(), key=lambda x: x[1],\n reverse=True):\n if val != 0:\n print(\"{}: {}\".format(key, val))\n except Exception:\n return None", "def split (l):\n segs = l.strip().split ('\\t')\n label = segs [-1]\n words = segs [:-1]\n return words, label", "def get_head_pos( head, ngram ):\n try:\n tokens = ngram.split( ' ' )\n return str([ i for i, t in enumerate( tokens ) if t.startswith( head + \"/\" )][0] + 1 )\n except ValueError:\n return None", "def get_word_list(file_name, to_skip_or_not_to_skip):\n fin = open(file_name) #opening file\n histogram={} \n if to_skip_or_not_to_skip == True: #if I want to skip the header this is set to True\n 
skip_first_part(fin)\n for line in fin: #runs through lines of book file\n line = line.replace(\"-\",\" \") #takes out dashed, underscroes, numbers, whitespaces, and punctuation\n line = line.replace(\"_\",\" \")\n to_remove = string.punctuation + string.whitespace + '0123456789' \n for word in line.split():\n word = word.strip(to_remove) #running through all words in each line \n if word == 'God' or 'Lord':\n pass\n else:\n word = word.lower()\n histogram[word] = histogram.get(word, 0)+1\n return histogram", "def read_words() -> List[str]:\n word_count = int(sys.stdin.readline())\n words = [None] * word_count\n print('words:')\n for i in range(0, word_count):\n words[i] = sys.stdin.readline().rstrip()\n print(words[i])\n print()\n return words", "def word_counter(list_) -> list:\n from operator import itemgetter\n _dict = dict()\n for word in list_:\n if word in _dict:\n _dict[word] = _dict[word] + 1\n else:\n _dict[word] = 1\n return sorted(_dict.items(), key=itemgetter(1), reverse=True)", "def get_tokenizer_result(blob):\n return list(blob.words)", "def get_lexicons(n_words):\r\n lexicons = []\r\n for words in n_words:\r\n lexicons.extend(words)\r\n return list(set(lexicons))", "def tokenize(self, path):\n dropped = 0\n with open(path, 'r') as f:\n linecount = 0\n lines = []\n for line in f:\n linecount += 1\n if self.lowercase:\n words = line[:-1].lower().strip().split(\" \")\n else:\n words = line[:-1].strip().split(\" \")\n if len(words) > self.maxlen:\n dropped += 1\n continue\n words = ['<sos>'] + words\n words += ['<eos>']\n # vectorize\n vocab = self.dictionary.word2idx\n unk_idx = vocab['<oov>']\n indices = [vocab[w] if w in vocab else unk_idx for w in words]\n lines.append(indices)\n\n print(\"Number of sentences dropped from {}: {} out of {} total\".\n format(path, dropped, linecount))\n return lines", "def length(word):\n list=[]#set up a new list\n for i in range(0,len(word)):\n list.append(len(word[i]))#count the length of each word\n print(list)", "def read_header(datafile):\n\thead = []\n\tf = open(datafile,'r')\n\tfor i,line in enumerate(f):\n\t\tif i is 10: break\n\t\thead += [line]\n\tf.close()\n\treturn head", "def wcount(lines, topn):\n l = re.split('[.,:-^(){}?\"\\n\\r!;\\' /&#*@_]',lines)#将lines里的单词分隔,放入列表l\n statistics = {}\n for i in l:\n if i not in statistics:\n statistics[i] = 1\n else:\n statistics[i] = statistics[i] + 1 #用字典统计单词出现的次数\n lis = sorted(statistics.items(),key = lambda x:x[1],reverse = True) #将单词出现的次数由大到小排序\n if topn > len(lis):#超出单词种类数,输出全部结果\n dic = dict(lis[1:]) \n else: #否则输出想要的个数\n dic = dict(lis[1:topn+1])\n for k in dic:\n print(str(k) + \" \" + str(dic[k])) #将字典以一列key,一列对应的value的形式输出\n pass", "def different_words(hist):\n return len(hist)", "def different_words(hist):\n return len(hist)", "def count_words(data):\n return np.array([len(text.split()) for text in data]).reshape(-1, 1)", "def BuildHeadList(all_file_contents):\n head_list = []\n list_all_file_contents = (all_file_contents)\n for line in list_all_file_contents: \n if line[0:4] != 'ATOM':\n head_list.append(line)\n else:\n break\n\n return head_list", "def countWords(text):\r\n\r\n\tlistOfWord = []\r\n\tlistOfFrequency = []\r\n\r\n\tfor word in text:\t\t\t\t\t \t# menghitung frekuensi kata\r\n if word == '':\r\n pass\r\n elif word not in listOfWord:\t\t\t\t\t# menyimpan kata ke dalam list\r\n listOfWord.append(word)\r\n listOfFrequency.append(1)\r\n else:\r\n index = listOfWord.index(word)\r\n listOfFrequency[index] = listOfFrequency[index] + 1 # menambah frekuensi kata yang 
sudah ada\r\n\r\n\r\n\tlst = [listOfWord, listOfFrequency]\r\n\r\n\treturn lst", "def get_top_words(self, label, n):\n score_list = []\n if('sod' in label):\n for term in self.vocab:\n score = self.cond_prob_sod[term] / self.cond_prob_pop[term]\n score_list.append((score,term)) \n else:\n for term in self.vocab:\n score = self.cond_prob_pop[term] / self.cond_prob_sod[term]\n score_list.append((score,term))\n score_list = sorted(score_list, key=lambda x:x[0],reverse=True)[:n]\n return score_list \n pass", "def get_lines(words, index):\n try:\n occurency = []\n new_list = []\n try:\n for key in words:\n for line in index[key]:\n occurency.append(line)\n for i in occurency:\n if occurency.count(i) == len(words) and i not in new_list:\n new_list.append(i)\n except:\n pass\n return new_list\n except:\n print(\"Error get_lines\")", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500, hash_function_2)\n\n # This block of code will read a file one word at a time and\n # put the word in `w`\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n current_word = w.lower()\n #get a count for current word\n current_count = ht.get(current_word)\n if current_count is None:\n ht.put(current_word, 1)\n else:\n ht.put(current_word, current_count + 1)\n\n #create an empty list to store top words in\n tuple_list = []\n\n #traverse hash_map to find most used words\n for i in range(ht.capacity):\n if ht._buckets[i] is not None:\n #traverse links at each bucket\n current = ht._buckets[i].head\n while current is not None:\n tuple_list.append((current.key, current.value))\n current = current.next\n\n #create an ordered list out of items\n iter_tuple_quick_sort(tuple_list, len(tuple_list) - 1, 0)\n\n #create a new list to return with passed number arg\n return_list = []\n list_counter = 0\n while list_counter <= number - 1:\n if list_counter == len(tuple_list) - 1:\n break\n else:\n return_list.append(tuple_list[list_counter])\n list_counter += 1\n\n return return_list", "def get_headers(self):\n return self.numHeadList", "def make_althws(entries):\n althws = []\n nentries = len(entries)\n for ientry,entry in enumerate(entries):\n m = re.search(r'<althws>{#(.*?)#}</althws>',entry.datalines[0])\n if m == None:\n continue\n hws = re.split(r', *',m.group(1))\n L0 = entry.metad['L']\n pc0 = entry.metad['pc']\n k10 = entry.metad['k1']\n k20 = entry.metad['k2']\n # Get L for next entry, or None if this is last entry\n ientry1 = ientry+1\n if ientry1 < nentries:\n entry1 = entries[ientry1]\n L1 = entry1.metad['L']\n else:\n L1 = None\n nhws = len(hws)\n Lhws = generate_Ls(L0,L1,nhws) # this is the tricky part\n if 10 <= nhws:\n print('%s,%s,%s has %s alternate headwords' %(L0,pc0,k10,nhws))\n # generate a line for each hw\n for ihw,hw in enumerate(hws):\n L = Lhws[ihw]\n pc = pc0\n k1 = hw\n k2 = hw\n t = 'alt' #type\n lp = L0\n k1p = k10\n out = '<L>%s<pc>%s<k1>%s<k2>%s<type>%s<LP>%s<k1P>%s' %(\n L,pc,k1,k2,t,lp,k1p)\n althws.append(out)\n return althws", "def get_noun_phrases(blob):\n return blob.noun_phrases", "def keywords(text:str) -> list:\n return sorted(set(text.split(' ')), key=frequency, reverse=True)[0:5]", "def headwords ():\n\n q = request.args.get ('q')\n fulltext = request.args.get ('fulltext')\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n where = ''\n\n if (not q) and (not fulltext):\n # Retrieve full list of headwords\n with current_app.config.dba.engine.begin () 
as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n if q:\n q = q.replace ('-', '')\n q = q.replace ('%', '')\n q = q.replace ('?', '_')\n q = q.replace ('*', '%')\n where = \"(keyword LIKE :q) AND\"\n\n if not fulltext:\n # easy out\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE keyword LIKE :q\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'q' : q, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT DISTINCT\n k.id,\n k.webkeyword COLLATE utf8mb4_bin AS webkeyword,\n k.no\n FROM keyword k,\n article a\n WHERE {where} (MATCH (a.idxtext) AGAINST (:fulltext IN BOOLEAN MODE))\n AND a.no = k.no\n ORDER BY k.sortkeyword, k.n, k.no\n LIMIT :limit\n OFFSET :offset\n \"\"\".format (where = where), { 'q' : q, 'fulltext' : fulltext,\n 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)", "def get_headline_position(self, headline: Headline) -> Tuple[int, int]:\n return self.get_regex_position(headline.name)", "def make_word_list(fin):\n\tword_list = []\n\tfor line in fin:\n\t\tword = line.strip()\n\t\tword_list.append(word)\n\treturn word_list", "def get_heading_words(self, html_doc):\n all_headings = []\n \n all_h1 = html_doc.findAll('h1')\n h1_text = ''\n for h1 in all_h1:\n h1_text += h1.text + ' '\n all_headings.append(h1_text.strip())\n \n all_h2 = html_doc.findAll('h2')\n h2_text = ''\n for h2 in all_h2:\n h2_text += h2.text + ' '\n all_headings.append(h2_text.strip())\n \n all_h3 = html_doc.findAll('h3')\n h3_text = ''\n for h3 in all_h3:\n h3_text += h3.text + ' '\n all_headings.append(h3_text.strip())\n \n all_h4 = html_doc.findAll('h4')\n h4_text = ''\n for h4 in all_h4:\n h4_text += h4.text + ' '\n all_headings.append(h4_text.strip()) \n \n all_h5 = html_doc.findAll('h5')\n h5_text = ''\n for h5 in all_h5:\n h5_text += h5.text + ' '\n all_headings.append(h5_text.strip())\n \n all_h6 = html_doc.findAll('h6')\n h6_text = ''\n for h6 in all_h6:\n h6_text += h6.text + ' '\n all_headings.append(h6_text.strip()) \n \n return all_headings", "def return_hpos_to_display(hpo_names, max_num_sites_to_display):\n length = len(hpo_names)\n\n num_lists = math.ceil(length / max_num_sites_to_display)\n\n base = math.floor(length / num_lists)\n\n remainder = length - (base * num_lists)\n\n all_hpos = []\n\n starting_idx = 0\n ending_idx = starting_idx + base # add one because it is not inclusive\n\n for list_num in range(num_lists):\n\n # this is useful for when the number of sites to display\n # does not go evenly into the number of HPOs - essentially\n # add it to the 'earlier' lists\n if list_num < remainder:\n ending_idx = ending_idx + 1\n\n sites = hpo_names[starting_idx:ending_idx]\n\n # reset for subsequent lists\n starting_idx = ending_idx\n ending_idx = starting_idx + base\n\n all_hpos.append(sites)\n\n return all_hpos", "def get_page_words(parsed_hocr_page, pageid):\n page_words = []\n page_height = parsed_hocr_page.box.height\n page_width = parsed_hocr_page.box.width\n page_dim_string = \"%sx%s\" %(page_width, page_height)\n \n for word in parsed_hocr_page.words:\n this_word = {\n 'x0':word.box.left, 'x1':word.box.right, \n 
'y0':page_height-word.box.bottom, 'y1':page_height-word.box.top,\n 'text':word.text, 'width':word.box.width,\n 'height':word.box.height, 'pageid':pageid,\n 'page_dim':page_dim_string,\n 'object_type':'word',\n 'lang':word.lang,\n }\n page_words.append(this_word)\n \n return page_words", "def getVocabList():\n vocab_list = []\n with open('vocab.txt') as f_obj:\n while True:\n vocab_line = f_obj.readline()\n if not vocab_line:\n break\n word = re.search(r'\\t(\\w+)', vocab_line).group(1)\n vocab_list.append(word)\n return vocab_list", "def rarest_words(book):\n unique_dict = unique_words(book)\n\n count_list = []\n unsorted = [(v, k) for k, v in unique_dict.items()]\n\n smallest_num = min(unsorted)\n word_array = pd.DataFrame(list(unique_dict.items()), columns=['Word', 'Occurence'])\n\n for index, row in word_array.iterrows():\n if row['Occurence'] == smallest_num[0]:\n count_list.append(row['Word'])\n\n return count_list", "def get_main_words(idioms_set):\r\n main_words = Counter([idiom.split()[-1] for idiom in idioms_set])\r\n print('main words:', '\\n', main_words)\r\n print('top 50 main words:', '\\n', main_words.most_common(50)) \r\n return list(main_words)", "def get_words(self, first=10):\n return get_occurences(self.lemmatized_words)[:first]", "def ActiveHlt2Lines(self) :\n return []", "def count_sentences(txt):\n lst = [0]\n for i in range(len(txt)):\n if txt[i] == '.' or txt[i] == '?' or txt[i] == '!':\n if i + 2 < len(txt):\n lst += [i + 2]\n return lst", "def process_line(line, hist):\n # replace hyphens with spaces before splitting\n line = line.replace('-', ' ')\n wordlist=[]\n\n for word in line.split():\n # remove punctuation and convert to lowercase\n word = word.strip(string.punctuation + string.whitespace)\n word = word.lower()\n\n wordlist.append(word)\n # update the histogram\n #hist[word] = hist.get(word, 0) + 1\n return wordlist" ]
[ "0.6359982", "0.63080597", "0.62195677", "0.6214322", "0.6143615", "0.614284", "0.6101072", "0.60887766", "0.5984331", "0.5945753", "0.58900934", "0.5865218", "0.5827097", "0.5822467", "0.5805729", "0.5790344", "0.575367", "0.5737365", "0.5706382", "0.5671385", "0.5618535", "0.56139344", "0.56125176", "0.5608357", "0.55759203", "0.55736965", "0.5557831", "0.5552999", "0.55528086", "0.55519116", "0.5551827", "0.55419785", "0.5540675", "0.55250293", "0.55098075", "0.54894614", "0.54835135", "0.54803705", "0.54786813", "0.547727", "0.54499084", "0.54442465", "0.54440796", "0.54376656", "0.54353106", "0.5425497", "0.5423677", "0.5405836", "0.5389497", "0.5378488", "0.5355214", "0.53492785", "0.5321808", "0.53207093", "0.53150684", "0.5308017", "0.5306035", "0.5289887", "0.52888155", "0.52852124", "0.5284778", "0.52833015", "0.52775455", "0.52700436", "0.52660197", "0.5259613", "0.52554137", "0.5245826", "0.5235861", "0.52300787", "0.52250737", "0.5223743", "0.52188265", "0.5216134", "0.52160275", "0.52128327", "0.52128327", "0.5210411", "0.5208849", "0.5206076", "0.5197643", "0.5195569", "0.5192558", "0.5191172", "0.51716703", "0.5168759", "0.51684165", "0.51682293", "0.51675755", "0.51644826", "0.51632637", "0.51606137", "0.51470935", "0.5137943", "0.513224", "0.5132075", "0.51224583", "0.51156306", "0.5106817", "0.5104188" ]
0.54709244
40
Returns a list of headlines if given a keyword
def get_headlines(self, kw = None):
    if kw:
        return self.get_headlines_with_keyword(kw)
    else:
        return self.get_all_headlines()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_headlines_with_keyword(self, kw):\r\n\t\tkey_head = self.keyword_headlines()\r\n\r\n\t\theadlines = set()\r\n\r\n\t\tfor headlinekw in key_head[kw]:\r\n\t\t\tcontent = headlinekw.headlineid.content\r\n\t\t\theadlines.add(content)\r\n\r\n\t\treturn list(headlines)", "def keyword_headlines(self):\r\n\t\td = {}\r\n\r\n\t\tfor q in self.keyword_queryset:\r\n\t\t\td[q.content] = self.headlinekeyword_queryset.filter(keywordid = q.id)\r\n\r\n\t\treturn d", "def get_all_headlines(self):\r\n\t\tlist_vals = list(self.keyword_headlines().values())\r\n\t\tuniq_headlines = set()\r\n\t\tfor list_val in list_vals:\r\n\t\t\tfor headlineobj in list_val:\r\n\t\t\t\tuniq_headlines.add(headlineobj.headlineid.content)\r\n\r\n\t\treturn list(uniq_headlines)", "def get_headline_search(query):\n query = query.replace(' ',\"\")\n category=\"\"\n get_headlines_url = 'https://newsapi.org/v2/top-headlines?category={}&query={}&language=en&apiKey={}'.format(category,query,api_key)\n headlines_results = []\n with urllib.request.urlopen(get_headlines_url) as url:\n get_headlines_data = url.read()\n get_headlines_response = json.loads(get_headlines_data)\n if get_headlines_response['articles']:\n headlines_result_list=get_headlines_response['articles']\n for headline in headlines_result_list:\n headlines_results.append(headline)\n return headlines_results", "def get_keywords(self):\r\n\t\treturn list(self.keyword_headlines().keys())", "def all_headlines_from(url):\n pass", "def get_headlines(driver,site,URL_exclusions):\r\n links = get_all_links(driver,site,URL_exclusions)\r\n headlines = []\r\n n=0\r\n for link in links:\r\n driver = make_driver_obj() #get_all_links quits driver when finished.\r\n try:\r\n while True:\r\n try:\r\n driver.get(link) #No need to accept cookies to don't need return_search\r\n break\r\n except:\r\n continue\r\n except: #If we can't open the URL for any reason.\r\n driver.quit()\r\n continue\r\n n += 1\r\n headline = get_headline(driver)\r\n if headline != '':\r\n headlines.append(headline) #Only append if able to identify headline text\r\n #print(n)\r\n #print(headline)\r\n #print()\r\n driver.quit()\r\n return headlines", "def all_headlines(html_root_node):\n pass", "def gather_headlines(urls):\n pass", "def is_headline(node):\n pass", "def get_negative_headlines(headlines,words):\r\n negative_headlines = []\r\n for headline in headlines:\r\n for word in words:\r\n if headline.lower().find(word) != -1: #If particular word is found in lowercased headline.\r\n negative_headlines.append(headline)\r\n break #Stop iterating through words when we have found one negative word.\r\n return negative_headlines", "def test_headlines_predefined(self) -> None:\n for headline in self.report.headlines:\n if not self.rules.get_headline_rules(headline.name):\n headlines = [headline.name for headline in self.rules.headlines]\n suggestion, _ = process.extractOne(\n headline.name, headlines, scorer=fuzz.partial_ratio\n )\n self.add_error(\n f\"{headline.name} är inte en valid rubrik. 
\"\n f\"Rättningsförlsag: {suggestion}.\",\n headline=headline,\n )\n elif re.search(\"\\\\W{1,}\", headline.name, re.I):\n self.add_error(\n f\"Rubriken {headline.name} innehåller tecken som inte är \"\n \"alfanumeriska vilket inte är tillåtet för en rubrik.\",\n headline=headline,\n )", "def colorize_headlines_visitor(c, p, item):\n if p.h.startswith(\"!= \"):\n f = item.font(0)\n f.setBold(True)\n item.setFont(0, f)\n raise leoPlugins.TryNext", "def parse_headlines(self):\n headlines = re.findall(r\"^\\.\\.\\.(.*?)\\.\\.\\.[ ]?\\n\\n\", self.unixtext,\n re.M | re.S)\n headlines = [\" \".join(h.replace(\"...\",\n \", \").replace(\"\\n\", \" \").split())\n for h in headlines]\n return headlines", "def headwords ():\n\n q = request.args.get ('q')\n fulltext = request.args.get ('fulltext')\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n where = ''\n\n if (not q) and (not fulltext):\n # Retrieve full list of headwords\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n if q:\n q = q.replace ('-', '')\n q = q.replace ('%', '')\n q = q.replace ('?', '_')\n q = q.replace ('*', '%')\n where = \"(keyword LIKE :q) AND\"\n\n if not fulltext:\n # easy out\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE keyword LIKE :q\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'q' : q, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT DISTINCT\n k.id,\n k.webkeyword COLLATE utf8mb4_bin AS webkeyword,\n k.no\n FROM keyword k,\n article a\n WHERE {where} (MATCH (a.idxtext) AGAINST (:fulltext IN BOOLEAN MODE))\n AND a.no = k.no\n ORDER BY k.sortkeyword, k.n, k.no\n LIMIT :limit\n OFFSET :offset\n \"\"\".format (where = where), { 'q' : q, 'fulltext' : fulltext,\n 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)", "def get_headlines(id):\n get_headlines_url = secondary_url.format(id,api_key)\n\n with urllib.request.urlopen(get_headlines_url) as url:\n get_headlines_data = url.read()\n get_headlines_response= json.loads(get_headlines_data)\n\n headlines_results = None\n\n if get_headlines_response['articles']:\n headlines_results_list = get_headlines_response['articles']\n headlines_results = process_headlines(headlines_results_list)\n\n return headlines_results", "def split_head(line, is_head=lambda line: line.startswith('>')):\n if is_head(line):\n return True\n else:\n return False", "def find_keywords(anchor, keywords=['']):\n rel_keywords = []\n href, content = parse_anchor(anchor)\n \n for keyword in keywords:\n kw = keyword.lower()\n if kw in href.lower() or kw in content.lower():\n rel_keywords.append(keyword)\n \n return rel_keywords", "def get_wiki_lines(wt, predicate=None):\n return [line for line in wt.contents.split('\\n') if not callable(predicate) or predicate(line)]", "def onHeadlineClick(self, tag, keywords):\n self.handleEvent(\"headclick1\", tag, keywords)", "def get_dictionary_file_lines_for_keywords(self):\n keywords_iter = iter(self.keywords)\n next_keyword = keywords_iter.next()\n 
print(\"Searching for keyword {}\".format(next_keyword))\n\n self.dictionary_file.open_handle()\n result_lines = list()\n while next_keyword:\n line = self.dictionary_file.read_line_to_obj()\n if not line:\n print(\"Reached end of dictionary file\")\n break\n\n if line.term < next_keyword:\n continue\n elif line.term == next_keyword:\n print(\"Found postings list for term {}\".format(next_keyword))\n result_lines.append(line)\n\n try:\n next_keyword = keywords_iter.next()\n print(\"Searching for keyword {}\".format(next_keyword))\n except StopIteration:\n print(\"Finished searching for all keywords\")\n break\n\n return result_lines", "def lemma_headwords(self):\n new_var = 'lemma_headword'\n lemma_heads = [clx._lemmas[i]['Head'] for i in xrange(len(clx._lemmas))]\n has_item = self.compare_items(lemma_heads)\n new_column = []\n if False in has_item:\n self._warning_msg('lemma_headword', lemma_heads)\n for record, exists in zip(self._dict, has_item):\n if exists:\n lemma_id = clx.wordform_lookup(record)[0].IdNumLemma\n lemma_head = clx.lemma_by_id(lemma_id).Head\n else:\n lemma_head = None\n new_column.append(lemma_head)\n self._append_column(new_column, new_var)", "def articles_id_headwords (_id):\n\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE no = :id\n ORDER BY sortkeyword\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'id' : _id, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)", "def get_headline_data(website_url, source):\n\tpage = requests.get(website_url)\n\tpage.raise_for_status()\n\tall_headlines = []\n\tbs_obj = bs4.BeautifulSoup(page.text, 'html.parser')\n\titem_list = bs_obj.select('item')\n\tprintable = set(string.printable)\n\tfor curr_item in item_list:\n\t\titem_title = curr_item.title.string\n\t\tfollowup_link = curr_item.select('link')[0].string\n\t\tdatestamp = curr_item.select('pubdate')[0].string\n\t\titem_title = item_title.replace(\"&apos;\", \"'\")\n\t\tfollowup_link = followup_link.replace(u\"\\u2018\", \"'\").replace(u\"\\u2019\", \"'\")\n\t\titem_title = item_title.encode('utf-8', errors='ignore')\n\t\tnew_headline = data_structures.Headline(item_title, followup_link, source, datestamp)\n\t\tall_headlines.append(new_headline)\n\treturn all_headlines", "def get_headlines():\n country = request.args.get('country', type=str)\n if country is not None:\n data = te.getNews(country=country).dropna()\n return jsonify(data.to_dict(orient='records'))\n data = te.getNews()\n return jsonify(te.getNews().dropna().to_dict(orient='records'))", "def fetch_headlines(self, retry=False):\n top_headlines_res = None\n try:\n top_headlines_res = self.news_api.get_top_headlines(\n country='in', page_size=100)\n except newsapi.newsapi_exception.NewsAPIException as err:\n print('NewsAPI Exception==', err)\n if not retry:\n print('Retrying with another key...')\n self.api_key = os.getenv('NEWS_API_KEY_BACKUP')\n self.configure_news_api()\n top_headlines_res = self.fetch_headlines(retry=True)\n else:\n return None\n except Exception as err:\n print('Exception occurred==', err)\n return None\n headlines = {}\n if top_headlines_res and top_headlines_res['status'] == 'ok':\n headlines = top_headlines_res\n else:\n headlines = None\n return headlines", "def get_all_headline_data():\n\twebsites = 
database.get_website_URLs()\n\tall_headlines_arr = []\n\tfor curr_elt in websites:\n\t\tcurr_website = curr_elt[0]\n\t\tsource = curr_elt[1]\n\t\tcurr_headline_arr = get_headline_data(curr_website, source)\n\t\tall_headlines_arr.append(curr_headline_arr)\n\treturn all_headlines_arr", "def all_words(self, min_word_length=0):\n return [word for headline in self.headlines for word in\n headline.words(min_word_length=min_word_length)]", "def get_keywords(self, sectioned_text):\n \n keywords = []\n \n if 'full text' in list(sectioned_text.keys()):\n \n for word in self.keyword_list:\n if word in sectioned_text['full text']:\n keywords.append(word)\n \n else: \n fulltext = self.restitch_text(sectioned_text)\n for word in self.keyword_list:\n if word in fulltext:\n keywords.append(word)\n \n return keywords", "def getHeadParts(self):\n return self.headParts", "def reddit_headlines(reddit):\n\n # Set metadata to make request:\n url = \"https://www.reddit.com/r/{}/.json?limit=10\".format(reddit)\n headers = {'User-Agent': '{} Reddit headlines'.format(reddit)}\n\n # Consume Reddit's API to gather info:\n html = requests.get(url, headers=headers)\n\n # If status code is OK:\n if html.status_code == requests.codes.ok:\n # Parse resonse:\n info = json.loads(html.content.decode('utf-8'))\n # pprint(info)\n\n # Get relevant info:\n child = info['data']['children']\n titles = [unidecode(elem['data']['title']) for elem in child]\n titles = \"... \".join([title for title in titles])\n else:\n titles = None\n\n return titles", "def test_headlines_predecessors(self):\n headline_str = \"* One\\n** Two\\n*** Three\\n** Two\\n*** Three\\n* One\"\n\n doc = parser.parse(headline_str)\n self.assertEqual(len(doc.children()), 2)\n\n h1_1 = doc.children()[0]\n h1_2 = doc.children()[1]\n\n self.assertEqual(len(h1_1.children), 2)\n self.assertEqual(len(h1_2.children), 0)\n\n h2_1 = h1_1.children[0]\n h2_2 = h1_1.children[1]\n\n self.assertEqual(len(h2_1.children), 1)\n self.assertEqual(len(h2_2.children), 1)", "def test_headlines_required(self) -> None:\n for rule in self.rules.headlines:\n if not rule.required:\n continue\n is_match: bool = False\n for headline in self.report.headlines:\n if self.rules.get_headline_rules(headline.name) == rule:\n is_match = True\n break\n if not is_match:\n self.add_error(f\"Rubriken {rule.name} som måste vara med saknas.\")", "def headline_text(node):\n pass", "def get_head_dependents(sentence: str) -> List[str]:\n sentence = re.sub(r'\\s+', ' ', sentence)\n doc = nlp(sentence)\n dep = [token.dep_ for token in doc]\n\n # Get list of compounds in doc\n compounds = [token for token in doc if token.dep_ == 'compound']\n\n # Identifies roots and direct objects\n for token in compounds:\n if token.head.dep_ == 'dobj':\n dep[token.i] = 'dobj'\n elif token.head.dep_ == 'ROOT':\n dep[token.i] = 'ROOT'\n\n return [token.text for token in doc if dep[token.i] in ('ROOT', 'dobj')]", "def get_keywords_for_movie(url):\n pass", "def get_all_headlines_from_chrome(site,URL_exclusions):\r\n headlines = []\r\n #Initial URL to pass to return search:\r\n URL = f'https://www.google.co.uk/search?hl=en&as_q=&as_epq=&as_oq=travellers&as_eq=quarantine+travel+train+flight+tourist+archive+airport+covid+coronavirus+hotel+holiday+honeymoon&as_nlo=&as_nhi=&lr=&cr=&as_qdr=all&as_sitesearch={site}&as_occt=title&safe=active&as_filetype=&tbs='\r\n n = 0\r\n while n < 10:\r\n n += 1\r\n driver = launch_chrome()\r\n try:\r\n return_search(URL,driver)\r\n except:\r\n continue\r\n sleep_time = np.random.random() * 
np.random.randint(1,6) \r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n timeout = 0\r\n start = time.time()\r\n while timeout < 120:\r\n try:\r\n page_headlines = get_headlines_from_one_page(driver,site,URL_exclusions)\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n for headline in page_headlines:\r\n headlines.append(headline)\r\n try:\r\n next_button = driver.find_element_by_id('pnnext')\r\n URL = next_button.get_attribute('href') #Pass new URL to return_search()\r\n except NoSuchElementException:\r\n driver.quit() #Quit driver if can't find next button \r\n break\r\n driver.quit() #Quit driver each iteration to avoid triggering recaptcha.\r\n return headlines", "def column_headlines(self):\n elements = self._selenium.find_elements_by_xpath(\n '//div[@id=\"content\"]/table/thead/tr/th/a')\n return [x.text for x in elements]", "def test_headlines_named_entities(self) -> None:\n for headline in self.report.headlines:\n rule: Optional[HeadlineRules] = self.rules.get_headline_rules(headline.name)\n if not (rule and rule.named_entities):\n continue\n\n for ne_rule in rule.named_entities:\n if headline.has_named_entity(\n ne_rule.identity, ne_rule.type, ne_rule.subtype\n ):\n continue\n if ne_rule.cheat and re.search(ne_rule.cheat, headline.to_text()):\n continue\n self.add_error(ne_rule.message, headline=headline)", "def GetListHead(self, *args, **kwargs):\n pass", "def get_headlines(outlet):\n if outlet == \"BBC\":\n parser = news_parser.BBC(\"https://www.bbc.co.uk\")\n elif outlet == \"DailyMail\":\n parser = news_parser.DailyMail(\"https://www.dailymail.co.uk\")\n elif outlet == \"Guardian\":\n parser = news_parser.Guardian(\"https://www.theguardian.com\")\n elif outlet == \"Metro\":\n parser = news_parser.Metro(\"https://www.metro.co.uk\")\n elif outlet == \"Mirror\":\n parser = news_parser.Mirror(\"https://www.mirror.co.uk/news/\")\n elif outlet == \"Reuters\":\n parser = news_parser.Reuters(\"https://uk.reuters.com\")\n elif outlet == \"Sun\":\n parser = news_parser.Sun(\"https://www.thesun.co.uk\")\n elif outlet == \"Independent\":\n parser = news_parser.Independent(\"https://www.independent.co.uk\")\n else:\n parser = news_parser.BBC(\"https://www.bbc.co.uk/news\")\n \n index = outlets.index(outlet)\n url_list = []\n while len(url_list) < 50:\n opts = {\n 'language': ['en'],\n 'source_id': [ids[index]],\n 'published_at_start':'NOW-1DAY',\n 'published_at_end':'NOW',\n 'sort_by': 'hotness',\n 'sort_direction': 'desc',\n 'cursor': '*',\n 'per_page': 100\n }\n\n try:\n api_response = api_instance.list_stories(**opts)\n for story in api_response.stories:\n url = story.links.permalink\n if url:\n url_list.append(url)\n except ApiException as e:\n print(\"Exception when calling DefaultApi->list_stories: %s\\n\" %e)\n \n opts['cursor'] = api_response.next_page_cursor\n \n url_list = url_list[:50]\n \n articles_list = []\n for url in url_list:\n raw_article = parser.get_article(url)\n if raw_article is not None:\n articles_list.append(raw_article)\n\n articles = []\n for article in articles_list:\n parsed_article = parser.parse(article)\n if parsed_article is not None:\n articles.append(parsed_article)\n \n if len(articles) > 30:\n articles = articles[:30]\n\n return articles", "def checkLine(line: str):\n\n key_words = ['src', 'href', 'url']\n out = list()\n for word in key_words:\n if line.__contains__(word):\n out.append((True, word))\n\n # Check if output list is not empty\n if len(out) == 0:\n # If list is empty return None\n return None\n 
else:\n return out", "def extract_head(data):\n tl = data['tls'][data['i']];\n br = data['brs'][data['i']];\n head = extract_area(data,(tl,br));\n return head;", "def parse_keywords(medline):\n keyword_list = medline.find(\"KeywordList\")\n keywords = list()\n if keyword_list is not None:\n for k in keyword_list.findall(\"Keyword\"):\n if k.text is not None:\n keywords.append(k.text)\n keywords = \"; \".join(keywords)\n else:\n keywords = \"\"\n return keywords", "def getHead(text):\n\n text = text.strip()\n\n #check if conjunction\n if utils.isConj(text):\n return utils.conjHead(text)\n\n tokens = text.split()\n new_text = \"\"\n first = True\n for word in tokens:\n if (utils.break_word(word) and not first):\n break\n\n if (word.endswith(\",\")):\n new_text += word[:-1]\n break\n\n #capture possessives?\n #if (word.endswith(\"'s\"):\n # new_text = \"\"\n # continue\n\n new_text += word + \" \"\n first = False\n\n new_text = new_text.strip()\n if new_text == \"\":\n sys.stderr.write(\"Empty text: \\\"{0}\\\" : \\\"{1}\\\"\".format(text, new_text))\n\n return new_text.split()[-1]", "def add_keywords(self, response: Response) -> list:\n return response.xpath(\"//ul[@class='term']/li/a/text()\").getall()", "def test_headlines_successors(self):\n headline_str = \"* First level\\n** Second level\\n*** Third level\"\n doc = parser.parse(headline_str)\n self.assertEqual(len(doc.children()), 1)\n\n h1 = doc.children()[0]\n self.assertEqual(len(h1.children), 1)\n\n h2 = h1.children[0]\n self.assertEqual(len(h2.children), 1)\n\n h3 = h2.children[0]\n self.assertEqual(len(h3.children), 0)", "def get_all_headlines_from_firefox(site,URL_exclusions):\r\n headlines = []\r\n #Initial URL to pass to return search:\r\n URL = f'https://www.google.co.uk/search?hl=en&as_q=&as_epq=&as_oq=travellers&as_eq=quarantine+travel+train+flight+tourist+archive+airport+covid+coronavirus+hotel+holiday+honeymoon&as_nlo=&as_nhi=&lr=&cr=&as_qdr=all&as_sitesearch={site}&as_occt=title&safe=active&as_filetype=&tbs='\r\n n = 0\r\n while n < 10:\r\n n += 1\r\n driver = launch_firefox()\r\n try:\r\n return_search(URL,driver)\r\n except:\r\n continue\r\n sleep_time = np.random.random() * np.random.randint(1,6) \r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n timeout = 0\r\n start = time.time()\r\n while timeout < 120:\r\n try:\r\n page_headlines = get_headlines_from_one_page(driver,site,URL_exclusions)\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n for headline in page_headlines:\r\n headlines.append(headline)\r\n try:\r\n next_button = driver.find_element_by_id('pnnext')\r\n URL = next_button.get_attribute('href') #Pass new URL to return_search()\r\n except NoSuchElementException:\r\n driver.quit()\r\n break\r\n driver.quit() #Quit driver each iteration to avoid triggering recaptcha.\r\n return headlines", "def BuildHeadList(all_file_contents):\n head_list = []\n list_all_file_contents = (all_file_contents)\n for line in list_all_file_contents: \n if line[0:4] != 'ATOM':\n head_list.append(line)\n else:\n break\n\n return head_list", "def findLegHeaders(words, header, how='match'):\n locs = []\n for i, line in enumerate(words):\n match = header.match(line)\n if match is not None:\n locs.append(i)\n\n return locs", "def entry_has_keyword(keyword):\n\tdef filter_function(entry):\n\t\tif entry is None:\n\t\t\treturn False\n\t\ttitle = entry.get('title')\n\t\tif title is None:\n\t\t\treturn False\n\t\tif entry.get('issued') is None:\n\t\t\treturn False\n\t\treturn title.find(keyword) > 
-1\n\treturn filter_function", "def topheadlines():\n newsSource = click.prompt(\"Please enter your choice from listsources\")\n \n main_url = \"https://newsapi.org/v2/top-headlines?apiKey=f45fa2c71932483f832f0cc745af0325&sources=\"+newsSource\n\n\t# fetching data in json format \n open_headline = requests.get(main_url).json() \n\n\t# getting all headlines in a string articles \n headline = open_headline[\"articles\"] \n\n\t# empty list which will \n\t# contain all trending newssources \n output = [] \n\t\n for h in headline: \n click.echo('\\n')\n click.secho(click.style('TITLE: ' + h['title'], fg='red'))\n click.secho(click.wrap_text(h['description']))\n click.secho(click.style('DOMAIN: ' + h['url'], fg='blue'))\n \n \t\n for i in output[:11]:\n print(i)", "def first_heading(source_text):\n lines = source_text.split('\\n')\n for l in lines:\n if l.startswith(u'#'):\n return l.strip(u'# ')\n return None", "def get_headlines_from_one_page(driver,site,URL_exclusions):\r\n headlines = []\r\n links = get_links_from_one_page(driver,site,URL_exclusions)\r\n for i in range(len(links)):\r\n start = time.time()\r\n timeout = 0\r\n while timeout < 120: #Someimtes the page doesn't load. Quit the page after two minutes.\r\n try:\r\n results = driver.find_elements_by_class_name(\"g\") #Pages contained in class=\"g\" elements\r\n button = results[i].find_element_by_tag_name(\"a\") #Links under <a> tag\r\n link = button.get_attribute('href') #URL contained under 'href' \r\n if link.find(site) != -1: #Some \"g\" elements are not search results\r\n find = np.zeros(len(URL_exclusions))\r\n for j in range(len(URL_exclusions)):\r\n find[j] = bool(link.find(URL_exclusions[j]) == -1)\r\n if all(find) == True: #If no exclusion words found in UR\r\n button.click()\r\n sleep_time = np.random.random() * np.random.randint(1,6) #Sleep for random time between 1 and 5s to reduce chance of bot detection.\r\n time.sleep(sleep_time)\r\n headline = get_headline(driver)\r\n if headline != '': #Only interested if we succesfully find headline\r\n headlines.append(headline)\r\n driver.back()\r\n sleep_time = np.random.random() * np.random.randint(1,6)\r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n if timeout >= 120:\r\n break #If results hasn't loaded after 120 seconds, we need to break the for loop\r\n return headlines", "def get_annotations_containing_keyword(graph, keyword):\n return [\n {\n 'annotation': annotation,\n 'value': value\n }\n for annotation, value in iter_annotation_value_pairs(graph)\n if keyword.lower() in value.lower()\n ]", "def org_headlines(self, org):\n\n if org in self.organisations:\n return HeadlineData([headline for headline in self.headlines if\n headline.organisation == org])\n\n raise ValueError(\"Organisation '{}' not found.\".format(org))", "def get_metadata_header_lines(input_file):\n # type: (str) -> List[str]\n if not FileSystems.exists(input_file):\n raise ValueError('{} does not exist'.format(input_file))\n return[line for line in _header_line_generator(input_file) if\n line.startswith('##')]", "def get_headings(self):\n return self.headings", "def headline(self):\n \n return self._headline", "def get_tagged_titles(ttls_lnks):\n\ttagged_titles = []\n\tfor title, link in ttls_lnks:\n\t\t# get the html tree for the paper's page\n\t\tpaper_tree = get_tree(link)\n\t\tpath = '//table/tr/th[text() = \"Subjects:\"]'\n\t\t# Check if html contains the table header \"Subjects:\"\n\t\tsubject_th = 
paper_tree.xpath(path)\n\t\t# If it does, this means paper is tagged so add to the list to be returned\n\t\tif subject_th:\n\t\t\ttagged_titles.append(title)\n\n\treturn tagged_titles", "def extract_keywords(self):\n keywords = [] \n for keyword in self.watsonLanguageModel['keywords'][:self.entitySizeLimit]: \n keywords.append(keyword['text'])\n return keywords", "def getheadlines(self,\n category=None,\n language=None,\n country=None,\n sources=None,\n keywords=None,\n apiKey=None,\n version=None):\n # set sources to first in index by default\n if not sources:\n sources = 'abc-news'\n\n # get version and raise error if not 2\n version=self.version\n if self.version != 2:\n raise ValueError('You must use Version 2 to retrieve headlines from'\n ' News API service.')\n\n # retrive the api key if set; otherwise, error\n if not self._api_key:\n raise ValueError(\n 'You must use use an API key; to get a key visit https://news'\n 'api.org/. If you have an API key, set it using the '\n 'Api.SetCredentials method.')\n\n # if api key is there, set the params\n else:\n request_params = {\n \"category\": category,\n 'language': language,\n \"country\": country,\n \"sources\":sources,\n \"apiKey\": self._api_key,\n \"q\":keywords\n }\n\n # build the url\n url = self.base_url + self.__endpoints['heads']\n\n # make the request\n r = requests.get(url,params=request_params,timeout=self._timeout)\n\n # return the json\n return r.json()", "def _get_keywords(self, title: str):\n # Prepare data\n keywords = set()\n stops = set(nltk.corpus.stopwords.words(\"english\"))\n stemmer = nltk.stem.SnowballStemmer(\"english\")\n ent_types = [\n \"PERSON\", \"ORGANIZATION\", \"FACILITY\", \"LOCATION\", \"DATE\",\n \"TIME\", \"GPE\", \"MONEY\",\n ]\n excluded_word_types = [\"RB\", \"IN\", \"PRP\"]\n\n # Tokenize and chunk words using NLTK\n tokens = nltk.tokenize.word_tokenize(title)\n positions = nltk.pos_tag(tokens)\n chunk = nltk.ne_chunk(positions)\n\n # Make a word list of keywords we want to add, that\n # are not part of our excluded word types.\n words = set()\n for pos in positions:\n word, word_type = pos\n if word.isalnum() and word_type not in excluded_word_types:\n words.add(word)\n\n # Add all entities to keyword list and remove them from\n # our remaining word set so they don't get added again\n # and stemmed later.\n for subtree in chunk.subtrees(filter=lambda t: t.label() in ent_types):\n for leaf in subtree.leaves():\n keywords.add(leaf[0])\n if leaf[0] in words:\n words.remove(leaf[0])\n\n # Add remaining words in list and stem them to base form,\n # stemming means we change words from e.g. 
\"eating\" to \"eat\".\n for word in words:\n if word not in stops:\n keywords.add(stemmer.stem(word))\n\n return sorted([keyword.lower() for keyword in keywords])", "def words(self, min_word_length=0):\n\n word_tokenizer = nltk.RegexpTokenizer(r'\\b[^\\s]+\\b')\n headline_string = self.headline_string.lower().replace(\"’\", \"'\")\n return [word for word in word_tokenizer.tokenize(headline_string) if len(word) >= min_word_length]", "def get_main_headline(self, default=''):\n for segment in self.segments:\n if segment.headlines:\n return segment.headlines[0]\n return default", "def test_single_headline(self):\n\n text = \"Example headline\"\n\n for i in range(1,6):\n headline_str = '*' * i + ' ' + text\n doc = parser.parse(headline_str)\n\n headline_node = doc.children()[0]\n\n self.assertTrue(isinstance(headline_node, parser.HeadlineNode))\n self.assertEqual(headline_node.level, i)\n self.assertEqual(headline_node.text, text)", "def headwords_id_context (_id):\n\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, \"\"\"\n SELECT keyword, sortkeyword\n FROM keyword\n WHERE id = :id\n \"\"\", { 'id' : _id })\n keyword, sortkeyword = res.fetchone ()\n\n res1 = execute (conn, \"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE sortkeyword < :sortkeyword\n ORDER BY sortkeyword DESC, n DESC, no DESC\n LIMIT :limit\n \"\"\", { 'sortkeyword' : sortkeyword, 'limit' : limit })\n\n res2 = execute (conn, \"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE sortkeyword >= :sortkeyword\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n \"\"\", { 'sortkeyword' : sortkeyword, 'limit' : limit + 1 })\n\n res = []\n\n for row in reversed (res1.fetchall ()):\n res.append (row[:3])\n for row in res2:\n res.append (row[:3])\n\n return make_headwords_response (res, limit)", "def readTotitle(fh, titleChar):\n preLines = []\n while True:\n l = fh.readline().strip()\n if l.startswith(titleChar):\n return (preLines,l)\n elif l == '':\n return preLines,None\n else:\n preLines.append(l)", "def first_words_func():\n return_list = []\n for lyric in lyrics:\n for line in lyric.split(\"\\n\"):\n return_list.append(line.split(\" \")[0])\n return (return_list)", "def hypernym(self, sense=None):\n s = self._synset(self.text)\n\n if not s:\n return []\n\n hyper = s.hypernyms()\n\n results = list()\n for h in hyper:\n results.append(h.lemma_names())\n\n if not sense:\n return results\n\n return results[:sense]", "def extract_phrases_with_keywords(text, keyword):\n sentences = split_text(text)\n phrases = []\n keyword = word_process(keyword)\n for sentence in sentences:\n words = re.findall(r'\\w+', sentence)\n for i, word in enumerate(words):\n if word_process(word) == word_process(keyword): # Both word and keyword have been processed, so we can compare them directly\n start = sentence.index(words[max(0,i-2)])\n end = sentence.index(word) + len(word)\n phrases.append(sentence[start:end])\n return phrases", "def get_headline(driver):\r\n headline = ''\r\n privacy_statement = 'We value your privacy'\r\n try:\r\n try:\r\n h1 = driver.find_element_by_tag_name('h1') #Some headlines written under <h1> tag\r\n if h1.text.lower().find(privacy_statement.lower()) == -1:\r\n headline += h1.text #Only want to return one variable\r\n except NoSuchElementException:\r\n None\r\n try:\r\n h2 = driver.find_element_by_tag_name('h2') #Some headlines written under <h2? 
tag\r\n if h2.text.lower().find(privacy_statement.lower()) == -1:\r\n headline += h2.text #Only want to return one variable\r\n except NoSuchElementException:\r\n None\r\n try:\r\n video_headline = driver.find_element_by_class_name('video-headline')\r\n if video_headline.text.lower().find(privacy_statement.lower()) == -1:\r\n headline += video_headline.text\r\n except NoSuchElementException:\r\n None\r\n except:\r\n None\r\n return headline", "def get_all_headlines_from_chrome_2(site,URL_exclusions):\r\n headlines = []\r\n #Initial URL to pass to return search:\r\n URL = f'https://www.google.co.uk/search?as_q=&as_epq=irish+travellers&as_oq=&as_eq=&as_nlo=&as_nhi=&lr=&cr=&as_qdr=all&as_sitesearch={site}&as_occt=any&safe=active&as_filetype=&tbs='\r\n n = 0\r\n while n < 10:\r\n n += 1\r\n driver = launch_chrome()\r\n try:\r\n return_search(URL,driver)\r\n except:\r\n continue\r\n sleep_time = np.random.random() * np.random.randint(1,6) \r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n timeout = 0\r\n start = time.time()\r\n while timeout < 120:\r\n try:\r\n page_headlines = get_headlines_from_one_page(driver,site,URL_exclusions)\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n for headline in page_headlines:\r\n headlines.append(headline)\r\n try:\r\n next_button = driver.find_element_by_id('pnnext')\r\n URL = next_button.get_attribute('href') #Pass new URL to return_search()\r\n except NoSuchElementException:\r\n driver.quit() #Quit driver if can't find next button \r\n break\r\n driver.quit() #Quit driver each iteration to avoid triggering recaptcha.\r\n return headlines", "def getatitle(allcontent, corpus):\n for i in range(0, len(allcontent)):\n words = re.split(r'\\s+', allcontent[i])\n if words[0] == \"Title\":\n for j in range(2, len(words)):\n if len(processword(words[j])) > 0:\n corpus.append(processword(words[j]))", "def parse_title(self) -> list:\n scanning = False # start of a title is found, this may be the second of later part of that.\n ret = [] # to return\n temp = [] # deal with mutiple line titles.\n for page in self.pdf.pages:\n text = page.extract_text()\n # it's possible that a blank page exists which will let text be None.\n if text is None:\n continue\n lines = text.split('\\n')\n\n for line in lines:\n if self.__is_part_of_title(line):\n # middle part of a title\n if scanning:\n temp.append(line)\n # find a new title\n else:\n scanning = True\n temp = [line]\n else:\n # just find an entire title\n if scanning:\n scanning = False\n ret.append(\"\".join(temp))\n # remove wrong titles ( maybe trigger words occur at other part of the document )\n for title in ret:\n if self.title_keyword not in title:\n ret.remove(title)\n return ret", "def get_tokens_with_heads(self, snlp_doc):\n tokens = []\n heads = []\n offset = 0\n for sentence in snlp_doc.sentences:\n for token in sentence.tokens:\n for word in token.words:\n # Here, we're calculating the absolute token index in the doc,\n # then the *relative* index of the head, -1 for zero-indexed\n # and if the governor is 0 (root), we leave it at 0\n if word.head:\n head = word.head + offset - len(tokens) - 1\n else:\n head = 0\n heads.append(head)\n tokens.append(word)\n offset += sum(len(token.words) for token in sentence.tokens)\n return tokens, heads", "def remove_false_positives(headlines,exclusions):\r\n for headline in headlines:\r\n for word in exclusions:\r\n if headline.lower().find(word) != -1: #If headline contains exclusionary word.\r\n headlines.remove(headline)\r\n 
break\r\n return headlines", "def top_headlines():\n source = \"google-news\" # TODO: Add option to choose source\n try:\n r = requests.get(\"https://newsapi.org/v2/top-headlines?sources=\" + source + \"&apiKey=\" + NEWS_API_TOKEN)\n data = r.json()\n # TODO: Find a way to include multiple articles instead of a random one\n article = data['articles'][randint(0, len(data['articles']) - 1)]\n imageurl = article['urlToImage'].replace('\\\\', '')\n embed = discord.Embed(\n title=article['title'],\n description=article['description'],\n url=article['url'],\n image_url=imageurl\n )\n embed.set_image(url=imageurl)\n embed.set_footer(text=\"Powered by NewsAPI! (newsapi.org)\")\n return embed\n except Exception as e:\n print(e)\n return discord.Embed(title=\"Something went wrong\")", "def lookup_keywords(filename):\n keywords = []\n start_of_table = r'\\*+\\s+'\n start_of_kw_table = r'\\*+\\s+Keyword'\n in_kw_table = False\n f = open(filename, \"r\")\n for line in f.readlines():\n line = line.rstrip()\n if len(line) == 0 or line.startswith(\"#\"):\n continue # skip comments and blanks\n if re.match(start_of_kw_table, line):\n in_kw_table = True # table started\n continue\n if re.match(start_of_table, line) and not re.match(start_of_kw_table, line):\n in_kw_table = False # table ended\n continue\n if line.startswith(' '):\n continue # skip content rows\n if in_kw_table:\n keywords.append(line)\n f.close()\n return keywords", "def searchGlossary(self,keyword):\n\t\twords = []\n\n\t\tfor letter in glossary:\n\t\t\tfor word in glossary[letter]:\n\t\t\t\tprint word.keys()[0]\n\t\t\t\tif keyword.lower() in word.keys()[0].lower():\n\t\t\t\t\twords.append(word)\n\n\t\treturn words", "def skip_lines(input_file, keyword):\n dummy = ''\n while True:\n dummy = input_file.readline().strip()\n if dummy == keyword:\n dummy = input_file.readline()\n break\n return input_file", "def match(line,keyword):\n line=line.lstrip()\n length=len(keyword)\n if line[:length] == keyword:\n return line[length:]\n else:\n return None", "def readTotitle(fh):\n preLines = []\n while True:\n l = fh.readline().strip()\n if l.startswith('>'):\n return (preLines,l)\n elif l == '':\n return preLines,None\n else:\n preLines.append(l)", "def test_headlines_dependencies(self) -> None:\n\n def has_dep(dep: str) -> bool:\n for h in self.report.headlines:\n if self.rules.get_headline_rules(\n h.name\n ) == self.rules.get_headline_rules(dep):\n return True\n return False\n\n for headline in self.report.headlines:\n rule: Optional[HeadlineRules] = self.rules.get_headline_rules(headline.name)\n if not rule:\n continue\n\n for dependency_group in rule.dependencies:\n is_match: bool = False\n for dependency in dependency_group:\n if has_dep(dependency):\n is_match = True\n break\n if not is_match:\n dependencies_list: str = \", \".join(dependency_group)\n self.add_error(\n f\"Rubriken {headline.name} kräver att en av följande \"\n f\"rubriker finns med i dokumentet: {dependencies_list}.\",\n headline=headline,\n )", "def findOwnLines(self, term, scope=75):\n\t\treturn self.findLines(text=' '.join(self.textFile), term=term, scope=scope)", "def get_paper_keywords(tree):\n\tpath = '//table/tr/th[text() = \"Keywords:\"]/following-sibling::td/text()'\n\tkeywords = tree.xpath(path)\n\t# xpath returns a list with the keywords as a single string element separated by new lines, commas or semi-colons\n\t# Make this into a list of keywords\n\tif keywords:\n\t\t# Split on new lines, commas and semi-colons\n\t\tkeywords = re.split('[\\\\n,;]', 
keywords[0])\n\t\t# Remove trailing white space and empty strings\n\t\tkeywords = [kw.strip() for kw in keywords if kw]\n\n\treturn keywords", "def has_header():\n header_content = (\"\\n\".join(CURRENT_BUFFER[:7])).lower()\n return sum(1 for keyword in KEYWORDS if header_content.find(keyword.lower()) != -1) >= 2", "def complete_setup_pocs(self, text, line, begidx, endidx):\n names = ['all'] + hardware.get_all_names()\n return [name for name in names if name.startswith(text)]", "def search_keyword(self,keyword):\n for entry in self.available_fields_list:\n for x in entry:\n if keyword in x:\n print(entry)\n break\n return", "def getKeywords(self):\n return", "def findLines(self, text, term, scope=75):\n\t\tlistOfResults = list()\n\n\t\tcurrentIndex = 0\n\t\ttermLength\t = len(term)\n\t\tappend\t\t = listOfResults.append\n\t\treplace\t\t = str.replace\n\n\t\ttext = text.lower()\n\t\tterm = term.lower()\n\n\t\twhile currentIndex >= 0:\n\t\t\tcurrentIndex = text.find(term, currentIndex+1)\n\n\t\t\tindexA = currentIndex - scope\n\t\t\tindexB = currentIndex + termLength + scope\n\n\t\t\tfindings1 = replace(text[indexA:indexB], '\\n', '_')\n\t\t\tfindings2 = replace(findings1, '\\t', ' ')\n\t\t\tappend(findings2)\n\n\t\treturn listOfResults[:-1]", "def chunk(keywords, lines):\n chunks = dict()\n chunk = []\n \n # Create an empty dictionary using all the keywords\n for keyword in keywords:\n chunks[keyword] = []\n \n # Populate dictionary with lists of chunks associated\n # with the keywords in the list \n for line in lines:\n if line.strip():\n token = line.split()[0]\n if token in keywords:\n chunk = [line] \n chunks[token].append(chunk) \n else:\n chunk.append(line)\n\n return chunks", "def filter_jobs(jobs, keyword):\n for job in jobs:\n if keyword == \"all\":\n yield job\n elif job[\"name\"].find(keyword) != -1:\n yield job", "def _find(self, keyword):\n for tag in self.meta.findall(CN('meta:keyword')):\n if keyword == tag.text:\n return tag\n return None", "def keywords(text:str) -> list:\n return sorted(set(text.split(' ')), key=frequency, reverse=True)[0:5]", "def has_keys(self, key_in_pointer):\n start = self.head\n rList = []\n while start:\n if key_in_pointer in start.getMember().keys():\n rList.append(start)\n start = start.getLink()\n return rList", "def __get_keywords(self, text_list):\r\n specialKW = [\r\n 'run keyword',\r\n 'run keyword and continue on failure',\r\n 'run keyword and expect error',\r\n 'run keyword and ignore error',\r\n 'run keyword and return'\r\n 'run keyword and return if',\r\n 'run keyword and return status',\r\n 'run keyword if',\r\n 'run keyword if all critical tests passed',\r\n 'run keyword if all tests passed',\r\n 'run keyword if any critical tests failed',\r\n 'run keyword if any tests failed',\r\n 'run keyword if test failed',\r\n 'run keyword if test passed',\r\n 'run keyword if timeout occurred',\r\n 'run keyword unless',\r\n 'run keywords',\r\n 'wait until keyword succeeds',\r\n 'repeat keyword',\r\n 'else'\r\n ]\r\n specialSettings = [\r\n '[Arguments]',\r\n '[Documentation]'\r\n ]\r\n L = []\r\n if text_list[0] in specialSettings:\r\n return L\r\n for item in text_list:\r\n if self.__is_keyword(item):\r\n L.append(item)\r\n if not item.replace('_', ' ').replace('-', ' ').lower() in specialKW:\r\n break\r\n return L", "def Keywords(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('keywords', default)\n return [HEP.KeywordObject(i) for i in tmp]", "def keywords_of_section(self, section, kwfilter):\n pcat = getToolByName(section, 
'portal_catalog')\n cat = pcat._catalog\n path_idx = cat.indexes[self.path_index]\n tags_idx = cat.indexes[self.keyword_index]\n result = []\n # query all oids of path - low level\n pquery = {\n self.path_index: {\n 'query': '/'.join(section.getPhysicalPath()),\n 'depth': -1,\n }\n }\n kwfilter = safe_encode(kwfilter)\n # uses internal zcatalog specific details to quickly get the values.\n path_result, info = path_idx._apply_index(pquery)\n for tag in tags_idx.uniqueValues():\n if kwfilter and kwfilter not in safe_encode(tag):\n continue\n tquery = {self.keyword_index: tag}\n tags_result, info = tags_idx._apply_index(tquery)\n if intersection(path_result, tags_result):\n result.append(tag)\n # result should be sorted, because uniqueValues are.\n return safe_simplevocabulary_from_values(result)", "def process_article(sentences: List[Dict[str, str]],\n article: str,\n keyword: str,\n collect_all: bool\n ) -> List[Dict[str, str]]:\n with open(article, 'r') as txt:\n for line in txt.read().split('\\n'):\n if collect_all or keyword.lower() in line.lower():\n sentences.append({\n \"sentence\": line,\n \"keyword\": keyword\n })\n \n return sentences" ]
[ "0.8183849", "0.7191412", "0.7000567", "0.66961074", "0.66491723", "0.66434336", "0.6356157", "0.6131674", "0.61187565", "0.59622735", "0.59542644", "0.5890983", "0.5875384", "0.57926047", "0.57580566", "0.5697247", "0.56422436", "0.5626344", "0.55865705", "0.5584343", "0.5565354", "0.5558015", "0.5479626", "0.5474127", "0.54610187", "0.5453939", "0.5449592", "0.5445621", "0.5369793", "0.53384167", "0.5337765", "0.533775", "0.5335115", "0.5330543", "0.53264487", "0.53228766", "0.5318326", "0.5287171", "0.5287048", "0.5278734", "0.5266336", "0.52641827", "0.5242852", "0.5231549", "0.52243125", "0.520201", "0.51969004", "0.51512474", "0.51457196", "0.5143225", "0.5139778", "0.51389164", "0.5117277", "0.51044655", "0.50992334", "0.50983626", "0.5095768", "0.5092399", "0.50882804", "0.5088028", "0.5076622", "0.50764805", "0.5060789", "0.50536066", "0.505197", "0.50497735", "0.5023786", "0.50183916", "0.49936917", "0.499293", "0.4991656", "0.4985837", "0.49829182", "0.49688753", "0.496828", "0.49631104", "0.49540606", "0.4952517", "0.495122", "0.49483645", "0.49441501", "0.49297783", "0.4925034", "0.48942602", "0.4890283", "0.48875245", "0.48862174", "0.48812044", "0.48773068", "0.4856701", "0.48550868", "0.48491636", "0.48489803", "0.48411733", "0.48353153", "0.48240656", "0.48126435", "0.48033994", "0.4801665", "0.4799365" ]
0.8182654
1
Returns a list of all headlines
def get_all_headlines(self):
    list_vals = list(self.keyword_headlines().values())
    uniq_headlines = set()
    for list_val in list_vals:
        for headlineobj in list_val:
            uniq_headlines.add(headlineobj.headlineid.content)
    return list(uniq_headlines)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_headlines(self, kw = None):\r\n\t\tif kw:\r\n\t\t\treturn self.get_headlines_with_keyword(kw)\r\n\t\telse:\r\n\t\t\treturn self.get_all_headlines()", "def all_headlines(html_root_node):\n pass", "def all_headlines_from(url):\n pass", "def parse_headlines(self):\n headlines = re.findall(r\"^\\.\\.\\.(.*?)\\.\\.\\.[ ]?\\n\\n\", self.unixtext,\n re.M | re.S)\n headlines = [\" \".join(h.replace(\"...\",\n \", \").replace(\"\\n\", \" \").split())\n for h in headlines]\n return headlines", "def get_all_headline_data():\n\twebsites = database.get_website_URLs()\n\tall_headlines_arr = []\n\tfor curr_elt in websites:\n\t\tcurr_website = curr_elt[0]\n\t\tsource = curr_elt[1]\n\t\tcurr_headline_arr = get_headline_data(curr_website, source)\n\t\tall_headlines_arr.append(curr_headline_arr)\n\treturn all_headlines_arr", "def get_headlines(driver,site,URL_exclusions):\r\n links = get_all_links(driver,site,URL_exclusions)\r\n headlines = []\r\n n=0\r\n for link in links:\r\n driver = make_driver_obj() #get_all_links quits driver when finished.\r\n try:\r\n while True:\r\n try:\r\n driver.get(link) #No need to accept cookies to don't need return_search\r\n break\r\n except:\r\n continue\r\n except: #If we can't open the URL for any reason.\r\n driver.quit()\r\n continue\r\n n += 1\r\n headline = get_headline(driver)\r\n if headline != '':\r\n headlines.append(headline) #Only append if able to identify headline text\r\n #print(n)\r\n #print(headline)\r\n #print()\r\n driver.quit()\r\n return headlines", "def getHeadParts(self):\n return self.headParts", "def gather_headlines(urls):\n pass", "def heads(self) -> \"IterableList[Head]\":\n return Head.list_items(self)", "def column_headlines(self):\n elements = self._selenium.find_elements_by_xpath(\n '//div[@id=\"content\"]/table/thead/tr/th/a')\n return [x.text for x in elements]", "def reddit_headlines(reddit):\n\n # Set metadata to make request:\n url = \"https://www.reddit.com/r/{}/.json?limit=10\".format(reddit)\n headers = {'User-Agent': '{} Reddit headlines'.format(reddit)}\n\n # Consume Reddit's API to gather info:\n html = requests.get(url, headers=headers)\n\n # If status code is OK:\n if html.status_code == requests.codes.ok:\n # Parse resonse:\n info = json.loads(html.content.decode('utf-8'))\n # pprint(info)\n\n # Get relevant info:\n child = info['data']['children']\n titles = [unidecode(elem['data']['title']) for elem in child]\n titles = \"... 
\".join([title for title in titles])\n else:\n titles = None\n\n return titles", "def get_headlines_with_keyword(self, kw):\r\n\t\tkey_head = self.keyword_headlines()\r\n\r\n\t\theadlines = set()\r\n\r\n\t\tfor headlinekw in key_head[kw]:\r\n\t\t\tcontent = headlinekw.headlineid.content\r\n\t\t\theadlines.add(content)\r\n\r\n\t\treturn list(headlines)", "def HeadList(self):\n return [(rname, repo.currenthead) for rname, repo in self.repos.items()\n ]", "def get_headlines():\n country = request.args.get('country', type=str)\n if country is not None:\n data = te.getNews(country=country).dropna()\n return jsonify(data.to_dict(orient='records'))\n data = te.getNews()\n return jsonify(te.getNews().dropna().to_dict(orient='records'))", "def colorize_headlines_visitor(c, p, item):\n if p.h.startswith(\"!= \"):\n f = item.font(0)\n f.setBold(True)\n item.setFont(0, f)\n raise leoPlugins.TryNext", "def get_headings(self):\n return self.headings", "def fetch_headlines(self, retry=False):\n top_headlines_res = None\n try:\n top_headlines_res = self.news_api.get_top_headlines(\n country='in', page_size=100)\n except newsapi.newsapi_exception.NewsAPIException as err:\n print('NewsAPI Exception==', err)\n if not retry:\n print('Retrying with another key...')\n self.api_key = os.getenv('NEWS_API_KEY_BACKUP')\n self.configure_news_api()\n top_headlines_res = self.fetch_headlines(retry=True)\n else:\n return None\n except Exception as err:\n print('Exception occurred==', err)\n return None\n headlines = {}\n if top_headlines_res and top_headlines_res['status'] == 'ok':\n headlines = top_headlines_res\n else:\n headlines = None\n return headlines", "def get_headlines(id):\n get_headlines_url = secondary_url.format(id,api_key)\n\n with urllib.request.urlopen(get_headlines_url) as url:\n get_headlines_data = url.read()\n get_headlines_response= json.loads(get_headlines_data)\n\n headlines_results = None\n\n if get_headlines_response['articles']:\n headlines_results_list = get_headlines_response['articles']\n headlines_results = process_headlines(headlines_results_list)\n\n return headlines_results", "def keyword_headlines(self):\r\n\t\td = {}\r\n\r\n\t\tfor q in self.keyword_queryset:\r\n\t\t\td[q.content] = self.headlinekeyword_queryset.filter(keywordid = q.id)\r\n\r\n\t\treturn d", "def topheadlines():\n newsSource = click.prompt(\"Please enter your choice from listsources\")\n \n main_url = \"https://newsapi.org/v2/top-headlines?apiKey=f45fa2c71932483f832f0cc745af0325&sources=\"+newsSource\n\n\t# fetching data in json format \n open_headline = requests.get(main_url).json() \n\n\t# getting all headlines in a string articles \n headline = open_headline[\"articles\"] \n\n\t# empty list which will \n\t# contain all trending newssources \n output = [] \n\t\n for h in headline: \n click.echo('\\n')\n click.secho(click.style('TITLE: ' + h['title'], fg='red'))\n click.secho(click.wrap_text(h['description']))\n click.secho(click.style('DOMAIN: ' + h['url'], fg='blue'))\n \n \t\n for i in output[:11]:\n print(i)", "def test_headlines_predecessors(self):\n headline_str = \"* One\\n** Two\\n*** Three\\n** Two\\n*** Three\\n* One\"\n\n doc = parser.parse(headline_str)\n self.assertEqual(len(doc.children()), 2)\n\n h1_1 = doc.children()[0]\n h1_2 = doc.children()[1]\n\n self.assertEqual(len(h1_1.children), 2)\n self.assertEqual(len(h1_2.children), 0)\n\n h2_1 = h1_1.children[0]\n h2_2 = h1_1.children[1]\n\n self.assertEqual(len(h2_1.children), 1)\n self.assertEqual(len(h2_2.children), 1)", "def top_headlines():\n source 
= \"google-news\" # TODO: Add option to choose source\n try:\n r = requests.get(\"https://newsapi.org/v2/top-headlines?sources=\" + source + \"&apiKey=\" + NEWS_API_TOKEN)\n data = r.json()\n # TODO: Find a way to include multiple articles instead of a random one\n article = data['articles'][randint(0, len(data['articles']) - 1)]\n imageurl = article['urlToImage'].replace('\\\\', '')\n embed = discord.Embed(\n title=article['title'],\n description=article['description'],\n url=article['url'],\n image_url=imageurl\n )\n embed.set_image(url=imageurl)\n embed.set_footer(text=\"Powered by NewsAPI! (newsapi.org)\")\n return embed\n except Exception as e:\n print(e)\n return discord.Embed(title=\"Something went wrong\")", "def GetListHead(self, *args, **kwargs):\n pass", "def unique_headlines(self):\n\n all_headlines = sorted(self.headlines, key=lambda x: x.datetime, reverse=True)\n unique_headlines = list(dict((headline.headline_string, headline)\n for headline in all_headlines).values())\n\n return HeadlineData(unique_headlines)", "def get_headline_data(website_url, source):\n\tpage = requests.get(website_url)\n\tpage.raise_for_status()\n\tall_headlines = []\n\tbs_obj = bs4.BeautifulSoup(page.text, 'html.parser')\n\titem_list = bs_obj.select('item')\n\tprintable = set(string.printable)\n\tfor curr_item in item_list:\n\t\titem_title = curr_item.title.string\n\t\tfollowup_link = curr_item.select('link')[0].string\n\t\tdatestamp = curr_item.select('pubdate')[0].string\n\t\titem_title = item_title.replace(\"&apos;\", \"'\")\n\t\tfollowup_link = followup_link.replace(u\"\\u2018\", \"'\").replace(u\"\\u2019\", \"'\")\n\t\titem_title = item_title.encode('utf-8', errors='ignore')\n\t\tnew_headline = data_structures.Headline(item_title, followup_link, source, datestamp)\n\t\tall_headlines.append(new_headline)\n\treturn all_headlines", "def BuildHeadList(all_file_contents):\n head_list = []\n list_all_file_contents = (all_file_contents)\n for line in list_all_file_contents: \n if line[0:4] != 'ATOM':\n head_list.append(line)\n else:\n break\n\n return head_list", "def get_headline_search(query):\n query = query.replace(' ',\"\")\n category=\"\"\n get_headlines_url = 'https://newsapi.org/v2/top-headlines?category={}&query={}&language=en&apiKey={}'.format(category,query,api_key)\n headlines_results = []\n with urllib.request.urlopen(get_headlines_url) as url:\n get_headlines_data = url.read()\n get_headlines_response = json.loads(get_headlines_data)\n if get_headlines_response['articles']:\n headlines_result_list=get_headlines_response['articles']\n for headline in headlines_result_list:\n headlines_results.append(headline)\n return headlines_results", "def test_headlines_successors(self):\n headline_str = \"* First level\\n** Second level\\n*** Third level\"\n doc = parser.parse(headline_str)\n self.assertEqual(len(doc.children()), 1)\n\n h1 = doc.children()[0]\n self.assertEqual(len(h1.children), 1)\n\n h2 = h1.children[0]\n self.assertEqual(len(h2.children), 1)\n\n h3 = h2.children[0]\n self.assertEqual(len(h3.children), 0)", "def test_headlines_predefined(self) -> None:\n for headline in self.report.headlines:\n if not self.rules.get_headline_rules(headline.name):\n headlines = [headline.name for headline in self.rules.headlines]\n suggestion, _ = process.extractOne(\n headline.name, headlines, scorer=fuzz.partial_ratio\n )\n self.add_error(\n f\"{headline.name} är inte en valid rubrik. 
\"\n f\"Rättningsförlsag: {suggestion}.\",\n headline=headline,\n )\n elif re.search(\"\\\\W{1,}\", headline.name, re.I):\n self.add_error(\n f\"Rubriken {headline.name} innehåller tecken som inte är \"\n \"alfanumeriska vilket inte är tillåtet för en rubrik.\",\n headline=headline,\n )", "def get_headlines(outlet):\n if outlet == \"BBC\":\n parser = news_parser.BBC(\"https://www.bbc.co.uk\")\n elif outlet == \"DailyMail\":\n parser = news_parser.DailyMail(\"https://www.dailymail.co.uk\")\n elif outlet == \"Guardian\":\n parser = news_parser.Guardian(\"https://www.theguardian.com\")\n elif outlet == \"Metro\":\n parser = news_parser.Metro(\"https://www.metro.co.uk\")\n elif outlet == \"Mirror\":\n parser = news_parser.Mirror(\"https://www.mirror.co.uk/news/\")\n elif outlet == \"Reuters\":\n parser = news_parser.Reuters(\"https://uk.reuters.com\")\n elif outlet == \"Sun\":\n parser = news_parser.Sun(\"https://www.thesun.co.uk\")\n elif outlet == \"Independent\":\n parser = news_parser.Independent(\"https://www.independent.co.uk\")\n else:\n parser = news_parser.BBC(\"https://www.bbc.co.uk/news\")\n \n index = outlets.index(outlet)\n url_list = []\n while len(url_list) < 50:\n opts = {\n 'language': ['en'],\n 'source_id': [ids[index]],\n 'published_at_start':'NOW-1DAY',\n 'published_at_end':'NOW',\n 'sort_by': 'hotness',\n 'sort_direction': 'desc',\n 'cursor': '*',\n 'per_page': 100\n }\n\n try:\n api_response = api_instance.list_stories(**opts)\n for story in api_response.stories:\n url = story.links.permalink\n if url:\n url_list.append(url)\n except ApiException as e:\n print(\"Exception when calling DefaultApi->list_stories: %s\\n\" %e)\n \n opts['cursor'] = api_response.next_page_cursor\n \n url_list = url_list[:50]\n \n articles_list = []\n for url in url_list:\n raw_article = parser.get_article(url)\n if raw_article is not None:\n articles_list.append(raw_article)\n\n articles = []\n for article in articles_list:\n parsed_article = parser.parse(article)\n if parsed_article is not None:\n articles.append(parsed_article)\n \n if len(articles) > 30:\n articles = articles[:30]\n\n return articles", "def headline(self):\n \n return self._headline", "def lemma_headwords(self):\n new_var = 'lemma_headword'\n lemma_heads = [clx._lemmas[i]['Head'] for i in xrange(len(clx._lemmas))]\n has_item = self.compare_items(lemma_heads)\n new_column = []\n if False in has_item:\n self._warning_msg('lemma_headword', lemma_heads)\n for record, exists in zip(self._dict, has_item):\n if exists:\n lemma_id = clx.wordform_lookup(record)[0].IdNumLemma\n lemma_head = clx.lemma_by_id(lemma_id).Head\n else:\n lemma_head = None\n new_column.append(lemma_head)\n self._append_column(new_column, new_var)", "def get_headers_for_print(self):\n lines_for_print = []\n for header in self.metadata:\n lines_for_print.append(self.metadata[header])\n lines_for_print.append('\\t'.join(self.header))\n lines_for_print[-1] = '#' + lines_for_print[-1]\n return lines_for_print", "def ActiveHlt2Lines(self) :\n return []", "def headloss_curve_names(self):\n return list(self._headloss_curves)", "def get_heads(self):\n return self.heads[1:]", "def headline(self):\r\n return '%s%s %s%s' % (BLUE, self.title,\r\n NORMAL, self.link)", "def all_words(self, min_word_length=0):\n return [word for headline in self.headlines for word in\n headline.words(min_word_length=min_word_length)]", "def ActiveHlt1Lines(self) :\n lines = [ 'Hlt1TrackAllL0', 'Hlt1TrackMuon', 'Hlt1TrackAllL0Tight', 'Hlt1TrackPhoton'\n , 'Hlt1VertexDisplVertex'\n , 
'Hlt1SingleMuonNoIP', 'Hlt1SingleMuonHighPT'\n , 'Hlt1SingleElectronNoIP'\n , 'Hlt1DiMuonLowMass', 'Hlt1DiMuonHighMass'\n , 'Hlt1DiProtonLowMult', 'Hlt1DiProton'\n , 'Hlt1L0HighSumETJet','Hlt1HighPtJetsSinglePV']\n \n \n lines += ['Hlt1CharmCalibrationNoBias']\n lines += ['Hlt1CharmCalibrationNoBias']\n return lines", "def extract_head(data):\n tl = data['tls'][data['i']];\n br = data['brs'][data['i']];\n head = extract_area(data,(tl,br));\n return head;", "def get_all_headlines_from_firefox(site,URL_exclusions):\r\n headlines = []\r\n #Initial URL to pass to return search:\r\n URL = f'https://www.google.co.uk/search?hl=en&as_q=&as_epq=&as_oq=travellers&as_eq=quarantine+travel+train+flight+tourist+archive+airport+covid+coronavirus+hotel+holiday+honeymoon&as_nlo=&as_nhi=&lr=&cr=&as_qdr=all&as_sitesearch={site}&as_occt=title&safe=active&as_filetype=&tbs='\r\n n = 0\r\n while n < 10:\r\n n += 1\r\n driver = launch_firefox()\r\n try:\r\n return_search(URL,driver)\r\n except:\r\n continue\r\n sleep_time = np.random.random() * np.random.randint(1,6) \r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n timeout = 0\r\n start = time.time()\r\n while timeout < 120:\r\n try:\r\n page_headlines = get_headlines_from_one_page(driver,site,URL_exclusions)\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n for headline in page_headlines:\r\n headlines.append(headline)\r\n try:\r\n next_button = driver.find_element_by_id('pnnext')\r\n URL = next_button.get_attribute('href') #Pass new URL to return_search()\r\n except NoSuchElementException:\r\n driver.quit()\r\n break\r\n driver.quit() #Quit driver each iteration to avoid triggering recaptcha.\r\n return headlines", "def get_all_headlines_from_chrome(site,URL_exclusions):\r\n headlines = []\r\n #Initial URL to pass to return search:\r\n URL = f'https://www.google.co.uk/search?hl=en&as_q=&as_epq=&as_oq=travellers&as_eq=quarantine+travel+train+flight+tourist+archive+airport+covid+coronavirus+hotel+holiday+honeymoon&as_nlo=&as_nhi=&lr=&cr=&as_qdr=all&as_sitesearch={site}&as_occt=title&safe=active&as_filetype=&tbs='\r\n n = 0\r\n while n < 10:\r\n n += 1\r\n driver = launch_chrome()\r\n try:\r\n return_search(URL,driver)\r\n except:\r\n continue\r\n sleep_time = np.random.random() * np.random.randint(1,6) \r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n timeout = 0\r\n start = time.time()\r\n while timeout < 120:\r\n try:\r\n page_headlines = get_headlines_from_one_page(driver,site,URL_exclusions)\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n for headline in page_headlines:\r\n headlines.append(headline)\r\n try:\r\n next_button = driver.find_element_by_id('pnnext')\r\n URL = next_button.get_attribute('href') #Pass new URL to return_search()\r\n except NoSuchElementException:\r\n driver.quit() #Quit driver if can't find next button \r\n break\r\n driver.quit() #Quit driver each iteration to avoid triggering recaptcha.\r\n return headlines", "def org_headlines(self, org):\n\n if org in self.organisations:\n return HeadlineData([headline for headline in self.headlines if\n headline.organisation == org])\n\n raise ValueError(\"Organisation '{}' not found.\".format(org))", "def head_of_all(x, l):\n return [[x] + p for p in l]", "def store_headlines():\n for outlet in outlets:\n articles = get_headlines(outlet)\n connect_db.store_headlines(articles,outlet)", "def get_main_headline(self, default=''):\n for segment in self.segments:\n if segment.headlines:\n return segment.headlines[0]\n 
return default", "def test_single_headline(self):\n\n text = \"Example headline\"\n\n for i in range(1,6):\n headline_str = '*' * i + ' ' + text\n doc = parser.parse(headline_str)\n\n headline_node = doc.children()[0]\n\n self.assertTrue(isinstance(headline_node, parser.HeadlineNode))\n self.assertEqual(headline_node.level, i)\n self.assertEqual(headline_node.text, text)", "def headloss_curves(self):\n for key in self._headloss_curves:\n yield key, self._data[key]", "def test_headlines_order(self) -> None:\n last: Tuple[int, str] = (0, \"\")\n\n for headline in self.report.headlines:\n rule: Optional[HeadlineRules] = self.rules.get_headline_rules(headline.name)\n if (not rule) or (rule.order is None):\n continue\n\n last_order, last_headline = last # type: int, str\n if last_order > rule.order:\n self.add_error(\n (\n f\"Rubriken {headline.name} ska komma före \"\n f\"rubriken {last_headline}.\"\n ),\n headline=headline,\n )\n\n last = (rule.order, headline.name)", "def head_lines(k):\n first_lines = 'Radar:' + location + '\\n\\n'' ' + k + '-DATA DATE: ' + date + \\\n '\\n\\n Bin Height/km'\n h = h1\n m = m1\n while h * 60 + m <= h2 * 60 + m2:\n hour = str(h)\n if len(hour) == 1:\n hour = \"0\" + hour\n minute = str(m)\n if len(minute) == 1:\n minute = \"0\" + minute\n first_lines = first_lines + ' ' + hour + ':' + minute + ' '\n h, m = next_time(h, m)\n first_lines = first_lines + '\\n\\n'\n return first_lines", "def _mdHeadings(self):\n\t\treturn [\n\t\t\t\t\t'ProcessingStatus', \n\t\t\t\t\t'OpenChCurrent', \n\t\t\t\t\t'NStates',\n\t\t\t\t\t'CurrentStep',\n\t\t\t\t\t'BlockDepth',\n\t\t\t\t\t'BlockSTD',\n\t\t\t\t\t'EventStart', \n\t\t\t\t\t'EventEnd', \n\t\t\t\t\t'EventDelay', \n\t\t\t\t\t'StateResTime',\n\t\t\t\t\t'ResTime', \n\t\t\t\t\t'AbsEventStart',\n\t\t\t\t\t'Threshold'\n\t\t\t\t]", "def test_headlines_samelevel(self):\n headline_str = \"* One\\n** Two\\n** Two\\n**\\\n Another one\"\n doc = parser.parse(headline_str)\n self.assertEqual(len(doc.children()), 1)\n\n h1 = doc.children()[0]\n self.assertEqual(len(h1.children), 3)", "def orphan_headers(self):\n orphans = []\n for cell in self.markdown_cells():\n orphans.extend([line for line in cell.source.splitlines()[1:] if MARKDOWN_HEADER.match(line)])\n return orphans", "def get_headers():\n soup = get_html()\n titles = []\n for i in soup.find_all('i'):\n header = str(i.text)\n titles.append(header.strip())\n return titles", "def head_pump_name_list(self):\n return list(self._link_reg.head_pump_names)", "def test_headlines_dependencies(self) -> None:\n\n def has_dep(dep: str) -> bool:\n for h in self.report.headlines:\n if self.rules.get_headline_rules(\n h.name\n ) == self.rules.get_headline_rules(dep):\n return True\n return False\n\n for headline in self.report.headlines:\n rule: Optional[HeadlineRules] = self.rules.get_headline_rules(headline.name)\n if not rule:\n continue\n\n for dependency_group in rule.dependencies:\n is_match: bool = False\n for dependency in dependency_group:\n if has_dep(dependency):\n is_match = True\n break\n if not is_match:\n dependencies_list: str = \", \".join(dependency_group)\n self.add_error(\n f\"Rubriken {headline.name} kräver att en av följande \"\n f\"rubriker finns med i dokumentet: {dependencies_list}.\",\n headline=headline,\n )", "def read_header(datafile):\n\thead = []\n\tf = open(datafile,'r')\n\tfor i,line in enumerate(f):\n\t\tif i is 10: break\n\t\thead += [line]\n\tf.close()\n\treturn head", "def get_metadata_header_lines(input_file):\n # type: (str) -> List[str]\n if not 
FileSystems.exists(input_file):\n raise ValueError('{} does not exist'.format(input_file))\n return[line for line in _header_line_generator(input_file) if\n line.startswith('##')]", "def get_headlines_from_one_page(driver,site,URL_exclusions):\r\n headlines = []\r\n links = get_links_from_one_page(driver,site,URL_exclusions)\r\n for i in range(len(links)):\r\n start = time.time()\r\n timeout = 0\r\n while timeout < 120: #Someimtes the page doesn't load. Quit the page after two minutes.\r\n try:\r\n results = driver.find_elements_by_class_name(\"g\") #Pages contained in class=\"g\" elements\r\n button = results[i].find_element_by_tag_name(\"a\") #Links under <a> tag\r\n link = button.get_attribute('href') #URL contained under 'href' \r\n if link.find(site) != -1: #Some \"g\" elements are not search results\r\n find = np.zeros(len(URL_exclusions))\r\n for j in range(len(URL_exclusions)):\r\n find[j] = bool(link.find(URL_exclusions[j]) == -1)\r\n if all(find) == True: #If no exclusion words found in UR\r\n button.click()\r\n sleep_time = np.random.random() * np.random.randint(1,6) #Sleep for random time between 1 and 5s to reduce chance of bot detection.\r\n time.sleep(sleep_time)\r\n headline = get_headline(driver)\r\n if headline != '': #Only interested if we succesfully find headline\r\n headlines.append(headline)\r\n driver.back()\r\n sleep_time = np.random.random() * np.random.randint(1,6)\r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n if timeout >= 120:\r\n break #If results hasn't loaded after 120 seconds, we need to break the for loop\r\n return headlines", "def test_headlines_named_entities(self) -> None:\n for headline in self.report.headlines:\n rule: Optional[HeadlineRules] = self.rules.get_headline_rules(headline.name)\n if not (rule and rule.named_entities):\n continue\n\n for ne_rule in rule.named_entities:\n if headline.has_named_entity(\n ne_rule.identity, ne_rule.type, ne_rule.subtype\n ):\n continue\n if ne_rule.cheat and re.search(ne_rule.cheat, headline.to_text()):\n continue\n self.add_error(ne_rule.message, headline=headline)", "def make_head_line():\n with open(args.out_folder.strip() + \"/files/head_line.txt\", \"a\") as headLine:\n headLine.write(\"#Query ID\\t#Subject\\t#Subject accession\\t#Subject Taxonomy ID\\t#Identity percentage\\t#Coverage\\t#evalue\\t#bitscore\\n\")", "def getLines(self):\n lines = []\n for item in self:\n lines.extend(item.textLines)\n return lines", "def ActiveHlt1Lines(self) :\n lines = ['Hlt1IncPhi','Hlt1CalibTracking']\n\n return lines", "def headline_text(node):\n pass", "def list_all_lines(self):\n lines = self.list_lines_gen(self.go_forward, start=self.get_oldest_index())\n self.read_pos = self.write_pos\n return lines", "def get_headers(self):\n return self.numHeadList", "def printHeadings(headings, format):\n print(\"\")\n print(format % headings)\n\n # how wide should a dash be\n dashes = 0\n for s in headings:\n if len(s) > dashes:\n dashes = len(s)\n\n # create a line with that many dashes\n s = \"\"\n while dashes > 0:\n s += '-'\n dashes -= 1\n\n # create a tupple with the right number of lines\n l = list()\n i = 0\n while i < len(headings):\n l.append(s)\n i += 1\n\n print(format % tuple(l))", "def is_headline(node):\n pass", "def Heads(start=None, end=None):\n if start is None: start = ida_ida.inf_get_min_ea()\n if end is None: end = ida_ida.inf_get_max_ea()\n\n ea = start\n if not idc.is_head(ida_bytes.get_flags(ea)):\n ea = 
ida_bytes.next_head(ea, end)\n while ea < end and ea != ida_idaapi.BADADDR:\n yield ea\n ea = ida_bytes.next_head(ea, end)", "def get_keywords(self):\r\n\t\treturn list(self.keyword_headlines().keys())", "def ActiveHlt2Lines(self) :\n\n lines = [\n 'Hlt2SingleMuon',\n 'Hlt2SingleMuonHighPT',\n 'Hlt2SingleMuonLowPT',\n ]\n \n return lines", "def head(file_name):\n #from itertools import islice\n with open('../test_files/' + file_name, 'r') as infile:\n list = infile.readlines()\n #printing the 1st 10 lines\n print('list of first 10 lines',list[:10])", "def get_all_headlines_from_chrome_2(site,URL_exclusions):\r\n headlines = []\r\n #Initial URL to pass to return search:\r\n URL = f'https://www.google.co.uk/search?as_q=&as_epq=irish+travellers&as_oq=&as_eq=&as_nlo=&as_nhi=&lr=&cr=&as_qdr=all&as_sitesearch={site}&as_occt=any&safe=active&as_filetype=&tbs='\r\n n = 0\r\n while n < 10:\r\n n += 1\r\n driver = launch_chrome()\r\n try:\r\n return_search(URL,driver)\r\n except:\r\n continue\r\n sleep_time = np.random.random() * np.random.randint(1,6) \r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n timeout = 0\r\n start = time.time()\r\n while timeout < 120:\r\n try:\r\n page_headlines = get_headlines_from_one_page(driver,site,URL_exclusions)\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n for headline in page_headlines:\r\n headlines.append(headline)\r\n try:\r\n next_button = driver.find_element_by_id('pnnext')\r\n URL = next_button.get_attribute('href') #Pass new URL to return_search()\r\n except NoSuchElementException:\r\n driver.quit() #Quit driver if can't find next button \r\n break\r\n driver.quit() #Quit driver each iteration to avoid triggering recaptcha.\r\n return headlines", "def toc(self):\n toc = []\n header_cells = (cell for cell in self.markdown_cells() if cell.source.startswith(\"##\"))\n for header_cell in header_cells:\n header = header_cell.source.splitlines()[0].strip().split()\n txt = ' '.join(header[1:])\n url = '#'.join([self.html_url, '-'.join(header[1:])])\n toc.append(\" \" * (len(header[0]) - 2) + f\"- [{txt}]({url})\")\n return toc", "def head(self):\n return self._head", "def head(self):\n return self._head", "def get_all_headlines_from_firefox_2(site,URL_exclusions):\r\n headlines = []\r\n #Initial URL to pass to return search:\r\n URL = f'https://www.google.co.uk/search?as_q=&as_epq=irish+travellers&as_oq=&as_eq=&as_nlo=&as_nhi=&lr=&cr=&as_qdr=all&as_sitesearch={site}&as_occt=any&safe=active&as_filetype=&tbs='\r\n n = 0\r\n while n < 10:\r\n n += 1\r\n driver = launch_firefox()\r\n try:\r\n return_search(URL,driver)\r\n except:\r\n continue\r\n sleep_time = np.random.random() * np.random.randint(1,6) \r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n timeout = 0\r\n start = time.time()\r\n while timeout < 120:\r\n try:\r\n page_headlines = get_headlines_from_one_page(driver,site,URL_exclusions)\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n for headline in page_headlines:\r\n headlines.append(headline)\r\n try:\r\n next_button = driver.find_element_by_id('pnnext')\r\n URL = next_button.get_attribute('href') #Pass new URL to return_search()\r\n except NoSuchElementException:\r\n driver.quit() #Quit driver if can't find next button \r\n break\r\n driver.quit() #Quit driver each iteration to avoid triggering recaptcha.\r\n return headlines", "def test_headlines_required(self) -> None:\n for rule in self.rules.headlines:\n if not rule.required:\n continue\n is_match: bool = False\n for 
headline in self.report.headlines:\n if self.rules.get_headline_rules(headline.name) == rule:\n is_match = True\n break\n if not is_match:\n self.add_error(f\"Rubriken {rule.name} som måste vara med saknas.\")", "def getAllHeaders():", "def find_lines(self):\n return []", "def head_pumps(self):\n for name in self._head_pumps:\n yield name, self._data[name]", "def lines(self, headings_only: bool = False) -> list[tuple[SourceRange, str, Verdict]]:\n if headings_only:\n return [(loc, line, verdict) for loc, line, verdict in self._lines if verdict > 0]\n return self._lines", "def ActiveHlt2Lines(self) :\n lines = [\n 'Hlt2B2HH',\n 'Hlt2B2PiPi',\n 'Hlt2B2KPi',\n 'Hlt2B2KK',\n 'Hlt2Lb2PK',\n 'Hlt2Lb2PPi'\n ]\n\n return lines", "def head(filename, lines=5):\n from itertools import islice\n with open(filename, \"r\") as f:\n return list(islice(f, lines))", "def head_pump_names(self):\n return self._head_pumps", "def header_huffington(self):\n head = '\\n ^^Polls ^^fetched ^^from ^^[http://elections.huffingtonpost.com/](http://elections.huffingtonpost.com/).\\n\\n'\n head += '***{}***\\n\\n'.format(self.get_greeting())\n head += '.\\n\\n'\n head += '.\\n\\n'\n return head", "def get_tagged_titles(ttls_lnks):\n\ttagged_titles = []\n\tfor title, link in ttls_lnks:\n\t\t# get the html tree for the paper's page\n\t\tpaper_tree = get_tree(link)\n\t\tpath = '//table/tr/th[text() = \"Subjects:\"]'\n\t\t# Check if html contains the table header \"Subjects:\"\n\t\tsubject_th = paper_tree.xpath(path)\n\t\t# If it does, this means paper is tagged so add to the list to be returned\n\t\tif subject_th:\n\t\t\ttagged_titles.append(title)\n\n\treturn tagged_titles", "def readTotitle(fh):\n preLines = []\n while True:\n l = fh.readline().strip()\n if l.startswith('>'):\n return (preLines,l)\n elif l == '':\n return preLines,None\n else:\n preLines.append(l)", "def ActiveHlt2Lines(self) :\n \n lines = ['Hlt2PassThrough','Hlt2Lumi','Hlt2DebugEvent',\n 'Hlt2Forward','Hlt2ErrorEvent','Hlt2Transparent']\n \n return lines", "def head(self) -> List[str]:\n log = [\n \"idx\",\n \"from\",\n \"n\",\n \"params\",\n \"module\",\n \"arguments\",\n \"in_channel\",\n \"out_channel\",\n ]\n if self.log_shapes:\n log.append(\"in_shape\")\n log.append(\"out_shape\")\n\n return log", "def content_heading(obj: Any, screen_w: int) -> Union[CursesLines, None]:\n\n heading = []\n string = f\"{obj['full_name'].upper()}: {obj['__description']}\"\n string = string + (\" \" * (screen_w - len(string) + 1))\n\n heading.append(\n tuple(\n [\n CursesLinePart(\n column=0,\n string=string,\n color=2,\n decoration=curses.A_UNDERLINE,\n )\n ]\n )\n )\n return tuple(heading)", "def getHeaderList(self):\r\n return self.headerList", "def head(filename, n=10):\n\tprint(\"[HEAD {}] {}\".format(n,filename))\n\tif filename[-3:].casefold()=='.gz':\n\t\twith gzip.open(filename, 'rt') as previewfile:\n\t\t\tprint(*(next(previewfile) for x in range(n)))\n\telse:\n\t\twith open(filename, 'r') as f:\n\t\t\tfor linenumber in range(n):\n\t\t\t\tline = f.readline()\n\t\t\t\tprint(line)\n\tprint(\"[END HEAD]\")", "def format_headings(block, markdown_prefix='###'):\n out = []\n for idx, line in enumerate(block):\n m = HEADINGS_RE.match(line)\n if m:\n out.append(u'%s %s' % (markdown_prefix, m.group(1)))\n if idx+1 < len(block) and block[idx+1].strip():\n out.append('')\n else:\n out.append(line)\n return out", "def getHostHead(self):\n return self.host_head", "def getHeadAngles(self):\n\n\t\trobot_head_yaw, robot_head_pitch = self.motion.getAngles(\"Head\", 
False)\n\n\t\t# return adjusted robot head angles\n\t\treturn [robot_head_yaw, -robot_head_pitch]", "def get_tokens_with_heads(self, snlp_doc):\n tokens = []\n heads = []\n offset = 0\n for sentence in snlp_doc.sentences:\n for token in sentence.tokens:\n for word in token.words:\n # Here, we're calculating the absolute token index in the doc,\n # then the *relative* index of the head, -1 for zero-indexed\n # and if the governor is 0 (root), we leave it at 0\n if word.head:\n head = word.head + offset - len(tokens) - 1\n else:\n head = 0\n heads.append(head)\n tokens.append(word)\n offset += sum(len(token.words) for token in sentence.tokens)\n return tokens, heads", "def head_pumps(self):\n return self._link_reg.head_pumps", "def getHeadMotion(self):\r\n\r\n headMotion = []\r\n\r\n headMotion.append(self.headMotionVelocityX)\r\n headMotion.append(self.headMotionVelocityY)\r\n headMotion.append(self.headMotionVelocityZ)\r\n headMotion.append(self.headMotionAccelerationX)\r\n headMotion.append(self.headMotionAccelerationY)\r\n headMotion.append(self.headMotionAccelerationZ)\r\n headMotion.append(self.headMotionRotationVelocityX)\r\n headMotion.append(self.headMotionRotationVelocityY)\r\n headMotion.append(self.headMotionRotationVelocityZ)\r\n headMotion.append(self.headMotionRotationAccelerationX)\r\n headMotion.append(self.headMotionRotationAccelerationY)\r\n headMotion.append(self.headMotionRotationAccelerationZ)\r\n\r\n return headMotion", "def getheadlines(self,\n category=None,\n language=None,\n country=None,\n sources=None,\n keywords=None,\n apiKey=None,\n version=None):\n # set sources to first in index by default\n if not sources:\n sources = 'abc-news'\n\n # get version and raise error if not 2\n version=self.version\n if self.version != 2:\n raise ValueError('You must use Version 2 to retrieve headlines from'\n ' News API service.')\n\n # retrive the api key if set; otherwise, error\n if not self._api_key:\n raise ValueError(\n 'You must use use an API key; to get a key visit https://news'\n 'api.org/. If you have an API key, set it using the '\n 'Api.SetCredentials method.')\n\n # if api key is there, set the params\n else:\n request_params = {\n \"category\": category,\n 'language': language,\n \"country\": country,\n \"sources\":sources,\n \"apiKey\": self._api_key,\n \"q\":keywords\n }\n\n # build the url\n url = self.base_url + self.__endpoints['heads']\n\n # make the request\n r = requests.get(url,params=request_params,timeout=self._timeout)\n\n # return the json\n return r.json()" ]
[ "0.81433356", "0.789887", "0.76946384", "0.7529572", "0.7259968", "0.7255244", "0.70318645", "0.70286566", "0.6947499", "0.6834149", "0.6717767", "0.6677363", "0.6588224", "0.6471544", "0.6457992", "0.64250094", "0.64140487", "0.64032876", "0.63827264", "0.635251", "0.63524187", "0.63407344", "0.6286872", "0.62673324", "0.6264295", "0.6264189", "0.6196287", "0.61236507", "0.61232185", "0.60719997", "0.6049211", "0.59960896", "0.5991497", "0.5984683", "0.59706223", "0.5917945", "0.5873204", "0.5862349", "0.5860416", "0.58565617", "0.5832449", "0.5825333", "0.5810187", "0.57779634", "0.5772497", "0.5726605", "0.568882", "0.5685706", "0.56851393", "0.5684003", "0.56626827", "0.5646931", "0.5645809", "0.56320196", "0.5631953", "0.56279016", "0.56259423", "0.5606517", "0.55958617", "0.5591904", "0.5590204", "0.5575666", "0.5574668", "0.5565733", "0.55635333", "0.5560007", "0.5524442", "0.55062085", "0.55054826", "0.5478906", "0.5474522", "0.54692066", "0.54611164", "0.5437491", "0.5437278", "0.5437278", "0.5436958", "0.54237443", "0.5422462", "0.5419396", "0.54190433", "0.5409855", "0.54097563", "0.5401826", "0.5398753", "0.53749543", "0.536849", "0.53613514", "0.5356634", "0.5340845", "0.5339802", "0.53296167", "0.5329051", "0.5304669", "0.53039455", "0.5297129", "0.5291244", "0.52725345", "0.5271485", "0.5269885" ]
0.76380426
3
Returns a list of the headlines with the corresponding keyword
def get_headlines_with_keyword(self, kw):
    key_head = self.keyword_headlines()
    headlines = set()
    for headlinekw in key_head[kw]:
        content = headlinekw.headlineid.content
        headlines.add(content)
    return list(headlines)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_headlines(self, kw = None):\r\n\t\tif kw:\r\n\t\t\treturn self.get_headlines_with_keyword(kw)\r\n\t\telse:\r\n\t\t\treturn self.get_all_headlines()", "def keyword_headlines(self):\r\n\t\td = {}\r\n\r\n\t\tfor q in self.keyword_queryset:\r\n\t\t\td[q.content] = self.headlinekeyword_queryset.filter(keywordid = q.id)\r\n\r\n\t\treturn d", "def get_all_headlines(self):\r\n\t\tlist_vals = list(self.keyword_headlines().values())\r\n\t\tuniq_headlines = set()\r\n\t\tfor list_val in list_vals:\r\n\t\t\tfor headlineobj in list_val:\r\n\t\t\t\tuniq_headlines.add(headlineobj.headlineid.content)\r\n\r\n\t\treturn list(uniq_headlines)", "def get_keywords(self):\r\n\t\treturn list(self.keyword_headlines().keys())", "def get_headline_search(query):\n query = query.replace(' ',\"\")\n category=\"\"\n get_headlines_url = 'https://newsapi.org/v2/top-headlines?category={}&query={}&language=en&apiKey={}'.format(category,query,api_key)\n headlines_results = []\n with urllib.request.urlopen(get_headlines_url) as url:\n get_headlines_data = url.read()\n get_headlines_response = json.loads(get_headlines_data)\n if get_headlines_response['articles']:\n headlines_result_list=get_headlines_response['articles']\n for headline in headlines_result_list:\n headlines_results.append(headline)\n return headlines_results", "def all_headlines_from(url):\n pass", "def get_headlines(driver,site,URL_exclusions):\r\n links = get_all_links(driver,site,URL_exclusions)\r\n headlines = []\r\n n=0\r\n for link in links:\r\n driver = make_driver_obj() #get_all_links quits driver when finished.\r\n try:\r\n while True:\r\n try:\r\n driver.get(link) #No need to accept cookies to don't need return_search\r\n break\r\n except:\r\n continue\r\n except: #If we can't open the URL for any reason.\r\n driver.quit()\r\n continue\r\n n += 1\r\n headline = get_headline(driver)\r\n if headline != '':\r\n headlines.append(headline) #Only append if able to identify headline text\r\n #print(n)\r\n #print(headline)\r\n #print()\r\n driver.quit()\r\n return headlines", "def parse_headlines(self):\n headlines = re.findall(r\"^\\.\\.\\.(.*?)\\.\\.\\.[ ]?\\n\\n\", self.unixtext,\n re.M | re.S)\n headlines = [\" \".join(h.replace(\"...\",\n \", \").replace(\"\\n\", \" \").split())\n for h in headlines]\n return headlines", "def all_headlines(html_root_node):\n pass", "def gather_headlines(urls):\n pass", "def headwords ():\n\n q = request.args.get ('q')\n fulltext = request.args.get ('fulltext')\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n where = ''\n\n if (not q) and (not fulltext):\n # Retrieve full list of headwords\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n if q:\n q = q.replace ('-', '')\n q = q.replace ('%', '')\n q = q.replace ('?', '_')\n q = q.replace ('*', '%')\n where = \"(keyword LIKE :q) AND\"\n\n if not fulltext:\n # easy out\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE keyword LIKE :q\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'q' : q, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)\n\n with current_app.config.dba.engine.begin () as 
conn:\n res = execute (conn, r\"\"\"\n SELECT DISTINCT\n k.id,\n k.webkeyword COLLATE utf8mb4_bin AS webkeyword,\n k.no\n FROM keyword k,\n article a\n WHERE {where} (MATCH (a.idxtext) AGAINST (:fulltext IN BOOLEAN MODE))\n AND a.no = k.no\n ORDER BY k.sortkeyword, k.n, k.no\n LIMIT :limit\n OFFSET :offset\n \"\"\".format (where = where), { 'q' : q, 'fulltext' : fulltext,\n 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)", "def lemma_headwords(self):\n new_var = 'lemma_headword'\n lemma_heads = [clx._lemmas[i]['Head'] for i in xrange(len(clx._lemmas))]\n has_item = self.compare_items(lemma_heads)\n new_column = []\n if False in has_item:\n self._warning_msg('lemma_headword', lemma_heads)\n for record, exists in zip(self._dict, has_item):\n if exists:\n lemma_id = clx.wordform_lookup(record)[0].IdNumLemma\n lemma_head = clx.lemma_by_id(lemma_id).Head\n else:\n lemma_head = None\n new_column.append(lemma_head)\n self._append_column(new_column, new_var)", "def test_headlines_predefined(self) -> None:\n for headline in self.report.headlines:\n if not self.rules.get_headline_rules(headline.name):\n headlines = [headline.name for headline in self.rules.headlines]\n suggestion, _ = process.extractOne(\n headline.name, headlines, scorer=fuzz.partial_ratio\n )\n self.add_error(\n f\"{headline.name} är inte en valid rubrik. \"\n f\"Rättningsförlsag: {suggestion}.\",\n headline=headline,\n )\n elif re.search(\"\\\\W{1,}\", headline.name, re.I):\n self.add_error(\n f\"Rubriken {headline.name} innehåller tecken som inte är \"\n \"alfanumeriska vilket inte är tillåtet för en rubrik.\",\n headline=headline,\n )", "def get_all_headline_data():\n\twebsites = database.get_website_URLs()\n\tall_headlines_arr = []\n\tfor curr_elt in websites:\n\t\tcurr_website = curr_elt[0]\n\t\tsource = curr_elt[1]\n\t\tcurr_headline_arr = get_headline_data(curr_website, source)\n\t\tall_headlines_arr.append(curr_headline_arr)\n\treturn all_headlines_arr", "def extract_keywords(self):\n keywords = [] \n for keyword in self.watsonLanguageModel['keywords'][:self.entitySizeLimit]: \n keywords.append(keyword['text'])\n return keywords", "def column_headlines(self):\n elements = self._selenium.find_elements_by_xpath(\n '//div[@id=\"content\"]/table/thead/tr/th/a')\n return [x.text for x in elements]", "def get_headlines(id):\n get_headlines_url = secondary_url.format(id,api_key)\n\n with urllib.request.urlopen(get_headlines_url) as url:\n get_headlines_data = url.read()\n get_headlines_response= json.loads(get_headlines_data)\n\n headlines_results = None\n\n if get_headlines_response['articles']:\n headlines_results_list = get_headlines_response['articles']\n headlines_results = process_headlines(headlines_results_list)\n\n return headlines_results", "def add_keywords(self, response: Response) -> list:\n return response.xpath(\"//ul[@class='term']/li/a/text()\").getall()", "def get_dictionary_file_lines_for_keywords(self):\n keywords_iter = iter(self.keywords)\n next_keyword = keywords_iter.next()\n print(\"Searching for keyword {}\".format(next_keyword))\n\n self.dictionary_file.open_handle()\n result_lines = list()\n while next_keyword:\n line = self.dictionary_file.read_line_to_obj()\n if not line:\n print(\"Reached end of dictionary file\")\n break\n\n if line.term < next_keyword:\n continue\n elif line.term == next_keyword:\n print(\"Found postings list for term {}\".format(next_keyword))\n result_lines.append(line)\n\n try:\n next_keyword = keywords_iter.next()\n print(\"Searching for 
keyword {}\".format(next_keyword))\n except StopIteration:\n print(\"Finished searching for all keywords\")\n break\n\n return result_lines", "def colorize_headlines_visitor(c, p, item):\n if p.h.startswith(\"!= \"):\n f = item.font(0)\n f.setBold(True)\n item.setFont(0, f)\n raise leoPlugins.TryNext", "def all_words(self, min_word_length=0):\n return [word for headline in self.headlines for word in\n headline.words(min_word_length=min_word_length)]", "def getHeadParts(self):\n return self.headParts", "def articles_id_headwords (_id):\n\n offset = int (arg ('offset', '0', re_integer_arg))\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, r\"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE no = :id\n ORDER BY sortkeyword\n LIMIT :limit\n OFFSET :offset\n \"\"\", { 'id' : _id, 'offset' : offset, 'limit' : limit })\n\n return make_headwords_response (res, limit)", "def onHeadlineClick(self, tag, keywords):\n self.handleEvent(\"headclick1\", tag, keywords)", "def get_negative_headlines(headlines,words):\r\n negative_headlines = []\r\n for headline in headlines:\r\n for word in words:\r\n if headline.lower().find(word) != -1: #If particular word is found in lowercased headline.\r\n negative_headlines.append(headline)\r\n break #Stop iterating through words when we have found one negative word.\r\n return negative_headlines", "def get_headline_data(website_url, source):\n\tpage = requests.get(website_url)\n\tpage.raise_for_status()\n\tall_headlines = []\n\tbs_obj = bs4.BeautifulSoup(page.text, 'html.parser')\n\titem_list = bs_obj.select('item')\n\tprintable = set(string.printable)\n\tfor curr_item in item_list:\n\t\titem_title = curr_item.title.string\n\t\tfollowup_link = curr_item.select('link')[0].string\n\t\tdatestamp = curr_item.select('pubdate')[0].string\n\t\titem_title = item_title.replace(\"&apos;\", \"'\")\n\t\tfollowup_link = followup_link.replace(u\"\\u2018\", \"'\").replace(u\"\\u2019\", \"'\")\n\t\titem_title = item_title.encode('utf-8', errors='ignore')\n\t\tnew_headline = data_structures.Headline(item_title, followup_link, source, datestamp)\n\t\tall_headlines.append(new_headline)\n\treturn all_headlines", "def get_headings(self):\n return self.headings", "def GetListHead(self, *args, **kwargs):\n pass", "def reddit_headlines(reddit):\n\n # Set metadata to make request:\n url = \"https://www.reddit.com/r/{}/.json?limit=10\".format(reddit)\n headers = {'User-Agent': '{} Reddit headlines'.format(reddit)}\n\n # Consume Reddit's API to gather info:\n html = requests.get(url, headers=headers)\n\n # If status code is OK:\n if html.status_code == requests.codes.ok:\n # Parse resonse:\n info = json.loads(html.content.decode('utf-8'))\n # pprint(info)\n\n # Get relevant info:\n child = info['data']['children']\n titles = [unidecode(elem['data']['title']) for elem in child]\n titles = \"... 
\".join([title for title in titles])\n else:\n titles = None\n\n return titles", "def get_headlines():\n country = request.args.get('country', type=str)\n if country is not None:\n data = te.getNews(country=country).dropna()\n return jsonify(data.to_dict(orient='records'))\n data = te.getNews()\n return jsonify(te.getNews().dropna().to_dict(orient='records'))", "def parse_keywords(medline):\n keyword_list = medline.find(\"KeywordList\")\n keywords = list()\n if keyword_list is not None:\n for k in keyword_list.findall(\"Keyword\"):\n if k.text is not None:\n keywords.append(k.text)\n keywords = \"; \".join(keywords)\n else:\n keywords = \"\"\n return keywords", "def fetch_headlines(self, retry=False):\n top_headlines_res = None\n try:\n top_headlines_res = self.news_api.get_top_headlines(\n country='in', page_size=100)\n except newsapi.newsapi_exception.NewsAPIException as err:\n print('NewsAPI Exception==', err)\n if not retry:\n print('Retrying with another key...')\n self.api_key = os.getenv('NEWS_API_KEY_BACKUP')\n self.configure_news_api()\n top_headlines_res = self.fetch_headlines(retry=True)\n else:\n return None\n except Exception as err:\n print('Exception occurred==', err)\n return None\n headlines = {}\n if top_headlines_res and top_headlines_res['status'] == 'ok':\n headlines = top_headlines_res\n else:\n headlines = None\n return headlines", "def find_keywords(anchor, keywords=['']):\n rel_keywords = []\n href, content = parse_anchor(anchor)\n \n for keyword in keywords:\n kw = keyword.lower()\n if kw in href.lower() or kw in content.lower():\n rel_keywords.append(keyword)\n \n return rel_keywords", "def get_metadata_header_lines(input_file):\n # type: (str) -> List[str]\n if not FileSystems.exists(input_file):\n raise ValueError('{} does not exist'.format(input_file))\n return[line for line in _header_line_generator(input_file) if\n line.startswith('##')]", "def topheadlines():\n newsSource = click.prompt(\"Please enter your choice from listsources\")\n \n main_url = \"https://newsapi.org/v2/top-headlines?apiKey=f45fa2c71932483f832f0cc745af0325&sources=\"+newsSource\n\n\t# fetching data in json format \n open_headline = requests.get(main_url).json() \n\n\t# getting all headlines in a string articles \n headline = open_headline[\"articles\"] \n\n\t# empty list which will \n\t# contain all trending newssources \n output = [] \n\t\n for h in headline: \n click.echo('\\n')\n click.secho(click.style('TITLE: ' + h['title'], fg='red'))\n click.secho(click.wrap_text(h['description']))\n click.secho(click.style('DOMAIN: ' + h['url'], fg='blue'))\n \n \t\n for i in output[:11]:\n print(i)", "def get_keywords(self, sectioned_text):\n \n keywords = []\n \n if 'full text' in list(sectioned_text.keys()):\n \n for word in self.keyword_list:\n if word in sectioned_text['full text']:\n keywords.append(word)\n \n else: \n fulltext = self.restitch_text(sectioned_text)\n for word in self.keyword_list:\n if word in fulltext:\n keywords.append(word)\n \n return keywords", "def get_keywords_for_movie(url):\n pass", "def get_tagged_titles(ttls_lnks):\n\ttagged_titles = []\n\tfor title, link in ttls_lnks:\n\t\t# get the html tree for the paper's page\n\t\tpaper_tree = get_tree(link)\n\t\tpath = '//table/tr/th[text() = \"Subjects:\"]'\n\t\t# Check if html contains the table header \"Subjects:\"\n\t\tsubject_th = paper_tree.xpath(path)\n\t\t# If it does, this means paper is tagged so add to the list to be returned\n\t\tif subject_th:\n\t\t\ttagged_titles.append(title)\n\n\treturn tagged_titles", "def 
get_head_dependents(sentence: str) -> List[str]:\n sentence = re.sub(r'\\s+', ' ', sentence)\n doc = nlp(sentence)\n dep = [token.dep_ for token in doc]\n\n # Get list of compounds in doc\n compounds = [token for token in doc if token.dep_ == 'compound']\n\n # Identifies roots and direct objects\n for token in compounds:\n if token.head.dep_ == 'dobj':\n dep[token.i] = 'dobj'\n elif token.head.dep_ == 'ROOT':\n dep[token.i] = 'ROOT'\n\n return [token.text for token in doc if dep[token.i] in ('ROOT', 'dobj')]", "def BuildHeadList(all_file_contents):\n head_list = []\n list_all_file_contents = (all_file_contents)\n for line in list_all_file_contents: \n if line[0:4] != 'ATOM':\n head_list.append(line)\n else:\n break\n\n return head_list", "def extract_head(data):\n tl = data['tls'][data['i']];\n br = data['brs'][data['i']];\n head = extract_area(data,(tl,br));\n return head;", "def _get_keywords(self, title: str):\n # Prepare data\n keywords = set()\n stops = set(nltk.corpus.stopwords.words(\"english\"))\n stemmer = nltk.stem.SnowballStemmer(\"english\")\n ent_types = [\n \"PERSON\", \"ORGANIZATION\", \"FACILITY\", \"LOCATION\", \"DATE\",\n \"TIME\", \"GPE\", \"MONEY\",\n ]\n excluded_word_types = [\"RB\", \"IN\", \"PRP\"]\n\n # Tokenize and chunk words using NLTK\n tokens = nltk.tokenize.word_tokenize(title)\n positions = nltk.pos_tag(tokens)\n chunk = nltk.ne_chunk(positions)\n\n # Make a word list of keywords we want to add, that\n # are not part of our excluded word types.\n words = set()\n for pos in positions:\n word, word_type = pos\n if word.isalnum() and word_type not in excluded_word_types:\n words.add(word)\n\n # Add all entities to keyword list and remove them from\n # our remaining word set so they don't get added again\n # and stemmed later.\n for subtree in chunk.subtrees(filter=lambda t: t.label() in ent_types):\n for leaf in subtree.leaves():\n keywords.add(leaf[0])\n if leaf[0] in words:\n words.remove(leaf[0])\n\n # Add remaining words in list and stem them to base form,\n # stemming means we change words from e.g. 
\"eating\" to \"eat\".\n for word in words:\n if word not in stops:\n keywords.add(stemmer.stem(word))\n\n return sorted([keyword.lower() for keyword in keywords])", "def get_all_headlines_from_chrome(site,URL_exclusions):\r\n headlines = []\r\n #Initial URL to pass to return search:\r\n URL = f'https://www.google.co.uk/search?hl=en&as_q=&as_epq=&as_oq=travellers&as_eq=quarantine+travel+train+flight+tourist+archive+airport+covid+coronavirus+hotel+holiday+honeymoon&as_nlo=&as_nhi=&lr=&cr=&as_qdr=all&as_sitesearch={site}&as_occt=title&safe=active&as_filetype=&tbs='\r\n n = 0\r\n while n < 10:\r\n n += 1\r\n driver = launch_chrome()\r\n try:\r\n return_search(URL,driver)\r\n except:\r\n continue\r\n sleep_time = np.random.random() * np.random.randint(1,6) \r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n timeout = 0\r\n start = time.time()\r\n while timeout < 120:\r\n try:\r\n page_headlines = get_headlines_from_one_page(driver,site,URL_exclusions)\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n for headline in page_headlines:\r\n headlines.append(headline)\r\n try:\r\n next_button = driver.find_element_by_id('pnnext')\r\n URL = next_button.get_attribute('href') #Pass new URL to return_search()\r\n except NoSuchElementException:\r\n driver.quit() #Quit driver if can't find next button \r\n break\r\n driver.quit() #Quit driver each iteration to avoid triggering recaptcha.\r\n return headlines", "def test_headlines_predecessors(self):\n headline_str = \"* One\\n** Two\\n*** Three\\n** Two\\n*** Three\\n* One\"\n\n doc = parser.parse(headline_str)\n self.assertEqual(len(doc.children()), 2)\n\n h1_1 = doc.children()[0]\n h1_2 = doc.children()[1]\n\n self.assertEqual(len(h1_1.children), 2)\n self.assertEqual(len(h1_2.children), 0)\n\n h2_1 = h1_1.children[0]\n h2_2 = h1_1.children[1]\n\n self.assertEqual(len(h2_1.children), 1)\n self.assertEqual(len(h2_2.children), 1)", "def complete_setup_pocs(self, text, line, begidx, endidx):\n names = ['all'] + hardware.get_all_names()\n return [name for name in names if name.startswith(text)]", "def getKeywords(self):\n return", "def get_paper_keywords(tree):\n\tpath = '//table/tr/th[text() = \"Keywords:\"]/following-sibling::td/text()'\n\tkeywords = tree.xpath(path)\n\t# xpath returns a list with the keywords as a single string element separated by new lines, commas or semi-colons\n\t# Make this into a list of keywords\n\tif keywords:\n\t\t# Split on new lines, commas and semi-colons\n\t\tkeywords = re.split('[\\\\n,;]', keywords[0])\n\t\t# Remove trailing white space and empty strings\n\t\tkeywords = [kw.strip() for kw in keywords if kw]\n\n\treturn keywords", "def get_headlines(outlet):\n if outlet == \"BBC\":\n parser = news_parser.BBC(\"https://www.bbc.co.uk\")\n elif outlet == \"DailyMail\":\n parser = news_parser.DailyMail(\"https://www.dailymail.co.uk\")\n elif outlet == \"Guardian\":\n parser = news_parser.Guardian(\"https://www.theguardian.com\")\n elif outlet == \"Metro\":\n parser = news_parser.Metro(\"https://www.metro.co.uk\")\n elif outlet == \"Mirror\":\n parser = news_parser.Mirror(\"https://www.mirror.co.uk/news/\")\n elif outlet == \"Reuters\":\n parser = news_parser.Reuters(\"https://uk.reuters.com\")\n elif outlet == \"Sun\":\n parser = news_parser.Sun(\"https://www.thesun.co.uk\")\n elif outlet == \"Independent\":\n parser = news_parser.Independent(\"https://www.independent.co.uk\")\n else:\n parser = news_parser.BBC(\"https://www.bbc.co.uk/news\")\n \n index = outlets.index(outlet)\n url_list = 
[]\n while len(url_list) < 50:\n opts = {\n 'language': ['en'],\n 'source_id': [ids[index]],\n 'published_at_start':'NOW-1DAY',\n 'published_at_end':'NOW',\n 'sort_by': 'hotness',\n 'sort_direction': 'desc',\n 'cursor': '*',\n 'per_page': 100\n }\n\n try:\n api_response = api_instance.list_stories(**opts)\n for story in api_response.stories:\n url = story.links.permalink\n if url:\n url_list.append(url)\n except ApiException as e:\n print(\"Exception when calling DefaultApi->list_stories: %s\\n\" %e)\n \n opts['cursor'] = api_response.next_page_cursor\n \n url_list = url_list[:50]\n \n articles_list = []\n for url in url_list:\n raw_article = parser.get_article(url)\n if raw_article is not None:\n articles_list.append(raw_article)\n\n articles = []\n for article in articles_list:\n parsed_article = parser.parse(article)\n if parsed_article is not None:\n articles.append(parsed_article)\n \n if len(articles) > 30:\n articles = articles[:30]\n\n return articles", "def headline(self):\n \n return self._headline", "def parse_title(self) -> list:\n scanning = False # start of a title is found, this may be the second of later part of that.\n ret = [] # to return\n temp = [] # deal with mutiple line titles.\n for page in self.pdf.pages:\n text = page.extract_text()\n # it's possible that a blank page exists which will let text be None.\n if text is None:\n continue\n lines = text.split('\\n')\n\n for line in lines:\n if self.__is_part_of_title(line):\n # middle part of a title\n if scanning:\n temp.append(line)\n # find a new title\n else:\n scanning = True\n temp = [line]\n else:\n # just find an entire title\n if scanning:\n scanning = False\n ret.append(\"\".join(temp))\n # remove wrong titles ( maybe trigger words occur at other part of the document )\n for title in ret:\n if self.title_keyword not in title:\n ret.remove(title)\n return ret", "def get_tokens_with_heads(self, snlp_doc):\n tokens = []\n heads = []\n offset = 0\n for sentence in snlp_doc.sentences:\n for token in sentence.tokens:\n for word in token.words:\n # Here, we're calculating the absolute token index in the doc,\n # then the *relative* index of the head, -1 for zero-indexed\n # and if the governor is 0 (root), we leave it at 0\n if word.head:\n head = word.head + offset - len(tokens) - 1\n else:\n head = 0\n heads.append(head)\n tokens.append(word)\n offset += sum(len(token.words) for token in sentence.tokens)\n return tokens, heads", "def hypernym(self, sense=None):\n s = self._synset(self.text)\n\n if not s:\n return []\n\n hyper = s.hypernyms()\n\n results = list()\n for h in hyper:\n results.append(h.lemma_names())\n\n if not sense:\n return results\n\n return results[:sense]", "def getheadlines(self,\n category=None,\n language=None,\n country=None,\n sources=None,\n keywords=None,\n apiKey=None,\n version=None):\n # set sources to first in index by default\n if not sources:\n sources = 'abc-news'\n\n # get version and raise error if not 2\n version=self.version\n if self.version != 2:\n raise ValueError('You must use Version 2 to retrieve headlines from'\n ' News API service.')\n\n # retrive the api key if set; otherwise, error\n if not self._api_key:\n raise ValueError(\n 'You must use use an API key; to get a key visit https://news'\n 'api.org/. 
If you have an API key, set it using the '\n 'Api.SetCredentials method.')\n\n # if api key is there, set the params\n else:\n request_params = {\n \"category\": category,\n 'language': language,\n \"country\": country,\n \"sources\":sources,\n \"apiKey\": self._api_key,\n \"q\":keywords\n }\n\n # build the url\n url = self.base_url + self.__endpoints['heads']\n\n # make the request\n r = requests.get(url,params=request_params,timeout=self._timeout)\n\n # return the json\n return r.json()", "def headwords_id_context (_id):\n\n limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)\n\n with current_app.config.dba.engine.begin () as conn:\n res = execute (conn, \"\"\"\n SELECT keyword, sortkeyword\n FROM keyword\n WHERE id = :id\n \"\"\", { 'id' : _id })\n keyword, sortkeyword = res.fetchone ()\n\n res1 = execute (conn, \"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE sortkeyword < :sortkeyword\n ORDER BY sortkeyword DESC, n DESC, no DESC\n LIMIT :limit\n \"\"\", { 'sortkeyword' : sortkeyword, 'limit' : limit })\n\n res2 = execute (conn, \"\"\"\n SELECT id, webkeyword, no\n FROM keyword\n WHERE sortkeyword >= :sortkeyword\n ORDER BY sortkeyword, n, no\n LIMIT :limit\n \"\"\", { 'sortkeyword' : sortkeyword, 'limit' : limit + 1 })\n\n res = []\n\n for row in reversed (res1.fetchall ()):\n res.append (row[:3])\n for row in res2:\n res.append (row[:3])\n\n return make_headwords_response (res, limit)", "def headline_text(node):\n pass", "def lookup_keywords(filename):\n keywords = []\n start_of_table = r'\\*+\\s+'\n start_of_kw_table = r'\\*+\\s+Keyword'\n in_kw_table = False\n f = open(filename, \"r\")\n for line in f.readlines():\n line = line.rstrip()\n if len(line) == 0 or line.startswith(\"#\"):\n continue # skip comments and blanks\n if re.match(start_of_kw_table, line):\n in_kw_table = True # table started\n continue\n if re.match(start_of_table, line) and not re.match(start_of_kw_table, line):\n in_kw_table = False # table ended\n continue\n if line.startswith(' '):\n continue # skip content rows\n if in_kw_table:\n keywords.append(line)\n f.close()\n return keywords", "def readTotitle(fh, titleChar):\n preLines = []\n while True:\n l = fh.readline().strip()\n if l.startswith(titleChar):\n return (preLines,l)\n elif l == '':\n return preLines,None\n else:\n preLines.append(l)", "def get_wiki_lines(wt, predicate=None):\n return [line for line in wt.contents.split('\\n') if not callable(predicate) or predicate(line)]", "def words(self, min_word_length=0):\n\n word_tokenizer = nltk.RegexpTokenizer(r'\\b[^\\s]+\\b')\n headline_string = self.headline_string.lower().replace(\"’\", \"'\")\n return [word for word in word_tokenizer.tokenize(headline_string) if len(word) >= min_word_length]", "def get_keywords(self, number=10):\n keyword = []\n node_weight = OrderedDict(sorted(self.node_weight.items(), key=lambda t: t[1], reverse=True))\n for i, (key, value) in enumerate(node_weight.items()):\n # print(key + ' - ' + str(value))\n keyword.append(key)\n if i > number:\n break\n return keyword", "def is_headline(node):\n pass", "def Keywords(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('keywords', default)\n return [HEP.KeywordObject(i) for i in tmp]", "def findLegHeaders(words, header, how='match'):\n locs = []\n for i, line in enumerate(words):\n match = header.match(line)\n if match is not None:\n locs.append(i)\n\n return locs", "def get_keywords(self):\n keys = []\n for post in self:\n keys.extend(post.Keywords)\n return 
list(sorted(set(keys)))", "def get_all_headlines_from_firefox(site,URL_exclusions):\r\n headlines = []\r\n #Initial URL to pass to return search:\r\n URL = f'https://www.google.co.uk/search?hl=en&as_q=&as_epq=&as_oq=travellers&as_eq=quarantine+travel+train+flight+tourist+archive+airport+covid+coronavirus+hotel+holiday+honeymoon&as_nlo=&as_nhi=&lr=&cr=&as_qdr=all&as_sitesearch={site}&as_occt=title&safe=active&as_filetype=&tbs='\r\n n = 0\r\n while n < 10:\r\n n += 1\r\n driver = launch_firefox()\r\n try:\r\n return_search(URL,driver)\r\n except:\r\n continue\r\n sleep_time = np.random.random() * np.random.randint(1,6) \r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n timeout = 0\r\n start = time.time()\r\n while timeout < 120:\r\n try:\r\n page_headlines = get_headlines_from_one_page(driver,site,URL_exclusions)\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n for headline in page_headlines:\r\n headlines.append(headline)\r\n try:\r\n next_button = driver.find_element_by_id('pnnext')\r\n URL = next_button.get_attribute('href') #Pass new URL to return_search()\r\n except NoSuchElementException:\r\n driver.quit()\r\n break\r\n driver.quit() #Quit driver each iteration to avoid triggering recaptcha.\r\n return headlines", "def readTotitle(fh):\n preLines = []\n while True:\n l = fh.readline().strip()\n if l.startswith('>'):\n return (preLines,l)\n elif l == '':\n return preLines,None\n else:\n preLines.append(l)", "def test_headlines_successors(self):\n headline_str = \"* First level\\n** Second level\\n*** Third level\"\n doc = parser.parse(headline_str)\n self.assertEqual(len(doc.children()), 1)\n\n h1 = doc.children()[0]\n self.assertEqual(len(h1.children), 1)\n\n h2 = h1.children[0]\n self.assertEqual(len(h2.children), 1)\n\n h3 = h2.children[0]\n self.assertEqual(len(h3.children), 0)", "def headline(self):\r\n return '%s%s %s%s' % (BLUE, self.title,\r\n NORMAL, self.link)", "def list(self, key):\n\n if \"~\" in key or key == \"title\":\n v = self(key, connector=\"\\n\")\n if v == \"\": return []\n else: return v.split(\"\\n\")\n elif key in self: return self[key].split(\"\\n\")\n else: return []", "def keyword_extraction(file_content):\n\n # [question, question....]\n for key, value in file_content.items():\n seg, hidden = ltp.seg([key])\n # ner: [[('Nh', 2, 2)]]\n ner = ltp.ner(hidden)\n # keywords: [('PERSON', \"吴轩\")], tuple_item: ('Nh', 2, 2)\n keywords = [(tag_to_name[tuple_item[0]], to_string(seg[0][tuple_item[1]: tuple_item[2]+1])) for tuple_item in ner[0]]\n file_content[key].keywords = keywords\n\n return file_content", "def __get_keywords(self, text_list):\r\n specialKW = [\r\n 'run keyword',\r\n 'run keyword and continue on failure',\r\n 'run keyword and expect error',\r\n 'run keyword and ignore error',\r\n 'run keyword and return'\r\n 'run keyword and return if',\r\n 'run keyword and return status',\r\n 'run keyword if',\r\n 'run keyword if all critical tests passed',\r\n 'run keyword if all tests passed',\r\n 'run keyword if any critical tests failed',\r\n 'run keyword if any tests failed',\r\n 'run keyword if test failed',\r\n 'run keyword if test passed',\r\n 'run keyword if timeout occurred',\r\n 'run keyword unless',\r\n 'run keywords',\r\n 'wait until keyword succeeds',\r\n 'repeat keyword',\r\n 'else'\r\n ]\r\n specialSettings = [\r\n '[Arguments]',\r\n '[Documentation]'\r\n ]\r\n L = []\r\n if text_list[0] in specialSettings:\r\n return L\r\n for item in text_list:\r\n if self.__is_keyword(item):\r\n L.append(item)\r\n if not 
item.replace('_', ' ').replace('-', ' ').lower() in specialKW:\r\n break\r\n return L", "def test_headlines_named_entities(self) -> None:\n for headline in self.report.headlines:\n rule: Optional[HeadlineRules] = self.rules.get_headline_rules(headline.name)\n if not (rule and rule.named_entities):\n continue\n\n for ne_rule in rule.named_entities:\n if headline.has_named_entity(\n ne_rule.identity, ne_rule.type, ne_rule.subtype\n ):\n continue\n if ne_rule.cheat and re.search(ne_rule.cheat, headline.to_text()):\n continue\n self.add_error(ne_rule.message, headline=headline)", "def top_headlines():\n source = \"google-news\" # TODO: Add option to choose source\n try:\n r = requests.get(\"https://newsapi.org/v2/top-headlines?sources=\" + source + \"&apiKey=\" + NEWS_API_TOKEN)\n data = r.json()\n # TODO: Find a way to include multiple articles instead of a random one\n article = data['articles'][randint(0, len(data['articles']) - 1)]\n imageurl = article['urlToImage'].replace('\\\\', '')\n embed = discord.Embed(\n title=article['title'],\n description=article['description'],\n url=article['url'],\n image_url=imageurl\n )\n embed.set_image(url=imageurl)\n embed.set_footer(text=\"Powered by NewsAPI! (newsapi.org)\")\n return embed\n except Exception as e:\n print(e)\n return discord.Embed(title=\"Something went wrong\")", "def read_header(datafile):\n\thead = []\n\tf = open(datafile,'r')\n\tfor i,line in enumerate(f):\n\t\tif i is 10: break\n\t\thead += [line]\n\tf.close()\n\treturn head", "def get_main_headline(self, default=''):\n for segment in self.segments:\n if segment.headlines:\n return segment.headlines[0]\n return default", "def keywords(text:str) -> list:\n return sorted(set(text.split(' ')), key=frequency, reverse=True)[0:5]", "def get_headlines_from_one_page(driver,site,URL_exclusions):\r\n headlines = []\r\n links = get_links_from_one_page(driver,site,URL_exclusions)\r\n for i in range(len(links)):\r\n start = time.time()\r\n timeout = 0\r\n while timeout < 120: #Someimtes the page doesn't load. 
Quit the page after two minutes.\r\n try:\r\n results = driver.find_elements_by_class_name(\"g\") #Pages contained in class=\"g\" elements\r\n button = results[i].find_element_by_tag_name(\"a\") #Links under <a> tag\r\n link = button.get_attribute('href') #URL contained under 'href' \r\n if link.find(site) != -1: #Some \"g\" elements are not search results\r\n find = np.zeros(len(URL_exclusions))\r\n for j in range(len(URL_exclusions)):\r\n find[j] = bool(link.find(URL_exclusions[j]) == -1)\r\n if all(find) == True: #If no exclusion words found in UR\r\n button.click()\r\n sleep_time = np.random.random() * np.random.randint(1,6) #Sleep for random time between 1 and 5s to reduce chance of bot detection.\r\n time.sleep(sleep_time)\r\n headline = get_headline(driver)\r\n if headline != '': #Only interested if we succesfully find headline\r\n headlines.append(headline)\r\n driver.back()\r\n sleep_time = np.random.random() * np.random.randint(1,6)\r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n if timeout >= 120:\r\n break #If results hasn't loaded after 120 seconds, we need to break the for loop\r\n return headlines", "def heads(self) -> \"IterableList[Head]\":\n return Head.list_items(self)", "def get_meta_keywords(self):\n return self.get_meta_content(self.article.doc, \"meta[name=keywords]\")", "def get_nonnumbered_section_headers(self, full_text):\n narrowed_string = self.get_header_text(full_text)\n\n headers_list = [' Abstract ']\n\n potential_headers = []\n for hd in self.INTROS:\n if hd in narrowed_string:\n potential_headers.append(hd)\n else:\n pass\n\n if len(potential_headers) > 0:\n headers_list.append(potential_headers[0])\n else:\n pass\n\n ###############\n\n potential_headers = []\n for hd in self.METHODS:\n if hd in narrowed_string:\n potential_headers.append(hd)\n else:\n pass\n\n if len(potential_headers) > 0:\n headers_list.append(potential_headers[0])\n else:\n pass\n\n ###############\n\n potential_headers = []\n for hd in self.RandD:\n if hd in narrowed_string:\n potential_headers.append(hd)\n else:\n pass\n\n if len(potential_headers) > 0:\n headers_list.append(potential_headers[0])\n else:\n pass\n\n ###############\n\n potential_headers = []\n for hd in self.CONCS:\n if hd in narrowed_string:\n potential_headers.append(hd)\n else:\n pass\n\n if len(potential_headers) > 0:\n headers_list.append(potential_headers[0])\n else:\n pass\n\n ###############\n\n potential_headers = []\n for hd in self.ACKS:\n if hd in narrowed_string:\n potential_headers.append(hd)\n else:\n pass\n\n if len(potential_headers) > 0:\n headers_list.append(potential_headers[0])\n else:\n pass\n\n ###############\n\n potential_headers = []\n for hd in self.REFS:\n if hd in narrowed_string:\n potential_headers.append(hd)\n else:\n pass\n\n if len(potential_headers) > 0:\n headers_list.append(potential_headers[0])\n else:\n pass\n\n return headers_list", "def get_all_headlines_from_chrome_2(site,URL_exclusions):\r\n headlines = []\r\n #Initial URL to pass to return search:\r\n URL = f'https://www.google.co.uk/search?as_q=&as_epq=irish+travellers&as_oq=&as_eq=&as_nlo=&as_nhi=&lr=&cr=&as_qdr=all&as_sitesearch={site}&as_occt=any&safe=active&as_filetype=&tbs='\r\n n = 0\r\n while n < 10:\r\n n += 1\r\n driver = launch_chrome()\r\n try:\r\n return_search(URL,driver)\r\n except:\r\n continue\r\n sleep_time = np.random.random() * np.random.randint(1,6) \r\n time.sleep(sleep_time) #Slow down to avoid bot detection\r\n 
timeout = 0\r\n start = time.time()\r\n while timeout < 120:\r\n try:\r\n page_headlines = get_headlines_from_one_page(driver,site,URL_exclusions)\r\n break\r\n except:\r\n end = time.time()\r\n timeout = end - start\r\n for headline in page_headlines:\r\n headlines.append(headline)\r\n try:\r\n next_button = driver.find_element_by_id('pnnext')\r\n URL = next_button.get_attribute('href') #Pass new URL to return_search()\r\n except NoSuchElementException:\r\n driver.quit() #Quit driver if can't find next button \r\n break\r\n driver.quit() #Quit driver each iteration to avoid triggering recaptcha.\r\n return headlines", "def first_heading(source_text):\n lines = source_text.split('\\n')\n for l in lines:\n if l.startswith(u'#'):\n return l.strip(u'# ')\n return None", "def get_keywords(self):\n all_keywords = []\n z_index = 0\n for zettel in self.lemma_tokens:\n keywords = []\n w_index = 0\n cur_zettel_dict = {}\n for word in zettel:\n cur_zettel_dict.setdefault(word[0], 0)\n cur_word_total_score = self.all_scores[z_index][w_index]\n if cur_zettel_dict[word[0]] > cur_word_total_score:\n w_index += 1\n continue\n else:\n cur_zettel_dict[word[0]] = cur_word_total_score\n w_index += 1\n cur_sorted = sorted(cur_zettel_dict.items(), key=lambda kv: kv[1], reverse=True)\n for i in range(self.keyword_n):\n keywords.append(str(cur_sorted[i]))\n z_index += 1\n all_keywords.append(keywords)\n return all_keywords", "def get_keywords(keyword_list: List[Tuple[str, str]], keyword_type: str) -> List[str]:\n keywords = [x[0] for x in keyword_list if x[1].startswith(keyword_type)]\n\n return keywords", "def test_single_headline(self):\n\n text = \"Example headline\"\n\n for i in range(1,6):\n headline_str = '*' * i + ' ' + text\n doc = parser.parse(headline_str)\n\n headline_node = doc.children()[0]\n\n self.assertTrue(isinstance(headline_node, parser.HeadlineNode))\n self.assertEqual(headline_node.level, i)\n self.assertEqual(headline_node.text, text)", "def starts_with_tonic(a_list):\n key = a_list.track.bars[0].key.name\n note = a_list.get_first_actual_note()\n if note.name == key:\n return []\n else:\n return [note.start]", "def first_words_func():\n return_list = []\n for lyric in lyrics:\n for line in lyric.split(\"\\n\"):\n return_list.append(line.split(\" \")[0])\n return (return_list)", "def _mdHeadings(self):\n\t\treturn [\n\t\t\t\t\t'ProcessingStatus', \n\t\t\t\t\t'OpenChCurrent', \n\t\t\t\t\t'NStates',\n\t\t\t\t\t'CurrentStep',\n\t\t\t\t\t'BlockDepth',\n\t\t\t\t\t'BlockSTD',\n\t\t\t\t\t'EventStart', \n\t\t\t\t\t'EventEnd', \n\t\t\t\t\t'EventDelay', \n\t\t\t\t\t'StateResTime',\n\t\t\t\t\t'ResTime', \n\t\t\t\t\t'AbsEventStart',\n\t\t\t\t\t'Threshold'\n\t\t\t\t]", "def findLines(self, text, term, scope=75):\n\t\tlistOfResults = list()\n\n\t\tcurrentIndex = 0\n\t\ttermLength\t = len(term)\n\t\tappend\t\t = listOfResults.append\n\t\treplace\t\t = str.replace\n\n\t\ttext = text.lower()\n\t\tterm = term.lower()\n\n\t\twhile currentIndex >= 0:\n\t\t\tcurrentIndex = text.find(term, currentIndex+1)\n\n\t\t\tindexA = currentIndex - scope\n\t\t\tindexB = currentIndex + termLength + scope\n\n\t\t\tfindings1 = replace(text[indexA:indexB], '\\n', '_')\n\t\t\tfindings2 = replace(findings1, '\\t', ' ')\n\t\t\tappend(findings2)\n\n\t\treturn listOfResults[:-1]", "def keyword_frequencies(self, limit = None):\r\n\t\tkey_head = self.keyword_headlines()\r\n\r\n\t\tfreq_list = []\r\n\t\tfor keyword in key_head:\r\n\t\t\tnumHeadlines = len(key_head[keyword])\r\n\t\t\tif limit:\r\n\t\t\t\tif numHeadlines > 
limit:\r\n\t\t\t\t\tnumHeadlines = limit\r\n\t\t\tfreq_list.append([keyword, numHeadlines])\r\n\r\n\t\treturn freq_list", "def get_heading_words(self, html_doc):\n all_headings = []\n \n all_h1 = html_doc.findAll('h1')\n h1_text = ''\n for h1 in all_h1:\n h1_text += h1.text + ' '\n all_headings.append(h1_text.strip())\n \n all_h2 = html_doc.findAll('h2')\n h2_text = ''\n for h2 in all_h2:\n h2_text += h2.text + ' '\n all_headings.append(h2_text.strip())\n \n all_h3 = html_doc.findAll('h3')\n h3_text = ''\n for h3 in all_h3:\n h3_text += h3.text + ' '\n all_headings.append(h3_text.strip())\n \n all_h4 = html_doc.findAll('h4')\n h4_text = ''\n for h4 in all_h4:\n h4_text += h4.text + ' '\n all_headings.append(h4_text.strip()) \n \n all_h5 = html_doc.findAll('h5')\n h5_text = ''\n for h5 in all_h5:\n h5_text += h5.text + ' '\n all_headings.append(h5_text.strip())\n \n all_h6 = html_doc.findAll('h6')\n h6_text = ''\n for h6 in all_h6:\n h6_text += h6.text + ' '\n all_headings.append(h6_text.strip()) \n \n return all_headings", "def find_sections(self):\n sections = []\n text = self.unixtext\n # Correct bad encoding of colons due to new NWS software\n for token in AMPM_COLON.findall(text):\n text = text.replace(token, \" \" + token.replace(\":\", \"\"))\n for section in text.split(\"&&\"):\n if not HEADLINE_RE.findall(section.replace(\"\\n\", \" \")):\n continue\n tokens = re.findall(\"^WEATHER ITEM.*$\", section, re.M)\n if not tokens:\n raise CLIException(\"Could not find 'WEATHER ITEM' within text\")\n if len(tokens) == 1:\n sections.append(section)\n continue\n # Uh oh, we need to do some manual splitting\n pos = []\n for match in re.finditer(HEADLINE_RE, section.replace(\"\\n\", \" \")):\n pos.append(match.start())\n pos.append(len(section))\n for i, p in enumerate(pos[:-1]):\n sections.append(section[max([0, p - 10]) : pos[i + 1]])\n return sections", "def get_headers_for_print(self):\n lines_for_print = []\n for header in self.metadata:\n lines_for_print.append(self.metadata[header])\n lines_for_print.append('\\t'.join(self.header))\n lines_for_print[-1] = '#' + lines_for_print[-1]\n return lines_for_print", "def test_headlines_required(self) -> None:\n for rule in self.rules.headlines:\n if not rule.required:\n continue\n is_match: bool = False\n for headline in self.report.headlines:\n if self.rules.get_headline_rules(headline.name) == rule:\n is_match = True\n break\n if not is_match:\n self.add_error(f\"Rubriken {rule.name} som måste vara med saknas.\")", "def getHead(text):\n\n text = text.strip()\n\n #check if conjunction\n if utils.isConj(text):\n return utils.conjHead(text)\n\n tokens = text.split()\n new_text = \"\"\n first = True\n for word in tokens:\n if (utils.break_word(word) and not first):\n break\n\n if (word.endswith(\",\")):\n new_text += word[:-1]\n break\n\n #capture possessives?\n #if (word.endswith(\"'s\"):\n # new_text = \"\"\n # continue\n\n new_text += word + \" \"\n first = False\n\n new_text = new_text.strip()\n if new_text == \"\":\n sys.stderr.write(\"Empty text: \\\"{0}\\\" : \\\"{1}\\\"\".format(text, new_text))\n\n return new_text.split()[-1]", "def get_search_keywords(testcase):\n crash_state_lines = testcase.crash_state.splitlines()\n # Use top 2 frames for searching.\n return crash_state_lines[:2]", "def _section_titles(self):\r\n chapter_css = 'nav > div.chapter > h3 > a'\r\n return self.q(css=chapter_css).map(lambda el: el.text.strip()).results", "def chunk(keywords, lines):\n chunks = dict()\n chunk = []\n \n # Create an empty dictionary using all 
the keywords\n for keyword in keywords:\n chunks[keyword] = []\n \n # Populate dictionary with lists of chunks associated\n # with the keywords in the list \n for line in lines:\n if line.strip():\n token = line.split()[0]\n if token in keywords:\n chunk = [line] \n chunks[token].append(chunk) \n else:\n chunk.append(line)\n\n return chunks", "def parsewokkeys(keywords):\n resultstring = '<td class=\"resultTitle\">Material</td><td class=\"resultTitle\">Publications</td>'\n\n for key in keywords:\n resultstring += '<td class=\"resultTitle\">' + key + '</td>'\n\n return resultstring", "def HeadList(self):\n return [(rname, repo.currenthead) for rname, repo in self.repos.items()\n ]" ]
[ "0.8323906", "0.77197367", "0.746953", "0.7303126", "0.68182117", "0.6805935", "0.66141784", "0.64018786", "0.6364445", "0.63333815", "0.59744793", "0.59658384", "0.59578186", "0.5909768", "0.5894226", "0.58745706", "0.5857797", "0.5850383", "0.58470386", "0.58389264", "0.58179617", "0.58088344", "0.57964855", "0.5795066", "0.57684404", "0.5751211", "0.5724461", "0.56948507", "0.5630085", "0.5623782", "0.56205213", "0.5612611", "0.5609248", "0.55962515", "0.5588961", "0.5569021", "0.55604655", "0.5549465", "0.5540631", "0.5530487", "0.54926467", "0.54904777", "0.5477238", "0.54743063", "0.5464533", "0.54500246", "0.5433313", "0.5422295", "0.5417825", "0.5407356", "0.54005414", "0.5393014", "0.5383134", "0.5375863", "0.53748834", "0.5373083", "0.5372399", "0.5364065", "0.5354464", "0.5342855", "0.53247166", "0.5321247", "0.5318854", "0.5315665", "0.53053886", "0.5304232", "0.52885103", "0.5282585", "0.5274454", "0.52678955", "0.5245356", "0.52406985", "0.5233082", "0.5232417", "0.52192014", "0.52151304", "0.5202496", "0.5198208", "0.5196466", "0.51929265", "0.5179086", "0.5173304", "0.51727515", "0.5170859", "0.51604664", "0.51535356", "0.51403856", "0.51396286", "0.5129054", "0.5128891", "0.512168", "0.51141155", "0.51107204", "0.5109799", "0.5095959", "0.5094742", "0.5093479", "0.5090293", "0.5086876", "0.5085634" ]
0.8318916
1
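The Selenium scraping snippets listed among the negatives above share one pattern worth isolating: retry an action until it succeeds or roughly 120 seconds have elapsed, sleeping a random few seconds between attempts to reduce bot detection. Below is a minimal, standalone sketch of that pattern; the fetch() callable and the 120-second limit are illustrative assumptions, not taken from the original records.

import random
import time

def retry_until_timeout(fetch, limit_seconds=120):
    # Keep calling fetch() (a hypothetical zero-argument callable) until it
    # succeeds or limit_seconds have elapsed, mirroring the while/try loops
    # in the snippets above.
    start = time.time()
    while True:
        try:
            return fetch()
        except Exception:
            if time.time() - start >= limit_seconds:
                return None  # give up once the time limit is reached
            # random pause of up to ~5 seconds to look less bot-like
            time.sleep(random.random() * random.randint(1, 5))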
Welcome route Show api info
def get(self, **kwargs):
    # groups = kwargs.get('groups')
    return {
        'app_fullname': main_config.app_name,
        'app_name': main_config.package_name,
        'app_version': main_config.app_version
    }
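The document above is a bare get() method that returns the app's name and version as a dict, which reads like a Flask-RESTful Resource method. A minimal sketch of how such a welcome resource might be wired up follows; the Welcome class name, the hard-coded values standing in for main_config, and the '/' route are assumptions for illustration, not part of the record.

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

class Welcome(Resource):
    # Hypothetical resource wrapping a get() like the one in the record above.
    def get(self, **kwargs):
        return {
            'app_fullname': 'Example App',   # stands in for main_config.app_name
            'app_name': 'example_app',       # stands in for main_config.package_name
            'app_version': '1.0.0',          # stands in for main_config.app_version
        }

api.add_resource(Welcome, '/')  # assumed route; GET / returns the info dict as JSON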
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def welcome():\n return(\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/etf_info<br/>\"\n f\"/api/v1.0/mutualfunds_info\"\n )", "def welcome():\n print(\"Server received request for 'Home' page...\")\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/names<br/>\"\n f\"/api/v1.0/passengers\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/precip<br/>\"\n\t\tf\"/api/stations<br/>\"\n\t\tf\"/api/tobs<br/>\"\n\t\tf\"/api/<start><br/>\"\n f\"/api/<start>/<end>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/aircraft_data<br/>\"\n f\"/api/v1.0/aerial_attack<br/>\"\n f\"/api/v1.0/weapon_data<br/>\"\n )", "def welcome():\n return (\n f\"Welcome to the climate Analysis API!<br/>\"\n f\"Here are available API routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/consumption<br/>\"\n f\"/api/v1.0/gasprices<br/>\"\n f\"/api/v1.0/states<br/>\"\n f\"/api/v1.0/vmt<br/>\"\n )", "def welcome():\n return (\n f\"Welcome to Hawaii Climate Analysis API :-)<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n\t\tf\"/api/v1.0/stations<br/>\"\n\t\tf\"/api/v1.0/tobs<br/>\"\n\t\tf\"/api/v1.0/<start><br/>\"\n\t\tf\"/api/v1.0/<start>/<end>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n )", "def welcome():\n return (\n f\"Climate API Routes<br/>\"\n \"Available Routes:<br/>\"\n \"/api/v1.0/precipitation<br/>\"\n \"/api/v1.0/stations<br/>\"\n \"/api/v1.0/tobs<br/>\"\n \"/api/v1.0/yyyy-mm-dd<br/>\"\n \"/api/v1.0/yyyy-mm-dd/yyyy-mm-dd\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation\"\n f\"/api/v1.0/stations\"\n f\"/api/v1.0/tobs\"\n f\"/api/v1.0/start/end\"\n \n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/temperature/<start><br/>\"\n f\"/api/v1.0/temperature/<start>/<end><br/>\"\n\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs\")", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/station<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end><br/>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/station<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start-date<br/>\"\n f\"/api/v1.0/start-date/end-date<br/>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n 
f\"/api/v1.0/start_date<br/>\"\n f\"/api/v1.0/start_date/end_date\"\n )", "def welcome():\n return \"Welcome to API Deployment\"", "def index():\n return (\n f\"Welcome to the Climate App API!<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/&lt;start&gt;<br/>\"\n f\"/api/v1.0/&lt;start&gt;/&lt;end&gt;\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end><br/>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation\"\n f\"/api/v1.0/stations\"\n f\"/api/v1.0/tobs\"\n f\"/api/v1.0/<start> \"\n f\"/api/v1.0/<start>/<end>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start_date<br/>\"\n f\"/api/v1.0/start_date/end_date<br/>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs</br>\"\n f\"/api/v1.0/<start>/<end></br>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/'start day'<br/>\"\n f\"/api/v1.0/'start day'/'end day'<br/>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"/api/v1.0/start/end\"\n )", "def welcome():\r\n return (\r\n f\"Available Routes:<br/>\"\r\n f\"/api/v1.0/precipitation<br/>\"\r\n f\"/api/v1.0/stations<br/>\"\r\n f\"/api/v1.0/tobs<br/>\"\r\n f\"/api/v1.0/<start><br/>\"\r\n f\"/api/v1.0/<start>/<end>\"\r\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end><br/>\"\n\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/2017-07-01<br/>\"\n f\"/api/v1.0/2017-07-01/2017-07-10<br/>\"\n )", "def overview():\n return render_template('api/api.html', title='API Overview')", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"/api/v1.0/start/end<br/>\"\n \n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n 
f\"/api/v1.0/start/end<br/>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/[start]<br/>\"\n f\"/api/v1.0/[start]/[end]\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"/api/v1.0/start_end\" )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"api/v1.0/precipitation<br/>\"\n f\"api/v1.0/stations<br/>\"\n f\"api/v1.0/tobs<br/>\"\n f\"api/v1.0/start<br/>\"\n f\"api/v1.0/vacation<br/>\"\n f\"api/v1.0/startend<br/>\"\n )", "def welcome():\r\n return (\r\n f\"Available Routes:<br/>\"\r\n f\"/api/v1.0/precipitation<br/>\"\r\n f\"/api/v1.0/stations<br/>\"\r\n f\"/api/v1.0/tobs<br/>\"\r\n f\"/api/v1.0/<start>start_date<br/>\"\r\n f\"/api/v1.0/<start>start_date/<end>end_date\"\r\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/2017-03-22<br/>\"\n f\"/api/v1.0/2017-03-22/2017-03-30 <br/>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/temp/start/end\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/2017<br/>\"\n f\"/api/v1.0/start2<br/>\"\n f\"/api/v1.0/range<br/>\"\n )", "def welcome():\n return(\n f\"Available Routes: <br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start_date(yyyy-mm-dd)<br/>\"\n f\"/api/v1.0/start_date(yyyy-mm-dd)/end_date(yyyy-mm-dd)\")", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/START_DATE<br/>\"\n f\"/api/v1.0/START_DATE/END_DATE\"\n )", "def welcome():\n return (\n f\"Avalable Routes:<br/>\"\n f\"/api/v1.0/precipitation - List of Precipitation Observations from the previous year<br/>\"\n\n f\"/api/v1.0/stations\"\n f\"- List of observation stations<br/>\"\n\n f\"/api/v1.0/tobs\"\n f\"- List of Temperature Observations (tobs) for the previous year<br/>\"\n\n f\"/api/v1.0/temps/&ltstart&gt/&ltend&gt\"\n f\"- Min, avg, max temp for start or start-end date range (format yyyy-mm-dd)<br/>\"\n\n )", "def index():\n return 'Your api is up and running!'", "def welcome():\n return (\n f\"All Available Routes:<br/>\" \n f\"/api/v1.0/precipitation<br/>\" \n f\"/api/v1.0/stations<br/>\" \n f\"/api/v1.0/tobs<br/>\" \n f\"/api/v1.0/yyyy-mm-dd<br/>\"\n f\"/api/v1.0/yyyy-mm-dd/yyyy-mm-dd\"\n )", "def index():\n return (\n f\"Welcome to my Hawaii trip info!<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )", "def index():\n # Message to the user\n message = {\n 'api_version': 'v1.0',\n 'status': '200',\n 'message': 'Welcome to the Flask API'\n }\n # Making the message looks good\n resp = jsonify(message)\n\n # Returning the object\n return resp", "def Welcome():\n return (\n f\"Welcome to the Surf's Up API!\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/surfs-up\"\n )", "def Home():\n return (\n f\"Welcome to the Climate App<br/>\"\n f\"Available 
Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end><br/>\"\n )", "def welcome():\r\n return (\r\n f\"Available Routes:<br/>\"\r\n f\"/api/v1.0/precipitation<br/>\"\r\n f\"/api/v1.0/stations<br/>\"\r\n f\"/api/v1.0/tobs<br/>\"\r\n f\"/api/v1.0/temp/<start> (Enter Start Date Only)<br/>\"\r\n f\"/api/v1.0/temp/<start>/<end> (Enter Range)\"\r\n\r\n )", "def welcome(): \n return (\n f\"/api/v1.0/precipitation</br>\"\n f\"/api/v1.0/stations</br>\"\n f\"/api/v1.0/tobs</br>\"\n f\"/api/v1.0/<start>\"\n )", "def welcome():\n return (f\"Available Routes:<br/>\"\\\n\n f\"/api/v1.0/precipitation\"\\\n\n f\"/api/v1.0/stations\"\\\n\n f\"/api/v1.0/tobs\"\\\n\n f\"/api/v1.0/start\"\\\n\n f\"/api/v1.0/start/end\")", "def welcome():\n\n return (\n f\"------------------------------<br/>\"\n f\"Available Routes:<br/>\"\n f\"------------------------------<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"<br/>\"\n f\"------------------------------<br/>\"\n f\"For the following routes please use this date format:<br/>\"\n f\"YYYY-MM-DD<br/>\"\n f\"ex: http://127.0.0.1:5000/api/v1.0/2012-02-28/2012-03-05<br/>\"\n f\"------------------------------<br/>\"\n f\"/api/v1.0/{{start date}}<br/>\"\n f\"/api/v1.0/{{start date}}/{{end date}}<br/>\"\n )", "def index():\n response.view_title = myconf.get('app.name') + ' Home Page'\n return dict(message='')", "def welcome():\r\n return (\r\n f\"Welcome to the Climate App<br/>\"\r\n f\"<br/>\"\r\n f\"<br/>\"\r\n\r\n f\"Available Routes:<br/>\"\r\n f\"<br/>\"\r\n\r\n f\"/api/v1.0/precipitation<br/>\"\r\n f\"- JSON list of last year's precipitation data<br/>\"\r\n f\"<br/>\"\r\n\r\n f\"/api/v1.0/stations<br/>\"\r\n f\"- JSON list of station data<br/>\"\r\n f\"<br/>\"\r\n\r\n f\"/api/v1.0/tobs<br/>\"\r\n f\"- JSON list of temperature observation data from the stations<br/>\"\r\n f\"<br/>\"\r\n\r\n f\"/api/v1.0/start<br/>\"\r\n f\"- JSON list of the minimum, average and maximum temperature when given the start date only (YYYY-MM-DD), for dates greater than and equal to the start date<br/>\"\r\n f\"<br/>\"\r\n\r\n f\"/api/v1.0/start/end<br/>\"\r\n f\"- JSON list of the minimum, average and maximum temperature when given the start and end dates (YYYY-MM-DD) for dates between the start and end date inclusive:<br/>\"\r\n f\"<br/>\"\r\n\r\n )", "def home():\n return (\n f\"Welcome to the Hawaii Weather API<br/>\"\n \"<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start_date<br/>\"\n f\"/api/v1.0/start_date/end_date<br/>\"\n \"<br/>\"\n f\"Date format: YYYY-MM-DD\"\n )", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/preciptation<br/>\"\n f\"/api/v1.0/Stations\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"<a href='/api/v1.0/precipitation'>precipitation</a><br/>\"\n f\"<a href='/api/v1.0/stations'>stations</a><br/>\"\n f\"<a href='/api/v1.0/tobs'>tobs</a><br/>\"\n f\"<a href='/api/v1.0/tobs/start_date'>tobs/start_date</a><br/>\"\n f\"<a href='/api/v1.0/tobs/start_date/end_date'>tobs/start_date/end_date</a><br/>\"\n )", "def api():\n api_routes = [\n \"/api/v1.0/beer\",\n \"/api/v1.0/breweries\",\n ]\n return render_template(\"api.html\", api_routes = api_routes)", "def index():\n\n return redirect(api)", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n 
f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"/api/v1.0/start/end<br/>\"\n f\"Please note that start and end must be replaced with valid dates in the format off YYYY-MM-DD.<br/>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"<a href='/api/v1.0/precipitation'>Precipitation</a><br/>\"\n f\"<a href='/api/v1.0/stations'>Stations</a><br/>\"\n f\"<a href='/api/v1.0/tobs'>Temperature</a><br/>\"\n f\"<a href='/api/v1.0/start'>Start Date</a><br/>\"\n f\"<a href='/api/v1.0/start/end'>End Date</a><br/>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/&ltStartDate&gt<br/>\"\n f\"/api/v1.0/&ltStartDate&gt/&ltEndDate&gt<br/>\"\n f\"<b>Use yyyy-mm-dd format for dates</b>\"\n )", "def welcome():\n return (\n \"Hawaii Precipitation and Weather Data<br/><br/>\"\n \"Pick from the available routes below:<br/><br/>\"\n \"Precipiation from 2016-08-23 to 2017-08-23.<br/>\"\n \"/api/v1.0/precipitation<br/><br/>\"\n \"A list of all the weather stations in Hawaii.<br/>\"\n \"/api/v1.0/stations<br/><br/>\"\n \"The Temperature Observations (tobs) from 2016-08-23 to 2017-08-23.<br/>\"\n \"/api/v1.0/tobs<br/><br/>\"\n \"Type in a date (i.e., 2013-09-26) to see the min, max and avg temperature since that date.<br/>\"\n \"/api/v1.0/temp/<start><br/><br/>\"\n \"Type in a date range (anywhere between 2010-01-01/2017-08-23) to see the min, max and avg temperature for that range.<br/>\"\n \"/api/v1.0/temp/<start>/<end><br/>\"\n )", "def home_view(request):\n message = \"GET / - The base API route\\nPOST /api/v1/auth/ - for registering a new account and signing up\\nGET /api/v1/portfolio/{id}/ - for retrieving a user's portfolio\\nPOST /api/v1/stock/ - for creating a new company record\\nGET /api/v1/stock/{id}/ - for retrieving a companies information\\nDELETE /api/v1/stock/{id} - for deleting a company record\\nGET /api/v1/company/{symbol} - for retrieving company detail from 3rd party API, where `{symbol}` is variable\"\n\n return Response(body=message, content_type='text/plain', status=200)", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/&lt;start&gt;<br/>\"\n f\"/api/v1.0/&lt;start&gt;/&lt;end&gt;\"\n )", "def welcome():\n return (\n f\"Hawaii Climate <br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation <br/>\"\n f\"/api/v1.0/stations <br/>\"\n f\"/api/v1.0/tobs <br/>\"\n f\"/api/v1.0/<start> <br/>\"\n f\"/api/v1.0/<start>/<end> <br/>\"\n )", "def welcome():\n\n return (\n f\"Available Routes:<br/>\"\n f\"<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"- List of previous year rain totals from all stations<br/>\"\n f\"<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"- List of station numbers and names<br/>\"\n f\"<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"- List of previous year temperatures from all stations<br/>\"\n f\"<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"- When given the start date (YYYY-MM-DD), calculates the MIN/AVG/MAX temperature for all dates greater than and equal to start date<br/>\"\n f\"<br/>\"\n f\"/api/v1.0/start/end<br/>\"\n f\"- When given the start and end date (YYYY-MM-DD), calculates the MIN/AVG/MAX temperature for dates between start and end date inclusive<br/>\"\n )", "def home():\n rkwargs = {\n 'description': 'Shows API info',\n 'message': 'Welcome 
to the lung cancer prediction API!',\n 'links': {algo: '{}{}/predict/'.format(request.url_root, algo) for\n algo in PREDICTORS.keys()}\n }\n\n return jsonify(**rkwargs)", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs\"\n )", "def welcome():\r\n return (\r\n f\"Available Routes:<br/>\"\r\n f\"<br/>\"\r\n f\"/api/v1.0/precipitation<br/>\"\r\n f\"- List of prior year rain totals from all stations<br/>\"\r\n f\"<br/>\"\r\n f\"/api/v1.0/stations<br/>\"\r\n f\"- List of Station numbers and names<br/>\"\r\n f\"<br/>\"\r\n f\"/api/v1.0/tobs<br/>\"\r\n f\"- List of prior year temperatures from all stations<br/>\"\r\n f\"<br/>\"\r\n f\"/api/v1.0/start<br/>\"\r\n f\"- When given the start date (YYYY-MM-DD), calculates the MIN/AVG/MAX temperature for all dates greater than and equal to the start date<br/>\"\r\n f\"<br/>\"\r\n f\"/api/v1.0/start/end<br/>\"\r\n f\"- When given the start and the end date (YYYY-MM-DD), calculate the MIN/AVG/MAX temperature for dates between the start and end date inclusive<br/>\"\r\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/&lt;start&gt;</br>\"\n f\"/api/v1.0/&lt;start&gt;/&lt;end&gt;\"\n )", "def welcome():\n return (\n f\"Welcome to the Hawaii Climate API!<hr/>\"\n f\"Listed below are the available routes:<br/>\"\n f\"Precipitation Data for 8/23/16 - 8/23/17: /api/v1.0/precipitation<br/>\"\n f\"Stations: /api/v1.0/stations<br/>\"\n f\"Observed Temperatures for USC00519281: /api/v1.0/tobs<br/>\"\n f\"Temperature ranges: /api/v1.0/start_date or /api/v1.0/start_date/end_date<br/>\"\n f\"**start_date/end_date is in the format yyyy-mm-dd\"\n\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"- List of prior year rain totals from all stations<br/>\"\n f\"<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"- List of Station numbers and names<br/>\"\n f\"<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"- List of prior year temperatures from all stations<br/>\"\n f\"<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"- When given the start date (YYYY-MM-DD), calculates the MIN/AVG/MAX temperature for all dates greater than and equal to the start date<br/>\"\n f\"<br/>\"\n f\"/api/v1.0/start/end<br/>\"\n f\"- When given the start and the end date (YYYY-MM-DD), calculate the MIN/AVG/MAX temperature for dates between the start and end date inclusive<br/>\"\n\n )", "def index():\n return Response(\n \"Welcome to basic-http-server, you're ready to add some methods!\\n\" +\n str(request) + \"\\n\", mimetype='text/plain'\n )", "def index():\n response = jsonify(\n {'message':'Hello, RESTful API development!'}\n )\n \n return response, 200", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"- List of prior year rain totals from all stations<br/>\"\n f\"<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"- List of Station numbers and names<br/>\"\n f\"<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"- List of prior year temperatures from all stations<br/>\"\n f\"<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"- When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date<br/>\"\n f\"<br/>\"\n f\"/api/v1.0/start/end<br/>\"\n f\"- When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates between the start and end date 
inclusive<br/>\"\n )", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"<a href='/api/v1.0/2017-01-01'>/api/v1.0/2017-01-01</a><br></p>\"\n f\"<a href='/api/v1.0/2017-01-01/2017-01-07'>/api/v1.0/2017-01-01/2017-01-07</a></p>\"\n \n )", "def welcome():\n result = WelcomeModel()\n return WelcomeSchema().dump(result), 200", "def home():\n return(\n f\"Available Routes: <br/>\"\n\n f\"For Precipitation: /api/v1.0/precipitation<br/>\"\n f\"Returns Jsonify dictionary of dates and Precepitation<br/><br/>\"\n\n f\"For list of Stations: /api/v1.0/stations<br/>\"\n f\"Returns Jasonify list of stations <br/><br/>\"\n\n f\"For last year temperatures: /api/v1.0/tobs<br/>\"\n f\"Returns Jsonify dictionary of Temperature Observations for last year<br/><br/>\"\n\n f\"Temperature result from the date in format (yyyy-mm-dd): /api/v1.0/yyyy-mm-dd<br/>\"\n f\"Returns an Average, Max, and Min temperatures from given start date of dataset<br/><br/>\"\n\n f\"Temperature result from start date to end date in format (yyyy-mm-dd): /api/v1.0/yyyy-mm-dd/yyyy-mm-dd<br/>\"\n f\"Returns an Average, Max, and Min temperatures for a given date range\"\n\n )", "def home(request):\n response_status = {'text': \"API is up\"}\n return JsonResponse(response_status, status=200)", "def home():\n return (\n f\"These are the available routes:</br>\"\n f\"/api/v1.0/precipitation</br>\"\n f\"/api/v1.0/stations</br>\"\n f\"/api/v1.0/tobs</br>\"\n f\"/api/v1.0/< start ></br>\"\n f\"/api/v1.0/< start >/< end ></br>\"\n )", "def read_home():\n return {'message': 'API live!'}", "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/test<br/>\"\n )", "def root():\n if request.headers['Accept'] == 'application/json':\n return \"Welcome\\n\\n\", 200\n else:\n return redirect(url_for('index'))", "def welcome():\n return (\n f\"/api/v1.0/precipitation<br/>Returns a JSON list of percipitation data for the dates between 8/23/16 and 8/23/17<br/><br/>\"\n f\"/api/v1.0/stations<br/>Return a JSON list of stations from the dataset\"\n f\"/api/v1.0/tobs<br/>Return a JSON list of temperature observations (TOBS) for the previous year.\"\n f\"/api/v1.0/<start><br/>Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given\"\n f\"/api/v1.0/<start>/<end>Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given\"\n )", "def index():\n \n return dict(message=T('Welcome to Audi Volkswagon Porsche'))", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end><br/>\"\n )", "def index():\n return \"Attendance Flask server\"", "def home():\n return response(\"OK\")", "def index():\n if auth.user:\n message=\"Welcome: \"\n user=auth.user\n else:\n message=\"Please use login for testing...\"\n user=None\n return dict(message=message, user=user)", "def home():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/2015-01-01<br/>\"\n f\"/api/v1.0/2015-01-01/2015-12-31\"\n )", "def index():\n \n return dict(message=T('Welcome to Learn2Cook!'))", "def welcome():\n return (\n f\"<b>Available Routes:</b><br/>\"\n f\"<br/>\"\n f\"<b>Stats:</b><br/>\"\n f\"Precipitation: /api/v1.0/precipitation<br/>\"\n f\"List of 
Stations: /api/v1.0/stations<br/>\"\n f\"Temperatures for last year: /api/v1.0/tobs<br/>\"\n f\"<br/>\"\n f\"<b>Stats for Dates:</b><br/>\"\n f\"Temperature stats a specific date(yyyy-mm-dd): /api/v1.0/yyyy-mm-dd<br/>\"\n f\"Temperature stats from start to end dates(yyyy-mm-dd): /api/v1.0/yyyy-mm-dd/yyyy-mm-dd<br/>\"\n f\"<br/>\"\n f\"<b>** Note: </b>First Record Date: 2010-01-01 , Last Record Date: 2017-08-23<br/>\" # from jupyter notebook\n )", "def index():\n return jsonify({'hello': 'What would you like to automate today?'})", "def home():\n return(\n f\"Available Routes:<br/>\"\n f\"Precipitation: /api/v1.0/precipitation<br/>\"\n f\"List of Stations: /api/v1.0/stations<br/>\"\n f\"Temperature for one year: /api/v1.0/tobs<br/>\"\n f\"Temperature stat from the start date(yyyy-mm-dd): /api/v1.0/min_max_avg/<start><br/>\"\n f\"Temperature stat from start to end dates(yyyy-mm-dd): /api/v1.0/min_max_avg/<start><br/>\"\n )", "def welcome():\n return (\n f'Available Routes: <br/>'\n f'<a href=\"/api/v1.0/precipitation\">/api/v1.0/precipitation</a><br/>'\n f'<a href=\"/api/v1.0/stations\">/api/v1.0/stations</a><br/>'\n f'<a href=\"/api/v1.0/tobs\">/api/v1.0/tobs</a><br/><br/>'\n f'<div> Use following link if you have a date range. \\\n Copy paste the link below after the server address. \\\n Enter the start date in yyyy-mm-dd format <br/>\\\n For example : Enter in following way after the server address<br/>\\\n /api/v1.0/2013-01-01 </div><br/>'\n f'/api/v1.0/<br/><br/><br/><br/>' \n f'<div> Use following link if you have a date range. \\\n Copy paste the link below after the server address. \\\n Enter the start and end date in yyyy-mm-dd format <br/>\\\n For example : Enter in following way after the server address<br/>\\\n /api/v1.0/2013-01-01/2013-12-31 </div><br/>'\n f'/api/v1.0/'\n )", "def get_overview():\n from app.core.api_views import Api\n from app.modules.overview import inc\n sar = inc.main()\n api = Api()\n return render_template(\"index.html\",\n sar=sar,\n )", "def welcome():\n\n # Assigning links to display the query results\n return (\n f\"<h2>Welcome to My Weather Data API <br> Available Routes:</h2>\"\n f\"<a href='/api/v1.0/precipitation'>Precipitation</a><br>\"\n f\"<a href='/api/v1.0/stations'>Stations</a><br>\"\n f\"<a href='/api/v1.0/tobs'>Tobs</a><br>\"\n f\"<h4>For the two queries below, please input dates between <strong>2010-01-01</strong> and <strong>2017-08-22</strong></h4>\"\n f\"<a href='/api/v1.0/2010-01-01'>01/01/2010</a><br>\"\n f\"<a href='/api/v1.0/2010-01-01/2017-08-22'>01/01/2010 to 08/22/2017</a><br><br>\"\n f\"/api/v1.0/< start ><br>\"\n f\"/api/v1.0/< start >/< end >\"\n )" ]
[ "0.78570443", "0.7700679", "0.76447624", "0.76302814", "0.76104695", "0.75441355", "0.7513181", "0.75017893", "0.748835", "0.748835", "0.747807", "0.7470789", "0.7469684", "0.7465586", "0.7445721", "0.74422014", "0.7441175", "0.7425304", "0.74079293", "0.7407577", "0.74064386", "0.74038523", "0.7395922", "0.7390834", "0.7385459", "0.7385459", "0.7385459", "0.7385363", "0.7381415", "0.7380793", "0.73742676", "0.73734576", "0.73733896", "0.7372343", "0.7367226", "0.736478", "0.73537654", "0.73499614", "0.7343398", "0.7340969", "0.7340363", "0.7315946", "0.73133683", "0.72958857", "0.72474056", "0.7241024", "0.72224396", "0.72220355", "0.7197246", "0.71685463", "0.71571606", "0.715485", "0.71530205", "0.7152323", "0.7113877", "0.71006787", "0.7095189", "0.7088009", "0.7065412", "0.7052341", "0.70166856", "0.70098203", "0.70071685", "0.6989748", "0.6987668", "0.69623566", "0.6961441", "0.6959058", "0.6954106", "0.69339246", "0.6932631", "0.6921443", "0.69179356", "0.6917496", "0.69154084", "0.6907068", "0.6903146", "0.6876674", "0.6871454", "0.6870268", "0.68668187", "0.6838166", "0.68289965", "0.68287855", "0.68076974", "0.6787226", "0.67721623", "0.6751821", "0.6706742", "0.66989523", "0.66986495", "0.6692138", "0.6690977", "0.6660488", "0.66332626", "0.6617203", "0.66004455", "0.6595152", "0.65946907", "0.65825117", "0.6570195" ]
0.0
-1
prettyprint a table. Every column's width is the width of the widest field in that column. The given table should be a list of lists. That is, it should be a list of rows, where every row is a list of fields. To get the width of each column, we'll transpose the table. For efficiency, if the caller already has a transposed version of the table, they can pass that into us so we don't have to retranspose it. Both the table, and the transposed version of the table, will be traversed exactly once, so it's fine if they are just generator functions.
def texttable(table, left=False):
    widths = (max(len(fld) for fld in line)
              for line in itertools.izip_longest(*table, fillvalue=""))
    lc = '-' if left else ''
    formats = ["%{0}{1}s".format(lc, width) for width in widths]
    return ORS.join("%s" % OFS.join(format % fld for (format, fld) in zip(formats, line))
                    for line in table)
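A quick usage sketch for the texttable document above. The original relies on module-level OFS/ORS separators and Python 2's itertools.izip_longest; the version below is a Python 3 port with explicit separators, offered as an assumption-laden illustration (it also assumes table is a plain list of lists rather than a generator).

import itertools

OFS = "  "   # output field separator (assumed value)
ORS = "\n"   # output record separator (assumed value)

def texttable3(table, left=False):
    # Python 3 rendering of the document above: zip_longest instead of
    # izip_longest, and the shadowed name `format` renamed to `fmt`.
    widths = [max(len(fld) for fld in col)
              for col in itertools.zip_longest(*table, fillvalue="")]
    lc = '-' if left else ''
    formats = ["%{0}{1}s".format(lc, width) for width in widths]
    return ORS.join(OFS.join(fmt % fld for fmt, fld in zip(formats, row))
                    for row in table)

rows = [["name", "qty"], ["apples", "12"], ["pears", "3"]]
print(texttable3(rows, left=True))  # prints a left-aligned two-column table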
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_table(table):\n # transpose the table:\n table = map(list, zip(*table))\n # get the column width:\n col_width = [max(len(str(x)) for x in col) for col in zip(*table)]\n # print it to screen:\n print\n for line in table:\n print \"| \" + \" | \".join(\"{:{}}\".format(x, col_width[i]) for i, x in enumerate(line)) + \" |\"\n print", "def pprint_table(out, table):\n\n\tcol_paddings = []\n\n\tfor i in range(len(table[0])):\n\t\tcol_paddings.append(get_max_width(table, i))\n\n\tfor row in table:\n\t\t# left col\n\t\tout.write(str(row[0]).ljust(col_paddings[0] + 1))\n\t\t\n\t\t# rest of the cols\n\t\tfor i in range(1, len(row)):\n\t\t\tout.write(str(row[i]).rjust(col_paddings[i] + 2))\n\t\t\n\t\tout.write('\\n')", "def format_prettytable(table):\r\n for i, row in enumerate(table.rows):\r\n for j, item in enumerate(row):\r\n table.rows[i][j] = format_output(item)\r\n ptable = table.prettytable()\r\n ptable.hrules = FRAME\r\n ptable.horizontal_char = '.'\r\n ptable.vertical_char = ':'\r\n ptable.junction_char = ':'\r\n return ptable", "def tabulate(table):\n cw = {} # column widths\n\n # Trim leading and trailing whitespace from each element.\n for i, row in enumerate(table):\n for j, element in enumerate(row):\n table[i][j] = element.strip()\n\n # Find the max element width for each column.\n for row in table:\n for j, element in enumerate(row):\n cw[j] = max(cw.get(j, 0), len(element))\n\n # Reformat elements to align columns.\n for i, row in enumerate(table):\n for j, element in enumerate(row):\n table[i][j] = ' ' + element.ljust(cw[j]) + ' '", "def simple_format_table(table):\n s = [[str(e) for e in row] for row in table]\n lens = [max(map(len, col)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n return '\\n'.join(table)", "def pprint_table(table, out=sys.stdout, rstrip=False):\n\n def max_width_col(table, col_idx):\n \"\"\"\n Get the maximum width of the given column index\n \"\"\"\n return max(len(row[col_idx]) for row in table)\n\n if rstrip:\n for row_idx, row in enumerate(table):\n table[row_idx] = [c.rstrip() for c in row]\n\n col_paddings = []\n ncols = len(table[0])\n for i in range(ncols):\n col_paddings.append(max_width_col(table, i))\n\n for row in table:\n # left col\n out.write(row[0].ljust(col_paddings[0] + 1))\n # rest of the cols\n for i in range(1, len(row)):\n col = row[i].rjust(col_paddings[i] + 2)\n out.write(col)\n out.write(\"\\n\")", "def pprint(table, truncate=40, padding=\" \", fill=\".\"):\n # Calculate the width of each column, based on the longest field in each column.\n # Long fields can be split across different lines, so we need to check each line.\n w = [0 for column in table.columns]\n R = []\n for i, row in enumerate(table.rows):\n fields = []\n for j, v in enumerate(row):\n # Cast each field in the row to a string.\n # Strings that span beyond the maximum column width are wrapped.\n # Thus, each \"field\" in the row is a list of lines.\n head, tail = _truncate(decode_utf8(v), truncate)\n lines = []\n lines.append(head)\n w[j] = max(w[j], len(head))\n while len(tail) > 0:\n head, tail = _truncate(tail, truncate)\n lines.append(head)\n w[j] = max(w[j], len(head))\n fields.append(lines)\n R.append(fields)\n for i, fields in enumerate(R):\n # Add empty lines to each field so they are of equal height.\n n = max([len(lines) for lines in fields])\n fields = [lines+[\"\"] * (n-len(lines)) for lines in fields]\n # Print the row line per line, justifying the fields with spaces.\n for k 
in range(n):\n for j, lines in enumerate(fields):\n s = lines[k]\n s += ((k==0 or len(lines[k]) > 0) and fill or \" \") * (w[j] - len(lines[k])) \n s += padding\n print s,\n print", "def print_table(table):\n for row in table:\n # Header column left justified\n print(\"{:<19}\".format(row[0]), end='')\n # Remaining columns right justified\n for col in row[1:]:\n print(\"{:>4}\".format(col), end='')\n print(\"\", end='\\n')", "def tableify(table):\n num_cols = 0\n maxes = []\n\n for row in table:\n num_cols = max(num_cols, len(row))\n if len(maxes) < len(row):\n maxes.extend([0] * (len(row) - len(maxes)))\n for i, cell in enumerate(row):\n maxes[i] = max(maxes[i], len(str(cell)))\n\n def fix_row(maxes, row):\n return ' '.join([\n str(cell) + (' ' * (maxes[i] - len(str(cell))))\n for i, cell in enumerate(row)\n ])\n\n return '\\n'.join(\n [\n fix_row(maxes, row)\n for row in table\n ]\n )", "def prettyTable(self, heads, rows): \n # First calculate the maximum lengths for each column.\n lengths = map(len, heads)\n for row in rows:\n lengths = map(max, lengths, map(len, row))\n\n # Create a format string for the maximum lengths.\n formatString = (\"|{{:^{}}}\" * len(heads) + \"|\").format(*lengths)\n\n # Print the heads, then the contents.\n headLine = formatString.format(*heads)\n border = \"-\" * len(headLine)\n print(border)\n print(headLine)\n print(border)\n\n # Remake the format string right-justified.\n formatString = (\"|{{:>{}}}\" * len(heads) + \"|\").format(*lengths)\n for row in rows:\n print(formatString.format(*row))\n print(border)", "def tabular_table(word_list=None, field_width=26, line_length=78, output_separator=\" \", truncate_elements=True):\n if not word_list:\n word_list = list()\n elements = [ANSIString(entry) for entry in word_list]\n if truncate_elements:\n elements = [entry[:field_width] for entry in elements]\n elements = [entry.ljust(field_width) for entry in elements]\n separator_length = len(output_separator)\n per_line = line_length / (field_width + separator_length)\n result_string = ANSIString(\"\")\n count = 0\n total = len(elements)\n for num, element in enumerate(elements):\n count += 1\n if count == 1:\n result_string += element\n elif count == per_line:\n result_string += output_separator\n result_string += element\n if not num+1 == total:\n result_string += '\\n'\n count = 0\n elif count > 1:\n result_string += output_separator\n result_string += element\n return result_string", "def print_table(outbuf, table, align):\n if len(table) == 0:\n return\n\n colwidths = None\n for row in table:\n if colwidths is None:\n colwidths = [len(x) for x in row]\n else:\n colwidths = [max(colwidths[i], len(x)) for i, x in enumerate(row)]\n\n for row in table:\n cells = []\n for i, cell in enumerate(row):\n padding = ' ' * (colwidths[i] - len(cell))\n if align[i] == 'r':\n cell = padding + cell\n elif i < len(row) - 1:\n # Do not pad the final column if left-aligned.\n cell += padding\n cells.append(cell.encode('utf-8', 'replace'))\n try:\n outbuf.write(b' '.join(cells) + b'\\n')\n except IOError: # pragma: no cover\n # Can happen on Windows if the pipe is closed early.\n pass", "def print_table(table):\n rest = table[1:]\n fmt = \"%-28s %-9s %-16s %s\"\n for row in rest:\n print(fmt % tuple(row))", "def print_table(table):\n for i in range(len(table)):\n print \"Row \", i, \"\\t\",\n for j in range(len(table[i])):\n print table[i][j],\n print \"\\n\"", "def print_table(table):\n for row in table:\n print(row)", "def print_table(table):\n for row in table:\n 
print(row)", "def print_table(table):\n for row in table:\n print(row)", "def print_table(table, fieldnames):\n print(\"{:<19}\".format(fieldnames[0]), end='')\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(field), end='')\n print(\"\")\n for name, row in table.items():\n # Header column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(row[field]), end='')\n print(\"\", end='\\n')", "def print_table(table, fieldnames):\n print(\"{:<19}\".format(fieldnames[0]), end='')\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(field), end='')\n print(\"\")\n for name, row in table.items():\n # Header column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(row[field]), end='')\n print(\"\", end='\\n')", "def print_table(table, separator=' ', **kwargs):\n num_cols = max([len(row) for row in table])\n for idx, _ in enumerate(table):\n while len(table[idx]) < num_cols:\n table[idx].append('')\n widths = [max([len(str(cell)) for cell in [row[col_idx] for row in table]])\n for col_idx in range(len(table[0]))]\n for row in table:\n msg = ''\n for idx, cell in enumerate(row):\n msg += f'{cell:{widths[idx]}}{separator}'\n cprint(msg, **kwargs)", "def print_table(table):\r\n print('/-----------------------------------------------------------------------------------\\\\')\r\n for item in table:\r\n\r\n while len(item[1]) <= 22:\r\n item[1] += ' '\r\n\r\n while len(item[2]) <= 27:\r\n item[2] += ' '\r\n\r\n while len(item[0]) <= 15:\r\n item[0] += ' '\r\n\r\n print('| '+item[0]+' | '+item[1]+'| '+item[2]+' |')\r\n\r\n print('\\\\-----------------------------------------------------------------------------------/')", "def prettytable(self):\r\n table = PrettyTable(self.columns)\r\n if self.sortby:\r\n table.sortby = self.sortby\r\n for a_col, alignment in self.align.items():\r\n table.align[a_col] = alignment\r\n\r\n # Adding rows\r\n for row in self.rows:\r\n table.add_row(row)\r\n return table", "def print_table(table, title_list):\n table.insert(0, title_list)\n for row_index, row in enumerate(table):\n for col_index, col in enumerate(row):\n if (type(col) == float) or (type(col) == int):\n table[row_index][col_index] = str(\"{0:,.2f}\".format(col))\n widths = [max(map(len, col)) for col in zip(*table)]\n sum_of_widths = sum(widths) + len(table[0]) * 3 - 1\n for row in table:\n print(\"-\" * sum_of_widths)\n print(\"|\" + \" \".join((val.ljust(width) + \"|\" for val, width in zip(row, widths))))\n print(\"-\" * sum_of_widths)", "def print_table(table, title_list):\n table.insert(0, title_list)\n for row_index, row in enumerate(table):\n for col_index, col in enumerate(row):\n if (type(col) == float) or (type(col) == int):\n table[row_index][col_index] = str(\"{0:,.2f}\".format(col))\n widths = [max(map(len, col)) for col in zip(*table)]\n sum_of_widths = sum(widths) + len(table[0]) * 3 - 1\n for row in table:\n print(\"-\" * sum_of_widths)\n print(\"|\" + \" \".join((val.ljust(width) + \"|\" for val, width in zip(row, widths))))\n print(\"-\" * sum_of_widths)", "def myformat(table):\n m = 0\n table = sorted(table, key=itemgetter(0))\n for t in table:\n t = str(t)\n if len(t[0]) > m:\n m = len(t[0])\n m += 10\n fstr = \"{0:}\" + m*\" \" + \"{1:}\"\n s = \"\"\n for x in table:\n try:\n a = float(x[0])\n b = float(x[1])\n s += \"{0:.5f}{1:{width}}\".format(a, b, width=m) + \"\\n\"\n except 
IndexError:\n pass\n return s\n \"\"\"\n out = \"\"\n for pair in table:\n out += str(pair[0]) + 5*\" \" + str(pair[1]) + \"\\n\"\n return out\"\"\"", "def pretty_print_table(data, list_of_dicts):\n # ensure that each dict has the same set of keys\n keys = None\n for d in list_of_dicts:\n if keys is None:\n keys = d.keys()\n else:\n if d.keys() != keys:\n print(\"Error! not all dicts have the same keys!\")\n return\n header = \"\\t\" + \"\\t\".join(['{:11.10s}'] * len(data))\n header = header.format(*data)\n rows = []\n for k in keys:\n r = k + \"\\t\"\n for d in list_of_dicts:\n if type(d[k]) is float:\n r += '{:.9f}'.format(d[k]) + \"\\t\"\n else:\n r += '{:10.9s}'.format(str(d[k])) + \"\\t\"\n rows.append(r)\n print(header)\n for row in rows:\n print(row)", "def pprint_table(out, table, headers=None, output_format='pretty',\n separator=None, vertical=False, title=None):\n\n assert(isinstance(table, (list, tuple))), \"Invalid table type\"\n if headers:\n assert(isinstance(headers, (list, tuple))), \"Invalid headers type\"\n\n sep = separator if separator else \" \"\n\n def stringnify(obj):\n if isinstance(obj, (unicode, str)):\n return udec(obj)\n else:\n return str(obj)\n\n if headers:\n headers = map(stringnify, headers)\n table = [map(stringnify, row) for row in table]\n\n if output_format == \"json\":\n assert(headers is not None), \"json output format requires headers\"\n table = [dict(zip(headers, row)) for row in table]\n out.write(json.dumps(table, indent=4))\n out.write(\"\\n\")\n elif output_format == \"csv\":\n cw = csv.writer(out)\n if headers:\n table.insert(0, headers)\n table = map(functools.partial(map, uenc), table)\n cw.writerows(table)\n elif output_format == \"pretty\":\n if vertical:\n assert(len(table) == 1)\n row = table[0]\n max_key = max(map(len, headers))\n for row in table:\n for (k, v) in zip(headers, row):\n k = uenc(k.ljust(max_key))\n v = uenc(v)\n out.write(\"%s: %s\\n\" % (k, v))\n else:\n # Find out the max width of each column\n columns = [headers] + table if headers else table\n widths = [max(map(len, col)) for col in zip(*(columns))]\n\n t_length = sum(widths) + len(sep) * (len(widths) - 1)\n if title is not None:\n t_length = max(t_length, len(title))\n out.write(\"-\" * t_length + \"\\n\")\n out.write(title.center(t_length) + \"\\n\")\n out.write(\"-\" * t_length + \"\\n\")\n if headers:\n # pretty print the headers\n line = sep.join(uenc(v.rjust(w))\n for v, w in zip(headers, widths))\n out.write(line + \"\\n\")\n out.write(\"-\" * t_length + \"\\n\")\n\n # print the rest table\n for row in table:\n line = sep.join(uenc(v.rjust(w)) for v, w in zip(row, widths))\n out.write(line + \"\\n\")\n else:\n raise ValueError(\"Unknown output format '%s'\" % output_format)", "def format_table(table, format_str):\n ret = []\n for t in table:\n if type(t) is list:\n ret.append(format_table(t, format_str))\n else:\n ret.append(format_str.format(t))\n return ret", "def generate_table(rows):\n\n # - figure out column widths\n widths = [len(max(columns, key=len)) for columns in zip(*rows)]\n\n # - print the header\n header, data = rows[0], rows[1:]\n yield (\n ' | '.join(format(title, \"%ds\" % width) for width, title in zip(widths, header))\n )\n\n # Print the separator\n first_col = ''\n # - print the data\n for row in data:\n if first_col == '' and row[0] != '':\n # - print the separator\n yield '-+-'.join('-' * width for width in widths)\n first_col = row[0]\n\n yield (\n \" | \".join(format(cdata, \"%ds\" % width) for width, cdata in zip(widths, row))\n )", 
"def describe_table(table, indentation=None):\n # -- STEP: Determine output size of all cells.\n cell_lengths = []\n all_rows = [table.headings] + table.rows\n for row in all_rows:\n lengths = [len(escape_cell(c)) for c in row]\n cell_lengths.append(lengths)\n\n # -- STEP: Determine max. output size for each column.\n max_lengths = []\n for col in range(0, len(cell_lengths[0])):\n max_lengths.append(max([c[col] for c in cell_lengths]))\n\n # -- STEP: Build textual table description.\n lines = []\n for r, row in enumerate(all_rows):\n line = u\"|\"\n for c, (cell, max_length) in enumerate(zip(row, max_lengths)):\n pad_size = max_length - cell_lengths[r][c]\n line += u\" %s%s |\" % (escape_cell(cell), \" \" * pad_size)\n line += u\"\\n\"\n lines.append(line)\n\n if indentation:\n return indent(lines, indentation)\n # -- OTHERWISE:\n return u\"\".join(lines)", "def table_format(row, header = False, width = 20):\n result = \"|\" + \"|\".join(str(entry).center(width) for entry in row) + \"|\"\n if header:\n result = result + \"\\n\" + \"|\" + \"|\".join([width * \"=\" for _ in row]) + \"|\"\n return result", "def _print_table(stats):\n max_key_len = max([len(key) for key in stats])\n width_right = 15\n width_left = max(width_right, max_key_len)\n divider = '+-' + '-' * width_left + '-+-' + '-' * width_right + '-+'\n\n def get_format_char(value):\n if isinstance(value, int):\n return 'd'\n elif isinstance(value, float):\n return '.4f'\n else:\n return 's'\n\n print(divider)\n for name, value in stats.items():\n left_format = f':>{width_left}s'\n right_format = f':<{width_right}{get_format_char(value)}'\n line_format = f'| {{{left_format}}} | {{{right_format}}} |'\n line = line_format.format(name, value)\n print(line)\n print(divider)", "def print_table(self, items, fields):\r\n formats = []\r\n borders = []\r\n for f in fields:\r\n length = max(len(f),\r\n max([len(self.string(getattr(i, f))) for i in items]))\r\n justify = '>' if isinstance(getattr(\r\n items[0], f), int) or f == 'size' or f == 'reward' else '<'\r\n formats.append('{:' + justify + self.string(length + 2) + '}')\r\n borders.append('-' * length + ' ')\r\n row_format = u''.join(formats)\r\n headers = [f + ' ' for f in fields]\r\n print(row_format.format(*headers))\r\n print(row_format.format(*borders))\r\n for i in items:\r\n i_fields = [self.string(getattr(i, f)) + ' ' for f in fields]\r\n try:\r\n print(row_format.format(*i_fields))\r\n except UnicodeEncodeError:\r\n print(row_format.format(*i_fields).encode('utf-8'))", "def table_format(row, header = False, width = 10):\n result = \"|\" + \"|\".join(str(entry).center(width) for entry in row) + \"|\"\n if header:\n l = len(result)\n result = result + \"\\n\" + \"|\" + (l-1) * \"-\"\n return result", "def format(self, table):\n #return table.data.to_json()\n m = table.as_array()\n rank = len(m.shape)\n is_table = len(table.headers)<=5 or (len(table.headers)>5 and (table.headers[0] != '0' or table.headers[1] != '1' or table.headers[2] != '2' ))\n\n if rank<3 and is_table:\n v = []\n for i in range(len(table.headers)):\n vv = {\n 'offset': table.offset,\n 'header': table.headers[i],\n 'type': table.types[i],\n 'data': _replace_nans(m[:,i].tolist()) if rank>1 else _replace_nans(m.tolist()),\n }\n if table.sizes is not None:\n vv[\"size\"] = table.sizes[0]\n v.append(vv)\n else:\n # if hasattr(data, \"strip\") or \\\n # (not hasattr(data, \"__getitem__\") and \\\n # not hasattr(data, \"__iter__\")):\n # # data is not a list/tuple => wrap it\n # data = [ data ]\n v = {\n 'offset': 
table.offset,\n #'headers': table.headers,\n 'type': table.types[0],\n 'data': _replace_nans(m.tolist()),\n }\n if table.sizes is not None:\n v[\"size\"] = table.sizes\n\n return json.dumps(v, cls=ExtEncoder)", "def EqualSpacedColmunTable(items,\n table_width=80,\n table_indent=2,\n column_pad=2):\n\n # determine the max width of all items -- this is the (minimum) column width\n column_width = len(max(items, key=len))\n # adjust the column width to include the column pad\n column_width += column_pad\n # determine the number of columns that fit the indent and pad constraints\n columns = (table_width - table_indent) / column_width\n # one more pass through the items to construct the table row by row\n table = '' # the table\n col = 0 # the current column index, col==0 => start of new row\n pad = ' ' * table_indent # the left-padding for the next column\n for item in items:\n table += pad + item\n col += 1\n if col >= columns:\n # the next column starts a new row\n col = 0\n table += '\\n'\n # the leftmost column is padded by the table indent\n pad = ' ' * table_indent\n else:\n # this pad aligns the next column\n pad = ' ' * (column_width - len(item))\n # a partial last row needs a line terminator\n if col > 0:\n table += '\\n'\n return table", "def as_html(table): \n if isinstance(table,Table):\n html = \"<table width=\\\"\" + str(table.total_width()) + \"\\\"\" + table.html_attributes + \" ><colgroup>\\n\"\n if table.col_width_dict:\n for i in range(table.no_of_columns()):\n html += \"<col width=\\\"\" + str(table.col_width_percent(i)) + \"%\\\"/>\\n\"\n html += \"</colgroup><tbody>\\n\" \n row = \"<tr>\"\n for c in range(table.no_of_columns()):\n row += \"<th width=\\\"\"+str(table.col_width_percent(c))+\"%\\\">\" + table.cell(0,c) +\"</th>\"\n row += \"</tr>\\n\"\n html += row\n for r in range(1,table.no_of_rows()):\n row = \"<tr>\"\n for c in range(table.no_of_columns()):\n row += \"<td>\" + table.cell(r,c) + \"</td>\"\n row += \"</tr>\\n\"\n html += row\n return mark_safe(html)\n else:\n return table", "def print_table(self, table):\n raise NotImplementedError('print_table method not defined!')", "def _render_table(\n table: Table,\n console_extras: Optional[Mapping[str, Any]],\n table_kwargs: Mapping[str, Any],\n column_kwargs: Mapping[str, Any],\n) -> str:\n console_kwargs = {'force_terminal': True, 'force_jupyter': False}\n if console_extras is not None:\n console_kwargs.update(console_extras)\n\n non_params_cols = 4\n rich_table = rich.table.Table(\n show_header=True,\n show_lines=True,\n show_footer=True,\n title=f'{table.module.__class__.__name__} Summary',\n **table_kwargs,\n )\n\n rich_table.add_column('path', **column_kwargs)\n rich_table.add_column('module', **column_kwargs)\n rich_table.add_column('inputs', **column_kwargs)\n rich_table.add_column('outputs', **column_kwargs)\n\n for col in table.collections:\n rich_table.add_column(col, **column_kwargs)\n\n for row in table:\n collections_size_repr = []\n\n for collection, size_bytes in row.size_and_bytes(table.collections).items():\n col_repr = ''\n\n if collection in row.module_variables:\n module_variables = _represent_tree(row.module_variables[collection])\n module_variables = _normalize_structure(module_variables)\n col_repr += _as_yaml_str(\n _summary_tree_map(_maybe_render, module_variables)\n )\n if col_repr:\n col_repr += '\\n\\n'\n\n col_repr += f'[bold]{_size_and_bytes_repr(*size_bytes)}[/bold]'\n collections_size_repr.append(col_repr)\n\n no_show_methods = {'__call__', '<lambda>'}\n path_repr = 
'/'.join(row.path)\n method_repr = (\n f' [dim]({row.method})[/dim]'\n if row.method not in no_show_methods\n else ''\n )\n rich_table.add_row(\n path_repr,\n row.module_type.__name__ + method_repr,\n _as_yaml_str(\n _summary_tree_map(_maybe_render, _normalize_structure(row.inputs))\n ),\n _as_yaml_str(\n _summary_tree_map(_maybe_render, _normalize_structure(row.outputs))\n ),\n *collections_size_repr,\n )\n\n # add footer with totals\n rich_table.columns[non_params_cols - 1].footer = rich.text.Text.from_markup(\n 'Total', justify='right'\n )\n\n # get collection totals\n collection_total = {col: (0, 0) for col in table.collections}\n for row in table:\n for col, size_bytes in row.size_and_bytes(table.collections).items():\n collection_total[col] = (\n collection_total[col][0] + size_bytes[0],\n collection_total[col][1] + size_bytes[1],\n )\n\n # add totals to footer\n for i, col in enumerate(table.collections):\n rich_table.columns[non_params_cols + i].footer = _size_and_bytes_repr(\n *collection_total[col]\n )\n\n # add final totals to caption\n caption_totals = (0, 0)\n for size, num_bytes in collection_total.values():\n caption_totals = (\n caption_totals[0] + size,\n caption_totals[1] + num_bytes,\n )\n\n rich_table.caption_style = 'bold'\n rich_table.caption = (\n f'\\nTotal Parameters: {_size_and_bytes_repr(*caption_totals)}'\n )\n\n return '\\n' + _get_rich_repr(rich_table, console_kwargs) + '\\n'", "def print_table(data_array, column_tag, row_tag, \n print_format = \"1.2f\", \n with_color_cell = True,\n colormap='Greys', colorscale=0.5, colorwrap=0, col_sep='', \n print_latex_table=True, print_text_table=True,\n print_format_along_row=True):\n if column_tag is None:\n column_tag = [\"\" for data in data_array[0, :]]\n if row_tag is None:\n row_tag = [\"\" for data in data_array]\n\n # check print_format\n if type(print_format) is not list:\n if print_format_along_row:\n # repeat the tag\n print_format = [print_format for x in row_tag]\n else:\n print_format = [print_format for x in column_tag]\n else:\n if print_format_along_row:\n assert len(print_format) == len(row_tag)\n else:\n assert len(print_format) == len(column_tag)\n\n\n # color configuration\n color_func = cm.get_cmap(colormap)\n data_idx = return_valid_number_idx(data_array)\n value_min = np.min(data_array[data_idx])\n value_max = np.max(data_array[data_idx])\n \n def get_latex_color(x):\n # return a color command for latex cell\n return return_latex_color_cell(x, value_min, value_max, \n colorscale, colorwrap, color_func)\n \n # maximum width for tags in 1st column\n row_tag_max_len = max([len(x) for x in row_tag])\n\n # maximum width for data and tags for other columns\n if print_format_along_row:\n tmp_len = []\n for idx, data_row in enumerate(data_array):\n tmp_len.append(\n max([len(\"{num:{form}}\".format(num=x, form=print_format[idx])) \\\n for x in data_row]))\n else:\n tmp_len = []\n for idx, data_col in enumerate(data_array.T):\n tmp_len.append(\n max([len(\"{num:{form}}\".format(num=x, form=print_format[idx])) \\\n for x in data_col]))\n col_tag_max_len = max([len(x) for x in column_tag] + tmp_len)\n \n # prepare buffer\n text_buffer = \"\"\n latex_buffer = \"\"\n \n # latex head\n latex_buffer += r\"\\begin{tabular}{\" \\\n + ''.join(['c' for x in column_tag + ['']]) + r\"}\" + \"\\n\"\n \n # head row\n # for latex\n hrow = [fill_cell(\"\", row_tag_max_len)] \\\n + [fill_cell(x, col_tag_max_len) for x in column_tag]\n latex_buffer += return_one_row_latex(hrow)\n # for plain text (add additional separator 
for each column)\n hrow = [fill_cell(\"\", row_tag_max_len, col_sep)] \\\n + [fill_cell(x, col_tag_max_len, col_sep) for x in column_tag]\n text_buffer += return_one_row_text(hrow)\n \n # contents\n row = data_array.shape[0]\n col = data_array.shape[1]\n for row_idx in np.arange(row):\n # row head\n row_content_latex = [fill_cell(row_tag[row_idx], row_tag_max_len)]\n row_content_text = [fill_cell(row_tag[row_idx],row_tag_max_len,col_sep)]\n \n # each column in the raw\n for col_idx in np.arange(col):\n\n if print_format_along_row:\n tmp_print_format = print_format[row_idx]\n else:\n tmp_print_format = print_format[col_idx]\n\n if is_valid_float(data_array[row_idx,col_idx]):\n num_str = \"{num:{form}}\".format(num=data_array[row_idx,col_idx],\n form=tmp_print_format)\n latex_color_cell = get_latex_color(data_array[row_idx,col_idx])\n elif type(data_array[row_idx,col_idx]) is str:\n num_str = \"{num:{form}}\".format(num=data_array[row_idx,col_idx],\n form=tmp_print_format)\n latex_color_cell = ''\n else:\n num_str = ''\n latex_color_cell = ''\n \n if not with_color_cell:\n latex_color_cell = ''\n \n row_content_text.append(\n fill_cell(num_str, col_tag_max_len, col_sep))\n\n row_content_latex.append(\n fill_cell(latex_color_cell + ' ' + num_str, col_tag_max_len))\n \n # latex table content\n latex_buffer += return_one_row_latex(row_content_latex)\n # text content\n text_buffer += return_one_row_text(row_content_text)\n \n latex_buffer += r\"\\end{tabular}\" + \"\\n\"\n\n if print_latex_table:\n print(latex_buffer)\n if print_text_table:\n print(text_buffer)\n return", "def tabular_formatted_printing(data_list):\n n = len(data_list)\n max = 0\n for i in range(0,n):\n if int(len(data_list[i][0])) > max:\n max = len(data_list[i][0])\n for i in range(0,n):\n if int(len(data_list[i][0])) < max:\n space = max - len(data_list[i][0])\n else:\n space = 0\n print(data_list[i][0]+space*' '+' : '+str(data_list[i][1]))\n return", "def print_table(listx):\r\n\tfor lists in listx:\r\n\t\tfor i in lists:\r\n\t\t\tprint str(i) , '\\t',\r\n\t\tprint()", "def generate_table(columns, rows, plain=False, sort=None, reversesort=False):\n tbl = PrettyTable(columns)\n tbl.set_style(PLAIN_COLUMNS if plain else DEFAULT)\n tbl.header = not plain\n [tbl.add_row(x) for x in rows]\n tbl.align = 'l'\n\n if sort:\n tbl.sortby = sort\n\n tbl.reversesort = reversesort\n\n return tbl", "def _pretty_table_line(self, items):\n padded_strings = []\n for i, s in enumerate(items):\n padding_value = self._padding_values[i]\n padded_strings.append('{:<{}s}'.format(str(s), padding_value))\n return \" \" + \"| \".join(padded_strings)", "def formatSimpleTable(data, stringify=True):\n\tif stringify:\n\t\tdata = [[str(v) for v in row] for row in data]\n\n\tif not data:\n\t\treturn \"\"\n\n\tcolWidthes = [max(len(row[colInd]) for row in data)\n\t\tfor colInd in range(len(data[0]))]\n\tfmtStr = \" \".join(\"%%%ds\"%w for w in colWidthes)\n\ttable = \"\\n\".join(fmtStr%tuple(row) for row in data)\n\treturn table", "def print_table(rows, labels=None):\n if labels is None:\n labels = ROW_LABELS\n\n output_table = prettytable.PrettyTable()\n output_table.field_names = labels\n output_table.align = 'l'\n output_table.vrules = prettytable.prettytable.ALL\n output_table.hrules = prettytable.prettytable.HEADER\n\n for row in rows:\n row = [x.strip() for x in row]\n output_table.add_row(row)\n\n print output_table\n print ''", "def pad_table(table, min_width=0, extra_pad=0):\n longest = []\n most_cols = 0\n for row in table:\n # naively assumes 
we're always passing in collections and not a string\n most_cols = max(len(row), most_cols)\n num = 0\n for row in table:\n if len(row) != most_cols:\n continue\n col_length = []\n for col in row:\n col_length.append(len(col))\n if not longest:\n longest = col_length\n num = len(col_length)\n else:\n for i in range(num):\n a = longest[i]\n b = col_length[i]\n if b > a:\n longest[i] = b\n # pad step\n for ri, row in enumerate(table):\n last_col = find_last_valid_col(row)\n for i, col in enumerate(row):\n # do not pad last column in each row as it makes reports format funny\n if i > last_col:\n continue\n if i == last_col:\n # trim off any space\n row[i] = col.strip()\n continue\n pad = longest[i]\n row[i] = \"%-*s\" % (max(pad + extra_pad, min_width), col)\n table[ri] = row", "def print_table(table, title_list):\n\n # your goes code\n cols = len(title_list)\n\n \n\n table.insert(0,title_list)\n\n for sublist in range(len(table)):\n if cols != len(table[sublist]):\n print('dataset does not match number of cols')\n quit()\n\n max_lenghts = []\n maxi = -1\n for sub_elem in range(cols): \n maxi = -1 \n for sublist in range(len(table)):\n if len(table[sublist][sub_elem]) > maxi:\n maxi = len(table[sublist][sub_elem])\n max_lenghts.append(maxi)\n \n\n \n\n sub_elem = 0\n \n for sublist in range(len(table)):\n if sublist == 0:\n while sub_elem < len(table[0]):\n \n if sub_elem == len(table[0])- 1:\n print('\\033[1;37;41m| {:^25} |'.format(table[sublist][sub_elem]), end =\"\")\n else:\n print('\\033[1;37;41m| {:^25} '.format(table[sublist][sub_elem]), end =\"\")\n sub_elem += 1\n \n print('\\033[0;32;48m\\n') \n sub_elem = 0 \n else:\n while sub_elem < len(table[0]):\n \n if sub_elem == len(table[0])- 1:\n print('\\033[0;37;44m| {:^25} |'.format(table[sublist][sub_elem]), end =\"\")\n else:\n print('\\033[0;37;44m| {:^25} '.format(table[sublist][sub_elem]), end =\"\")\n sub_elem += 1\n \n print('\\033[0;32;48m\\n') \n sub_elem = 0 \n print('\\033[0;37;48m\\n')\n table.pop(0)", "def transform_table_data(tableRows: list, table: bigquery.Table):\n colSchema: list = table.schema\n assert len(tableRows[0]) <= len(colSchema), f'table should have at most as many columns as its schema: {len(tableRows[0])} ! 
<= {len(colSchema)}'\n formatter = []\n for schemaField in colSchema:\n fn = None\n if schemaField.field_type in ('INT64', 'INTEGER'):\n fn = get_as_int\n elif schemaField.field_type == ('FLOAT64', 'FLOAT'):\n fn = float\n elif schemaField.field_type != 'STRING': print(schemaField.field_type)\n formatter.append(fn)\n\n for row in tableRows:\n for (idx, val) in enumerate(row):\n fn = formatter[idx]\n if fn is not None:\n result = fn(val)\n row[idx] = result if result is not None else 0\n return", "def pretty_display(self):\n\t\tpretty_space = PrettyTable()\n\t\tpretty_space.field_names = range(self.space.shape[1])\n\t\tcount = 0\n\t\tpretty_row = []\n\t\tfor cell in self.space.flat:\n\t\t\tcount = count + 1\n\t\t\tpretty_row.append(cell.state)\n\t\t\tif count >= self.space.shape[1]:\n\t\t\t\tpretty_space.add_row(pretty_row)\n\t\t\t\tcount = 0\n\t\t\t\tpretty_row = []\n\t\tprint(pretty_space)", "def tabulate(\n headers: List[str],\n rows: List[Dict[str, str]],\n header_labels: Optional[Dict[str, str]] = None,\n) -> str:\n col_widths = {}\n\n def label(name) -> str:\n label = (header_labels or {}).get(name, \"\")\n if label:\n return label\n return str(name.upper())\n\n def field(obj, name) -> str:\n return str(obj.get(name, \"\"))\n\n for name in headers:\n col_widths[name] = len(label(name))\n for row in rows:\n for name in headers:\n col_widths[name] = max(len(field(row, name)), col_widths[name])\n\n format_string = \"\"\n for col_width in col_widths.values():\n if format_string:\n format_string += \" \"\n format_string += \"{:<%d}\" % col_width\n\n output = format_string.format(*[label(name) for name in headers])\n for row in rows:\n output += \"\\n\"\n output += format_string.format(*[field(row, name) for name in headers])\n return output", "def print_table(table, exploded_at=[-1, -1]):\n\n # color codes just to look pretty\n NORMAL = '\\33[10m'\n BLUE_START = '\\33[104m'\n RED_START = '\\33[31m'\n PURPLE_START = '\\33[35m'\n GREEN_START = '\\33[92m'\n ORANGE_START = '\\33[93m'\n END = '\\033[0m'\n s = ' %s' % BLUE_START\n\n # print number headers along x-axis\n for i in range(0, width):\n s += \" %s\" % i\n if i < 10:\n s += \" \" * 2\n else:\n s += \" \"\n\n s += \"%s\\n\" % END\n # print letters for y-axis, + the relevant values in each coordinate\n # depending on table.\n for y in range(0, height):\n s += \"%s %s %s \\t\" % (BLUE_START, Minesweeper.letters[y], END)\n for x in range(0, width):\n value = table[y][x]\n if value == \"0\":\n s += \"%s%s%s\" % (NORMAL, value, END)\n elif value == \"1\":\n s += \"%s%s%s\" % (GREEN_START, value, END)\n elif value == \"2\":\n s += \"%s%s%s\" % (ORANGE_START, value, END)\n elif value == \"3\":\n s += \"%s%s%s\" % (RED_START, value, END)\n elif value == \"4\" or value == \"5\" or value == \"6\" or value == \"7\" or value == \"8\":\n s += \"%s%s%s\" % (PURPLE_START, value, END)\n # special\n elif value == \"-\":\n s += \"%s%s%s\" % (NORMAL, value, END)\n elif value == Minesweeper.BOMB:\n if y == exploded_at[0] and x == exploded_at[1]:\n # Make the bomb at the casualty site explode!\n s += \"%s%s%s\" % (RED_START, Minesweeper.EXPLOSION, END)\n else:\n # show normal bomb\n s += \"%s%s%s\" % (RED_START, value, END)\n elif value == Minesweeper.FLAG:\n s += \"%s%s%s\" % (RED_START, value, END)\n s += \" \" * 3\n s += \"\\n\"\n\n # use tabbing to space them nicely\n print s.expandtabs(3)", "def tab_delim_table(self):\n self.generate()\n\n header = ' \\t '.join([r'{: ^7}'.format(col) for col in self.columns])\n lines = []\n for row in self.rows:\n bits 
= []\n for col in self.columns:\n if col in self.formatters:\n bits.append(self.formatters[col].format(row[col]))\n else:\n bits.append(self.formatters.get(col, '{: ^7}').format(row[col] if row[col] else ''))\n lines.append(' \\t '.join(bits))\n\n return \"{}\\n{}\".format(header, '\\n'.join(lines))", "def make_table(header, align_map=None, rows=None):\n t = PrettyTable()\n t.horizontal_char = t.vertical_char = t.junction_char = ' '\n t.field_names = header\n if align_map:\n for field, align in zip(header, align_map):\n t.align[field] = align\n if rows:\n for row in rows:\n if len(row) < len(t.field_names):\n continue\n try:\n t.add_row(row)\n except Exception as err:\n print_('fields:', t.field_names)\n print_('row:', row)\n print_('rows:', rows)\n raise err\n return t", "def Table(self, line):\n if line is None:\n # TODO(user): Use resource_printer.TablePrinter() when it lands.\n if self._rows:\n cols = len(self._rows[0])\n width = [0 for _ in range(cols)]\n for row in self._rows:\n for i in range(cols - 1):\n w = len(row[i])\n if width[i] <= w:\n width[i] = w + 1\n for row in self._rows:\n self._out.write(' ' * (self._indent[self._level] + 2))\n for i in range(cols - 1):\n self._out.write(row[i].ljust(width[i]))\n self._out.write(row[-1] + '\\n')\n self._rows = []\n self._table = False\n self._out.write('\\n')\n elif not self._table:\n self._table = True\n self.Line()\n else:\n self._rows.append(line.split(','))", "def format(self, table):\n #return table.data.to_json()\n data = _replace_nans(table.as_array().tolist())\n if hasattr(data, \"strip\") or \\\n (not hasattr(data, \"__getitem__\") and \\\n not hasattr(data, \"__iter__\")):\n # data is not a list/tuple => wrap it\n data = [ data ]\n v = {\n 'offset': table.offset,\n 'data': data,\n 'headers': table.headers,\n 'types': table.types,\n }\n if table.sizes is not None:\n v[\"sizes\"] = table.sizes\n return json.dumps(v, cls=ExtEncoder)", "def pretty_print(self):\n pt = PrettyTable()\n for i in self.files_summary:\n pt.field_names = [\"File Name\", \"Classes\", \"Functions\", \"Lines\", \"Characters\"]\n pt.add_row(list([i, self.files_summary[i][\"class\"], self.files_summary[i][\"function\"], self.files_summary[i][\"line\"], self.files_summary[i][\"char\"]]))\n print(pt) #Using a Print statement here because i tried to return self.pt and it didnt give me anything but the print works", "def pretty_print_table(result, heading=False):\n # If the data is not in string, then it is likely a text format\n if type(result) == 'str':\n result = result.split('\\n')\n result = [line.split() for line in result]\n #Remove empty items\n result = [row for row in result if row!=['']]\n\n columns = len(result[0]) #Get the number of columns, this is used for row formatting\n row_format = '' #variable to construct the row formatting\n \n # Calculating the max length for each column\n for i in range(0, columns):\n # picking the length of the longest element\n #Need to convert the elements into string\n MAX_LEN = len(max([str(row[i]) for row in result], key=len))\n # Constructing the string formatting\n row_format += \"{:<\" + str(MAX_LEN) + \"} | \"\n\n pretty_result = ''\n if heading:\n pretty_result = row_format.format(*result[0]) + '\\n'\n pretty_result += len(row_format.format(*result[0])) * \"-\" + '\\n'\n result = result[1:]\n for row in result:\n pretty_result += row_format.format(*row) + '\\n'\n return pretty_result", "def __print_work_table(table):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % ('Act', 'Pred', 'Block', 'Dummy', 'Succ', 'start', 
'end')\n for k, col in sorted(table.items()):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % tuple(\n [str(k)] + [list(col[0])] + [str(col[i]) for i in range(1, len(col))])", "def tabulate(items: typing.List[str]):\n rows, columns = find_shape(len(items))\n extra = (rows * columns) - len(items)\n items += [' '] * extra\n items = [\n [f'{items[i][0]}-{items[i + columns - 1][0]}', *items[i:i + columns]]\n for i in range(0, len(items), columns)\n ]\n items = [[column[i] for column in items] for i in range(columns + 1)]\n items = ['| ' + ' | '.join(row) + ' |' for row in items]\n items.insert(1, ('| --- ' * rows) + '|')\n return '\\n'.join(items)", "def print_table(rows, header=['Operation', 'OPS']):\n if len(rows) == 0:\n return\n col_max = [max([len(str(val[i])) for val in rows]) + 3 for i in range(len(rows[0]))]\n row_format = ''.join([\"{:<\" + str(length) + \"}\" for length in col_max])\n\n if len(header) > 0:\n print(row_format.format(*header))\n print(row_format.format(*['-' * (val - 2) for val in col_max]))\n\n for row in rows:\n print(row_format.format(*row))\n print(row_format.format(*['-' * (val - 3) for val in col_max]))", "def test_simple(cls):\n table_data = [\n ['Name', 'Color', 'Type'],\n ['Avocado', 'green', 'nut'],\n ['Tomato', 'red', 'fruit'],\n ['Lettuce', 'green', 'vegetable'],\n ]\n table = cls(table_data) # '| Lettuce | green | vegetable |'\n\n assert 56 == table.column_max_width(0)\n assert 54 == table.column_max_width(1)\n assert 58 == table.column_max_width(2)\n\n table_data.append(['Watermelon', 'green', 'fruit'])\n assert 56 == table.column_max_width(0)\n assert 51 == table.column_max_width(1)\n assert 55 == table.column_max_width(2)", "def pretty_print(self) -> PrettyTable:\n table_contain: PrettyTable = PrettyTable()\n table_contain.field_names = [\n \"File Name\", \"Classes\", \"Functions\", \"Lines\", \"Characters\"]\n for key, value in self.files_summary.items():\n table_contain.add_row([key] + list(value.values()))\n\n return table_contain", "def table(\n columns: typing.Iterable[str],\n rows: typing.Iterable[typing.Iterable[object]],\n *,\n title: str = None,\n buffer: int = 2\n):\n\n # Determine the width of the window\n _, terminalWidth = os.popen('stty size', 'r').read().split()\n terminalWidth = int(terminalWidth)\n tprint = lambda x: print(x) if len(x) < terminalWidth else print(x[:terminalWidth - 4] + '...')\n\n # Determine the columns widths\n columnWidths = [0]*len(columns)\n for row in [columns] + rows:\n for i in range(len(columns)):\n columnWidths[i] = max(columnWidths[i], len(str(row[i])))\n columnWidths = [x + buffer for x in columnWidths]\n\n # define the row formats\n rowTemplate = '|'.join(['{'+str(i)+':^{'+str(i + len(columns))+'}}' for i in range(len(columns))])\n\n header = rowTemplate.format(*columns, *columnWidths)\n print()\n\n if title is not None:\n width = min(terminalWidth, len(header))\n print(\"{0:^{1}}\".format(title, width))\n print('='*width)\n\n tprint(header)\n tprint('='*len(header))\n for row in rows:\n tprint(rowTemplate.format(*[str(x) for x in row], *columnWidths))\n print()", "def _pretty_print_2d_array(rows):\n s = [[str(e) for e in row] for row in rows]\n lens = [max(map(len, col)) for col in zip(*s)]\n fmt = \"\\t\".join(\"{{:{}}}\".format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n return \"\\n\" + \"\\n\".join(table)", "def format_no_tty(table):\r\n for i, row in enumerate(table.rows):\r\n for j, item in enumerate(row):\r\n table.rows[i][j] = format_output(item, fmt='raw')\r\n ptable = 
table.prettytable()\r\n for col in table.columns:\r\n ptable.align[col] = 'l'\r\n ptable.hrules = NONE\r\n ptable.border = False\r\n ptable.header = False\r\n ptable.left_padding_width = 0\r\n ptable.right_padding_width = 2\r\n return ptable", "def __repr__(self):\n return '<widths: {}, subtable: {}>'.format(self.widths, self.subtable)", "def columnize(L, indent=\"\", width=79):\n column_width = max(len(w) for w in L) + 1\n num_columns = (width - len(indent)) // column_width\n num_rows = len(L) // num_columns\n L = L + [\"\"] * (num_rows*num_columns - len(L))\n columns = [L[k*num_rows:(k+1)*num_rows] for k in range(num_columns)]\n lines = [\" \".join(\"%-*s\"%(column_width, entry) for entry in row)\n for row in zip(*columns)]\n output = indent + (\"\\n\"+indent).join(lines)\n return output", "def print_table(data):\n for key in sorted(data):\n print \"%s: %s\" % (key.rjust(16), data[key])", "def get_table(header, floatPercission=4, *rows):\n\n table = PrettyTable(header)\n table.padding_width = 1\n for row in rows:\n # go through row and round floats\n for i in xrange(len(row)):\n if type(row[i]) is float:\n row[i] = round(row[i], floatPercission)\n table.add_row(row)\n return table", "def print_table(rows, *column_headers) -> None:\n num_of_columns = len(rows[0])\n num_of_headers = len(column_headers)\n if num_of_headers != num_of_columns:\n raise TypeError(f\"Expected {num_of_columns} column_headers arguments, \"\n f\"got {num_of_headers}.\")\n\n rows_with_headers = itertools.chain([column_headers], rows)\n columns_of_rows = list(zip(*rows_with_headers))\n column_widths = [max(map(len, column)) for column in columns_of_rows]\n column_specs = (f'{{:{w}}}' for w in column_widths)\n format_spec = ' '.join(column_specs)\n print(format_spec.format(*column_headers))\n rules = ('-' * width for width in column_widths)\n print(format_spec.format(*rules))\n\n for row in rows:\n print(format_spec.format(*row))", "def _generateTable(self, obj, **args):\n\n if self._script.utilities.isLayoutOnly(obj):\n return []\n\n try:\n table = obj.queryTable()\n except:\n return []\n\n return [messages.tableSize(table.nRows, table.nColumns)]", "def format_table(self, table, use_schema=True, name=None):\n\n if name is None:\n name = table.name\n result = self.__generic_obj_format(table, name)\n if use_schema and getattr(table, \"schema\", None):\n result = self.__generic_obj_format(table, table.schema) + \".\" + result\n return result", "def print_table(table):\n print(\"City \", end='')\n for month in MONTHS:\n print(\"{:>6}\".format(month), end='')\n print(\"\")\n for name, row in table.items():\n # Header column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for month in MONTHS:\n print(\"{:>6}\".format(row[month]), end='')\n print(\"\", end='\\n')", "def tabulate(*args, **kwargs):\n import tabulate\n\n def _align_column(strings, alignment, minwidth=0, has_invisible=True, enable_widechars=False, is_multiline=False):\n strings, padfn = tabulate._align_column_choose_padfn(strings, alignment, has_invisible)\n width_fn = tabulate._choose_width_fn(has_invisible, enable_widechars, is_multiline)\n s_widths = list(map(width_fn, strings))\n maxwidth = max(max(s_widths), minwidth)\n if is_multiline:\n if not enable_widechars and not has_invisible:\n padded_strings = [\n \"\\n\".join([padfn(maxwidth, s) for s in ms.splitlines()])\n for ms in strings]\n else:\n lines = [line.splitlines() for line in strings]\n lines_pad = [[(s, maxwidth + len(s) - width_fn(s)) for s in 
group]\n for group in lines]\n padded_strings = [\"\\n\".join([padfn(w, s) for s, w in group])\n for group in lines_pad]\n else:\n if not enable_widechars and not has_invisible:\n padded_strings = [padfn(maxwidth, s) for s in strings]\n else:\n s_lens = list(map(len, strings))\n visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]\n padded_strings = [padfn(w, s) for s, w in zip(strings, visible_widths)]\n return padded_strings\n\n tabulate._align_column = _align_column\n return tabulate.tabulate(*args, **kwargs)", "def start_table(self):\n self.col_widths = []\n self.result = \"\"", "def write_table(*lists):\n print(\"<table>\")\n for columns in zip(*lists):\n print(\"<tr>\")\n for val in columns:\n print(\"<td>{}</td>\".format(val))\n print(\"</tr>\")\n print(\"</table>\")", "def print_table(header, rows, *, sortby=\"\", alignl=\"\", alignr=\"\", hrules=\"\"):\n output = prettytable.PrettyTable(header)\n output.format = True\n if hrules:\n output.hrules = getattr(prettytable, hrules)\n\n for row in rows:\n if len(header) != len(row):\n raise ValueError(\"row does not have same size of header\")\n row_entry = []\n for pos in row:\n row_entry.append(pos)\n output.add_row(row_entry)\n\n if sortby:\n # if sortby is invalid, ie, does not exist on header,\n # sort by first column by default\n output.sortby = sortby if sortby in header else header[0]\n for left in alignl:\n output.align[left] = \"l\"\n for right in alignr:\n output.align[right] = \"r\"\n\n print(output)", "def print_table(table_2D, title_list):\n \n max_length = [] # max length of item for each column\n\n # BELOW VAR NEEDS TO BE FIXED, GOT RID OFF\n # without this correction table horizontal lines displays unevenly\n length_correction = 2 \n\n # count max length of all elements in a table, so we can print all details in neat columns\n for row in table_2D:\n column = 0\n\n for item in row:\n item = str(item)\n\n try:\n if len(item) > max_length[column]:\n max_length[column] = len(item)\n column += 1\n # expand table if needed\n except IndexError:\n max_length.append(0)\n if len(item) > max_length[column]:\n max_length[column] = len(item)\n column += 1\n\n title_index = \"No\"\n\n # print titles, while keeping columns straight\n titles = side_sign + \" \" + title_index + separator_sign\n for i in range(len(title_list)):\n # count length of all titles, to check if they are longer than entries\n if len(title_list[i]) > max_length[i]:\n max_length[i] = len(title_list[i])\n\n titles += title_list[i] + fill(str(title_list[i]), max_length[i]) + separator_sign\n\n print(\"\\n\\t/\" + fill(\"\", len(titles.strip())-length_correction, sourrounding_sign) + \"\\\\\") # print top line\n print(\"\\t\" + titles)\n print(\"\\t\" + side_sign + fill(\"\", len(titles.strip())-length_correction, sourrounding_sign) + side_sign) # print line below titles\n\n table_content = \"\"\n # print all game details, while keeping columns straight\n for row in range(len(table_2D)):\n table_content += \"\\t\" + side_sign + \" \" + str(row+1) + fill(str(row+1), max(len(str(row+1)), len(title_index))) + separator_sign\n for item in range(len(table_2D[row])):\n table_content += str(table_2D[row][item]) + fill(str(table_2D[row][item]), max_length[item]) + separator_sign\n table_content += \"\\n\"\n\n print(table_content, end=\"\")\n print(\"\\t\\\\\" + fill(\"\", len(titles.strip())-length_correction, sourrounding_sign) + \"/\")", "def _get_pretty_table(self, fields: List[str], header: bool = None) -> prettytable.PrettyTable:\n pt = 
prettytable.PrettyTable(fields)\n pt.align = 'l'\n if header is not None:\n pt.header = header\n elif hasattr(self._args, 'no_header'):\n pt.header = not self._args.no_header\n else:\n pt.header = True\n pt.border = True\n pt.hrules = prettytable.HEADER\n pt.vrules = prettytable.NONE\n return pt", "def pretty_str(self):\n def row_at_a_time():\n strs = list(self.simple_cell_strings())\n rowstrs = []\n for r in range(self.size):\n row = ''.join(strs[r*self.size:(r+1)*self.size])\n pieces = []\n for c in range(self.square_size):\n pieces.append(row[c*self.square_size:(c+1)*self.square_size])\n yield ' '.join(pieces)\n if (r + 1) % self.square_size == 0:\n yield ''\n return '\\n'.join(row_at_a_time())", "def format_table(table, use_header=True, table_format=DEFAULT_TABLE_FORMAT, float_format=DEFAULT_FLOAT_FORMAT,\n\t\tcol_align=DEFAULT_COL_ALIGN):\n\n\tnum_cols = get_num_columns(table)\n\n\t# Parse parameters.\n\theaders = 'firstrow' if use_header else ()\n\tif isinstance(col_align, Iterable) and not isinstance(col_align, str):\n\t\t# Convert each `'none'` into `None`.\n\t\tcol_align = [\n\t\t\tNone if align.lower() == 'none' else align\n\t\t\tfor align in col_align\n\t\t]\n\telse:\n\t\t# Convert `'none'` into `None`.\n\t\tif col_align.lower() == 'none':\n\t\t\tcol_align = None\n\n\t\tcol_align = (col_align,) * num_cols\n\n\tformatted_table = tabulate(table, headers=headers, tablefmt=table_format, floatfmt=float_format, colalign=col_align)\n\t\n\treturn formatted_table", "def prettyPrintListHelper_ (l, stream, indent, pretty_print=True, indent_additive=4) :\r\n \r\n # Base case, empty table\r\n entries = len(l)\r\n if entries==0 :\r\n stream.write(\"[ ]\")\r\n return\r\n \r\n # Recursive case\r\n stream.write(\"[\")\r\n if pretty_print: stream.write('\\n')\r\n\r\n # Iterate through, printing each element\r\n for ii in xrange(0,entries) :\r\n if pretty_print : indentOut_(stream, indent+indent_additive)\r\n specialStream_(l[ii], stream, indent, pretty_print, indent_additive)\r\n if entries>1 and ii!=entries-1 :\r\n stream.write(\",\")\r\n if pretty_print: stream.write('\\n')\r\n\r\n if pretty_print : indentOut_(stream, indent); \r\n stream.write(\"]\")", "def texify_table(table, labels=None, row_labels=None, align='c'):\n rows = len(table)\n cols = len(table[0])\n if labels is not None and len(labels) != cols:\n raise Exception(\"Invalid argument value: labels.\")\n if row_labels is not None and len(row_labels) != rows:\n raise Exception(\"Invalid argument value: row_labels.\")\n # begin table\n s = \"\\\\begin{tabular}{\"\n if row_labels is not None: s += 'l|'\n s += align * cols\n s += \"}\\n\"\n s += \"\\\\toprule\\n\"\n # header\n if labels is not None:\n if row_labels is not None: s += ' & '\n s += \" & \".join(labels)\n s += \" \\\\\\\\ \\n\"\n s += \"\\\\midrule\\n\"\n # table\n for idx, row in enumerate(table):\n if row_labels is not None: s += row_labels[idx] + \" & \"\n s += \" & \".join(map(str, row))\n s += \" \\\\\\\\ \\n\"\n # end table\n s += \"\\\\bottomrule\\n\"\n s += \"\\\\end{tabular}\" \n return s", "def print_table(source, count=False):\n table_value = []\n table_header = []\n for source_key, source_value in source.items():\n for item in source_value:\n table_value.append([v for v in item.values()])\n table_header.append([k for k in item.keys()])\n if not count:\n print(tabulate(table_value,\n headers=table_header[0],\n tablefmt='orgtbl'))\n else:\n print(tabulate([[len(source_value)]],\n headers=[source_key],\n tablefmt='orgtbl'))", "def print_table(table, title_list):\n\n 
# your goes code\n \n table.insert(0, title_list)\n # title listet 0.helyre teszi\n # your code\n\n lenght_list = [] # tartalmazza az összes szót\n for lines in table:\n for items in lines:\n lenght_list.append(items)\n\n longest_words_length = len(max(lenght_list, key=len))\n multiplier = len(title_list)*(longest_words_length+1)\n\n for sublist in table:\n print(\"|\\n|\", \"-\"*multiplier, \"|\")\n\n for j in sublist:\n print(\"|\", j, end = \" \"*(longest_words_length-len(j)))\n\n print(\"|\\n|\",\"-\"*multiplier, \"|\")", "def get_table_max_columns_width(table, column_mapping):\n columns_widths = [len(name) for name, not_used in column_mapping]\n for row in table:\n for idx, (not_used, dict_item_key) in enumerate(column_mapping):\n if isinstance(row[dict_item_key], str):\n value_length = len(row[dict_item_key])\n else:\n raise Err('get_max_columns_width', 'Code-Error: table rows: values must be strings\\n Got: <{}> value: <{}>\\n row: <{}>'.format(type(row[dict_item_key]), row[dict_item_key], row))\n if columns_widths[idx] < value_length:\n columns_widths[idx] = value_length\n return columns_widths", "def pretty(self):\n #table = [\"\".join([\"%8s \" % s for s in self.alpha.getSymbols()])]\n table = []\n for row in PWM.getFreq(self):\n table.append(\"\".join([\"%8.6f \" % y for y in row]))\n return table", "def rotate_tbl(a, max_cols=20, line_wdth=79):\r\n cut = min(a.shape[0], max_cols)\r\n rc = (len(a[0]), cut + 1)\r\n a = a[:cut]\r\n e = np.empty(rc, dtype=np.object)\r\n e[:, 0] = a.dtype.names\r\n types = (list, tuple, np.ndarray)\r\n u0 = [[[j, 'seq'][isinstance(j, types)] for j in i] for i in a]\r\n u = np.array(u0, dtype=np.unicode_)\r\n e[:, 1:] = u[:].T\r\n widths = [max([len(i) for i in e[:, j]]) for j in range(e.shape[1])]\r\n f = [\"{{!s: <{}}} \".format(width + 1) for width in widths]\r\n txt = \"\".join(i for i in f)\r\n txt = \"\\n\".join([txt.format(*e[i, :])[:line_wdth]\r\n for i in range(e.shape[0])])\r\n hdr_txt = \"Attribute | Records....\\n{}\".format(txt)\r\n tweet(hdr_txt)\r\n return txt #, e, widths\r", "def out(lst, max_width=100, index=False, spaces=3, ret=False):\n # Not even a list - just print\n if not isinstance(lst, (list,tuple)):\n print lst\n return\n\n # List of lists of same size\n strs = []\n if all([isinstance(l, (list,tuple)) for l in lst]) and all([len(l) == len(lst[0]) for l in lst]):\n L = len(lst[0])\n temp_strs = []\n for l in lst:\n temp_line = []\n for x in l:\n temp_line.append(str(x))\n temp_strs.append(temp_line)\n fields_sizes = []\n for i in range(L):\n temp_size = []\n for ts in temp_strs:\n temp_size.append(len(ts[i]))\n fields_sizes.append(temp_size)\n widths = [min(max(fs),max_width) for fs in fields_sizes]\n for i,l in enumerate(lst):\n temp = ''\n for j,x in enumerate(l):\n temp += temp_strs[i][j].ljust(widths[j])+' '*spaces\n strs.append(temp)\n\n else:\n for l in lst:\n strs.append(str(l))\n\n if index:\n index_width=len(str(len(strs)))\n for i in range(len(strs)):\n strs[i] = str(i).rjust(index_width)+':'+' '*spaces + strs[i]\n\n s = '\\n'.join(strs)\n\n if (ret == False):\n print s\n else:\n return s", "def _calculate_column_widths(\n self, console: \"Console\", options: \"ConsoleOptions\"\n ) -> List[int]:\n max_width = options.max_width\n columns = self.columns\n width_ranges = [\n self._measure_column(console, options, column) for column in columns\n ]\n widths = [_range.maximum or 1 for _range in width_ranges]\n get_padding_width = self._get_padding_width\n extra_width = self._extra_width\n if self.expand:\n ratios = 
[col.ratio or 0 for col in columns if col.flexible]\n if any(ratios):\n fixed_widths = [\n 0 if column.flexible else _range.maximum\n for _range, column in zip(width_ranges, columns)\n ]\n flex_minimum = [\n (column.width or 1) + get_padding_width(column._index)\n for column in columns\n if column.flexible\n ]\n flexible_width = max_width - sum(fixed_widths)\n flex_widths = ratio_distribute(flexible_width, ratios, flex_minimum)\n iter_flex_widths = iter(flex_widths)\n for index, column in enumerate(columns):\n if column.flexible:\n widths[index] = fixed_widths[index] + next(iter_flex_widths)\n table_width = sum(widths)\n\n if table_width > max_width:\n widths = self._collapse_widths(\n widths,\n [(column.width is None and not column.no_wrap) for column in columns],\n max_width,\n )\n table_width = sum(widths)\n # last resort, reduce columns evenly\n if table_width > max_width:\n excess_width = table_width - max_width\n widths = ratio_reduce(excess_width, [1] * len(widths), widths, widths)\n table_width = sum(widths)\n\n width_ranges = [\n self._measure_column(console, options.update_width(width), column)\n for width, column in zip(widths, columns)\n ]\n widths = [_range.maximum or 0 for _range in width_ranges]\n\n if (table_width < max_width and self.expand) or (\n self.min_width is not None and table_width < (self.min_width - extra_width)\n ):\n _max_width = (\n max_width\n if self.min_width is None\n else min(self.min_width - extra_width, max_width)\n )\n pad_widths = ratio_distribute(_max_width - table_width, widths)\n widths = [_width + pad for _width, pad in zip(widths, pad_widths)]\n\n return widths", "def print_table(table, db_file):\n \n try:\n conn, c = connect_to_db(db_file)\n rows = c.execute('SELECT * FROM {t}'.format(t=safe(table))).fetchall()\n cols = c.execute(\"PRAGMA table_info({t})\".format(t=safe(table))).fetchall()\n conn.close()\n pstring = '\\nTABLE ' + table + '\\n'\n r = 1\n for row in rows:\n pstring += '\\nROW ' + str(r)\n for i in range(len(cols)):\n pstring += '\\n ' + cols[i][1].ljust(16) + ' '\n if isinstance(row[i], int):\n pstring += str(row[i])\n elif isinstance(row[i], bytes):\n pstring += row[i].decode('utf-8')\n else:\n pstring += row[i]\n pstring += '\\n'\n r += 1\n return pstring\n except Exception as e:\n print(\"Error when trying to print table\", table)\n print(e)", "def print_table(self):\n print(\"%-12s%-12s%-12s%-12s%-12s\" % (\"index\",\"balance\",\"payment\",\"interest\",\"amortization\"))\n print(\"-------------------------------------------------------------\")\n for i in self.table[\"index\"]:\n print(\"%-12i%-12i%-12i%-12i%-12i\" % (self.table[\"index\"][i],self.table[\"balance\"][i]\\\n ,self.table[\"payment\"][i],self.table[\"interest\"][i],\\\n self.table[\"amortization\"][i]))", "def print_tables(hash_table, f_output, l_samples):\n\n l_fields = ['chr', 'pos', 'ref', 'alt', 'QUAL', 'FILTER',\n 'Func.refGene', 'Gene.refGene', 'GeneDetail.refGene', 'ExonicFunc.refGene', 'AAChange.refGene',\n 'cytoBand', 'ExAC_ALL', 'ExAC_AFR', 'ExAC_AMR', 'ExAC_EAS', 'ExAC_FIN', 'ExAC_NFE', 'ExAC_OTH',\n 'ExAC_SAS',\n 'avsnp147', 'SIFT_score', 'SIFT_pred', 'Polyphen2_HDIV_score', 'Polyphen2_HDIV_pred',\n 'Polyphen2_HVAR_score',\n 'Polyphen2_HVAR_pred', 'LRT_score', 'LRT_pred', 'MutationTaster_score', 'MutationTaster_pred',\n 'MutationAssessor_score', 'MutationAssessor_pred', 'FATHMM_score', 'FATHMM_pred', 'PROVEAN_score',\n 'PROVEAN_pred', 'VEST3_score', 'CADD_raw', 'CADD_phred', 'DANN_score', 'fathmm-MKL_coding_score',\n 'fathmm-MKL_coding_pred', 
'MetaSVM_score', 'MetaSVM_pred', 'MetaLR_score', 'MetaLR_pred',\n 'integrated_fitCons_score', 'integrated_confidence_value', 'GERP++_RS', 'phyloP7way_vertebrate',\n 'phyloP20way_mammalian', 'phastCons7way_vertebrate', 'phastCons20way_mammalian', 'SiPhy_29way_logOdds']\n l_fields = l_fields + l_samples\n \n l_chr = set([item[0] for item in hash_table.keys()])\n\n fo = open(f_output, 'w')\n fo.write(','.join(l_fields) + '\\n')\n for key in sorted(hash_table.keys(), key=itemgetter(1)):\n fo.write(','.join(map(lambda field: hash_table[key].get(field, '.'), l_fields)) + '\\n')\n fo.close()", "def get_table(ports):\n table = PrettyTable([\"Name\", \"Port\", \"Protocol\", \"Description\"])\n table.align[\"Name\"] = \"l\"\n table.align[\"Description\"] = \"l\"\n table.padding_width = 1\n\n for p in ports:\n table.add_row(p)\n\n return table", "def tabulate(d, transpose=False, thousands=True, key_fun=None, sep=\",\", align=True):\n pairs = d.keys()\n rows, cols = zip(*pairs)\n if transpose:\n rows, cols = cols, rows\n\n rows = sorted(set(rows))\n cols = sorted(set(cols))\n header = [\"o\"] + list(cols)\n table = []\n for r in rows:\n combo = [(r, c) for c in cols]\n if transpose:\n combo = [(c, r) for (r, c) in combo]\n data = [d.get(x, \"n/a\") for x in combo]\n data = [\"{0:.1f}\".format(x) if isinstance(x, float) else x for x in data]\n if key_fun:\n data = [key_fun(x) for x in data]\n table.append([str(r)] + data)\n\n if not align:\n formatted = load_csv(header, table, sep=sep)\n return \"\\n\".join(formatted)\n\n return loadtable(header, table, thousands=thousands)", "def print_table(ledger):\n\n table = PrettyTable() # defines a PrettyTable object\n\n table.field_names = [\n \"hospital\",\n \"patient\",\n \"status\",\n \"nonce\",\n \"prev_hash\",\n \"a\",\n \"b\",\n \"c\",\n \"current_hash\",\n ] # define field names for table\n\n for block in ledger:\n table.add_row(\n [\n block[\"hospital\"],\n block[\"patient\"],\n block[\"status\"],\n block[\"nonce\"],\n block[\"prev_hash\"],\n block[\"a\"],\n block[\"b\"],\n block[\"c\"],\n block[\"current_hash\"],\n ]\n ) # add data to table\n\n print(\"\\n\\n\" + color.BOLD + \"Printing Your Ledger:\" + color.END)\n print(table) # print prettytable of patient info", "def print_structure(self, max_rows=20, output=sys.stdout):\n max_length = min(len(self.items()), max_rows)\n\n left_column = self.keys()[0:max_length]\n right_column = [str(len(table.rows)) for key, table in self.items()[0:max_length]]\n column_headers = ['table_keys', 'row_count']\n\n print_structure(left_column, right_column, column_headers, output)", "def pretty_print(self):\n for dtr in self.dtrs:\n dtr.pretty_print(indent=2)", "def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline, rowaligns):\n lines = []\n hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []\n pad = fmt.padding\n headerrow = fmt.headerrow\n\n padded_widths = [(w + 2 * pad) for w in colwidths]\n if is_multiline:\n pad_row = lambda row, _: row # noqa do it later, in _append_multiline_row\n append_row = partial(_append_multiline_row, pad=pad)\n else:\n pad_row = _pad_row\n append_row = _append_basic_row\n\n padded_headers = pad_row(headers, pad)\n padded_rows = [pad_row(row, pad) for row in rows]\n\n if fmt.lineabove and \"lineabove\" not in hidden:\n _append_line(lines, padded_widths, colaligns, fmt.lineabove)\n\n if padded_headers:\n append_row(lines, padded_headers, padded_widths, colaligns, headerrow)\n if fmt.linebelowheader and \"linebelowheader\" not in hidden:\n 
_append_line(lines, padded_widths, colaligns, fmt.linebelowheader)\n\n if padded_rows and fmt.linebetweenrows and \"linebetweenrows\" not in hidden:\n # initial rows with a line below\n for row, ralign in zip(padded_rows[:-1], rowaligns):\n append_row(\n lines, row, padded_widths, colaligns, fmt.datarow, rowalign=ralign\n )\n _append_line(lines, padded_widths, colaligns, fmt.linebetweenrows)\n # the last row without a line below\n append_row(\n lines,\n padded_rows[-1],\n padded_widths,\n colaligns,\n fmt.datarow,\n rowalign=rowaligns[-1],\n )\n else:\n separating_line = (\n fmt.linebetweenrows\n or fmt.linebelowheader\n or fmt.linebelow\n or fmt.lineabove\n or Line(\"\", \"\", \"\", \"\")\n )\n for row in padded_rows:\n # test to see if either the 1st column or the 2nd column (account for showindex) has\n # the SEPARATING_LINE flag\n if _is_separating_line(row):\n _append_line(lines, padded_widths, colaligns, separating_line)\n else:\n append_row(lines, row, padded_widths, colaligns, fmt.datarow)\n\n if fmt.linebelow and \"linebelow\" not in hidden:\n _append_line(lines, padded_widths, colaligns, fmt.linebelow)\n\n if headers or rows:\n output = \"\\n\".join(lines)\n if fmt.lineabove == _html_begin_table_without_header:\n return JupyterHTMLStr(output)\n else:\n return output\n else: # a completely empty table\n return \"\"" ]
[ "0.7072573", "0.70656246", "0.68265593", "0.6811348", "0.67818946", "0.6566554", "0.6547522", "0.6480734", "0.64619523", "0.6371777", "0.6231521", "0.61801976", "0.6153712", "0.61120504", "0.60410714", "0.60410714", "0.6018116", "0.6011745", "0.6011745", "0.6009689", "0.59695274", "0.5945414", "0.5926213", "0.5926213", "0.5894668", "0.5890813", "0.5875803", "0.58757406", "0.5850001", "0.5848657", "0.57882744", "0.5759733", "0.57536435", "0.5714251", "0.57059985", "0.56776464", "0.56685215", "0.5647151", "0.5615308", "0.5597475", "0.5589268", "0.5585378", "0.55701524", "0.5568564", "0.5528341", "0.551322", "0.55108756", "0.5508629", "0.5487363", "0.5485027", "0.54810303", "0.54749465", "0.54707307", "0.54701304", "0.54587257", "0.5457717", "0.54471517", "0.54467016", "0.5442986", "0.54289764", "0.54041624", "0.53853196", "0.53750867", "0.5367453", "0.5349758", "0.5348468", "0.53467417", "0.53371584", "0.5316016", "0.5312915", "0.5305785", "0.5297868", "0.5293859", "0.5293116", "0.5286821", "0.52763265", "0.52751213", "0.5268626", "0.5268013", "0.526679", "0.5264979", "0.5263372", "0.5255742", "0.51914644", "0.51834685", "0.5182184", "0.51792467", "0.51424336", "0.51412976", "0.51399136", "0.5131413", "0.5116373", "0.51155573", "0.5113767", "0.51095396", "0.51093626", "0.5109191", "0.5104716", "0.50971264", "0.50968194" ]
0.5369287
63
Get the nbest logits from a list.
def _get_best_indexes(logits, n_best_size): index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_best_indexes(logits, n_best_size):\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes", "def _get_best_indexes(logits, n_best_size):\r\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\r\n best_indexes = []\r\n for i in range(len(index_and_score)):\r\n if i >= n_best_size:\r\n break\r\n best_indexes.append(index_and_score[i][0])\r\n return best_indexes", "def _get_best_indexes(logits, n_best_size):\r\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\r\n\r\n best_indexes = []\r\n for i in range(len(index_and_score)):\r\n if i >= n_best_size:\r\n break\r\n best_indexes.append(index_and_score[i][0])\r\n return best_indexes", "def get_n_best(self):\n pass", "def _get_best_indices(logits, n_best_size):\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indices = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indices.append(index_and_score[i][0])\n return best_indices", "def get_top_n_motif_scores(score_list,top_n):\r\n\treturn score_list.argsort()[-top_n:],score_list[score_list.argsort()[-top_n:]]", "def get_best_guess(self, lst):\n maxlen = 0\n pass\n #for elem in lst:", "def get_n_best(init_table, n_best):\n if n_best < 1:\n InferenceUtils.log.warning(\"nbest should be >=, but is %s\" % n_best)\n raise ValueError()\n\n entries = [entry for entry in init_table.items()]\n\n # TODO: 버그인지 확인.\n # random.shuffle(entries)\n\n def n_best_entry_comparator(a, b):\n value = a[1] - b[1]\n if abs(value) < 0.0001:\n return 1 if random.randint(0, 1) == 1 else -1\n else:\n return int(value * 10000000)\n\n entries = sorted(entries, key=functools.cmp_to_key(n_best_entry_comparator), reverse=True)\n\n table = dict()\n cnt = 0\n for entry in entries:\n if cnt < n_best:\n table[entry[0]] = entry[1]\n cnt += 1\n\n return table", "def recommend_from_scores(scores: List[List[float]], n: int) -> List[List[int]]:\n\n def top_idx(scores):\n return np.array(scores).argsort()[::-1][:n]\n\n return [top_idx(s) for s in scores]", "def n_accuracy(logits, labels, n):\n\n if n==1:\n return np.sum(np.argmax(labels,1) == np.argmax(logits,1)) / float(labels.shape[0])\n else:\n raise NotImplementedError, \"TODO - finish this\"", "def nbest(self, score_fn, n):\n return [p for p, s in self.score_ngrams(score_fn)[:n]]", "def find_best_k(data, anots, neibhours_range):\r\n \r\n best_k = 0\r\n best_acc = 0\r\n for n_neighbors in neibhours_range:\r\n accur = iterate_over_chanels(data, anots, n_neighbors)\r\n mean_acc = accur.mean()\r\n if mean_acc > best_acc:\r\n best_acc = mean_acc\r\n best_k = n_neighbors\r\n return best_k", "def get_mostFrequent(self, n=5):\r\n pass", "def get_mostFrequent(self, n=5):\r\n pass", "def getFeatures(self, N=2):\n features = self.labels[1:]\n classes = map(int, features[0::2])\n preds = np.array(features[1::2], dtype=np.float32)\n topN = []\n for n in range(N):\n valmax = preds.max()\n imax = preds.argmax()\n topN.append((classes[imax], valmax))\n preds[imax] = -1\n return topN", "def get_log_scores(recipes, C, min_sup=15, option=1):\r\n scores = [None] * len(C)\r\n for i, candidate in enumerate(C):\r\n # score the candidate set\r\n one_hot_cols = recipes.iloc[:,[*candidate,]]\r\n item_appearances = [np.nonzero(recipes[col])[0].tolist() for col in one_hot_cols]\r\n joint_count = 
len(set(item_appearances[0]).intersection(*item_appearances[1:]))\r\n #item_appearances = []\r\n #print(item_appearances[0], item_appearances[1])\r\n if joint_count >= min_sup:\r\n item_counts = [len(index_list) for index_list in item_appearances]\r\n N = len(recipes.index)\r\n # use log of scores for numerical stability\r\n log_numerator = np.log(joint_count) + (len(candidate)-1) * np.log(N)\r\n log_denominator = np.sum([np.log(ct) for ct in item_counts])\r\n scores[i] = log_numerator - log_denominator\r\n else:\r\n scores[i] = -np.inf\r\n pass\r\n return scores", "def get_best_match(self, list):\n raise NotImplementedError", "def get_top_n_ip(log_file=default_file, top_n=20):\n \n ip_list = []\n with open(log_file) as access_log:\n for line in access_log:\n cols = [x for x in line.split()]\n ip_list.append(cols[0])\n \n# print 'RAM used: %d MB' % int(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024/1024)\n# time.sleep(3)\n ip_list.sort(cmp=None, key=None, reverse=False)\n ip_dict = {}\n# print [len(list(group)) for key, group in groupby(ip_list)]\n for key, value in groupby(ip_list):\n ip_dict[key] = len(list(value))\n## sorted_dict = sorted(ip_dict.iteritems(), key=itemgetter(1), reverse=True)[:top_n]\n# print sorted_dict\n# print nlargest(top_n, ip_dict.iteritems(), itemgetter(1))\n return nlargest(top_n, ip_dict.iteritems(), itemgetter(1))", "def findBestNbClusters ( nbClustersList, inertias, kMeansModels ):\r\n\tinertiaAccelerationCurve = np.diff( inertias, 2)\r\n\tprint(\" inertia acceleration\")\r\n\tfor acc in inertiaAccelerationCurve: print (round(acc/1000000000))\r\n\tmaxAccelerationIndex = np.argmax( inertiaAccelerationCurve)\r\n\tbestNbClusters = nbClustersList [maxAccelerationIndex + 1]\r\n\tbestInertia = inertias[maxAccelerationIndex + 1]\r\n\tbestKMeansModel = kMeansModels[ maxAccelerationIndex +1]\r\n\treturn bestNbClusters, bestInertia, bestKMeansModel", "def findRFBestN():\n resultList = []\n BestScore = 0\n nList = [ n for n in range(1,200) if n%10 == 0]\n for n in nList:\n rforest = ensemble.RandomForestClassifier(max_depth=5, n_estimators=n)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n rforest = rforest.fit(cv_data_train, cv_target_train)\n trainng_score += [rforest.score(cv_data_train,cv_target_train)]\n testing_score += [rforest.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_n = n\n resultList += [[n, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding n_estimator is: ')\n return BestScore, best_n", "def best_genomes(self, n):\n def key(g):\n return g.fitness\n\n return sorted(self.most_fit_genomes, key=key, reverse=True)[:n]", "def required_nb_data_func(list_nb_coeff):\n return max(list_nb_coeff / (1+np.arange(1, N+1)//2))", "def MaxHks(N): \n return np.log2(N-1)/2", "def get_top_n(predictions, n):\n # First map the predictions to each user.\n top_n = defaultdict(list)\n for uid, iid, true_r, est, _ in 
predictions:\n top_n[uid].append((iid, est))\n\n # Then sort the predictions for each user and retrieve the k highest ones.\n for uid, user_ratings in top_n.items():\n user_ratings.sort(key=lambda x: x[1], reverse=True)\n top_n[uid] = user_ratings[:n]\n\n return top_n", "def get_top_n(predictions, n=10):\n\n # First map the predictions to each user.\n top_n = defaultdict(list)\n for uid, iid, true_r, est, _ in predictions:\n top_n[uid].append((iid, est))\n\n # Then sort the predictions for each user and retrieve the k highest ones.\n for uid, user_ratings in top_n.items():\n user_ratings.sort(key=lambda x: x[1], reverse=True)\n top_n[uid] = user_ratings[:n]\n\n return top_n", "def popularity(self,train = None,test = None,k = 8,nitem = 10):\n train = train or self.traindata\n test = test or self.testdata\n item_popularity = dict()\n for user ,items in train.items():\n for item in items.keys():\n item_popularity.setdefault(item,0)\n item_popularity[item] += 1\n ret = 0\n n = 0\n for user in train.keys():\n rank = self.recommend(user, train, k = k, nitem = nitem)\n for item ,_ in rank.items():\n ret += math.log(1+item_popularity[item])\n n += 1\n return ret / (n * 1.0)", "def nmax(num, T, nwords):\n values = []\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(((data['all_words'][n])))\n values.append(round(T[n],3))\n return nwords", "def approx_ranks(logits):\n list_size = tf.shape(input=logits)[1]\n x = tf.tile(tf.expand_dims(logits, 2), [1, 1, list_size])\n y = tf.tile(tf.expand_dims(logits, 1), [1, list_size, 1])\n pairs = tf.sigmoid(y - x)\n return tf.reduce_sum(input_tensor=pairs, axis=-1) + .5", "def test_top_n_freqs():\n ngrams = NgramFrequencies()\n top_list = [(\"d\", 4), (\"c\", 3), (\"b\", 2), (\"a\", 1)]\n top_freq = ngrams.top_n_freq(top_list, 10)\n assert top_freq == [(\"d\", 0.4), (\"c\", 0.3), (\"b\", 0.2), (\"a\", 0.1)]", "def predictions(logits):\n pred_idx = torch.argmax(logits, dim=1)\n\n return pred_idx", "def required_nb_data_func(list_nb_coeff):\n return max(list_nb_coeff)", "def required_nb_data_func(list_nb_coeff):\n return max(list_nb_coeff)", "def required_nb_data_func(list_nb_coeff):\n return max(list_nb_coeff)", "def nmax(num, T, nwords):\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(data['all_words'][n])\n return nwords", "def nmax(num, T, nwords):\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(data['all_words'][n])\n return nwords", "def get_strongest(weights, topn):\n nstrongest_idx = np.argpartition(np.abs(weights), -topn, axis=0)[-topn:]\n nstrongest = np.array([[weights[nstrongest_idx[i, j], j] for j in range(nstrongest_idx.shape[1])]\n for i in range(topn)])\n\n return nstrongest_idx, nstrongest", "def recallN(order_l, gt_idx_l, n_values):\n #print(len(gt_idx_l))\n #print(len(order_l))\n correct_at_n = np.zeros(len(n_values))\n #TODO can we do this on the matrix in one go?\n for qIx, pred in enumerate(order_l):\n for i,n in enumerate(n_values):\n # if in top N then also in top NN, where NN > N\n if np.any(np.in1d(pred[:n], gt_idx_l[qIx])):\n correct_at_n[i:] += 1\n break\n numQ = len(order_l) # for when I do partial retrieval\n recall_at_n = correct_at_n /numQ\n \n recall_l = []\n for i,n in enumerate(n_values):\n recall_l.append(recall_at_n[i])\n #print(\"- Recall@%d: %.4f\"%(n, recall_at_n[i]))\n return recall_l", "def evaluation(logits, labels):\n # For a classifier model, we can use the in_top_k Op.\n # It returns a bool tensor with shape [batch_size] that is true for\n # the examples 
where the label is in the top k (here k=1)\n # of all logits for that example.\n correct = tf.nn.in_top_k(logits, labels, 1)\n # Return the number of true entries.\n return tf.reduce_sum(tf.cast(correct, tf.int32)) \n # Do the actual training", "def entropy(predictions, number):\n val = []\n for i in range(0, predictions.shape[0]):\n tmp = 0\n for j in range(0, len(predictions[i])):\n if predictions[i][j] > 0:\n tmp -= predictions[i][j] * np.log(predictions[i][j])\n else:\n tmp -= 0.000001 * np.log(0.000001)\n val.append(tmp)\n\n return __get_max_indexes(val, number)", "def find_n_qubits(gates):\n return max((get_maximum_index(g.targets) for g in gates), default=-1) + 1", "def nll_logprobs(self, input, target_idx):\n raise NotImplementedError()", "def getbestnumberoftrees(features: ndarray, target: ndarray, limit:int) -> tuple:\n\n # Defining the initial accuracy value to compare with different number of trees in training\n accuracy = 0\n accuracyList = []\n\n for n in range(1, limit+1, 1):\n # Training\n trained_model = InternalRandomForest.modeltrain(features, target, n)\n\n # Calculating the percentual accuracy of the training\n accuracy_t = accuracy_score(target, trained_model.predict(features), normalize=True)\n\n # Build accuracy array for this set of number of trees\n accuracyList.append(accuracy_t)\n\n # Verifying if the current training is better than the last one\n if accuracy_t > accuracy:\n bestNumberTrees = n\n accuracy = accuracy_t\n\n # Obtain best trained model\n best_model = InternalRandomForest.modeltrain(features, target, bestNumberTrees)\n\n return bestNumberTrees, accuracyList, best_model", "def ensemble(scores):\r\n c = Counter ()\r\n for probs in zip (scores):\r\n idx = int (np.argmax (np.array (probs)))\r\n c.update ([idx])\r\n best = c.most_common (1)[0][0]\r\n return best", "def personal_best(scores: list) -> int:\n return max(scores)", "def bestNPredictions(probabilities, classes, n):\n assert(len(probabilities) == len(classes))\n reverse_sorted_probability_indices = sorted(range(0, len(probabilities)),\n key = lambda idx: probabilities[idx],\n reverse = True)\n return classes[reverse_sorted_probability_indices[0:n]]", "def most_common_labels(examples: List[Example], top_n: int = 1) -> List:\n top_labels = Counter([example.label for example in examples]).most_common(top_n)\n return [label[0] for label in top_labels]", "def get_num_instances_from_weights(groundtruth_weights_list):\n num_instances = tf.reduce_sum(\n [tf.math.count_nonzero(w) for w in groundtruth_weights_list])\n num_instances = tf.maximum(num_instances, 1)\n return num_instances", "def get_top_predictions(preds, top=5):\n results = []\n for pred in preds:\n top_indices = pred.argsort()[-top:][::-1]\n # result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]\n # result.sort(key=lambda x: x[2], reverse=True)\n # results.append(result)\n return top_indices", "def best_other_class(logits, exclude):\n y_onehot = torch.zeros_like(logits)\n y_onehot.scatter_(1, exclude, 1)\n # make logits that we want to exclude a large negative number\n other_logits = logits - y_onehot * 1e9\n return other_logits.max(1)[0]", "def mostVisited(self, n: int, rounds):\n start, end = rounds[0], rounds[-1]\n if start <= end:\n return list(range(start, end+1))\n else:\n return list(range(1, end+1)) + list(range(start, n+1))", "def LeastConfident(model, pool, addn):\n\n # Get probability distribution over labels\n pool_p = model.predict_proba(pool)\n\n # Find the labels with highes probabilities - Choose the addn 
of these with lowest score\n x_star = np.argsort(pool_p.max(1))[:addn]\n return x_star", "def evaluation(logits, labels):\n # For a classifier model, we can use the in_top_k Op.\n # It returns a bool tensor with shape [batch_size] that is true for\n # the examples where the label's is was in the top k (here k=1)\n # of all logits for that example.\n correct = tf.nn.in_top_k(logits, labels, 1)\n # Return the number of true entries.\n return tf.reduce_sum(tf.cast(correct, tf.int32))", "def decode_dist(self, scores: Tensor, nbest: int) \\\n -> List[Dict[int, float]]:\n heads = []\n probs = torch.softmax(scores, dim=1)\n for prob in probs:\n dist = []\n prob_sorted, indices = torch.sort(prob, descending=True)\n for k, prob in zip(range(nbest), prob_sorted):\n ix = int(indices[k])\n dist.append((ix, prob))\n heads.append(dict(dist))\n return heads", "def maiores(lista, n):\n numeros = [lista for lista in lista if lista > n]\n return numeros", "def evaluation(logits, labels):\n # For a classifier model, we can use the in_top_k Op.\n # It returns a bool tensor with shape [batch_size] that is true for\n # the examples where the label is in the top k (here k=1)\n # of all logits for that example.\n correct = tf.nn.in_top_k(logits, labels, 1)\n # Return the number of true entries.\n return tf.reduce_sum(tf.cast(correct, tf.int32))", "def evaluation(logits, labels):\n # For a classifier model, we can use the in_top_k Op.\n # It returns a bool tensor with shape [batch_size] that is true for\n # the examples where the label's is was in the top k (here k=1)\n # of all logits for that example.\n correct = tf.nn.in_top_k(logits, labels, 1)\n # Return the number of true entries.\n return tf.reduce_sum(tf.cast(correct, tf.int32))", "def predictions(logits):\n return logits.max(1)[1]", "def get_top_n_words(word_list, n):\n words = []\n\n # Change all words to lowercase\n for word in word_list:\n word = str.lower(word)\n if word not in words:\n words.append(word)\n\n # Calculate frequency of each word\n frequency = []\n for word in words:\n word_count = 0\n for test in word_list:\n if word == test:\n word_count += 1\n frequency.append(word_count)\n\n dic = dict()\n for i, word in enumerate(words):\n dic[frequency[i]] = word\n\n # Sort dictionary to return ranks\n keys = dic.keys()\n keys = sorted(keys)\n words_ranked = []\n for key in keys:\n words_ranked.append(dic.get(key))\n words_ranked = words_ranked[::-1]\n words_ranked = words_ranked[:n]\n return words_ranked", "def Kbest(kbest_init, kbest, i, iter_number, all_iter_number=None):\n iter_number = iter_number if all_iter_number is None else all_iter_number\n d = iter_number / kbest_init\n nkbest = math.ceil(abs(kbest_init - i/d))\n return nkbest", "def __findBestLogProbability(self):\n best_model = None\n highest_log_probability = -sys.maxsize# (np.finfo(float).eps)\n\n # Find the highest model\n for item in self.data_array:\n if item[1] > highest_log_probability:\n best_model = item\n highest_log_probability = item[1]\n\n return best_model", "def top_n(self, n):\n top = {}\n for code, feat_set in self.iteritems():\n tuples = sorted(feat_set.items(), reverse=True, key=itemgetter(1))\n best = {feat for feat, _ in tuples[:n]}\n top[code] = best\n return top", "def get_W_L_sets(vote_count, n_winners):\n tuples = list(vote_count.items())\n sorted_tuples = sorted(tuples, key=operator.itemgetter(1), reverse=True)\n W = [c[0] for c in sorted_tuples[:n_winners]]\n L = [c[0] for c in sorted_tuples[n_winners:]]\n return W, L", "def N_states_for_learner(self):\n 
idx_max = []\n limits = 50, 2*_math.pi, 50, 50, 50, 50, 50, 50, 50\n for idx, limit in enumerate(limits):\n test = [0 for i in xrange(len(limits))]\n check = _arange(-limit,limit,limit/1000.)\n maxi = 0\n for v in check:\n test[idx]=v\n ret = self._state_index(*test)\n maxi = max((maxi, ret[idx]))\n idx_max.append(maxi)\n\n return tuple([idx+1 for idx in idx_max])", "def out(input_lst, weight_lst, bias):\r\n return 1 / (1 + math.exp(-1 * net(input_lst, weight_lst, bias)))", "def top_coefs(clf, label, n, vocab):\n result = []\n coef = []\n if label == 0:\n for v in vocab:\n result.append((v, clf.coef_[0][vocab[v]]))\n sorted_ans = sorted(result, key=lambda x: x[1])\n \n for tuples in sorted_ans[:n]:\n coef.append((tuples[0], -1 * tuples[1]))\n return coef \n\n elif label == 1:\n for i in vocab:\n result.append((i,clf.coef_[0][vocab[i]]))\n result= sorted(result,key=lambda x:-x[1])[:n]\n return result", "def list_best_matches(self, n=5):\n bestTypes, softmaxes, bestLabels, inputImages = self._input_spectra_info()\n bestMatchLists = []\n bestBroadTypes = []\n rejectionLabels = []\n reliableFlags = []\n redshifts = []\n for specNum in range(self.numSpectra):\n bestMatchList = []\n for i in range(20):\n host, name, age = classification_split(bestTypes[specNum][i])\n if not self.knownZ:\n redshifts.append(self.calc_redshift(inputImages[i], name, age)[0])\n prob = softmaxes[specNum][i]\n bestMatchList.append((host, name, age, prob))\n bestMatchList = np.array(bestMatchList)\n bestMatchLists.append(bestMatchList[0:n])\n bestBroadType, reliableFlag = self.best_broad_type(bestMatchList)\n bestBroadTypes.append(bestBroadType)\n reliableFlags.append(reliableFlag)\n rejectionLabels.append(self.false_positive_rejection(bestTypes[specNum][0], inputImages[specNum]))\n\n bestMatchLists = np.array(bestMatchLists)\n\n if not redshifts:\n redshifts = self.redshifts\n else:\n redshifts = np.array(redshifts)\n\n return bestMatchLists, redshifts, bestBroadTypes, rejectionLabels, reliableFlags", "def get_top_words(self, label, n):\n score_list = []\n if('sod' in label):\n for term in self.vocab:\n score = self.cond_prob_sod[term] / self.cond_prob_pop[term]\n score_list.append((score,term)) \n else:\n for term in self.vocab:\n score = self.cond_prob_pop[term] / self.cond_prob_sod[term]\n score_list.append((score,term))\n score_list = sorted(score_list, key=lambda x:x[0],reverse=True)[:n]\n return score_list \n pass", "def _weight_boosting_n_estimators(name: str):\n return scope.int(hp.qloguniform(name, np.log(10.5), np.log(1000.5), 1))", "def get_max_loot(input_list):\n even = sum(input_list[::2])\n odd = sum(input_list[1::2])\n return even if even > odd else odd", "def max_info(lst):\n k = []\n maxm = -1\n for i in range(len(lst)):\n if lst[i] == maxm:\n k.append(i)\n if lst[i] > maxm:\n maxm = lst[i]\n k = [i]\n return k", "def search(x_data, y_data, n = 5):\r\n alpha = np.arange(0.01, 8, 0.01)\r\n param_grid = {'alpha' : alpha} \r\n clf = MultinomialNB() \r\n grid_search = GridSearchCV(clf, param_grid, cv=n)\r\n grid_search.fit(x_data, y_data)\r\n return grid_search.best_params_", "def optimal_nffts(arr):\n\n return int(8 * 2 ** np.ceil(np.log2(len(arr))))", "def get_top_n_words(word_list, n):\n\t\n\t#Uses Counter function to create tuples of words and number of instances of word\n\twordCount = Counter(word_list)\n\ttopWords = []\n\n\torderedByFrequency = sorted(wordCount, key=wordCount.get, reverse=True)\n\n\t#create list of inputted 'n' top words\n\tfor i in range (0 , 
n):\n\t\ttopWords.append(orderedByFrequency[i])\n\n\treturn topWords", "def max_log_likelihood(data):\n # Assume data is given as counts\n tot = sum(data)\n return _np.sum([nlogp(n, n / tot) for n in data if n > 0])", "def bestOf(predictorList):\n assert predictorList != [], \"Predictor list is empty!\"\n bestList = []\n bestRate = -1.0\n for p in predictorList:\n if p.successRate > bestRate:\n bestList = [p]\n bestRate = p.successRate\n elif p.successRate == bestRate:\n bestList.append(p)\n return bestList", "def get_top_n_words(word_list, n):\n\tfreq_dict = make_freq_dict (word_list) # get a dictionary\n\tordered_by_frequency = sorted(freq_dict, key=freq_dict.get, reverse=True) # sort\n\tprint ordered_by_frequency[0:n] # print\n\treturn ordered_by_frequency[0:n]", "def top_n_combined(self, n):\n top = set()\n for feat_set in self.itervalues():\n tuples = sorted(feat_set.items(), reverse=True, key=itemgetter(1))\n best = {feat for feat, _ in tuples[:n]}\n top |= best\n return top", "def top_coefs(clf, label, n, vocab):\n ###TODO\n \n # step 1 -> get .coef_\n coefficient = clf.coef_[0] #***** \n \n # step 2 -> check label and sort\n if label == 1: # positive class -> descending sorting\n # get indices of sorted list i.e. [2,3,1] -> sorting [1,2,3] -> indices[3,1,2]\n top_coef_ind = np.argsort(coefficient)[::-1][:n] # requires very less time by this methos of sorting and get sorted element's indices \n \n if label == 0: # negative class -> ascending sorting\n top_coef_ind = np.argsort(coefficient)[::1][:n]\n \n \n #step 3 -> get all top coefficient' indices\n #print('top_coef_ind = ',top_coef_ind)\n top_coef = abs(coefficient[top_coef_ind])\n #print('top_coef = ',top_coef)\n \n #step 4 -> get all top coefficient' terms i.e. tokens\n rev_Vocab = {}\n \n for term,colId in vocab.items():\n rev_Vocab.setdefault(colId,term)\n #alternatives -> check for fasted \n #vocab.__class__(map(reversed, vocab.items()))\n #rev_Vocab = lambda vocab: {v:k for k, v in vocab.items()}\n #rev_Vocab = lambda vocab: dict( zip(vocab.values(), vocab.keys()) )\n \n \n top_coef_terms = []\n \n for colId in top_coef_ind:\n top_coef_terms.append(rev_Vocab[colId])\n \n #step 5 -> get touple (top_coef_terms, top_coef) and send\n return ([x for x in zip(top_coef_terms, top_coef)])", "def Entropy(model, pool, addn):\n\n # Get probability distribution over labels\n pool_p = model.predict_proba(pool)\n\n # Calculate entropy and sort for each datapoint in pool\n Entropy = (pool_p * np.log(1/pool_p)).sum(axis = 1)\n Information_gain = np.argsort(Entropy)\n\n # Choose addn datapoints with highest entropy\n x_star = Information_gain[-addn:]\n return x_star", "def log_softmax_nd(logits, axes=(-1,)):\n logits -= tf.reduce_max(logits, axis=axes, keepdims=True)\n return logits - tf.reduce_logsumexp(logits, axis=axes, keepdims=True)", "def popularity(self, user_list):\n item_popular = Counter(self.train['movieId'].values)\n ret = 0\n n = 0\n print('\\nCalculate popularity: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n for rec in set([data[0] for data in recom_data]):\n ret += math.log(1 + item_popular.get(rec))\n n += 1\n ret /= n * 1.0\n print('\\npopularity: ', ret)\n return ret", "def get_training_index():\n return list(range(0, 305))", "def worstOf(predictorList):\n assert predictorList != [], \"Predictor list is empty!\"\n worstList = []\n worstRate = 2.0\n for p in predictorList:\n if p.successRate < worstRate:\n worstList = [p]\n worstRate = p.successRate\n elif p.successRate == worstRate:\n 
worstList.append(p)\n return worstList", "def nbest(self, n, aw=1.0, lw=1.0, ip=0.0):\n # Clear node score and prev\n for w in self.nodes:\n w.score = []\n w.prev = []\n self.start.score = [0]\n self.start.prev = [(None, None)]\n # Keep path with the highest score if same history exists.\n def remove_repetition(node, n):\n pruned_scores, pruned_prevs = [], []\n existing_hyps = set()\n for score, prev in zip(node.score, node.prev):\n # Backtrace\n arc, idx = prev\n hyp = []\n while arc:\n hyp.append(arc.dest.sym)\n arc, idx = arc.src.prev[idx]\n hyp = ' '.join(list(hyp))\n # Check existing history\n if hyp not in existing_hyps:\n pruned_scores.append(score)\n pruned_prevs.append(prev)\n existing_hyps.add(hyp)\n # Cut off for nbest\n if len(pruned_scores) >= n:\n break\n # Update attributes\n node.score = pruned_scores\n node.prev = pruned_prevs\n # Run Viterbi but keep top n paths & pointers\n for w in self.nodes[1:]:\n for e in w.entries:\n arc_score = e.ascr * aw + e.lscr * lw - ip\n w.score.extend([i + arc_score for i in e.src.score])\n w.prev.extend([(e, idx) for idx in range(len(e.src.prev))])\n w.score, w.prev = zip(*sorted(\n zip(w.score, w.prev), key=lambda x: x[0], reverse=True))\n remove_repetition(w, n)\n # Backtrace\n best_paths = []\n for end_ in self.end.prev:\n arc, idx = end_\n best_path = []\n while arc:\n best_path.append(arc)\n arc, idx = arc.src.prev[idx]\n best_paths.append(list(reversed(best_path)))\n return best_paths", "def get_top_friends(common_friends_list):\n n = 4\n top_n_users = []\n top_scores = sorted(common_friends_list, reverse=True)\n for score in top_scores:\n top_n_users.append(sorted(common_friends_list[score]))\n top_n_users = sum(top_n_users, [])\n return top_n_users[:n]", "def nloglikeobs(self, params):\n lambda_ = params[0]\n\n ll_output = self._LL(self.endog, rate=lambda_)\n\n return -np.log(ll_output)", "def max_sum_xlogx(n, B, lb, ub): \r\n # Initialize the optimal solution and optimal objective\r\n opt_obj = - n/np.e\r\n opt_sol = np.array([])\r\n \r\n # First select a variable whose value may not be at bound, indexed by idx-var_interior\r\n for idx_var_interior in range(n):\r\n for idx in range(pow(2, n-1)):\r\n idx_binary_expansion = int2bin(idx, n-1)\r\n # Insert element 0 into position idx_var_interior\r\n idx_binary_expansion = np.insert(idx_binary_expansion, idx_var_interior, 0)\r\n # Compute the solution x with all (but one) variables at bounds\r\n x = getx(idx_binary_expansion, lb, ub, idx_var_interior, B)\r\n \r\n if x.size > 0:\r\n obj = np.multiply(x, np.log(x)).sum()\r\n if obj >= opt_obj:\r\n opt_obj = obj\r\n opt_sol = x\r\n return [opt_obj, opt_sol]", "def max_n(inputs):\n ret = inputs[0]\n inputs = inputs[1:]\n for x in inputs:\n ret = tf.maximum(ret, x)\n return ret", "def highestAverage(requestContext, seriesList, n):\n\n return sorted( seriesList, key=lambda s: safeDiv(safeSum(s),safeLen(s)) )[-n:]", "def nits(self):", "def least_popular_influencers(self, influencerTopSim, count):\n infPopularity = {influencer: 0 for influencer in influencerTopSim}\n for influencer in influencerTopSim:\n infTweetPop = self.userTweetsStat[influencer]\n avgPop = []\n for tweet in influencerTopSim[influencer]:\n infTweet = infTweetPop[len(infTweetPop)-1]\n avgPop.append(self.assign_popularity_to_tweet(infTweet,tweet))\n infPopularity[influencer] = np.mean(avgPop)\n \n tmp = {key: rank for rank, key in enumerate(sorted(set(infPopularity.values()), reverse=True), 1)}\n rankInfluencer = {k: tmp[v] for k,v in infPopularity.items()}\n 
leastPopInfluencer = [a for a in dict(sorted(rankInfluencer.items(), key=operator.itemgetter(1), reverse=True)[:count]).keys()]\n \n return leastPopInfluencer", "def mostDeviant(requestContext, seriesList, n):\n\n deviants = []\n for series in seriesList:\n mean = safeDiv( safeSum(series), safeLen(series) )\n if mean is None: continue\n square_sum = sum([ (value - mean) ** 2 for value in series if value is not None ])\n sigma = safeDiv(square_sum, safeLen(series))\n if sigma is None: continue\n deviants.append( (sigma, series) )\n deviants.sort(key=lambda i: i[0], reverse=True) #sort by sigma\n return [ series for (_, series) in deviants ][:n] #return the n most deviant series", "def get_top_n_words(word_list, n):\n d = dict()\n for w in word_list:\n d[w] = d.get(w, 0) + 1\n ordered_by_frequency = sorted(d, key=d.get, reverse=True)\n return ordered_by_frequency[0:n]", "def getPredictions(self):\n\t\tself.bestLabel = self.testingProbs.apply(lambda x: x.argmax(),1)", "def get_majority_element_linear(self, lst):\r\n # Idea: Boyer–Moore majority vote algorithm (O(n) time)\r\n lst, n = sorted(lst), len(lst)\r\n \r\n if n <= 2:\r\n if lst[0] == lst[1]: return 1\r\n else: return -1\r\n \r\n temp_lst, count = [], 1\r\n for i in range(n-1):\r\n if lst[i] == lst[i+1]:\r\n temp_lst.append(lst[i])\r\n count += 1\r\n else: \r\n if count > int(math.floor(n / 2.0)): return count\r\n else: count = 1\r\n i += 1\r\n \r\n return -1", "def compute_per_list(self, labels, logits, weights, mask=None):\n raise NotImplementedError('Calling an abstract method.')", "def evaluation(logits, labels):\r\n # For a classifier model, we can use the in_top_k Op.\r\n # It returns a bool tensor with shape [batch_size] that is true for\r\n # the examples where the label is in the top k (here k=1)\r\n # of all logits for that example.\r\n labels = tf.to_int64(labels)\r\n correct = tf.equal(tf.argmax(logits, 1), labels)\r\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\r\n tf.summary.scalar('accuracy', accuracy)\r\n # Return the number of true entries.\r\n return accuracy", "def evaluation(logits, labels):\r\n # For a classifier model, we can use the in_top_k Op.\r\n # It returns a bool tensor with shape [batch_size] that is true for\r\n # the examples where the label is in the top k (here k=1)\r\n # of all logits for that example.\r\n labels = tf.to_int64(labels)\r\n correct = tf.equal(tf.argmax(logits, 1), labels)\r\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\r\n tf.summary.scalar('accuracy', accuracy)\r\n # Return the number of true entries.\r\n return accuracy" ]
[ "0.6809752", "0.67892396", "0.677396", "0.6740539", "0.6667522", "0.623828", "0.585659", "0.5845603", "0.5799747", "0.5723129", "0.56909186", "0.565728", "0.5616109", "0.5616109", "0.56132007", "0.56086874", "0.55966324", "0.5583005", "0.5578795", "0.55542535", "0.5510991", "0.550989", "0.5490322", "0.5473248", "0.5469323", "0.5449274", "0.541273", "0.5412123", "0.5392337", "0.5381691", "0.534076", "0.534076", "0.534076", "0.53364784", "0.53364784", "0.5319548", "0.53026885", "0.5297756", "0.52951556", "0.5286264", "0.52788705", "0.5276652", "0.52571225", "0.52565515", "0.52506036", "0.52478564", "0.5235553", "0.52303207", "0.5222805", "0.5217703", "0.52105135", "0.52045697", "0.51837", "0.5164106", "0.51634485", "0.5157762", "0.51520866", "0.5133206", "0.51309705", "0.51308656", "0.5114069", "0.5111889", "0.5108928", "0.5108333", "0.51074463", "0.5106383", "0.51047486", "0.5101656", "0.5100179", "0.50852937", "0.50719285", "0.5067894", "0.50668204", "0.5059448", "0.5056365", "0.5053598", "0.50532204", "0.5050949", "0.5041363", "0.5040592", "0.5029172", "0.50177944", "0.50137746", "0.49988312", "0.49974486", "0.49974152", "0.49935195", "0.49884364", "0.4984664", "0.498422", "0.49832064", "0.49807137", "0.49805734", "0.49786508", "0.497116", "0.49680227", "0.49676403", "0.49676403" ]
0.67765886
4
Make sure that when we remove an option, only ~1/10th of the keys get moved.
def test_rendezvous_hash_roughly_fractional_change():
    first_choices = range(10)
    second_choices = range(9)
    test_keys = [str(x) for x in range(10000)]
    first_results = [
        marathon_tools.rendezvous_hash(first_choices, k) for k in test_keys
    ]
    second_results = [
        marathon_tools.rendezvous_hash(second_choices, k) for k in test_keys
    ]
    num_same = len([1 for x, y in zip(first_results, second_results) if x == y])
    assert num_same > 8900
    assert num_same < 9100
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def truncateto(self, commandnumber):\n keytuples = self.pvalues.keys()\n allkeys = sorted(keytuples, key=lambda keytuple: keytuple[0])\n # Sanity checking\n lastkey = allkeys[0][0]\n candelete = True\n for (cmdno,proposal) in allkeys:\n if cmdno == lastkey:\n lastkey += 1\n else:\n candelete = False\n break\n # Truncating\n if not candelete:\n return False\n for (cmdno,proposal) in allkeys:\n if cmdno < commandnumber:\n del self.pvalues[(cmdno,proposal)]\n return True", "def delete_key(self, i):\n self.decrease_key(i, float('-inf'))\n self.extract_min()", "def _optionsmenu_restart():\n self.input_box.delete(1.0, END)\n pass", "def _delete_option(key: str) -> None:\n try:\n del _config_options_template[key]\n del cast(Dict[str, ConfigOption], _config_options)[key]\n except Exception:\n # We don't care if the option already doesn't exist.\n pass", "def decrease_key(self, old_item, new_item):", "def del_psana_options(self, keys):\n try:\n for key in keys:\n self._data.psana_cfg_dict.pop(key, None)\n except:\n print 'Invalid keys to remove from psana options:', keys", "def remove_option_from_value(self, o):\n result = False\n for k in self._options:\n if self._options.get(k) == o:\n self._options.pop(k)\n result = True\n return result", "def __delitem__(self, key):\n\n if '.' in key:\n path = key.split('.', 1)\n self.parser.remove_option(path[0], path[1])\n else:\n raise KeyError", "def delete_obsolete_command_items(default, control):\n delete_keys = []\n for cmd_id in control:\n if cmd_id not in default:\n delete_keys.append(cmd_id)\n\n for key in delete_keys:\n del control[key]", "def remove(self, key):\n if key < self.length:\n self.buckets[key] = -1", "def remove_option(self, option):\n self.__options.pop(option)", "def _update_prepend_key(self):\n self.prepend_key -= 1", "def test_fuzz_deletions():\n key_range = 2 ** 64\n value_range = 1024\n key_set = set()\n \n d = OrderedTreeDict()\n for value in range(0, value_range):\n key = randint(0, key_range)\n d.put(key, value)\n key_set.add(key)\n \n sorted_keys = list(sorted(key_set))\n sorted_keys_slice = sorted_keys[0:len(sorted_keys) // 2]\n \n for key in sorted_keys_slice:\n d.delete(key)\n assert len(d) > 0\n assert key not in d\n assert d.depth() <= int(2 * math.log(len(d), 2)), \"Should stay as balanced as a red black tree. 
\"\n \n keys = list(d.keys())\n assert len(keys) == len(sorted_keys_slice), \"Length should reflect number of items inserted.\"\n assert len(keys) == len(list(keys)), \"Iteration should find all items in tree.\"", "def prune_option_list(opts, keys):\n opt_d = opt_to_dict(opts)\n for k in keys:\n if k in opt_d:\n del opt_d[k]\n return [k for item in opt_d.iteritems() for k in item]", "def remove(self, key):\n i = key //1000\n j = key%1000\n self.container[i][j] = -1", "def remove(self, key):", "def remove_option(self, label):\n del self._options[label]\n index = self._menu.index(label)\n self._menu.delete(index, index)", "def remove(self, key: int) -> None:\n if key < self.len:\n self._map[key] = -1", "def remove_option(self, option):\n splitvals = option.split('/')\n section, key = \"/\".join(splitvals[:-1]), splitvals[-1]\n\n RawConfigParser.remove_option(self, section, key)\n self._dirty = True", "def _remove_old_items(self):\n if self.size_limit is not None:\n while len(self) > self.size_limit:\n self.popitem(last=False)", "def _increase_size(self) -> None:\n keys_vals_to_move = [item for item in self.HashMap if item]\n self.length = 0\n self.capacity = self.capacity * 2\n self.HashMap = [None] * self.capacity\n for item in keys_vals_to_move:\n while len(item) > 0:\n self.add(item[0], item[1])\n item.pop(0)\n item.pop(0)", "def update(self, option_old, option_new=\"\"):\n if option_old == option_new:\n return\n self.pile_list.remove(option_old)\n if option_new != \"\":\n self.pile_list.append(option_new)", "def unset_limit(self, key: str) -> None:\n self.reset(key)\n self.keys.pop(key, None)", "def _remove_key(self):\n heaps = self.priorities\n keys = heaps.keys()\n keys = min(keys)\n heaps.pop(keys)", "def test_del_handles_multiple_place_changes(robust):\n robust.delete(9)\n assert robust.balance() == 1\n assert tuple(robust.in_order()) == (\n 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19\n )\n robust.delete(10)\n assert tuple(robust.in_order()) == (\n 1, 2, 3, 4, 5, 6, 7, 8, 11, 12, 13, 14, 15, 16, 17, 18, 19\n )\n assert robust.balance() == 1\n assert robust.depth() == 5\n robust.delete(19)\n robust.delete(11)\n robust.delete(12)\n assert tuple(robust.in_order()) == (\n 1, 2, 3, 4, 5, 6, 7, 8, 13, 14, 15, 16, 17, 18\n )\n assert tuple(robust.breadth_first()) == (\n 8, 4, 16, 2, 6, 14, 18, 1, 3, 5, 7, 13, 15, 17\n )\n assert robust.balance() == 0\n assert robust.depth() == 4", "def evictOldkeys(self, cutOff):\n for key in self.values.keys():\n time = self.values[key][3]\n if time < cutOff:\n del self.values[key]", "def moveOptions(self, direction, options, tag): \n if direction == \"left\":\n if int(self.avatarConfiguration[tag]) == 1:\n return\n newImgIndex = int(self.avatarConfiguration[tag]) - 1\n else:\n if int(self.avatarConfiguration[tag]) == len(options):\n return\n newImgIndex = int(self.avatarConfiguration[tag]) + 1\n self.avatarConfiguration[tag] = str(newImgIndex)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(GG.utils.PATH_EDITOR_INTERFACE, options[int(self.avatarConfiguration[tag]) - 1]))\n img = ocempgui.draw.Image.load_image(imgPath)\n self.imgOptionsTab.picture = img\n self.updateAvatar(tag)", "def remove(self, key):\n self.arr[key] = -1", "def move1(self):\n\n options = self.location.exits.keys()\n self.location.objects.remove(a)\n print('fred is moving..')\n self.location = self.location.exits[random.choice(list(options))]\n self.location.objects.append(a)", "def remove_state(self, state):\n if isinstance(self.options, 
list):\n self.options.remove(state)\n else:\n temp = list(self.options)\n temp.remove(state)\n self.options = tuple(temp)", "async def remove(message: discord.Message, opt: options):\n for q in db.data[\"questions\"]:\n if q[\"choices\"][0] == opt[0] and q[\"choices\"][1] == opt[1]:\n db.data[\"questions\"].remove(q)\n db.save()\n await client.say(message, \"**Entry removed.**\")\n break\n else:\n await client.say(message, \"**Could not find the question.**\")", "def remove(self, key):\n self.container[key] = -1", "def test_size_changes_on_remove(multi_trie):\n multi_trie.remove(\"hello\")\n assert multi_trie.size() == 5", "def _clean_some(self, txn, cursor, cutoff):\n count = 0\n for key, value in cursor:\n count += 1\n # Limit to 100 keys per iteration.\n if count > 100:\n return key\n\n split = self.__split_payload(self._decode(value))\n if split is None:\n logging.warning(\n \"Removing undecodable key '%s' with value %s\" % (key, value)\n )\n txn.delete(key=key)\n continue\n\n _, _, timestamp = split\n if timestamp < cutoff:\n logging.warning(\n \"Removing expired key '%s' with timestamp %d\" % (key, timestamp)\n )\n txn.delete(key=key)\n return None", "def remove_key(attr):\n pm.cutKey(attr, clear=True, time=pm.currentTime())", "def eliminate(self):\n deleteKey = []\n for key,value in self._sets[self._currentSet].items():\n if value < self._minSupport:\n deleteKey.append(key)\n \n for key in deleteKey:\n del self._sets[self._currentSet][key]", "def _remove(self, key):\n del self._items[key]\n self._listbox.delete(ANCHOR)", "def remove(self, val: int) -> bool:\n if val not in self.map:\n return False\n index = self.map[val]\n del self.map[val]\n \n if index+1 != len(self.keys):\n var = self.keys[-1]\n self.keys[index] = self.keys[-1]\n self.map[var] = index\n self.keys = self.keys[:-1]\n # print('removing. 
', self.map)\n return True", "def remove(self, e):\n try:\n self.vals[e] -= 1\n except:\n return", "def removeAllKeys(self) -> None:\n ...", "def delete(self, key):", "def _removeKeys(self, frames, start):\n # remove keys\n cmds.cutKey(self.path, time=(frames[0] + 0.01, frames[-1]), option=\"keys\")\n\n # move first key to start position in case start position is not on\n # an even frame.\n cmds.keyframe(self.path, edit=True, index=(0,), timeChange=start)", "def remove(self, key: int) -> None:\n idx = key % self.size\n if self.mp[idx]:\n for i in range(len(self.mp[idx])):\n if self.mp[idx][i][0] == key:\n #self.mp[idx].pop(i)\n del self.mp[idx][i]\n break", "def delete_order():", "def _remove_additional_elements(self):\n # Produces a list of keys in sample sorted by seed\n sorted_elements = sorted(self.elements.items(), key=lambda x: x[1][0])\n\n # Removes the keys with largest seed values (beyond the first k keys)\n for i in range(self.k, len(sorted_elements)):\n del self.elements[sorted_elements[i][0]]", "def removeOption(self, *args):\n return _libsbml.ConversionProperties_removeOption(self, *args)", "def _remove_additional_elements(self):\n # Produces a list of keys in sample sorted by seed\n sorted_elements = sorted(self.elements.items(), key=lambda x: x[1][0])\n\n # Removes the keys with largest seed values (beyond the\n # first k keys)\n for i in range(self.k, len(sorted_elements)):\n del self.elements[sorted_elements[i][0]]", "def remove(self, key: int) -> None:\n if key in self.keys:\n idx = self.keys.index(key)\n self.keys.pop(idx)\n self.values.pop(idx)", "def remove(self, key: int) -> None:\n index = key % 10000\n previous = self.array[index]\n current = previous.next\n while current:\n if current.key == key:\n previous.next = current.next\n break\n previous = previous.next\n current = current.next", "def remove(self, key):\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i] -= 1\n\t\tself.n -= 1", "def remove(self, key):\n pass", "def remove(self, key: int) -> None:\n sh = key % 37\n if self.map[sh] == None:\n return\n for i in range(len(self.map[sh])):\n kv = self.map[sh][i]\n if kv[0] == key:\n self.map[sh].remove(kv)\n return", "def remove_key(self, key) -> None:\n with suppress(KeyError):\n self.size -= len(self.data[key])\n del self.data[key]", "def close_option(self, *args, **kwargs):\n self.order_option(*args, **kwargs)\n while not self.status:\n self.replace_option(*args, **kwargs)\n return True", "def _delKey(self, key):\n pass", "def __delitem__(self, key):\n pass", "def __delitem__(self, key):\n pass", "def delete_key(self,\r\n dkey):\r\n\r\n\r\n if (input(queries.DELETE_CONF_BEG\r\n +dkey+queries.DELETE_CONF_END) in YESTERMS):\r\n\r\n if dkey in self.keys():\r\n\r\n for i_temp in self.get_all_indexes():\r\n if dkey in self.get_keys_from_note(i_temp):\r\n tempnote = self.get_note(i_temp).delete_keys({dkey})\r\n self.add_note(i_temp,note=tempnote)\r\n if self.get_keys_from_note(i_temp) == set():\r\n temp = self.get_keys_from_note(i_temp)\r\n temp.add(VOIDTERM)\r\n self.add_note(i_temp,\r\n keyset_only=temp)\r\n self.add_keys_tags(i_temp,\r\n {VOIDTERM})\r\n\r\n self.delete_keys_tags(i_temp, {dkey})", "def remove(self, value) -> None:\n key = getattr(value, self.keyattr)\n if callable(key):\n key = key()\n with suppress(ValueError):\n self.data[key].remove(value)\n self.size -= 1", "def on_step_options_swapped(self, plugin, old_step_number, step_number):\n pass", "def moveOPeople(self, li: list):\r\n busyOpts = sum(li[7:-2])\r\n freeOpts = self.NOPTS - 
busyOpts\r\n\r\n li[21] += li[20]\r\n li[20] = 0\r\n li = self.skipO2n(li, 19)\r\n\r\n li[19] = li[18]\r\n li[18] = 0\r\n li = self.skipO2n(li, 17)\r\n\r\n li[17] = li[16]\r\n li[16] = 0\r\n li = self.skipO2n(li, 15)\r\n\r\n li[15] = li[14]\r\n li[14] = 0\r\n li = self.skipO2n(li, 13)\r\n\r\n li[13] = li[12]\r\n li[12] = 0\r\n li = self.skipO2n(li, 11)\r\n\r\n li[11] = li[10]\r\n li[10] = 0\r\n li = self.skipO2n(li, 9)\r\n\r\n li[9] = li[8]\r\n li[8] = 0\r\n li = self.skipO2n(li, 7)\r\n li[7] = 0\r\n\r\n\r\n # # Add the number of free optomologists to O1\r\n toAdd = min(freeOpts, li[6])\r\n li[7] += toAdd\r\n li[6] -= toAdd\r\n\r\n return li", "def undoPossibleBarMoves(self):\r\n for num in self.diceNumbers:\r\n if self.currentPlayer == 0:\r\n potentialPoint = num - 1\r\n else:\r\n potentialPoint = num * (-1)\r\n self.points[potentialPoint].setValidMove(False)\r\n self.points[potentialPoint].setBorder(BLACK, 1)", "def _del_item(dic: dict, keys: list):\n\tdic = _get_item(dic, keys[:-1])\n\tdel dic[keys[-1]]", "def remove_extra_index_from_context_actions(context_action_dict):\n keys_to_keep = {'initial_value', 'replacement_value'}\n for question in context_action_dict:\n for obj_dct in context_action_dict[question]:\n total_keys = set(obj_dct.keys())\n keys_to_remove = total_keys - keys_to_keep\n for key in keys_to_remove:\n obj_dct.pop(key)\n return context_action_dict", "def _del(self, entry):\n entry.key = dummy\n entry.value = None\n self.used -= 1", "def remove(self, key: int) -> None:\n t = key % 20011\n delete = []\n for item in self.hash[t]:\n if item[0] == key:\n delete = item\n if delete:\n self.hash[t].remove(delete)", "def go_up(self, _: int = 0) -> None:\n if self.current_option > 0:\n self.current_option += -1\n else:\n self.current_option = self.last_item_index\n self.draw()", "def delete_keys_from(self, entry_from_key, do_manual_check=True):\r\n entry = self.get_entry()\r\n for key in self.keys:\r\n del entry_from_key[key]\r\n if do_manual_check:\r\n to_del = []\r\n for key, key_entry in entry_from_key.iteritems():\r\n if key_entry == entry:\r\n to_del.append(key)\r\n for key in to_del:\r\n del entry_from_key[key]", "def remove():", "def remove(self, key: str) -> None:\n thekey = self._gethash(key)\n if self.HashMap[thekey] is not None:\n if len(self.HashMap[thekey]) == 2:\n self.HashMap[\n self._gethash(key)\n ] = None # Keep the location but set the value to None\n else:\n hashkey = self._gethash(key)\n idx = self._find_if_hashclash(key, hashkey, \"i\")\n self.HashMap[hashkey].pop(idx)\n self.HashMap[hashkey].pop(idx)\n self.length -= 1", "def remove_pos(self):\r\n selected_items = self.treeview.selection()\r\n for items in selected_items:\r\n values = self.treeview.item(items, 'values')\r\n if values[0] in self.holdings:\r\n del self.holdings[values[0]]\r\n self.treeview.delete(items)\r\n return None", "def removeControl(*args):", "def removeControl(*args):", "def removeControl(*args):", "def removeControl(*args):", "def delete(self, key: int) -> None:\n i = k % self.capacity\n cur = pre = self.data[i]\n if not cur:\n return\n if cur.pair[0] == k:\n self.data[i] = cur.next\n else:\n cur = cur.next\n while cur:\n if cur.pair[0] == k:\n pre.next = cur.next\n break\n else:\n cur, pre = cur.next, pre.next", "def _remove(self, targetIndex):\n if 0 <= targetIndex and targetIndex < len(self):\n limit = 0\n for i in xrange(len(self)):\n if self._items[i] != self._fillValue:\n #There's an element at index i, so update the limit\n limit = i\n \n #Move items in the list to their 
future positions\n for i in xrange(targetIndex, limit):\n self._items[i] = self._items[i+1]\n \n \n #Only update the logicalSize if the value being removed was not a None value.\n if self._items[targetIndex] != self._fillValue:\n self._logicalSize -= 1\n \n #The vacated element will be set to the fillValue\n self._items[limit] = self._fillValue \n \n #Shrink the physical size if need be\n self._shrink()", "def clean_keys(keyList):\n for target in ['CM', 'Max', 'MI']:\n while True:\n try:\n i = keyList.index(target)\n keyList.pop(i)\n keyList[i] = '-'.join([target, keyList[i]])\n except ValueError:\n break", "def cleanOptions(options):\r\n daemonize = options.pop('daemonize')\r\n _reload = options.pop('reload')\r\n dev = options.pop('dev')\r\n opts = []\r\n store_true = [\r\n '--nocache', '--global_cache', '--traceback', '--quiet', '--loud'\r\n ]\r\n store_false = []\r\n for key, value in options.iteritems():\r\n key = '--' + key\r\n if (key in store_true and value) or (key in store_false and not value):\r\n opts += [key, ]\r\n elif value:\r\n opts += [key, str(value)]\r\n return daemonize, _reload, opts", "def remove(self, pos, length):\n if pos in self.removals:\n self.removals[pos] += length\n else:\n self.removals[pos] = length", "def remove(self, item):\n try:\n entry = self.set.pop(item)\n entry[-1] = self.REMOVED\n except KeyError:\n print(\"Can't remove a non-existing item\")", "def _delete_volatile_keys(self, solr_dict):\n\n def delete(del_solr_dict, path_list):\n k = path_list[0]\n if k in del_solr_dict:\n if len(path_list) > 1:\n delete(del_solr_dict[k], path_list[1:])\n else:\n del del_solr_dict[k]\n\n delete(solr_dict, ['response', 'maxScore'])\n delete(solr_dict, ['responseHeader', 'QTime'])", "def unregister_opt(self, key, group='default'):\n if group not in self.__cache:\n return True\n if key not in self.__cache[group]:\n return True\n try:\n del self.__cache[group][key]\n if not self.__cache[group]:\n del self.__cache[group]\n except Exception as e:\n return False\n return True", "def remove(self):\n if LongObjectHashMap.self.modCount != self.expectedModCount:\n raise ConcurrentModificationException()\n if self.lastReturned == self.EMPTY_KEY:\n raise IllegalStateException()\n self.count -= 1\n LongObjectHashMap.self.remove(self.lastReturned)\n self.lastReturned = self.EMPTY_KEY\n self.expectedModCount = LongObjectHashMap.self.modCount", "def reset():\n\n total = 0\n changed = 0\n previous = {}\n for key in cmds.optionVar(list=True):\n if key.startswith(\"ragdoll\"):\n previous[key] = cmds.optionVar(query=key)\n cmds.optionVar(remove=key)\n total += 1\n\n install()\n\n for key in __.optionvars:\n prev = previous.get(_optionvarkey(key), \"\")\n new = read(key)\n\n if prev != new:\n changed += 1\n log.info(\"Resetting %s (%s = %s)\" % (key, prev, new))\n\n log.info(\"Resetted %d/%d optionvars\" % (changed, total))", "def _remove_pod_keys(self):\n self.labels = self.spec.pop(\"labels\", {})\n self.ports = self.spec.pop(\"ports\", [])\n self.pod_opts[\"grace\"] = self.spec.pop(\"stop_grace_period\", None)\n self.pod_opts[\"pid\"] = self.spec.pop(\"pid\", None)\n self.pod_opts[\"dns\"] = self.spec.pop(\"dns\", [])\n self.pod_opts[\"dns_search\"] = self.spec.pop(\"dns_search\", [])", "def popall(self, k, default=_MISSING):\n super_self = super(OrderedMultiDict, self)\n if super_self.__contains__(k):\n self._remove_all(k)\n if default is _MISSING:\n return super_self.pop(k)\n return super_self.pop(k, default)", "def reset_options(self, keylist):\r\n return 
self.sendAndRecv(\"RESETCONF %s\\r\\n\"%(\" \".join(keylist)))", "def finalize_options(self):", "def finalize_options(self):", "def remove(self, key: int | str):\n self.__delitem__(key)", "def handle_remove(self):\r\n self.del_common()", "def remove_option_window():\r\n for wind in windows:\r\n if type(wind) is tuple:\r\n wind[0].destroy()\r\n windows.remove(wind)", "def remove_extra(self, entry):\n\n config_spec = vim.vm.ConfigSpec()\n self.logger.info(\"Removing extra config {0}\".format(entry))\n opt = vim.option.OptionValue()\n opt.key = entry\n opt.value = \"\"\n config_spec.extraConfig = [opt]\n return self.vm_obj.ReconfigVM_Task(spec=config_spec)", "def delete(self, key):\n pass", "def delete(self, key):\n pass", "def rechargeHint(self):\n if self.hints < 8:\n self.hints = self.hints + 1", "def removeKey(self, time, attributeIndex, view) -> None:\n ...", "def removeKey(self, timeOrHash) -> None:\n ...", "def adjust_options(self, options):\n if options['begin_calibration_index']: \n options['begin_calibration_index'] = str(int(options['begin_calibration_index']) - 2)\n if options['end_calibration_index']:\n options['end_calibration_index'] = str(int(options['end_calibration_index']) - 2)\n options['begin_index'] = str(int(options['begin_index']) - 2)\n options['end_index'] = str(int(options['end_index']) - 2)\n return options", "def test_splittable_popitem(self):\n a, b = self.make_shared_key_dict(2)\n\n orig_size = sys.getsizeof(a)\n\n item = a.popitem() # split table is combined\n self.assertEqual(item, ('z', 3))\n with self.assertRaises(KeyError):\n del a['z']\n\n self.assertGreater(sys.getsizeof(a), orig_size)\n self.assertEqual(list(a), ['x', 'y'])\n self.assertEqual(list(b), ['x', 'y', 'z'])" ]
[ "0.6069215", "0.60325634", "0.59787893", "0.5949097", "0.58923924", "0.573072", "0.56973803", "0.56719327", "0.5616844", "0.5563222", "0.55309904", "0.55171895", "0.5499191", "0.54319483", "0.5431215", "0.5413914", "0.5403386", "0.53861636", "0.53794116", "0.53793514", "0.53664505", "0.53649014", "0.52778965", "0.527467", "0.5255789", "0.52518713", "0.5245402", "0.5235645", "0.5232755", "0.5229471", "0.5228877", "0.5226179", "0.5226005", "0.52249515", "0.5223135", "0.52195555", "0.5205931", "0.5195158", "0.5190194", "0.5179757", "0.5170291", "0.51675695", "0.5156486", "0.5151828", "0.5147759", "0.51474154", "0.514677", "0.5146408", "0.5141451", "0.5135894", "0.5092109", "0.50827783", "0.50822973", "0.50768465", "0.5050076", "0.5049091", "0.5049091", "0.50481", "0.503259", "0.5031447", "0.50260055", "0.5015626", "0.5015273", "0.5007637", "0.5000157", "0.4973834", "0.4971287", "0.49694946", "0.49618408", "0.4961302", "0.4959887", "0.4958497", "0.4958497", "0.4958497", "0.4958497", "0.4957998", "0.4950321", "0.49486417", "0.49447724", "0.49444944", "0.49374345", "0.4931559", "0.49305326", "0.49252558", "0.49154475", "0.49010918", "0.49002644", "0.48958677", "0.48926577", "0.48926577", "0.48894885", "0.48864084", "0.4871798", "0.48712984", "0.4869715", "0.4869715", "0.48653156", "0.4858505", "0.48575985", "0.48548204", "0.48536712" ]
0.0
-1
Calculate the min number of refills to reach 'distance'. You start with a full tank.
def compute_min_refills(distance: int, tank: int, stops: List[int]):
    location: int = 0
    n_stops = 0
    last_stop = 0
    max_drive = location + tank
    while max_drive < distance:
        counter = 0
        # Handle the case that stops are depleted before we reach distance
        if len(stops) == 0:
            return -1
        for s in stops:
            if s <= max_drive:
                counter += 1
                last_stop = s
        max_drive = last_stop + tank
        # Handle the case that we did not reach the next stop
        if counter == 0:
            return -1
        else:
            del stops[0:counter]
            n_stops += 1
    return n_stops
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_min_refills(distance, tank, stops):\n\n num_refills = 0\n current_refill = 0\n\n all_stops = []\n all_stops.append(0)\n for stop in stops:\n \tall_stops.append(stop)\n all_stops.append(distance)\n\n num_stops = len(all_stops)\n\n while current_refill < num_stops:\n \tlast_refill = current_refill\n\n \twhile (current_refill < num_stops and all_stops[current_refill+1]\n \t\t - all_stops[last_refill] <= tank):\n \t\tcurrent_refill += 1\n \t\tif current_refill == num_stops-1:\n \t\t\treturn num_refills\n\n \tif current_refill == last_refill:\n \t\treturn -1\n \tif current_refill < num_stops:\n \t\tnum_refills += 1\n\n return num_refills", "def distance(self) -> int:\n return 0", "def _distance_next(self):\n\n self.distance = 10\n\n # Here a set index to 0 if the car is finishing a lap\n # Also reset the farthest\n if self.index > (len(self.x_trajectory) - 6) and self.closed:\n self.index = 0\n self.farthest = -1\n self.laps += 1\n\n for w in range(self.index, self.index + 20):\n\n self.dist_point = math.sqrt((self.x_trajectory[w] - self.x)**2\n + (self.y_trajectory[w] - self.y)**2)\n\n if self.dist_point < self.distance:\n self.distance = self.dist_point\n self.index = w\n\n if w >= (len(self.x_trajectory) - 1):\n break\n\n self._calc_side()\n\n self.distance = self.distance * self.sign\n\n return self.distance", "def num_points_in_distance(d):\n return 1 + 3 * d * (d + 1)", "def _distance_acc(distances, thr=0.5):\n distance_valid = distances != -1\n num_distance_valid = distance_valid.sum()\n if num_distance_valid > 0:\n return (distances[distance_valid] < thr).sum() / num_distance_valid\n return -1", "def getMinimumDistancePacmanLand(self, pos):\n minD = 10000\n for p in self.ghostLandPositions:\n minD = min(minD, self.getMazeDistance(pos, p))\n return minD", "def getMinimumDistanceOpponent(self, idx, pos, defense=False):\n minD = 10000\n if defense:\n for p in self.beliefs[idx]:\n minD = min(minD, self.getMazeDistanceDefense(pos, p))\n else:\n for p in self.beliefs[idx]:\n minD = min(minD, self.getMazeDistance(pos, p))\n return minD", "def _edit_distance(prediction_tokens: List[str], reference_tokens: List[str]) ->int:\n dp = [([0] * (len(reference_tokens) + 1)) for _ in range(len(prediction_tokens) + 1)]\n for i in range(len(prediction_tokens) + 1):\n dp[i][0] = i\n for j in range(len(reference_tokens) + 1):\n dp[0][j] = j\n for i in range(1, len(prediction_tokens) + 1):\n for j in range(1, len(reference_tokens) + 1):\n if prediction_tokens[i - 1] == reference_tokens[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1\n return dp[-1][-1]", "def part_1(distances: Distances) -> int:\n\n result, _ = min(generate_routes(distances))\n print(f\"part 1: shortest route has distance {result}\")\n return result", "def get_distance(self, other, max_distance) -> int:\n if self == other:\n return 0\n # Iteratively check larger neighbourhoods, until other is in it\n distance = 1\n while True:\n if other in self.get_neighbourhood(distance):\n return distance\n distance += 1\n if distance > max_distance:\n break", "def evaluate_distance(self):\n\n fitness = 0\n routes = split_to_routes(self)\n\n for route in routes:\n route = [home] + route + [home]\n for i in range(1,len(route)):\n # Calculates full distance, including from last city\n # to first, to terminate the trip\n pos_from = route[i - 1]\n pos_to = route[i]\n distance = dm[pos_from][pos_to]\n fitness += distance\n\n return int(fitness)", "def _calc_min_distance(self, 
walker):\n\n cell_lengths, cell_angles = box_vectors_to_lengths_angles(walker.state['box_vectors'])\n\n t2 = time.time()\n # make a traj out of it so we can calculate distances through\n # the periodic boundary conditions\n walker_traj = mdj.Trajectory(walker.state['positions'],\n topology=self._mdj_top,\n unitcell_lengths=cell_lengths,\n unitcell_angles=cell_angles)\n\n t3 = time.time()\n # calculate the distances through periodic boundary conditions\n # and get hte minimum distance\n min_distance = np.min(mdj.compute_distances(walker_traj,\n it.product(self.ligand_idxs,\n self.receptor_idxs),\n periodic=self._periodic)\n )\n t4 = time.time()\n logging.info(\"Make a traj: {0}; Calc dists: {1}\".format(t3-t2,t4-t3))\n\n return min_distance", "def score_waypoint(distance):\n return max(0,\n float(settings.SATISFIED_WAYPOINT_DIST_MAX_FT - distance) /\n settings.SATISFIED_WAYPOINT_DIST_MAX_FT)", "def distance_tolerance(distance: float) -> float:\n ret = 10.0\n if distance < 0:\n ret += distance * (100 - ret) / -2500.0\n return ret", "def make_exceeding_distance_cost(allowed_distance):\n\tdef exceeding_distance_cost(node, caller_relative_probabilities, caller_distances):\n\t\treturn np.sum(caller_relative_probabilities[caller_distances > allowed_distance])\n\n\treturn exceeding_distance_cost", "async def distance(self):\n return round(await self._rpc.distance(), 2)", "def drive(self, distance=0):\n if random.uniform(1, 100) <= self.reliability:\n distance_driven = super().drive(distance)\n return distance_driven\n return 0", "def apply_penalty(self, distance):\n\n self.cities_hit += 1 # Adds to the counter of cities without visiting a prime.\n\n if self.cities_hit > 10: # If Santa has visted more than 10 cities ...\n penalty_distance = (\n distance * 0.1) + distance # ...Applies the penalty for not showing up to a prime city...\n return penalty_distance # ...and returns the value based on the penalty\n else:\n return distance # Else return the distance.", "def distance_reward(action, distances): \n\n def index_by_size(distances):\n # bigger element gets a bigger number\n idxs = np.zeros_like(distances)\n arr = np.copy(distances)\n c = distances.size\n for i in range(distances.size):\n min_curr_idx = np.argmin(arr)\n arr[min_curr_idx] = 1000\n idxs[min_curr_idx] = c\n c -= 1\n return idxs\n\n idxs_array = index_by_size(distances)\n rews = np.array([-10, -2, 0, 2, 10]) # BUG: pozor pri temu arraju nared f(no_robots), interpolacija\n idxs_array = idxs_array - 1 # 0-4\n chosen_idx = int(idxs_array[action])\n return rews[chosen_idx]", "def min_distance(self, target):\n difference = self.pivot - target\n return max(math.sqrt(np.dot(difference, difference)) - self.radius, 0)", "def calculate_move_fast_reward(self, packet):\r\n return get_distance_location(packet.gamecars[self.index].Location, self.previous_car_location)", "def get_min_distance(self):\n return round(min(self.combined_euclidian_distance))", "def min_distance(ref_curve,curve,acc=1):\n euc_length = lambda a,b: pow(sum(pow(a-b,2)),0.5)\n ref_curve_c = Curve(ref_curve)\n #print(\"comparing distance\")\n calc_dist = lambda r: sum((euc_length(r.find_nearest_point(point),point) for point in curve))\n #ref_distance = calc_dist(ref_curve_c)\n #ref_curve_c.set_points(ref_curve_c.gen_num_points(ref_curve_c.points.shape[0]*2))\n distance = calc_dist(ref_curve_c)\n #print(ref_distance,distance)\n return pow(distance,0.5)", "def heuristic(self):\n if self._dist < 0:\n self._dist = 0\n for pos, idx in enumerate(self.config):\n if idx != 0: # Skip 
blank\n self._dist += manhattan_dist(idx, pos, self.n)\n return self._dist", "def min_distance(distance, spt_set, self_nodes):\n minimum = sys.maxsize\n minimum_node = None\n for curr_node in self_nodes.values():\n if distance[curr_node.id] < minimum and not spt_set[curr_node.id]:\n minimum = distance[curr_node.id]\n minimum_node = curr_node\n return minimum_node", "def gap_to_next_car(self):\n c = self.next_car()\n if c.x > self.x:\n return c.x - c.length_in_cells - self.x\n elif c.x < self.x:\n return (self.road.N - self.x) + (c.x - c.length_in_cells)\n elif c.x == self.x:\n return self.road.N", "def calculate_zoom(self):\n distances = [geopy.distance.geodesic(self.centre_location, centroid).km for centroid in self.centroids]\n a = 4 / 20000\n distances = [1 + 4 - a * distance for distance in distances]\n print(min(distances))\n return min(distances)", "def minimumDominationCount(leaf):\n minimumDominationCount = np.nanmin(leaf.calDominationCount())\n return minimumDominationCount", "def heuristic_1(node):\n x_node, y_node = node.state.location()\n goals = node.state.grid.components.white_walkers\n goals.append(node.state.grid.components.dragon_stone)\n distance = [np.sqrt((x_node - x)**2 + (y_node - y)**2) for x, y in goals]\n return distance[np.argmin(distance)]", "def _distance_covered(self):\n\n # Calculation of distance traveled compared to the previous point\n self.gap = math.sqrt((self.x - self.x_ant)**2\n + (self.y - self.y_ant)**2)\n\n self.x_ant = self.x\n self.y_ant = self.y\n\n return self.gap", "def manhatam_distance(self) -> int:\n raise NotImplementedError", "def player_goal_distance(self) -> float:\n route = self.best_route\n return sum(route.values())", "def nremaining(self) -> int:\n return self._nmines - self._nfound", "def _calculate_distance(self):\n xy = list(zip(self.x, self.y))\n\n dist = [0]\n for i in range(1, len(xy)):\n dist.append(self.distance_between_two_points(xy[i-1], xy[i]))\n\n return np.array(dist).cumsum()", "def _kings_distance(self, piece):\n min_distance = constant.BOARD_DIMENSION - 1\n opponent_pieces = self.get_all_pieces(piece.get_player().other)\n for opp_piece in opponent_pieces:\n distance = abs(piece.row - opp_piece.row) + abs(piece.col - opp_piece.col) / 2\n if distance < min_distance:\n min_distance = distance\n\n evaluation = constant.BOARD_DIMENSION - min_distance\n return evaluation", "def __call__(self, state: Grid2D.State):\n if self.problem.goals:\n pos = state.agent_position\n return min([manhattan_distance_2d(pos, g) for g in self.problem.goals])\n return INFINITY", "def relative_distance_with(self, boxes):\n return relative_distance(self.best_box, boxes)", "def minimum_distance(object_1, object_2):\n\n # package import\n import numpy as np\n\n # main algorithm\n minimum_distance = 100000\n\n for coord_1 in object_1:\n for coord_2 in object_2:\n distance_btwn_coords = np.linalg.norm(coord_1 - coord_2)\n if distance_btwn_coords == 0:\n minimum_distance = distance_btwn_coords\n return float(minimum_distance)\n elif distance_btwn_coords < minimum_distance:\n minimum_distance = distance_btwn_coords\n\n return float(minimum_distance)", "def get_cost(self):\n if self.distance == 0:\n for i in range(1, len(self.cities) + 1):\n point1 = self.cities[i - 1]\n point2 = self.cities[i % len(self.cities)]\n self.distance += self.distance_to(point1, point2)\n return self.distance", "def earliest_arrival(jump_distance, stones):\n #jump_distance of 3 means they skip 2 stones and land on 3rd\n stone = ''\n #based on jump_distance, what are all the 
stone nums within that distance\n #when jump_distance is 5, can jujp to stones[4]\n \n stone = min(stones[:(jump_distance - 1)]) #stone = 2\n\n t = max(0, stone) #t = 2\n print(f't starts at = {t}')\n i = stones.index(stone) # i = 1\n print(f'i starts at = {i}') \n #as long as \n while i + jump_distance <= len(stones) - 1: # 3 + 5 <= 7\n print(f'i + jump_distance = {i + jump_distance}')\n\n stone = min(stones[(i + 1):(i + jump_distance)]) #stone = 3\n print(f'stone: {stone}')\n if t < stone: \n t = stone #reassign to 3\n \n i = stones.index(stone) # i = 2\n print(f'i = {i}') \n stone = min(stones[(i + 1):(i + jump_distance)]) #stone \n t = max(t, stone)\n print(f'end of while loop: t = {t}') \n \n return t\n #what's the lowest num within that jump_distance \n #go to that stone, then look at next stones in new jump_distance\n #t becomes whatever stone we jumped to\n #if the next stone we jump to is a higher value, reassign t\n #get length of list of stones so we don't go past length\n #var assigned to len(list) and compare to index we're at \n #if index we're jumping to is higher than len of list, then we're done\n #control for indexerror with while loop?\n # ", "def min_number_of(g, elt, n_fuel):\n if elt == 'FUEL':\n return n_fuel\n t = sum([g.edges[elt, s]['cost'] * min_number_of(g, s, n_fuel) for s in\n g.successors(elt)])\n return divup(t, g.nodes[elt]['batch_size'])", "def MINIMUM_BET() -> int:\n return 10", "def get_distance(self) -> int:\n return self.get_measurement_data().distance", "def nearest_neighbour_heuristic(self, city_count):\n return 1.0 / (500.0 * city_count)", "def top_of_climb_distance(self):\n return self.distances[self.top_of_climb_index()]", "def min_mireds(self):\n return 175", "def _minimum_distance(self,arg):\n return min([abs(arg-e) for e in self if not e is arg])", "def find_min_distance():\n return np.argmin(d)", "def single_walk(self):\n\n walker = BoundedWalker(self.start, self.home,\n self.left_limit, self.right_limit)\n\n num_steps = 0\n\n while not walker.is_at_home():\n walker.move()\n num_steps += 1\n\n return num_steps", "def distance(self):\n _, _, costs = self.calculate_costs()\n return np.sum(costs)", "def find_free(min_=0):\n while is_occupied(min_):\n min_ += 1\n return min_", "def distance_between(self, n1, n2):\n if self.distance_method == 'direct':\n n1_relevants = 0\n n2_relevants = 0\n for i in range(len(self.sample)):\n if is_relevant(self.sample.iloc[i], n1.anchor):\n n1_relevants += 1\n if is_relevant(self.sample.iloc[i], n2.anchor):\n n2_relevants += 1\n return (n1_relevants - n2_relevants)/len(self.sample)\n else:\n return 0.5", "def max_distance_for_rate(self, min_rate):\n # min_snr = 2**min_rate - 1\n # min_rx_dbpower = natural_to_dB(min_snr) + self.noise_dbpower\n # min_dbgain = min_rx_dbpower - self.tx_dbpower\n min_dbgain = self.capacity_to_dbgain(min_rate)\n\n radius = self.dbgain_to_dist_free_space(\n min_dbgain,\n wavelength=self.wavelength,\n antenna_dbgain_tx=self.antenna_dbgain_tx,\n antenna_dbgain_rx=self.antenna_dbgain_rx,\n )\n\n return radius", "def required_points(self):\n req_points = self.min_performance * self.initial_available_points()\n return np.maximum(0, np.int64(np.ceil(req_points)))", "def weight() -> int:\n return floor(stakedTokens / MINIMUM_STAKE)", "def adjusted_distance_reciprocal(distance: float) -> float:\n return 1 / (1 + distance)", "def distance(j, k, disk_count):\n \n if j <= k:\n return k - j\n else:\n return distance(j, disk_count, disk_count) + k", "def distance(self) -> float:\n return 
self._dist_two_wire() # at this time we only support 2-wire meausre", "def small_straight_points(dice_list):\n if straight_size(dice_list) >= 4 or check_yahtzee(dice_list):\n return 30\n else:\n return 0", "def h(self,node):\n \"*** YOUR CODE HERE ***\"\n dist_arr = []\n for goal in self.goals:\n dist_arr.append(manhattan_distance_with_heading(node.state, goal))\n return min(dist_arr)", "def look_ahead_heuristic(game, player):\n if game.is_loser(player):\n return float('-inf')\n\n if game.is_winner(player):\n return float('inf')\n\n own_legal_moves = game.get_legal_moves(player)\n own_moves = len(own_legal_moves)\n for m in own_legal_moves:\n own_moves += len(game.__get_moves__(m))\n\n opp_legal_moves = game.get_legal_moves(game.get_opponent(player))\n opp_moves = len(opp_legal_moves)\n for m in opp_legal_moves:\n opp_moves += len(game.__get_moves__(m))\n\n return float(own_moves - opp_moves)", "def get_lift(self):\n return 0.0", "def h(self,node):\n \"*** YOUR CODE HERE ***\"\n dist_arr = [] #Initialize Array\n for goal in self.goals: # Iterate through Goals\n dist_arr.append(manhattan_distance_with_heading(node.state, goal)) # Add distance between node and goal\n return min(dist_arr) # Return minimum", "def procrustes_distance(ref_curve,curve):\n super_imposed = superposition(ref_curve,curve)\n distances = min_distance(*super_imposed)\n return distances", "def _get_distance(reindeer, race_time):\n interval = reindeer.flight_time + reindeer.rest_time\n cycles = race_time // interval\n flight_time = min(reindeer.flight_time, race_time - interval * cycles)\n total_flying_time = reindeer.flight_time * cycles + flight_time\n return total_flying_time * reindeer.flight_speed", "def minimum_distance(self, state, *args, **kwargs):\n raise NotImplementedError", "def distance_factor(self):\n return self._distancefactor", "def countTotalDistance(path):\n current = path[0]\n totalDistance = 0\n\n for node in path[1:]:\n totalDistance += distance_func(current, node)\n current = node\n\n return totalDistance", "def find_distance(self, other):\n if len(self.reactants) != len(other.reactants):\n return 1.0\n\n distances = []\n for mols in itertools.permutations(self.reactants):\n d = 0\n for idx, (moli, molj) in enumerate(zip(mols, other.reactants)):\n moli_size = len(moli.GetAtoms())\n molj_size = len(molj.GetAtoms())\n\n mcs_size = 0\n if moli_size == 1 or molj_size == 1:\n if (moli.HasSubstructMatch(other.retrons[idx]) or\n molj.HasSubstructMatch(self.retrons[idx])):\n mcs_size = 1\n else:\n mcs = MCS.FindMCS([moli, molj], ringMatchesRingOnly=True)\n if mcs.numAtoms != -1:\n mcs_size = mcs.numAtoms\n\n d += 1.0 - float(mcs_size) / max(moli_size, molj_size)\n distances.append(d / len(mols))\n return min(distances)", "def distance_between_wheels():", "def _get_distance_by_span(matched_positions, forms):\n if len(set(forms[matched_positions])) < 2:\n return 0\n if len(matched_positions) == 2:\n return _get_trivial_distance(matched_positions)\n start_pos = np.min(matched_positions)\n end_pos = np.max(matched_positions)\n if start_pos != end_pos:\n return np.abs(end_pos - start_pos) + 1\n return 0", "def fn(i, k):\n if k < 0: return inf # impossible \n if i == 0: return 0 \n return min(ceil((fn(i-1, k) + dist[i-1])/speed) * speed, dist[i-1] + fn(i-1, k-1))", "def manhattan_distance(self):\n dist = 0\n for target, tile in zip(self.winCdt[:-1], self.tiles[:-1]):\n dist += abs(target[0] - tile[0]) + abs(target[1] - tile[1])\n return dist", "def large_straight_points(dice_list):\n if straight_size(dice_list) 
>= 5 or check_yahtzee(dice_list):\n return 40\n else:\n return 0", "def number_of_atoms_within_radius(self, distance_cutoff):\n n_atoms = 0\n atom_ids = []\n for contact in self.nearby_atoms:\n other_id = contact.atom_id_no_altloc()\n if (not other_id in atom_ids):\n if (contact.distance() < distance_cutoff):\n n_atoms += 1\n atom_ids.append(other_id) # check for alt confs.\n return n_atoms", "async def distance_threshold(self, *args):\n return await self._rpc.distance_threshold(*args)", "def minimalDistance(a1, a2, b1, b2):\n adir = a2 - a1\n bdir = b2 - b1\n amid = a1 + 0.5 * adir\n s = b1 - amid\n A = np.dot(bdir, bdir)\n B_2 = np.dot(bdir, s)\n lambda_beta = - B_2 / A\n bOpt = lambda_beta * bdir + b1\n s = a1 - bOpt\n A = np.dot(adir, adir)\n B_2 = np.dot(adir, s)\n lambda_alpha = - B_2 / A\n aOpt = lambda_alpha * adir + a1\n Delta = bOpt - aOpt\n return np.sqrt(np.dot(Delta, Delta))", "def get_distance(highway_now: list, car_index: int) -> int:\n\n distance = 0\n cells = highway_now[car_index + 1 :]\n for cell in range(len(cells)): # May need a better name for this\n if cells[cell] != -1: # If the cell is not empty then\n return distance # we have the distance we wanted\n distance += 1\n # Here if the car is near the end of the highway\n return distance + get_distance(highway_now, -1)", "def closest_distance(node_a, node_b):\n min_distance = 999999\n for loc_a in node_a.locations:\n for loc_b in node_b.locations:\n distance = abs(loc_a - loc_b)\n if distance < min_distance:\n min_distance = distance\n return min_distance", "def lidar_relative(self):\n return self.distance", "def distance(mass_1: ObjectMass, mass_2: ObjectMass) -> int:\n\n # collect orbit hops\n orbits_1 = mass_1.get_orbit_hops()\n\n orbits_2 = mass_2.get_orbit_hops()\n\n # find common orbit hop with least amount of hops\n common_hops: set = orbits_1.keys() & orbits_2.keys()\n\n hop = common_hops.pop()\n smallest_total_hops = orbits_1[hop] + orbits_2[hop]\n for hop in common_hops:\n total_hops = orbits_1[hop] + orbits_2[hop]\n\n if total_hops < smallest_total_hops:\n smallest_total_hops = total_hops\n\n return smallest_total_hops", "def get_walk_activity_points(self, elevation: float, distance: float) -> int:\n\n return ceil(distance * (1 + self._get_elevation(elevation, distance)))", "def calculate_distance(mark1, mark2):\n return abs(mark1 - mark2) / MAX_MARK_VALUE", "def computeDistance(self):\n distance = 0.0\n height = self.heightField.getNumber()\n ratio = self.indexField.getNumber()\n numBounces = self.bouncesField.getNumber()\n\n for bounce in range(numBounces):\n bounceHeight = height * ratio\n distance += height + bounceHeight\n height = bounceHeight\n\n self.distanceField.setNumber(distance)", "def min_depth(t):\n if is_leaf(t):\n return 0\n h = float('inf')\n for b in branches(t):\n # Still works fine!\n h = min(h, 1 + min_depth(b))\n return h", "def computeDistance(height, index, bounces):\n pass", "def available_distance(tk, target_distance, offset_distance=0):\n is_down = tk.isdown()\n if not is_down:\n # Must be pendown to be able to bump into things\n return target_distance\n # Optional step forward\n if offset_distance:\n # Assume we are pen_set_solid - Version 0.2 does not give us\n # any hint!\n tk.pen_unset_solid()\n tk.penup()\n tk.forward(offset_distance)\n tk.pendown()\n tk.forward(target_distance-offset_distance)\n op = tk.last_distance()+offset_distance\n tk.undo() # forward motion\n if offset_distance:\n tk.undo() # initial forward motion and pen up/down\n tk.undo()\n tk.undo()\n 
tk.pen_set_solid()\n return op", "def minContigLength(self):\n\t\tstats = self.scores()\n\t\treturn stats['smallestContig']", "def findCheapestPrice(self, n: int, flights: List[List[int]], src: int, dst: int, K: int) -> int:\n\n #bellmann ford algorithm\n \n #### what i tried\n \n costs = [float('inf')] * n\n costs[src] = 0\n\n for _ in range(K+1):\n copy = costs[:]\n for s, d, w in flights:\n copy[d] = min(copy[d], costs[s] + w)\n costs = copy\n\n return -1 if costs[dst] == float('inf') else costs[dst]", "def throw_factor(min_distance, container_distance):\n max_distance = min_distance * settings.willFactor\n\n # thresold definition\n th1_dist = min_distance * (1 + (settings.willFactor - 1) * 0.05)\n th1_fact = 1.00\n th2_dist = min_distance * (1 + (settings.willFactor - 1) * 0.8)\n th2_fact = 0.25\n\n angular_coefficient = (th1_fact - th2_fact) / (th2_dist - th1_dist)\n\n if container_distance < min_distance:\n x_fact = 'Error: No containers should be nearer than nearest container'\n elif container_distance < th1_dist:\n x_fact = th1_fact\n elif container_distance < th2_dist:\n x_fact = th1_fact - (container_distance - th1_dist) * angular_coefficient\n elif container_distance < max_distance:\n x_fact = th2_fact\n else:\n x_fact = 0\n\n return x_fact", "def minim(self) -> int:\n\t\treturn 2", "def drive(self, distance):\n if random.uniform(0.0, 100.0) < self.reliability:\n distance_driven = super().drive(distance)\n else:\n distance_driven = 0\n\n return distance_driven", "def drive(self, distance):\n random_number = random.randint(0,101)\n if float(random_number)<self.reliability:\n distance_driven = super().drive(distance)\n\n else:\n distance_driven=0\n\n return distance_driven", "def get_distance(self):\n print(\"voici la distance à l'obstacle\")", "def __find_max_distance(self):\n return utils.find_max_distance(self.__game)", "def get_min_takeover(cell: Cell) -> int:\n if cell.creature == 'humans':\n return math.ceil(cell.number * parameters.HUMANS_TAKEOVER_FACTOR)\n return math.ceil(cell.number * parameters.OPPONENT_TAKEOVER_FACTOR)", "def distance(self):\n return self._distance", "def number_transfers(self, quota):\n if self.elected:\n return len(self.first_votes) - quota\n else:\n return 0", "def distance(wires) -> int:\n\n wire_0_pos = get_positions(wires[0])\n wire_1_pos = get_positions(wires[1])\n\n # find intersections\n intersections = list(set(wire_0_pos).intersection(set(wire_1_pos)))\n # ignore the 0,0 intersect\n intersections.remove((0, 0))\n m_distances = [manhattan_distance(x, y) for x, y in intersections]\n\n\n return min(m_distances)", "def getCostOfActions(self, actions):\n if actions == None: return 999999\n x, y = self.startingPosition\n for action in actions:\n dx, dy = Actions.directionToVector(action)\n x, y = int(x + dx), int(y + dy)\n if self.walls[x][y]: return 999999\n return len(actions)" ]
[ "0.8302289", "0.6621628", "0.645036", "0.6314632", "0.6262712", "0.61484736", "0.60885", "0.603681", "0.59223753", "0.58915836", "0.5876038", "0.5869804", "0.58609515", "0.58593315", "0.58491707", "0.5815681", "0.5806749", "0.57953686", "0.57733434", "0.5757533", "0.57270324", "0.57073605", "0.56928164", "0.56865036", "0.5681687", "0.5680497", "0.5679108", "0.56686556", "0.562771", "0.56275225", "0.56146014", "0.55715626", "0.5564172", "0.5547411", "0.55405986", "0.5535085", "0.55347824", "0.5534443", "0.55311686", "0.55310035", "0.55244124", "0.55195045", "0.55090016", "0.5507519", "0.5501168", "0.54817337", "0.5479433", "0.54747516", "0.54715514", "0.54707587", "0.5464035", "0.54638594", "0.5461864", "0.5460158", "0.5457549", "0.5451798", "0.5451227", "0.5448023", "0.54456085", "0.54382175", "0.5430972", "0.54303396", "0.54259694", "0.54257673", "0.5418253", "0.541822", "0.54176474", "0.5417308", "0.54116154", "0.54033", "0.5394662", "0.5375546", "0.53746855", "0.5360643", "0.5359052", "0.5357061", "0.5356895", "0.5352663", "0.5346965", "0.5345696", "0.53390473", "0.53359795", "0.53310454", "0.5326918", "0.5323373", "0.53188705", "0.5304826", "0.52985483", "0.529802", "0.52937704", "0.5293319", "0.52900857", "0.5289171", "0.5282521", "0.5275883", "0.5270332", "0.5267884", "0.5266367", "0.5264499", "0.5260452" ]
0.7821262
1
Given the positions of a list of the indices, create a unique key to register the position.
def placementKey( geo):
    def diagcmp( xyA, xyB):
        """ Compare two positions based on x + y. If x + y is the same for
        the two, compare based on x. """
        return cmp(xyA[0] + xyA[1], xyB[0] + xyB[1]) or cmp(xyA[0], xyB[0])

    sorted = [ tuple(geo[i]) for i in xrange(geo.shape[0]) ]
    sorted.sort( diagcmp)
    return hash(tuple(sorted))
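A minimal Python 3 re-expression of the same idea, shown only as an illustrative sketch (the record's code is Python 2: `cmp`, `xrange`, and `list.sort` with a comparator; the `geo` values below are hypothetical):

import numpy as np

def placement_key(geo):
    # Canonical key: sort vertex tuples by x + y, break ties on x, then hash.
    verts = [tuple(geo[i]) for i in range(geo.shape[0])]
    verts.sort(key=lambda p: (p[0] + p[1], p[0]))
    return hash(tuple(verts))

geo = np.array([[2, 1], [0, 0], [1, 1]])
print(placement_key(geo))  # identical key for any row permutation of the same positions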
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_new_key(idx, key, d):\n\n new_key = \"%s_%d\" % (key, idx)\n if new_key in d:\n return make_new_key(idx + 1, key, d)\n return new_key", "def _make_key(self):\n all_position_values = (chromosome_sort_key(self.chromosome), self.min_position, self.max_position, \n self.strand, self.position_before, self.position_after)\n return all_position_values", "def _create_idx(self):\n self._idx = {}\n for idx, (L, M, N) in enumerate(self.modes):\n if L not in self._idx:\n self._idx[L] = {}\n if M not in self._idx[L]:\n self._idx[L][M] = {}\n self._idx[L][M][N] = idx", "def create_indices():\n conn = connect()\n c = conn.cursor()\n\n # To prevent rematch btw players\n c.execute(\n \"\"\"\n CREATE UNIQUE INDEX matches_uniq_idx ON matches\n (greatest(winner, loser), least(winner, loser));\n \"\"\")\n conn.commit()\n conn.close()", "def gen_keys():", "def create_key_index(name):\r\n global _existing_indices\r\n _existing_indices = _existing_indices or execute_query('g.getIndexedKeys(Vertex.class)')\r\n if name not in _existing_indices:\r\n execute_query(\r\n \"g.createKeyIndex(keyname, Vertex.class); g.stopTransaction(SUCCESS)\",\r\n {'keyname':name}, transaction=False)\r\n _existing_indices = None", "def make_key(*values, **kwargs):\n if len(kwargs) == 0:\n key = tuple(v.key for v in values)\n else:\n res = [v.key for v in values]\n for k, v in sorted(kwargs.items()):\n if isinstance(v, (int, float, str)):\n res.append(k)\n res.append(v)\n else:\n raise TypeError(\n f\"Type {type(v)} is not yet supported, \"\n f\"v={v} and parameter {k!r}.\")\n key = tuple(res)\n return key", "def _key_generated(self, key, index):\n self.keys[self.get_address(key)] = key\n self.last_generated_index = index", "def make_key(*args, **kwargs) -> Hashable:\n if len(args) == 1 and isinstance(args[0], (int, str)):\n return args[0]\n if kwargs:\n args = sum(kwargs.items(), (*args, _KWD_MARK))\n return _HashedSeq(args)", "def setUniqueId(self, idsOfElementaryExpressions):\n if self.name in idsOfElementaryExpressions:\n self.uniqueId = idsOfElementaryExpressions[self.name]\n else:\n error_msg = (\n f'No index is available for expression {self.name}.'\n f' List of available indices: '\n f'{[n for n, i in idsOfElementaryExpressions.items() ]}'\n )\n raise excep.biogemeError(error_msg)", "def initiate_new_key (self,key,index):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n\r\n\r\n self.key_dict[key] = {str(index)}\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, key,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO all_keys (keyword, notebook)\"\r\n +\" VALUES (?,?);\",\r\n value_tuple)\r\n value_tuple = (notebookname, key, str(index))\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO keys_to_indexes\"\r\n +\" (notebook, keyword, note_index)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)", "def create_keys(i):\n sk = elgamal.create_sk()\n secret_keys.append(sk)\n\n keys = [0, 0]\n\n keys[x[i]] = elgamal.gen(sk)\n keys[1 - x[i]] = elgamal.o_gen()\n\n public_keys.append(keys)", "def table_key(self, reindex_dict):\n reindexed_marks = []\n for m in self.component1.marks:\n new_m = reindex_dict.get(m)\n if new_m == None:\n if len(reindex_dict) == 0:\n new_m = 0\n else:\n new_m = max(reindex_dict.values())+1\n reindex_dict[m] = new_m\n reindexed_marks.append(new_m)\n return tuple( [self.component1.genus] + sorted(reindexed_marks) )", "def set_index(self, list):\n for key in list:\n self.find_label_by_id(key).index = True", "def create_key ():", "def 
create_unique_index(name, data_type):\r\n global _existing_indices\r\n _existing_indices = _existing_indices or execute_query('g.getIndexedKeys(Vertex.class)')\r\n \r\n if name not in _existing_indices:\r\n execute_query(\r\n \"g.makeType().name(name).dataType({}.class).functional().unique().indexed().makePropertyKey(); g.stopTransaction(SUCCESS)\".format(data_type),\r\n {'name':name}, transaction=False)\r\n _existing_indices = None", "def create_index(args, client):\n policy = {}\n client.index_geo2dsphere_create(args.nspace, args.set,\n LOCBIN, LOCNDX, policy)\n client.index_integer_create(args.nspace, args.set,\n HSHBIN, HSHNDX, policy)", "def crearIndices(self):\n l = self.encontrarCaracteres()\n i=0\n for c in l:\n self.indices[c] = i\n i+=1", "def create_index():", "def update_id2idx(self):\n self._id2idx = {}\n for n, cell in enumerate(self._cell_list):\n self._id2idx[cell.id()] = n", "def new_player_id(index):\n new_player_id.index = index\n def new_id(users):\n \"\"\"\n Generates a unique identifier for a\n list of users. If the list has\n only one user, then the id is mantained\n else, a new user id is created for the\n whole list.\n\n Parameters\n ----------\n users: list of int\n List of 1 or more user's identifiers.\n Precondition: all elements of users\n are smaller than index.\n Returns\n -------\n int\n The old identifier if in the list\n there was only one player\n and or the next new consecutive\n identifier if there where more\n than one.\n\n \"\"\"\n\n #nonlocal index\n if len(users) > 1:\n new_index = new_player_id.index\n new_player_id.index += 1\n else:\n new_index = users[0]\n\n return new_index\n\n return new_id", "def __generate_key_from_list_of(self, list_of_keys):\r\n list_of_keys = list(list_of_keys)\r\n list_of_keys.sort()\r\n return \",\".join(list_of_keys)", "def generate_prototype_key(self, *args):\n return str(uuid.uuid5(UUID_XYZ_NAMESPACE, str((self.X, self.Y, self.Z, *args))))", "def setPositionKey(self, time, index, value, id, view) -> None:\n ...", "def __setKeyIDFromFlaglist( self , flaglist ):\n\t\tif 'idx0' in flaglist:\n\t\t\tself.keyID = 'idx0'\n\t\tif 'idx1' in flaglist:\n\t\t\tself.keyID = 'idx1'\n\t\tif 'idx2' in flaglist:\n\t\t\tself.keyID = 'idx2'", "def CreateIndicesForBasisFilter(indices):\n indices = np.array(indices, dtype = \"uint32\")\n for ind, it in enumerate(indices):\n indices[ind] = ind * it\n return indices", "def keyIndex(self, key):\n key ^= bsr(key, 33)\n key *= 0xff51afd7ed558ccdL\n key ^= bsr(key, 33)\n key *= 0xc4ceb9fe1a85ec53L\n key ^= bsr(key, 33)\n return key", "def make_unique_index(config, stash_code):\n for data in get_section_new_indices(config):\n section_base, old_index, new_index = data\n key = config.value.keys()\n isec_item = stash_code['section']+stash_code['item']\n old_index_sections = old_index.split('_')\n old_section = SECTION_FORMAT.format(section_base, old_index_sections[0], old_index_sections[1])\n new_section = SECTION_FORMAT.format(section_base, isec_item, new_index)\n print 'old_section, new', old_section, new_section\n\n old_node = config.unset([old_section])\n old_id_opt_values = []\n for opt, node in old_node.value.items():\n old_id = rose.CONFIG_DELIMITER.join([old_section, opt])\n old_id_opt_values.append((old_id, opt, node.value))\n # update key value\n config.value.update({new_section: old_node})", "def addPositionKey(\n self,\n time: float,\n positionOrDim: Union[CVec3, int],\n view: Optional[Str] = ...,\n ) -> None:\n ...", "def scatter_embedding_vector(values, indices, bucket_num):\n ps_ids 
= {}\n indices_list = indices.tolist()\n for i, item_id in enumerate(indices_list):\n ps_id = int_to_id(item_id, bucket_num)\n if ps_id not in ps_ids:\n ps_ids[ps_id] = [(i, item_id)]\n else:\n ps_ids[ps_id].append((i, item_id))\n results = {}\n for ps_id, i_item_id in ps_ids.items():\n i = [v[0] for v in i_item_id]\n item_id = [v[1] for v in i_item_id]\n results[ps_id] = (values[i, :], item_id)\n return results", "def test_duplicates_indices(self):\r\n\r\n no_dups = ['1', '2', '3', '4']\r\n\r\n results = duplicates_indices(no_dups)\r\n\r\n expected_results = defaultdict(list)\r\n\r\n self.assertEqual(results, expected_results)\r\n\r\n dups = ['1', '2', '3', '4', '2']\r\n\r\n results = duplicates_indices(dups)\r\n\r\n expected_results = defaultdict(list)\r\n expected_results['2'] = [1, 4]\r\n\r\n self.assertEqual(results, expected_results)", "def add_key(self,key,index):\r\n\r\n #with shelf\r\n\r\n if self.using_shelf:\r\n\r\n if key in self.key_dict:\r\n\r\n self.key_dict[key].add(str(index))\r\n\r\n else:\r\n self.key_dict[key] = {str(index)}\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, key,)\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO all_keys (keyword, notebook)\"\r\n +\" VALUES (?,?);\",\r\n value_tuple)\r\n value_tuple = (notebookname, key, str(index))\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO keys_to_indexes\"\r\n +\" (notebook, keyword, note_index)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)", "def faiss_index(vectors, ids=None):\n index = faiss.IndexFlatL2(vectors.shape[1])\n if ids:\n index = faiss.IndexIDMap(index)\n index.add_with_ids(vectors, np.array([i for i in ids]))\n else:\n index.add(vectors)\n\n return index", "def test_integer_map_key_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_integer_map_key_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_map_key_index, 12345)\n\t)", "def id_index_map(self):\n result = {}\n for index, component_data in iteritems(self):\n result[id(component_data)] = index\n return result", "def _id_for_index(prefix, index):\r\n return \"%s%d\" % (prefix, index + 1)", "def generate_bitmap_to_linear_index_map(bladeTupList, firstIdx):\n bitmap_map = np.zeros(len(bladeTupList), dtype=int)\n for ind, blade in enumerate(bladeTupList):\n bitmap_map[compute_bitmap_representation(blade, firstIdx)] = ind\n return bitmap_map", "def build_index(self, dict_pg_info, list_insert):\n flag_exit = True\n if flag_exit is False:\n self.create_new_index(dict_pg_info)\n self.insert_index(dict_pg_info, list_insert)", "def _make_key(args, kwds, typed,\r\n kwd_mark = (object(),),\r\n fasttypes = {int, str, frozenset, type(None)},\r\n tuple=tuple, type=type, len=len):\r\n # All of code below relies on kwds preserving the order input by the user.\r\n # Formerly, we sorted() the kwds before looping. 
The new way is *much*\r\n # faster; however, it means that f(x=1, y=2) will now be treated as a\r\n # distinct call from f(y=2, x=1) which will be cached separately.\r\n key = args\r\n if kwds:\r\n key += kwd_mark\r\n for item in kwds.items():\r\n key += item\r\n if typed:\r\n key += tuple(type(v) for v in args)\r\n if kwds:\r\n key += tuple(type(v) for v in kwds.values())\r\n elif len(key) == 1 and type(key[0]) in fasttypes:\r\n return key[0]\r\n return _HashedSeq(key)", "def makewordindex(wordset):\n indexmap = {}\n sortwords = sorted(list(wordset))\n for i in range(len(sortwords)):\n word = sortwords[i]\n indexmap[word] = i\n return indexmap", "def makeIndexMap(self):\n\t\tn = self.numRects\n\t\thalfList = [[(j,n-1-i+j) for j in range(i+1)] for i in range(n)]\n\t\tfullList = halfList + [[(j[1],j[0]) for j in i] for i in halfList[n-2::-1]]\n\t\treturn fullList", "def generate_new_key(self, index):\n new_key = self.chain_key.subkey(index)\n self._key_generated(new_key, index)", "def make_consistent(self):\r\n\r\n for key in self.get_keys():\r\n self.eliminate_key(key)\r\n\r\n for i_temp in self.indexes(): #i will be a note index\r\n for j_temp in self.get_keys_from_note(i_temp):\r\n if self.key_dict_contains(j_temp):\r\n self.add_key(j_temp,Index(i_temp))\r\n## self.key_dict[j_temp].add(str(Index(i_temp)))\r\n else:\r\n self.initiate_new_key(j_temp,Index(i_temp))", "def format_as_index(indices):\r\n\r\n if not indices:\r\n return \"\"\r\n return \"[%s]\" % \"][\".join(repr(index) for index in indices)", "def obs_key_func(state, **hx_kwargs):\n n = hx_kwargs[\"n\"]\n index = hx_kwargs[\"index\"]\n \n key = np.zeros(n)\n key[index] +=2\n \n return key", "def add_self_loops_to_indexlist(indices):\r\n max_ind = np.max(indices)\r\n self_loops = np.arange(max_ind+1,dtype=np.int)\r\n self_loops = np.concatenate([np.expand_dims(self_loops,axis=-1),np.expand_dims(self_loops,axis=-1)],axis=-1)\r\n added_loops = np.concatenate([indices,self_loops],axis=0)\r\n clean_index = np.unique(added_loops,axis=0)\r\n index_order = np.argsort(clean_index[:,0])\r\n out_indices = clean_index[index_order]\r\n return out_indices", "def _create_two_group_jackknife_indexes(x0, x1, is_paired):\n\n if is_paired and len(x0) == len(x1):\n out = list(zip([j for j in create_jackknife_indexes(x0)],\n [i for i in create_jackknife_indexes(x1)]\n )\n )\n else:\n jackknife_c = list(zip([j for j in create_jackknife_indexes(x0)],\n [i for i in create_repeated_indexes(x1)]\n )\n )\n\n jackknife_t = list(zip([i for i in create_repeated_indexes(x0)],\n [j for j in create_jackknife_indexes(x1)]\n )\n )\n out = jackknife_c + jackknife_t\n del jackknife_c\n del jackknife_t\n\n return out", "def key(self, patterns=None, indices=None):\n\n key = []\n\n # if the user doesn't provide indices, get indices from the pattern\n if not indices and patterns:\n indices = get_column_indices(patterns, self.columns)\n\n if indices:\n # if we have indices, use them to build the key\n for i in indices:\n if i < len(self.values):\n key.append(hxl.datatypes.normalise(self.values[i], self.columns[i]))\n else:\n # if there are still no indices, use the whole row for the key\n for i, value in enumerate(self.values):\n key.append(hxl.datatypes.normalise(value, self.columns[i]))\n\n return tuple(key) # make it into a tuple so that it's hashable", "def CreateIndex(self, arg0: 'unsigned long long') -> \"void\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF2GQEULLULLBBT_CreateIndex(self, arg0)", "def makeKey(text):\n key, n = {}, 0\n for i in 
text:\n key[i] = str(n)\n n += 1\n return key", "def __compound_key(key):\n x_int = int(key[0])\n y_int = int(key[1])\n zeros = len(str(y_int))\n key = x_int * (10 ** zeros) + y_int\n\n return key", "def _index_key(self, sig, codegen):\n return (sig, codegen.magic_tuple())", "def create_index(log_df, column):\n temp_list = log_df[[column]].values.tolist()\n subsec_set = {(x[0]) for x in temp_list}\n subsec_set = sorted(list(subsec_set))\n alias = dict()\n for i, _ in enumerate(subsec_set):\n alias[subsec_set[i]] = i + 1\n return alias", "def make_cache_keys(self, identifiers):\n\n raise NotImplementedError", "def CreateIndex(self, arg0: 'unsigned long long') -> \"void\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF3GQEULLULLBBT_CreateIndex(self, arg0)", "def generate_unique_key(*args, **kwargs):\n hashed_args = ['%s' % hash(arg) for arg in args]\n hashed_kwargs = ['%s ' % hash((key, value)) for (key, value) in kwargs.items()]\n # this is md5 hashed again to avoid the key growing too large for memcached\n return hashlib.md5(':'.join(hashed_args + hashed_kwargs)).hexdigest()", "def _create_indices(cls):\r\n from thunderdome.connection import _hosts, _index_all_fields, create_key_index\r\n \r\n if not _hosts: return\r\n for column in cls._columns.values():\r\n if column.index or _index_all_fields:\r\n create_key_index(column.db_field_name)", "def hoggar_indices():\n return list(product([0,1], repeat=6))", "def map_ord_to_index(origin_char_list, save_path):\n ord_2_index_dict = {str(i) + '_index': str(ord(c)) for i, c in\n enumerate(CharDictBuilder._read_chars(origin_char_list))}\n index_2_ord_dict = {str(ord(c)) + '_ord': str(i) for i, c in\n enumerate(CharDictBuilder._read_chars(origin_char_list))}\n total_ord_map_index_dict = dict(ord_2_index_dict)\n total_ord_map_index_dict.update(index_2_ord_dict)\n CharDictBuilder._write_json(save_path, total_ord_map_index_dict)", "def map_geo_hashed_value(l):\n \n l = sorted(l)\n return {k: index for index, k in enumerate(l)}", "def generate_inv_index(people):\n pass", "def createkey(*args): # {{{2\n return '-'.join(map(simplifyname, args))", "def create_label_map(label_lists, trailing_piece_tag=\"X\"):\n\n label_set = set()\n for labels in label_lists:\n label_set.update(labels)\n\n label_map = {label: i for i, label in enumerate(label_set)}\n\n if trailing_piece_tag not in label_set:\n label_map[trailing_piece_tag] = len(label_set)\n return label_map", "def create_index(shapes_with_props):\n index = rtree.index.Index()\n for id, shape_with_props in enumerate(shapes_with_props):\n index.insert(id, shape_with_props.shape.bounds)\n return index", "def createindexes():\n index = [{}, {}, {}, {}]\n readcorpus(index)\n buildindex4(index[2], index[3])\n writeindextofile(index)\n return index", "def __init__(self, positions, actives):\r\n self.positions = positions\r\n self.actives = actives\r\n self.hash = hash(self.actives.tobytes()) + 31 * hash(self.positions.tobytes())", "def map_position(pos):\n\n posiction_dict = dict(zip(range(1, 17), [i for i in range(30, 62) if i % 2]))\n return posiction_dict[pos]", "def _index_document(index_list):\n if isinstance(index_list, abc.Mapping):\n raise TypeError(\"passing a dict to sort/create_index/hint is not \"\n \"allowed - use a list of tuples instead. 
did you \"\n \"mean %r?\" % list(index_list.items()))\n elif not isinstance(index_list, (list, tuple)):\n raise TypeError(\"must use a list of (key, direction) pairs, \"\n \"not: \" + repr(index_list))\n if not len(index_list):\n raise ValueError(\"key_or_list must not be the empty list\")\n\n index = SON()\n for (key, value) in index_list:\n if not isinstance(key, str):\n raise TypeError(\"first item in each key pair must be a string\")\n if not isinstance(value, (str, int, abc.Mapping)):\n raise TypeError(\"second item in each key pair must be 1, -1, \"\n \"'2d', 'geoHaystack', or another valid MongoDB \"\n \"index specifier.\")\n index[key] = value\n return index", "def _indices_to_coords(c,r):\n\n column = _index_to_column(c)\n row = r + 1\n\n return {'c': column, 'r': row, 'coord': f'{column}{row}'}", "def make_DBL_index(angle_tube_pos, BDs, VTP2BD_func):\n # must have equal length arrays\n msg = 'Number of angle tube positions != number of BD values'\n assert len(angle_tube_pos) == 2, 'Formatting error: angled tube positions'\n assert len(angle_tube_pos[0]) == len(BDs), msg\n assert len(angle_tube_pos[1]) == len(BDs), msg\n\n # converting angle_tube_pos values to BD values\n low_pos_BD = VTP2BD_func(angle_tube_pos[0])\n high_pos_BD = VTP2BD_func(angle_tube_pos[1])\n \n # making a dict of BD : np.random.uniform(low_pos_BD, high_pos_BD)\n DBL_index = {}\n for i in xrange(len(low_pos_BD)):\n BD = round(BDs[i], 3)\n if np.isnan(low_pos_BD[i]) or np.isnan(high_pos_BD[i]):\n pass\n else:\n # note: lower tube position = higher BD \n DBL_index[BD] = partial(np.random.uniform,\n high = low_pos_BD[i], \n low = high_pos_BD[i], \n size=1)\n return DBL_index", "def add_position_map(lst, number_from=0):\r\n return map(lambda (val, ind): val + ind + number_from, enumerate(lst))", "def new_id(users):\n\n #nonlocal index\n if len(users) > 1:\n new_index = new_player_id.index\n new_player_id.index += 1\n else:\n new_index = users[0]\n\n return new_index", "def create_index(cluster_times, cluster_geometries):\n\n lookup = {} # create dict for key-value lookup\n for ct, cg in zip(cluster_times, cluster_geometries):\n if ct in lookup: # Check if STR-tree is drawn for t\n lookup[ct] = STRtree(lookup[ct]._geoms+[cg]) # Redraw STR_tree if record exists\n else:\n lookup[ct] = STRtree([cg]) # Create STR-tree from geometry list\n\n return lookup", "def __createkey__(self):\n return str(self.currentCol) + \",\" + str(self.currentRow)", "def _BuildEventTagIndex(self):\n self._event_tag_index = {}\n for event_tag in self.GetEventTags():\n event_identifier = event_tag.GetEventIdentifier()\n lookup_key = event_identifier.CopyToString()\n self._event_tag_index[lookup_key] = event_tag.GetIdentifier()", "def _create_subscript_mapping():\n # Create the normal and subscript digits list.\n normal_digits = [i for i in range(10)]\n subscript_digits = [chr(0x2080 + i) for i in range(10)]\n\n # Convert the normal digits to strings.\n normal_digits = [str(i) for i in normal_digits]\n\n # Create a dict mapping the two.\n return DefaultDictionary(zip(normal_digits, subscript_digits))", "def points_to_index(points, points_dict):\r\n index_locations = ''\r\n for point in points:\r\n index_locations += str(points_dict[point]) + ' '\r\n return index_locations", "def generate_prototype_key(self):\n return str(uuid.uuid5(UUID_XYZ_NAMESPACE, str((self.X, self.Y, self.Z))))", "def _add_pk(self, conn, *, tblname, pk_columns):\n idx_metadatum = partition_utils.IndexMetadatum(idx_cols=pk_columns, is_unique=True)\n 
partition_utils.add_indices(conn, tbl_name=tblname, idx_metadata=[idx_metadatum])", "def __hash__(self):\n index_list = [allele.index for allele in self.genes]\n return hash(tuple(index_list))", "def create_location_index():\n get_rsvps_database().groups.create_index([(\"location\", GEOSPHERE)])", "def create_position_ids_from_input_ids(self, x):\r\n mask = x.ne(self.padding_idx).long()\r\n incremental_indicies = torch.cumsum(mask, dim=1) * mask\r\n return incremental_indicies + self.padding_idx", "def setUniqueId(self, idsOfElementaryExpressions):\n if self.elementaryName in idsOfElementaryExpressions:\n self.elementaryIndex = idsOfElementaryExpressions[\n self.elementaryName\n ]\n else:\n error_msg = (\n f'No index is available for elementary '\n f'expression {self.elementaryName}.'\n )\n raise excep.biogemeError(error_msg)\n self.child.setUniqueId(idsOfElementaryExpressions)", "def from_list(hash, list):\r\n for k, v in enumerate(list):\r\n put(hash, k, v)", "def word_indexer(word_lst):\n unique_words = list(set(word_lst))\n word_index = {}\n for i in range(len(unique_words)):\n word_index[unique_words[i].lower()] = i + 4\n word_index['<PAD>'] = 0\n word_index['<START>'] = 1\n word_index['<UNK>'] = 2\n word_index['<UNUSED>'] = 3\n return word_index", "def getGlobalIndices( self, indices: list):\n result = indices.copy()\n for i,toAdd in enumerate(self._layout.starts):\n result[self._layout.dims_order[i]]=indices[i]+toAdd\n return result", "def vector_indx_to_map_matrix_indx(index,senzory_map):\n xs = dict(zip(np.unique(senzory_map[:,0]), it.count()))\n ys = dict(zip(np.negative(np.unique(senzory_map[:,1])), it.count()))\n x, y = senzory_map[index]\n return ys[y],xs[x]", "def unique_coordinate_map(\n coordinates: torch.Tensor,\n tensor_stride: Union[int, Sequence, np.ndarray] = 1,\n) -> Tuple[torch.IntTensor, torch.IntTensor]:\n assert coordinates.ndim == 2, \"Coordinates must be a matrix\"\n assert isinstance(coordinates, torch.Tensor)\n if not coordinates.is_cuda:\n manager = MEB.CoordinateMapManagerCPU()\n else:\n manager = MEB.CoordinateMapManagerGPU_c10()\n tensor_stride = convert_to_int_list(tensor_stride, coordinates.shape[-1] - 1)\n key, (unique_map, inverse_map) = manager.insert_and_map(\n coordinates, tensor_stride, \"\"\n )\n return unique_map, inverse_map", "def indices_from_subtensor(idx_list, indices):\n return tuple(\n tuple(convert_indices(list(indices), idx) for idx in idx_list) if idx_list else indices\n )", "def export_character_indices(self, indices):\n clone = self.__class__(self)\n # clone.clone_from(self)\n for vec in clone.taxon_seq_map.values():\n for cell_idx in range(len(vec)-1, -1, -1):\n if cell_idx not in indices:\n del(vec[cell_idx])\n return clone", "def update_vectors_by_position(data, val, positions):\n positions = positions.astype(np.int32)\n # batch_idx.shape = (batch_size, 1) as [[0], [1], [2], ...]\n batch_idx = np.expand_dims(npx.arange_like(positions, axis=0),\n axis=1).astype(np.int32)\n batch_idx = batch_idx + np.zeros_like(positions)\n indices = np.stack([batch_idx.reshape((-1,)), positions.reshape((-1,))])\n\n out = npx.index_update(data, indices, npx.reshape(val, (-5, -4)))\n return out", "def index_object(idxs=None):", "def encode_pos(i, j):\n return 3 * i + j", "def _make_unique(name, idx):\n p = re.compile(\".[aA-zZ]+_x[0-9]+\")\n if p.match(name):\n tags = name[1:].split(\"_x\")\n return \">%s_%s_x%s\" % (tags[0], idx, tags[1])\n return name.replace(\"@\", \">\")", "def _make_index_list(self, used_sample_id_list, num_id_repeats=1):\n if 
used_sample_id_list is None:\n self.index_list = [i for i in range(self.data.shape[0])]\n\n else:\n self.index_list = [i for i in range(self.data.shape[0])\n if self.data[i][DATA_ID_INDEX] in used_sample_id_list\n ]\n\n if len(self.index_list) != len(used_sample_id_list):\n warnings.warn(\"Not all images found. \\\n Found: {}, requested: {}\".format(len(self.index_list),\n len(used_sample_id_list))\n )\n\n # for small datasets,\n # the ids can be repeated to get a reasonable batch size working\n self.index_list = self.index_list*num_id_repeats", "def create_key_index_object(key_index_name: str, iterables: Mapping[str, Any]) -> Any:\n # Validation\n # We are going to use the iterators when determining the fields, so we need to notify if an iterator was\n # passed, as this will cause a problem later. Instead of passing an iterator, a iterable should be passed,\n # which can recreate the iter.\n # See: https://effectivepython.com/2015/01/03/be-defensive-when-iterating-over-arguments/\n for name, iterable in iterables.items():\n if iter(iterable) == iter(iterable):\n raise TypeError(\n f\"Iterable {name} is in iterator which can be exhausted. Please pass the iterable\"\n f\" in a container that can recreate the iterable. See the comments here for more info.\"\n )\n\n # We need the types of the fields to create the dataclass. However, we are provided with iterables\n # in the values of the iterables dict. Thus, we need to look at one value of each iterable, and use\n # that to determine the type of that particular iterable. This is safe to do because the iterables\n # must always have at least one entry (or else they wouldn't be one of the iterables).\n # NOTE: The order here matters when we create the ``KeyIndex`` later, so we cannot just take all\n # objects from the iterables and blindly use set because set won't preserve the order.\n fields = [(name, type(next(iter(iterable)))) for name, iterable in iterables.items()]\n KeyIndex = dataclasses.make_dataclass(key_index_name, fields, frozen=True)\n # Allow for iteration over the key index values\n KeyIndex.__iter__ = _key_index_iter # type: ignore\n\n return KeyIndex", "def _index_key(self, sig, codegen):\n codebytes = self._py_func.__code__.co_code\n if self._py_func.__closure__ is not None:\n cvars = tuple([x.cell_contents for x in self._py_func.__closure__])\n # Note: cloudpickle serializes a function differently depending\n # on how the process is launched; e.g. 
multiprocessing.Process\n cvarbytes = dumps(cvars)\n else:\n cvarbytes = b''\n\n hasher = lambda x: hashlib.sha256(x).hexdigest()\n return (sig, codegen.magic_tuple(), (hasher(codebytes),\n hasher(cvarbytes),))", "def __scatterRepeated ( self, posList ):\n\n #-- 1 --\n # [ numNonReps := len(self.posSpecs) - 1\n # numReps := len(posList) - (len(self.posSpecs) - 1) ]\n # NB: numNonReps is the total number of non-repeating required\n # arguments, and numReps is the number of positionals from posList\n # that correspond to the repeated argument.\n numNonReps = len(self.posSpecs) - 1\n numReps = len(posList) - numNonReps\n\n #-- 2 --\n # [ if numReps < 0 ->\n # sys.stderr +:= (usage message) + (error message)\n # stop execution\n # else -> I ]\n if numReps < 0:\n usage ( self.switchSpecs, self.posSpecs,\n \"Only %d positional arguments were supplied, \"\n \"need at least %d.\" %\n ( len(posList), len(self.posSpecs) - 1 ) )\n\n #-- 3 --\n # [ self.posMap +:= entries mapping keys of\n # self.posSpecs[0:self.__repx] |-> poslist[0:self.__repx] ]\n for posx in range ( self.__repx ):\n self.posMap[self.posSpecs[posx].key] = posList[posx]\n\n #-- 4 --\n # [ self.posMap +:= an entry mapping the key of\n # self.posSpecs[self.__repx].key |-> the list\n # posList[self.__repx:self__repx+numReps] ]\n self.posMap[self.posSpecs[self.__repx].key] = (\n posList[self.__repx:self.__repx+numReps] )\n\n #-- 5 --\n # [ self.posMap +:= entries mapping keys of\n # self.posSpecs[self.__repx+1:] |->\n # posList[self.__repx+numReps:] ]\n for spex in range ( self.__repx+1, len(self.posSpecs)):\n sourcex = spex - 1 + numReps\n self.posMap[self.posSpecs[spex].key] = posList[sourcex]", "def __init__(self, key):\n self.key = [int_mapping(k) for k in key]", "async def build_secret_index(self):\n pass" ]
[ "0.6364943", "0.59690577", "0.58550936", "0.58094066", "0.5801118", "0.5798134", "0.5770239", "0.57411945", "0.57236964", "0.5721956", "0.56699497", "0.56316316", "0.5616401", "0.5541196", "0.55281365", "0.5510795", "0.5509949", "0.54963547", "0.5487912", "0.5483186", "0.54695016", "0.54673046", "0.54664016", "0.54646", "0.5451394", "0.54313177", "0.5409055", "0.5400094", "0.53891605", "0.5377564", "0.5375951", "0.5373333", "0.53706557", "0.5360952", "0.53558743", "0.5352769", "0.5346403", "0.5344646", "0.53445137", "0.53354806", "0.52998906", "0.52956444", "0.52916014", "0.5284692", "0.52783716", "0.52728117", "0.5266184", "0.5255147", "0.5254929", "0.52391416", "0.52294207", "0.5228083", "0.52138203", "0.5210031", "0.52090615", "0.52008414", "0.52000403", "0.5199269", "0.5194044", "0.51875746", "0.51856565", "0.5179044", "0.5178672", "0.5165031", "0.5163238", "0.5153162", "0.51524544", "0.5147619", "0.51469517", "0.5142718", "0.5138888", "0.5127746", "0.5114802", "0.5113369", "0.5111165", "0.5110894", "0.511055", "0.5094354", "0.5092128", "0.50916964", "0.50879806", "0.507794", "0.5077165", "0.5073647", "0.5072265", "0.5040428", "0.50317156", "0.5029345", "0.50258076", "0.5016489", "0.50135684", "0.5010062", "0.5007419", "0.500642", "0.50043786", "0.4996942", "0.49967125", "0.4995857", "0.49860457", "0.49827707" ]
0.53552693
35
Compare two positions based on x + y. If x + y is the same for the two, compare based on x.
def diagcmp( xyA, xyB):
    return cmp(xyA[0] + xyA[1], xyB[0] + xyB[1]) or cmp(xyA[0], xyB[0])
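A hedged usage sketch: under Python 3 (where the `cmp` builtin no longer exists) a comparator like this is typically wrapped with `functools.cmp_to_key`; the `points` list is a made-up example:

from functools import cmp_to_key

def cmp(a, b):  # re-creates the Python 2 builtin assumed by diagcmp
    return (a > b) - (a < b)

def diagcmp(xyA, xyB):
    return cmp(xyA[0] + xyA[1], xyB[0] + xyB[1]) or cmp(xyA[0], xyB[0])

points = [(2, 0), (0, 1), (1, 1), (0, 0)]
print(sorted(points, key=cmp_to_key(diagcmp)))  # [(0, 0), (0, 1), (1, 1), (2, 0)]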
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cmp(x, y):\n if x + y > y + x: return 1\n elif x + y == y + x: return 0\n else: return -1", "def same(self, x: int, y: int):\n\n return self.find(x) == self.find(y)", "def position_equal(self, a, b):\n return None", "def cmp_position(self, other):\n if self.position.data == other.position.data:\n return 0\n elif self.position.data < other.position.data:\n return 1\n else:\n return -1", "def same(self, x, y):\n return self.find(x) == self.find(y)", "def coordinate_positions_compare(self, other, r=10):\r\n # get max radius of forgiveness\r\n if isinstance(self[0], list): # [(x, y), r] case\r\n r = max(self[1], r)\r\n x1, y1 = self[0]\r\n else:\r\n x1, y1 = self\r\n\r\n if isinstance(other[0], list): # [(x, y), r] case\r\n r = max(other[1], r)\r\n x2, y2 = other[0]\r\n else:\r\n x2, y2 = other\r\n\r\n if (x2 - x1) ** 2 + (y2 - y1) ** 2 > r * r:\r\n return False\r\n\r\n return True", "def compare_coordinates(a: tuple, b: tuple) -> bool:\n return all(np.array(a) < np.array(b))", "def __lt__(self, other):\n return self.x ** 2 + self.y ** 2 < other.x ** 2 + other.y ** 2", "def _point_equal(a,b):\n return np.array_equal(a._Point__loc, b._Point__loc)", "def __cmp__(self, secondPoint):\n return __cmp__(self.value, secondPoint.value)", "def is_same_as(self, other) -> bool:\n return self.x == other.x and self.y == other.y", "def __ge__(self, other):\n return self.x ** 2 + self.y ** 2 >= other.x ** 2 + other.y ** 2", "def __eq__(self, second):\r\n\t\treturn self.x == other.x and self.y == other.y", "def __le__(self, other):\n return self.x ** 2 + self.y ** 2 <= other.x ** 2 + other.y ** 2", "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "def near(self,x1,y1,x2,y2):\n if x1 - x2 >= -1 and x1 - x2 <= 1 and\\\n y1 - y2 >= -1 and y1 - y2 <= 1:\n return True\n else:\n return False", "def x_in_y(self, x: int, y: int) -> bool:\n return len(set(self.MAPPING[x] + self.MAPPING[y])) == len(self.MAPPING[y])", "def __eq__(self, secondPoint):\n return self.xCoordinate == secondPoint.xCoordinate and self.yCoordinate == secondPoint.yCoordinate", "def __gt__(self, other):\n return self.x ** 2 + self.y ** 2 > other.x ** 2 + other.y ** 2", "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "def __lt__(self, other):\n return (self.point.x, self.point.y) < (other.point.x, other.point.y)", "def _is_same_position(pos1, pos2, position_tolerance):\n return np.isclose(_pos_distance(pos1, pos2), 0, atol=position_tolerance)", "def cmp(space, w_x, w_y):\n return space.cmp(w_x, w_y)", "def __comparing_points(self, point1, point2) -> bool:\n return (abs(point1.x - point2.x) <= self.dirt_pos_tolerance and abs(\n point1.y - point2.y) <= self.dirt_pos_tolerance)", "def seg_x_in_y(self, x: str, y: str) -> bool:\n return len(set(x + y)) == len(y)", "def equals(self,other):\n return self._x == other.get_x() and self._y == other.get_y()", "def unite(self, x: int, y: int):\n\n x = self.find(x)\n y = self.find(y)\n if x == y:\n return False\n if self.root[x] > self.root[y]:\n x, y = y, x\n self.root[x] += self.root[y]\n self.root[y] = x\n return True", "def compare(self, *args):\n return _ida_hexrays.ctext_position_t_compare(self, *args)", "def __eq__(self, other):\n x_eq = self.x == other.x\n y_eq = self.y == other.y\n return x_eq and y_eq", "def __eq__(self, other):\n x_eq = self.x == other.x\n y_eq = self.y == other.y\n return x_eq and y_eq", "def __eq__(self,other):\n if self.x == other.x and self.y == other.y:\n return True\n else:\n return False\n pass", "def __eq__(self, 
rhs):\n return self.x == rhs.x and self.y == rhs.y", "def validate_points(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\treturn (diff_y == 0 and diff_x != 0) or (diff_x == 0 and diff_y != 0) or abs(diff_x) == abs(diff_y)", "def __eq__(self, other):\r\n return abs(self.x - other.x) + abs(self.y - other.y) < Vertex.epsilon", "def match_min2(coords1,coords2,tail1=(),tail2=()):\n nc=len(coords1)\n np1=len(coords1[0])\n np2=len(coords2[0])\n a1=array(coords1)\n a2=array(coords2)\n nt1=len(tail1)\n for i in range(nt1): \n if len(tail1[i])!= np1: raise 'Not the same lenght as coordinates 1'\n nt2=len(tail2)\n for i in range(nt2): \n if len(tail2[i])!= np2: raise 'Not the same lenght as coordinates 2'\n match=zeros(np1, int)-1\n dist_min=zeros(np1)*1.\n x2=zeros(np1)*1.\n y2=zeros(np1)*1.\n for j in range(np1):\n #dist=add.reduce((a1[:,j,NewAxis]-a2[:,:])**2)\n a1j = a1[:,j]\n dist=add.reduce((reshape(a1j, (len(a1j), 1)) - a2)**2)\n i_min=argmin(dist)\n dist_min[j]=dist[i_min]\n x2[j],y2[j]=a2[0,i_min],a2[1,i_min]\n match[j]=i_min\n \n salida=list(a1)\n salida.append(x2)\n salida.append(y2)\n\n for i in range(nt1):salida.append(tail1[i])\n \n for i in range(nt2):\n if type(tail2[i][0])==type('si'):\n t=[]\n for j in match: t.append(tail2[i][j])\n else:\n t=take(tail2[i],match)\n salida.append(t)\n\n salida.append(dist_min)\n return tuple(salida)", "def hasher((x1, y1), (x0, y0)=(0,0)):\n return _point_hash((x0, y0)) ^ _point_hash((x0, y1)) ^ _point_hash((x1, y0)) ^ _point_hash((x1, y1))", "def check_position_for_same_occupancy(self, position1, position2):\n return self.board.board[position1] == self.board.board[position2]", "def equal(self,other):\n if(self.x == other.x) and (self.y == other.y):\n return True\n else:\n return False", "def __eq__(self, other):\n return sorted(self.points) == sorted(other.points)", "def __eq__(self, other):\n return self._coords == other._coords", "def __eq__(self, other):\n return self._coords == other._coords", "def __eq__(self, other):\n\t\treturn self._coords == other._coords", "def sameLocation(self, other):\n if not isinstance(other,Point):\n return False\n return self.longitude == other.getLongitude() and self.latitude == other.getLatitude()", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.x == other.x and self.y == other.y\n return False", "def transition_point(x1, y1, x2, y2):\n return (\n ((x1, y1), True) if abs(x1) > abs(x2) and abs(y1) > abs(y2)\n else ((x2, y2), False))", "def compare(self, x, y):\n return (self.ordering[x][y] is True) or (x == y)", "def pos_updated(self,next_pos):\n #if (int(self.oldx) == int(self.x) and int(self.oldy) == int(self.y)):\n if (int(next_pos[0]) == int(self.x) and int(next_pos[1]) == int(self.y)):\n return False\n else:\n return True", "def match_marking_points(point_a, point_b):\n \n squared_distance_thresh = 0.000277778 # 10 pixel in 600*600 image\n direction_angle_thresh = 0.5235987755982988 # 30 degree in rad \n \n dist_square = calc_point_squre_dist(point_a, point_b)\n #if min(point_a.shape[1], point_b.shape[1]) <= 2:\n if True:\n return dist_square < squared_distance_thresh\n\n angle = calc_point_direction_angle(point_a, point_b)\n if point_a[3] > 0.5 and point_b[3] < 0.5:\n return False\n if point_a[3] < 0.5 and point_b[3] > 0.5:\n return False\n return (dist_square < squared_distance_thresh\n and angle < direction_angle_thresh)", "def union(self, x, y):\n xr, yr = self.find(x), self.find(y)\n if xr == yr:\n return False\n\n if self.sz[xr] < self.sz[yr]:\n xr, yr 
= yr, xr\n\n self.par[yr] = xr\n self.sz[xr] += self.sz[yr]\n self.sz[yr] = self.sz[xr]\n\n return True", "def is_ate(self, snake_x, snake_y):\n if snake_x == self.x and snake_y == self.y:\n return True", "def match_min(coords1,coords2,tail1=(),tail2=()):\n nc=len(coords1)\n np1=len(coords1[0])\n np2=len(coords2[0])\n a1=array(coords1)\n a2=array(coords2)\n nt1=len(tail1)\n for i in range(nt1): \n if len(tail1[i])!= np1: raise 'Not the same lenght as coordinates 1'\n nt2=len(tail2)\n for i in range(nt2): \n if len(tail2[i])!= np2: raise 'Not the same lenght as coordinates 2'\n match=zeros(np1, int)-1\n\n dist_min=zeros(np1)*1.\n\n for j in range(np1):\n #dist=sqrt(add.reduce((a1[:,j,NewAxis]-a2[:,:])**2))\n a1j = a1[:,j]\n dist=add.reduce((reshape(a1j, (len(a1j), 1)) - a2)**2)\n i_min=argmin(dist)\n dist_min[j]=dist[i_min]\n match[j]=i_min\n\n salida=list(a1)\n for i in range(nt1):salida.append(tail1[i])\n \n for i in range(nt2):\n if type(tail2[i][0])==type('si'):\n t=[]\n for j in match: t.append(tail2[i][j])\n else:\n t=take(tail2[i],match)\n salida.append(t)\n\n salida.append(dist_min)\n return tuple(salida)", "def __eq__(self, *args):\n return _ida_hexrays.ctext_position_t___eq__(self, *args)", "def position_after(self, a, b):\n return self.position_at_least(a, b) and not self.position_equal(a, b)", "def overlap(x,y):\n if (x[0]<=y[-1] and x[-1]>y[0]) or (y[0]<=x[-1] and y[-1]>x[0]):\n return 1\n else: return 0", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def all_lt(self, other):\n return self.x < other.x and self.y < other.y", "def is_equal(self, a, b):\n return a.X[0] == b.X[0]", "def nearlyEqual(self, x, y):\n return self.absoluteerror(x).nearlyEqual(x, y)", "def __lt__(self, other):\n return self.y < other.y or (\n not self.y > other.y and\n self.x < other.x\n )", "def closest(self, x, y):\n if self.direction == 'horizontal':\n p_pts = np.array([\n self.ax.transData.transform((p, 0))[0] for p in self.positions\n ])\n dist = abs(p_pts - x)\n else:\n p_pts = np.array([\n self.ax.transData.transform((0, p))[1] for p in self.positions\n ])\n dist = abs(p_pts - y)\n index = np.argmin(dist)\n return index, dist[index]", "def union(self, x, y):\n \n px, py = self.find(x), self.find(y)\n if px != py:\n \n if self.size[px] > self.size[py]:\n px, py = py, px\n \n\n self.parent[px] = py\n self.size[py] += self.size[px]", "def are_similar(first_coords: List[Tuple[int, int]], second_coords: List[Tuple[int, int]]) -> bool:\n # Step 1: Get angles of each triangle\n # Step 2: Compare grades of two triangles\n # Step 3: If two angles are equal then first triangle is similar to second triangle\n pass", "def inSameSquare(self, pos1, pos2):\n sameColThird = (math.floor((pos1 % 9) / 3) == math.floor((pos2 % 9) / 3))\n sameRowThird = (math.floor(pos1/27) == math.floor(pos2/27))\n\n return sameRowThird and sameColThird", "def cmp(x, y):\n return (x > y) - (x < y)", "def heuristic(self, a, b):\n return math.fabs(a[0] - b[0]) + math.fabs(a[1] - b[1])", "def compare(chr1, pos1, chr2, pos2):\n\tpos1 = int(pos1)\n\tpos2 = int(pos2)\n\tif chrsort == 'version':\n\t\tchr1 = mapChrForVersion(chr1)\n\t\tchr2 = mapChrForVersion(chr2)\n\telif chrsort == 'natural':\n\t\tpass # use original chr1, chr2\n\telse:\n\t\tchr1 = chrsort.get(chr1, chr1)\n\t\tchr2 = chrsort.get(chr2, chr2)\n\treturn -1 if (chr1, pos1) < (chr2, pos2) else 1 if (chr1, pos1) > (chr2, pos2) else 0", "def almost_equals(self, other):\n import 
math\n ox, oy = other\n dx = self[0] - ox\n dy = self[1] - oy\n return (dx*dx + dy*dy) < pygonal.EPSILON2", "def PlayerCMP(player_x, player_y):\n\n if player_x.GetElo() > player_y.GetElo():\n return -1\n elif player_y.GetElo() > player_x.GetElo():\n return 1\n else:\n return 0", "def cmp(x, y):\n if x == y:\n return 0\n elif x is None:\n if y is None:\n return 0\n else:\n return -1\n elif y is None:\n return 1\n else:\n # TODO: consider casting the values to string or int or floats?\n # note that this is the minimal replacement function\n return (x > y) - (x < y)", "def lines_up(x1, y1, x2, y2):\n if x1 == x2 and y1 == y2:\n return 4\n if x1 <= x2 and y1 >= y2:\n return 3\n if x1 >= x2 and y1 <= y2:\n return 2\n if (x2 <= x1 and x1 <= y2) or (x2 <= y1 and y1 <= y2):\n return 1\n if y1 < x2 or y2 < x1:\n return 0\n return 5", "def get_offset(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\toffset_x = 0\r\n\toffset_y = 0\r\n\r\n\tif diff_y > 0:\r\n\t\toffset_y = 1\r\n\telif diff_y < 0:\r\n\t\toffset_y = -1\r\n\r\n\tif diff_x > 0:\r\n\t\toffset_x = 1\r\n\telif diff_x < 0:\r\n\t\toffset_x = -1\r\n\r\n\treturn (offset_x, offset_y)", "def ball_touch(self, ball1, ball2):\r\n cdistsq = ((ball2.x-ball1.x)**2 +\r\n (ball2.y-ball1.y)**2)\r\n if cdistsq < (ball1.r+ball2.r)**2:\r\n return True\r\n return False", "def compare(first_point, ref_point, compare_point):\n x2, y2 = first_point\n x1, y1 = ref_point\n x3, y3 = compare_point\n m = (y2 - (y1 * 1.00001)) / (x2 - (x1 * 1.00001))\n return y3 - y1 - m * (x3 - x1)", "def resta(x, y):\n return x - y", "def __eq__(self, other):\n return self.point == other.point", "def __eq__(self, other):\n return np.all(self.grid == other.grid) and np.all(self.pos == other.pos)", "def findDct(self, x, y, X, Y):\n if x == X:\n if Y < y:\n return 3\n else:\n return 1\n else:\n if X > x:\n return 0\n else:\n return 2", "def __eq__(self, other):\n return self.points == other.points", "def __eq__(self, other):\n try:\n return (self.id, self.x, self.y) == (other.id, other.x, other.y)\n except AttributeError:\n return NotImplemented", "def test_point_relations(p1, p2):\n assert p1.left_of(p2) or p1.x >= p2.x\n assert p1.is_right_of(p2) or p1.x <= p2.x\n\n assert p1.left_of(p2) == p2.is_right_of(p1) or p1.x == p2.x\n assert not p1.left_of(p2) or not p1.is_right_of(p2)\n assert not p2.left_of(p1) or not p2.is_right_of(p1)", "def diff(self, other):\n Δx = self.x - other.x\n Δy = self.y - other.y\n return (Δx, Δy)", "def __flt_eq_pos(self, other):\n if self.position is None:\n return True\n\n return (\n self.position == other.position\n and self.is_lean == other.is_lean\n )", "def nearby():\n for i in ids:\n for j in ids:\n if i != j:\n if sum([1 for x,y in zip(i,j) if x!=y]) == 1:\n print(\"\".join([x for x,y in zip(i,j) if x==y]))\n return", "def is_adjacent(self, other: ops.Qid) -> bool:\n return (isinstance(other, GridQubit) and\n abs(self.row - other.row) + abs(self.col - other.col) == 1)", "def __eq__(self, other):\n return self.master.phy2abs(pos=other)", "def fn(x, y):\n x, y = abs(x), abs(y) # symmetry \n if x == y == 0: return 0 \n if x + y == 2: return 2\n return 1 + min(fn(x-2, y-1), fn(x-1, y-2))", "def _is_equal(x, y):\n return x[0] == y", "def overlaps(x1, x2, y1, y2):\n\n return x1 <= y2 and y1 <= x2", "def compareNodes(x, y):\n return x.pathValue - y.pathValue", "def _cmp(x, y):\n if x[1].count > y[1].count:\n return CmpRelation.GREATER\n if x[1].count < y[1].count:\n return CmpRelation.LESS\n if x[1].ptn_length < y[1].ptn_length:\n return 
CmpRelation.GREATER\n if x[1].ptn_length > y[1].ptn_length:\n return CmpRelation.LESS\n return CmpRelation.EQUAL", "def match_objects(coords1,coords2,tail1=(),tail2=(),accuracy=1.):\n acc2=accuracy**2\n nc=len(coords1)\n np1=len(coords1[0])\n np2=len(coords2[0])\n a1=array(coords1)\n a2=array(coords2)\n nt1=len(tail1)\n for i in range(nt1): \n if len(tail1[i])!= np1: raise 'Not the same lenght as coordinates 1'\n nt2=len(tail2)\n for i in range(nt2): \n if len(tail2[i])!= np2: raise 'Not the same lenght as coordinates 2'\n match=zeros(np1, int)-1\n for j in range(np1):\n #dist=add.reduce((a1[:,j,NewAxis]-a2[:,:])**2)\n a1j = a1[:,j]\n dist=add.reduce((reshape(a1j, (len(a1j), 1)) - a2)**2)\n i_min=argmin(dist)\n if dist[i_min]<acc2:match[j]=i_min\n good=greater_equal(match,0)\n n1=compress(good,list(range(np1))) \n match=compress(good,match)\n a1=compress(good,a1)\n salida=list(a1)\n for i in range(nt1):\n if type(tail1[i][0])==type('si'):\n t=[]\n for j in n1: t.append(tail1[i][j])\n else:\n t=take(tail1[i],n1)\n salida.append(t)\n for i in range(nt2):\n if type(tail2[i][0])==type('si'):\n t=[]\n for j in match: t.append(tail2[i][j])\n else:\n t=take(tail2[i],match)\n salida.append(t)\n return salida", "def comparekp (left, right, kp1, kp2):\n subplot (121)\n arx = array ([kp1.pt[0]])\n ary = array ([kp1.pt[1]])\n hold(True)\n imshow(left)\n scatter (arx, ary)\n\n subplot (122)\n arx = array ([kp2.pt[0]])\n ary = array ([kp2.pt[1]])\n hold(True)\n imshow(right)\n scatter (arx, ary)\n\n show()", "def dist_points(x,y):\n\n return abs(x[0]-y[0]) + abs(x[1]-y[1])", "def dist(self, one, two):\n return sum((one[0] != two[0], one[1] != two[1]))", "def all_gt(self, other):\n return self.x > other.x and self.y > other.y", "def is_position_allowed(new_x, new_y):\n\n return min_x <= new_x <= max_x and min_y <= new_y <= max_y", "def __eq__(self, other: Union[Coordinate, Tuple]) -> bool:\n if isinstance(other, Coordinate):\n return self.x == other.x and self.y == other.y\n elif isinstance(other, Tuple):\n return self.x == other[0] and self.y == other[1]\n else:\n return NotImplemented", "def compute_y_intersection(x, x1, x2, y1, y2):\n delta_x = x2 - x1\n if delta_x == 0:\n return x1\n return ((x - x1) * (y2 - y1) / delta_x) + y1", "def closest_points_naive(self, x, y):\r\n # Running time: O(n ** 2)\r\n\r\n dist = []\r\n for i in range(len(x)):\r\n for j in range(i+1, len(x)):\r\n d = self.get_euclidean_distance(x[i], x[j], y[i], y[j])\r\n dist.append(d)\r\n \r\n return min(dist)", "def _compare(self, boxlist1, boxlist2):\n\n ycenter1, xcenter1, _, _ = BoxList.get_center_coordinates_and_sizes(boxlist1)\n ycenter2, xcenter2, _, _ = BoxList.get_center_coordinates_and_sizes(boxlist2)\n\n centers1 = tf.transpose(tf.stack((ycenter1, xcenter1)))\n centers2 = tf.transpose(tf.stack((ycenter2, ycenter2)))\n\n centers_diff = tf.expand_dims(centers1, 1) - tf.expand_dims(centers2, 0)\n neg_l2_distance = -tf.norm(centers_diff, axis=2)\n return neg_l2_distance\n #return box_list_ops.iou(boxlist1, boxlist2)" ]
[ "0.723032", "0.7027254", "0.69411623", "0.6835873", "0.6783057", "0.6730236", "0.67250097", "0.6706741", "0.66703665", "0.65392756", "0.65288377", "0.65262926", "0.652476", "0.6507796", "0.6501238", "0.64775836", "0.6462285", "0.6461377", "0.64430475", "0.6422668", "0.637349", "0.63489276", "0.6331874", "0.6329877", "0.6292162", "0.628708", "0.6251681", "0.6250022", "0.62484777", "0.62484777", "0.6242984", "0.6227739", "0.62238836", "0.61968946", "0.6185317", "0.6166047", "0.6143137", "0.6142537", "0.6138443", "0.61379665", "0.61379665", "0.61195266", "0.6117847", "0.6107979", "0.6107549", "0.60957026", "0.60950685", "0.6085049", "0.60756356", "0.6052897", "0.6039782", "0.6035079", "0.6032038", "0.6031448", "0.6030214", "0.60283583", "0.60250837", "0.60034186", "0.5990499", "0.5978595", "0.5977365", "0.59630334", "0.5959755", "0.595875", "0.59468704", "0.5945896", "0.5936372", "0.5935725", "0.5924913", "0.5914669", "0.5913764", "0.5909542", "0.59087646", "0.5906458", "0.59022903", "0.5899917", "0.5899569", "0.58892953", "0.5885489", "0.58743393", "0.5862425", "0.5859685", "0.58596784", "0.5856241", "0.58501065", "0.5848532", "0.58211446", "0.58167803", "0.5815839", "0.581548", "0.58116454", "0.58055675", "0.5801935", "0.5800859", "0.57923996", "0.5790876", "0.57817787", "0.57748485", "0.5771696", "0.57671344" ]
0.6058523
49
For the given geometry, construct the symmetry and nondegenerate operations associated with the piece.
def __init__( self, geo, index=None):
    # the column vector self.geo[:,i] gives the i'th vertex's positions
    self.geo = np.array( geo, dtype=int)
    self.findNondegeneratePlacements()
    self.id = index
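Only the constructor appears in this record; the enclosing class and `findNondegeneratePlacements` are not shown. A runnable stand-in for illustration (the class name `Piece` and the placeholder placement method are assumptions, not the source's API):

import numpy as np

class Piece:
    def __init__(self, geo, index=None):
        # the column vector self.geo[:,i] gives the i'th vertex's positions
        self.geo = np.array(geo, dtype=int)
        self.findNondegeneratePlacements()
        self.id = index

    def findNondegeneratePlacements(self):
        # placeholder only: the real method (absent from the record) would
        # enumerate the piece's distinct symmetry placements
        self.placements = [self.geo]

piece = Piece([[0, 1, 1], [0, 0, 1]], index=3)  # columns are vertex positions
print(piece.geo.shape, piece.id)  # (2, 3) 3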
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sym_elements(self):\n def trans(name, *sym):\n t = Element.TRANSFORMS\n n = name.split('_')\n\n for x in sym:\n n[-1] = t[x][n[-1]]\n\n return '_'.join(n)\n\n def primary():\n e = self.copy()\n e.name = '{}_p'.format(self.name)\n return e\n\n def x_sym():\n e = self.copy()\n e.name = '{}_x'.format(self.name)\n e.inode = trans(self.inode, 'x')\n e.jnode = trans(self.jnode, 'x')\n return e\n\n def y_sym():\n e = self.copy()\n e.name = '{}_y'.format(self.name)\n e.inode = trans(self.inode, 'y')\n e.jnode = trans(self.jnode, 'y')\n return e\n\n def xy_sym():\n e = self.copy()\n e.name = '{}_xy'.format(self.name)\n e.inode = trans(self.inode, 'x', 'y')\n e.jnode = trans(self.jnode, 'x', 'y')\n return e\n\n if self.symmetry is None:\n return primary(),\n\n elif self.symmetry == 'x':\n return primary(), x_sym()\n\n elif self.symmetry == 'y':\n return primary(), y_sym()\n\n elif self.symmetry == 'xy':\n return primary(), x_sym(), y_sym(), xy_sym()", "def compute_all_jxy(polygon=None):\n expressions = []\n symmetric = []\n\n # given a 12-gon, we do the following:\n # polygon = Symbolic12Gon()\n # polygon = make_regular()\n if polygon is None:\n polygon = make_any_gon()\n # polygon = make_assumption_gon()\n\n # print(polygon.vertices)\n for i in range(6):\n print(i)\n # translate such that this point is the origin\n# polygon = polygon.translate(polygon.vertices[i])\n# print(polygon)\n # shear so that the diagonal we are considering is vertical\n try:\n q = polygon.vertices[i].qx_to_shear_by(polygon.vertices[i+1])\n# print(\"q1:\", q.rational(D=3), q.irrational(D=3))\n except ZeroDivisionError:\n print(\"-------\")\n print(\"division by 0!\")\n print(\"-------\")\n continue\n\n sheared_polygon = polygon.shear_x_zero(q)\n# print(sheared_polygon)\n# print(\"test:\", sheared_polygon.vertices[i] - sheared_polygon.vertices[i+1])\n w, h = sheared_polygon.get_cylinder(i)\n # print(\"h: \",h.full_simplify())\n# print(\"shear 1 w: \",w.full_simplify())\n # print(len(sheared_polygon.vertices))\n# print(sheared_polygon.vertices[i])\n # shear again so that the edge that we consider is horizontal\n try:\n q = sheared_polygon.vertices[i].qy_to_shear_by(sheared_polygon.vertices[(i + 7) % 12])\n# print(sheared_polygon.vertices[i], sheared_polygon.vertices[(i + 7) % 12])\n# print(\"q2:\", q.rational(D=3), q.irrational(D=3))\n except ZeroDivisionError:\n print(\"-------\")\n print(\"division by 0!\")\n print(\"-------\")\n continue\n\n twice_sheared = sheared_polygon.shear_y_zero(q)\n\n # rescale such that the modulus of the vertical cylinder is rational\n w, h = twice_sheared.get_cylinder(i)\n# print(\"shear 2 h: \",h.full_simplify())\n# print(\"shear 2 w: \",w.full_simplify())\n # print(w.y, h.x)\n stretch_factor = w.x/h.y # this should be reciprocated, but we just care it is rational\n # print(stretch_factor)\n stretched_polygon = sheared_polygon.stretch_y(stretch_factor)\n\n # compute Jxy\n jxy = stretched_polygon.jxy()\n expressions.append(jxy)\n symmetric.append((jxy[1], jxy[2]))\n\n return expressions, symmetric", "def sym_nodes(self):\n def primary():\n n = self.copy()\n n.name = '{}_p'.format(self.name)\n return n\n\n def x_sym():\n n = self.copy()\n n.name = '{}_x'.format(self.name)\n n[1] *= -1\n return n\n\n def y_sym():\n n = self.copy()\n n.name = '{}_y'.format(self.name)\n n[0] *= -1\n return n\n\n def xy_sym():\n n = self.copy()\n n.name = '{}_xy'.format(self.name)\n n[:2] *= -1\n return n\n\n if self.symmetry is None:\n return primary(),\n\n elif self.symmetry == 'x':\n return primary(), 
x_sym()\n\n elif self.symmetry == 'y':\n return primary(), y_sym()\n\n elif self.symmetry == 'xy':\n return primary(), x_sym(), y_sym(), xy_sym()", "def simplify_basic(drawing, process=False, **kwargs):\n\n if any(entity.__class__.__name__ != 'Line'\n for entity in drawing.entities):\n log.debug('Skipping path containing entities other than `Line`')\n return drawing\n\n # we are going to do a bookkeeping to avoid having\n # to recompute literally everything when simplification is ran\n cache = copy.deepcopy(drawing._cache)\n\n # store new values\n vertices_new = collections.deque()\n entities_new = collections.deque()\n\n # avoid thrashing cache in loop\n scale = drawing.scale\n\n # loop through (n, 2) closed paths\n for discrete in drawing.discrete:\n # check to see if the closed entity is a circle\n circle = is_circle(discrete,\n scale=scale)\n if circle is not None:\n # the points are circular enough for our high standards\n # so replace them with a closed Arc entity\n entities_new.append(entities.Arc(points=np.arange(3) +\n len(vertices_new),\n closed=True))\n vertices_new.extend(circle)\n else:\n # not a circle, so clean up colinear segments\n # then save it as a single line entity\n points = merge_colinear(discrete, scale=scale)\n # references for new vertices\n indexes = np.arange(len(points)) + len(vertices_new)\n # discrete curves are always closed\n indexes[-1] = indexes[0]\n # append new vertices and entity\n entities_new.append(entities.Line(points=indexes))\n vertices_new.extend(points)\n\n # create the new drawing object\n simplified = type(drawing)(\n entities=entities_new,\n vertices=vertices_new,\n metadata=copy.deepcopy(drawing.metadata),\n process=process)\n # we have changed every path to a single closed entity\n # either a closed arc, or a closed line\n # so all closed paths are now represented by a single entity\n cache.cache.update({\n 'paths': np.arange(len(entities_new)).reshape((-1, 1)),\n 'path_valid': np.ones(len(entities_new), dtype=bool),\n 'dangling': np.array([])})\n\n # force recompute of exact bounds\n if 'bounds' in cache.cache:\n cache.cache.pop('bounds')\n\n simplified._cache = cache\n # set the cache ID so it won't dump when a value is requested\n simplified._cache.id_set()\n\n return simplified", "def build_symmetry_operations(symmetry: List[Any]) -> None:\n dim = len(symmetry[0][0])\n unit = numpy.identity(dim, dtype=int)\n for permutation in symmetry:\n perm = unit[:, numpy.argsort(permutation[0])]\n permutation[0] = perm", "def switch_notation(self, ordering_symbol='a', add_zeros=0):\n fermionic_part = self[0]\n bosonic_part = self[1]\n\n fermions = [\n tuple([\n fermionic_part[x],\n 'circle',\n tuple([ordering_symbol, x])\n ])\n for x in range(len(fermionic_part))\n ]\n bosons = [\n tuple([\n bosonic_part[x],\n 'box',\n tuple([ordering_symbol])\n ])\n for x in range(len(bosonic_part))\n ]\n if add_zeros > 0:\n bosons += [tuple([0, 'box', tuple([''])])\n for x in range(add_zeros)]\n notated_spart = fermions + bosons\n notated_spart.sort(reverse=True)\n return tuple(notated_spart)", "def generate_symmetry(points, pts_left, pts_right, mode = 1, offset = 0.2):\n\n assert(mode == 1 or mode == 2), \"The selected mode (%d) is not defined!\" % mode\n\n # horizental\n box_min, box_max = points.min(0), points.max(0)\n hor_axis = box_max[0] + offset\n \n new_comp = deepcopy(points)\n new_comp[:, 0] = 2 * hor_axis - new_comp[:, 0]\n \n points = np.vstack([points, new_comp])\n pts_left = np.hstack([pts_left, pts_left + len(pts_left)])\n pts_right = 
np.hstack([pts_right, pts_right + len(pts_right)])\n \n if mode == 2:\n # vertical\n ver_axis = box_min[1] + offset\n \n new_comp = deepcopy(points)\n new_comp[:, 1] = 2 * offset - new_comp[:, 1]\n \n points = np.vstack([points, new_comp])\n pts_left = np.hstack([pts_left, pts_left + len(pts_left)])\n pts_right = np.hstack([pts_right, pts_right + len(pts_right)])\n\n return points, pts_left, pts_right", "def run(self, dag):\n\n q_gate_list = ['cx', 'cy', 'cz', 'h', 'x', 'y', 'z']\n\n # Gate sets to be cancelled\n cancellation_sets = defaultdict(lambda: [])\n\n for wire in dag.wires:\n wire_name = \"{0}[{1}]\".format(str(wire.register.name), str(wire.index))\n wire_commutation_set = self.property_set['commutation_set'][wire_name]\n\n for com_set_idx, com_set in enumerate(wire_commutation_set):\n if com_set[0].type in ['in', 'out']:\n continue\n for node in com_set:\n num_qargs = len(node.qargs)\n if num_qargs == 1 and node.name in q_gate_list:\n cancellation_sets[(node.name, wire_name, com_set_idx)].append(node)\n if num_qargs == 1 and node.name in ['u1', 'rz', 't', 's']:\n cancellation_sets[('z_rotation', wire_name, com_set_idx)].append(node)\n elif num_qargs == 2 and node.qargs[0] == wire:\n second_op_name = \"{0}[{1}]\".format(str(node.qargs[1].register.name),\n str(node.qargs[1].index))\n q2_key = (node.name, wire_name, second_op_name,\n self.property_set['commutation_set'][(node, second_op_name)])\n cancellation_sets[q2_key].append(node)\n\n for cancel_set_key in cancellation_sets:\n set_len = len(cancellation_sets[cancel_set_key])\n if ((set_len) > 1 and cancel_set_key[0] in q_gate_list):\n gates_to_cancel = cancellation_sets[cancel_set_key]\n for c_node in gates_to_cancel[:(set_len // 2) * 2]:\n dag.remove_op_node(c_node)\n\n elif((set_len) > 1 and cancel_set_key[0] == 'z_rotation'):\n run = cancellation_sets[cancel_set_key]\n run_qarg = run[0].qargs[0]\n total_angle = 0.0 # lambda\n for current_node in run:\n if (current_node.condition is not None\n or len(current_node.qargs) != 1\n or current_node.qargs[0] != run_qarg):\n raise TranspilerError(\"internal error\")\n\n if current_node.name in ['u1', 'rz']:\n current_angle = float(current_node.op.params[0])\n elif current_node.name == 't':\n current_angle = sympy.pi / 4\n elif current_node.name == 's':\n current_angle = sympy.pi / 2\n\n # Compose gates\n total_angle = current_angle + total_angle\n\n # Replace the data of the first node in the run\n new_op = U1Gate(total_angle)\n new_qarg = QuantumRegister(1, 'q')[0]\n new_dag = DAGCircuit()\n new_dag.add_qreg(new_qarg.register)\n new_dag.apply_operation_back(new_op, [new_qarg])\n dag.substitute_node_with_dag(run[0], new_dag)\n\n # Delete the other nodes in the run\n for current_node in run[1:]:\n dag.remove_op_node(current_node)\n\n return dag", "def generate_symbole(figure_name = \"canon\"):\n if figure_name == \"planeur\": #PLANNEUR\n planneur = np.zeros((3, 3))\n planneur[1, 0] = 1\n planneur[0, 1] = 1\n planneur[0, 2] = 1\n planneur[1, 2] = 1\n planneur[2, 2] = 1\n return planneur\n\n elif figure_name == \"canon\": #CANON\n canon = np.zeros((36,9))\n canon[0:2,5:7] = 1\n canon[11,4:7] = 1\n canon[15:17,4:7] = 1\n canon[12,3] = 1\n canon[14,3] = 1\n canon[13,2] = 1\n canon[12,7] = 1\n canon[14,7] = 1\n canon[13,8] = 1\n canon[25,0:2] = 1\n canon[22:25,1:3] = 1\n canon[21,2:5] = 1\n canon[24,3] = 1\n canon[22:25,4:6] = 1\n canon[25,5:7] = 1\n canon[30,1:3] = 1\n canon[34:36,3:5] = 1\n return canon\n\n elif figure_name == \"blinker\": #BLINKER\n blinker = np.ones((3,1))\n return 
blinker\n\n elif figure_name == \"oscillator_alone\":\n osc = np.zeros((11,11))\n osc[2,2:9] = 1\n osc[8,2:9] = 1\n osc[2:9,2] = 1\n osc[2:9,8] = 1\n osc[5,2] = 0\n osc[5,8] = 0\n osc[2,5] = 0\n osc[8,5] = 0\n osc[0,5] = 1\n osc[10,5] = 1\n osc[5,0] = 1\n osc[5,10] = 1\n osc[1,4:7] = 1\n osc[9,4:7] = 1\n osc[4:7,1] = 1\n osc[4:7,9] = 1\n return osc\n\n elif figure_name == \"oscillator_one_block\":\n osc = generate_symbole(\"oscillator_alone\")\n osc[0:2,-2:] = 1\n return osc\n\n elif figure_name == \"oscillator_four_blocks\":\n osc = generate_symbole(\"oscillator_alone\")\n osc[0:2, -2:] = 1\n osc[0:2,0:2] = 1\n osc[-2:,0:2] = 1\n osc[-2:,-2:] = 1\n return osc\n\n elif figure_name == \"croix\":\n return osc\n\n elif figure_name == \"diag\":\n return osc\n\n elif figure_name == \"octogone\":\n return osc\n\n else:\n return 0", "def build_operators(self, pixels, plate_scale, return_bispectrum_operator=True):\n A = self.ndftm_matrix(pixels, plate_scale)\n Ainv = self.ndftm_matrix(pixels, plate_scale, inv=True)\n if return_bispectrum_operator:\n A1, A2, A3 = self.closure_fourier_matrices(A)\n return A, Ainv, A1, A2, A3\n else:\n return A, Ainv", "def _apply_symmetry(self):\r\n self.command_stack.do(model.structure.ApplySymmetry(self._structure))", "def patterning(mesh, operator):\n\n\n operators = {\n 'conway_dual': conway_dual,\n 'conway_join': conway_join,\n 'conway_ambo': conway_ambo,\n 'conway_kis': conway_kis,\n 'conway_needle': conway_needle,\n 'conway_zip': conway_zip,\n 'conway_truncate': conway_truncate,\n 'conway_ortho': conway_ortho,\n 'conway_expand': conway_expand,\n 'conway_gyro': conway_gyro,\n 'conway_snub': conway_snub,\n 'conway_meta': conway_meta,\n 'conway_bevel': conway_bevel\n }\n\n try:\n operators[operator](mesh)\n return mesh\n except:\n return mesh", "def cantor() -> bigger.MCG[Edge]: # pylint: disable=too-many-statements\n\n POS, EQ, NEG = +1, 0, -1\n\n def edges() -> Iterable[Edge]:\n for x in naturals():\n for y in [POS, EQ, NEG]:\n yield x, y\n\n def negate(X: Edge) -> Edge:\n return X[0], -X[1]\n\n def invert(sign: int, X: tuple[Edge, bool, Edge, bool, Edge, bool, Edge, bool]) -> tuple[Edge, bool, Edge, bool, Edge, bool, Edge, bool]:\n return X if sign == POS else (negate(X[6]), not X[7], negate(X[4]), not X[5], negate(X[2]), not X[3], negate(X[0]), not X[1])\n\n def link(edge: Edge) -> tuple[Edge, bool, Edge, bool, Edge, bool, Edge, bool]:\n n, k = edge\n if k == EQ: # Equator\n if n == 0:\n return ((0, NEG), False, (1, NEG), True, (1, POS), False, (0, POS), True)\n elif n == 1:\n return ((2, POS), False, (0, POS), False, (0, NEG), True, (2, NEG), True)\n else: # n > 1\n return ((3 * n - 3, NEG), False, (3 * n - 1, NEG), True, (3 * n - 1, POS), False, (3 * n - 3, POS), True)\n\n # Northern / Southern hemisphere.\n if n == 0:\n return invert(k, ((0, EQ), False, (1, POS), False, (1, EQ), True, (2, POS), False))\n elif n == 1:\n return invert(k, ((4, POS), False, (3, POS), False, (0, POS), True, (0, EQ), False))\n elif n == 2:\n return invert(k, ((7, POS), False, (6, POS), False, (0, POS), False, (1, EQ), True))\n N, r = n // 3 + 1, n % 3\n incoming = 3 * (N // 2) - (1 if N % 2 else 2)\n if r == 0:\n return invert(k, ((N, EQ), False, (n + 2, POS), False, (incoming, POS), True, (n + 1, POS), False))\n elif r == 1:\n return invert(k, ((6 * N - 2, POS), False, (6 * N - 3, POS), False, (n - 1, POS), False, (incoming, POS), True))\n else: # r == 2:\n return invert(k, ((6 * N + 1, POS), False, (6 * N + 0, POS), False, (n - 2, POS), True, (N, EQ), False))\n\n T = 
bigger.Triangulation.from_pos(edges, link)\n\n def generator(name: str) -> bigger.Encoding[Edge]: # pylint: disable=too-many-branches\n twist_match = re.match(r\"(?P<curve>[ab])_(?P<n>-?\\d+)$\", name)\n rotate_match = re.match(r\"r$\", name)\n\n if twist_match is not None:\n parameters = twist_match.groupdict()\n curve_name = parameters[\"curve\"]\n N = int(parameters[\"n\"])\n if curve_name == \"a\":\n if N == 1:\n cut_sequence = [(0, EQ), (0, POS), (1, EQ)]\n else:\n cut_sequence = [(0, EQ), (N, EQ), (3 * N - 3, POS)]\n while N > 1:\n low_N = N // 2\n cut_sequence.append((3 * low_N - (1 if N % 2 else 2), POS))\n if N % 2:\n cut_sequence.append((3 * low_N - 3, POS))\n N = low_N\n elif curve_name == \"b\":\n if N <= 3:\n cut_sequence = [(0, EQ), (0, POS), (1, EQ)]\n else:\n extend_left = N % 2\n N = N // 2\n cut_sequence = [(N, EQ), (3 * N - 3, POS)]\n while N > 1:\n N_low = N // 2\n cut_sequence.append((3 * N_low - (1 if N % 2 else 2), POS))\n if extend_left:\n cut_sequence.append((3 * N_low - 3, POS))\n if N % 2 != extend_left:\n cut_sequence.append((N_low, EQ))\n break\n N = N_low\n else:\n cut_sequence.append((0, EQ))\n\n curve = T(dict(((x, y * s), 1) for x, y in cut_sequence for s in [+1, -1]))\n return curve.twist()\n elif rotate_match is not None:\n\n def isom(edge: Edge) -> Edge:\n n, k = edge\n if k == EQ:\n if n == 0:\n return (1, EQ)\n elif n == 1:\n return (0, EQ)\n return (n ^ (1 << n.bit_length() - 2), k)\n\n if n == 0:\n return (0, k)\n elif n == 1:\n return (2, k)\n elif n == 2:\n return (1, k)\n N, r = n // 3 + 1, n % 3\n return (3 * (N ^ (1 << N.bit_length() - 2)) - 3 + r, k)\n\n return T.encode([(-1, isom, isom)])\n\n raise ValueError(f\"Unknown mapping class {name}\")\n\n return bigger.MCG(T, generator)", "def to_DiGraph(program):\r\n grid = {}\r\n\r\n for idx, op in enumerate(program.operations):\r\n dependencies = set(op['modes'])\r\n\r\n if 'args' in op:\r\n\r\n for a in op['args']:\r\n if isinstance(a, RegRefTransform):\r\n dependencies |= set(a.regrefs)\r\n\r\n for _, v in op['kwargs'].items():\r\n if isinstance(v, RegRefTransform):\r\n dependencies |= set(v.regrefs)\r\n else:\r\n op['args'] = []\r\n op['kwargs'] = {}\r\n\r\n cmd = Command(name=op['op'], args=op['args'], kwargs=op['kwargs'], modes=tuple(op['modes']))\r\n\r\n for q in dependencies:\r\n # Add cmd to the grid to the end of the line r.ind.\r\n if q not in grid:\r\n # add a new line to the circuit\r\n grid[q] = []\r\n\r\n grid[q].append([idx, cmd])\r\n\r\n G = nx.DiGraph()\r\n\r\n for q, cmds in grid.items():\r\n if cmds:\r\n # add the first operation on the wire that does not depend on anything\r\n attrs = cmds[0][1]._asdict()\r\n G.add_node(cmds[0][0], **attrs)\r\n\r\n for i in range(1, len(cmds)):\r\n # add the edge between the operations, and the operation nodes themselves\r\n if cmds[i][0] not in G:\r\n attrs = cmds[i][1]._asdict()\r\n G.add_node(cmds[i][0], **attrs)\r\n\r\n G.add_edge(cmds[i-1][0], cmds[i][0])\r\n\r\n return G", "def simplify(self, tolerance, preserve_topology=...): # -> BaseGeometry:\n ...", "def compute_symmetry_3d(structdata, standardize, primitive, idealize, symprec,\n angletol):\n validate_with_json(structdata, \"structure\")\n\n angletol = -1 if angletol is None else angletol\n\n # first create the cell to pass to spglib\n lattice = structdata[\"lattice\"]\n ccoords = structdata[\"ccoords\"]\n\n # spglib only uses the atomic numbers to demark inequivalent sites\n inequivalent_sites = (np.array(structdata[\"atomic_numbers\"]) * 1000 +\n 
np.array(structdata[\"equivalent\"])).tolist()\n\n if \"kinds\" in structdata:\n inequivalent_to_kind = {\n i: k\n for i, k in zip(inequivalent_sites, structdata[\"kinds\"])\n }\n else:\n inequivalent_to_kind = None\n\n fcoords = cart2frac(lattice, ccoords)\n cell = [lattice, fcoords, inequivalent_sites]\n cell = tuple(cell)\n\n if standardize or primitive:\n scell = spglib.standardize_cell(\n cell,\n no_idealize=not idealize,\n to_primitive=primitive,\n symprec=symprec,\n angle_tolerance=angletol)\n if scell is None:\n raise ValueError(\"standardization of cell failed: {}\".format(cell))\n cell = scell\n\n lattice = cell[0].tolist()\n fcoords = cell[1]\n ccoords = frac2cart(lattice, fcoords)\n inequivalent_sites = cell[2].tolist()\n\n # find symmetry\n # TODO can we get only the symmetry operators accepted by CRYSTAL?\n symm_dataset = spglib.get_symmetry_dataset(\n cell, symprec=symprec, angle_tolerance=angletol)\n if symm_dataset is None:\n # TODO option to use P1 symmetry if can't find symmetry\n raise ValueError(\"could not find symmetry of cell: {}\".format(cell))\n sg_num = symm_dataset[\n 'number'] if symm_dataset['number'] is not None else 1\n crystal_type = get_crystal_system(sg_num, as_number=True)\n\n # format the symmetry operations (fractional to cartesian)\n symops = []\n for rot, trans in zip(symm_dataset[\"rotations\"],\n symm_dataset[\"translations\"]):\n # rot, trans = operation_frac_to_cart(lattice, rot, trans)\n symops.append(rot[0].tolist() + rot[1].tolist() + rot[2].tolist() +\n trans.tolist())\n\n # find and set centering code\n # the origin_setting (aka centering code) refers to how to convert conventional to primitive\n if primitive:\n origin_setting = get_centering_code(sg_num,\n symm_dataset[\"international\"])\n else:\n origin_setting = 1\n\n equivalent = np.mod(inequivalent_sites, 1000).tolist()\n atomic_numbers = ((np.array(inequivalent_sites) - np.array(equivalent)) /\n 1000).astype(int).tolist()\n\n # from jsonextended import edict\n # edict.pprint(symm_dataset)\n\n structdata = {\n \"lattice\": lattice,\n \"ccoords\": ccoords,\n \"pbc\": [True, True, True],\n \"atomic_numbers\": atomic_numbers,\n \"equivalent\": equivalent\n }\n\n if inequivalent_to_kind:\n structdata[\"kinds\"] = [\n inequivalent_to_kind[i] for i in inequivalent_sites\n ]\n\n symmdata = {\n \"space_group\": sg_num,\n \"operations\": symops,\n \"crystal_type\": crystal_type,\n \"centring_code\": origin_setting,\n }\n\n return structdata, symmdata", "def build_geometry(self):\n\n [Z1, Z2, Z3, Z4, Z5, Z6, Z7, Z8, rot_sign] = self._comp_point_coordinate()\n\n # Creation of curve\n curve_list = list()\n curve_list.append(Segment(Z1, Z2))\n curve_list.append(Arc1(Z2, Z3, rot_sign * self.R1, self.is_outwards()))\n curve_list.append(Segment(Z3, Z4))\n curve_list.append(Arc3(Z4, Z5, self.is_outwards()))\n curve_list.append(Segment(Z5, Z6))\n curve_list.append(Arc1(Z6, Z7, rot_sign * self.R1, self.is_outwards()))\n curve_list.append(Segment(Z7, Z8))\n\n return curve_list", "def build_geometry(self):\n\n Rbo = self.get_Rbo()\n\n point_dict = self._comp_point_coordinate()\n Z1 = point_dict[\"Z1\"]\n Z2 = point_dict[\"Z2\"]\n Z3 = point_dict[\"Z3\"]\n Z4 = point_dict[\"Z4\"]\n Z5 = point_dict[\"Z5\"]\n Z6 = point_dict[\"Z6\"]\n Z7 = point_dict[\"Z7\"]\n Z8 = point_dict[\"Z8\"]\n Z9 = point_dict[\"Z9\"]\n Z10 = point_dict[\"Z10\"]\n\n # Creation of curve\n curve_list = list()\n curve_list.append(Segment(Z1, Z2))\n curve_list.append(Arc1(Z2, Z3, -Rbo + self.H0, is_trigo_direction=False))\n 
curve_list.append(Arc1(Z3, Z4, -self.R1, is_trigo_direction=False))\n curve_list.append(Segment(Z4, Z5))\n curve_list.append(Arc1(Z5, Z6, Rbo - self.H0 - self.H2, is_trigo_direction=True))\n curve_list.append(Segment(Z6, Z7))\n curve_list.append(Arc1(Z7, Z8, -self.R1, is_trigo_direction=False))\n curve_list.append(Arc1(Z8, Z9, -Rbo + self.H0, is_trigo_direction=False))\n curve_list.append(Segment(Z9, Z10))\n\n return curve_list", "def symmetric_difference(self, other): # -> BaseGeometry:\n ...", "def createStoichiometryMath(self):\n return _libsbml.SpeciesReference_createStoichiometryMath(self)", "def __init__(self, geom, matrix, materialnodebysymbol):\n self.matrix = matrix\n self.materialnodebysymbol = materialnodebysymbol\n self._primitives = geom._primitives\n self.original = geom", "def set_symmetry(self, Gl, Gr, verbose=False):\n symmetry_pairs = iproduct(Gl, Gr)\n if verbose:\n import tqdm\n symmetry_pairs = tqdm.tqdm(symmetry_pairs, total=Gl.size * Gr.size)\n orientation_region = OrientationRegion.from_symmetry(Gl, Gr)\n o_inside = self.__class__.identity(self.shape)\n outside = np.ones(self.shape, dtype=bool)\n for gl, gr in symmetry_pairs:\n o_transformed = gl * self[outside] * gr\n o_inside[outside] = o_transformed\n outside = ~(o_inside < orientation_region)\n if not np.any(outside):\n break\n o_inside._symmetry = (Gl, Gr)\n return o_inside", "def SetOperatorSymmetry(self, is_sym):\n return _hypre.HypreSmoother_SetOperatorSymmetry(self, is_sym)", "def _represent_ZGate(self, basis, **options):\n _format = options.get('format', 'sympy')\n n = 1\n definite_state = 0\n for it in reversed(self.qubit_values):\n definite_state += n*it\n n = n*2\n result = [0]*(2**self.dimension)\n result[int(definite_state)] = 1\n if _format == 'sympy':\n return Matrix(result)\n elif _format == 'numpy':\n import numpy as np\n return np.array(result, dtype='complex').transpose()\n elif _format == 'scipy.sparse':\n from scipy import sparse\n return sparse.csr_matrix(result, dtype='complex').transpose()", "def make_oneq_cliffords():\n ixyz_list = [g().to_matrix() for g in (IGate, XGate, YGate, ZGate)]\n ih_list = [g().to_matrix() for g in (IGate, HGate)]\n irs_list = [\n IGate().to_matrix(),\n SdgGate().to_matrix() @ HGate().to_matrix(),\n HGate().to_matrix() @ SGate().to_matrix(),\n ]\n oneq_cliffords = [\n Operator(ixyz @ ih @ irs) for ixyz in ixyz_list for ih in ih_list for irs in irs_list\n ]\n return oneq_cliffords", "def test_SIS():\r\n def is_symmetric_mode(beta, k0, g, a_over_d, h):\r\n \"\"\"\r\n Eq (7i) of paper\r\n beta is what I call kx\r\n k0 is vacuum angular wavenumber\r\n g is thickness of air layer\r\n h is thickness of corrugated layer\r\n a_over_d is the fraction of corrugated layer which is air\r\n \"\"\"\r\n lhs = ((cmath.sqrt(beta**2 - k0**2) / k0)\r\n * cmath.tanh(g/2 * cmath.sqrt(beta**2 - k0**2)))\r\n rhs = a_over_d * cmath.tan(k0 * h)\r\n return floats_are_equal(lhs, rhs, tol=1e-4)\r\n\r\n def is_antisymmetric_mode(beta, k0, g, a_over_d, h):\r\n \"\"\"\r\n Eq (7ii) of paper\r\n \"\"\"\r\n lhs = ((cmath.sqrt(beta**2 - k0**2) / k0)\r\n / cmath.tanh(g/2 * cmath.sqrt(beta**2 - k0**2)))\r\n rhs = a_over_d * cmath.tan(k0 * h)\r\n return floats_are_equal(lhs, rhs, tol=1e-4)\r\n # Choose some parameters (can be anything, these are from Fig. 
3 caption)\r\n w = 2 * pi * (4 * nu.THz)\r\n h = 50 * nu.um\r\n g = 50 * nu.um\r\n a_over_d = 0.1\r\n \r\n # Now run analysis\r\n k0 = w / nu.c0\r\n d_over_a = a_over_d**-1\r\n # epsilon of a PEC (perfect electric conductor) is -infinity, but code\r\n # doesn't allow that. Use big value instead...\r\n PEC_eps = -1e11\r\n params = {'d_list': [inf, h, g, h, inf],\r\n 'ex_list': [PEC_eps, d_over_a, 1, d_over_a, PEC_eps],\r\n 'ez_list': [PEC_eps, PEC_eps, 1, PEC_eps, PEC_eps],\r\n 'mu_list': [1, a_over_d, 1, a_over_d, 1],\r\n 'w': w}\r\n \r\n kx_list = find_kx(params, grid_points=30, iterations=11, reduction_factor=14,\r\n plot_full_region=True,\r\n search_domain=[-1e5 * nu.m**-1, 1e5 * nu.m**-1, 0, 1e5 * nu.m**-1])\r\n \r\n print('kx_list -- ' + str(len(kx_list)) + ' entries...')\r\n print(['(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n # Here, I'm only interested in solutions on the positive real axis\r\n kx_list = [kx for kx in kx_list if abs(kx.real) > 1e5 * abs(kx.imag)]\r\n kx_list = [-kx if kx.real < 0 else kx for kx in kx_list]\r\n # Delete repeats with tolerance 1e-4\r\n kx_list_norepeat = []\r\n for kx in kx_list:\r\n if not any(floats_are_equal(kx, kx2, tol=1e-4) for kx2 in kx_list_norepeat):\r\n kx_list_norepeat.append(kx)\r\n kx_list = kx_list_norepeat\r\n print('kx_list (cleaned up) -- ' + str(len(kx_list)) + ' entries...')\r\n print(['(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n found_sym_mode = False\r\n for kx in kx_list:\r\n if is_symmetric_mode(kx, k0, g, a_over_d, h):\r\n found_sym_mode = True\r\n print('Found symmetric mode! ',\r\n '(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1))\r\n params2 = deepcopy(params)\r\n params2['kx'] = kx\r\n params2 = find_all_params_from_kx(params2)\r\n if check_mode(params2) is not True:\r\n print('Not a real mode? ... Error code:')\r\n print(check_mode(params2))\r\n else:\r\n plot_mode(params2)\r\n assert found_sym_mode\r\n found_anti_mode = False\r\n for kx in kx_list:\r\n if is_antisymmetric_mode(kx, k0, g, a_over_d, h):\r\n found_anti_mode = True\r\n print('Found antisymmetric mode! ',\r\n '(%.5g+%.5gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1))\r\n params2 = deepcopy(params)\r\n params2['kx'] = kx\r\n params2 = find_all_params_from_kx(params2)\r\n if check_mode(params2) is not True:\r\n print('Not a real mode? ... 
Error code:')\r\n print(check_mode(params2))\r\n else:\r\n plot_mode(params2)\r\n assert found_anti_mode\r\n \r\n print('Congratulations, the solver found the correct kx for both the')\r\n print('symmetric and antisymmetric mode of the structure, consistent')\r\n print('with the analytical formula in the literature.')", "def _make_symmetry_edges(self, edge_type: Tuple[int, int],\n symmetry_edge_type: Tuple[int, int],\n edge_class: int) -> NoReturn:\n all_edges_splits = [self.train_edges, self.val_edges, self.test_edges,\n self.val_edges_false, self.test_edges_false]\n for edges_split in all_edges_splits:\n edges_split[symmetry_edge_type][edge_class] = self._inverse_edges(\n edges_split[edge_type][edge_class])", "def test_disjunct_hs():\n hs1 = LocalSpace(\"1\")\n hs2 = LocalSpace(\"2\")\n alpha, beta = symbols('alpha, beta')\n A = OperatorSymbol('A', hs=hs1)\n B = OperatorSymbol('B', hs=hs2)\n assert Commutator.create(A, B) == ZeroOperator\n assert Commutator.create(alpha, beta) == ZeroOperator\n assert Commutator.create(alpha, B) == ZeroOperator\n assert Commutator.create(A, beta) == ZeroOperator", "def __init__(self, structure_a = None, structure_b = None):\n\n self.cell_1 = None\n self.cell_2 = None\n self.rep_1 = None\n self.rep_2 = None\n self.eps_11 = None\n self.eps_22 = None\n self.eps_12 = None\n self.eps_mas = None\n self.atoms = None\n self.ang = None\n\n \"\"\"(E_s1 + E_s2 - E_i)/A\"\"\"\n self.w_sep_c = None\n self.w_sep_d = None\n\n \"\"\"(E_s1 + E_s2(strained) - E_i)/A\"\"\"\n self.w_seps_c = None\n self.w_seps_d = None\n\n \"\"\"(E_i - E_b1 - E_b2)/A\"\"\"\n self.e_int_c = None\n self.e_int_d = None\n\n \"\"\"Holder for parameters usefull in various surface/interface calculations\"\"\"\n self.parameters = {\"sigma_c_11\": 0, \"sigma_c_12\": 0, \"sigma_c_21\": 0, \"sigma_c_22\": 0,\\\n \"sigma_d_11\": 0, \"sigma_d_12\": 0, \"sigma_d_21\": 0, \"sigma_d_22\": 0}\n\n if structure_a is None or structure_b is None:\n self.base_1 = None\n self.base_2 = None\n self.pos_1 = None\n self.pos_2 = None\n self.spec_1 = None\n self.spec_2 = None\n self.mass_1 = None\n self.mass_2 = None\n else:\n self.base_1 = structure_a.cell.copy()\n self.base_2 = structure_b.cell.copy()\n self.pos_1 = structure_a.pos.copy()\n self.pos_2 = structure_b.pos.copy()\n self.spec_1 = structure_a.type_n.copy()\n self.spec_2 = structure_b.type_n.copy()\n self.mass_1 = structure_a.mass.copy()\n self.mass_2 = structure_b.mass.copy()\n\n self.filename = None\n self.alt_base_1 = None\n self.alt_base_2 = None\n self.order = None", "def byGeometry(graph, attrs = [], field=('','')):\n edgeId = 0 # for edge's id\n outputGraph = nx.DiGraph() # output : networkx graph\n \n intersectedNodes = intersect._intersections(graph)\n \n # preparation for setting attributes\n attrDict = {}\n for attr in attrs:\n attrDict[attr] = {}\n \n ## for each edge, check if any of intersected points are on the edge\n \n # if there is a rule to implement the intersect function using a data field\n if field == ('', ''):\n edges = [edge for edge in graph.edges(data=True)]\n else:\n edges = []\n not_consdiered_edges = []\n for edge in graph.edges(data=True):\n if edge[2][field[0]] == field[1]:\n edges.append(edge)\n else:\n not_consdiered_edges.append(edge)\n \n # add edges that not considered\n for n_edge in not_consdiered_edges:\n outputGraph.add_edge(n_edge[0], n_edge[1], Ind= edgeId, \\\n coordinates = n_edge[2]['coordinates'])\n for attr in attrs:\n attrDict[attr][n_edge[0], n_edge[1]] = n_edge[2][attr]\n edgeId+=1\n \n # use edges that 
considered\n for edge in edges:\n intersectedNodeDict = {}\n for iNode in intersectedNodes:\n # excludes the two end points\n for ind, vertex in enumerate(edge[2]['coordinates'][:-1]):\n if ind == 0:\n pass\n else:\n if tuple(vertex) == iNode:\n intersectedNodeDict[ind] = vertex\n \n coordinatesList = []\n coordinatesList.append(list(edge[0]))\n for key in sorted(intersectedNodeDict.keys()):\n coordinatesList.append(intersectedNodeDict[key])\n coordinatesList.append(list(edge[1]))\n \n startVertex = None\n for ind, coord in enumerate(coordinatesList):\n outputGraph.add_node(tuple(coord))\n if ind == 0:\n startVertex = coord\n else:\n endVertex = coord\n startInd = edge[2]['coordinates'].index(startVertex)\n endInd = edge[2]['coordinates'].index(endVertex)\n \n vertexList = None\n if edge[2]['coordinates'][startInd:endInd+1] == []:\n vertexList = edge[2]['coordinates'][endInd:startInd+1]\n else:\n vertexList = edge[2]['coordinates'][startInd:endInd+1]\n \n # add edge\n outputGraph.add_edge(tuple(startVertex), tuple(endVertex),\\\n Ind= edgeId, coordinates= vertexList)\n # add attributes\n \n for attr in attrs:\n attrDict[attr][tuple(startVertex), tuple(endVertex)] \\\n =edge[2][attr]\n \n startVertex = endVertex\n edgeId +=1\n \n for attr in attrs:\n nx.set_edge_attributes(outputGraph, attr, attrDict[attr])\n \n graphCalculate.addDistance(outputGraph)\n return outputGraph", "def test_methylcyclohexane(self):\n def draw(image: ShapeImage):\n image.add_regular_hexagon(\n 100, start_coord=(400, 400)\n )\n image.add_line((487, 350), (487, 250))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[487, 350]],\n [[574, 400]],\n [[574, 500]],\n [[487, 550]],\n [[400, 500]],\n # Methyl group\n [[487, 250]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 487, 350]],\n [[487, 350, 574, 400]],\n [[574, 400, 574, 500]],\n [[574, 500, 487, 550]],\n [[487, 550, 400, 500]],\n [[400, 500, 400, 400]],\n # To methyl group\n [[487, 350, 487, 250]]\n ])\n )", "def simplified_graph(input_smiles, iso_flag=True):\n mol = Chem.MolFromSmiles(input_smiles)\n for atom in mol.GetAtoms():\n atom.SetFormalCharge(0)\n if atom.IsInRing():\n atom.SetAtomicNum(6)\n atom.SetIsAromatic(False)\n for bond in atom.GetBonds():\n bond.SetBondType(Chem.BondType.SINGLE)\n if not iso_flag:\n atom.SetChiralTag(Chem.ChiralType.CHI_OTHER)\n return Chem.MolToSmiles(mol, isomericSmiles=True)", "def trivial_representation(self, side=\"twosided\"):\n S = self.basis().keys()\n return S.trivial_representation(self.base_ring())", "def vsepr_build_correct_answer(geometry, atoms):\r\n return {'geometry': geometry, 'atoms': atoms}", "def test_1_2_dimethylcyclohexane(self):\n def draw(image: ShapeImage):\n image.add_regular_hexagon(\n 100, start_coord=(400, 400)\n )\n image.add_line((487, 350), (487, 250))\n image.add_line((574, 400), (661, 350))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[487, 350]],\n [[574, 400]],\n [[574, 500]],\n [[487, 550]],\n [[400, 500]],\n # Methyl groups\n [[487, 250]],\n [[661, 350]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 487, 350]],\n [[487, 350, 574, 400]],\n [[574, 400, 574, 500]],\n [[574, 500, 487, 550]],\n [[487, 550, 400, 500]],\n [[400, 500, 400, 400]],\n # To methyl groups\n [[487, 350, 487, 250]],\n [[574, 400, 661, 350]]\n ])\n )", "def representation_2_txyz(self: Q, representation: str = \"\") -> List:\n\n symbolic = False\n\n if (\n hasattr(self.t, \"free_symbols\")\n 
or hasattr(self.x, \"free_symbols\")\n or hasattr(self.y, \"free_symbols\")\n or hasattr(self.z, \"free_symbols\")\n ):\n symbolic = True\n\n if representation == \"\":\n box_t, box_x, box_y, box_z = self.t, self.x, self.y, self.z\n\n elif representation == \"polar\":\n amplitude, theta_x, theta_y, theta_z = self.t, self.x, self.y, self.z\n\n theta = (theta_x ** 2 + theta_y ** 2 + theta_z ** 2) ** (1 / 2)\n\n if theta == 0:\n box_t = self.t\n box_x, box_y, box_z = 0, 0, 0\n\n else:\n if symbolic:\n box_t = amplitude * sp.cos(theta)\n box_x = self.x / theta * amplitude * sp.sin(theta)\n box_y = self.y / theta * amplitude * sp.sin(theta)\n box_z = self.z / theta * amplitude * sp.sin(theta)\n else:\n box_t = amplitude * math.cos(theta)\n box_x = self.x / theta * amplitude * math.sin(theta)\n box_y = self.y / theta * amplitude * math.sin(theta)\n box_z = self.z / theta * amplitude * math.sin(theta)\n\n elif representation == \"spherical\":\n box_t, R, theta, phi = self.t, self.x, self.y, self.z\n\n if symbolic:\n box_x = R * sp.sin(theta) * sp.cos(phi)\n box_y = R * sp.sin(theta) * sp.sin(phi)\n box_z = R * sp.cos(theta)\n else:\n box_x = R * math.sin(theta) * math.cos(phi)\n box_y = R * math.sin(theta) * math.sin(phi)\n box_z = R * math.cos(theta)\n\n elif representation == \"hyperbolic\":\n u, v, theta, phi = self.t, self.x, self.y, self.z\n\n if symbolic:\n box_t = v * sp.exp(u)\n box_x = v * sp.exp(-u)\n box_y = v * sp.sin(theta) * sp.sin(phi)\n box_z = v * sp.cos(theta)\n\n else:\n box_t = v * math.exp(u)\n box_x = v * math.exp(-u)\n box_y = v * math.sin(theta) * sp.sin(phi)\n box_z = v * math.cos(theta)\n\n else:\n raise ValueError(f\"Oops, don't know representation: representation\")\n\n return [box_t, box_x, box_y, box_z]", "def __init__(self, workplane, measures):\n\n cq.Workplane.bracket = utilities.bracket\n cq.Workplane.transformedWorkplane = utilities.transformedWorkplane\n cq.Workplane.bolt = utilities.bolt\n cq.Workplane.cutEachAdaptive = utilities.cutEachAdaptive\n\n self.model = workplane\n self.debug = False\n self.measures = measures\n m = self.measures\n\n # The bracket lengths are measured at the outside, but the construction actually uses a \n # central cuboid block with two attached brackets. Adapting the measures accordingly.\n m.center_block = Measures(\n # Naming is as seen from the horizontal leg.\n width = max(m.horizontal_leg.width, m.vertical_leg.width),\n depth = m.vertical_leg.height,\n height = m.horizontal_leg.height\n )\n m.horizontal_leg.depth -= m.center_block.depth\n m.vertical_leg.depth -= m.center_block.height\n\n # Create hole specs which combine the other hole measures in the format expected by bolthole().\n m.horizontal_leg.hole_specs = [\n {\n \"diameter\": m.horizontal_leg.hole_diameters[i] if isinstance(m.horizontal_leg.hole_diameters, list) else m.horizontal_leg.hole_diameters,\n \"clamp_length\": m.horizontal_leg.clamp_lengths[i] if isinstance(m.horizontal_leg.clamp_lengths, list) else m.horizontal_leg.clamp_lengths, \n \"nuthole_size\": m.horizontal_leg.nuthole_sizes[i] if isinstance(m.horizontal_leg.nuthole_sizes, list) else m.horizontal_leg.nuthole_sizes, \n \"nuthole_depth\": 1.1 * m.vertical_leg.depth # Just choose something large enough for cutting. 
\n }\n for i in range(m.horizontal_leg.hole_count)\n ]\n m.vertical_leg.hole_specs = [\n {\n \"diameter\": m.vertical_leg.hole_diameters[i] if isinstance(m.vertical_leg.hole_diameters, list) else m.vertical_leg.hole_diameters,\n \"clamp_length\": m.vertical_leg.clamp_lengths[i] if isinstance(m.vertical_leg.clamp_lengths, list) else m.vertical_leg.clamp_lengths, \n \"nuthole_size\": m.vertical_leg.nuthole_sizes[i] if isinstance(m.vertical_leg.nuthole_sizes, list) else m.vertical_leg.nuthole_sizes, \n \"nuthole_depth\": 1.1 * m.horizontal_leg.depth # Just choose something large enough for cutting. \n }\n for i in range(m.vertical_leg.hole_count)\n ]\n\n # TODO: Initialize missing measures with defaults.\n\n self.build()", "def _make_circuit_instructions(n_qubits, depth, type_circuit):\n\n if type_circuit in [0, 1, 2]:\n\n # if type_circuit == 1:\n # if depth > 8:\n # raise ValueError(\n # \"For type-1 circuits, only at most depth=8 allowed!\"\n # )\n\n # define rotations for circuit in each layer, 0: identity, 1:X, 2:Y 3:Z\n ini_pauli = np.zeros([depth, n_qubits], dtype=int)\n\n # set first and second layer, rest comes later\n ini_pauli[0, :] = 2 # y rotation\n if depth > 1:\n ini_pauli[1, :] = 3 # z rotation\n\n # construct natural parameterized circuit\n # gives which type of entangling gates at each layer -- first entry is\n # first qubit index, second is second qubit index, third entry is type\n # of entangling gate\n entangling_gate_index_list = [[] for i in range(depth)]\n orderList = []\n for i in range(n_qubits//2):\n if i % 2 == 0:\n orderList.append(i//2)\n else:\n orderList.append((n_qubits-i)//2)\n\n if n_qubits > 1:\n shiftList = [orderList[0]]\n else:\n shiftList = []\n for i in range(1, n_qubits//2):\n shiftList.append(orderList[i])\n shiftList += shiftList[:-1]\n\n # this list gives which entangling gates are applied in which layer\n if type_circuit == 0:\n # deep natural PQC, includes non-nearest neighbor gates\n for j in range(min(len(shiftList), int(np.ceil(depth/2))-1)):\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n elif type_circuit == 1:\n # only do 2 entangling layers at max, and only do gates with\n # nearest neighbor and no ring\n for j in range(min(len(shiftList), 3)):\n if j == 0:\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n elif (j == 1 or j == 2):\n # exclude ring gate and gate 0,1 on third entangling layer\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(1, n_qubits//2)\n ]\n\n elif type_circuit == 2:\n # only do 3 regular entangling layers in a ring topology, then two\n # more phase gates with next-nearst neighbor, which requires one\n # swap. 
This adds 4 more parameters\n for j in range(min(len(shiftList), 3)):\n entangling_gate_index_list[1+2*j] = [\n [2*i, (2*i+1+2*shiftList[j]) % n_qubits, 0]\n for i in range(n_qubits//2)\n ]\n # entangling_gate_index_list[1+2*3]=[[0,n_qubits-1,1],[0,1,0],[n_qubits-1,n_qubits-2,0]]\n # entangling_gate_index_list[1+2*3]=[[0,n_qubits-1,1],[0,1,0],[n_qubits-1,n_qubits-2,0]]\n entangling_gate_index_list[1+2*3] = [\n [n_qubits-1, 1, 0],\n [0, n_qubits-2, 0]\n ]\n\n for i in range(len(entangling_gate_index_list)-1):\n if len(entangling_gate_index_list[i]) > 0:\n for j in range(len(entangling_gate_index_list[i])):\n qubit_index = entangling_gate_index_list[i][j][0]\n ini_pauli[i+1, qubit_index] = 2\n if i+2 < depth:\n ini_pauli[i+2, qubit_index] = 3\n\n elif type_circuit == 3:\n\n ini_pauli = np.ones([depth, n_qubits], dtype=int)*2\n\n for i in range(1, depth, 2):\n ini_pauli[i, :] = 3\n\n if n_qubits % 2 == 0:\n # even qubits ALT circuit needs to get rid of boundary rotations at\n # even entangling layers\n for i in range(4, depth, 4):\n ini_pauli[i, 0] = 0\n ini_pauli[i, -1] = 0\n if i+1 < depth:\n ini_pauli[i+1, 0] = 0\n ini_pauli[i+1, -1] = 0\n else:\n # for odd qubits, get rid of boundary either on top or bottom qubit\n for i in range(2, depth, 4):\n ini_pauli[i, -1] = 0\n if i+1 < depth:\n ini_pauli[i+1, -1] = 0\n for i in range(4, depth, 4):\n ini_pauli[i, 0] = 0\n if i+1 < depth:\n ini_pauli[i+1, 0] = 0\n\n # CNOT entangling gates\n entangling_gate_index_list = [[] for i in range(depth)]\n counter = 0\n # third index indicates type of entangling gate\n for k in range(1, depth-1, 2):\n\n # place entangler every second layer, do not place any at last\n if counter % 2 == 0:\n # even layer\n entangling_gate_index_list[k] = [\n [2*j, 2*j+1, 1] for j in range(n_qubits//2)\n ]\n else:\n # odd layer\n entangling_gate_index_list[k] = [\n [2*j+1, 2*j+2, 1] for j in range((n_qubits-1)//2)\n ]\n counter += 1\n\n else:\n raise ValueError('type_circuit='+f'{type_circuit}'+' not recognised.')\n\n return ini_pauli, entangling_gate_index_list", "def build_topology(self):\n# errstr = \"build_topology() is not implemented.\\n\"\n# errstr += textwrap.dedent(self.build_topology.__doc__)\n# raise NotImplementedError(errstr)\n pass # May be a 1-compartment neuron. No need to abstract. 
", "def make_internal_coords(o_molsys, params=None):\n if params is None:\n params = op.Params\n optimize_log = logging.getLogger(__name__)\n optimize_log.debug(\"\\t Adding internal coordinates to molecular system\")\n\n # Use covalent radii to determine bond connectivity.\n connectivity = addIntcos.connectivity_from_distances(o_molsys.geom, o_molsys.Z)\n optimize_log.debug(\"Connectivity Matrix\\n\" + print_mat_string(connectivity))\n\n if params.frag_mode == \"SINGLE\":\n # Make a single, supermolecule.\n o_molsys.consolidate_fragments() # collapse into one frag (if > 1)\n o_molsys.split_fragments_by_connectivity() # separate by connectivity\n # increase connectivity until all atoms are connected\n o_molsys.augment_connectivity_to_single_fragment(connectivity)\n o_molsys.consolidate_fragments() # collapse into one frag\n\n if params.opt_coordinates in [\"REDUNDANT\", \"BOTH\"]:\n o_molsys.fragments[0].add_intcos_from_connectivity(connectivity)\n\n if params.opt_coordinates in [\"CARTESIAN\", \"BOTH\"]:\n o_molsys.fragments[0].add_cartesian_intcos()\n\n elif params.frag_mode == \"MULTI\":\n # if provided multiple frags, then we use these.\n # if not, then split them (if not connected).\n if o_molsys.nfragments == 1:\n o_molsys.split_fragments_by_connectivity()\n\n if o_molsys.nfragments > 1:\n addIntcos.add_dimer_frag_intcos(o_molsys)\n # remove connectivity so that we don't add redundant coordinates\n # between fragments\n o_molsys.purge_interfragment_connectivity(connectivity)\n\n if params.opt_coordinates in [\"REDUNDANT\", \"BOTH\"]:\n for iF, F in enumerate(o_molsys.fragments):\n C = np.ndarray((F.natom, F.natom))\n C[:] = connectivity[o_molsys.frag_atom_slice(iF), o_molsys.frag_atom_slice(iF)]\n F.add_intcos_from_connectivity(C)\n\n if params.opt_coordinates in [\"CARTESIAN\", \"BOTH\"]:\n for F in o_molsys.fragments:\n F.add_cartesian_intcos()\n\n addIntcos.add_constrained_intcos(o_molsys) # make sure these are in the set\n return", "def generateEqns(\n self, Simplify=False, Lambdify=True, FloatingBase=False,\n backend=\"numpy\"\n ):\n self.joint_syms = OrderedDict()\n self.global_syms = {}\n self.global_syms[\"Jname2q\"] = {}\n self.global_syms[\"q2Jname\"] = {}\n _Lname2parentJname, _Jname2parentJname = self._preprocess_heirarchy(\n FloatingBase\n )\n self.global_syms[\"Lname2parentJname\"] = _Lname2parentJname\n self.global_syms[\"Jname2parentJname\"] = _Jname2parentJname\n\n # record the number of degrees of freedom\n degrees_of_freedom = sum(\n [self.Joints[jnt][\"type\"] != \"fixed\" for jnt in self.Joints]\n )\n self.global_syms[\"dof\"] = degrees_of_freedom\n\n # joint positions q\n self.global_syms[\"q\"] = [\n sp.Symbol(f\"{self.sym_prefix}q{j}\")\n for j in range(degrees_of_freedom)\n ]\n\n # joint velocities dq\n self.global_syms[\"dq\"] = [\n sp.Symbol(f\"{self.sym_prefix}dq{j}\")\n for j in range(degrees_of_freedom)\n ]\n\n # joint user forces tau\n self.global_syms[\"qTau\"] = [\n sp.Symbol(f\"{self.sym_prefix}qTau{j}\")\n for j in range(degrees_of_freedom)\n ]\n\n # [x,y,z] translations (meaning relative to useage)\n self.global_syms[\"xyz\"] = [\n sp.Symbol(f\"{self.sym_prefix}x\"),\n sp.Symbol(f\"{self.sym_prefix}y\"),\n sp.Symbol(f\"{self.sym_prefix}z\"),\n ]\n zero_xyz = [(s, 0) for s in self.global_syms[\"xyz\"]]\n\n # [Wx,Wy,Wz] rotations (meaning relative to useage)\n self.global_syms[\"Wxyz\"] = [\n sp.Symbol(f\"{self.sym_prefix}Wx\"),\n sp.Symbol(f\"{self.sym_prefix}Wy\"),\n sp.Symbol(f\"{self.sym_prefix}Wz\"),\n ]\n zero_Wxyz = [(s, 0) for s in 
self.global_syms[\"Wxyz\"]]\n\n # translational and rotational accelerations [Ax,Ay,Az,AWx,AWy,AWz]\n # (meaning relative to useage)\n self.global_syms[\"extAccel\"] = [\n sp.Symbol(f\"{self.sym_prefix}Ax\"),\n sp.Symbol(f\"{self.sym_prefix}Ay\"),\n sp.Symbol(f\"{self.sym_prefix}Az\"),\n sp.Symbol(f\"{self.sym_prefix}AWx\"),\n sp.Symbol(f\"{self.sym_prefix}AWy\"),\n sp.Symbol(f\"{self.sym_prefix}AWz\"),\n ]\n\n #\n # create terms for each joint/link combo in the local isolated\n # reference frame (terms that need no other connected joint terms)\n #\n q_indx = 0\n for j_name in self.Joints:\n joint = self.Joints[j_name]\n if joint[\"child\"] not in self.Links:\n raise RuntimeError(\n f'child ({joint[\"child\"]}) of joint({j_name})'\n ' did not exist. Must create a link with this name.')\n clink = self.Links[joint[\"child\"]]\n joint_type = joint[\"type\"]\n\n # initialize an eqn dict for this joint (and link)\n self.joint_syms[j_name] = {}\n E = self.joint_syms[j_name]\n\n # joint (and link) mass\n E[\"mass\"] = clink[\"mass\"]\n\n # joint (and link) specific inertia matrix\n Inertia = sp.Matrix(clink[\"inertia\"])\n if Inertia.shape == (3, 3):\n E[\"M\"] = sp.Matrix(\n [\n [clink[\"mass\"], 0, 0, 0, 0, 0],\n [0, clink[\"mass\"], 0, 0, 0, 0],\n [0, 0, clink[\"mass\"], 0, 0, 0],\n [0, 0, 0, Inertia[0, 0], Inertia[0, 1], Inertia[0, 2]],\n [0, 0, 0, Inertia[1, 0], Inertia[1, 1], Inertia[1, 2]],\n [0, 0, 0, Inertia[2, 0], Inertia[2, 1], Inertia[2, 2]],\n ]\n )\n elif Inertia.shape == (6, 6):\n E[\"M\"] = Inertia\n else:\n raise ValueError(\n f\"inertia shape must be 3x3 or 6x6, not {Inertia.shape}\")\n\n # re-record (for convenience) the local q and dq, joint and joint\n # velocity terms, in their joint symbol containers\n if joint_type == \"fixed\":\n E[\"q\"] = 0\n E[\"dq\"] = 0\n E[\"qTau\"] = 0\n else:\n E[\"q\"] = self.global_syms[\"q\"][q_indx]\n E[\"dq\"] = self.global_syms[\"dq\"][q_indx]\n E[\"qTau\"] = self.global_syms[\"qTau\"][q_indx]\n q_indx += 1\n self.global_syms[\"q2Jname\"][E[\"q\"]] = j_name\n self.global_syms[\"Jname2q\"][j_name] = E[\"q\"]\n\n # process each joint type and apply the relevant q to a rpy,xyz\n # transform\n E[\"q_rpy\"] = sp.Matrix([0, 0, 0])\n E[\"q_xyz\"] = sp.Matrix([0, 0, 0])\n if joint_type == \"revolute\" or joint_type == \"continuous\":\n E[\"q_rpy\"] = E[\"q\"] * sp.Matrix(joint[\"axis_xyz\"])\n elif joint_type == \"prismatic\":\n E[\"q_xyz\"] = E[\"q\"] * sp.Matrix(joint[\"axis_xyz\"])\n elif joint_type == \"fixed\":\n pass\n elif joint_type == \"floating\":\n raise ValueError(\n \"no direct floating joint support (should have been\" +\n \" replaced by 3 prismatic, 3 continuous)\"\n )\n elif joint_type == \"planar\":\n raise ValueError(\n \"no direct planar joint support (should have been\" +\n \" replaced by 2 prismatic)\"\n )\n\n # creating homogeneous transformation matrix T, in joint and mass\n # spaces for various tranforms.\n #\n # The chain of transformations is diagramed as:\n # ... parent joint --> joint origin --> joint actuated --> ... 
etc.\n # actuated | |\n # --> parent link --> link\n #\n\n # parent joint's actuateed frame to joint's actuated frame\n E[\"Tlocal_joint\"] = rigmech.T(\n joint[\"origin_xyz\"], joint[\"origin_rpy\"]\n ) * rigmech.T(E[\"q_xyz\"], E[\"q_rpy\"])\n\n # joint's actuated frame to the child link's inertial frame\n E[\"T_joint2cLink\"] = rigmech.T(\n clink[\"origin_xyz\"], clink[\"origin_rpy\"])\n\n # parent joint's actuateed frame to child link's frame\n E[\"Tlocal_link\"] = E[\"Tlocal_joint\"] * E[\"T_joint2cLink\"]\n\n # inverse transformations\n E[\"Tlocal_joint_inv\"] = rigmech.T_inv(E[\"Tlocal_joint\"])\n E[\"Tlocal_link_inv\"] = rigmech.T_inv(E[\"Tlocal_link\"])\n\n print(f\"rigmech: Calculated {j_name} isolated.\")\n #\n # create non-isolated terms for each joint (terms that require\n # information about other connected joints)\n #\n\n for j_name in self.Joints:\n E = self.joint_syms[j_name]\n\n # T: transforms from base to joint or mass, for forward transform\n # calculations\n E[\"T_joint\"] = self.T_joint_chain(j_name)\n E[\"T_link\"] = E[\"T_joint\"] * E[\"T_joint2cLink\"]\n\n # T_inv: transforms for forward inverse transform calculations\n E[\"T_inv_joint\"] = rigmech.T_inv(E[\"T_joint\"])\n E[\"T_inv_link\"] = rigmech.T_inv(E[\"T_link\"])\n\n # xyz: translation from base to joint or link frame\n E[\"xyz_joint\"] = rigmech.applyTx(\n E[\"T_joint\"], sp.Matrix(self.global_syms[\"xyz\"]))\n E[\"xyz_link\"] = rigmech.applyTx(\n E[\"T_link\"], sp.Matrix(self.global_syms[\"xyz\"]))\n E[\"xyz_coj\"] = E[\"xyz_joint\"].subs(zero_xyz) # center of joint\n E[\"xyz_com\"] = E[\"xyz_link\"].subs(zero_xyz) # center of mass\n\n # Wxyz: rotation from base to joint or link frame\n E[\"W\"] = self.W_joint_chain(j_name)\n E[\"Wxyz_joint\"] = rigmech.applyTw(\n E[\"T_joint\"], E[\"W\"]+sp.Matrix(self.global_syms[\"Wxyz\"]))\n E[\"Wxyz_link\"] = rigmech.applyTw(\n E[\"T_link\"], E[\"W\"]+sp.Matrix(self.global_syms[\"Wxyz\"]))\n E[\"Wxyz_coj\"] = E[\"Wxyz_joint\"].subs(zero_Wxyz) # coj orientation\n E[\"Wxyz_com\"] = E[\"Wxyz_link\"].subs(zero_Wxyz) # com orientation\n\n # calculate the d[x(i) y(i) z(i) Wx(i) Wy(i) Wz(i)]/dq(j)\n # a.k.a. 
jacobian components for the current joint/link frame\n # (i) with respect to all the other joints (j) to form a\n # complete Jacobian matrix\n E[\"J_joint\"] = sp.Matrix()\n E[\"J_link\"] = sp.Matrix()\n for jnm in self.Joints:\n jnm_q = self.joint_syms[jnm][\"q\"]\n if jnm_q is not 0:\n\n # joints:\n dxyz_dq__joint = E[\"xyz_joint\"].diff(jnm_q)\n dWxyz_dq__joint = E[\"Wxyz_joint\"].diff(jnm_q)\n new_row = dxyz_dq__joint.col_join(dWxyz_dq__joint)\n E[\"J_joint\"] = E[\"J_joint\"].row_join(new_row)\n\n # links:\n dxyz_dq__link = E[\"xyz_link\"].diff(jnm_q)\n dWxyz_dq__link = E[\"Wxyz_link\"].diff(jnm_q)\n new_row = dxyz_dq__link.col_join(dWxyz_dq__link)\n E[\"J_link\"] = E[\"J_link\"].row_join(new_row)\n\n # evaluate the link frame Jacobian at xyz = [0,0,0] and\n # Wxyz = [0,0,0] to get the center of mass (COM) Jacobian\n E[\"J_com\"] = E[\"J_link\"].subs(zero_xyz + zero_Wxyz)\n # evaluate the joint frame Jacobian at xyz = [0,0,0] and\n # Wxyz = [0,0,0] to get the center of joint (COJ) Jacobian\n E[\"J_coj\"] = E[\"J_joint\"].subs(zero_xyz + zero_Wxyz)\n\n # Mq: joint space inertia matrix of single joint\n E[\"Mq\"] = E[\"J_com\"].T * E[\"M\"] * E[\"J_com\"]\n\n # qFext: joint space matrix of the forces due to external\n # accelerations (such as gravity) on single joint\n E[\"qFext\"] = E[\"J_com\"].T * E[\"M\"] * \\\n sp.Matrix(self.global_syms[\"extAccel\"])\n\n print(f\"rigmech: Calculated {j_name} non-isolated.\")\n\n #\n # create terms common to entire mechanism\n #\n\n # Mq: joint space inertia matrix of entire mechanism\n self.global_syms[\"Mq\"] = sp.zeros(degrees_of_freedom)\n for j_name in self.Joints:\n self.global_syms[\"Mq\"] += self.joint_syms[j_name][\"Mq\"]\n\n # qFext: joint space matrix of the forces due to external\n # accelerations (such as gravity) on entire mechanism\n self.global_syms[\"qFext\"] = sp.zeros(degrees_of_freedom, 1)\n for j_name in self.Joints:\n self.global_syms[\"qFext\"] += self.joint_syms[j_name][\"qFext\"]\n\n # qFrict: joint friction in a convenient list\n self.global_syms[\"qFrict\"] = [\n self.Joints[jnt][\"friction\"]\n for jnt in self.Joints\n if not self.joint_syms[jnt][\"q\"] is 0\n ]\n\n # xyz_com: xyz center of mass of entire mechanism\n total_mass = 0.0\n weighted_mass = sp.Matrix([0, 0, 0])\n for j_name in self.Joints:\n E = self.joint_syms[j_name]\n total_mass += E[\"mass\"]\n weighted_mass += E[\"xyz_com\"] * E[\"mass\"]\n self.global_syms[\"xyz_com\"] = weighted_mass / total_mass\n self.global_syms[\"mass\"] = total_mass\n\n # Cq(q,dq) joint space Coriolis matrix (coriolis and centrifugal terms)\n # of entire mechanism\n i_max, j_max = self.global_syms[\"Mq\"].shape\n Mq = self.global_syms[\"Mq\"]\n q = self.global_syms[\"q\"]\n dq = self.global_syms[\"dq\"]\n Cq = sp.zeros(i_max, j_max)\n for k in range(len(q)):\n for i in range(i_max):\n for j in range(i_max):\n if not dq[k] is 0:\n dmij_dqk = 0 if q[k] is 0 else Mq[i, j].diff(q[k])\n dmik_dqj = 0 if q[j] is 0 else Mq[i, k].diff(q[j])\n dmkj_dqi = 0 if q[i] is 0 else Mq[k, j].diff(q[i])\n Cq[i, j] += (dmij_dqk + dmik_dqj - dmkj_dqi) * dq[k]\n Cq = 0.5 * Cq\n self.global_syms[\"Cq\"] = Cq\n\n # forces due to coriolis matrix in joint space\n self.global_syms[\"qFCoriolis\"] = Cq * sp.Matrix(dq)\n\n print(f\"rigmech: Calculated global_syms.\")\n\n if Simplify:\n print(f\"rigmech: starting simplify()\")\n self.simplify()\n\n if Lambdify:\n print(f\"rigmech: starting lambdify()\")\n self.lambdify(backend)\n\n self.global_syms[\"limits_upper\"] = \\\n np.array([\n [jnt.get('limit_upper', 
np.Inf)]\n for jnt in self.Joints.values()])\n self.global_syms[\"limits_lower\"] = \\\n np.array([\n [jnt.get('limit_lower', np.NINF)]\n for jnt in self.Joints.values()])\n\n print(f\"rigmech: done\")\n\n return self.joint_syms, self.global_syms", "def polySoftEdge(*args, angle: Union[float, bool]=30, caching: bool=True, constructionHistory:\n bool=True, name: AnyStr=\"\", nodeState: Union[int, bool]=0, worldSpace:\n bool=True, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr,\n Any]:\n pass", "def ops2alg(ops):\n return Model(cardinality=len(ops[0]), \n operations=dict([\"h\"+str(i),list(ops[i])] for i in range(len(ops))))", "def test_compound_stiffened_isection():\n uc = steel_sections.i_section(d=400, b=400, t_f=25, t_w=25, r=30, n_r=8)\n plate1 = (\n sections.rectangular_section(b=500, d=10).align_center(uc).align_to(uc, \"top\")\n )\n plate2 = (\n sections.rectangular_section(b=500, d=10)\n .align_center(uc)\n .align_to(uc, \"bottom\")\n )\n geom = uc + plate1 + plate2\n\n new_geom = geom.offset_perimeter(-9)\n new_geom.create_mesh([100])\n section = Section(new_geom)\n\n new_geom = geom.offset_perimeter(-10)\n new_geom.create_mesh([100])\n section = Section(new_geom)\n\n new_geom = geom.offset_perimeter(-11)\n new_geom.create_mesh([100])\n section = Section(new_geom)", "def convertor(geometry, method=\"wgs2gcj\"):\n if geometry['type'] == 'Point':\n coords = geometry['coordinates']\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'LineString' or geometry['type'] == 'MutliPoint':\n coordinates = geometry['coordinates']\n for coords in coordinates:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'Polygon' or geometry['type'] == 'MultiLineString':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for coords in rings:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'MultiPolygon':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for lines in rings:\n for coords in lines:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n return geometry", "def expression(component, anonymous_symbols=False):\n # <x> in set() is faster than list\n seen = set()\n # An input can output into multiple components\n # This maps inputs -> symbols\n component_dict = {}\n\n def recursive_expression(c):\n \"\"\"\n Recursively creates an expression from a component.\n\n his function raises RecursionError if any component is self referencing.\n \"\"\"\n if c in (None, True, False):\n # The component is not connected to anything\n # Act as if they are connected to an input\n return boolean.Symbol(None)\n elif c in seen:\n raise RecursionError(\n \"The logic circuit is self referencing and cannot be converted into a boolean expression\")\n\n if not isinstance(c, Input):\n # Inputs can appear twice in the circuit\n seen.add(c)\n\n if isinstance(c, Input):\n if c in component_dict.keys():\n return component_dict[c]\n else:\n component_dict[c] = boolean.Symbol(None)\n return component_dict[c]\n elif isinstance(c, (Output, Wire)):\n # Wires don't matter\n return recursive_expression(c.input)\n elif isinstance(c, Gate):\n subs_dict = {}\n for k, v in c.inputs.items():\n subs_dict[k] = recursive_expression(v)\n return c.expression.subs(subs_dict, eval=False)\n\n if anonymous_symbols:\n return recursive_expression(component)\n else:\n def letters_generator():\n \"\"\"\n Returns a generator that outputs A^n->Z^n for n->inf.\n Where 
<letter>^n = <letter><letter>^(n-1)\n \"\"\"\n def multiletters(seq):\n for n in itertools.count(1):\n for s in itertools.product(seq, repeat=n):\n yield \"\".join(s)\n letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n return multiletters(letters)\n\n expression = recursive_expression(component)\n subs_dict = {}\n g = letters_generator()\n for s in expression.symbols:\n subs_dict[s] = boolean.Symbol(g.__next__())\n\n return expression.subs(subs_dict, eval=False)", "def get_symmetric_system(self):\n W = DynamicalSystem(lambda x:1.0-self.f1(1.0-x),\n lambda x:1.0-self.f0(1.0-x))\n W.set_rho(1.0-self.rho)\n return W", "def SO4_circuit(a_alpha, a_theta, a_beta, b_alpha, b_theta, b_beta):\n # return np.kron(S1_inv, I2) @ np.kron(I2, S1_inv) @ np.kron(I2, R1_inv) @ CNOT2 \\\n # @ np.kron(I2, R_z(b_beta)) @ np.kron(I2, R_y(b_theta)) @ np.kron(I2, R_z(b_alpha)) \\\n # @ np.kron(R_z(a_beta), I2) @ np.kron(R_y(a_theta), I2) @ np.kron(R_z(a_alpha), I2) \\\n # @ CNOT2 @ np.kron(I2, R1) @ np.kron(I2, S1) @ np.kron(S1, I2)\n\n return np.linalg.inv(magic_gate) \\\n @ np.kron(I2, R_z(b_beta)) @ np.kron(I2, R_y(b_theta)) @ np.kron(I2, R_z(b_alpha)) \\\n @ np.kron(R_z(a_beta), I2) @ np.kron(R_y(a_theta), I2) @ np.kron(R_z(a_alpha), I2) \\\n @ magic_gate", "def format_molecule_for_nwchem(self):\n from collections import defaultdict\n\n self.update_geometry()\n factor = 1.0 \n\n #text = 'geometry units %s\\n' %(self.PYunits)\n text = 'geometry noautosym nocenter units %s\\n' %(self.PYunits) # No reorienting input geometry \n\n # append atoms and coordentries\n for i in range(self.natom()):\n [x, y, z] = self.atoms[i].compute()\n text += '%-2s %17.12f %17.12f %17.12f\\n' % ((self.symbol(i) if self.Z(i) else \"GH\"), \\\n x * factor, y * factor, z * factor)\n if (self.symmetry_from_input() == None):\n text += 'symmetry c1\\n'\n text += 'end\\n' \n else:\n text += 'symmetry %s\\n' %(self.symmetry_from_input())\n text += 'end\\n'\n #print (text)\n\n # prepare molecule keywords to be set as c-side keywords\n options = defaultdict(lambda: defaultdict(dict))\n options['NWCHEM']['NWCHEM_CHARGE']['value'] = self.molecular_charge()\n options['NWCHEM']['NWCHEM_CHARGE']['clobber'] = True\n \n if (self.multiplicity() != 1):\n options['NWCHEM']['NWCHEM_SCF_NOPEN']['value'] = self.multiplicity()-1\n options['NWCHEM']['NWCHEM_DFT_MULT']['value'] = self.multiplicity()\n\n options['NWCHEM']['NWCHEM_SCF_NOPEN']['clobber'] = True\n options['NWCHEM']['NWCHEM_DFT_MULT']['clobber'] = True\n\n return text, options", "def create_noche(y0, y1):\n # Defining the location and colors of each vertex of the shape \n vertices = [\n # positions colors\n -1.0, y0, 0.0, 0.15, 0.16, 0.5,\n 1.0, y0, 0.0, 0.15, 0.16, 0.5,\n 1.0, y1, 0.0, 0.15, 0.16, 0.1,\n -1.0, y1, 0.0, 0.15, 0.16, 0.1]\n\n # Defining connections among vertices\n # We have a triangle every 3 indices specified\n indices = [0, 1, 2,\n 2, 3, 0]\n\n return Shape(vertices, indices)", "def get_operator(self):\n\n Operator = []\n\n '''\n print('Create H - 150 & 220 GHz')\n ope=[]\n for i in range(self.nfreqs):\n ope.append(self.H150.operands[i])\n for i in range(self.nfreqs):\n ope.append(self.H220.operands[i])\n self.Hboth = BlockRowOperator(ope, new_axisin=0)\n self.H=self.Hboth\n '''\n\n\n\n H_qubic = self.qubic.get_operator()\n R_qubic = ReshapeOperator(H_qubic.shapeout, H_qubic.shape[0])\n Operator.append(R_qubic(H_qubic))\n\n H_planck = self.planck.get_operator()\n R_planck = ReshapeOperator(H_planck.shapeout, H_planck.shape[0])\n Operator.append(R_planck(H_planck))\n return 
BlockColumnOperator(Operator, axisout=0)", "def build_triples(x, y, op_str):\n if op_str not in EXPECTED_OPS:\n raise ValueError(f\"{op_str} should be in {EXPECTED_OPS}\")\n\n session = x.session\n shape_x = x.shape\n shape_y = y.shape\n conf = session.config\n min_val = conf.min_value\n max_val = conf.max_value\n\n # TODO: Move this to a library specific file\n a = torch.randint(min_val, max_val, shape_x).long()\n b = torch.randint(min_val, max_val, shape_y).long()\n\n cmd = getattr(operator, op_str)\n c = modulo(cmd(a, b).long(), session)\n\n from sympc.tensor import AdditiveSharingTensor\n\n session_copy = session.get_copy()\n session_copy.config.enc_precision = 0\n\n a_sh = AdditiveSharingTensor(secret=a, session=session_copy)\n b_sh = AdditiveSharingTensor(secret=b, session=session_copy)\n c_sh = AdditiveSharingTensor(secret=c, session=session_copy)\n\n return a_sh, b_sh, c_sh", "def test_decomposed_operator_correct_wires(self, obs, expected):\n dev = qml.device('orquestra.qulacs', wires=3)\n\n res = dev.serialize_operator(obs)\n assert res == expected", "def _build_topology(self):\n\t\t# childSection.connect(parentSection, [parentX], [childEnd])\n\t\tfor i in range(self._axonNodes-1):\n\t\t\tself.node[i].connect(self.mysa[2*i],0,1)\n\t\t\tself.mysa[2*i].connect(self.flut[2*i],0,1)\n\t\t\tself.flut[2*i].connect(self.stin[6*i],0,1)\n\t\t\tself.stin[6*i].connect(self.stin[6*i+1],0,1)\n\t\t\tself.stin[6*i+1].connect(self.stin[6*i+2],0,1)\n\t\t\tself.stin[6*i+2].connect(self.stin[6*i+3],0,1)\n\t\t\tself.stin[6*i+3].connect(self.stin[6*i+4],0,1)\n\t\t\tself.stin[6*i+4].connect(self.stin[6*i+5],0,1)\n\t\t\tself.stin[6*i+5].connect(self.flut[2*i+1],0,1)\n\t\t\tself.flut[2*i+1].connect(self.mysa[2*i+1],0,1)\n\t\t\tself.mysa[2*i+1].connect(self.node[i+1],0,1)", "def twod_shape_2_homogeneous_matrix(twod_shape): \n homogenous_4d_array =[] \n for i in range(0, len(twod_shape), 2): \n new_x = twod_shape[ i] \n new_y = twod_shape[ i + 1] \n new_z = 0.0 \n new_w = 1 \n new_vertex = [new_x, new_y, new_z, new_w] \n homogenous_4d_array.append(new_vertex) \n homogenous_4d_mat = numpy.matrix(homogenous_4d_array) \n return homogenous_4d_mat", "def make_gwcs(shape, galactic=False):\n from gwcs import coordinate_frames as cf\n from gwcs import wcs as gwcs_wcs\n\n rho = np.pi / 3.0\n scale = 0.1 / 3600.0 # 0.1 arcsec/pixel in deg/pix\n\n shift_by_crpix = (models.Shift((-shape[1] / 2) + 1)\n & models.Shift((-shape[0] / 2) + 1))\n\n cd_matrix = np.array([[-scale * np.cos(rho), scale * np.sin(rho)],\n [scale * np.sin(rho), scale * np.cos(rho)]])\n\n rotation = models.AffineTransformation2D(cd_matrix, translation=[0, 0])\n rotation.inverse = models.AffineTransformation2D(\n np.linalg.inv(cd_matrix), translation=[0, 0])\n\n tan = models.Pix2Sky_TAN()\n celestial_rotation = models.RotateNative2Celestial(197.8925, -1.36555556,\n 180.0)\n\n det2sky = shift_by_crpix | rotation | tan | celestial_rotation\n det2sky.name = 'linear_transform'\n\n detector_frame = cf.Frame2D(name='detector', axes_names=('x', 'y'),\n unit=(u.pix, u.pix))\n\n if galactic:\n sky_frame = cf.CelestialFrame(reference_frame=coord.Galactic(),\n name='galactic', unit=(u.deg, u.deg))\n else:\n sky_frame = cf.CelestialFrame(reference_frame=coord.ICRS(),\n name='icrs', unit=(u.deg, u.deg))\n\n pipeline = [(detector_frame, det2sky), (sky_frame, None)]\n\n return gwcs_wcs.WCS(pipeline)", "def elemental_descriptor(A1_ion, A2_ion, B_ion):\n ele_A1 = mg.Element(A1_ion)\n ele_A2 = mg.Element(A2_ion)\n ele_B = mg.Element(B_ion)\n ele_O = mg.Element('O') \n 
# A/B ion oxidation state \n common_oxidation_states_A1 = ele_A1.common_oxidation_states[0]\n common_oxidation_states_A2 = ele_A2.common_oxidation_states[0]\n common_oxidation_states_A = np.mean(common_oxidation_states_A1 + common_oxidation_states_A2)\n common_oxidation_states_B = ele_B.common_oxidation_states[0]\n # ionic radius property\n ionic_radius_A1 = float(str(ele_A1.average_ionic_radius)[:-4])\n ionic_radius_A2 = float(str(ele_A2.average_ionic_radius)[:-4])\n ionic_radius_A = (ionic_radius_A1+ ionic_radius_A2)/2\n ionic_radius_B = float(str(ele_B.average_ionic_radius)[:-4])\n ionic_radius_O = float(str(ele_O.average_ionic_radius)[:-4])\n # Tolerance factor \n TF = (ionic_radius_A + ionic_radius_O)/(np.sqrt(2)*(ionic_radius_B + ionic_radius_O))\n # Octahedral factor\n OF = ionic_radius_B/ionic_radius_O \n # ionic_radius ratios\n ionic_ration_AO = ionic_radius_A / ionic_radius_O\n ionic_ration_BO = ionic_radius_B / ionic_radius_O\n # averaged electronegativity for A and B atoms\n Pauling_electronegativity_A1 = ele_A1.X\n Pauling_electronegativity_A2 = ele_A2.X\n Pauling_electronegativity_A = (Pauling_electronegativity_A1 + Pauling_electronegativity_A2)/2\n Pauling_electronegativity_B = ele_B.X\n Pauling_electronegativity_O = ele_O.X\n # Difference in the electronegativity for A-O and B-O\n Diff_A_O = Pauling_electronegativity_A - Pauling_electronegativity_O\n Diff_B_O = Pauling_electronegativity_B - Pauling_electronegativity_O\n return [common_oxidation_states_A, common_oxidation_states_B, Pauling_electronegativity_A, Pauling_electronegativity_B, TF, OF, ionic_ration_AO, ionic_ration_BO, Diff_A_O, Diff_B_O]", "def build_sym_geom_adjacency(geoms, max_gnn=100):\n global INTERNAL_PARAMETERS\n min_gnn = INTERNAL_PARAMETERS['min_geom_neighbors']\n assert min_gnn < max_gnn, \"Too high minimum number of neighbors\"\n n_pts = geoms.shape[0]\n for n_neighbors in range(min_gnn, max_gnn + 1):\n # find the lowest number of NN s.t. 
the graph is not too disconnected\n C = build_geom_neighbor_graph(geoms, n_neighbors)\n neighbs = C.indices.reshape((n_pts, n_neighbors))\n C = C + C.T\n C.data[:] = 1\n n_comp, _ = sparse.cs_graph_components(C)\n if n_comp == 1:\n print \"# use n_neighbors=%d\" % n_neighbors\n break\n elif n_comp < 1:\n raise ValueError('Bug: n_comp=%d' % n_comp)\n if n_comp > 1:\n print \"# use maximum n_neighbors=%d (%d components)\" % (\n n_neighbors, n_comp)\n return n_comp, C, neighbs", "def get_operator(self):\n distribution = self.get_distribution_operator()\n temp = self.get_unit_conversion_operator()\n aperture = self.get_aperture_integration_operator()\n filter = self.get_filter_operator()\n projection = self.get_projection_operator()\n hwp = self.get_hwp_operator()\n polarizer = self.get_polarizer_operator()\n integ = self.get_detector_integration_operator()\n trans_inst = self.instrument.get_transmission_operator()\n trans_atm = self.scene.atmosphere.transmission\n response = self.get_detector_response_operator()\n\n with rule_manager(inplace=True):\n H = CompositionOperator([\n response, trans_inst, integ, polarizer, hwp * projection,\n filter, aperture, trans_atm, temp, distribution])\n if self.scene == 'QU':\n H = self.get_subtract_grid_operator()(H)\n return H", "def get_modified_noisy_gate(gate_name: str, params: Iterable[ParameterDesignator]) -> Tuple[np.ndarray, str]:\n params = tuple(params)\n if gate_name == \"I\":\n assert params == ()\n return np.eye(2), \"NOISY-I\"\n if gate_name == \"RX\":\n angle, = params\n if np.isclose(angle, np.pi / 2, atol=ANGLE_TOLERANCE):\n return (np.array([[1, -1j],\n [-1j, 1]]) / np.sqrt(2),\n \"NOISY-RX-PLUS-90\")\n elif np.isclose(angle, -np.pi / 2, atol=ANGLE_TOLERANCE):\n return (np.array([[1, 1j],\n [1j, 1]]) / np.sqrt(2),\n \"NOISY-RX-MINUS-90\")\n elif np.isclose(angle, np.pi, atol=ANGLE_TOLERANCE):\n return (np.array([[0, -1j],\n [-1j, 0]]),\n \"NOISY-RX-PLUS-180\")\n elif np.isclose(angle, -np.pi, atol=ANGLE_TOLERANCE):\n return (np.array([[0, 1j],\n [1j, 0]]),\n \"NOISY-RX-MINUS-180\")\n if gate_name == \"RY\":\n angle, = params\n if np.isclose(angle, np.pi, atol=ANGLE_TOLERANCE):\n return (np.array([[0, -1],\n [1, 0]]),\n \"NOISY-RY-PLUS-180\")\n elif np.isclose(angle, -np.pi, atol=ANGLE_TOLERANCE):\n return (np.array([[0, 1],\n [-1, 0]]),\n \"NOISY-RY-MINUS-180\")\n \n \n elif gate_name == \"CZ\":\n assert params == ()\n return np.diag([1, 1, 1, -1]), \"NOISY-CZ\"\n \n if \"RX-\" in gate_name: \n matrix, new_name = get_combined_gate_representation_for_noise_model(gate_name)\n return matrix, \"NOISY-\" + gate_name \n if \"RZ-\" in gate_name:\n matrix, new_name = get_combined_gate_representation_for_noise_model(gate_name)\n return matrix, \"NOISY-\" + gate_name \n if \"CZ-\" in gate_name:\n matrix, new_name = get_combined_gate_representation_for_noise_model(gate_name)\n return matrix, \"NOISY-\" + gate_name \n \n\n raise NoisyGateUndefined(\"Undefined gate and params: {}{}\\n\"\n \"Please restrict yourself to I, RX(+/-pi), RX(+/-pi/2), CZ\"\n .format(gate_name, params))", "def parse_equations(eqs, ops):\n eeqs = []\n prop_list = ['unit of', 'commutative', 'associative', 'distributes over', 'inverse of', \n 'annihilates', 'idempotent', 'absorbs', 'absorptive', 'involutive']\n props = []\n for eq in eqs:\n if not any_in(prop_list, eq):\n eeqs.append(Eq.parse_eq(eq, ops))\n else:\n if 'unit of' in eq:\n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*unit of\\s+'(\\w+)'$\", eq)\n unit, side, op = m.groups()\n props.append(Unit(unit, op, side))\n 
elif \"annihilates\" in eq: \n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*annihilates\\s+'(\\w+)'$\", eq)\n unit, side, op = m.groups()\n props.append(Annih(unit, op, side))\n elif \"distributes over\" in eq:\n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*distributes over\\s+'(\\w+)'$\", eq)\n op1, side, op2 = m.groups()\n props.append(Dist(op1, op2, side))\n elif \"absorbs\" in eq:\n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*absorbs\\s+'(\\w+)'$\", eq)\n op1, side, op2 = m.groups()\n props.append(Absorb(op1, op2, side))\n elif \"inverse of\" in eq:\n m = re.search(\"^'(\\w+)'\\s+(left|right)?\\s*inverse of\\s+'(\\w+)'\\s+with\\s+'(\\w+)'$\", eq)\n uop, side, op, unit = m.groups()\n props.append(Inverse(uop, op, unit, side))\n elif \"absorptive\" in eq:\n m = re.search(\"^'(\\w+)'\\s+and\\s+'(\\w+)'\\s+absorptive$\", eq)\n op1, op2 = m.groups()\n props.append(Absorb(op1, op2, None))\n props.append(Absorb(op2, op1, None))\n else:\n m = re.search(\"^'(\\w+)'\\s+(.*)$\", eq)\n op = m.group(1)\n kws = splitstrip(m.group(2), \",\")\n if 'associative' in kws:\n props.append(Assoc(op))\n if 'commutative' in kws:\n props.append(Comm(op))\n if 'idempotent' in kws:\n props.append(Idemp(op))\n if 'involutive' in kws:\n props.append(Invol(op))\n\n return eeqs, props", "def propanolIntermediate():\n coords = [\n [-1.60306996, 0.10333519, 0.50792736],\n [-0.66904416, -0.46962566, -0.55371646],\n [0.67345677, 0.26436258, -0.61179298],\n [1.26292797, -0.10585085, -1.45392921],\n [0.49744830, 1.34089332, -0.75955140],\n [1.47742183, 0.05176805, 0.52349829],\n [0.98773122, 0.34094585, 1.30125393],\n [-0.48213061, -1.52528483, -0.34815476],\n [-1.14165995, -0.39229359, -1.53423716],\n [-2.56608070, -0.40007121, 0.47312929],\n [-1.76619136, 1.16652831, 0.34003517],\n [-1.19366144, -0.03197289, 1.50775619],\n ]\n\n symbols = [\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"O\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n ]\n\n atoms = []\n for i, _ in enumerate(coords):\n atoms.append(Atom(symbols[i], position=coords[i]))\n return Molecule(symbols=atoms)", "def mesh_wing(wing):\n\n # Define the number of chordwise panels and points.\n num_chordwise_panels = wing.num_chordwise_panels\n num_chordwise_coordinates = num_chordwise_panels + 1\n\n # Initialize the list of cross sections to None.\n wing_cross_section = None\n\n # Initialize an empty ndarray that will hold the panels of this wing. It currently has 0 columns and M rows,\n # where M is the number of the wing's chordwise panels.\n panels = np.empty((num_chordwise_panels, 0), dtype=object)\n\n # Get the chordwise coordinates.\n if wing.chordwise_spacing == \"uniform\":\n nondim_chordwise_coordinates = np.linspace(\n 0, 1, num_chordwise_coordinates, endpoint=True,\n )\n elif wing.chordwise_spacing == \"cosine\":\n nondim_chordwise_coordinates = ps.geometry.cosspace(\n 0, 1, num_chordwise_coordinates, endpoint=True,\n )\n else:\n raise Exception(\"Bad value of wing.chordwise_spacing!\")\n\n # Initialize two empty 0 x 3 ndarrays to hold the corners of each cross section. They will eventually be L x 3\n # ndarrays, where L is number of cross sections.\n cross_section_xyz_le = np.empty((0, 3))\n cross_section_xyz_te = np.empty((0, 3))\n\n # Iterate through the meshed wing cross sections and vertically stack the global location each cross sections\n # leading and trailing edges. 
cross_section.xyz_te is a method that returns the cross section's trailing edge's\n # coordinates.\n for wing_cross_section in wing.wing_cross_sections:\n cross_section_xyz_le = np.vstack(\n (cross_section_xyz_le, wing_cross_section.xyz_le + wing.xyz_le)\n )\n cross_section_xyz_te = np.vstack(\n (cross_section_xyz_te, wing_cross_section.xyz_te() + wing.xyz_le)\n )\n\n # Get the quarter chord vectors, which are a L x 3 ndarray of points which are the quarter-chord points of cross\n # section, where L is the number of cross sections.\n cross_section_xyz_quarter_chords = cross_section_xyz_le + 0.25 * (\n cross_section_xyz_te - cross_section_xyz_le\n )\n\n # Get a (L - 1) x 3 ndarray of vectors connecting the cross section quarter chord points, where L is the number of\n # cross sections.\n section_quarter_chords = (\n cross_section_xyz_quarter_chords[1:, :]\n - cross_section_xyz_quarter_chords[:-1, :]\n )\n\n # Get directions for transforming 2D airfoil data to 3D:\n # Project quarter chords onto yz plane and normalize.\n # Create a L x 2 ndarray with just the y and z components of the the section quarter chord vectors.\n section_quarter_chords_yz = section_quarter_chords[:, 1:]\n\n # Create a list of the magnitudes of each row of the section_quarter_chords_yz ndarray.\n section_quarter_chords_yz_magnitude_list = np.linalg.norm(\n section_quarter_chords_yz, axis=1\n )\n\n # Convert section_quarter_chords_yz_magnitude_list into a column vector.\n section_quarter_chords_yz_magnitude_column_vector = np.expand_dims(\n section_quarter_chords_yz_magnitude_list, axis=1\n )\n # Normalize the y and z components by the magnitudes\n section_quarter_chords_yz_norm_magnitudes = (\n section_quarter_chords_yz / section_quarter_chords_yz_magnitude_column_vector\n )\n\n # Calculate the number of quarter chord vectors\n num_quarter_chords = section_quarter_chords_yz_magnitude_column_vector.shape[0]\n\n # Create a column vector of all zeros with height equal to the number of quarter chord vectors\n zero_column_vector_stand_in_for_quarter_chords_x_values = np.zeros(\n (num_quarter_chords, 1)\n )\n\n # Horizontally stack the zero column vector with the section_quarter_chords_yz_norm_magnitudes to\n # produce the normalized section quarter chords projected onto the yz plane.\n section_quarter_chords_proj_yz_norm = np.hstack(\n (\n zero_column_vector_stand_in_for_quarter_chords_x_values,\n section_quarter_chords_yz_norm_magnitudes,\n )\n )\n\n # Then, construct the normal directions for each cross_section. 
Make the normals for the inner\n # wing_cross_sections, where we need to merge directions.\n if len(wing.wing_cross_sections) > 2:\n # Add together the adjacent normalized section quarter chords projected onto the the yz plane.\n cross_sections_local_normal_inners_non_norm = (\n section_quarter_chords_proj_yz_norm[:-1, :]\n + section_quarter_chords_proj_yz_norm[1:, :]\n )\n\n # Create a list of the magnitudes of the summed adjacent normalized section quarter chords projected onto the yz\n # plane.\n cross_sections_local_normal_inners_mag_list = np.linalg.norm(\n cross_sections_local_normal_inners_non_norm, axis=1\n )\n\n # Convert the list to a column vector.\n cross_section_local_normal_inners_mag_column_vector = np.expand_dims(\n cross_sections_local_normal_inners_mag_list, axis=1\n )\n\n # Normalize the summed adjacent normalized section quarter chords projected onto the yz plane by their\n # magnitudes.\n cross_section_local_normal_inners_norm = (\n cross_sections_local_normal_inners_non_norm\n / cross_section_local_normal_inners_mag_column_vector\n )\n\n # Vertically stack the first normalized section quarter chord, the inner normalized section quarter chords, and\n # the last normalized section quarter chord.\n cross_sections_local_normal = np.vstack(\n (\n section_quarter_chords_proj_yz_norm[0, :],\n cross_section_local_normal_inners_norm,\n section_quarter_chords_proj_yz_norm[-1, :],\n )\n )\n else:\n # Vertically stack the first normalized section quarter chord, and the last normalized section quarter chord.\n cross_sections_local_normal = np.vstack(\n (\n section_quarter_chords_proj_yz_norm[0, :],\n section_quarter_chords_proj_yz_norm[-1, :],\n )\n )\n # cross_sections_local_normal is now a L x 3 array that represents the normal direction at each cross section.\n\n # Then, construct the back directions for each cross section.\n cross_section_local_back_non_norm = cross_section_xyz_te - cross_section_xyz_le\n\n # Create a list of the cross section chord lengths.\n cross_section_chord_length_list = np.linalg.norm(\n cross_section_local_back_non_norm, axis=1\n )\n\n # Convert the list to a column vector.\n cross_section_chord_length_column_vector = np.expand_dims(\n cross_section_chord_length_list, axis=1\n )\n\n # Normalize the cross section back vectors by their magnitudes.\n cross_section_local_back_norm = (\n cross_section_local_back_non_norm / cross_section_chord_length_column_vector\n )\n\n # Then, construct the up direction for each cross sections.\n cross_section_local_up = np.cross(\n cross_section_local_back_norm, cross_sections_local_normal, axis=1\n )\n\n # Get the scaling factor (airfoils at dihedral breaks need to be \"taller\" to compensate).\n cross_section_scaling_factor = 1 / np.sqrt(\n (\n 1\n + np.sum(\n section_quarter_chords_proj_yz_norm[1:, :]\n * section_quarter_chords_proj_yz_norm[:-1, :],\n axis=1,\n )\n )\n / 2\n )\n cross_section_scaling_factor = np.hstack((1, cross_section_scaling_factor, 1))\n\n # Make the panels for each section.\n for section_num in range(len(wing.wing_cross_sections) - 1):\n # Define the relevant cross sections.\n inner_cross_section = wing.wing_cross_sections[section_num]\n outer_cross_section = wing.wing_cross_sections[section_num + 1]\n\n # Define the airfoils at each cross section.\n inner_airfoil = inner_cross_section.airfoil.add_control_surface(\n deflection=inner_cross_section.control_surface_deflection,\n hinge_point=inner_cross_section.control_surface_hinge_point,\n )\n outer_airfoil = 
outer_cross_section.airfoil.add_control_surface(\n deflection=inner_cross_section.control_surface_deflection,\n # The inner cross section dictates control surface deflections.\n hinge_point=inner_cross_section.control_surface_hinge_point,\n )\n\n # Make the mean camber lines for each cross section. First index is point number, second index is xyz.\n inner_cross_section_mcl_nondim = inner_airfoil.get_downsampled_mcl(\n nondim_chordwise_coordinates\n )\n outer_cross_section_mcl_nondim = outer_airfoil.get_downsampled_mcl(\n nondim_chordwise_coordinates\n )\n\n # Put the inner cross section's local up airfoil frame coordinates in a column vector.\n inner_cross_section_mcl_nondim_local_up_column_vector = np.expand_dims(\n inner_cross_section_mcl_nondim[:, 1], 1\n )\n\n # Put the inner cross section's local back airfoil frame coordinates in a column vector.\n inner_cross_section_mcl_nondim_local_back_column_vector = np.expand_dims(\n inner_cross_section_mcl_nondim[:, 0], 1\n )\n # Put the outer cross section's local up airfoil frame coordinates in a column vector.\n outer_cross_section_mcl_nondim_local_up_column_vector = np.expand_dims(\n outer_cross_section_mcl_nondim[:, 1], 1\n )\n\n # Put the outer cross section's local back airfoil frame coordinates in a column vector.\n outer_cross_section_mcl_nondim_local_back_column_vector = np.expand_dims(\n outer_cross_section_mcl_nondim[:, 0], 1\n )\n\n # Convert the inner cross section's non dimensional local back airfoil frame coordinates to meshed wing\n # coordinates.\n inner_cross_section_mcl_local_back = (\n cross_section_local_back_norm[section_num, :]\n * inner_cross_section_mcl_nondim_local_back_column_vector\n * cross_section_chord_length_list[section_num]\n )\n\n # Convert the inner cross section's non dimensional local up airfoil frame coordinates to meshed wing\n # coordinates.\n inner_cross_section_mcl_local_up = (\n cross_section_local_up[section_num, :]\n * inner_cross_section_mcl_nondim_local_up_column_vector\n * cross_section_chord_length_list[section_num]\n * cross_section_scaling_factor[section_num]\n )\n\n # Convert the outer cross section's non dimensional local back airfoil frame coordinates to meshed wing\n # coordinates.\n outer_cross_section_mcl_local_back = (\n cross_section_local_back_norm[section_num + 1, :]\n * outer_cross_section_mcl_nondim_local_back_column_vector\n * cross_section_chord_length_list[section_num + 1]\n )\n\n # Convert the outer cross section's non dimensional local up airfoil frame coordinates to meshed wing\n # coordinates.\n outer_cross_section_mcl_local_up = (\n cross_section_local_up[section_num + 1, :]\n * outer_cross_section_mcl_nondim_local_up_column_vector\n * cross_section_chord_length_list[section_num + 1]\n * cross_section_scaling_factor[section_num + 1]\n )\n\n # Convert the inner cross section's meshed wing coordinates to absolute coordinates. This is size M x 3, where M\n # is the number of chordwise points.\n inner_cross_section_mcl = (\n cross_section_xyz_le[section_num, :]\n + inner_cross_section_mcl_local_back\n + inner_cross_section_mcl_local_up\n )\n\n # Convert the outer cross section's meshed wing coordinates to absolute coordinates. 
This is size M x 3, where M\n # is the number of chordwise points.\n outer_cross_section_mcl = (\n cross_section_xyz_le[section_num + 1, :]\n + outer_cross_section_mcl_local_back\n + outer_cross_section_mcl_local_up\n )\n\n # Define number of spanwise points and panels.\n num_spanwise_panels = wing_cross_section.num_spanwise_panels\n num_spanwise_coordinates = num_spanwise_panels + 1\n\n # Get the spanwise coordinates.\n if wing_cross_section.spanwise_spacing == \"uniform\":\n nondim_spanwise_coordinates = np.linspace(\n 0, 1, num_spanwise_coordinates, endpoint=True,\n )\n elif wing_cross_section.spanwise_spacing == \"cosine\":\n nondim_spanwise_coordinates = ps.geometry.cosspace(\n n_points=num_spanwise_coordinates, endpoint=True,\n )\n else:\n raise Exception(\"Bad value of section.spanwise_spacing!\")\n\n # Make section_mcl_coordinates: M x N x 3 array of mean camberline coordinates. The first index is chordwise\n # point number, second index is spanwise point number, third is the x, y, and z coordinates. M is the number of\n # chordwise points. N is the number of spanwise points. Put a reversed version (from 1 to 0) of the non\n # dimensional spanwise coordinates in a row vector. This is size 1 x N, where N is the number of spanwise\n # points.\n reversed_nondim_spanwise_coordinates_row_vector = np.expand_dims(\n (1 - nondim_spanwise_coordinates), 0\n )\n\n # Convert the reversed non dimensional spanwise coordinate row vector (from 1 to 0) to a matrix. This is size\n # 1 x N x 1, where N is the number of spanwise points.\n reversed_nondim_spanwise_coordinates_matrix = np.expand_dims(\n reversed_nondim_spanwise_coordinates_row_vector, 2\n )\n\n # Convert the inner and outer cross section's mean camberline coordinates column vectors to matrices. These are\n # size M x 1 x 3, where M is the number of chordwise points.\n inner_cross_section_mcl_matrix = np.expand_dims(inner_cross_section_mcl, 1)\n outer_cross_section_mcl_matrix = np.expand_dims(outer_cross_section_mcl, 1)\n\n # Put the non dimensional spanwise coordinates (from 0 to 1) in a row vector. This is size 1 x N, where N is the\n # number of spanwise points.\n nondim_spanwise_coordinates_row_vector = np.expand_dims(\n nondim_spanwise_coordinates, 0\n )\n\n # Convert the non dimensional spanwise coordinate row vector (from to 0 to 1) to a matrix. 
This is size\n # 1 x N x 1, where N is the number of spanwise points.\n nondim_spanwise_coordinates_matrix = np.expand_dims(\n nondim_spanwise_coordinates_row_vector, 2\n )\n\n # Linearly interpolate between inner and outer cross sections.\n # This uses the following equation:\n # f(a, b, i) = i * a + (1 - i) * b\n # \"a\" is an N x 3 ndarray of the coordinates points along the outer cross section's mean\n # camber line.\n # \"b\" is an N x 3 ndarray of the coordinates of points along the inner cross section's mean\n # camber line.\n # \"i\" is a 1D array (or vector) of length M that holds the nondimensionalized spanwise panel\n # spacing from 0 to 1.\n # This produces a M x N x 3 ndarray where each slot holds the coordinates of a point on the\n # surface between the inner and outer cross sections.\n section_mcl_vertices = (\n reversed_nondim_spanwise_coordinates_matrix * inner_cross_section_mcl_matrix\n + nondim_spanwise_coordinates_matrix * outer_cross_section_mcl_matrix\n )\n\n # Compute the corners of each panel.\n front_inner_vertices = section_mcl_vertices[:-1, :-1, :]\n front_outer_vertices = section_mcl_vertices[:-1, 1:, :]\n back_inner_vertices = section_mcl_vertices[1:, :-1, :]\n back_outer_vertices = section_mcl_vertices[1:, 1:, :]\n\n # Compute a matrix that is M x N, where M and N are the number of chordwise and spanwise panels. The values are\n # either 1 if the panel at that location is a trailing edge, or 0 if not.\n section_is_trailing_edge = np.vstack(\n (\n np.zeros((num_chordwise_panels - 1, num_spanwise_panels), dtype=bool),\n np.ones((1, num_spanwise_panels), dtype=bool),\n )\n )\n\n # Compute a matrix that is M x N, where M and N are the number of chordwise and spanwise panels. The values are\n # either 1 if the panel at that location is a leading edge, or 0 if not.\n section_is_leading_edge = np.vstack(\n (\n np.ones((1, num_spanwise_panels), dtype=bool),\n np.zeros((num_chordwise_panels - 1, num_spanwise_panels), dtype=bool),\n )\n )\n\n # Initialize an empty ndarray to hold this sections. 
The matrix is size M x N, where M and N are the number\n # of chordwise and spanwise panels.\n section_panels = np.empty(\n (num_chordwise_panels, num_spanwise_panels), dtype=object\n )\n\n # Loop through the empty section panels matrix and create a new panel object in each slot.\n for chordwise_position in range(num_chordwise_panels):\n for spanwise_position in range(num_spanwise_panels):\n section_panels[\n chordwise_position, spanwise_position\n ] = ps.geometry.Panel(\n front_left_vertex=front_inner_vertices[\n chordwise_position, spanwise_position\n ],\n front_right_vertex=front_outer_vertices[\n chordwise_position, spanwise_position\n ],\n back_left_vertex=back_inner_vertices[\n chordwise_position, spanwise_position\n ],\n back_right_vertex=back_outer_vertices[\n chordwise_position, spanwise_position\n ],\n is_trailing_edge=section_is_trailing_edge[\n chordwise_position, spanwise_position\n ],\n is_leading_edge=section_is_leading_edge[\n chordwise_position, spanwise_position\n ],\n )\n\n # This section's panel matrix is stack horizontally, to the right of the wing's panel matrix.\n panels = np.hstack((panels, section_panels))\n\n # Handle symmetry.\n if wing.symmetric:\n # Define the airfoils at each cross section.\n inner_airfoil = inner_cross_section.airfoil.add_control_surface(\n deflection=-inner_cross_section.control_surface_deflection,\n hinge_point=inner_cross_section.control_surface_hinge_point,\n )\n outer_airfoil = outer_cross_section.airfoil.add_control_surface(\n deflection=-inner_cross_section.control_surface_deflection,\n # The inner cross section dictates control surface deflections.\n hinge_point=inner_cross_section.control_surface_hinge_point,\n )\n\n # Make the mean camber lines for each cross section. First index is point number, second index is xyz.\n inner_cross_section_mcl_nondim = inner_airfoil.get_downsampled_mcl(\n nondim_chordwise_coordinates\n )\n outer_cross_section_mcl_nondim = outer_airfoil.get_downsampled_mcl(\n nondim_chordwise_coordinates\n )\n\n # Put the inner cross section's local up airfoil frame coordinates in a column vector.\n inner_cross_section_mcl_nondim_local_up_column_vector = np.expand_dims(\n inner_cross_section_mcl_nondim[:, 1], 1\n )\n\n # Put the inner cross section's local back airfoil frame coordinates in a column vector.\n inner_cross_section_mcl_nondim_local_back_column_vector = np.expand_dims(\n inner_cross_section_mcl_nondim[:, 0], 1\n )\n\n # Put the outer cross section's local up airfoil frame coordinates in a column vector.\n outer_cross_section_mcl_nondim_local_up_column_vector = np.expand_dims(\n outer_cross_section_mcl_nondim[:, 1], 1\n )\n\n # Put the outer cross section's local back airfoil frame coordinates in a column vector.\n outer_cross_section_mcl_nondim_local_back_column_vector = np.expand_dims(\n outer_cross_section_mcl_nondim[:, 0], 1\n )\n\n # Convert the inner cross section's non dimensional local back airfoil frame coordinates to meshed wing\n # coordinates.\n inner_cross_section_mcl_local_back = (\n cross_section_local_back_norm[section_num, :]\n * inner_cross_section_mcl_nondim_local_back_column_vector\n * cross_section_chord_length_list[section_num]\n )\n\n # Convert the inner cross section's non dimensional local up airfoil frame coordinates to meshed wing\n # coordinates.\n inner_cross_section_mcl_local_up = (\n cross_section_local_up[section_num, :]\n * inner_cross_section_mcl_nondim_local_up_column_vector\n * cross_section_chord_length_list[section_num]\n * cross_section_scaling_factor[section_num]\n 
)\n\n # Convert the outer cross section's non dimensional local back airfoil frame coordinates to meshed wing\n # coordinates.\n outer_cross_section_mcl_local_back = (\n cross_section_local_back_norm[section_num + 1, :]\n * outer_cross_section_mcl_nondim_local_back_column_vector\n * cross_section_chord_length_list[section_num + 1]\n )\n\n # Convert the outer cross section's non dimensional local up airfoil frame coordinates to meshed wing\n # coordinates.\n outer_cross_section_mcl_local_up = (\n cross_section_local_up[section_num + 1, :]\n * outer_cross_section_mcl_nondim_local_up_column_vector\n * cross_section_chord_length_list[section_num + 1]\n * cross_section_scaling_factor[section_num + 1]\n )\n\n # Convert the inner cross section's meshed wing coordinates to absolute coordinates. This is size M x 3,\n # where M is the number of chordwise points.\n inner_cross_section_mcl = (\n cross_section_xyz_le[section_num, :]\n + inner_cross_section_mcl_local_back\n + inner_cross_section_mcl_local_up\n )\n\n # Convert the outer cross section's meshed wing coordinates to absolute coordinates. This is size M x 3,\n # where M is the number of chordwise points.\n outer_cross_section_mcl = (\n cross_section_xyz_le[section_num + 1, :]\n + outer_cross_section_mcl_local_back\n + outer_cross_section_mcl_local_up\n )\n\n # Make section_mcl_coordinates: M x N x 3 array of mean camberline coordinates. First index is chordwise\n # point number, second index is spanwise point number, third are the x, y, and z coordinates. M is the\n # number of chordwise points. N is the number of spanwise points. Put a reversed version (from 1 to 0) of\n # the non dimensional spanwise coordinates in a row vector. This is size 1 x N, where N is the number of\n # spanwise points.\n reversed_nondim_spanwise_coordinates_row_vector = np.expand_dims(\n (1 - nondim_spanwise_coordinates), 0\n )\n\n # Convert the reversed non dimensional spanwise coordinate row vector (from 1 to 0) to a matrix. This is\n # size 1 x N x 1, where N is the number of spanwise points.\n reversed_nondim_spanwise_coordinates_matrix = np.expand_dims(\n reversed_nondim_spanwise_coordinates_row_vector, 2\n )\n\n # Convert the inner and outer cross section's mean camberline coordinates column vectors to matrices. These\n # are size M x 1 x 3, where M is the number of chordwise points.\n inner_cross_section_mcl_matrix = np.expand_dims(inner_cross_section_mcl, 1)\n outer_cross_section_mcl_matrix = np.expand_dims(outer_cross_section_mcl, 1)\n\n # Put the non dimensional spanwise coordinates (from 0 to 1) in a row vector. This is size 1 x N, where N is\n # the number of spanwise points.\n nondim_spanwise_coordinates_row_vector = np.expand_dims(\n nondim_spanwise_coordinates, 0\n )\n\n # Convert the non dimensional spanwise coordinate row vector (from to 0 to 1) to a matrix. 
This is size\n # 1 x N x 1, where N is the number of spanwise points.\n nondim_spanwise_coordinates_matrix = np.expand_dims(\n nondim_spanwise_coordinates_row_vector, 2\n )\n\n # Linearly interpolate between inner and outer cross sections.\n # This uses the following equation:\n # f(a, b, i) = i * a + (1 - i) * b\n # \"a\" is an N x 3 ndarray of the coordinates points along the outer cross section's mean\n # camber line.\n # \"b\" is an N x 3 ndarray of the coordinates of points along the inner cross section's mean\n # camber line.\n # \"i\" is a 1D array (or vector) of length M that holds the nondimensionalized spanwise panel\n # spacing from 0 to 1.\n # This produces a M x N x 3 ndarray where each slot holds the coordinates of a point on the\n # surface between the inner and outer cross sections.\n section_mcl_vertices = (\n reversed_nondim_spanwise_coordinates_matrix\n * inner_cross_section_mcl_matrix\n + nondim_spanwise_coordinates_matrix * outer_cross_section_mcl_matrix\n )\n\n # Compute the corners of each panel.\n front_inner_vertices = section_mcl_vertices[:-1, :-1, :]\n front_outer_vertices = section_mcl_vertices[:-1, 1:, :]\n back_inner_vertices = section_mcl_vertices[1:, :-1, :]\n back_outer_vertices = section_mcl_vertices[1:, 1:, :]\n\n # Compute a matrix that is M x N, where M and N are the number of chordwise and spanwise panels. The values\n # are either 1 if the panel at that location is a trailing edge, or 0 if not.\n section_is_trailing_edge = np.vstack(\n (\n np.zeros(\n (num_chordwise_panels - 1, num_spanwise_panels), dtype=bool\n ),\n np.ones((1, num_spanwise_panels), dtype=bool),\n )\n )\n\n # Compute a matrix that is M x N, where M and N are the number of chordwise and spanwise panels. The values\n # are either 1 if the panel at that location is a leading edge, or 0 if not.\n section_is_leading_edge = np.vstack(\n (\n np.ones((1, num_spanwise_panels), dtype=bool),\n np.zeros(\n (num_chordwise_panels - 1, num_spanwise_panels), dtype=bool\n ),\n )\n )\n\n # Initialize an empty ndarray to hold this sections. 
The matrix is size M x N, where M and N are the\n # number of chordwise and spanwise panels.\n section_panels = np.empty(\n (num_chordwise_panels, num_spanwise_panels), dtype=object\n )\n\n # Loop through the empty section panels matrix and create a new panel object in each slot.\n for chordwise_position in range(num_chordwise_panels):\n for spanwise_position in range(num_spanwise_panels):\n # Reflect the vertices to create the reflected wing for the symmetric case.\n front_inner_vertices_reflected = ps.geometry.reflect_over_xz_plane(\n front_inner_vertices[chordwise_position, spanwise_position]\n )\n front_outer_vertices_reflected = ps.geometry.reflect_over_xz_plane(\n front_outer_vertices[chordwise_position, spanwise_position]\n )\n back_inner_vertices_reflected = ps.geometry.reflect_over_xz_plane(\n back_inner_vertices[chordwise_position, spanwise_position]\n )\n back_outer_vertices_reflected = ps.geometry.reflect_over_xz_plane(\n back_outer_vertices[chordwise_position, spanwise_position]\n )\n\n section_panels[\n chordwise_position, spanwise_position\n ] = ps.geometry.Panel(\n front_left_vertex=front_outer_vertices_reflected,\n front_right_vertex=front_inner_vertices_reflected,\n back_left_vertex=back_outer_vertices_reflected,\n back_right_vertex=back_inner_vertices_reflected,\n is_trailing_edge=section_is_trailing_edge[\n chordwise_position, spanwise_position\n ],\n is_leading_edge=section_is_leading_edge[\n chordwise_position, spanwise_position\n ],\n )\n\n # This section's panel matrix is stack horizontally, to the left of the wing's panel matrix.\n panels = np.hstack((np.flip(section_panels, axis=1), panels))\n\n # Iterate through the panels and populate their left and right edge flags. Also populate their local position\n # attributes.\n for chordwise_position in range(wing.num_chordwise_panels):\n for spanwise_position in range(wing.num_spanwise_panels):\n panel = panels[chordwise_position, spanwise_position]\n panel.local_chordwise_position = chordwise_position\n panel.local_spanwise_position = spanwise_position\n if spanwise_position == 0:\n panel.is_left_edge = True\n else:\n panel.is_left_edge = False\n if spanwise_position == wing.num_spanwise_panels - 1:\n panel.is_right_edge = True\n else:\n panel.is_right_edge = False\n\n # Populate the wing's panels attribute.\n wing.panels = panels", "def compute_tangential_and_cross_components(\n self, shape_component1='e1', shape_component2='e2', tan_component='et',\n cross_component='ex', geometry='curve', is_deltasigma=False, cosmo=None, add=True):\n # Check is all the required data is available\n missing_cols = ', '.join(\n [f\"'{t_}'\" for t_ in ('ra', 'dec', shape_component1, shape_component2)\n if t_ not in self.galcat.columns])\n if len(missing_cols)>0:\n raise TypeError('Galaxy catalog missing required columns: '+missing_cols+\\\n '. 
Do you mean to first convert column names?')\n if is_deltasigma:\n self.add_critical_surface_density(cosmo)\n # compute shears\n angsep, tangential_comp, cross_comp = compute_tangential_and_cross_components(\n ra_lens=self.ra, dec_lens=self.dec,\n ra_source=self.galcat['ra'], dec_source=self.galcat['dec'],\n shear1=self.galcat[shape_component1], shear2=self.galcat[shape_component2],\n geometry=geometry, is_deltasigma=is_deltasigma,\n sigma_c=self.galcat['sigma_c'] if 'sigma_c' in self.galcat.columns else None)\n if add:\n self.galcat['theta'] = angsep\n self.galcat[tan_component] = tangential_comp\n self.galcat[cross_component] = cross_comp\n return angsep, tangential_comp, cross_comp", "def products(q_1: Qs, q_2: Qs, kind: str = \"\", reverse: bool = False) -> Qs:\n\n q_1_copy = deepcopy(q_1)\n q_2_copy = deepcopy(q_2)\n qs_left, qs_right = Qs(), Qs()\n\n # Diagonalize if need be.\n if ((q_1.rows == q_2.rows) and (q_1.columns == q_2.columns)) or (\n \"scalar_q\" in [q_1.qs_type, q_2.qs_type]\n ):\n\n if q_1.columns == 1:\n qs_right = q_2_copy\n qs_left = diagonal(q_1_copy, qs_right.rows)\n\n elif q_2.rows == 1:\n qs_left = q_1_copy\n qs_right = diagonal(q_2_copy, qs_left.columns)\n\n else:\n qs_left = q_1_copy\n qs_right = q_2_copy\n\n # Typical matrix multiplication criteria.\n elif q_1.columns == q_2.rows:\n qs_left = q_1_copy\n qs_right = q_2_copy\n\n else:\n print(\n \"Oops, cannot multiply series with row/column dimensions of {}/{} to {}/{}\".format(\n q_1.rows, q_1.columns, q_2.rows, q_2.columns\n )\n )\n\n # Operator products need to be transposed.\n operator_flag = False\n if qs_left in [\"op\", \"operator\"] and qs_right in [\"op\", \"operator\"]:\n operator_flag = True\n\n outer_row_max = qs_left.rows\n outer_column_max = qs_right.columns\n shared_inner_max = qs_left.columns\n projector_flag = (\n (shared_inner_max == 1) and (outer_row_max > 1) and (outer_column_max > 1)\n )\n\n result = [\n [q0(q_type=\"\") for _i in range(outer_column_max)]\n for _j in range(outer_row_max)\n ]\n\n for outer_row in range(outer_row_max):\n for outer_column in range(outer_column_max):\n for shared_inner in range(shared_inner_max):\n\n # For projection operators.\n left_index = outer_row\n right_index = outer_column\n\n if outer_row_max >= 1 and shared_inner_max > 1:\n left_index = outer_row + shared_inner * outer_row_max\n\n if outer_column_max >= 1 and shared_inner_max > 1:\n right_index = shared_inner + outer_column * shared_inner_max\n\n result[outer_row][outer_column] = add(result[outer_row][outer_column],\n product(qs_left.qs[left_index],\n qs_right.qs[right_index], kind=kind, reverse=reverse\n )\n )\n\n # Flatten the list.\n new_qs = [item for sublist in result for item in sublist]\n new_states = Qs(new_qs, rows=outer_row_max, columns=outer_column_max)\n\n if projector_flag or operator_flag:\n return transpose(new_states)\n\n else:\n return new_states", "def test_elliptic_special_triangles(self):\n import itertools\n\n s = space(curvature=1)\n\n # turning constants in radians\n t1_ref = 6.28318530717958647692528676655867\n t2_ref = t1_ref / 2\n t3_ref = t1_ref / 3\n t4_ref = t1_ref / 4\n t5_ref = t1_ref / 5\n t6_ref = t1_ref / 6\n # random number\n magic = 7.77733337337373737373\n tm_ref = t1_ref / magic\n nagic = magic - 4 # strangely named other magic constant\n tn_ref = t1_ref / nagic\n # tetrahedron edge central angle\n p4_ref = 1.91063323624901855632771420503144 # = acos(-1/3)\n # icosahedron edge central angle\n p20_ref = 1.10714871779409050301706546017856 # = atan(2)\n # area 
constant\n sm = space(0).sphere_s2(1)\n\n # test with each known triangle\n for a, C, b, A, c, B, m in (\n (t3_ref, t2_ref, t3_ref, t2_ref, t3_ref, t2_ref, sm / 2), # literally a hemisphere, which takes up the entire space\n (t2_ref, t4_ref, t4_ref, t2_ref, t4_ref, t4_ref, sm / 4), # diangle which is 1/4 of the sphere\n (t2_ref, tm_ref, t3_ref, t2_ref, t6_ref, tm_ref, sm / magic), # a different diangle\n (t2_ref, tn_ref, t3_ref, t2_ref, t6_ref, tn_ref, sm / nagic), # a different diangle, obtuse angle this time\n (t4_ref, t4_ref, t4_ref, t4_ref, t4_ref, t4_ref, sm / 8), # triangle with 3 right angles\n (t4_ref, tm_ref, t4_ref, t4_ref, tm_ref, t4_ref, sm / magic / 2), # different slice of the previous one, has 2 right angles\n (t4_ref, tn_ref, t4_ref, t4_ref, tn_ref, t4_ref, sm / nagic / 2), # another one but with an obtuse angle\n (p4_ref, t3_ref) * 3 + (sm / 4,), # regular tetrahedron face, projected onto the sphere\n (p20_ref, t5_ref) * 3 + (sm / 20,) # regular icosahedron face, projected onto the sphere\n ):\n # go through all vertex permutations\n for (a, A), (b, B), (c, C) in itertools.permutations([(a, A), (b, B), (c, C)], 3):\n self.assertTrue(isclose(\n s.cosine_law_side(a, b, C),\n c\n ))\n self.assertTrue(t2_ref in (A, B) or isclose(\n s.cosine_law_angle(a, b, c),\n C,\n rel_tol = 1e-5\n ))\n self.assertTrue(isclose(\n s.dual_cosine_law_angle(A, B, c),\n C,\n rel_tol = 1e-5\n ))\n self.assertTrue(t2_ref in (A, B) or isclose(\n s.dual_cosine_law_side(A, B, C),\n c\n ))\n self.assertTrue(A == t2_ref or isclose(\n s.sine_law_side(a, A, B),\n b,\n rel_tol = 1e-5,\n abs_tol = 1e-15\n ) or isclose(\n s.sine_law_side(a, A, B),\n t2_ref - b,\n rel_tol = 1e-5,\n abs_tol = 1e-15\n ))\n self.assertTrue(A == t2_ref or isclose(\n s.sine_law_angle(a, A, b),\n B,\n rel_tol = 1e-5,\n abs_tol = 1e-15\n ) or B > t4_ref and isclose( # SSA triangle solving strangeness\n s.sine_law_angle(a, A, b),\n t2_ref - B,\n rel_tol = 1e-5,\n abs_tol = 1e-15\n ))\n self.assertTrue((A, B, C).count(t2_ref) == 1 or isclose(\n s.triangle_area_from_sides(a, b, c),\n m,\n rel_tol = 1e-5\n ))\n self.assertTrue(isclose(\n s.triangle_area_from_angles(A, B, C),\n m\n ))", "def assemble_operator(self, parameters, space_group='default'):\n operator = super(RWGDominantSystem, self).assemble_operator(parameters, space_group)\n return operator.weak_form()", "def product(q_1: Q, q_2: Q, kind: str = \"\", reverse: bool = False) -> Q:\n\n q_1.check_representations(q_2)\n\n commuting = _commuting_products(q_1, q_2)\n q_even = Q()\n q_even.t = commuting[\"tt\"] - commuting[\"xx+yy+zz\"]\n q_even.x = commuting[\"tx+xt\"]\n q_even.y = commuting[\"ty+yt\"]\n q_even.z = commuting[\"tz+zt\"]\n\n anti_commuting = _anti_commuting_products(q_1, q_2)\n q_odd = Q()\n\n if reverse:\n q_odd.x = anti_commuting[\"zy-yz\"]\n q_odd.y = anti_commuting[\"xz-zx\"]\n q_odd.z = anti_commuting[\"yx-xy\"]\n\n else:\n q_odd.x = anti_commuting[\"yz-zy\"]\n q_odd.y = anti_commuting[\"zx-xz\"]\n q_odd.z = anti_commuting[\"xy-yx\"]\n\n if kind == \"\":\n result = add(q_even, q_odd)\n times_symbol = \"x\"\n elif kind.lower() == \"even\":\n result = q_even\n times_symbol = \"xE\"\n elif kind.lower() == \"odd\":\n result = q_odd\n times_symbol = \"xO\"\n elif kind.lower() == \"even_minus_odd\":\n result = dif(q_even, q_odd)\n times_symbol = \"xE-xO\"\n else:\n raise Exception(\n \"Four 'kind' values are known: '', 'even', 'odd', and 'even_minus_odd'.\"\n )\n\n if reverse:\n times_symbol = times_symbol.replace(\"x\", \"xR\")\n\n result.q_type = 
f\"{q_1.q_type}{times_symbol}{q_2.q_type}\"\n result.representation = q_1.representation\n\n return result", "def test_dag():\n qr = QuantumRegister(5, 'qr')\n cr = ClassicalRegister(5, 'cr')\n ghz = QuantumCircuit(qr, cr, name='ghz')\n\n ghz.h(qr[2])\n ghz.cx(qr[2], qr[1])\n ghz.cx(qr[1], qr[0])\n ghz.cx(qr[2], qr[3])\n ghz.cx(qr[3], qr[4])\n ghz.draw()\n\n # ghz_dag = circuit_to_dag(ghz)\n\n # print(ghz.width(), ghz_dag.width())", "def unfold_wire(pl):\n\tpl = phone_pl\n\tshape_points = [np.array(p) for p in pl.points]\n\tpointIter = takeNGenerator(shape_points, 4)\n\td0 = getDistance(*shape_points[0:2])\n\tpoints = [np.array([0, 0]), np.array([0, d0])]\n\tfor i in range(len(shape_points)-3):\n\t\t(p1,p2,p3,p4) = pointIter.next()\n\t\tv1 =p1-p2\n\t\tv2 = p3-p2\n\t\tv3 = p2-p3\n\t\tv4 = p4-p3\n\t\told_normal = np.cross(v1,v2)\n\t\tnew_normal = np.cross(v3,v4)\n\t\tnorm_old = old_normal/la.norm(old_normal)\n\t\tnorm_new = old_normal/la.norm(new_normal)\n\n\n\t\t#check if we need to transform:\n\t\tif any(norm_old != norm_new):\n\t\t\tprint norm_old, norm_new\n\t\t\t#create a transform that will rotate the next points to the old orientation\n\t\t\ttransform = create_transform(norm_new, norm_old)\n\t\t\trot_pot = p2\n\t\t\tpose = (rot_pot, transform)\n\t\t\tpoly = PolyLine(shape_points[i:])\n\t\t\ttranslated = poly.transformed(pose)\n\t\t\tnew_pts = [np.array(p) for p in translated.points]\n\n\t\t\tif len(shape_points[:i]) is 0:\n\t\t\t\tshape_points = new_pts\n\t\t\telse:\n\t\t\t\tshape_points = np.vstack((shape_points[:i], new_pts))\n\t\t\tpointIter = takeNGenerator(shape_points, 4)\n\t\t\tfast_forward(pointIter, i)\n\treturn PolyLine(shape_points)", "def __init__(self, topology_proposal, direction=\"forward\"):\n from simtk.openmm import app\n\n self._topology_proposal = topology_proposal\n self._direction = direction\n self._hydrogen = app.Element.getByAtomicNumber(1.0)\n\n # Set the direction\n if direction == \"forward\":\n self._destination_system = self._topology_proposal.new_system\n self._new_atoms = self._topology_proposal.unique_new_atoms\n self._destination_topology = self._topology_proposal.new_topology\n self._atoms_with_positions = self._topology_proposal.new_to_old_atom_map.keys()\n _nx_graph = self._topology_proposal._new_topology._get_networkx_molecule()\n elif direction == \"reverse\":\n self._destination_system = self._topology_proposal.old_system\n self._new_atoms = self._topology_proposal.unique_old_atoms\n self._destination_topology = self._topology_proposal.old_topology\n self._atoms_with_positions = self._topology_proposal.old_to_new_atom_map.keys()\n _nx_graph = self._topology_proposal._old_topology._get_networkx_molecule()\n else:\n raise ValueError(\"Direction must be either forward or reverse.\")\n\n self._new_atom_objects = list(self._destination_topology.atoms())\n self._new_atoms_to_place = [atom for atom in self._destination_topology.atoms() if atom.index in self._new_atoms]\n\n self._atoms_with_positions_set = set(self._atoms_with_positions)\n\n self._hydrogens = []\n self._heavy = []\n\n # Sort the new atoms into hydrogen and heavy atoms:\n for atom in self._new_atoms_to_place:\n if atom.element == self._hydrogen:\n self._hydrogens.append(atom.index)\n else:\n self._heavy.append(atom.index)\n\n # Sanity check\n if len(self._hydrogens)==0 and len(self._heavy)==0:\n msg = 'NetworkXProposalOrder: No new atoms for direction {}\\n'.format(direction)\n msg += str(topology_proposal)\n raise Exception(msg)\n\n # Choose the first of the new atoms to find the 
corresponding residue:\n #transforming_residue = self._new_atom_objects[self._new_atoms[0]].residue\n\n self._residue_graph = _nx_graph\n self._reference_connectivity_graph = self._create_reference_connectivity_graph()", "def generate_stokes_function_spaces(self, kind='mini'):\n s = \"::: generating Stokes function spaces :::\"\n print_text(s, cls=self)\n \n # mini elements :\n if kind == 'mini':\n self.Bub = FunctionSpace(self.mesh, \"B\", 4, \n constrained_domain=self.pBC)\n self.MQ = self.Q + self.Bub\n M3 = MixedFunctionSpace([self.MQ]*3)\n self.Q4 = MixedFunctionSpace([M3, self.Q])\n self.Q5 = MixedFunctionSpace([M3, self.Q, self.Q])\n\n # Taylor-Hood elements :\n elif kind == 'th':\n V = VectorFunctionSpace(self.mesh, \"CG\", 2,\n constrained_domain=self.pBC)\n self.Q4 = V * self.Q\n self.Q5 = V * self.Q * self.Q\n \n else:\n s = \">>> METHOD generate_stokes_function_spaces <kind> FIELD <<<\\n\" + \\\n \">>> MAY BE 'mini' OR 'th', NOT '%s'. <<<\" % kind\n print_text(s, 'red', 1)\n sys.exit(1)\n\n s = \" - Stokes function spaces created - \"\n print_text(s, cls=self)", "def _basic_operators_init():\n global BASIC_OPERATORS\n\n BASIC_OPERATORS = {\n \"angle_between\": {\n \"node\": \"angleBetween\",\n \"inputs\": [\n [\"vector1X\", \"vector1Y\", \"vector1Z\"],\n [\"vector2X\", \"vector2Y\", \"vector2Z\"],\n ],\n \"outputs\": [\n [\"angle\"],\n ],\n },\n\n \"average\": {\n \"node\": \"plusMinusAverage\",\n \"inputs\": [\n [\n \"input3D[{array}].input3Dx\",\n \"input3D[{array}].input3Dy\",\n \"input3D[{array}].input3Dz\"\n ],\n ],\n \"outputs\": [\n [\"output3Dx\", \"output3Dy\", \"output3Dz\"],\n ],\n \"operation\": 3,\n },\n\n \"blend\": {\n \"node\": \"blendColors\",\n \"inputs\": [\n [\"color1R\", \"color1G\", \"color1B\"],\n [\"color2R\", \"color2G\", \"color2B\"],\n [\"blender\"],\n ],\n \"outputs\": [\n [\"outputR\", \"outputG\", \"outputB\"],\n ],\n },\n\n \"choice\": {\n \"node\": \"choice\",\n \"inputs\": [\n [\"input[{array}]\"],\n [\"selector\"],\n ],\n \"outputs\": [\n [\"output\"],\n ],\n },\n\n \"clamp\": {\n \"node\": \"clamp\",\n \"inputs\": [\n [\"inputR\", \"inputG\", \"inputB\"],\n [\"minR\", \"minG\", \"minB\"],\n [\"maxR\", \"maxG\", \"maxB\"],\n ],\n \"outputs\": [\n [\"outputR\", \"outputG\", \"outputB\"],\n ],\n },\n\n \"compose_matrix\": {\n \"node\": \"composeMatrix\",\n \"inputs\": [\n [\"inputTranslateX\", \"inputTranslateY\", \"inputTranslateZ\"],\n [\"inputRotateX\", \"inputRotateY\", \"inputRotateZ\"],\n [\"inputScaleX\", \"inputScaleY\", \"inputScaleZ\"],\n [\"inputShearX\", \"inputShearY\", \"inputShearZ\"],\n [\"inputRotateOrder\"],\n [\"useEulerRotation\"],\n ],\n \"outputs\": [\n [\"outputMatrix\"],\n ],\n },\n\n \"decompose_matrix\": {\n \"node\": \"decomposeMatrix\",\n \"inputs\": [\n [\"inputMatrix\"],\n ],\n \"outputs\": [\n [\"outputTranslateX\", \"outputTranslateY\", \"outputTranslateZ\"],\n [\"outputRotateX\", \"outputRotateY\", \"outputRotateZ\"],\n [\"outputScaleX\", \"outputScaleY\", \"outputScaleZ\"],\n [\"outputShearX\", \"outputShearY\", \"outputShearZ\"],\n ],\n \"output_is_predetermined\": True,\n },\n\n \"inverse_matrix\": {\n \"node\": \"inverseMatrix\",\n \"inputs\": [\n [\"inputMatrix\"],\n ],\n \"outputs\": [\n [\"outputMatrix\"],\n ],\n },\n\n \"length\": {\n \"node\": \"distanceBetween\",\n \"inputs\": [\n [\"point1X\", \"point1Y\", \"point1Z\"],\n [\"point2X\", \"point2Y\", \"point2Z\"],\n ],\n \"outputs\": [\n [\"distance\"],\n ],\n },\n\n \"matrix_distance\": {\n \"node\": \"distanceBetween\",\n \"inputs\": [\n 
[\"inMatrix1\"],\n [\"inMatrix2\"],\n ],\n \"outputs\": [\n [\"distance\"],\n ],\n },\n\n \"mult_matrix\": {\n \"node\": \"multMatrix\",\n \"inputs\": [\n [\n \"matrixIn[{array}]\"\n ],\n ],\n \"outputs\": [\n [\"matrixSum\"],\n ],\n },\n\n \"normalize_vector\": {\n \"node\": \"vectorProduct\",\n \"inputs\": [\n [\"input1X\", \"input1Y\", \"input1Z\"],\n [\"normalizeOutput\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n \"operation\": 0,\n },\n\n \"pair_blend\": {\n \"node\": \"pairBlend\",\n \"inputs\": [\n [\"inTranslateX1\", \"inTranslateY1\", \"inTranslateZ1\"],\n [\"inRotateX1\", \"inRotateY1\", \"inRotateZ1\"],\n [\"inTranslateX2\", \"inTranslateY2\", \"inTranslateZ2\"],\n [\"inRotateX2\", \"inRotateY2\", \"inRotateZ2\"],\n [\"weight\"],\n [\"rotInterpolation\"],\n ],\n \"outputs\": [\n [\"outTranslateX\", \"outTranslateY\", \"outTranslateZ\"],\n [\"outRotateX\", \"outRotateY\", \"outRotateZ\"],\n ],\n \"output_is_predetermined\": True,\n },\n\n \"point_matrix_mult\": {\n \"node\": \"pointMatrixMult\",\n \"inputs\": [\n [\"inPointX\", \"inPointY\", \"inPointZ\"],\n [\"inMatrix\"],\n [\"vectorMultiply\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n },\n\n \"remap_value\": {\n \"node\": \"remapValue\",\n \"inputs\": [\n [\"inputValue\"],\n [\"outputMin\"],\n [\"outputMax\"],\n [\"inputMin\"],\n [\"inputMax\"],\n ],\n \"outputs\": [\n [\"outValue\"],\n ],\n },\n\n \"set_range\": {\n \"node\": \"setRange\",\n \"inputs\": [\n [\"valueX\", \"valueY\", \"valueZ\"],\n [\"minX\", \"minY\", \"minZ\"],\n [\"maxX\", \"maxY\", \"maxZ\"],\n [\"oldMinX\", \"oldMinY\", \"oldMinZ\"],\n [\"oldMaxX\", \"oldMaxY\", \"oldMaxZ\"],\n ],\n \"outputs\": [\n [\"outValueX\", \"outValueY\", \"outValueZ\"],\n ],\n },\n\n \"transpose_matrix\": {\n \"node\": \"transposeMatrix\",\n \"inputs\": [\n [\"inputMatrix\"],\n ],\n \"outputs\": [\n [\"outputMatrix\"],\n ],\n },\n }\n\n # Fill BASIC_OPERATORS with condition operations\n cond_operators = [\"eq\", \"ne\", \"gt\", \"ge\", \"lt\", \"le\"]\n for i, condition_operator in enumerate(cond_operators):\n BASIC_OPERATORS[condition_operator] = {\n \"node\": \"condition\",\n \"inputs\": [\n [\"firstTerm\"],\n [\"secondTerm\"],\n ],\n # The condition node is a special case! 
It gets created during\n # the magic-method-comparison and fully connected after being\n # passed on to the condition()-method in this OperatorMetaClass\n \"outputs\": [\n [None],\n ],\n \"operation\": i,\n }\n\n # Fill BASIC_OPERATORS with +,- operations\n for i, add_sub_operator in enumerate([\"add\", \"sub\"]):\n BASIC_OPERATORS[add_sub_operator] = {\n \"node\": \"plusMinusAverage\",\n \"inputs\": [\n [\n \"input3D[{array}].input3Dx\",\n \"input3D[{array}].input3Dy\",\n \"input3D[{array}].input3Dz\"\n ],\n ],\n \"outputs\": [\n [\"output3Dx\", \"output3Dy\", \"output3Dz\"],\n ],\n \"operation\": i + 1,\n }\n\n # Fill BASIC_OPERATORS with *,/,** operations\n for i, mult_div_operator in enumerate([\"mul\", \"div\", \"pow\"]):\n BASIC_OPERATORS[mult_div_operator] = {\n \"node\": \"multiplyDivide\",\n \"inputs\": [\n [\"input1X\", \"input1Y\", \"input1Z\"],\n [\"input2X\", \"input2Y\", \"input2Z\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n \"operation\": i + 1,\n }\n\n # Fill BASIC_OPERATORS with vectorProduct operations\n for i, vector_product_operator in enumerate([\"dot\", \"cross\"]):\n BASIC_OPERATORS[vector_product_operator] = {\n \"node\": \"vectorProduct\",\n \"inputs\": [\n [\"input1X\", \"input1Y\", \"input1Z\"],\n [\"input2X\", \"input2Y\", \"input2Z\"],\n [\"normalizeOutput\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n \"operation\": i + 1,\n }", "def test_Symmetry_nosym_s2p_map(nacl_unitcell_order1: PhonopyAtoms):\n ph = Phonopy(\n nacl_unitcell_order1,\n supercell_matrix=[2, 2, 2],\n primitive_matrix=\"F\",\n is_symmetry=False,\n )\n # for i, v in enumerate(ph.symmetry.symmetry_operations[\"translations\"]):\n # print(\"[\", \", \".join(f\"{x}\" for x in v), \"],\")\n np.testing.assert_equal(\n ph.symmetry.symmetry_operations[\"translations\"],\n [\n [0.0, 0.0, 0.0],\n [0.5, 0.0, 0.0],\n [0.0, 0.5, 0.0],\n [0.5, 0.5, 0.0],\n [0.0, 0.0, 0.5],\n [0.5, 0.0, 0.5],\n [0.0, 0.5, 0.5],\n [0.5, 0.5, 0.5],\n [0.0, 0.25, 0.25],\n [0.5, 0.25, 0.25],\n [0.0, 0.75, 0.25],\n [0.5, 0.75, 0.25],\n [0.0, 0.25, 0.75],\n [0.5, 0.25, 0.75],\n [0.0, 0.75, 0.75],\n [0.5, 0.75, 0.75],\n [0.25, 0.0, 0.25],\n [0.75, 0.0, 0.25],\n [0.25, 0.5, 0.25],\n [0.75, 0.5, 0.25],\n [0.25, 0.0, 0.75],\n [0.75, 0.0, 0.75],\n [0.25, 0.5, 0.75],\n [0.75, 0.5, 0.75],\n [0.25, 0.25, 0.0],\n [0.75, 0.25, 0.0],\n [0.25, 0.75, 0.0],\n [0.75, 0.75, 0.0],\n [0.25, 0.25, 0.5],\n [0.75, 0.25, 0.5],\n [0.25, 0.75, 0.5],\n [0.75, 0.75, 0.5],\n ],\n )", "def main_piece(self, parent, um, info=\"Patron de T-shirt\", front=True):\n piece_group = inkex.etree.SubElement(parent, 'g',\n {inkex.addNS('label', 'inkscape'): info,\n 'transform': '' if front else 'matrix(-1,0,0,1,-34.745039,0)'})\n\n # The template main vertexes absolute positions\n neck_drop = um['neck_rear'] if not front else um['neck_front'] if um['neck_front'] > 0 else um['neck']\n vertexes = {\n 'neck': (um['neck'], 0),\n 'neck_drop': (0, neck_drop),\n 'shoulder': (um['shoulder'], um['shoulder_drop']),\n 'chest': (um['chest'], um['hsp_chest']),\n 'waist': (um['waist'], um['hsp_waist']),\n 'hip': (um['hip'], um['hsp_hip'])\n }\n\n # The Template structure reference\n if self.options.grid:\n reference = inkex.etree.SubElement(piece_group, 'g',\n {inkex.addNS('label', 'inkscape'): info + \"_structure\"})\n\n draw_svg_line([(0, 0), (0, um['hsp_hip'])], reference, self.doted_line)\n draw_svg_line([(0, 0), (um['neck'], 0)], reference, self.doted_line)\n draw_svg_line([(um['neck'], 0), (0, um['hsp_hip'])], 
reference, self.doted_line)\n draw_svg_line([(0, um['shoulder_drop']), (um['shoulder'], 0)], reference, self.doted_line)\n draw_svg_line([(0, um['hsp_chest']), (um['chest'], 0)], reference, self.doted_line)\n draw_svg_line([(0, um['hsp_waist']), (um['waist'], 0)], reference, self.doted_line)\n draw_svg_line([(0, um['hsp_hip']), (um['hip'], 0)], reference, self.doted_line)\n\n for name, vertex in vertexes.items():\n draw_svg_circle(self.getunittouu('4mm'), vertex, reference, self.normal_line)\n\n # Template edge paths\n if self.options.temp:\n\n line_style = self.normal_line if self.options.style == 'print' else self.cut_line\n edge = inkex.etree.SubElement(piece_group, 'g', {inkex.addNS('label', 'inkscape'): info + \"_edge\"})\n\n # Building the path string description 'd'\n path = [['m', vertexes['neck']]]\n path.append(Patron.neckline(um, neck_drop))\n path.append(['l', [0, um['hsp_hip'] - neck_drop]])\n path.append(['l', [um['hip'], 0]])\n path.append(Patron.waist_curve(um))\n path.append(self.sleeve_curve(um))\n path.append(['Z', []])\n\n sewing_attribs = {\n 'style': simplestyle.formatStyle(self.normal_line),\n inkex.addNS('label', 'inkscape'): info + '_sewing',\n 'd': formatPath(path)}\n inkex.etree.SubElement(edge, inkex.addNS('path', 'svg'), sewing_attribs)\n\n path[2][1] = [0, um['hsp_hip'] + self.getunittouu('1.5cm') - neck_drop]\n path[3][1] = [um['hip'], 0, 0, -self.getunittouu('1.5cm')]\n offset_attribs = {'style': simplestyle.formatStyle(line_style),\n inkex.addNS('type', 'sodipodi'): 'inkscape:offset',\n inkex.addNS('radius', 'inkscape'): str(self.getunittouu('1cm')),\n inkex.addNS('original', 'inkscape'): formatPath(path)\n }\n inkex.etree.SubElement(edge, inkex.addNS('path', 'svg'), offset_attribs)", "def createTopology(self):\n\n # find DAG root\n dagRoot = None\n for mote in self.motes:\n if mote.id == 0:\n mote.role_setDagRoot()\n dagRoot = mote\n assert dagRoot\n\n # put DAG root at center of area\n dagRoot.setLocation(x=self.squareSide/2,\n y=self.squareSide/2)\n\n # Copy the contents of the list (but keep the originals) and shuffle them.\n # shuffledMotes = list(self.motes)\n # random.shuffle(shuffledMotes)\n # print shuffledMotes\n\n #### GRID PREPRATIONS.\n dagRootX, dagRootY = dagRoot.getLocation()\n # determine the number of 'square levels'\n numberOfMotes = len(self.motes)\n currentLvl = 0\n sumMotes = 0\n while (sumMotes < numberOfMotes):\n if currentLvl == 0:\n sumMotes += 1\n else:\n sumMotes += currentLvl * 8\n currentLvl += 1\n maxLvl = currentLvl - 1\n # print sumMotes\n coordinatesPerLvl = []\n for lvl in range(0, maxLvl + 1):\n coordinatesThisLvl = []\n if lvl == 0:\n coordinatesThisLvl = [(dagRootX, dagRootY)]\n elif lvl == 1:\n coordinatesThisLvl = self.getSquareCoordinates((dagRootX, dagRootY), self.DISTANCE)\n elif lvl > 1:\n coordinatesPrevLvl = coordinatesPerLvl[lvl - 1]\n coordinatesPrevPrevLvl = coordinatesPerLvl[lvl - 2]\n for coordinatePrevLvl in coordinatesPrevLvl:\n squareCoordinates = self.getSquareCoordinates(coordinatePrevLvl, self.DISTANCE)\n for squareCoordinate in squareCoordinates:\n if not self.isInCoordinates(squareCoordinate,\n coordinatesPrevPrevLvl) and not self.isInCoordinates(\n squareCoordinate, coordinatesPrevLvl) and not self.isInCoordinates(squareCoordinate,\n coordinatesThisLvl):\n coordinatesThisLvl.append(squareCoordinate)\n coordinatesPerLvl.append(coordinatesThisLvl)\n # print 'Level %d: # motes = %d' % (lvl, len(coordinatesThisLvl))\n # print coordinatesThisLvl\n assert len(coordinatesThisLvl) == 1 or 
len(coordinatesThisLvl) == lvl * 8\n\n allCoordinates = [j for i in coordinatesPerLvl for j in i]\n # print allCoordinates\n\n # reposition each mote until it is connected\n countMote = 1 # root 0 already has coordinates\n connectedMotes = [dagRoot]\n for mote in self.motes:\n if mote in connectedMotes:\n continue\n\n connected = False\n while not connected:\n # pick a random location\n\n newX = None\n newY = None\n # if no topology is not given, build the topology yourself\n if SimEngine.SimEngine().ilp_topology is None:\n newX = np.random.normal(allCoordinates[countMote][0], self.DISTANCE / 8, 1)[0]\n newY = np.random.normal(allCoordinates[countMote][1], self.DISTANCE / 8, 1)[0]\n else:\n # if no topology is given, use that topology\n newX = SimEngine.SimEngine().ilp_topology[str(mote.id)]['x']\n newY = SimEngine.SimEngine().ilp_topology[str(mote.id)]['y']\n\n mote.setLocation(\n x=newX,\n y=newY\n )\n\n numStableNeighbors = 0\n\n # count number of neighbors with sufficient RSSI\n for cm in connectedMotes:\n\n rssi = self._computeRSSI(mote, cm)\n mote.setRSSI(cm, rssi)\n cm.setRSSI(mote, rssi)\n\n # save the intial RSSI values for future use in the mobility models\n mote.initialRSSI[cm] = rssi\n cm.initialRSSI[mote] = rssi\n\n if self.settings.individualModulations == 1:\n if rssi > Modulation.Modulation().modulationStableRSSI[Modulation.Modulation().minimalCellModulation[SimSettings.SimSettings().modulationConfig]]:\n # print rssi\n numStableNeighbors += 1\n else:\n if rssi > self.STABLE_RSSI:\n # print rssi\n numStableNeighbors += 1\n\n # make sure it is connected to at least STABLE_NEIGHBORS motes\n # or connected to all the currently deployed motes when the number of deployed motes\n # are smaller than STABLE_NEIGHBORS\n if numStableNeighbors >= self.STABLE_NEIGHBORS or numStableNeighbors == len(connectedMotes):\n connected = True\n\n connectedMotes += [mote]\n countMote += 1\n\n # for each mote, compute PDR to each neighbors\n for mote in self.motes:\n shortestDistance = None\n for m in self.motes:\n if mote == m:\n continue\n if self.settings.individualModulations == 1:\n rssi_value = mote.getRSSI(m)\n for modulationTmp in Modulation.Modulation().modulations:\n # if the rssi value is higher than the minimal signal value required for this neighbor, take that modulation\n # and compute the PDR using that modulation\n if rssi_value > Modulation.Modulation().modulationStableRSSI[modulationTmp]:\n pdr = self._computePDR(mote, m, modulation=modulationTmp)\n mote.setPDR(m, pdr)\n m.setPDR(mote, pdr)\n mote.setModulation(m, modulationTmp)\n m.setModulation(mote, modulationTmp)\n else:\n if mote.getRSSI(m) > mote.minRssi:\n pdr = self._computePDR(mote, m)\n mote.setPDR(m, pdr)\n m.setPDR(mote, pdr)\n # closest distance\n dist = self._computeDistance(mote, m)\n if shortestDistance == None or dist < shortestDistance:\n mote.closestNeighbor = m\n shortestDistance = dist", "def createTopology(self):\n\n # find DAG root\n dagRoot = None\n for mote in self.motes:\n if mote.id == 0:\n mote.role_setDagRoot()\n dagRoot = mote\n assert dagRoot\n\n # put DAG root at center of area\n dagRoot.setLocation(x=self.squareSide/2,\n y=self.squareSide/2)\n\n # Copy the contents of the list (but keep the originals) and shuffle them.\n # shuffledMotes = list(self.motes)\n # random.shuffle(shuffledMotes)\n # print shuffledMotes\n\n #### GRID PREPRATIONS.\n dagRootX, dagRootY = dagRoot.getLocation()\n # determine the number of 'square levels'\n numberOfMotes = len(self.motes)\n currentLvl = 0\n sumMotes = 0\n 
while (sumMotes < numberOfMotes):\n if currentLvl == 0:\n sumMotes += 1\n else:\n sumMotes += currentLvl * 8\n currentLvl += 1\n maxLvl = currentLvl - 1\n # print sumMotes\n coordinatesPerLvl = []\n for lvl in range(0, maxLvl + 1):\n coordinatesThisLvl = []\n if lvl == 0:\n coordinatesThisLvl = [(dagRootX, dagRootY)]\n elif lvl == 1:\n coordinatesThisLvl = self.getSquareCoordinates((dagRootX, dagRootY), self.DISTANCE)\n elif lvl > 1:\n coordinatesPrevLvl = coordinatesPerLvl[lvl - 1]\n coordinatesPrevPrevLvl = coordinatesPerLvl[lvl - 2]\n for coordinatePrevLvl in coordinatesPrevLvl:\n squareCoordinates = self.getSquareCoordinates(coordinatePrevLvl, self.DISTANCE)\n for squareCoordinate in squareCoordinates:\n if not self.isInCoordinates(squareCoordinate,\n coordinatesPrevPrevLvl) and not self.isInCoordinates(\n squareCoordinate, coordinatesPrevLvl) and not self.isInCoordinates(squareCoordinate,\n coordinatesThisLvl):\n coordinatesThisLvl.append(squareCoordinate)\n coordinatesPerLvl.append(coordinatesThisLvl)\n # print 'Level %d: # motes = %d' % (lvl, len(coordinatesThisLvl))\n # print coordinatesThisLvl\n assert len(coordinatesThisLvl) == 1 or len(coordinatesThisLvl) == lvl * 8\n\n allCoordinates = [j for i in coordinatesPerLvl for j in i]\n # print allCoordinates\n\n # reposition each mote until it is connected\n countMote = 1 # root 0 already has coordinates\n connectedMotes = [dagRoot]\n for mote in self.motes:\n if mote in connectedMotes:\n continue\n\n connected = False\n while not connected:\n # pick a random location\n\n newX = np.random.normal(allCoordinates[countMote][0], self.DISTANCE / 8, 1)[0]\n newY = np.random.normal(allCoordinates[countMote][1], self.DISTANCE / 8, 1)[0]\n\n mote.setLocation(\n x=newX,\n y=newY\n )\n\n # mote.setLocation(\n # x = allCoordinates[countMote][0],\n # y = allCoordinates[countMote][1]\n # )\n\n numStableNeighbors = 0\n\n # count number of neighbors with sufficient RSSI\n for cm in connectedMotes:\n\n rssi = self._computeRSSI(mote, cm)\n mote.setRSSI(cm, rssi)\n cm.setRSSI(mote, rssi)\n\n # save the intial RSSI values for future use in the mobility models\n mote.initialRSSI[cm] = rssi\n cm.initialRSSI[mote] = rssi\n\n if rssi > self.STABLE_RSSI:\n # print rssi\n numStableNeighbors += 1\n\n # make sure it is connected to at least STABLE_NEIGHBORS motes\n # or connected to all the currently deployed motes when the number of deployed motes\n # are smaller than STABLE_NEIGHBORS\n if numStableNeighbors >= self.STABLE_NEIGHBORS or numStableNeighbors == len(connectedMotes):\n connected = True\n\n connectedMotes += [mote]\n countMote += 1\n\n # for each mote, compute PDR to each neighbors\n for mote in self.motes:\n shortestDistance = None\n for m in self.motes:\n if mote == m:\n continue\n if mote.getRSSI(m) > mote.minRssi:\n pdr = self._computePDR(mote, m)\n mote.setPDR(m, pdr)\n m.setPDR(mote, pdr)\n # closest distance\n dist = self._computeDistance(mote, m)\n if shortestDistance == None or dist < shortestDistance:\n mote.closestNeighbor = m\n shortestDistance = dist", "def test_construct_subcircuit_layers(self):\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n\r\n def circuit(params):\r\n # section 1\r\n qml.RX(params[0], wires=0)\r\n # section 2\r\n qml.RY(params[1], wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n # section 3\r\n qml.RX(params[2], wires=0)\r\n qml.RY(params[3], wires=1)\r\n qml.RZ(params[4], wires=2)\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n # section 4\r\n qml.RX(params[5], wires=0)\r\n 
qml.RY(params[6], wires=1)\r\n qml.RZ(params[7], wires=2)\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)), qml.expval(qml.PauliX(2))\r\n\r\n circuit = qml.QNode(circuit, dev)\r\n\r\n params = np.ones([8])\r\n tapes = circuit.metric_tensor(params, only_construct=True)\r\n\r\n # this circuit should split into 4 independent\r\n # sections or layers when constructing subcircuits\r\n assert len(tapes) == 4\r\n\r\n # first layer subcircuit\r\n assert len(tapes[0].operations) == 1\r\n assert isinstance(tapes[0].operations[0], qml.Hadamard) # PauliX decomp\r\n\r\n # second layer subcircuit\r\n assert len(tapes[1].operations) == 4\r\n assert isinstance(tapes[1].operations[0], qml.RX)\r\n # PauliY decomp\r\n assert isinstance(tapes[1].operations[1], qml.PauliZ)\r\n assert isinstance(tapes[1].operations[2], qml.S)\r\n assert isinstance(tapes[1].operations[3], qml.Hadamard)\r\n\r\n # # third layer subcircuit\r\n assert len(tapes[2].operations) == 8\r\n assert isinstance(tapes[2].operations[0], qml.RX)\r\n assert isinstance(tapes[2].operations[1], qml.RY)\r\n assert isinstance(tapes[2].operations[2], qml.CNOT)\r\n assert isinstance(tapes[2].operations[3], qml.CNOT)\r\n # PauliX decomp\r\n assert isinstance(tapes[2].operations[4], qml.Hadamard)\r\n # PauliY decomp\r\n assert isinstance(tapes[2].operations[5], qml.PauliZ)\r\n assert isinstance(tapes[2].operations[6], qml.S)\r\n assert isinstance(tapes[2].operations[7], qml.Hadamard)\r\n\r\n # # fourth layer subcircuit\r\n assert len(tapes[3].operations) == 13\r\n assert isinstance(tapes[3].operations[0], qml.RX)\r\n assert isinstance(tapes[3].operations[1], qml.RY)\r\n assert isinstance(tapes[3].operations[2], qml.CNOT)\r\n assert isinstance(tapes[3].operations[3], qml.CNOT)\r\n assert isinstance(tapes[3].operations[4], qml.RX)\r\n assert isinstance(tapes[3].operations[5], qml.RY)\r\n assert isinstance(tapes[3].operations[6], qml.RZ)\r\n assert isinstance(tapes[3].operations[7], qml.CNOT)\r\n assert isinstance(tapes[3].operations[8], qml.CNOT)\r\n # PauliX decomp\r\n assert isinstance(tapes[3].operations[9], qml.Hadamard)\r\n # PauliY decomp\r\n assert isinstance(tapes[3].operations[10], qml.PauliZ)\r\n assert isinstance(tapes[3].operations[11], qml.S)\r\n assert isinstance(tapes[3].operations[12], qml.Hadamard)", "def _calculate_for_one_side(self, equation, side=\"left_side\"):\n max_width = 0\n for entity in equation.get(\"children\"): # iterate through the entities separated by `+`\n single_entity = entity.get(\"children\")[0]\n num_of_children = len(single_entity.get(\"children\")) # children separated by ::\n width, height = self._calculate_nested_complexes(single_entity, num_of_children - 2) # penultimate entity\n single_entity[\"size\"] = {\"width\": width, \"height\": height}\n if width > max_width:\n max_width = width\n self._current_x, self._current_y = 0, 0\n\n # add complex into compartments\n compartment_entity = single_entity.get(\"children\")[num_of_children - 1]\n compartment_name = compartment_entity.get(\"children\")[0][\"entity\"][\"token\"]\n if self.compartments.get(compartment_name) is None:\n self.compartments[compartment_name] = {\"left_side\": [], \"right_side\": []}\n self.compartments[compartment_name][side].append(single_entity)\n else:\n self.compartments[compartment_name][side].append(single_entity)\n\n if side == \"left\":\n self.x_limit = max_width + 2*X_ENTITY_DISTANCE\n else:\n self.x_limit += max_width + 2*X_ENTITY_DISTANCE", "def generate(self, 
analysis):\n\n #analysis = ['p','a','n','i','c','+past form']\n # Let's define our first FST\n\n f1 = FST('morphology-generate')\n \n f1.add_state('1')\n f1.add_state('2')\n f1.add_state('3')\n f1.add_state('4')\n f1.add_state('5') \n f1.add_state('6') #non-c state\n f1.add_state('7') #c state\n f1.add_state('8') #add k\n f1.add_state('9') #+present \n f1.add_state('10') #+past\n \n f1.initial_state = '1'\n #f1.set_final('8')\n f1.set_final('9')\n f1.set_final('10')\n \n #state 1 to 2, and 2 to 3. we don't care about vowel or consonant here\n for letter in list(string.ascii_letters):\n f1.add_arc('1', '2', letter, letter)\n f1.add_arc('2', '3', letter, letter)\n \n #3 to 5 input/output consonants\n vowels = ['a','e','i','o','u','A','E','I','O','U']\n consonants = [c for c in list(string.ascii_letters) if c not in vowels]\n non_c_con = [c for c in consonants if c not in ['c', 'C']]\n for letter in consonants:\n f1.add_arc('3', '5', letter, letter)\n f1.add_arc('5', '5', letter, letter)\n \n #the third and fourth input should be a vowel\n for letter in vowels:\n f1.add_arc('3', '4', letter, letter)\n f1.add_arc('4', '4', letter, letter)\n \n #if the fourth input is a non c consonant, go to 5\n for letter in non_c_con:\n f1.add_arc('4', '5', letter, letter)\n \n #if the input at state 5 is a vowel, go back to 4 \n for letter in vowels:\n f1.add_arc('5', '4', letter, letter)\n \n #if the second last letter is a c, go to 7\n f1.add_arc('4', '7', 'c', 'c')\n \n #add k after 7\n f1.add_arc('7', '8', '', 'k')\n #output nothing from 5 to 8\n f1.add_arc('5', '8', '', '')\n \n f1.add_arc('8','9','+present participle form','ing')\n f1.add_arc('8','10','+past form','ed')\n \n output = f1.transduce(analysis)[0]\n return ''.join(output)", "def geometric_descriptor(element_dict):\n # encode the orbital types\n category = {'s': 1, 'p': 2, 'd': 3, 'f': 4};\n # total number of atoms in a perovskite structure\n N = sum(element_dict.values())\n # obtain array of atomic properties for each element type\n atomic_number_list = []\n atomic_mass_list = []\n atomic_radius_list = []\n mendeleev_no_list = []\n common_oxidation_states_list = []\n Pauling_electronegativity_list = []\n row_list = []\n group_list = []\n block_list = []\n thermal_conductivity_list = []\n boiling_point_list = []\n melting_point_list = []\n average_ionic_radius_list = []\n molar_volume_list = []\n atomic_orbitals_list = []\n for item in element_dict:\n # extract atomic property from pymatgen\n ele = mg.Element(item)\n atomic_number = ele.Z\n atomic_mass = float(str(ele.atomic_mass)[:-4])\n atomic_radius = float(str(ele.atomic_radius)[:-4])\n mendeleev_no = ele.mendeleev_no\n common_oxidation_states = ele.common_oxidation_states[0]\n Pauling_electronegativity = ele.X\n row = ele.row\n group = ele.group\n block = ele.block\n thermal_conductivity = float(str(ele.thermal_conductivity)[:-12])\n boiling_point = float(str(ele.boiling_point)[: -2])\n melting_point = float(str(ele.melting_point)[: -2])\n average_ionic_radius = float(str(ele.average_ionic_radius)[:-4])\n molar_volume = float(str(ele.molar_volume)[: -5])\n if '6s' in ele.atomic_orbitals.keys():\n atomic_orbitals = ele.atomic_orbitals['6s']\n elif '4s' in ele.atomic_orbitals.keys():\n atomic_orbitals = ele.atomic_orbitals['4s']\n else:\n atomic_orbitals = ele.atomic_orbitals['2s']\n # calculate the array of atomic properties for all atoms \n atomic_number_list += [atomic_number]*element_dict[item]\n atomic_mass_list += [atomic_mass]*element_dict[item]\n atomic_radius_list += 
[atomic_radius]*element_dict[item]\n mendeleev_no_list += [mendeleev_no]*element_dict[item]\n common_oxidation_states_list += [common_oxidation_states]*element_dict[item]\n Pauling_electronegativity_list += [Pauling_electronegativity]*element_dict[item]\n row_list += [row]*element_dict[item]\n group_list += [group]*element_dict[item]\n block_list += [category[block]]*element_dict[item]\n thermal_conductivity_list += [thermal_conductivity]*element_dict[item]\n boiling_point_list += [boiling_point]*element_dict[item]\n melting_point_list += [melting_point]*element_dict[item]\n average_ionic_radius_list += [average_ionic_radius]*element_dict[item]\n molar_volume_list += [molar_volume]*element_dict[item]\n atomic_orbitals_list += [atomic_orbitals]*element_dict[item]\n return [generalized_mean(np.array(atomic_number_list), 1, N)] + [generalized_mean(np.array(atomic_radius_list), 1, N)] + [generalized_mean(np.array(mendeleev_no_list), 1, N)] + [generalized_mean(np.array(common_oxidation_states_list), 1, N)] + [generalized_mean(np.array(Pauling_electronegativity_list), 1, N)] + [generalized_mean(np.array(thermal_conductivity_list), 1, N)] + [generalized_mean(np.array(average_ionic_radius_list), 1, N)] + [generalized_mean(np.array(atomic_orbitals_list), 1, N)]", "def build_boundary_PSLGs(domain, sphere_pieces, ds):\n # TODO : Break up this function a bit.\n\n def compile_points_edges(sphere_pieces):\n \"\"\"Produces consolidated arrays containing all SpherePiece vertices and\n edges.\n :param sphere_pieces list: list of SpherePiece objects.\n :return: tuple of arrays of vertex coordinates and topology.\n :rtype: tuple.\n \"\"\"\n def build_edge_list(tris, points):\n v_adj = np.zeros(2*[points.shape[0]], dtype=np.int32)\n v_adj[tris[:,0], tris[:,1]] = v_adj[tris[:,1], tris[:,0]] = 1\n v_adj[tris[:,1], tris[:,2]] = v_adj[tris[:,2], tris[:,1]] = 1\n v_adj[tris[:,2], tris[:,0]] = v_adj[tris[:,0], tris[:,2]] = 1\n return np.array(np.where(np.triu(v_adj) == 1), dtype=np.int32).T\n\n vcount = 0\n all_points = []\n all_edges = []\n for points, tris in [(p.points, p.tris) for p in sphere_pieces]:\n edges = build_edge_list(tris, points)\n edges += vcount\n vcount += len(points)\n all_points.append(points)\n all_edges.append(edges)\n return np.vstack(all_points), np.vstack(all_edges)\n\n def refined_perimeter(perim, axis, ds):\n \"\"\"Adds additional vertices to subdivide perimeter edge segments.\n :param perim numpy.ndarray: array of vertices intersecting perimeter.\n :param axis int: ordinal value of axis 0:x, 1:y, 2:z.\n :param ds float: characteristic segment length.\n :return: array of vertices intersecting refined perimeter.\n :rtype: numpy.ndarray.\n \"\"\"\n\n def filter_colocated_points(perim, axis):\n delta = np.diff(perim[:,axis])\n keep_idx = np.hstack(([0], np.where(~np.isclose(delta,0.))[0] + 1))\n return perim[keep_idx]\n\n perim = filter_colocated_points(perim, axis)\n refined_points = [perim[0]]\n for e in [[i, i+1] for i in range(perim.shape[0]-1)]:\n e_len = perim[e[1], axis] - perim[e[0], axis]\n ne = int(np.ceil(e_len / ds))\n if ne > 1:\n dse = e_len / ne\n add_points = np.zeros((ne,3))\n add_points[:,axis] = dse * np.arange(1,ne+1)\n refined_points.append(perim[e[0]] + add_points)\n return np.vstack(refined_points)\n\n def add_holes(sphere_pieces):\n \"\"\"Add hole points to boundary PSLGs.\n :param sphere_pieces list: list of SpherePiece objects.\n :return: array of hole point vertices.\n :rtype: numpy.ndarray.\n \"\"\"\n # TODO : this is a placeholder function. 
Ultimately holes need to\n # : be created at the point when a sphere is split into pieces.\n holes = [[] for _ in range(3)]\n for i in range(3):\n j, k = (i+1)%3, (i+2)%3\n for points, tris in [(p.points, p.tris) for p in sphere_pieces]:\n points_ax = points[np.isclose(points[:,i], 0.)]\n if points_ax.shape[0]:\n holes[i].append([\n 0.5 * (points_ax[:,j].max() + points_ax[:,j].min()),\n 0.5 * (points_ax[:,k].max() + points_ax[:,k].min())\n ])\n holes[i] = np.vstack(holes[i]) if len(holes[i])\\\n else np.empty((0,2), dtype=np.float64)\n return holes\n\n def reindex_edges(points, points_ax, edges_ax):\n \"\"\"Reindexes edges along a given axis.\n :param points numpy.ndarray: all point coordinates.\n :param points_ax numpy.ndarray: indices of points intersecting boundary.\n :param edges_ax numpy.ndarray: edges interecting boundary.\n :return: tuple of arrays of point coordinates and reindexed edges.\n :rtype: tuple.\n \"\"\"\n points_segment = points[points_ax]\n reindex = {old: new for new, old in enumerate(np.where(points_ax)[0])}\n for i, (v0, v1) in enumerate(edges_ax):\n edges_ax[i] = np.array([reindex[v0], reindex[v1]])\n return points_segment, edges_ax\n\n def build_perim_edge_list(points_pieces, perim_refined):\n \"\"\"Construct list of perimeter edges for boundary.\n :param points_pieces numpy.ndarray: sphere points intersecting boundary.\n :param perim_refined numpy.ndarray: refined perimeter points.\n :return: array of perimeter edge topology for boundary.\n :rtype: numpy.ndarray.\n \"\"\"\n # Need to adjust edge indices for perimeter segments\n v_count = len(points_pieces)\n perim_edges = 4 * [None]\n for j in range(4):\n v_count_perim = len(perim_refined[j])\n perim_vidx = np.empty(v_count_perim, dtype=np.int32)\n mask = np.full(v_count_perim, True)\n v_count_new = 0\n for i, p in enumerate(perim_refined[j]):\n vidx = np.where(np.isclose(npl.norm(points_pieces - p, axis=1), 0.))[0]\n if len(vidx):\n mask[i] = False\n perim_vidx[i] = vidx[0]\n else:\n perim_vidx[i] = v_count + v_count_new\n v_count_new += 1\n perim_edges[j] = np.array([\n [perim_vidx[k], perim_vidx[k+1]] for k in range(v_count_perim-1)\n ])\n perim_refined[j] = perim_refined[j][mask]\n v_count += v_count_new\n return perim_edges\n\n def add_point_plane_intersections(hole_pieces, axis, domain):\n \"\"\"Adds points for sphere which just \"touch\" the boundary at a single point.\n :param hole_pieces list: list of SpherePiece objects.\n :param axis int: ordinal value of axis 0:x, 1:y, 2:z.\n :param domain Domain: spatial domain for mesh.\n :return: array of points touching boundary (may be empty).\n :rtype: numpy.ndarray.\n \"\"\"\n added_points = []\n for hole_piece in hole_pieces:\n if np.isclose(hole_piece.sphere.min[axis], 0.):\n close = np.where(np.isclose(hole_piece.points[:,axis], 0.))[0]\n for idx in close:\n added_points.append(hole_piece.points[idx])\n elif np.isclose(hole_piece.sphere.max[axis], domain.L[axis]):\n close = np.where(np.isclose(hole_piece.points[:,axis], domain.L[axis]))[0]\n trans = np.zeros(3)\n trans[axis] = -domain.L[axis]\n for idx in close:\n added_points.append(hole_piece.points[idx] + trans)\n if added_points:\n return np.vstack(added_points)\n else:\n return np.empty((0,3), dtype=np.float64)\n\n L = domain.L\n PBC = domain.PBC\n\n sphere_pieces_holes = [p for p in sphere_pieces if p.is_hole]\n sphere_pieces = [p for p in sphere_pieces if not p.is_hole]\n\n # TODO : Optimise this by compliling only edges from sphere piece\n # : intersection loops rather than considering all edges.\n if 
len(sphere_pieces):\n points, edges = compile_points_edges(sphere_pieces)\n else:\n points = np.empty((0,3), dtype=np.float64)\n edges = np.empty((0,2), dtype=np.int32)\n\n # Get edges and points on each boundary\n edges_ax = [\n edges[np.all(np.isclose(points[edges,i], 0.), axis=1)]\n for i in range(3)\n ]\n points_ax = [np.isclose(points[:,i], 0.) for i in range(3)]\n\n # Fix boundary points to exactly zero\n for i in range(3):\n points[(points_ax[i], i)] = 0.\n\n # reindex edge vertices\n points_pieces, edges_ax = [list(x) for x in zip(*[\n reindex_edges(points, points_ax[i], edges_ax[i]) for i in range(3)\n ])]\n perim = []\n perim_refined = []\n perim_segs = np.array([[0, 1], [1, 2], [2, 3], [3, 0]])\n\n perim_edges = []\n\n for i in range(3):\n perim.append(4 * [None])\n perim_refined.append(4 * [None])\n # Rotate coordinate system by cyclic permutation of axes\n points_pieces[i][:,(0,1,2)] = points_pieces[i][:,(i,(i+1)%3,(i+2)%3)]\n\n corners = np.array([\n [0., 0., 0.], [0., L[1], 0.], [0., L[1], L[2]], [0., 0., L[2]]\n ])\n\n points_on_perim = 4 * [None]\n points_on_perim[0] = np.isclose(points_pieces[i][:, 2], 0.)\n points_on_perim[1] = np.isclose(points_pieces[i][:, 1], L[1])\n points_on_perim[2] = np.isclose(points_pieces[i][:, 2], L[2])\n points_on_perim[3] = np.isclose(points_pieces[i][:, 1], 0.)\n\n for j in range(4):\n axis = 1 + j % 2\n if PBC[axis] and j >= 2:\n continue\n perim[i][j] = np.vstack(\n (corners[perim_segs[j]], points_pieces[i][points_on_perim[j]])\n )\n if PBC[axis]:\n translate = np.array([0., 0., -L[2]]) if axis == 1\\\n else np.array([0., L[1], 0.])\n translated_points = points_pieces[i][points_on_perim[j + 2]]\\\n + translate\n perim[i][j] = np.vstack((perim[i][j], translated_points))\n perim[i][j] = perim[i][j][perim[i][j][:, axis].argsort()]\n perim_refined[i][j] = refined_perimeter(perim[i][j], axis, ds)\n if PBC[axis]:\n perim_refined[i][j+2] = perim_refined[i][j] - translate\n\n # Add the corner points so that duplicate coners can be filtered out\n # in build_perim_edge_list\n points_pieces[i] = np.append(points_pieces[i], corners, axis=0)\n\n perim_edges.append(\n build_perim_edge_list(points_pieces[i], perim_refined[i])\n )\n\n # Put coordinates back in proper order for this axis\n points_pieces[i][:,(i,(i+1)%3,(i+2)%3)] = points_pieces[i][:,(0,1,2)]\n\n L = L[np.newaxis, (1, 2, 0)][0]\n\n # TODO : refactor so boundary PSLG is built during above loop avoiding subsequent loops\n\n # add holes\n pslg_holes = add_holes(sphere_pieces)\n\n # Add points which lie on the boundaries from hole particles\n added_points = [\n add_point_plane_intersections(sphere_pieces_holes, i, domain)\n for i in range(3)\n ]\n\n # Group together segment and perimeter points and edges for each axis\n boundary_pslgs = []\n for i in range(3):\n pslg_points = np.vstack((\n points_pieces[i][:,((i+1)%3,(i+2)%3)],\n np.vstack(perim_refined[i])[:,(1,2)],\n added_points[i][:,((i+1)%3,(i+2)%3)]\n ))\n pslg_edges = np.vstack((edges_ax[i], np.vstack(perim_edges[i])))\n boundary_pslgs.append(PSLG(pslg_points, pslg_edges, pslg_holes[i]))\n return boundary_pslgs", "def regular_representation(self, side=\"left\"):\n S = self.basis().keys()\n return S.regular_representation(self.base_ring(), side)", "def extract_operator_products(e, independent=False):\n ops = []\n\n if isinstance(e, Operator):\n ops.append(e)\n\n elif isinstance(e, Add):\n for arg in e.args:\n ops += extract_operator_products(arg, independent=independent)\n\n elif isinstance(e, Mul):\n c, o = split_coeff_operator(e)\n if 
o != 1:\n ops.append(o)\n else:\n if debug:\n print(\"Unrecongized type: %s: %s\" % (type(e), str(e)))\n\n no_ops = []\n for op in ops:\n no_op = normal_ordered_form(op.expand(), independent=independent)\n if isinstance(no_op, (Mul, Operator, Pow)):\n no_ops.append(no_op)\n elif isinstance(no_op, Add):\n for sub_no_op in extract_operator_products(no_op, independent=independent):\n no_ops.append(sub_no_op)\n else:\n raise ValueError(\"Unsupported type in loop over ops: %s: %s\" %\n (type(no_op), no_op))\n\n return list(set(no_ops))", "def test_bksf_edge_op_aij(self):\n edge_matrix = np.triu(np.ones((4, 4)))\n edge_list = np.array(np.nonzero(np.triu(edge_matrix) - np.diag(np.diag(edge_matrix))))\n qterm_a01 = _edge_operator_aij(edge_list, 0, 1)\n qterm_a02 = _edge_operator_aij(edge_list, 0, 2)\n qterm_a03 = _edge_operator_aij(edge_list, 0, 3)\n qterm_a12 = _edge_operator_aij(edge_list, 1, 2)\n qterm_a13 = _edge_operator_aij(edge_list, 1, 3)\n qterm_a23 = _edge_operator_aij(edge_list, 2, 3)\n\n ref_qterm_a01 = SparsePauliOp(\"IIIIIX\")\n ref_qterm_a02 = SparsePauliOp(\"IIIIXZ\")\n ref_qterm_a03 = SparsePauliOp(\"IIIXZZ\")\n ref_qterm_a12 = SparsePauliOp(\"IIXIZZ\")\n ref_qterm_a13 = SparsePauliOp(\"IXZZIZ\")\n ref_qterm_a23 = SparsePauliOp(\"XZZZZI\")\n\n with self.subTest(\"Test edge operator a01\"):\n self.assertEqual(qterm_a01, ref_qterm_a01)\n with self.subTest(\"Test edge operator a02\"):\n self.assertEqual(qterm_a02, ref_qterm_a02)\n with self.subTest(\"Test edge operator a03\"):\n self.assertEqual(qterm_a03, ref_qterm_a03)\n with self.subTest(\"Test edge operator a12\"):\n self.assertEqual(qterm_a12, ref_qterm_a12)\n with self.subTest(\"Test edge operator a13\"):\n self.assertEqual(qterm_a13, ref_qterm_a13)\n with self.subTest(\"Test edge operator a23\"):\n self.assertEqual(qterm_a23, ref_qterm_a23)", "def _gen_qiskit_gateset(q_circ):\n return {\n 'H': q_circ.h,\n 'X': q_circ.x,\n 'Y': q_circ.y,\n 'Z': q_circ.z,\n 'SWAP': q_circ.swap,\n 'I': q_circ.iden,\n 'S': q_circ.s,\n 'D-S': q_circ.sdg,\n 'T': q_circ.t,\n 'D-T': q_circ.tdg,\n 'RX': q_circ.rx,\n 'RY': q_circ.ry,\n 'RZ': q_circ.rz,\n 'C-H': q_circ.ch,\n 'CNOT': q_circ.cx,\n 'C-Y': q_circ.cy,\n 'CSIGN': q_circ.cz,\n 'C-RZ': q_circ.crz,\n 'CCNOT': q_circ.ccx,\n 'C-SWAP': q_circ.cswap,\n 'U': q_circ.u3,\n 'U3': q_circ.u3,\n 'U2': q_circ.u2,\n 'U1': q_circ.u1,\n 'U0': q_circ.iden,\n 'PH': q_circ.rz,\n 'RXX': q_circ.rxx,\n 'RZZ': q_circ.rzz,\n 'R': q_circ.r,\n 'MS': q_circ.ms\n }", "def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)", "def glueEmH( Ja, Jf, truncNum = scipy.inf ):\n w, v = truncBasisH( Ja, truncNum )\n sPlus, sMinus, sZ = sPlusAndMinusAndZ( v )\n \n H1 = scipy.zeros( ( len(w)**4, len(w)**4 ) )\n \n for n in range( len(w)**4 ):\n # Diagonal previous generation contributions\n o = oct(n)[-4:].zfill(4)\n o = [int(char) for char in o]\n o_A, o_B, o_C, o_D = o\n \n H1[n, n] += scipy.sum( [ w[ i ] for i in o ] )\n \n # Edge terms\n for np in range( n, len(w)**4 ):\n op = oct(np)[-4:].zfill(4)\n op = [int(char) for char in op]\n op_A, op_B, op_C, op_D = op\n \n x = 0.\n if ( (o_B == op_B) and (o_C == op_C) ):\n x += -Jf * ( .5 * ( sPlus[0][o_A, op_A] * sMinus[0][o_D, op_D] + sMinus[0][o_A, op_A] * sPlus[0][o_D,op_D] ) + sZ[0][o_A, op_A] * sZ[0][o_D, op_D] )\n if ( (o_C == op_C) and (o_A == op_A) ):\n x += -Jf * ( .5 
* ( sPlus[1][o_B, op_B] * sMinus[1][o_D, op_D] + sMinus[1][o_B, op_B] * sPlus[1][o_D,op_D] ) + sZ[1][o_B, op_B] * sZ[1][o_D, op_D] )\n if ( (o_A == op_A) and (o_B == op_B) ):\n x += -Jf * ( .5 * ( sPlus[2][o_C, op_C] * sMinus[2][o_D, op_D] + sMinus[2][o_C, op_C] * sPlus[1][o_D,op_D] ) + sZ[1][o_C, op_C] * sZ[2][o_D, op_D] )\n \n H1[n, np] = x\n H1[np, n] = x\n \n return H1", "def build_from_problem(self, \n entity: Entity,\n graph: DeductionGraph):\n lines = [e for e in entity.children if type(e) == Line]\n cols = []\n for c in graph.conditions:\n if isinstance(c, RelationshipBased) and \\\n isinstance(c.relationship, Collineation):\n cols.append(c.relationship)\n \n self.collineation = []\n for col in cols:\n self.collineation.append(''.join([p.id for p in col.points]))\n\n for line in lines:\n end1, end2 = line.end1, line.end2\n duplicate = False\n for col in self.collineation:\n if col.find(end1.id) != -1 and col.find(end2.id) != -1:\n duplicate = True\n break\n if not duplicate:\n self.collineation.append(end1.id + end2.id)\n\n # Key of _angle_alis dict is standard name, value is angle entity.\n self._angle_alis = {}\n for e in entity.children:\n if type(e) == Angle:\n std_name = self.angle_alis(e)\n self._angle_alis[std_name] = e", "def shape_element(element, node_attr_fields=NODE_FIELDS, way_attr_fields=WAY_FIELDS,\n problem_chars=PROBLEMCHARS, default_tag_type='regular'):\n\n node_attribs = {}\n way_attribs = {}\n way_nodes = []\n tags = [] # Handle secondary tags the same way for both node and way elements\n\n# Node ---------------------------\n if element.tag == 'node':\n for attribute in node_attr_fields: #pop kv's for nodes\n node_attribs[attribute] = element.attrib[attribute]\n for secondary_elem in element.findall('tag'): #pop secondary node tags\n # secondary_elem = clean_data(secondary_elem)\n if secondary_elem.attrib['k'] == \"addr:street\" :\n name = secondary_elem.attrib['v']\n m = street_type_re.search(name) \n if m: \n street_type = m.group() \n if street_type in mapping_street: \n name = re.sub(street_type_re, mapping_street[street_type], name)\n secondary_elem.attrib['v'] = name\n elif secondary_elem.attrib['k'] == \"cuisine\" :\n name = secondary_elem.attrib['v']\n w = cuisine_type_re.search(name) \n if w: \n cuisine_type = w.group() \n if cuisine_type in mapping: \n name = re.sub(cuisine_type_re, mapping[cuisine_type], name) \n secondary_elem.attrib['v'] = name\n tag_append = find_tags(secondary_elem, element.attrib['id'])\n if tag_append: #if tag_append is none it will skip the line (if prob char occurs)\n tags.append(tag_append)\n return {'node': node_attribs, 'node_tags': tags}\n #Way---------------------------\n elif element.tag == 'way':\n for attribute in way_attr_fields:\n way_attribs[attribute] = element.attrib[attribute]\n for secondary_elem in element.findall('tag'):\n # secondary_elem = clean_data(secondary_elem)\n if secondary_elem.attrib['k'] == \"addr:street\" :\n name = secondary_elem.attrib['v']\n m = street_type_re.search(name) \n if m: \n street_type = m.group() \n if street_type in mapping_street: \n name = re.sub(street_type_re, mapping_street[street_type], name)\n secondary_elem.attrib['v'] = name\n elif secondary_elem.attrib['k'] == \"cuisine\":\n name = secondary_elem.attrib['v']\n w = cuisine_type_re.search(name) \n if w: \n cuisine_type = w.group() \n if cuisine_type in mapping: \n name = re.sub(cuisine_type_re, mapping[cuisine_type], name) \n secondary_elem.attrib['v'] = name\n tag_append = find_tags(secondary_elem, element.attrib['id'])\n 
if tag_append:\n tags.append(tag_append)\n position = 0\n for secondary_elem in element.findall('nd'):\n way_nodes_append = {'id' : element.attrib['id'],\n 'node_id' : secondary_elem.attrib['ref'],\n 'position' : position\n }\n position != 1\n way_nodes.append(way_nodes_append)\n return{'way': way_attribs, 'way_nodes': way_nodes, 'way_tags' : tags}", "def createTopology(self):\n\n # find DAG root\n dagRoot = None\n for mote in self.motes:\n if mote.id == 0:\n mote.role_setDagRoot()\n dagRoot = mote\n assert dagRoot\n\n if self.settings.mobilityModel == 'RPGM':\n # put DAG root at center of area\n dagRoot.setLocation(x=SimEngine.SimEngine().targets[0][0],\n y=SimEngine.SimEngine().targets[0][1])\n else:\n # put DAG root at center of area\n dagRoot.setLocation(x=self.squareSide/2,\n y=self.squareSide/2)\n\n # reposition each mote until it is connected\n connectedMotes = [dagRoot]\n motes_shuffled = copy.copy(self.motes)\n random.shuffle(motes_shuffled) # shuffle them around\n\n # for mote in self.motes:\n for mote in motes_shuffled:\n stableNeighbors = []\n if mote in connectedMotes:\n continue\n\n connected = False\n while not connected:\n # pick a random location\n # mote.setLocation(x=self.squareSide*random.random(),\n # y=self.squareSide*random.random())\n #\n # mote.setLocation(\n # x=self.settings.squareSide * random.random(),\n # y=self.settings.squareSide * random.random()\n # )\n\n newX = None\n newY = None\n # if no topology is not given, build the topology yourself\n if SimEngine.SimEngine().ilp_topology is None:\n newX = self.settings.squareSide * random.random()\n newY = self.settings.squareSide * random.random()\n else:\n # if no topology is given, use that topology\n newX = SimEngine.SimEngine().ilp_topology[str(mote.id)]['x']\n newY = SimEngine.SimEngine().ilp_topology[str(mote.id)]['y']\n\n mote.setLocation(\n x=newX,\n y=newY\n )\n\n numStableNeighbors = 0\n stableNeighbors = []\n\n # tryAgain = False\n # for cm in connectedMotes:\n # rssi = self._computeRSSI(mote, cm)\n # if rssi > -110:\n # tryAgain = True\n\n # if not tryAgain:\n # count number of neighbors with sufficient RSSI\n for cm in connectedMotes:\n\n rssi = self._computeRSSI(mote, cm)\n mote.setRSSI(cm, rssi)\n cm.setRSSI(mote, rssi)\n\n # save the intial RSSI values for future use in the mobility models\n mote.initialRSSI[cm] = rssi\n cm.initialRSSI[mote] = rssi\n\n if self.settings.individualModulations == 1:\n if self.rssiToPdr(rssi, modulation=Modulation.Modulation().minimalCellModulation[SimSettings.SimSettings().modulationConfig]) > self.settings.stableNeighborPDR:\n # if rssi > Modulation.Modulation().modulationStableRSSI[Modulation.Modulation().minimalCellModulation[SimSettings.SimSettings().modulationConfig]]:\n # print rssi\n numStableNeighbors += 1\n stableNeighbors.append(cm.id)\n else:\n if rssi > self.STABLE_RSSI:\n # print rssi\n numStableNeighbors += 1\n\n # make sure it is connected to at least STABLE_NEIGHBORS motes\n # or connected to all the currently deployed motes when the number of deployed motes\n # are smaller than STABLE_NEIGHBORS\n if numStableNeighbors >= self.stable_neighbors or numStableNeighbors == len(connectedMotes):\n print 'For mote {0}, stable neighbors {1}'.format(mote.id, stableNeighbors)\n connected = True\n\n connectedMotes += [mote]\n\n # for each mote, compute PDR to each neighbors\n for mote in self.motes:\n for m in self.motes:\n if mote == m:\n continue\n\n # set the distance to all other motes\n distance = math.sqrt((m.x - mote.x) ** 2 + (m.y - mote.y) ** 2)\n 
m.set_distance(mote, distance)\n mote.set_distance(m, distance)\n # print 'mote %d to mote %d: %.4f' % (m.id, mote.id, distance)\n if self.settings.individualModulations == 1:\n rssi_value = mote.getRSSI(m)\n # for modulationTmp in Modulation.Modulation().modulations:\n # if self.settings.ilpfile is not None:\n # ## I am not going to set this as this should be set by the ILP\n # pass\n # else:\n # # if the rssi value is higher than the minimal signal value required for this neighbor, take that modulation\n # # and compute the PDR using that modulation\n # pass\n # # if rssi_value > Modulation.Modulation().modulationStableRSSI[modulationTmp]:\n # # pdr = self._computePDR(mote, m, modulation=modulationTmp)\n # # mote.setPDR(m, pdr)\n # # m.setPDR(mote, pdr)\n # # mote.setModulation(m, modulationTmp)\n # # m.setModulation(mote, modulationTmp)\n else:\n if mote.getRSSI(m) > mote.minRssi:\n pdr = self._computePDR(mote, m)\n mote.setPDR(m, pdr)\n m.setPDR(mote, pdr)", "def generate_irreducible_Wedge(self,nx_gridsize,fx,fy):\n\n integers = N.arange(nx_gridsize+1)\n\n list_triangles = []\n\n ki = []\n kj = []\n\n triangles_indices = []\n\n # create point array\n for j in integers:\n for i in integers[:j+1]:\n ki.append(i)\n kj.append(j)\n\n list_k = N.array([N.array(ki)*fx/nx_gridsize, N.array(kj)*fy/nx_gridsize]).transpose()\n\n # create connections\n for i in integers[:-1]:\n\n j = i\n I1 = get_index(i,j+1)\n I2 = get_index(i,j)\n I3 = get_index(i+1,j+1)\n triangles_indices.append([I1,I2,I3])\n\n for j in integers[i+1:-1]:\n\n I1 = get_index(i,j+1)\n I2 = get_index(i,j)\n I3 = get_index(i+1,j+1)\n triangles_indices.append([I1,I2,I3])\n\n I1 = get_index(i+1,j)\n I2 = get_index(i+1,j+1)\n I3 = get_index(i,j)\n\n triangles_indices.append([I1,I2,I3])\n\n\n\n triangles_indices = N.array(triangles_indices )\n irreducible_wedge = Wedge(list_k=list_k,triangles_indices=triangles_indices)\n\n return irreducible_wedge", "def buildSystem(self, shape ):\n\t\tfor s in self.scatters:\n\t\t\tfor i,n in enumerate( s._nodes ):\n\t\t\t\tsoftMod = sf.SoftModCluster( 'lip_' + '_%i'%i + '_SFM', shape )\n\t\t\t\tsoftMod.create( n.a.t.v[0] )", "def _eval_legpoly(self, t_0, t_s, p_0, p_s, geometry=None):\n\n assert geometry is not None, \"Geometry needs to be specified!\"\n\n theta_0 = sp.Symbol(\"theta_0\")\n theta_s = sp.Symbol(\"theta_s\")\n theta_ex = sp.Symbol(\"theta_ex\")\n phi_0 = sp.Symbol(\"phi_0\")\n phi_s = sp.Symbol(\"phi_s\")\n phi_ex = sp.Symbol(\"phi_ex\")\n\n res = self.legexpansion(t_0, t_s, p_0, p_s, geometry).xreplace(\n {\n theta_0: t_0,\n theta_s: t_s,\n phi_0: p_0,\n phi_s: p_s,\n theta_ex: t_s,\n phi_ex: p_s,\n }\n )\n return res.evalf()", "def find_symmetry(self):\n from spglib import get_spacegroup\n cell = ( self.lattice, self.fractional_coordinates, self.atomic_nos )\n self.spacegroup = get_spacegroup(cell, symmprec=1e-5)\n print(\"Symmetry space group is\", self.spacegroup)", "def test_circuit_decompose(self):\n dec = TwoQubitDecomposeUpToDiagonal()\n u4 = scipy.stats.unitary_group.rvs(4, random_state=47)\n dmat, circ2cx = dec(u4)\n\n qc1 = QuantumCircuit(2)\n qc1.append(UnitaryGate(u4), range(2))\n\n qc2 = QuantumCircuit(2)\n qc2.compose(circ2cx, range(2), front=False, inplace=True)\n qc2.append(UnitaryGate(dmat), range(2))\n\n self.assertEqual(Operator(u4), Operator(qc1))\n self.assertEqual(Operator(qc1), Operator(qc2))", "def createTopology(self):\n\n # find DAG root\n dagRoot = None\n for mote in self.motes:\n if mote.id == 0:\n mote.role_setDagRoot()\n dagRoot = mote\n assert 
dagRoot\n\n if self.settings.mobilityModel == 'RPGM':\n # put DAG root at center of area\n dagRoot.setLocation(x=SimEngine.SimEngine().targets[0][0],\n y=SimEngine.SimEngine().targets[0][1])\n else:\n # put DAG root at center of area\n dagRoot.setLocation(x=self.squareSide/2,\n y=self.squareSide/2)\n\n # reposition each mote until it is connected\n connectedMotes = [dagRoot]\n for mote in self.motes:\n if mote in connectedMotes:\n continue\n\n connected = False\n while not connected:\n # pick a random location\n mote.setLocation(x=self.squareSide*random.random(),\n y=self.squareSide*random.random())\n\n # if mote.id == 1:\n # mote.setLocation(\n # x=self.squareSide / 2.5 + 0.02,\n # y=self.squareSide / 2.5 + 0.3\n # )\n # elif mote.id == 2:\n # mote.setLocation(\n # x=self.squareSide / 2.5 + 0.3,\n # y=self.squareSide / 2.5 + 0.3\n # )\n #\n # elif mote.id == 3:\n # mote.setLocation(\n # x=self.squareSide / 2.5 + 0.1,\n # y=self.squareSide / 2.5 + 0.4\n # )\n\n # elif mote.id == 4:\n # mote.setLocation(\n # x=self.squareSide / 2.5 + 0.2,\n # y=self.squareSide / 2.5 + 0.65\n # )\n # else:\n\n mote.setLocation(\n x=self.settings.squareSide * random.random(),\n y=self.settings.squareSide * random.random()\n )\n\n numStableNeighbors = 0\n\n # count number of neighbors with sufficient RSSI\n for cm in connectedMotes:\n\n rssi = self._computeRSSI(mote, cm)\n mote.setRSSI(cm, rssi)\n cm.setRSSI(mote, rssi)\n\n # save the intial RSSI values for future use in the mobility models\n mote.initialRSSI[cm] = rssi\n cm.initialRSSI[mote] = rssi\n\n if rssi > self.STABLE_RSSI:\n numStableNeighbors += 1\n\n # make sure it is connected to at least stable_neighbors motes\n # or connected to all the currently deployed motes when the\n # number of deployed motes are smaller than stable_neighbors\n if (numStableNeighbors >= self.stable_neighbors or\n numStableNeighbors == len(connectedMotes)):\n print 'moteid %d, mote x %.4f, mote y %.4f: valid %s' % (mote.id, mote.x, mote.y , SimEngine.SimEngine().checkValidPosition(mote.x, mote.y, countSquare=True, placement=True))\n if self.settings.mobilityModel == 'RPGM' and SimEngine.SimEngine().checkValidPosition(mote.x, mote.y, countSquare=True, placement=True):\n connected = True\n elif self.settings.mobilityModel != 'RPGM':\n connected = True\n\n connectedMotes += [mote]\n\n # self.motes[3].setRSSI(self.motes[0], -96)\n # self.motes[0].setRSSI(self.motes[3], -96)\n\n # for each mote, compute PDR to each neighbors\n for mote in self.motes:\n for m in self.motes:\n if mote == m:\n continue\n if mote.getRSSI(m) > mote.minRssi:\n pdr = self._computePDR(mote, m)\n mote.setPDR(m, pdr)\n m.setPDR(mote, pdr)", "def _calc_interaction_expansion(self):\n # preevaluate expansions for volume and surface phase functions\n # this returns symbolic code to be then further used\n\n volexp = self.V.legexpansion(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n self.geometry).doit()\n\n brdfexp = self.SRF.legexpansion(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n self.geometry).doit()\n\n # preparation of the product of p*BRDF for coefficient retrieval\n # this is the eq.23. and would need to be integrated from 0 to 2pi\n fPoly = expand(2 * sp.pi * volexp * brdfexp)\n\n # do integration of eq. 23\n expr = self._integrate_0_2pi_phis(fPoly)\n\n # now we do still simplify the expression to be able to express\n # things as power series of cos(theta_s)\n theta_s = sp.Symbol('theta_s')\n replacements = [(sp.sin(theta_s) ** i,\n expand((1. 
- sp.cos(theta_s) ** 2)\n ** sp.Rational(i, 2)))\n for i in range(1, self.SRF.ncoefs + self.V.ncoefs - 1)\n if i % 2 == 0]\n\n res = expand(expr.xreplace(dict(replacements)))\n\n return res", "def test_hexamethylcyclohexane(self):\n def draw(image: ShapeImage):\n image.add_regular_hexagon(\n 100, start_coord=(400, 400)\n )\n image.add_line((487, 350), (487, 250))\n image.add_line((574, 400), (661, 350))\n image.add_line((574, 500), (661, 550))\n image.add_line((487, 550), (487, 650))\n image.add_line((400, 500), (313, 550))\n image.add_line((400, 400), (313, 350))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[487, 350]],\n [[574, 400]],\n [[574, 500]],\n [[487, 550]],\n [[400, 500]],\n # Methyl groups\n [[487, 250]],\n [[661, 350]],\n [[661, 550]],\n [[487, 650]],\n [[313, 550]],\n [[313, 350]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 487, 350]],\n [[487, 350, 574, 400]],\n [[574, 400, 574, 500]],\n [[574, 500, 487, 550]],\n [[487, 550, 400, 500]],\n [[400, 500, 400, 400]],\n # To methyl groups\n [[487, 350, 487, 250]],\n [[574, 400, 661, 350]],\n [[574, 500, 661, 550]],\n [[487, 550, 487, 650]],\n [[400, 500, 313, 550]],\n [[400, 400, 313, 350]]\n ])\n )", "def may_steenrod_structure(arity, degree, torsion=None, convention=None):\n\n def i(surj, iterate=1):\n \"\"\"Inclusion of Surj(r) into Surj(r+1)\n\n Defined by appending 1 at the start of basis elements and\n raising the value of all other entries by 1.\"\"\"\n\n if iterate == 1:\n answer = surj.zero()\n for k, v in surj.items():\n answer += answer.create(\n {(1,) + tuple(j + 1 for j in k): v})\n return answer\n if iterate > 1:\n return i(i(surj, iterate=iterate - 1))\n\n def p(surj, iterate=1):\n \"\"\"Projection of Surj(r) to Surj(r-1)\n\n Defined by removing 1 from a basis element with only one\n occurrences of value 1 and subtracting 1 from all other entries.\n\n \"\"\"\n if iterate == 1:\n answer = surj.zero()\n for k, v in surj.items():\n if k.count(1) == 1:\n idx = k.index(1)\n new_k = (tuple(j - 1 for j in k[:idx]) +\n tuple(j - 1 for j in k[idx + 1:]))\n answer += answer.create({new_k: v})\n return answer\n if iterate > 1:\n return p(p(surj, iterate=iterate - 1))\n\n def s(surj):\n \"\"\"Chain homotopy from the identity to the composition pi.\n Explicitly, id - ip = ds + sd.\"\"\"\n answer = surj.zero()\n for k, v in surj.items():\n answer += answer.create({(1,) + tuple(j for j in k): v})\n return answer\n\n def h(surj):\n \"\"\"Chain homotopy from the identity to i...i p..p.\n In Surj(r), realizing its contractibility to Surj(1).\"\"\"\n answer = s(surj)\n for r in range(1, arity - 1):\n answer += i(s(p(surj, r)), r)\n return answer\n\n operators = {\n 0: SymmetricRing.norm_element(arity),\n 1: SymmetricRing.transposition_element(arity)\n }\n\n def psi(arity, degree, convention=convention):\n \"\"\"Recursive definition of steenrod product over the integers.\"\"\"\n if degree == 0:\n return SurjectionElement({tuple(range(1, arity + 1)): 1},\n convention=convention)\n else:\n previous = psi(arity, degree - 1, convention=convention)\n acted_on = operators[degree % 2] * previous\n answer = h(acted_on)\n return answer\n\n if convention is None:\n convention = SurjectionElement.default_convention\n integral_answer = psi(arity, degree, convention=convention)\n if torsion:\n integral_answer.set_torsion(torsion)\n return integral_answer", "def make_unary(sv, piece, o, op):\r\n there=len(op) # start position of last part\r\n # if the object is subscripted / has 
args\r\n if piece[there:].startswith(Special+Bloc): \r\n here=piece[there+1:].find(Special) # find ending delimiter\r\n key=piece[there+1:there+here+1] # extract key for the block\r\n if piece[there+here+2:].strip(Space): # something after the block (some other subscript)\r\n first=(o, tree_build(sv, sv.Blocks[key]), None) # Build block RECURSIVE \r\n last=tree_build(sv, piece[there+here+2:]) # build other subscript RECURSIVE\r\n res=(Special, first, last) # code for a subscripted object\r\n else:\r\n res=(o, tree_build(sv, sv.Blocks[key]), None) # Build block RECURSIVE\r\n return res\r\n # the object is not subscripted but may have parts separated by space\r\n if Space in piece.strip(Space): return (o, tree_build(sv, piece[there:]), None) # Build RECURSIVE\r\n return make_leaf(sv, piece.strip(Space))" ]
[ "0.5479113", "0.5462708", "0.54347366", "0.53193223", "0.53082114", "0.51953447", "0.5145413", "0.51347935", "0.508981", "0.5083603", "0.50094604", "0.5006806", "0.49864545", "0.49627787", "0.4931252", "0.49236828", "0.4907775", "0.49058902", "0.4894753", "0.48798862", "0.48758644", "0.48701966", "0.4864555", "0.48643816", "0.4859853", "0.4847534", "0.48431438", "0.4838394", "0.48378772", "0.48366374", "0.48333037", "0.48181078", "0.4816929", "0.4812871", "0.4810238", "0.48023096", "0.47976473", "0.47958526", "0.47934395", "0.47804445", "0.4764539", "0.475971", "0.47478083", "0.4743567", "0.47278172", "0.4723421", "0.47221732", "0.4717698", "0.4717564", "0.4710208", "0.47081295", "0.46992993", "0.46946153", "0.46803376", "0.46623993", "0.46466613", "0.46466413", "0.46441787", "0.4642763", "0.46370018", "0.46368828", "0.46364743", "0.46256375", "0.46229857", "0.46227276", "0.46182585", "0.461313", "0.46117258", "0.46014938", "0.45992583", "0.4598409", "0.4594044", "0.4589882", "0.45892328", "0.45891383", "0.45822665", "0.45822665", "0.45771864", "0.4567327", "0.45659712", "0.45542508", "0.455401", "0.45521292", "0.4548231", "0.4541523", "0.45373842", "0.45358628", "0.4535111", "0.45258683", "0.4523517", "0.4521806", "0.45217675", "0.4520115", "0.45143265", "0.45118418", "0.4503634", "0.45022783", "0.44965672", "0.44944346", "0.44931832", "0.44885775" ]
0.0
-1
Place the vertex v at the given position and apply the transformation T. Return the grid points occupied by the piece.
def place(self, position, v, T):
    geo = (self.geo - self.geo[v]).dot(T)
    return position + geo
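A minimal usage sketch of the place method above. It assumes self.geo is an (N, 2) integer NumPy array holding one grid offset per cell of the piece and that T is a 2x2 rotation/reflection matrix; the Piece class, its constructor, and the example values are hypothetical and only illustrate the call.

import numpy as np

class Piece:
    def __init__(self, geo):
        # geo: (N, 2) integer array of grid offsets, one row per cell of the piece (assumed shape)
        self.geo = np.asarray(geo)

    def place(self, position, v, T):
        # Shift so vertex v sits at the origin, apply T, then translate to the target position.
        geo = (self.geo - self.geo[v]).dot(T)
        return position + geo

# Rotate an L-shaped piece a quarter turn about its first cell and drop it at (5, 5).
piece = Piece([[0, 0], [1, 0], [2, 0], [2, 1]])
rot90 = np.array([[0, -1], [1, 0]])  # counter-clockwise quarter turn
print(piece.place(np.array([5, 5]), v=0, T=rot90))  # -> [[5 5] [5 4] [5 3] [6 3]]

Expressing the transform as a plain matrix product keeps the method agnostic to whether T is a rotation, a reflection, or the identity, so every orientation of a piece goes through the same code path.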
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def translate(self, v):\n return Position.fromnp(translate(self.tonp(), v))", "def project_vector(u, v):\n u_np = np.array([u.get_x(), u.get_y()])\n v_np = np.array([v.get_x(), v.get_y()])\n proj = (np.dot(u_np, v_np) / np.dot(v_np, v_np)) * v_np\n return Point(proj[0], proj[1])", "def translate(self, tr):\n c = self.c -self.a*tr[0] -self.b*tr[1]\n self.c =c\n self.pointN = self.pointN+tr\n self.point1 = self.point1+tr\n self.points +=tr", "def translate(self, tr):\n self.points = self.points + tr", "def point_at(self, u, v, world=True):\n u = u * pi\n v = v * PI2\n x = self.radius * cos(u) * sin(v)\n y = self.radius * sin(u) * sin(v)\n z = self.radius * cos(v)\n point = Point(x, y, z)\n if world:\n point.transform(self.transformation)\n return point", "def proyZm1(u, v, t1):\n den = u ** 2 + v ** 2 + 4\n x = u - t1 * (u - 4 * u / den)\n y = v - t1 * (v - 4 * v / den)\n z = -1 - t1 * (-2 + 8 / den)\n return (x, y, z)", "def move_vertex(self, p, v, x, y, z):\n self._move_vertex(p, v, x, y, z)", "def geochart(u, v):\n cv = torch.cos(v)\n cu = torch.cos(u)\n sv = torch.sin(v)\n su = torch.sin(u)\n return torch.stack((cv*su, sv*su, cu)).T", "def changePoint(self, P, V):\n if (not (isinstance(P, list) and isinstance(V, list))) or \\\n not (len(P) == 2 and len(V) == 4 and V[0] != 0):\n raise ValueError(\"(P,V) must be ([px, py], [u, r, s, t]) with u != 0.\")\n\n if self.ch == 0:\n Q0 = rational.IntegerIfIntOrLong(P[0]-V[1])/rational.IntegerIfIntOrLong(V[0]**2)\n Q1 = rational.IntegerIfIntOrLong(P[1]-V[2]*(P[0]-V[1])-V[3])/rational.IntegerIfIntOrLong(V[0]**3)\n else:\n v = self.basefield.createElement(V[0]).inverse()\n Q0 = ((P[0]-V[1])*v**2)\n Q1 = ((P[1]-V[2]*(P[0]-V[1])-V[3])*v**3)\n Q = [Q0, Q1]\n return Q", "def transform_ip(self, H): # or update()\n self.vh = H @ self.vertices.T\n self.vh = self.vh.T \n self.va = self.vh[:,:2]", "def vector_trans(self, v, T, V0):\n v = np.array(v)\n newv = np.add(v[0:2].dot(T), V0)\n self.log.debug(\"Transformation of vector {}, with transformation matrix {} nad V0 {}, to: {}\".format(v, T, V0, newv))\n return newv", "def rigid_transform_3d(v, mapping, alpha = 1):\r\n p_wgt = vec3(0, 0, 0)\r\n q_wgt = vec3(0, 0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n z = mp[0].z - v.z\r\n if (x == 0 and y == 0 and z == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y + z*z) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n A = mat3(0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A += w[i]*p_adj.transpose_multiply(q_adj)\r\n A_arr = np.array(A.matrix).reshape(3, 3)\r\n U, S, V = np.linalg.svd(A_arr)\r\n M_arr = np.matmul(np.transpose(V), np.transpose(U))\r\n M = mat3(M_arr.ravel().tolist())\r\n v_out = M*(v - p_wgt) + q_wgt\r\n return v_out", "def translate(self, vect):\n self.apply(lambda c: (vector(c) + vect).coords())", "def position(self, t):\n return vector_add(self.origin, self.direction.scale(t))", "def rigid_transform_2d(v, mapping, alpha = 1):\r\n p_wgt = vec2(0, 0)\r\n q_wgt = vec2(0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n if (x == 0 and y == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n A_fac = mat2([v.x - 
p_wgt.x, v.y - p_wgt.y, v.y - p_wgt.y, p_wgt.x - v.x])\r\n v_out = vec2(0, 0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A = mat2([p_adj.x, p_adj.y, p_adj.y, -p_adj.x])*A_fac*w[i]\r\n A = A.transpose()\r\n v_out += A*q_adj\r\n r = math.sqrt(v_out.dot(v_out))\r\n v_out /= r\r\n v_sub = v - p_wgt\r\n r = math.sqrt(v_sub.dot(v_sub))\r\n v_out *= r\r\n v_out += q_wgt\r\n return v_out", "def eval_2pts(self, vector, t):\n if t < 0 or t > 1:\n raise Exception(\"Cannot Eval \", e, \" with t=\", t, \": t Should Satisfy 0<=t<=1.\")\n else:\n return Vector((1-t)*self.x + t*vector.x, (1-t)*self.y + t*vector.y, (1-t)*self.z + t*vector.z)", "def vector_proj(v, w):\n w_hat = vector_hat(w)\n return vector_dot(v, w_hat) * w_hat", "def project(self, win_width, win_height, fov, viewer_distance):\r\n factor = fov / (viewer_distance + self.z)\r\n x = self.x * factor + win_width / 2\r\n y = -self.y * factor + win_height / 2\r\n return Point3D(x, y, 1)", "def locate_on_surface(sx, sy, sz, t, x, y, zz, v=1500):\n # Check sizes\n assert (len(sx) == len(sy)) and (len(sx) == len(sz))\\\n and (len(sx) == len(t)),\\\n 'Source position and time arrays must all be of the same size.'\n assert np.shape(zz) == (len(x), len(y)),\\\n 'shape(zz) must equal (len(x), len(y)).'\n # Calculate RMS for each grid node\n rms = [] \n pos = []\n for ix, _x in enumerate(x):\n for iy, _y in enumerate(y):\n _t = slant_time(sx, sy, sz, _x, _y, zz[ix, iy], v=v)\n rms.append(np.sqrt(np.sum(np.power(_t - t, 2))))\n pos.append([_x, _y, zz[ix, iy]])\n i = np.argmin(rms)\n return pos[i] + [rms[i]]", "def get_vertex(self, p, v, x, y, z):\n x.value, y.value, z.value = self._get_vertex(p, v, x.value, y.value, z.value)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, 1)", "def get_element_for_location(self, points):\n verts = np.zeros((points.shape[0], 4, 3))\n bc = np.zeros((points.shape[0], 4))\n tetras = np.zeros(points.shape[0], dtype=\"int64\")\n inside = np.zeros(points.shape[0], dtype=bool)\n npts = 0\n npts_step = int(1e4)\n # break into blocks of 10k points\n while npts < points.shape[0]:\n\n cell_index = np.array(\n self.aabb_grid.position_to_cell_index(points[: npts + npts_step, :])\n )\n inside = self.aabb_grid.inside(points[: npts + npts_step, :])\n global_index = (\n cell_index[:, 0]\n + self.aabb_grid.nsteps_cells[None, 0] * cell_index[:, 1]\n + self.aabb_grid.nsteps_cells[None, 0]\n * self.aabb_grid.nsteps_cells[None, 1]\n * cell_index[:, 2]\n )\n\n tetra_indices = self.aabb_table[global_index[inside], :].tocoo()\n # tetra_indices[:] = -1\n row = tetra_indices.row\n col = tetra_indices.col\n # using returned indexes calculate barycentric coords to determine which tetra the points are in\n vertices = self.nodes[self.elements[col, :4]]\n pos = points[row, :]\n vap = pos[:, :] - vertices[:, 0, :]\n vbp = pos[:, :] - vertices[:, 1, :]\n # # vcp = p - points[:, 2, :]\n # # vdp = p - points[:, 3, :]\n vab = vertices[:, 1, :] - vertices[:, 0, :]\n vac = vertices[:, 2, :] - vertices[:, 0, :]\n vad = vertices[:, 3, :] - vertices[:, 0, :]\n vbc = vertices[:, 2, :] - vertices[:, 1, :]\n vbd = vertices[:, 3, :] - vertices[:, 1, :]\n\n va = np.einsum(\"ij, ij->i\", vbp, np.cross(vbd, vbc, axisa=1, axisb=1)) / 6.0\n vb = np.einsum(\"ij, ij->i\", vap, np.cross(vac, vad, axisa=1, axisb=1)) / 6.0\n vc = np.einsum(\"ij, 
ij->i\", vap, np.cross(vad, vab, axisa=1, axisb=1)) / 6.0\n vd = np.einsum(\"ij, ij->i\", vap, np.cross(vab, vac, axisa=1, axisb=1)) / 6.0\n v = np.einsum(\"ij, ij->i\", vab, np.cross(vac, vad, axisa=1, axisb=1)) / 6.0\n c = np.zeros((va.shape[0], 4))\n c[:, 0] = va / v\n c[:, 1] = vb / v\n c[:, 2] = vc / v\n c[:, 3] = vd / v\n # inside = np.ones(c.shape[0],dtype=bool)\n mask = np.all(c >= 0, axis=1)\n\n verts[: npts + npts_step, :, :][row[mask], :, :] = vertices[mask, :, :]\n bc[: npts + npts_step, :][row[mask], :] = c[mask, :]\n tetras[: npts + npts_step][row[mask]] = col[mask]\n inside[: npts + npts_step][row[mask]] = True\n npts += npts_step\n return verts, bc, tetras, inside", "def project(self, win_width, win_height, fov, viewer_distance):\n\t\tfactor = fov / (viewer_distance + self.z)\n\t\tx = self.x * factor + win_width / 2\n\t\ty = -self.y * factor + win_height / 2\n\t\treturn Point3D(x, y, 1)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)", "def protrudes((u,v)):\r\n return ((u,v,W), (u,v,S), (u,v-1,W), (u-1,v,S))", "def transform_coordinates(self, p):\n if type(p) == PhysicalObject:\n return self.transform_coordinates(p.position)\n elif type(p) == Vector:\n return tuple(map(\n lambda x: int(x),\n (p / SCALE_FACTOR - self.pos_shift))\n )", "def position(self, u, v):\n raise NotImplementedError", "def project(v, w):\n projection_length = dot(v, w)\n return scalar_multiply(projection_length, w)", "def coordinate_in_lattice(v,n,m):\n y=int(v/n);\n x=(v%n);\n\n return (x,y)", "def project (u, v):\r\n\r\n # Construct linear system Ap = d\r\n A = sps.lil_matrix ((width*height, width*height))\r\n d = np.zeros ((width*height))\r\n\r\n for i in range (1, height-1):\r\n for j in range (1, width-1):\r\n A[index(i,j), index(i,j)] = 4\r\n A[index(i,j), index(i-1,j)] = -1\r\n A[index(i,j), index(i+1,j)] = -1\r\n A[index(i,j), index(i,j-1)] = -1\r\n A[index(i,j), index(i,j+1)] = -1\r\n \r\n d[index(i,j)] = -1/h * (u[i,j] - u[i,j-1] + v[i,j] - v[i-1,j])\r\n\r\n # Unhandled boundary cases, we assume solid walls that don't move\r\n A[index(0,0), index(0,0)] = 2\r\n A[index(0,0), index(1,0)] = -1\r\n A[index(0,0), index(0,1)] = -1\r\n d[index(0,0)] = -1/h * (u[0,0] + v[0,0])\r\n\r\n A[index(height-1,0), index(0,0)] = 2\r\n A[index(height-1,0), index(height-1,1)] = -1\r\n A[index(height-1,0), index(height-2,0)] = -1\r\n d[index(height-1,0)] = -1/h * (u[height-1,0] - v[height-2,0])\r\n\r\n A[index(0,width-1), index(0,width-1)] = 2\r\n A[index(0,width-1), index(1,width-1)] = -1\r\n A[index(0,width-1), index(0,width-2)] = -1\r\n d[index(0,width-1)] = -1/h * (-u[0,width-2] + v[0,width-1])\r\n\r\n A[index(height-1,width-1), index(height-1,width-1)] = 2\r\n A[index(height-1,width-1), index(height-2,width-1)] = -1\r\n A[index(height-1,width-1), index(height-1,width-2)] = -1\r\n d[index(height-1,width-1)] = -1/h * (-u[height-1,width-2] - v[height-2,width-1])\r\n\r\n\r\n for i in range (1, height-1):\r\n 
A[index(i,0), index(i,0)] = 3\r\n A[index(i,0), index(i-1,0)] = -1\r\n A[index(i,0), index(i+1,0)] = -1\r\n A[index(i,0), index(i,1)] = -1\r\n d[index(i,0)] = -1/h * (u[i,0] + v[i,0] - v[i-1,0])\r\n\r\n for i in range (1, height-1):\r\n A[index(i,width-1), index(i,width-1)] = 3\r\n A[index(i,width-1), index(i-1,width-1)] = -1\r\n A[index(i,width-1), index(i+1,width-1)] = -1\r\n A[index(i,width-1), index(i,width-2)] = -1\r\n d[index(i,width-1)] = -1/h * (- u[i,width-2] + v[i, width-1] - v[i-1,width-1])\r\n\r\n for j in range (1, width-1):\r\n A[index(0,j), index(0,j)] = 3\r\n A[index(0,j), index(1,j)] = -1\r\n A[index(0,j), index(0,j-1)] = -1\r\n A[index(0,j), index(0,j+1)] = -1\r\n d[index(0,j)] = -1/h * (u[0,j] - u[0,j-1] + v[0,j])\r\n \r\n for j in range (1, width-1):\r\n A[index(height-1,j), index(height-1,j)] = 3\r\n A[index(height-1,j), index(height-2,j)] = -1\r\n A[index(height-1,j), index(height-1,j-1)] = -1\r\n A[index(height-1,j), index(height-1,j+1)] = -1\r\n d[index(height-1,j)] = -1/h * (u[height-1,j] - u[height-1,j-1] - v[height-2,j])\r\n\r\n\r\n A = A * dt / (density * h**2)\r\n\r\n A = sps.csr_matrix (A)\r\n p = np.reshape(spsolve (A, d), (height, width))\r\n\r\n # Calculate new velocity field based on this pressure field\r\n for i in range (height):\r\n for j in range (width):\r\n if (i == height-1 and j == width-1) or (i == height-1 and j == 0) or (i == 0 and j == width-1) or (i == 0 and j == 0):\r\n # Set vertical velocity to movement of solid wall 0\r\n u[i,j] = 0\r\n v[i,j] = 0\r\n elif i == height-1 or i == 0:\r\n u[i,j] = u[i,j] - dt / (density * h) * (p[i,j+1] - p[i,j])\r\n v[i,j] = 0\r\n elif j == width-1 or j == 0:\r\n u[i,j] = 0\r\n v[i,j] = v[i,j] - dt / (density * h) * (p[i+1,j] - p[i,j])\r\n else:\r\n u[i,j] = u[i,j] - dt / (density * h) * (p[i,j+1] - p[i,j])\r\n v[i,j] = v[i,j] - dt / (density * h) * (p[i+1,j] - p[i,j])\r\n\r\n # let's get some inflow\r\n u[4:12, 0] = 1\r\n\r\n return u, v, p", "def transform(self, ((a, b), (c, d))=((1, 1), (-1, 1)), aligned_with_grid=False):\n (x0, y0), (x1, y1) = self.vertices\n return type(self)((int(a * x0 + c * y0), int(b * x0 + d * y0)),\n (int(a * x1 + c * y1), int(b * x1 + d * y1)),\n aligned_with_grid=aligned_with_grid)", "def TransformPoint(*args, **kwargs):\n return _gdi_.GraphicsMatrix_TransformPoint(*args, **kwargs)", "def transform_point(p,R,t):\r\n x = R[0][0]*p[0]+R[0][1]*p[1]+t[0]\r\n y = R[1][0]*p[0]+R[1][1]*p[1]+t[1]\r\n return [x,y]", "def point(chordwise, spanwise, vertical):\n return Point3D.create(chordwise, spanwise, vertical)", "def t2v(T):\n x = T[0, 2]\n y = T[1, 2]\n theta = np.arctan2(T[1, 0], T[0, 0])\n v = np.array([x, y, theta])\n return v", "def get_vtktransform_rotate_global_to_local(v1, v2, v3):\n\n\n x = np.array([1.0, 0.0, 0.0])\n y = np.array([0.0, 1.0, 0.0])\n z = np.array([0.0, 0.0, 1.0])\n\n rot_mat = np.zeros((3, 3), dtype=float)\n rot_mat[0, 0] = np.dot(v1, x)\n rot_mat[0, 1] = np.dot(v1, y)\n rot_mat[0, 2] = np.dot(v1, z)\n rot_mat[1, 0] = np.dot(v2, x)\n rot_mat[1, 1] = np.dot(v2, y)\n rot_mat[1, 2] = np.dot(v2, z)\n rot_mat[2, 0] = np.dot(v3, x)\n rot_mat[2, 1] = np.dot(v3, y)\n rot_mat[2, 2] = np.dot(v3, z)\n\n rot_mat = np.column_stack((rot_mat, np.array([0.0, 0.0, 0.0])))\n rot_mat = np.vstack((rot_mat, np.array([0.0, 0.0, 0.0, 1.0])))\n\n vtkM = vtk.vtkMatrix4x4()\n\n for i in range(4):\n for j in range(4):\n vtkM.SetElement(i, j, rot_mat[i, j])\n\n transform = vtk.vtkTransform()\n transform.PreMultiply()\n transform.SetMatrix(vtkM)\n\n return transform", "def apply(self,v):\n 
return np.tensordot(self._transform, v, axes=([1],[0])) \\\n + self._translation", "def trans_to_coordinates(T, pts):\n p = []\n for i in range(len(pts)):\n \n p_b = [pts[i][0], pts[i][1], pts[i][2], 1]\n p_a = np.matmul(T, p_b).tolist()\n p.append(p_a[0:3])\n\n return p", "def trans_to_coordinates(T, pts):\n p = []\n for i in range(len(pts)):\n \n p_b = [pts[i][0], pts[i][1], pts[i][2], 1]\n p_a = np.matmul(T, p_b).tolist()\n p.append(p_a[0:3])\n\n return p", "def moveStageToWell(self, u, v): \n c= \"/cli:python /app:matrix /sys:1 /cmd:moveteowell \"\n c += \" /upos:\"+str(u)\n c += \" /vpos:\"+str(v)\n self.sendCMDstring(c)", "def setCoordsToMainFromPivot(self):\n\t\tself.grp.a.t.v = self.piv.translate\n\t\tself.grp.a.r.v = self.piv.rotation[0]", "def transform_points(points, T):\n\n homo_points = np.array([(x, y, 1) for (y, x) in points])\n t_points = np.array([T.dot(v) for v in homo_points ])\n swap = np.array([(x,y) for (y,x,z) in t_points])\n return swap", "def transform(self, x, y):\n # return self.transform_2D(x, y)\n return self.transform_perspective(x, y)", "def project(v, w):\n coefficient = dot(v, w)\n return scalar_multiply(coefficient, w)", "def TransformVector(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD3_TransformVector(self, *args)", "def generate_regular_grid_point_coords(R, side_size, device):\n aff = torch.tensor([[[0.5, 0, 0.5], [0, 0.5, 0.5]]], device=device)\n r = F.affine_grid(aff, torch.Size((1, 1, side_size, side_size)), align_corners=False)\n return r.view(1, -1, 2).expand(R, -1, -1)", "def spherical_parallel_transport(p_from, p_to, v):\n assert p_from.shape == p_to.shape == v.shape\n axis = np.cross(p_from, p_to)\n axis = axis / (np.linalg.norm(axis, axis=-1, keepdims=True) + 1e-20)\n theta = np.arccos(np.sum(p_to * p_from, axis=1).clip(-1, 1))\n rot = so3_matrix_generator(axis, theta)\n v_transformed = np.einsum(\"nij,nj->ni\", rot, v)\n return v_transformed", "def transform(self, v):\n #matrix vector multiply, convert from matrix to array type at the end\n return np.array( v * self.M )", "def solve_homography(u, v):\r\n N = u.shape[0]\r\n H = None\r\n\r\n if v.shape[0] is not N:\r\n print('u and v should have the same size')\r\n return None\r\n if N < 4:\r\n print('At least 4 points should be given')\r\n\r\n # TODO: 1.forming A\r\n A = np.zeros((2*N, 8))\r\n for i in range(N):\r\n A[2*i, :] = np.array([u[i, 0], u[i, 1], 1, 0, 0, 0, -u[i, 0]*v[i,0], -u[i, 1]*v[i, 0]])\r\n A[2*i+1, :] = np.array([0, 0, 0, u[i, 0], u[i, 1], 1, -u[i, 0]*v[i, 1], -u[i, 1]*v[i, 1]])\r\n\r\n # TODO: 2.solve H with A\r\n b = v.reshape(-1)\r\n H, res, _, _ = np.linalg.lstsq(A, b, rcond=None)\r\n H = np.concatenate((H, np.array([1])))\r\n H = H.reshape(3,3)\r\n\r\n return H", "def move(self, t):\n if self.visible == True:\n self.x += self.vx * t\n self.y += self.vy * t # Maybe do this to the end?! Like in article https://gafferongames.com/post/integration_basics/\n\n v_norm = np.sqrt(self.vx ** 2 + self.vy ** 2)\n self.phi = t * v_norm / self.radius\n\n glPushMatrix()\n glLoadIdentity()\n\n if v_norm > 0.0:\n glRotatef(self.phi * 180.0 / np.pi, -self.vy / v_norm, self.vx / v_norm, 0.0)\n glMultMatrixd(self.matrix)\n self.matrix = glGetDoublev(GL_MODELVIEW_MATRIX)\n\n glPopMatrix()\n\n # Friction!!! 
Could do it as global parameter\n if v_norm > 0.0:\n v_prime_norm = v_norm - 68 * t # tune this parameter for real effect\n\n if v_prime_norm < 0.0:\n v_prime_norm = 0.0\n\n self.vx *= v_prime_norm / v_norm\n self.vy *= v_prime_norm / v_norm\n\n return True\n # Add comment\n return False", "def g_xy(self):\n for x in range(self.size.x):\n for y in range(self.size.y):\n yield self.p[0] + Vect(x, y)", "def to_world(self, uv):\n return self._projective_transform(self.A, uv)", "def get_coords(self):\r\n # get the coordinates from V, skipping every 3 since it's a vector\r\n meshpts = self.V.tabulate_dof_coordinates()[::3]\r\n\r\n # create r vector if not already created\r\n try:\r\n self.r\r\n except:\r\n self.r = Function(self.V)\r\n\r\n # set the r vector\r\n self.r.vector()[:] = meshpts.flatten()", "def transform_coords(x, y, w, h, nw, nh):\r\n return ((((x / w) - 0.5) * nw), (((h - y) / h) - 0.5) * nh)", "def TransformVector(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_TransformVector(self, *args)", "def T(self):\n\n # Calculate the direction cosines for the local x-axis\n # The local x-axis will run from the i-node to the j-node\n xi = self.i_node.X\n xj = self.j_node.X\n yi = self.i_node.Y\n yj = self.j_node.Y\n zi = self.i_node.Z\n zj = self.j_node.Z\n x = [(xj - xi), (yj - yi), (zj - zi)]\n x = x/norm(x)\n \n # The local y-axis will be in the plane of the plate\n # Find a vector in the plate's local xy plane\n xn = self.n_node.X\n yn = self.n_node.Y\n zn = self.n_node.Z\n xy = [xn - xi, yn - yi, zn - zi]\n\n # Find a vector perpendicular to the plate surface to get the orientation of the local z-axis\n z = cross(x, xy)\n \n # Divide the vector by its magnitude to produce a unit z-vector of direction cosines\n z = z/norm(z)\n\n # Calculate the local y-axis as a vector perpendicular to the local z and x-axes\n y = cross(z, x)\n \n # Divide the z-vector by its magnitude to produce a unit vector of direction cosines\n y = y/norm(y)\n\n # Create the direction cosines matrix\n dirCos = array([x, y, z])\n \n # Build the transformation matrix\n transMatrix = zeros((24, 24))\n transMatrix[0:3, 0:3] = dirCos\n transMatrix[3:6, 3:6] = dirCos\n transMatrix[6:9, 6:9] = dirCos\n transMatrix[9:12, 9:12] = dirCos\n transMatrix[12:15, 12:15] = dirCos\n transMatrix[15:18, 15:18] = dirCos\n transMatrix[18:21, 18:21] = dirCos\n transMatrix[21:24, 21:24] = dirCos\n \n return transMatrix", "def calcul_point_plan_projection(cls,cx,cy,cz,spx,spy,axe_x,axe_y):\n projX=gs.Vector3(spx*axe_x.x,spx*axe_x.y,spx*axe_x.z)\n projY=gs.Vector3(spy*axe_y.x,spy*axe_y.y,spy*axe_y.z)\n point=gs.Vector3(projX+projY)+gs.Vector3(cx,cy,cz)\n return point", "def vinet(p, v):\n x = ( v / p[3] ) ** ( 1.0 / 3 )\n xi = 3.0 / 2 * ( p[2] - 1 )\n return p[0] + 9 * p[1] * p[3] / ( xi**2 ) * ( 1 + ( xi * ( 1 - x ) - 1 ) * np.exp( xi * ( 1 - x ) ) )", "def transform(p, xform, axes=None, vector=False):\n\n p = _fillPoints(p, axes)\n t = np.dot(xform[:3, :3], p.T).T\n\n if not vector:\n t = t + xform[:3, 3]\n\n if axes is not None:\n t = t[:, axes]\n\n if t.size == 1: return t[0]\n else: return t", "def getVerticePosition(self):\n #def getvoxelpos(model,scale,dims,translate,i,j,k): #centroid!\n return(self.X,self.Y,self.Z)", "def update_grid(self):\n if self.game_over:\n return\n if self.active_piece is None:\n self.place_new_piece()\n if self.piece_collision_exists(self.active_piece):\n self.handle_active_piece_collision()\n self.place_new_piece()\n self.shift_cells(self.active_piece, 
self.current_direction)\n self.active_piece = TransformPiece.shift_coordinates(self.active_piece, self.current_direction)\n self.merge_with_completed_rows()\n if self.is_game_won():\n self.game_over = True", "def _fit(self, v, progress):\n # Compute on non-masked sources :\n xyz = self.xyz\n N = xyz.shape[0]\n v = v.reshape(v.shape[0] * 3, 3)\n\n # Loop over sources :\n progress.show()\n for k in range(N):\n # Get the euclidian distance :\n eucl = cdist(v, xyz[[k], :])\n # Get the closest vertex :\n eucl_argmin = eucl.argmin()\n # Set new coordinate :\n self.xyz[k, :] = v[eucl_argmin, :]\n # Finally update data sources and text :\n self.update()\n self.text_update()\n progress.hide()", "def proj_tan(self, v, x, c):\n return v", "def myPerspectiveTransform(pts, H):\n\n # Clone and reshape the list of points\n new_pts = np.reshape(pts, (-1, 2))\n # Allocate a vector filled with one with size (-1, 1)\n one_vector = np.zeros((pts.shape[0], 1)) + 1\n # Concatenate the one vector to the list of points to form the homogenious coordiniate system\n new_pts = np.concatenate((new_pts, one_vector), axis=len(new_pts.shape)-1)\n\n # Perform transformation and transform results into the pixel coord. system\n # i.e., x' = x/w, and y' = y/w\n for i, pt in enumerate(new_pts):\n new_pts[i] = H.dot(pt.T)\n new_pts[i] /= new_pts[i, -1]\n\n # Return results with the same shape as the input has\n return new_pts[:, :-1].reshape(pts.shape)", "def warp(im, u, v):\n assert im.shape == u.shape and \\\n u.shape == v.shape\n \n im_warp = np.empty_like(im)\n #\n # Your code here\n #\n ## Hint: You may find function griddata from package scipy.interpolate useful\n ## code inspired by: https://towardsdatascience.com/image-geometric-transformation-in-numpy-and-opencv-936f5cd1d315\n ## https://github.com/rajat95/Optical-Flow-Warping-Tensorflow/blob/master/warp.py\n ## https://sergevideo.blogspot.com/2014/11/writing-simple-optical-flow-in-python.html\n ## https://github.com/liruoteng/OpticalFlowToolkit/blob/master/lib/flowlib.py\n\n # get image dimensions [y, x]\n im_height, im_width = im.shape\n \n # number of pixel\n N = im_height * im_width\n\n iy, ix = np.mgrid[0:im_height, 0:im_width] # int-meshgrid\n fy, fx = np.mgrid[0:im_height:1.0, 0:im_width:1.0] # float-meshgrid\n\n # add the optical flow to the indices (float)\n fx = fx + u\n fy = fy + v\n\n points = np.c_[ix.reshape(N, 1), iy.reshape(N, 1)]\n xi = np.c_[fx.reshape(N, 1), fy.reshape(N, 1)]\n values = im.reshape(N, 1)\n im_interpol = griddata(points, values, xi, method='linear', fill_value=0.0)\n im_warp = im_interpol.reshape(im_height, im_width)\n\n assert im_warp.shape == im.shape\n return im_warp", "def translate(self, vector):\n locations = self.locations.translate(vector)\n # do not translate the orientations!\n pcs = self.pcs.translate(vector)\n self.locations = locations\n self.pcs = pcs\n return self", "def TransformPoint(transform, x, y, z):\n result = np.matmul(transform, np.array([x, y, z, 1.]))\n return result[0], result[1], result[2]", "def projectPoint(self,p):\n a,b,c = self.a, self.b, self.c\n x,y = p\n return numpy.array( [ b*(x*b-y*a) - c*a, a*(y*a-x*b) - c*b ] )", "def transform(self, H): # or update()\n vh = H @ self.vertices.T\n vh = vh.T \n va = vh[:,:2]\n return Polygon(va)", "def projective_transform(self, x):\n\n x = np.asarray(x)\n # Assume no intensity column\n x0, y0, z0 = x\n\n # Camera coors to pixel coors\n u = ((x0 / z0) * self.f) + (self.sensor_size[0] // 2)\n v = ((y0 / z0) * self.f) + (self.sensor_size[1] // 2)\n\n u_min = 
np.min(u)\n v_min = np.min(v)\n\n n = len(u)\n u_list = []\n v_list = []\n if self.error_on_oob:\n for i in range(n):\n if (u[i] >= u_min and u[i] <= self.sensor_size[0] and v[i] >= v_min and v[i] <= self.sensor_size[1]):\n u_list.append(u[i])\n v_list.append(v[i])\n else:\n raise OutOfSensorBoundsError(\"Projected coordinate was outside the sensor\")\n else:\n for i in range(n):\n u_list.append(u[i])\n v_list.append(v[i])\n\n u = np.asarray(u_list)\n v = np.asarray(v_list)\n\n return np.vstack((u, v))", "def obtener_peso_arista(self, v, w):\n return self.vertices[v][w]", "def scattering_direction(v, theta):\r\n # Sample cos_phi and sin_phi, phi is the azimuthal angle of the scattering event\r\n continue_loop = True\r\n while continue_loop:\r\n eta1 = 1-2*random.random()\r\n eta2 = 1-2*random.random()\r\n alpha = eta1**2 + eta2**2\r\n if alpha <= 1:\r\n continue_loop = False\r\n cos_phi = eta1/np.sqrt(alpha)\r\n sin_phi = eta2/np.sqrt(alpha)\r\n \r\n new_x = v[0]*np.cos(theta) - np.sin(theta)/np.sqrt(1-v[2]**2) * (v[0]*v[2]*cos_phi + v[1]*sin_phi)\r\n new_y = v[1]*np.cos(theta) - np.sin(theta)/np.sqrt(1-v[2]**2) * (v[1]*v[2]*cos_phi - v[0]*sin_phi)\r\n new_z = v[2]*np.cos(theta) + np.sqrt(1-v[2]**2)*np.sin(theta)*cos_phi\r\n \r\n return [new_x, new_y, new_z]", "def translate(self, vect):\n self.pl.Base = vect\n\n self.comp.Placement = self.pl\n self.box.Placement = self.pl", "def proyZ1(u, v, t2):\n den = u ** 2 + v ** 2 + 4\n x = u - t2 * (u - 4 * u / den)\n y = v - t2 * (v - 4 * v / den)\n z = 1 - t2 * (2 - 8 / den)\n return (x, y, z)", "def transform_point(transform, x_in, y_in):\n # create point geometry from coordinates\n point = ogr.Geometry(ogr.wkbPoint)\n point.AddPoint(x_in, y_in)\n point.Transform(transform)\n\n x_out = point.GetX()\n y_out = point.GetY()\n return x_out, y_out", "def apply_transform(transform):\n vg.shape.check(locals(), \"transform\", (4, 4))\n\n def apply(points, discard_z_coord=False, treat_input_as_vector=False):\n points, is_columnized, maybe_decolumnize = columnize(\n points, (-1, 3), name=\"points\"\n )\n\n homogenous_coordinate_value = 0 if treat_input_as_vector else 1\n padded_points = np.pad(\n points,\n ((0, 0), (0, 1)),\n mode=\"constant\",\n constant_values=homogenous_coordinate_value,\n )\n transformed_padded_points = np.dot(transform, padded_points.T).T\n transformed_points = np.delete(transformed_padded_points, 3, axis=1)\n\n result = maybe_decolumnize(transformed_points)\n if discard_z_coord:\n return result[:, 0:2] if is_columnized else result[0:2]\n else:\n return result\n\n return apply", "def scalar_proj(v, w):\n return vector_dot(v, vector_hat(w))", "def householder_transformation(v):\n size_of_v = v.shape[1]\n e1 = np.zeros_like(v)\n e1[0, 0] = 1\n vector = get_norm(v) * e1\n if v[0, 0] < 0:\n vector = - vector\n u = (v + vector).astype(np.float32)\n norm2 = get_norm(u)\n u = u / norm2\n H = np.identity(size_of_v) - ((2 * np.matmul(np.transpose(u), u)) / np.matmul(u, np.transpose(u)))\n return H, u", "def plane_point_side_v3(p: np.ndarray, v: np.ndarray) -> Any:\n return p[:3].dot(v) + p[3]", "def transform_point(self, pt):\r\n\r\n x, y = pt\r\n return (x - self.xoffset, (y - self.yoffset) * self.yscale)", "def dexpinv(self, u, v, _=None):\n A, a = np.split(u, 2)\n B, b = np.split(v, 2)\n alpha = np.linalg.norm(A)\n rho = np.inner(A, a)\n if np.isclose(alpha, 0):\n return v\n c1 = (\n B\n - 0.5 * np.cross(A, B)\n + self._dexpinv_helper_1(alpha) * np.cross(A, np.cross(A, B))\n )\n c2 = (\n b\n - 0.5 * (np.cross(a, B) + np.cross(A, b))\n + 
self._dexpinv_helper_2(alpha, rho) * np.cross(A, np.cross(A, B))\n + self._dexpinv_helper_1(alpha)\n * (\n np.cross(a, np.cross(A, B))\n + np.cross(A, np.cross(a, B))\n + np.cross(A, np.cross(A, b))\n )\n )\n return np.hstack((c1, c2))", "def translate(self, vector):\n \n matrix = wf.translationMatrix(*vector)\n for wireframe in self.wireframes.values():\n wireframe.transform(matrix)", "def affine_transform_3d(v, mapping, alpha = 1):\r\n p_wgt = vec3(0, 0, 0)\r\n q_wgt = vec3(0, 0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n z = mp[0].z - v.z\r\n if (x == 0 and y == 0 and z == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y + z*z) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n M1 = mat3(0)\r\n M2 = mat3(0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n M1 += p_adj.transpose_multiply(p_adj)*w[i]\r\n M2 += p_adj.transpose_multiply(q_adj)*w[i]\r\n M1 = M1.inverse()\r\n M = M1*M2\r\n M = M.transpose()\r\n v_out = M*(v - p_wgt) + q_wgt\r\n return v_out", "def gen_regular_grid_coord(W, H, N):\n ### regular grid\n x = np.array(list(range(0, W))) / (W - 1)\n y = np.array(list(range(0, H))) / (H - 1)\n X, Y = tf.meshgrid(x, y)\n indices = tf.stack([X, Y])\n indices = tf.transpose(indices, (1, 2, 0))[None, ...]\n regular_coord_point = tf.tile(indices, (N, 1, 1, 1))\n regular_coord_point = tf.cast(regular_coord_point, tf.float32)\n return regular_coord_point", "def assign_vertices(self):\n CV_matrix = np.zeros((self.n_c, self.n_v, 3))\n for i in range(3):\n CV_matrix[self.tris[:, i], np.arange(self.n_v), i] = 1\n self.CV_matrix = CV_matrix\n return self.CV_matrix", "def place_new_piece(self):\n new_piece = PieceFactory.get_piece()\n new_piece = TransformPiece.transform(new_piece, PieceFactory.get_start_point(self.size, self.current_direction))\n self.active_piece = new_piece\n value = PieceFactory.get_value()\n for cell in self.active_piece:\n self.set_cell_value(cell, value)\n if self.piece_collision_exists(self.active_piece):\n self.handle_active_piece_collision()", "def pontos(self):\n \n self.sc = 1. \n self.x = self.sc*np.array([-155., -139.4, -124., -108.5, -93., -77.5, -62., -46.5, -31., -15.5, 0, 15.5, 31., 46.5, 62., 77.5, 93., 108.5, 124., 139.5, 155.])\n self.y = self.sc*np.array([ 9.23, 14.37, 18.98, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 21.55, 14.37, 3.59])\n self.px_index = len(self.x)\n #self.py_index = len(self.x)/2\n\n self.coord = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n \n self.x = self.x[::-1]\n self.y = -self.y[::-1] \n self.new = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n self.coord = np.array([np.append(self.coord[0],self.new[0]),np.append(self.coord[1],self.new[1]),np.append(self.coord[2],self.new[2])])\n self.coord = np.array([np.append(self.coord[0],self.coord[0,0]),np.append(self.coord[1],self.coord[1,0]),np.append(self.coord[2],self.coord[2,0])])\n\n self.coord[0] = self.coord[0] - (np.amax(self.coord[0])+np.amin(self.coord[0]))/2\n self.coord[1] = self.coord[1] + (np.amax(self.coord[1])-np.amin(self.coord[1]))/2 \n \n self.coordi = np.array(self.coord)\n \n self.cg = np.array([0 + self.dx, self.H/2 + self.dy, self.z]) \n self.cgi = np.array(self.cg)\n \n self.thi = 0. 
+ self.dth \n self.th = float(self.thi) \n \n self.coordnav(self.dx,self.dy,self.dth)", "def _eval(self, Tv):\n # trans_vox_coords needs be C-contiguous\n trans_vox_coords = Tv.apply(self._vox_coords)\n interp = self._interp\n if self._interp < 0:\n interp = - np.random.randint(MAX_INT)\n _joint_histogram(self._joint_hist,\n self._from_data.flat, # array iterator\n self._to_data,\n trans_vox_coords,\n interp)\n # Make sure all joint histogram entries are non-negative\n np.maximum(self._joint_hist, 0, self._joint_hist)\n return self._similarity_call(self._joint_hist)", "def _eval(self, v):\n Kv = np.zeros(self.K.output_size)\n self.K.forward(v.ravel(), Kv)\n return super(least_squares, self)._eval(Kv - self.offset)", "def _transform_point(self, x, y):\n return (x, y)", "def do_transform_point(transform: Transform, point: Point) -> Point:\n\n transform_mat = alloy.math.transformation_matrix_from_array(alloy.ros.transform_to_numpy(transform))\n point_np = alloy.ros.point_to_numpy(point)\n point_np = np.append(point_np, 1).reshape((4,1))\n trans_point = np.matmul(transform_mat, point_np)\n return alloy.ros.numpy_to_point(trans_point[0:3,0])", "def transform(self, R, t, scale = 1):\n\n # Build 4-by-4 projection matrix from args ----------------------------\n # This is what we are doing internally:\n # Proj = np.r_[ scale * np.c_[R, t], [[0, 0, 0, 1]] ]\n # InvProj = np.r_[ scale * np.c_[R.T, -np.dot(R.T, t)], [[0,0,0,scale]] ]\n Proj = tf_format.tf_format('4x4', R, t)\n Proj[:-1,:] *= scale\n InvProj = tf_format.tf_format('i4x4', R, t) * scale\n \n \n # Apply transformation to pts3D ---------------------------------------\n if self.pts3D is not None and self.pts3D.shape[1] > 0:\n # Use homogeneous coords\n pts3D = np.r_[self.pts3D, np.ones((1, self.pts3D.shape[1]))]\n pts3D = np.dot(Proj, pts3D)\n self.pts3D = pts3D[:3, :]\n\n # Apply transformation to cameras -------------------------------------\n # Camera poses are stored using camera-to-world transformations, we \n # need to invert the projection matrix for this to work --> \n # we use InvProj\n\n cposes = self.cam_poses\n for i in range(cposes.shape[1]):\n\n # Extract camera projection matrix\n p_cam = tf_format.tf_format('4x4', cposes[:, i])\n\n # Transform camera projection matrix\n new_p_cam = np.dot(p_cam, InvProj)\n \n # Make sure it's a true rotation!\n [u, s, vT] = np.linalg.svd(new_p_cam[:3,:3])\n cposes[:3, i] = tf_format.rodrigues( np.dot(u,vT) ).ravel()\n cposes[3:, i] = new_p_cam[:3, 3]\n\n self.cam_poses = cposes", "def solve_system(coord, vec):\n num_batch = coord.shape[0]\n num_point = coord.shape[1]\n\n ones = torch.ones([num_batch, num_point, 1])\n p = torch.cat([ones, coord], 2) # [bn, pn, 3]\n p_1 = torch.reshape(p, [num_batch, -1, 1, 3]) # [bn, pn, 1, 3]\n p_2 = torch.reshape(p, [num_batch, 1, -1, 3]) # [bn, 1, pn, 3]\n d = p_1 - p_2 # [bn, pn, pn, 3]\n d2 = torch.sum(torch.pow(d, 2), 3) # [bn, pn, pn]\n r = d2 * torch.log(d2 + 1e-6) # [bn, pn, pn]\n\n zeros = torch.zeros([num_batch, 3, 3])\n W_0 = torch.cat([p, r], 2) # [bn, pn, 3+pn]\n W_1 = torch.cat([zeros, torch.transpose(p, 2, 1)], 2) # [bn, 3, pn+3]\n W = torch.cat([W_0, W_1], 1) # [bn, pn+3, pn+3]\n W_inv = b_inv(W)\n\n tp = F.pad(vec.unsqueeze(1), (0, 0, 0, 3))\n tp = tp.squeeze(1) # [bn, pn+3, 2]\n T = torch.matmul(W_inv, tp) # [bn, pn+3, 2]\n T = torch.transpose(T, 2, 1) # [bn, 2, pn+3]\n\n return T", "def translate(self, vec):\n self.substrates = shapely.affinity.translate(self.substrates, vec[0], vec[1])\n self.partitionLine = 
shapely.affinity.translate(self.partitionLine, vec[0], vec[1])\n for annotation in self.annotations:\n o = annotation.origin\n annotation.origin = (o[0] + vec[0], o[1] + vec[1])\n\n def newRevertTransformation(point, orig=self.revertTransformation, vec=vec):\n prevPoint = (point[0] - vec[0], point[1] - vec[1])\n if orig is not None:\n return orig(prevPoint)\n return prevPoint\n self.revertTransformation = newRevertTransformation", "def translate_to_point_O(self):\n self.translate(-self.pcs.origin)", "def vspatial(X, Y, U, V):\n row, col = X.shape\n r = row\n c = col\n vsqrt = (U ** 2 + V ** 2) ** 0.5\n Ax = U / vsqrt\n Ay = V / vsqrt\n CA = np.ones((r, c))\n CV = np.ones((r, c))\n for xin in range(0, c):\n for yin in range(0, r):\n CA[yin, xin] = (Ax[0:row-yin, 0:col-xin] * Ax[yin:row, xin:col] + Ay[0:row-yin, 0:col-xin] * Ay[yin:row, xin:col]).mean()\n CV[yin, xin] = (U[0:row-yin, 0:col-xin] * U[yin:row, xin:col] + V[0:row-yin, 0:col-xin] * V[yin:row, xin:col]).mean()\n return X, Y, CA, CV", "def _cell_to_global(self, xy, wh):\n # grid setup\n line = tf.range(0, self.num_cells)\n rows = tf.reshape(line, [self.num_cells, 1])\n rows = tf.tile(rows, [1, self.num_cells])\n cols = tf.reshape(line, [1, self.num_cells])\n cols = tf.tile(cols, [self.num_cells, 1])\n grid = tf.stack([cols, rows], axis=-1)\n grid = tf.reshape(grid, [1, self.num_cells, self.num_cells, 1, 2])\n grid = tf.cast(grid, tf.float32)\n # box transformation\n xy += grid\n wh *= tf.reshape(self.anchors, [1, 1, 1, self.num_anchors, 2])\n return tf.concat([xy, wh], axis=-1) / self.num_cells", "def _proj(u,v):\n return (np.einsum('i...,i...->...',u,v)/np.einsum('i...,i...->...',u,u))*u" ]
[ "0.61589414", "0.5887289", "0.56152284", "0.55743974", "0.5553705", "0.55252033", "0.55161303", "0.54971194", "0.5430372", "0.54015726", "0.5349081", "0.5345369", "0.53373307", "0.53254366", "0.5311833", "0.5305778", "0.5302551", "0.53003585", "0.52812153", "0.52796966", "0.5275249", "0.52329", "0.52311933", "0.52299464", "0.52299464", "0.52299464", "0.5227934", "0.52165365", "0.52145654", "0.5205811", "0.5192516", "0.5190237", "0.51770735", "0.51684123", "0.5152329", "0.5129694", "0.5128992", "0.5125958", "0.5102302", "0.50977325", "0.50977325", "0.5097411", "0.5092535", "0.50880367", "0.5074629", "0.5072377", "0.50712454", "0.5048723", "0.50409335", "0.5039736", "0.5037511", "0.5027697", "0.5021371", "0.5019952", "0.50172615", "0.5015732", "0.5011319", "0.50078785", "0.50022686", "0.49988723", "0.49961457", "0.4992173", "0.49870184", "0.49799442", "0.49776876", "0.4977528", "0.49769697", "0.49768335", "0.49760017", "0.49752554", "0.4968658", "0.49675065", "0.4963839", "0.4951604", "0.49451315", "0.4936038", "0.49355212", "0.49332058", "0.49327806", "0.49206233", "0.49204478", "0.49092236", "0.49033272", "0.4898178", "0.48913404", "0.48850816", "0.48797083", "0.4877209", "0.48757958", "0.48707578", "0.4869667", "0.48632205", "0.48605207", "0.48577884", "0.48533118", "0.4849311", "0.4846896", "0.48458898", "0.48437804", "0.48369294" ]
0.65050745
0
Generate all nondegenerate placements, with one of the vertices placed at (0,0). Return the placements as [ (v, T) ], where v is the vertex to be placed at (0,0), and T the 2x2 transformation matrix that places the piece according to self.geo[v] + T.dot(self.geo - self.geo[v])
def findNondegeneratePlacements( self):
    # Rotate counterclockwise by 90 degrees around the v'th vertex.
    r90 = np.array( [ [0,1], [-1,0] ], dtype=int)
    # Flip the piece along the vertical axis through the v'th vertex.
    fv = np.array( [ [1,0], [0,-1] ], dtype=int)
    self.placements = []
    uniques = set()    # Unique placements generated so far
    identity = np.array( [ [1,0], [0,1] ], dtype=int)
    T = identity[:,:]
    for i in xrange(self.nVertices):
        geo = self.geo[:,:]
        geo -= geo[i]    # Place i'th vertex at (0,0)
        for r in xrange(4):
            T = T.dot( r90)
            for f in xrange(2):
                T = T.dot( fv)
                pk = placementKey( geo.dot(T))
                if (not pk in uniques):
                    uniques.add( pk)
                    self.placements.append( (i, T))
        # After four rotations and two flips, we should be back to
        # the original position.
        assert( np.array_equal( T, identity))
    return self.placements
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_all_locations(grid, shape):", "def test_create_new_placements(self):\n subv = SimpleMachineVertex(None, \"\")\n pl = Placement(subv, 0, 0, 1)\n Placements([pl])", "def generate_nearby_cells(self):\n for y in range(len(self.island_map)):\n for x in range(len(self.island_map[y])):\n list_of_nearby_cells = []\n\n if y != 0:\n self.generate_cell_above(x, y, list_of_nearby_cells)\n\n if x != 0:\n self.generate_cell_left(x, y, list_of_nearby_cells)\n\n if y != len(self.island_map)-1:\n self.generate_cell_below(x, y, list_of_nearby_cells)\n\n if x != len(self.island_map[y])-1:\n self.generate_cell_right(x, y, list_of_nearby_cells)\n\n self.island_map[y][x].nearby_cells = list_of_nearby_cells", "def __init__( self, geo, index=None):\n # the column vector self.geo[:,i] gives the i'th vertex's positions\n self.geo = np.array( geo, dtype=int)\n self.findNondegeneratePlacements()\n self.id = index", "def g_xy(self):\n for x in range(self.size.x):\n for y in range(self.size.y):\n yield self.p[0] + Vect(x, y)", "def gen_placecells(self, min_spread=0.2):\r\n\r\n N = None\r\n num_tries = 1000 # a limit on the number of attempts to place a new placecell\r\n\r\n # assign random x,y locations to each neuron\r\n locations = [self.random_location()]\r\n while True:\r\n # generate a random new point\r\n new_loc = self.random_location()\r\n\r\n # check that the point isn't too close to previous points\r\n count = 0\r\n while min([self.calc_dist(new_loc, l) for l in locations]) < min_spread and count < num_tries:\r\n new_loc = self.random_location()\r\n count += 1\r\n\r\n # add the new point\r\n locations += [new_loc]\r\n\r\n if (N == None and count >= num_tries) or len(locations) == N:\r\n # stop when required number of place cells built (if N specified),\r\n # or when world has been decently filled\r\n break\r\n\r\n return locations", "def createTopology(self):\n\n # find DAG root\n dagRoot = None\n for mote in self.motes:\n if mote.id == 0:\n mote.role_setDagRoot()\n dagRoot = mote\n assert dagRoot\n\n # put DAG root at center of area\n dagRoot.setLocation(x=self.squareSide/2,\n y=self.squareSide/2)\n\n # Copy the contents of the list (but keep the originals) and shuffle them.\n # shuffledMotes = list(self.motes)\n # random.shuffle(shuffledMotes)\n # print shuffledMotes\n\n #### GRID PREPRATIONS.\n dagRootX, dagRootY = dagRoot.getLocation()\n # determine the number of 'square levels'\n numberOfMotes = len(self.motes)\n currentLvl = 0\n sumMotes = 0\n while (sumMotes < numberOfMotes):\n if currentLvl == 0:\n sumMotes += 1\n else:\n sumMotes += currentLvl * 8\n currentLvl += 1\n maxLvl = currentLvl - 1\n # print sumMotes\n coordinatesPerLvl = []\n for lvl in range(0, maxLvl + 1):\n coordinatesThisLvl = []\n if lvl == 0:\n coordinatesThisLvl = [(dagRootX, dagRootY)]\n elif lvl == 1:\n coordinatesThisLvl = self.getSquareCoordinates((dagRootX, dagRootY), self.DISTANCE)\n elif lvl > 1:\n coordinatesPrevLvl = coordinatesPerLvl[lvl - 1]\n coordinatesPrevPrevLvl = coordinatesPerLvl[lvl - 2]\n for coordinatePrevLvl in coordinatesPrevLvl:\n squareCoordinates = self.getSquareCoordinates(coordinatePrevLvl, self.DISTANCE)\n for squareCoordinate in squareCoordinates:\n if not self.isInCoordinates(squareCoordinate,\n coordinatesPrevPrevLvl) and not self.isInCoordinates(\n squareCoordinate, coordinatesPrevLvl) and not self.isInCoordinates(squareCoordinate,\n coordinatesThisLvl):\n coordinatesThisLvl.append(squareCoordinate)\n coordinatesPerLvl.append(coordinatesThisLvl)\n # print 'Level %d: # motes = %d' % 
(lvl, len(coordinatesThisLvl))\n # print coordinatesThisLvl\n assert len(coordinatesThisLvl) == 1 or len(coordinatesThisLvl) == lvl * 8\n\n allCoordinates = [j for i in coordinatesPerLvl for j in i]\n # print allCoordinates\n\n # reposition each mote until it is connected\n countMote = 1 # root 0 already has coordinates\n connectedMotes = [dagRoot]\n for mote in self.motes:\n if mote in connectedMotes:\n continue\n\n connected = False\n while not connected:\n # pick a random location\n\n newX = None\n newY = None\n # if no topology is not given, build the topology yourself\n if SimEngine.SimEngine().ilp_topology is None:\n newX = np.random.normal(allCoordinates[countMote][0], self.DISTANCE / 8, 1)[0]\n newY = np.random.normal(allCoordinates[countMote][1], self.DISTANCE / 8, 1)[0]\n else:\n # if no topology is given, use that topology\n newX = SimEngine.SimEngine().ilp_topology[str(mote.id)]['x']\n newY = SimEngine.SimEngine().ilp_topology[str(mote.id)]['y']\n\n mote.setLocation(\n x=newX,\n y=newY\n )\n\n numStableNeighbors = 0\n\n # count number of neighbors with sufficient RSSI\n for cm in connectedMotes:\n\n rssi = self._computeRSSI(mote, cm)\n mote.setRSSI(cm, rssi)\n cm.setRSSI(mote, rssi)\n\n # save the intial RSSI values for future use in the mobility models\n mote.initialRSSI[cm] = rssi\n cm.initialRSSI[mote] = rssi\n\n if self.settings.individualModulations == 1:\n if rssi > Modulation.Modulation().modulationStableRSSI[Modulation.Modulation().minimalCellModulation[SimSettings.SimSettings().modulationConfig]]:\n # print rssi\n numStableNeighbors += 1\n else:\n if rssi > self.STABLE_RSSI:\n # print rssi\n numStableNeighbors += 1\n\n # make sure it is connected to at least STABLE_NEIGHBORS motes\n # or connected to all the currently deployed motes when the number of deployed motes\n # are smaller than STABLE_NEIGHBORS\n if numStableNeighbors >= self.STABLE_NEIGHBORS or numStableNeighbors == len(connectedMotes):\n connected = True\n\n connectedMotes += [mote]\n countMote += 1\n\n # for each mote, compute PDR to each neighbors\n for mote in self.motes:\n shortestDistance = None\n for m in self.motes:\n if mote == m:\n continue\n if self.settings.individualModulations == 1:\n rssi_value = mote.getRSSI(m)\n for modulationTmp in Modulation.Modulation().modulations:\n # if the rssi value is higher than the minimal signal value required for this neighbor, take that modulation\n # and compute the PDR using that modulation\n if rssi_value > Modulation.Modulation().modulationStableRSSI[modulationTmp]:\n pdr = self._computePDR(mote, m, modulation=modulationTmp)\n mote.setPDR(m, pdr)\n m.setPDR(mote, pdr)\n mote.setModulation(m, modulationTmp)\n m.setModulation(mote, modulationTmp)\n else:\n if mote.getRSSI(m) > mote.minRssi:\n pdr = self._computePDR(mote, m)\n mote.setPDR(m, pdr)\n m.setPDR(mote, pdr)\n # closest distance\n dist = self._computeDistance(mote, m)\n if shortestDistance == None or dist < shortestDistance:\n mote.closestNeighbor = m\n shortestDistance = dist", "def createTopology(self):\n\n # find DAG root\n dagRoot = None\n for mote in self.motes:\n if mote.id == 0:\n mote.role_setDagRoot()\n dagRoot = mote\n assert dagRoot\n\n # put DAG root at center of area\n dagRoot.setLocation(x=self.squareSide/2,\n y=self.squareSide/2)\n\n # Copy the contents of the list (but keep the originals) and shuffle them.\n # shuffledMotes = list(self.motes)\n # random.shuffle(shuffledMotes)\n # print shuffledMotes\n\n #### GRID PREPRATIONS.\n dagRootX, dagRootY = dagRoot.getLocation()\n # determine 
the number of 'square levels'\n numberOfMotes = len(self.motes)\n currentLvl = 0\n sumMotes = 0\n while (sumMotes < numberOfMotes):\n if currentLvl == 0:\n sumMotes += 1\n else:\n sumMotes += currentLvl * 8\n currentLvl += 1\n maxLvl = currentLvl - 1\n # print sumMotes\n coordinatesPerLvl = []\n for lvl in range(0, maxLvl + 1):\n coordinatesThisLvl = []\n if lvl == 0:\n coordinatesThisLvl = [(dagRootX, dagRootY)]\n elif lvl == 1:\n coordinatesThisLvl = self.getSquareCoordinates((dagRootX, dagRootY), self.DISTANCE)\n elif lvl > 1:\n coordinatesPrevLvl = coordinatesPerLvl[lvl - 1]\n coordinatesPrevPrevLvl = coordinatesPerLvl[lvl - 2]\n for coordinatePrevLvl in coordinatesPrevLvl:\n squareCoordinates = self.getSquareCoordinates(coordinatePrevLvl, self.DISTANCE)\n for squareCoordinate in squareCoordinates:\n if not self.isInCoordinates(squareCoordinate,\n coordinatesPrevPrevLvl) and not self.isInCoordinates(\n squareCoordinate, coordinatesPrevLvl) and not self.isInCoordinates(squareCoordinate,\n coordinatesThisLvl):\n coordinatesThisLvl.append(squareCoordinate)\n coordinatesPerLvl.append(coordinatesThisLvl)\n # print 'Level %d: # motes = %d' % (lvl, len(coordinatesThisLvl))\n # print coordinatesThisLvl\n assert len(coordinatesThisLvl) == 1 or len(coordinatesThisLvl) == lvl * 8\n\n allCoordinates = [j for i in coordinatesPerLvl for j in i]\n # print allCoordinates\n\n # reposition each mote until it is connected\n countMote = 1 # root 0 already has coordinates\n connectedMotes = [dagRoot]\n for mote in self.motes:\n if mote in connectedMotes:\n continue\n\n connected = False\n while not connected:\n # pick a random location\n\n newX = np.random.normal(allCoordinates[countMote][0], self.DISTANCE / 8, 1)[0]\n newY = np.random.normal(allCoordinates[countMote][1], self.DISTANCE / 8, 1)[0]\n\n mote.setLocation(\n x=newX,\n y=newY\n )\n\n # mote.setLocation(\n # x = allCoordinates[countMote][0],\n # y = allCoordinates[countMote][1]\n # )\n\n numStableNeighbors = 0\n\n # count number of neighbors with sufficient RSSI\n for cm in connectedMotes:\n\n rssi = self._computeRSSI(mote, cm)\n mote.setRSSI(cm, rssi)\n cm.setRSSI(mote, rssi)\n\n # save the intial RSSI values for future use in the mobility models\n mote.initialRSSI[cm] = rssi\n cm.initialRSSI[mote] = rssi\n\n if rssi > self.STABLE_RSSI:\n # print rssi\n numStableNeighbors += 1\n\n # make sure it is connected to at least STABLE_NEIGHBORS motes\n # or connected to all the currently deployed motes when the number of deployed motes\n # are smaller than STABLE_NEIGHBORS\n if numStableNeighbors >= self.STABLE_NEIGHBORS or numStableNeighbors == len(connectedMotes):\n connected = True\n\n connectedMotes += [mote]\n countMote += 1\n\n # for each mote, compute PDR to each neighbors\n for mote in self.motes:\n shortestDistance = None\n for m in self.motes:\n if mote == m:\n continue\n if mote.getRSSI(m) > mote.minRssi:\n pdr = self._computePDR(mote, m)\n mote.setPDR(m, pdr)\n m.setPDR(mote, pdr)\n # closest distance\n dist = self._computeDistance(mote, m)\n if shortestDistance == None or dist < shortestDistance:\n mote.closestNeighbor = m\n shortestDistance = dist", "def __new_position(self):\n iterables = [range(self.size_x), range(self.size_y)]\n points = [] # Save all points in size.\n for point in itertools.product(*iterables):\n points.append(point)\n\n current_points = [] # Save used points.\n for object in self.objects:\n if (object.x, object.y) not in current_points:\n current_points.append((object.x, object.y))\n\n for point in current_points:\n 
points.remove(point) # Remove all used points.\n\n location = np.random.choice(a=range(len(points)), replace=False)\n return points[location]", "def test_get_placement_of_vertex(self):\n subv = list()\n for i in range(5):\n subv.append(SimpleMachineVertex(None, \"\"))\n\n pl = list()\n for i in range(4):\n pl.append(Placement(subv[i], 0, 0, i))\n\n pls = Placements(pl)\n for i in range(4):\n self.assertEqual(pls.get_placement_of_vertex(subv[i]), pl[i])", "def place_allowed_tower_sites():\n self.coordinates__tower_sites = []\n for tk in xrange(self.N_tower_kinds):\n #Each kind of tower will have the correct number of sites placed\n \n coords = []\n while len(coords)<self.N_tower_sites[tk]:\n x = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[1]+1-self.BORDER_MARGIN,size=1)[0]\n y = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[0]+1-self.BORDER_MARGIN,size=1)[0]\n p = (x,y) \n all_valid = True\n for rect in self.coordinates__obstacles:\n if not check_valid_placement(p,rect):\n all_valid = False\n break\n if all_valid:\n coords.append(p)\n self.coordinates__tower_sites.append(coords)", "def get_element_for_location(self, points):\n verts = np.zeros((points.shape[0], 4, 3))\n bc = np.zeros((points.shape[0], 4))\n tetras = np.zeros(points.shape[0], dtype=\"int64\")\n inside = np.zeros(points.shape[0], dtype=bool)\n npts = 0\n npts_step = int(1e4)\n # break into blocks of 10k points\n while npts < points.shape[0]:\n\n cell_index = np.array(\n self.aabb_grid.position_to_cell_index(points[: npts + npts_step, :])\n )\n inside = self.aabb_grid.inside(points[: npts + npts_step, :])\n global_index = (\n cell_index[:, 0]\n + self.aabb_grid.nsteps_cells[None, 0] * cell_index[:, 1]\n + self.aabb_grid.nsteps_cells[None, 0]\n * self.aabb_grid.nsteps_cells[None, 1]\n * cell_index[:, 2]\n )\n\n tetra_indices = self.aabb_table[global_index[inside], :].tocoo()\n # tetra_indices[:] = -1\n row = tetra_indices.row\n col = tetra_indices.col\n # using returned indexes calculate barycentric coords to determine which tetra the points are in\n vertices = self.nodes[self.elements[col, :4]]\n pos = points[row, :]\n vap = pos[:, :] - vertices[:, 0, :]\n vbp = pos[:, :] - vertices[:, 1, :]\n # # vcp = p - points[:, 2, :]\n # # vdp = p - points[:, 3, :]\n vab = vertices[:, 1, :] - vertices[:, 0, :]\n vac = vertices[:, 2, :] - vertices[:, 0, :]\n vad = vertices[:, 3, :] - vertices[:, 0, :]\n vbc = vertices[:, 2, :] - vertices[:, 1, :]\n vbd = vertices[:, 3, :] - vertices[:, 1, :]\n\n va = np.einsum(\"ij, ij->i\", vbp, np.cross(vbd, vbc, axisa=1, axisb=1)) / 6.0\n vb = np.einsum(\"ij, ij->i\", vap, np.cross(vac, vad, axisa=1, axisb=1)) / 6.0\n vc = np.einsum(\"ij, ij->i\", vap, np.cross(vad, vab, axisa=1, axisb=1)) / 6.0\n vd = np.einsum(\"ij, ij->i\", vap, np.cross(vab, vac, axisa=1, axisb=1)) / 6.0\n v = np.einsum(\"ij, ij->i\", vab, np.cross(vac, vad, axisa=1, axisb=1)) / 6.0\n c = np.zeros((va.shape[0], 4))\n c[:, 0] = va / v\n c[:, 1] = vb / v\n c[:, 2] = vc / v\n c[:, 3] = vd / v\n # inside = np.ones(c.shape[0],dtype=bool)\n mask = np.all(c >= 0, axis=1)\n\n verts[: npts + npts_step, :, :][row[mask], :, :] = vertices[mask, :, :]\n bc[: npts + npts_step, :][row[mask], :] = c[mask, :]\n tetras[: npts + npts_step][row[mask]] = col[mask]\n inside[: npts + npts_step][row[mask]] = True\n npts += npts_step\n return verts, bc, tetras, inside", "def _generate_maze(self):\n grid = [[GridCell(x, y, self._treasure_prob) for x in range(self._map_size)] for y in range(self._map_size)]\n\n center_x = 
self._map_size // 2\n center_y = self._map_size // 2\n\n for _ in range(self._sparsity):\n current = grid[center_x][center_y]\n stack = list()\n start = True\n while len(stack) or start:\n start = False\n current.visited = True\n children = current.has_children(grid)\n\n if children:\n choice = np.random.choice(children)\n choice.visited = True\n\n stack.append(current)\n\n self._remove_walls(current, choice)\n\n current = choice\n\n elif stack:\n current = stack.pop()\n for row in grid:\n for cell in row:\n cell.visited = False\n\n # edit center area\n grid[center_x][center_y].set_treasury()\n for x in range(center_x - 1, center_x + 2):\n for y in range(center_y - 1, center_y + 2):\n grid[x][y].erase_walls()\n return grid", "def get_random_coordinates(self):\n array_shape = np.shape(self.cells) # type: tuple\n points_on_island = []\n for i in range(1, array_shape[0] - 1):\n for j in range(1, array_shape[1] - 1):\n points_on_island.append((i, j))\n random.shuffle(points_on_island)\n return points_on_island", "def test_get_placements(self):\n subv = list()\n for i in range(5):\n subv.append(SimpleMachineVertex(None, \"\"))\n\n pl = list()\n for i in range(4):\n pl.append(Placement(subv[i], 0, 0, i))\n\n pls = Placements(pl)\n container = pls.placements\n for i in range(4):\n self.assertIn(pl[i], container)", "def iter_grid_tiles(self):\n all_points = self.grid[0].union(self.grid[1], self.grid[2], {self.position})\n min_x = min(p.x for p in all_points)\n min_y = min(p.y for p in all_points)\n\n if min_x < 0:\n xoffset = -min_x\n elif min_x == 0:\n xoffset = 0\n elif min_x > 0:\n xoffset = min_x\n if min_y < 0:\n yoffset = -min_y\n elif min_y == 0:\n yoffset = 0\n elif min_y > 0:\n yoffset = min_y\n origin = Point(0 + xoffset, 0 + yoffset)\n position = Point(self.position.x + xoffset, self.position.y + yoffset)\n for tile_type in (0, 1, 2):\n for point in self.grid[tile_type]:\n newpoint = Point(point.x + xoffset, point.y + yoffset)\n if newpoint not in (origin, position):\n yield newpoint.x, newpoint.y, tile_type\n yield origin.x, origin.y , 4\n yield position.x, position.y, 3", "def _place_nodes(self, i, j, step, max_nodes):\n points = []\n for k in range(max_nodes):\n while(True):\n t = Point(random.randint(i,i+step), random.randint(j,j+step)) \n if all([point.get_distance(t) > self.min_distance for point in points]):\n points.append(t)\n break\n \n for point in points:\n n=Node(self.counter, point)\n self.nodes.append(n)\n self.counter+=1", "def empty_spots(self):\n\t\tret = []\n\t\tfor i in range(0, self.size):\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tif(self.grid[i][j] == self.terminal):\n\t\t\t\t\tret.append((i,j))\n\t\treturn ret", "def generate_possible_moves(self):\r\n\t\t# Moves:\r\n\t\t# 0 - North\r\n\t\t# 1 - East\r\n\t\t# 2 - South\r\n\t\t# 3 - West\r\n\r\n\t\tmoves = []\r\n\r\n\t\tif self.x != 0:\r\n\t\t\tmoves.append(0)\r\n\t\tif self.y != self.n-1:\r\n\t\t\tmoves.append(1)\r\n\t\tif self.x != self.n-1:\r\n\t\t\tmoves.append(2)\r\n\t\tif self.y != 0:\r\n\t\t\tmoves.append(3)\r\n\r\n\t\treturn moves", "def grid_points(self):\n for i in range(self.rows):\n for j in range(self.cols):\n min_lat,max_lat,min_lon,max_lon = self.coords_to_min_max_lat_lon((i,j))\n if i == 0:\n print_gps(max_lat,max_lon,\"grid\")\n if j == 0:\n print_gps(max_lat,min_lon,\"grid\")\n if j == 0:\n print_gps(min_lat,min_lon,\"grid\")\n print_gps(min_lat,max_lon,\"grid\")", "def get_candidate_tiles(self) -> List[Point]:\n\t\tempty_tiles = set()\n\t\tfor x in range(self.size):\n\t\t\tfor y in 
range(self.size):\n\t\t\t\tif not self.tiles[x][y] == 0:\n\t\t\t\t\tfor d in [[0,1], [1,1], [1,0], [1,-1], [0,-1], [-1,-1], [-1,0], [-1,1]]:\n\t\t\t\t\t\tif x+d[0] >= 0 and y+d[1] >= 0 and x+d[0] < self.size and y+d[1] < self.size and self.tiles[x+d[0]][y+d[1]] == 0:\n\t\t\t\t\t\t\tempty_tiles.add(Point(x+d[0],y+d[1]))\n\t\treturn list(empty_tiles)", "def create_coords_medium(ph):\n # Min: 8, max 12\n for start_row in xrange(ph.pizza.shape[0]):\n for start_col in xrange(ph.pizza.shape[1]-2*ph.min_ing_per_slice+1):\n # First scenario\n for i in xrange(ph.min_ing_per_slice*2, ph.max_cells_per_slice+1):\n end_row = start_row + 1\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)\n\n for start_row in xrange(ph.pizza.shape[0]-1):\n for start_col in xrange(ph.pizza.shape[1]-3):\n # Second scenario\n for i in xrange(ph.min_ing_per_slice, ph.min_ing_per_slice+3):\n end_row = start_row + 2\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)\n\n for start_row in xrange(ph.pizza.shape[0] - 2):\n for start_col in xrange(ph.pizza.shape[1] - 2):\n # Third scenario\n for i in xrange(3, 5):\n end_row = start_row + 3\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)", "def generatePos(self):\n self.pos = np.zeros((self.num_points, 2), dtype='int32')\n self.pos[:, 1] = np.repeat(list(reversed(np.arange(1, self.x*2, 2))), self.y)\n self.pos[:, 0] = np.tile(np.arange(1, self.x*2, 2), self.y)", "def init_place(self):\n for i in range(self.numCells):\n x = randint(0,self.nx)\n y = randint(0,self.ny)\n while not self.is_empty(x,y):\n x = randint(0, self.nx)\n y = randint(0, self.ny)\n assert self.put_cell(x, y, i) is True\n self.cells.append(Cell(x,y))\n\n assert self.calc_cost() is True", "def get_empty_tiles(self) -> List[Point]:\n\t\tempty_tiles = []\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tif self.tiles[x][y] == 0:\n\t\t\t\t\tempty_tiles.append(Point(x,y))\n\t\treturn empty_tiles", "def generate_positions(self):\n raise NotImplementedError(\"Should implement generate_positions()!\")", "def create_points(self):\n v1 = 0.0\n v2 = 0.5\n v3 = 0.25\n v4 = 0.2 # only used for hexgrid\n\n points = []\n\n points.append((v1, v1, v1)) # 0\n points.append((v2, v1, v1)) # 1\n points.append((v2, v2, v1)) # 2\n points.append((v1, v2, v1)) # 3\n\n points.append((v1, v1, v2)) # 4\n points.append((v2, v1, v2)) # 5\n points.append((v2, v2, v2)) # 6\n points.append((v1, v2, v2)) # 7\n\n points.append((v3, v1, v1)) # 8\n points.append((v2, v3, v1)) # 9\n points.append((v3, v2, v1)) # 10\n points.append((v1, v3, v1)) # 11\n\n points.append((v1, v1, v3)) # 12\n points.append((v2, v1, v3)) # 13\n points.append((v2, v2, v3)) # 14\n points.append((v1, v2, v3)) # 15\n\n points.append((v3, v1, v2)) # 16\n points.append((v2, v3, v2)) # 17\n points.append((v3, v2, v2)) # 18\n points.append((v1, v3, v2)) # 19\n\n points.append((v4, v1, v1)) # 20\n points.append((v1, v4, v1)) # 21\n points.append((v1, v1, v4)) # 22\n\n return points", "def _generate_pores(self):\n logger.info(\"Place randomly located pores in the domain\")\n #Original Random Point Generator\n #coords = sp.rand(self._Np,3)*[self._Lx,self._Ly,self._Lz]\n #Seeding Code\n coords = np.zeros([self._Np,3])\n #reject points close to boundaries - if False there will be slightly more\n rejection = [False,False,True]\n for j in 
range(3):\n i = 0\n while i < self._Np:\n coord = np.random.uniform(0,1,1)\n if self._reject(coord) == rejection[j]:\n coords[i][j]=coord\n i += 1\n coords*=np.array([self._Lx,self._Ly,self._Lz])\n #Seeding Code\n #Uniform Random Generator\n #coords = np.array([np.random.uniform(0,self._Lx,self._Np),np.random.uniform(0,self._Ly,self._Np),np.random.uniform(0,self._Lz,self._Np)]).T\n\n self['pore.coords'] = coords\n logger.debug(\"End of method\")", "def fill(self, products):\n unplaced = []\n self.x = 0\n for p in products:\n if self.x + p.width < Cage.width:\n p.location = self.x, self.y, self.z\n self.placed_products.append(p)\n self.x += p.width\n else:\n unplaced.append(p)\n return unplaced", "def __generate_spawn_points(self):\n while True:\n p1x = random.randint(0, self.width - 1)\n p1y = random.randint(0, self.height - 1)\n p2x, p2y = self.__mirror(p1x, p1y)\n d_sq = (p1x - p2x)**2 + (p1y - p2y)**2\n if d_sq >= (self.width / 2)**2:\n break\n return (p1x, p1y), (p2x, p2y)", "def create_hard_blocks(self):\n for x in xrange(1, self.map_size[0], 2):\n for y in xrange(1, self.map_size[1], 2):\n self.create_hard_block_at(x, y)", "def vertices(self):\n d = self.space_dimension()\n v = vector(ZZ, d)\n points = []\n for g in self.minimized_generators():\n for i in range(0,d):\n v[i] = g.coefficient(Variable(i))\n v_copy = copy.copy(v)\n v_copy.set_immutable()\n points.append(v_copy)\n return tuple(points)", "def pontos(self):\n \n self.sc = 1. \n self.x = self.sc*np.array([-155., -139.4, -124., -108.5, -93., -77.5, -62., -46.5, -31., -15.5, 0, 15.5, 31., 46.5, 62., 77.5, 93., 108.5, 124., 139.5, 155.])\n self.y = self.sc*np.array([ 9.23, 14.37, 18.98, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 21.55, 14.37, 3.59])\n self.px_index = len(self.x)\n #self.py_index = len(self.x)/2\n\n self.coord = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n \n self.x = self.x[::-1]\n self.y = -self.y[::-1] \n self.new = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n self.coord = np.array([np.append(self.coord[0],self.new[0]),np.append(self.coord[1],self.new[1]),np.append(self.coord[2],self.new[2])])\n self.coord = np.array([np.append(self.coord[0],self.coord[0,0]),np.append(self.coord[1],self.coord[1,0]),np.append(self.coord[2],self.coord[2,0])])\n\n self.coord[0] = self.coord[0] - (np.amax(self.coord[0])+np.amin(self.coord[0]))/2\n self.coord[1] = self.coord[1] + (np.amax(self.coord[1])-np.amin(self.coord[1]))/2 \n \n self.coordi = np.array(self.coord)\n \n self.cg = np.array([0 + self.dx, self.H/2 + self.dy, self.z]) \n self.cgi = np.array(self.cg)\n \n self.thi = 0. 
+ self.dth \n self.th = float(self.thi) \n \n self.coordnav(self.dx,self.dy,self.dth)", "def generate(self):\n print self._matrix\n for point, cell in self._matrix:\n walls = zip(('U', 'L', 'D', 'R'), cell)\n blocked = [x for x in walls if not x[1]]\n if len(blocked) < 3:\n # we have more than one exit, this isn't a dead end and we\n # don't need to do anything\n continue\n print \"***\"\n print \"%s: %s\" % (blocked, len(blocked))\n random.shuffle(blocked)\n while(blocked):\n try:\n self._matrix.carve(point, blocked.pop()[0])\n except IndexError:\n continue\n break", "def randPlace(self):\r\n random.seed(self.seed)\r\n \r\n # Start placement on Partition A\r\n partA = True\r\n for node in self.G.nodes():\r\n \r\n randSite = random.randint(0,int(self.sitesNum/2)-1)\r\n \r\n if partA:\r\n partSite = self.sitesA\r\n self.G.node[node][\"part\"] = 'A'\r\n \r\n else:\r\n partSite = self.sitesB\r\n self.G.node[node][\"part\"] = 'B'\r\n \r\n while (partSite[randSite].isOcp()):\r\n randSite = random.randint(0,int(self.sitesNum/2)-1) \r\n\r\n partSite[randSite].setCell(node)\r\n self.G.node[node][\"site\"] = partSite[randSite]\r\n \r\n # Toggle partition for next placement\r\n partA = not partA", "def neighbors(self):\n \n # find 0 - blank square\n \n x0 = None\n y0 = None\n \n for i in range(4):\n for j in range(4):\n if self.get_tile(i,j) == 0:\n y0 = i\n x0 = j\n\n if x0 == None or y0 == None:\n return []\n \n neighbor_list = []\n \n # move 0 to the right\n if x0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0+1)\n new_position.set_tile(y0,x0+1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'r'\n neighbor_list.append(new_position)\n # move 0 to the left\n if x0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0-1)\n new_position.set_tile(y0,x0-1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'l'\n neighbor_list.append(new_position)\n # move 0 up\n if y0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0-1,x0)\n new_position.set_tile(y0-1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'u'\n neighbor_list.append(new_position)\n # move 0 down\n if y0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0+1,x0)\n new_position.set_tile(y0+1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'd'\n neighbor_list.append(new_position)\n \n return neighbor_list", "def _computeStikeDip(self):\n seg = self._group_index\n groups = np.unique(seg)\n ng = len(groups)\n norm_vec = Vector(0, 0, 0)\n north_vec = Vector(0, 0, 0)\n up_vec = Vector(0, 0, 0)\n for i in range(ng):\n group_segments = np.where(groups[i] == seg)[0]\n nseg = len(group_segments) - 1\n for j in range(nseg):\n ind = group_segments[j]\n P0 = Point(self._toplons[ind],\n self._toplats[ind],\n self._topdeps[ind])\n P1 = Point(self._toplons[ind + 1],\n self._toplats[ind + 1],\n self._topdeps[ind + 1])\n P2 = Point(self._botlons[ind + 1],\n self._botlats[ind + 1],\n self._botdeps[ind + 1])\n P3 = Point(self._botlons[ind],\n self._botlats[ind],\n self._botdeps[ind])\n P1up = Point(self._toplons[ind + 1],\n self._toplats[ind + 1],\n self._topdeps[ind + 1] - 1.0)\n P1N = Point(self._toplons[ind + 1],\n self._toplats[ind + 1] + 0.001,\n self._topdeps[ind + 1])\n P3up = Point(self._botlons[ind],\n self._botlats[ind],\n self._botdeps[ind] - 1.0)\n P3N = Point(self._botlons[ind],\n self._botlats[ind] + 0.001,\n self._botdeps[ind])\n p0 = 
Vector.fromPoint(P0)\n p1 = Vector.fromPoint(P1)\n p2 = Vector.fromPoint(P2)\n p3 = Vector.fromPoint(P3)\n p1up = Vector.fromPoint(P1up)\n p1N = Vector.fromPoint(P1N)\n p3up = Vector.fromPoint(P3up)\n p3N = Vector.fromPoint(P3N)\n\n # Sides\n s01 = p1 - p0\n s02 = p2 - p0\n s03 = p3 - p0\n s21 = p1 - p2\n s23 = p3 - p2\n\n # First triangle\n t1norm = (s02.cross(s01)).norm()\n a = s01.mag()\n b = s02.mag()\n c = s21.mag()\n s = (a + b + c) / 2\n A1 = np.sqrt(s * (s - a) * (s - b) * (s - c)) / 1000\n\n # Second triangle\n t2norm = (s03.cross(s02)).norm()\n a = s03.mag()\n b = s23.mag()\n c = s02.mag()\n s = (a + b + c) / 2\n A2 = np.sqrt(s * (s - a) * (s - b) * (s - c)) / 1000\n\n # Up and North\n p1up = (p1up - p1).norm()\n p3up = (p3up - p3).norm()\n p1N = (p1N - p1).norm()\n p3N = (p3N - p3).norm()\n\n # Combine\n norm_vec = norm_vec + A1 * t1norm + A2 * t2norm\n north_vec = north_vec + A1 * p1N + A2 * p3N\n up_vec = up_vec + A1 * p1up + A2 * p3up\n\n norm_vec = norm_vec.norm()\n north_vec = north_vec.norm()\n up_vec = up_vec.norm()\n\n # Do I need to flip the vector because it is pointing down (i.e.,\n # right-hand rule is violated)?\n flip = np.sign(up_vec.dot(norm_vec))\n norm_vec = flip * norm_vec\n\n # Angle between up_vec and norm_vec is dip\n self._dip = np.arcsin(up_vec.cross(norm_vec).mag()) * 180 / np.pi\n\n # Normal vector projected to horizontal plane\n nvph = (norm_vec - up_vec.dot(norm_vec) * up_vec).norm()\n\n # Dip direction is angle between nvph and north; strike is orthogonal.\n cp = nvph.cross(north_vec)\n sign = np.sign(cp.dot(up_vec))\n dp = nvph.dot(north_vec)\n strike = np.arctan2(sign * cp.mag(), dp) * 180 / np.pi - 90\n if strike < -180:\n strike = strike + 360\n self._strike = strike", "def _computeNoisyPositions(self, state):\n positions = state.getGhostPositions()\n w = self.args.w\n w2 = 2*w+1\n div = float(w2 * w2)\n new_positions = []\n for p in positions:\n (x, y) = p\n dist = util.Counter()\n for i in range(x - w, x + w + 1):\n for j in range(y - w, y + w + 1):\n dist[(i, j)] = 1.0 / div\n dist.normalize()\n new_positions.append(util.chooseFromDistribution(dist))\n return new_positions", "def _computeNoisyPositions(self, state):\n positions = state.getGhostPositions()\n w = self.args.w\n w2 = 2*w+1\n div = float(w2 * w2)\n new_positions = []\n for p in positions:\n (x, y) = p\n dist = util.Counter()\n for i in range(x - w, x + w + 1):\n for j in range(y - w, y + w + 1):\n dist[(i, j)] = 1.0 / div\n dist.normalize()\n new_positions.append(util.chooseFromDistribution(dist))\n return new_positions", "def generate_mines(self, number):\n mine_locations = []\n available_places = [[j, i]\n for i in xrange(0, self.x) for j in xrange(0, self.y)]\n while number > 0:\n # the chosen coordinate for a mine is appended into the list and is\n # removed from the list of choices to prevent duplicates.\n choice = random.choice(available_places)\n available_places.remove(choice)\n mine_locations.append(choice)\n number -= 1\n return mine_locations", "def non_rotated_vertices(self):\n v0 = [self.pos.x - self.width / 2, self.pos.y - self.height / 2]\n v1 = [self.pos.x + self.width / 2, self.pos.y - self.height / 2]\n v2 = [self.pos.x + self.width / 2, self.pos.y + self.height / 2]\n v3 = [self.pos.x - self.width / 2, self.pos.y + self.height / 2]\n return v0, v1, v2, v3", "def createTopology(self):\n\n # find DAG root\n dagRoot = None\n for mote in self.motes:\n if mote.id == 0:\n mote.role_setDagRoot()\n dagRoot = mote\n assert dagRoot\n\n if self.settings.mobilityModel == 
'RPGM':\n # put DAG root at center of area\n dagRoot.setLocation(x=SimEngine.SimEngine().targets[0][0],\n y=SimEngine.SimEngine().targets[0][1])\n else:\n # put DAG root at center of area\n dagRoot.setLocation(x=self.squareSide/2,\n y=self.squareSide/2)\n\n # reposition each mote until it is connected\n connectedMotes = [dagRoot]\n for mote in self.motes:\n if mote in connectedMotes:\n continue\n\n connected = False\n while not connected:\n # pick a random location\n mote.setLocation(x=self.squareSide*random.random(),\n y=self.squareSide*random.random())\n\n # if mote.id == 1:\n # mote.setLocation(\n # x=self.squareSide / 2.5 + 0.02,\n # y=self.squareSide / 2.5 + 0.3\n # )\n # elif mote.id == 2:\n # mote.setLocation(\n # x=self.squareSide / 2.5 + 0.3,\n # y=self.squareSide / 2.5 + 0.3\n # )\n #\n # elif mote.id == 3:\n # mote.setLocation(\n # x=self.squareSide / 2.5 + 0.1,\n # y=self.squareSide / 2.5 + 0.4\n # )\n\n # elif mote.id == 4:\n # mote.setLocation(\n # x=self.squareSide / 2.5 + 0.2,\n # y=self.squareSide / 2.5 + 0.65\n # )\n # else:\n\n mote.setLocation(\n x=self.settings.squareSide * random.random(),\n y=self.settings.squareSide * random.random()\n )\n\n numStableNeighbors = 0\n\n # count number of neighbors with sufficient RSSI\n for cm in connectedMotes:\n\n rssi = self._computeRSSI(mote, cm)\n mote.setRSSI(cm, rssi)\n cm.setRSSI(mote, rssi)\n\n # save the intial RSSI values for future use in the mobility models\n mote.initialRSSI[cm] = rssi\n cm.initialRSSI[mote] = rssi\n\n if rssi > self.STABLE_RSSI:\n numStableNeighbors += 1\n\n # make sure it is connected to at least stable_neighbors motes\n # or connected to all the currently deployed motes when the\n # number of deployed motes are smaller than stable_neighbors\n if (numStableNeighbors >= self.stable_neighbors or\n numStableNeighbors == len(connectedMotes)):\n print 'moteid %d, mote x %.4f, mote y %.4f: valid %s' % (mote.id, mote.x, mote.y , SimEngine.SimEngine().checkValidPosition(mote.x, mote.y, countSquare=True, placement=True))\n if self.settings.mobilityModel == 'RPGM' and SimEngine.SimEngine().checkValidPosition(mote.x, mote.y, countSquare=True, placement=True):\n connected = True\n elif self.settings.mobilityModel != 'RPGM':\n connected = True\n\n connectedMotes += [mote]\n\n # self.motes[3].setRSSI(self.motes[0], -96)\n # self.motes[0].setRSSI(self.motes[3], -96)\n\n # for each mote, compute PDR to each neighbors\n for mote in self.motes:\n for m in self.motes:\n if mote == m:\n continue\n if mote.getRSSI(m) > mote.minRssi:\n pdr = self._computePDR(mote, m)\n mote.setPDR(m, pdr)\n m.setPDR(mote, pdr)", "def display(self):\n for i in range(self.height - 1, 0, -1):\n for j in range(self.width):\n # yield i, j - 1, self.grid[i][j - 1]\n yield j, i, self.dungeon.tile(Point(j, i))\n\n \"\"\"\n def __iter__(self):\n for i in range(self.height):\n for j in range(self.width):\n yield Point(x=self.x + j, y=self.y + i)\n \"\"\"", "def get_directions():\n return [(1, 0), (0, 1), (-1, 0), (0, -1)]", "def place( self, position, v, T):\n geo = (self.geo - self.geo[v]).dot( T)\n return position + geo", "def plan(self):\n return [(0, 0), (1, 0), (1, 1), (1, 2), (1, 3)]", "def generatePiece(self):\n\n empty_tiles = []\n for y in range(BOARD_SIZE):\n for x in range(BOARD_SIZE):\n if self.grid[x][y].isEmpty():\n empty_tiles.append(self.grid[x][y])\n\n two_or_four = random.choice([2, 4])\n random.choice(empty_tiles).set(two_or_four)", "def vertexes(self):\n theta = self.orientation\n shifts = np.array([np.cos(theta), np.sin(theta)]) 
* self.a\n return self.coords + (shifts[:, None] * [-1, 1]).T", "def fill_octree(self):\n if len(self.children) <= 0:\n self.generate_octants()\n for point in self.points:\n self.append_point(point)\n self.points = np.array([])", "def generate_points(self):\n for x in range(self.num_sides):\n for y in range(self.num_sides):\n for z in range(self.num_sides):\n col_name = y + 4\n top_num = 0\n if 1 < z < 4:\n top_name = 'b'\n else:\n top_name = 'd'\n if z == 3 or z == 1:\n top_num += 4\n top_num += x\n\n top_name += str(top_num)\n\n k = Node(x*self.length-self.center, y*self.length -\n self.center, z*self.length-self.center, top_name, col_name)\n self.c_layers[y].append(k)\n self.points.append(k)", "def createTopology(self):\n\n # find DAG root\n dagRoot = None\n for mote in self.motes:\n if mote.id == 0:\n mote.role_setDagRoot()\n dagRoot = mote\n assert dagRoot\n\n if self.settings.mobilityModel == 'RPGM':\n # put DAG root at center of area\n dagRoot.setLocation(x=SimEngine.SimEngine().targets[0][0],\n y=SimEngine.SimEngine().targets[0][1])\n else:\n # put DAG root at center of area\n dagRoot.setLocation(x=self.squareSide/2,\n y=self.squareSide/2)\n\n # reposition each mote until it is connected\n connectedMotes = [dagRoot]\n motes_shuffled = copy.copy(self.motes)\n random.shuffle(motes_shuffled) # shuffle them around\n\n # for mote in self.motes:\n for mote in motes_shuffled:\n stableNeighbors = []\n if mote in connectedMotes:\n continue\n\n connected = False\n while not connected:\n # pick a random location\n # mote.setLocation(x=self.squareSide*random.random(),\n # y=self.squareSide*random.random())\n #\n # mote.setLocation(\n # x=self.settings.squareSide * random.random(),\n # y=self.settings.squareSide * random.random()\n # )\n\n newX = None\n newY = None\n # if no topology is not given, build the topology yourself\n if SimEngine.SimEngine().ilp_topology is None:\n newX = self.settings.squareSide * random.random()\n newY = self.settings.squareSide * random.random()\n else:\n # if no topology is given, use that topology\n newX = SimEngine.SimEngine().ilp_topology[str(mote.id)]['x']\n newY = SimEngine.SimEngine().ilp_topology[str(mote.id)]['y']\n\n mote.setLocation(\n x=newX,\n y=newY\n )\n\n numStableNeighbors = 0\n stableNeighbors = []\n\n # tryAgain = False\n # for cm in connectedMotes:\n # rssi = self._computeRSSI(mote, cm)\n # if rssi > -110:\n # tryAgain = True\n\n # if not tryAgain:\n # count number of neighbors with sufficient RSSI\n for cm in connectedMotes:\n\n rssi = self._computeRSSI(mote, cm)\n mote.setRSSI(cm, rssi)\n cm.setRSSI(mote, rssi)\n\n # save the intial RSSI values for future use in the mobility models\n mote.initialRSSI[cm] = rssi\n cm.initialRSSI[mote] = rssi\n\n if self.settings.individualModulations == 1:\n if self.rssiToPdr(rssi, modulation=Modulation.Modulation().minimalCellModulation[SimSettings.SimSettings().modulationConfig]) > self.settings.stableNeighborPDR:\n # if rssi > Modulation.Modulation().modulationStableRSSI[Modulation.Modulation().minimalCellModulation[SimSettings.SimSettings().modulationConfig]]:\n # print rssi\n numStableNeighbors += 1\n stableNeighbors.append(cm.id)\n else:\n if rssi > self.STABLE_RSSI:\n # print rssi\n numStableNeighbors += 1\n\n # make sure it is connected to at least STABLE_NEIGHBORS motes\n # or connected to all the currently deployed motes when the number of deployed motes\n # are smaller than STABLE_NEIGHBORS\n if numStableNeighbors >= self.stable_neighbors or numStableNeighbors == len(connectedMotes):\n print 'For 
mote {0}, stable neighbors {1}'.format(mote.id, stableNeighbors)\n connected = True\n\n connectedMotes += [mote]\n\n # for each mote, compute PDR to each neighbors\n for mote in self.motes:\n for m in self.motes:\n if mote == m:\n continue\n\n # set the distance to all other motes\n distance = math.sqrt((m.x - mote.x) ** 2 + (m.y - mote.y) ** 2)\n m.set_distance(mote, distance)\n mote.set_distance(m, distance)\n # print 'mote %d to mote %d: %.4f' % (m.id, mote.id, distance)\n if self.settings.individualModulations == 1:\n rssi_value = mote.getRSSI(m)\n # for modulationTmp in Modulation.Modulation().modulations:\n # if self.settings.ilpfile is not None:\n # ## I am not going to set this as this should be set by the ILP\n # pass\n # else:\n # # if the rssi value is higher than the minimal signal value required for this neighbor, take that modulation\n # # and compute the PDR using that modulation\n # pass\n # # if rssi_value > Modulation.Modulation().modulationStableRSSI[modulationTmp]:\n # # pdr = self._computePDR(mote, m, modulation=modulationTmp)\n # # mote.setPDR(m, pdr)\n # # m.setPDR(mote, pdr)\n # # mote.setModulation(m, modulationTmp)\n # # m.setModulation(mote, modulationTmp)\n else:\n if mote.getRSSI(m) > mote.minRssi:\n pdr = self._computePDR(mote, m)\n mote.setPDR(m, pdr)\n m.setPDR(mote, pdr)", "def splitPlace(self):\r\n \r\n \r\n \r\n nodeSortedIter = sorted(self.G.degree_iter(),key=itemgetter(1),reverse=True)\r\n \r\n placeCnt = 0\r\n \r\n for node in nodeSortedIter:\r\n if placeCnt<self.cells/2:\r\n self.sitesA.append(node[0])\r\n self.G.node[node[0]][\"part\"] = 'A'\r\n else:\r\n self.sitesB.append(node[0])\r\n self.G.node[node[0]][\"part\"] = 'B'\r\n placeCnt+=1", "def allplacement(self, test_pylos):\n placements = []\n player = test_pylos._state['visible']['turn']\n for layer in range(4):\n for row in range(4-layer):\n for column in range(4-layer):\n value = test_pylos.get(layer,row,column)\n if value is None:\n try:\n test_pylos.validPosition(layer, row, column)\n except game.InvalidMoveException:\n pass\n else:\n if test_pylos.createSquare((layer, row, column)) is True:\n if player == 0:\n removableballs = self.removableballs0(test_pylos)\n removableballs.apppend([layer, row, column])\n else:\n removableballs = self.removableballs1(test_pylos)\n removableballs.apppend([layer, row, column])\n for i in removableballs:\n if i[0] == layer - 1 and i[1] == row and i[2] == column:\n removableballs.remove(i)\n elif i[0] == layer - 1 and i[1] == row + 1 and i[2] == column:\n removableballs.remove(i)\n elif i[0] == layer - 1 and i[1] == row + 1 and i[2] == column + 1:\n removableballs.remove(i)\n elif i[0] == layer - 1 and i[1] == row and i[2] == column + 1:\n removableballs.remove(i)\n\n for i in removableballs:\n move = {\n 'move': 'place',\n 'to': [layer, row, column],\n 'remove': i\n }\n\n placements.append(move)\n else:\n move = {\n 'move': 'place',\n 'to': [layer, row, column]\n }\n placements.append(move)\n return placements", "def Generate_Random( self ):\n print( 'Generating Random coordinates' )\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n self.Data.Stand[s].Tree[t].X = random.uniform( 0, 208.71 )\n self.Data.Stand[s].Tree[t].Y = random.uniform( 0, 208.71 )", "def possibleMovements(self,numIterations:int=50)->list[tuple]:\n x=random.randint(0,self._side-1); y=random.randint(0,self._side-1)\n possible_positions=[]\n positionsCovered=[(x,y)]\n for _ in 
range(numIterations):\n if x+2<self._side and y+1<self._side:\n possible_positions.append((x+2,y+1))\n \n if x+2<self._side and y-1<self._side and y-1>0:\n possible_positions.append((x+2,y-1))\n \n if x-2<self._side and y+1<self._side and x-2>0:\n possible_positions.append((x-2,y+1))\n \n if x-2<self._side and y-1<self._side and x-2>0 and y-1>0:\n possible_positions.append((x-2,y-1)) \n\n if x+1<self._side and y+2<self._side:\n possible_positions.append((x+1,y+2))\n \n if x+1<self._side and y-2<self._side and y-1>0:\n possible_positions.append((x+1,y-2))\n\n if x-1<self._side and y+2<self._side and x-1>0:\n possible_positions.append((x-1,y+2))\n \n if x-1<self._side and y-2<self._side and x-1>0 and y-2>0:\n possible_positions.append((x-1,y-2))\n\n newX,newY=random.choice(possible_positions) #choose randomly among the possible positions,and then repeat this \n x,y=newX,newY\n positionsCovered.append((newX,newY)) \n\n return positionsCovered", "def CreateGridNodesInStatePlane(x0, y0, azi, dx, dy, ni, nj):\n # calculating change in alongshore coordinate for northing and easting\n # given the associated dx dy\n dE_j = dy * np.cos(np.deg2rad(azi + 90))\n dN_j = dy * np.sin(np.deg2rad(azi + 90))\n # calculating change in cross-shore coordinate for northing and easting\n dE_i = dx * np.cos(np.deg2rad(azi))\n dN_i = dx * np.sin(np.deg2rad(azi))\n # create Easting & Northing coordinates for\n # cross shore location (in grid space)\n try: # this works for when dE_i is not an array .... ie regularly spaced grid nodes\n easting_i = np.linspace(x0, x0 + ni * dE_i, num=ni, endpoint=True)\n northing_i = np.linspace(y0, y0 + ni * dN_i, num=ni, endpoint=True)\n # create Northing and Easting coords for Along-shore location\n easting_j = np.linspace(x0, x0 + nj * dE_j, num=nj, endpoint=True)\n northing_j = np.linspace(y0, y0 + nj * dN_j, num=nj, endpoint=True)\n except ValueError: # for instances when grid nodes are irregularly spaced\n easting_i, northing_i = [x0], [y0] # seeding the origin for the first value in the coordinates\n easting_j, northing_j = [x0], [y0]\n for ii in range(0, ni-1): # first doing the i coordinate (start at origin add the change in e/n for each grid cell\n easting_i.append(easting_i[ii] + dE_i[ii])\n northing_i.append(northing_i[ii] + dN_i[ii])\n for jj in range(0, nj-1):\n easting_j.append(easting_j[jj] + dE_j[jj])\n northing_j.append(northing_j[jj] + dN_j[jj])\n # convert lists to arrays\n easting_i = np.array(easting_i)\n easting_j = np.array(easting_j)\n northing_i = np.array(northing_i)\n northing_j = np.array(northing_j)\n assert easting_j.shape[0] == nj, 'len of cstateplane sp3200oordinates are not the same as the number of cells'\n assert northing_i.shape[0] == ni, 'len of coordinates are not the same as the number of cells'\n\n icoords = np.array([easting_i, northing_i])\n jcoords = np.array([easting_j, northing_j])\n\n return icoords, jcoords", "def _generate_vertexes(self):\n # generate list of sets for each vms\n self._graph = [set() for _ in range(self._vm_count)]", "def guarded_places(self):\n guarded = []\n for x in range(8):\n for y in range(8):\n if self.squares[x][y].piece and self.squares[x][y].piece.color != self.turn:\n squares = self.squares[x][y].piece.actions(self, (x, y), True)\n if self.squares[x][y].piece.name != 'pawn': # pawns capture in different areas than they move\n guarded.extend(squares[0])\n guarded.extend(squares[1])\n return guarded", "def successors(state):\n free_coordinates = []\n for i in range(3):\n for j in range(3):\n if state[i][j] == 
'_':\n free_coordinates.append([i, j])\n\n return free_coordinates", "def generate_xcoords():\n\n for i in range(0, Molecule.ground_species_count):\n tmp1 = (PlotParameter.species_line_spacing * i) + 0.25\n tmp2 = tmp1 + PlotParameter.species_line_length\n Molecule.left_endpt.append(tmp1)\n Molecule.right_endpt.append(tmp2)\n\n for i in range(0, Molecule.excited_species_count):\n tmp1 = Molecule.left_endpt[Molecule.excited_state[i] - 1]\n tmp2 = Molecule.right_endpt[Molecule.excited_state[i] - 1]\n Molecule.left_endpt.append(tmp1)\n Molecule.right_endpt.append(tmp2)\n\n return None", "def generatePolygons():", "def create_local_voxmap(sampler, point, xd=10, yd=10, zd=10, voxel_size=1):\n \n # minimum and maximum north coordinates\n north_min = point[0] - xd\n north_max = point[0] + xd\n \n # minimum and maximum east coordinates\n east_min = point[1] - yd\n east_max = point[1] + yd\n \n # miniumum and maximum altitude\n alt_min = point[2] - zd\n alt_max = point[2] + zd\n \n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil((north_max - north_min))) // voxel_size\n east_size = int(np.ceil((east_max - east_min))) // voxel_size\n alt_size = int(np.ceil((alt_max - alt_min))) // voxel_size\n\n # Create an empty grid\n voxmap = np.zeros((north_size, east_size, alt_size), dtype=np.bool)\n \n #maximum distance between point and outer voxels\n d_voxmap = np.sqrt((xd**2+yd**2) + (zd/2)**2)\n \n #maximum distance between obstacle center and outer borders\n d_obstacle = np.max(np.array([ \n LA.norm(np.array(p.coords[0]) - \n np.array(p.coords[2])) / 2 \n for p in polygons]))\n \n #maximum combined distances between voxmap center and obstacle centers\n d_max = d_voxmap + d_obstacle\n\n #all obstacles in vincinity\n idxs = list(sampler._tree.query_radius(point[:2], r=d_max))[0]\n \n #loop over closeby obstacles\n for i in idxs:\n \n #current obstacle\n p = polygons[i]\n \n #get the obstacle bounds (north_min, north_max, east_min, east_max)\n bounds = [\n np.min([vals[0] for vals in p.coords]),\n np.max([vals[0] for vals in p.coords]),\n np.min([vals[1] for vals in p.coords]),\n np.max([vals[1] for vals in p.coords]),\n 0.,\n p.height\n ]\n \n #discretize obstacle bounds according to voxel size\n obstacle = [\n int(bounds[0] - north_min) // voxel_size,\n int(bounds[1] - north_min) // voxel_size,\n int(bounds[2] - east_min) // voxel_size,\n int(bounds[3] - east_min) // voxel_size,\n int(bounds[4] - alt_min) // voxel_size,\n int(bounds[5] - alt_min) // voxel_size\n ]\n \n #correct for out-of-bound values\n if obstacle[0]<0:\n obstacle[0]=0\n if obstacle[1]>voxmap.shape[0]-1:\n obstacle[1]=voxmap.shape[0]-1\n if obstacle[2]<0:\n obstacle[2]=0\n if obstacle[3]>voxmap.shape[1]-1:\n obstacle[3]=voxmap.shape[1]-1\n if obstacle[4]<0:\n obstacle[4]=0\n if obstacle[5]>voxmap.shape[2]-1:\n obstacle[5]=voxmap.shape[2]-1\n \n #add collision information to the voxmap\n voxmap[obstacle[0]:obstacle[1]+1,\n obstacle[2]:obstacle[3]+1,\n obstacle[4]:obstacle[5]+1] = True\n \n #collect collision information for the ground floor\n floor = int(0-alt_min)//voxel_size\n\n #if voxmap collides with ground floor: add collision information\n if floor>=0:\n voxmap[:,:,:floor]=True\n \n #return the voxmap\n return voxmap", "def TankPositionGenerator(geometry):\n # for omkey, geo in geometry.omgeo:\n # if 60 < omkey.om < 65:\n # yield omkey, geo.position\n for station in geometry.stationgeo.values():\n for tank in station:\n yield tank.omkey_list[0], tank.position\n yield 
tank.omkey_list[1], tank.position", "def generaCubo(self):\r\n #Use Panda predefined format for vertex coordinate only\r\n format = GeomVertexFormat.getV3()\r\n \r\n #Build Vertex data using the created format. Vertex will never change so I use Static attribute \r\n vdata = GeomVertexData('CuboData', format, Geom.UHStatic)\r\n \r\n #I will have to write vertex data so I create a writer for these data\r\n vertex = GeomVertexWriter(vdata, 'vertex')\r\n \r\n #I now use the writer to add vertex data\r\n vertex.addData3f(0, 0, 0)\r\n vertex.addData3f(1, 1, 1)\r\n vertex.addData3f(0, 1, 1)\r\n vertex.addData3f(0, 1, 0)\r\n vertex.addData3f(0, 0, 1)\r\n vertex.addData3f(1, 0, 0)\r\n vertex.addData3f(1, 0, 1)\r\n vertex.addData3f(1, 1, 0)\r\n \r\n #I now create 12 triangles\r\n prim = GeomTriangles(Geom.UHStatic)\r\n\r\n #and then I add vertex to them\r\n #Next time use addVertices(0,1,2) !!!\r\n prim.addVertex(7)\r\n prim.addVertex(0)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n \r\n prim.addVertex(3)\r\n prim.addVertex(0)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n \r\n prim.addVertex(2)\r\n prim.addVertex(6)\r\n prim.addVertex(4)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(1)\r\n prim.addVertex(6)\r\n prim.addVertex(2)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(7)\r\n prim.addVertex(2)\r\n prim.addVertex(3)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(1)\r\n prim.addVertex(2)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(3)\r\n prim.addVertex(4)\r\n prim.addVertex(0)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(2)\r\n prim.addVertex(4)\r\n prim.addVertex(3)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(0)\r\n prim.addVertex(6)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(4)\r\n prim.addVertex(6)\r\n prim.addVertex(0)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(5)\r\n prim.addVertex(1)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(6)\r\n prim.addVertex(1)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n\r\n #Create a Geom to bing vertex data to primitives\r\n geom = Geom(vdata)\r\n geom.addPrimitive(prim)\r\n\r\n #Create a node for the Geom in order to be able to render it\r\n node = GeomNode('gnode')\r\n node.addGeom(geom)\r\n\r\n #Adde the node to the scene graph == render it!\r\n nodePath = render.attachNewNode(node)\r\n \r\n #is this needed?\r\n nodePath.setPos( 0, 5, 0)\r\n \r\n self.camera.lookAt(nodePath)\r\n \r\n base.setBackgroundColor( .0, .0, .0 )\r\n \r\n taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")", "def get_all_moves(self):\n # 2d matrix of true/false, true if something can be placed\n legal_move_board = []\n possible_move_list = []\n for row in range(self.size):\n move_row = []\n for col in range(self.size):\n empty = self.board[row][col].state == PegState.EMPTY\n move_row.append(empty)\n if empty:\n possible_move_list.append((row, col))\n legal_move_board.append(move_row)\n \n # every position where something can be placed (list of tuples) (Combined with above)\n \"\"\" possible_move_list = []\n for row in range(self.size):\n for col in range(self.size):\n if legal_move_board[row][col] == True:\n possible_move_list.append((row, col))\n \"\"\"\n return legal_move_board, possible_move_list", "def test_empty_generation(self, example_staypoints):\n # the pfs would not generate staypoints with the default parameters\n sp = example_staypoints\n # select subset of sp such that no locations can be generated\n sp = sp.iloc[:3]\n\n warn_string = \"No locations 
can be generated, returning empty locs.\"\n with pytest.warns(UserWarning, match=warn_string):\n sp, locs = sp.as_staypoints.generate_locations(\n method=\"dbscan\", epsilon=10, num_samples=2, distance_metric=\"haversine\", agg_level=\"user\"\n )\n assert len(locs) == 0", "def generate(self):\n self.generate_points()\n self.generate_edges()", "def no_non_adjacent_vertices(self):\n clauses = []\n for v in range(0,self.graph.num_vertices):\n non_neighbours = sorted(list(set(range(0,self.graph.num_vertices))\n - set([v])\n - set(self.graph.edges[v])))\n for nv in non_neighbours:\n for position in range(0,self.graph.num_vertices-1):\n clause = [ ClauseVariable(True,v,position),\n ClauseVariable(True,nv,position+1)]\n clauses.append(clause)\n return clauses", "def __maze_generator(self):\n grid = []\n for row in range(self.__row_count):\n new_row = []\n for col in range(self.__col_count):\n new_row.append(RoomFactory.create_room([row, col]))\n if col > 0:\n new_row[col].left = new_row[col - 1]\n new_row[col - 1].right = new_row[col]\n if row > 0:\n new_row[col].up = grid[row - 1][col]\n grid[row - 1][col].down = new_row[col]\n grid.append(new_row)\n return grid", "def test_create_new_empty_placements(self):\n pls = Placements()\n self.assertEqual(pls._placements, dict())\n self.assertEqual(pls._machine_vertices, dict())", "def PINTARMATRIXDEBOTONES(self):\n for i in range(0, 26):\n for j in range(0, 26):\n x0 = ((i+1)*24) + 26\n y0 = ((j)*21) \n self.telaMAPA.create_rectangle(x0, 545 - y0, x0 + 10, 550 - y0, tag=str(i)+\"-\"+str(j))\n # Se guarda la inforamcion\n info = (x0, 545 - y0, str(i)+\"-\"+str(j))\n self.bononesPlanoXY.append(info)", "def get_topology(init_height, map_size, max_height):\n num_features = math.ceil(map_size[0] / FEATURE_SIZE)\n generators = [create_valley, create_hill, create_plateau]\n previous = [random.randrange(len(generators)), random.randrange(len(generators))]\n feature_points = []\n for i in range(num_features):\n while True:\n idx = random.randrange(len(generators))\n # do not repeat topology more than once\n if previous.count(idx) != 2:\n break\n new_points = generators[idx](map_size[1], init_height, max_height, FEATURE_SIZE)\n for idp in range(len(new_points)):\n # as the feature points are generated in local coordinates, shift them on the x axis to the correct part\n # of the terrain.\n new_points[idp] = (new_points[idp][0] + i * FEATURE_SIZE, new_points[idp][1])\n feature_points.extend(new_points)\n previous.pop(0)\n previous.append(idx)\n\n return feature_points", "def generate_goal(self):\r\n\t\t# Creates a flat list of correct values\r\n\t\ttempList = [x for x in range(self.n**2)]\r\n\r\n\t\t# Nests those lists into a NxN\r\n\t\tBoardClass.goal = [tempList[self.n*i:self.n*(i+1)] for i in range(self.n)]\r\n\r\n\t\t# Creates a dictionary for the intended location of any specific tile. 
Used in\r\n\t\t# Manhatten Distance calculation.\r\n\t\tfor i in range(self.n**2):\r\n\t\t\trow = i // self.n\r\n\t\t\tcol = i % self.n\r\n\t\t\tBoardClass.goalTileLocations[i] = [row, col]", "def internal_points(self):\n return self.ghost_points()", "def new_tile(self):\n zero_list = []\n zero_cell = ()\n # self._cells = [[0 for col in range(self._grid_width)] for row in range(self._grid_height)]\n for row in range(self._grid_height):\n for col in range(self._grid_width):\n if self._cells[row][col] == 0:\n zero_cell = (row, col)\n zero_list.append(zero_cell)\n if len(zero_list) > 0:\n chance = random.randrange(0,10)\n cell_idx = random.randrange(len(zero_list))\n if chance == 9:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 4\n else:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 2\n else:\n print(\"You lost! Better luck next time!\")", "def create_directions_ingoing(self, pos=None):\n if pos is None:\n pos = self.draw.position\n all_directions = ((1, 0), (-1, 0), (0, 1), (0, -1), (-1, -1), (-1, 1), (1, 1), (1, -1))\n\n # filter returns an iterator, we have to materialize the filter and turn it into a tuple (or list or whatever...)\n return tuple(filter(lambda x: self.check_position_exists(pos.step(x)), all_directions))\n # all directions used to be a list, now it is a tuple. Seems to work fine...\n # filter needs the data to be filtered as a iterable container", "def redefinir_vertices(self):\n self.nueva_posicion_posible_parte_inferior = [0,0]\n self.nueva_posicion_posible_parte_superior = [0,0]\n self.vertice_1 = self.posicion\n self.vertice_2 = [self.posicion[0] + self.medidas, self.posicion[1]]\n self.vertice_3 = [self.posicion[0], self.posicion[1] + self.medidas]\n self.vertice_4 = [self.posicion[0] + self.medidas, self.posicion[1] + self.medidas]", "def random_placement(area):\n\n area.create_houses(True)\n\n for house in area.houses:\n place_house(area, house)", "def regular_grid(self, nsteps=None, shuffle = True, rescale=False, order='C'):\n if nsteps is None:\n nsteps = self.nsteps\n x = np.linspace(self.bounding_box[0, 0], self.bounding_box[1, 0],\n nsteps[0])\n y = np.linspace(self.bounding_box[0, 1], self.bounding_box[1, 1],\n nsteps[1])\n z = np.linspace(self.bounding_box[1, 2], self.bounding_box[0, 2],\n nsteps[2])\n xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')\n locs = np.array([xx.flatten(order=order), yy.flatten(order=order), zz.flatten(order=order)]).T\n if shuffle:\n logger.info(\"Shuffling points\")\n np.random.shuffle(locs)\n if rescale:\n locs = self.rescale(locs)\n return locs", "def smart_tentacle_positions(self, bounds: np.ndarray, num_positions) -> np.ndarray:\n valid_memory = [(pos, cost) for pos, cost in self.memory if\n np.all(pos >= bounds[:, 0]) and np.all(pos <= bounds[:, 1])]\n if len(valid_memory) < 2 * len(bounds):\n return self.random_tentacle_positions(bounds, num_positions)\n if len(valid_memory) > self.max_training_mem:\n random.shuffle(valid_memory) # so the model can change\n valid_memory = valid_memory[:self.max_training_mem]\n # base_estimator = cook_estimator(\"GP\", space=bounds,noise=.005)\n opt = skopt.Optimizer(bounds, n_initial_points=0, n_jobs=-1,\n acq_optimizer_kwargs={\"n_restarts_optimizer\": 10, \"n_points\": 30_000}, acq_func=\"EI\")\n\n x = [list(pos) for pos, cost in valid_memory]\n y = [cost for pos, cost in valid_memory]\n opt.tell(x, y) # train model\n positions = np.array(opt.ask(num_positions))\n return positions", "def generate_points(self, userdata):\n # TODO: generate along multiple 
axes\n x, y, _ = userdata.initial_point\n x, y = 0, 0\n\n i = 0\n for _ in range(self.num_points):\n point = PointStamped()\n point.header.frame_id = \"map\"\n point.point.x = x + i * self.spacing\n point.point.y = y\n point.point.z = 0.0\n\n i += 1\n\n yield point", "def trapezoid_decomposition_pl(polygons, bounds):\n polygons = Polygons(polygons)\n # print(bounds)\n point_locator = PointLocator(bounds)\n for edge in polygons.random_edge_sampler():\n point_locator.add_line(edge)\n return point_locator", "def generate(self):\n for i in range(4):\n random_first = randomize_first_box()\n self.randomize(random_first)\n for i in range(9):\n random_pos = randomize_position()\n self.randomize(random_pos)\n self.board.solve()", "def place_targets():\n\n \n coords = []\n while len(coords)<self.N_targets:\n x = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[1]+1-self.BORDER_MARGIN,size=1)[0]\n y = np.random.randint(self.BORDER_MARGIN,self.map_dimensions[0]+1-self.BORDER_MARGIN,size=1)[0]\n p = (x,y)\n all_valid = True\n for rect in self.coordinates__obstacles:\n if not check_valid_placement(p,rect):\n all_valid = False\n break\n if all_valid:\n coords +=[p]\n self.coordinates__targets = coords", "def __init__(self, no_vertices=0):\r\n self.__neighbours = {}\r\n self.__cost = {}\r\n for i in range(no_vertices):\r\n self.__neighbours[i] = []", "def _neuron_locations(self, m, n):\n #Nested iterations over both dimensions\n #to generate all 2-D locations in the map\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])", "def _neuron_locations(self, m, n):\n #Nested iterations over both dimensions\n #to generate all 2-D locations in the map\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])", "def _get_placement_actions(self, exclude=None):\n if not self._has_blocks_to_place(exclude=exclude):\n return []\n\n dirs = [_Vec3(0, 2, 0)]\n for dir_ in _adj_dirs():\n dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)])\n if self._get_block(self._pos + dir_) in [_AIR, _WATER]:\n dirs.append(dir_ + _Vec3(0, -1, 0))\n\n rtn = []\n for dir_ in dirs:\n pos = self._pos + dir_\n if self._can_place(pos):\n rtn.append({\n 'func': '_place',\n 'args': (pos,),\n 'kwargs': {'exclude': exclude}\n })\n\n return rtn", "def _generate_coordinates(self):\n a0 = +0.2969\n a1 = -0.1260\n a2 = -0.3516\n a3 = +0.2843\n a4 = -0.1036 # zero thickness TE\n\n x = np.linspace(0.0, 1.0, num=self.n_points)\n\n if len(self.digits) == 4:\n # Returns n+1 points in [0 1] for the given 4-digits NACA string\n m = float(self.digits[0]) / 100.0\n p = float(self.digits[1]) / 10.0\n t = float(self.digits[2:]) / 100.0\n\n # half-thickness distribution\n yt = 5 * t * (a0 * np.sqrt(x) + a1 * x + a2 * np.power(x, 2) +\n a3 * np.power(x, 3) + a4 * np.power(x, 4))\n\n if p == 0:\n # Symmetric foil\n self.xup_coordinates = np.linspace(0.0, 1.0, num=self.n_points)\n self.yup_coordinates = yt\n self.xdown_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.ydown_coordinates = -yt\n else:\n # Cambered foil\n xc1 = np.asarray([xx for xx in x if xx <= p])\n xc2 = np.asarray([xx for xx in x if xx > p])\n yc1 = m / np.power(p, 2) * xc1 * (2 * p - xc1)\n yc2 = m / np.power(1 - p, 2) * (1 - 2 * p + xc2) * (1 - xc2)\n # Y-coordinates of camber line\n yc = np.append(yc1, yc2)\n\n if self.cosine_spacing:\n # points are generated according to cosine distribution of\n # the X-coordinates of the chord\n dyc1_dx = m / np.power(p, 2) * (2 * p - 2 * xc1)\n dyc2_dx = m / np.power(1 - p, 2) * (2 * p - 2 * xc2)\n dyc_dx = 
np.append(dyc1_dx, dyc2_dx)\n theta = np.arctan(dyc_dx)\n self.xup_coordinates = x - yt * np.sin(theta)\n self.yup_coordinates = yc + yt * np.cos(theta)\n self.xdown_coordinates = x + yt * np.sin(theta)\n self.ydown_coordinates = yc - yt * np.cos(theta)\n else:\n # Linear spacing distribution of the foil coordinates\n self.xup_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.xdown_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.yup_coordinates = yc + yt\n self.ydown_coordinates = yc - yt\n\n elif len(self.digits) == 5:\n # Returns n+1 points in [0 1] for the given 5-digits NACA string\n cld = float(self.digits[0]) * 0.15\n p = 5.0 * float(self.digits[1]) / 100.0\n s = float(self.digits[2])\n t = float(self.digits[3:]) / 100.0\n\n # half-thickness distribution\n yt = 5 * t * (a0 * np.sqrt(x) + a1 * x + a2 * np.power(x, 2) +\n a3 * np.power(x, 3) + a4 * np.power(x, 4))\n\n if s == 1:\n # Relfex camber\n P = np.array([0.1, 0.15, 0.2, 0.25])\n M = np.array([0.13, 0.2170, 0.318, 0.441])\n K = np.array([51.99, 15.793, 6.520, 3.191])\n elif s == 0:\n # Standard camber\n P = np.array([0.05, 0.1, 0.15, 0.2, 0.25])\n M = np.array([0.0580, 0.1260, 0.2025, 0.2900, 0.3910])\n K = np.array([361.4, 51.64, 15.957, 6.643, 3.230])\n else:\n raise ValueError(\n 'For NACA \"LPSTT\" the value of \"S\" can be either 0 or 1.')\n\n if p == 0:\n # Symmetric foil\n self.xup_coordinates = np.linspace(0.0, 1.0, num=self.n_points)\n self.yup_coordinates = yt\n self.xdown_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.ydown_coordinates = -yt\n else:\n # Cambered foil\n spl_m = splrep(P, M)\n spl_k = splrep(M, K)\n m = splev(p, spl_m)\n k1 = splev(m, spl_k)\n xc1 = np.asarray([xx for xx in x if xx <= m])\n xc2 = np.asarray([xx for xx in x if xx > m])\n yc1 = k1 / 6.0 * (np.power(xc1, 3) - 3 * m * np.power(xc1, 2) +\n np.power(m, 2) * (3 - m) * xc1)\n yc2 = k1 / 6.0 * np.power(m, 3) * (1 - xc2)\n yc = np.append(yc1, yc2)\n\n if self.cosine_spacing:\n # points are generated according to cosine distribution of\n # the X-coordinates of the chord\n zc = cld / 0.3 * yc\n dyc1_dx = 1.0 / 6.0 * k1 * (\n 3 * np.power(xc1, 2) - 6 * m * xc1 + np.power(m, 2) *\n (3 - m))\n dyc2_dx = np.tile(-1.0 / 6.0 * k1 * np.power(m, 3),\n len(xc2))\n dyc_dx = np.append(dyc1_dx, dyc2_dx)\n theta = np.arctan(dyc_dx)\n self.xup_coordinates = x - yt * np.sin(theta)\n self.yup_coordinates = zc + yt * np.cos(theta)\n self.xdown_coordinates = x + yt * np.sin(theta)\n self.ydown_coordinates = zc - yt * np.cos(theta)\n else:\n # Linear spacing distribution of the foil coordinates\n self.xup_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.xdown_coordinates = np.linspace(\n 0.0, 1.0, num=self.n_points)\n self.yup_coordinates = yc + yt\n self.ydown_coordinates = yc - yt\n\n else:\n raise Exception", "def generate_trivial_tours(self):\n self.routes = []\n for c in range(1, self.vrpdata.NumCust+1):\n self.routes.append(VRP_Route([c]))\n return self.get_objective()", "def points_generator(self):\n rows, cols = self.game.board.board_size\n points = [Point(i, j) for i, j in product(range(rows), range(cols))]\n for point in points:\n yield point", "def build_maze(self, start, end, steprange):\n \n self.directions = [self.turn_R, self.turn_L, self.turn_L, self.go_up]\n currentpos = (start, self.height-4)\n list_of_points = [currentpos]\n \n if currentpos[1] == 0 and currentpos[0] == 0:\n return list_of_points\n #Before the end point is reached, move in a random direction\n while currentpos[1] > 
end or currentpos[0] > end:\n f = random.choice(self.directions)\n currentpos = f(currentpos, random.randint(steprange[0], steprange[1]))\n \n #Check if the vine is out of bounds\n if currentpos[0] < 0:\n currentpos = (0, currentpos[1])\n if currentpos[0] > self.width:\n currentpos = (self.width-4, currentpos[1])\n if currentpos[1] < 0:\n currentpos = (currentpos[0], 0)\n #Add the point to the list of points the vine goes through\n list_of_points.append(currentpos)\n return list_of_points", "def tiles_positions(self) -> Generator[TilePosition, None, None]:\r\n for i in range(self.width * self.height):\r\n yield TilePosition(i % self.width, i // self.width)", "def get_nodes(self, latlon=False):\n ids = np.where(np.isnan(self.data[:,:,:]))\n i_nan = ids[0][0] ; j_nan = ids[1][0]\n \n def area_neighbours(Area, i_nan, j_nan):\n rows = np.array(Area)[:,0]\n cols = np.array(Area)[:,1]\n rows_m = rows-1\n cols_m = cols-1\n rows_p = rows+1\n cols_p = cols+1\n \n p1 = np.array([rows_m,cols]).ravel().reshape(len(rows),2,order='F')\n p2 = np.array([rows_p,cols]).ravel().reshape(len(rows),2,order='F')\n p3 = np.array([rows,cols_m]).ravel().reshape(len(rows),2,order='F')\n p4 = np.array([rows,cols_p]).ravel().reshape(len(rows),2,order='F')\n cond1 = p1[:,0]<0\n cond2 = p2[:,0]>self.dimX-1\n cond3 = p3[:,1]<0\n cond4 = p4[:,1]>self.dimY-1\n if latlon:\n p3[:,1][cond3] = self.dimY-1\n p4[:,1][cond4] = 0\n else:\n p3[:,0][cond3] = i_nan\n p3[:,1][cond3] = j_nan\n p4[:,0][cond4] = i_nan\n p4[:,1][cond4] = j_nan\n p1[:,0][cond1] = i_nan\n p1[:,1][cond1] = j_nan\n p2[:,0][cond2] = i_nan\n p2[:,1][cond2] = j_nan\n p = np.concatenate((p1,p2,p3,p4)).tolist()\n return [i for i in p if i not in self.unavail]\n\n def area_max_correlation(Area, neighbours):\n Rmean = [] ; X = []\n for cell in neighbours:\n R = []\n new_cell = cell[0]*self.dimY + cell[1]\n if new_cell in self.gridcells:\n X.append(cell)\n IDm = np.where(self.gridcells==new_cell)\n Rmean.append(np.nanmean(self.corrs[cells_in_k,IDm]))\n try:\n Rmax = np.nanmax(Rmean)\n except ValueError:\n Rmax = np.nan\n return np.array(X), Rmean, Rmax\n \n def diag_indices(a, k):\n rows, cols = np.diag_indices_from(a)\n if k < 0:\n return rows[-k:], cols[:k]\n elif k > 0:\n return rows[:-k], cols[k:]\n else:\n return rows, cols\n\n #S T E P 1 (C R E A T E N O D E S)\n\n self.nodes = {}\n self.unavail = []\n if latlon:\n neighbour_corrs1 = self.corrs.diagonal(offset=1)\n neighbour_corrs2 = self.corrs.diagonal(offset=self.dimY-1)\n subset = np.arange(0,len(neighbour_corrs2),self.dimY)\n neighbour_corrs2 = neighbour_corrs2[subset]\n neighbour_corrs = np.concatenate((neighbour_corrs1,neighbour_corrs2))\n\n cellIDs1 = diag_indices(self.corrs,1)\n cellIDs2 = diag_indices(self.corrs,self.dimY-1)\n\n cellIDs = (np.concatenate((cellIDs1[0],cellIDs2[0][subset])),\\\n np.concatenate((cellIDs1[1],cellIDs2[1][subset])))\n else:\n neighbour_corrs = self.corrs.diagonal(offset=1)\n cellIDs = diag_indices(self.corrs,1)\n \n cellIDs = (self.gridcells[cellIDs[0]],self.gridcells[cellIDs[1]])\n k = 0\n neighbour_corrs,cellIDs1,cellIDs2 = list(zip(*sorted(zip(neighbour_corrs,cellIDs[0],cellIDs[1]),reverse=True)))\n cell_IDs = (cellIDs1,cellIDs2)\n np.random.seed(2)\n for it in range(len(neighbour_corrs)):\n cells_in_k = []\n i = cell_IDs[0][it]\n j = cell_IDs[1][it]\n r = neighbour_corrs[it]\n \n row_i = int(np.floor(i/self.dimY)) ; col_i = int(i % self.dimY)\n row_j = int(np.floor(j/self.dimY)) ; col_j = int(j % self.dimY)\n \n if ([row_i,col_i] not in self.unavail) & ([row_j,col_j] not 
in self.unavail):\n if r>self.tau:\n self.nodes.setdefault(k, []).append([row_i,col_i])\n self.nodes.setdefault(k, []).append([row_j,col_j])\n self.unavail.append([row_i,col_i])\n self.unavail.append([row_j,col_j])\n cells_in_k.extend(np.where(self.gridcells==i)[0])\n cells_in_k.extend(np.where(self.gridcells==j)[0])\n\n while True: #expand\n neighbours = area_neighbours(self.nodes[k], i_nan, j_nan)\n X, Rmean, Rmax = area_max_correlation(Area=self.nodes[k], neighbours=neighbours)\n if Rmax > self.tau:\n m = X[Rmean==Rmax].tolist()\n if len(m)>1:\n m = m[np.random.randint(low=0,high=len(m))]\n else:\n m = m[0]\n self.nodes.setdefault(k, []).append(m)\n self.unavail.append(m)\n cells_in_k.extend(np.where(self.gridcells==m[0]*self.dimY+m[1])[0])\n else:\n break\n if len(self.nodes[k]) <= 2:\n del self.nodes[k]\n k += 1\n else:\n break\n \n #S T E P 2 (M E R G E N O D E S)\n \n self.unavail = []\n while True:\n Rs = {}\n unavail_neighbours = {}\n num_cells = dict([(area,len(self.nodes[area])) if self.nodes[area] not in self.unavail else (area,np.inf) for area in self.nodes.keys()])\n maxID = min(num_cells.items(), key=operator.itemgetter(1))[0]\n if num_cells[maxID] > 175: #arbitrary choice?\n break\n else:\n cells_in_k = [np.where(self.gridcells==cell[0]*self.dimY+cell[1])[0] for cell in self.nodes[maxID]]\n neighbours = area_neighbours(self.nodes[maxID], i_nan, j_nan)\n for cell in neighbours:\n gcell = cell[0]*self.dimY + cell[1]\n Rmean = []\n cond1 = gcell in self.gridcells\n cond2 = cell not in self.nodes[maxID]\n cond3 = cell not in [k for k, g in itertools.groupby(sorted(itertools.chain(*unavail_neighbours.values())))]\n cond4 = len([area for area, cells in self.nodes.items() if cell in cells]) > 0\n if (cond1) & (cond2) & (cond3) & (cond4):\n nID = [area for area, cells in self.nodes.items() if cell in cells][0]\n unavail_neighbours[nID] = self.nodes[nID]\n X, Rmean, Rmax = area_max_correlation(Area=self.nodes[nID]+self.nodes[maxID], neighbours=self.nodes[nID]+self.nodes[maxID])\n if nID not in Rs: \n Rs[nID] = np.nanmean(Rmean)\n try:\n Rs_maxID = max(Rs.items(), key=operator.itemgetter(1))[0]\n if Rs[Rs_maxID] > self.tau:\n for cell in self.nodes.pop(Rs_maxID, None):\n self.nodes.setdefault(maxID, []).append([cell[0],cell[1]])\n else:\n self.unavail.append(self.nodes[maxID])\n except ValueError:\n self.unavail.append(self.nodes[maxID])", "def test_is_filled(self):\n n = 10\n matrix = g.np.random.uniform(size=(n + 1,) * 3) > 0.5\n not_matrix = g.np.logical_not(matrix)\n pitch = 1. 
/ n\n origin = g.np.random.uniform(size=(3,))\n vox = g.trimesh.voxel.VoxelGrid(matrix)\n vox = vox.apply_scale(pitch).apply_translation(origin)\n not_vox = g.trimesh.voxel.VoxelGrid(not_matrix)\n not_vox = not_vox.apply_scale(pitch).apply_translation(origin)\n for a, b in ((vox, not_vox), (not_vox, vox)):\n points = a.points\n # slight jitter - shouldn't change indices\n points += (\n g.np.random.uniform(size=points.shape) - 1) * 0.4 * pitch\n g.np.random.shuffle(points)\n\n # all points are filled, and no empty points are filled\n assert g.np.all(a.is_filled(points))\n assert not g.np.any(b.is_filled(points))\n\n # test different number of dimensions\n points = g.np.stack([points, points[-1::-1]], axis=1)\n assert g.np.all(a.is_filled(points))\n assert not g.np.any(b.is_filled(points))", "def build(self):\n self._start = np.zeros_like(self._rooms.shape)\n self._start[0] = random.randrange(self._rooms.shape[0])\n position = self._start\n egress = Direction.South\n distance = 1\n while position in self:\n room = self[position]\n room.egress = egress\n room.distance = distance\n yield position\n options = {}\n for direction in Direction.range():\n if direction != egress:\n new_position = position + direction.offset()\n if new_position in self:\n if self.is_sealed(new_position):\n options[direction] = new_position\n if options:\n direction = random.choice(tuple(options.keys()))\n room.remove_wall(direction)\n position = options[direction]\n egress = direction.reverse()\n distance += 1\n else:\n position += egress.offset()\n if position in self:\n egress = self[position].egress\n distance = self[position].distance", "def mover_bm_izquierda(self):\n self.nueva_posicion_posible_parte_superior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] - 1,\n self.casilla[1]],\n [self.vertice_1[0] - self.velocidad,self.vertice_1[1]], \n [self.vertice_1[0] - 5 - 5, self.vertice_1[1]])\n self.nueva_posicion_posible_parte_inferior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] - 1,\n self.casilla[1] + 1],\n [self.vertice_3[0] - self.velocidad,self.vertice_3[1]],\n [self.vertice_3[0] - 5,self.vertice_3[1]]) \n if self.nueva_posicion_posible_parte_superior[0] != 1 and self.nueva_posicion_posible_parte_inferior[0] != 1:\n self.x -= self.velocidad * (self.x >= 15)\n self.posicion = [self.x,self.posicion[1]]\n self.casilla = [self.casilla[0] - self.nueva_posicion_posible_parte_superior[1] *(self.nueva_posicion_posible_parte_inferior[0] != 1) * (self.nueva_posicion_posible_parte_superior[0] != 1), self.casilla[1]]\n self.redefinir_vertices()", "def iter_points(self):\n for x in range(self.left, self.right + 1):\n for y in range(self.top, self.bottom + 1):\n yield Point(x, y)", "def NewBlock(self):\n for i in self.matrix:\n if 2 in i:\n return()\n blockType = self.bag.Choose()\n subtractor = {\"I\" : 4, \"J\" : 3, \"L\" : 3, \"O\" : 2, \"S\" : 3, \"T\" : 3, \"Z\": 3}\n x = random.randint(0, self.width - subtractor.get(blockType))\n coords = []\n if blockType == \"I\":\n coords = [(x + i, 0) for i in range(4)]\n elif blockType == \"J\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x, 1))\n elif blockType == \"L\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x + 2, 1))\n elif blockType == \"O\":\n coords = [(x, 0), (x + 1, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"Z\":\n coords = [(x, 0), (x + 1, 0), (x + 1, 1), (x + 2, 1)]\n elif blockType == \"S\":\n coords = [(x + 1, 0), (x + 2, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"T\":\n coords = [(x, 0), (x + 
1, 0), (x + 2, 0), (x + 1, 1)]\n self.coords = coords\n return(coords)", "def star_topology(random, population, args):\r\n for _ in range(len(population)):\r\n yield population[:]" ]
[ "0.6300398", "0.59959567", "0.5966825", "0.5925378", "0.58771193", "0.5777176", "0.57712615", "0.57712615", "0.5769345", "0.5725288", "0.5673758", "0.5602161", "0.56021327", "0.56002945", "0.5596927", "0.5585428", "0.5523877", "0.5508298", "0.5461161", "0.546076", "0.5455531", "0.54486144", "0.5427992", "0.5425584", "0.5418832", "0.5412836", "0.5397276", "0.53711367", "0.5332808", "0.5325766", "0.5322386", "0.53191465", "0.53095937", "0.5306799", "0.53033996", "0.5294247", "0.528047", "0.5280365", "0.5280365", "0.527731", "0.52638966", "0.5241109", "0.5239592", "0.522908", "0.5222281", "0.5220967", "0.5211306", "0.52104384", "0.52043664", "0.52040195", "0.5195925", "0.5185564", "0.51838535", "0.51557845", "0.5137932", "0.5131969", "0.5129986", "0.5120769", "0.5116486", "0.51158404", "0.51131576", "0.5109799", "0.51092434", "0.509652", "0.5088312", "0.50814676", "0.50809157", "0.5079765", "0.50758415", "0.50718063", "0.506784", "0.5067585", "0.5067137", "0.50573194", "0.50468343", "0.5045469", "0.5043491", "0.504284", "0.5041618", "0.5039123", "0.5038719", "0.50371504", "0.50350267", "0.5023328", "0.50232244", "0.5015992", "0.5015992", "0.5013542", "0.50090283", "0.5008389", "0.5006957", "0.5003629", "0.5002962", "0.5002139", "0.49968174", "0.49964193", "0.49890846", "0.49764457", "0.49632603", "0.4960033" ]
0.7902054
0
Construct a Lonpos state, with the given board and pieces. The occupation array indicates which points of the board are occupied, and by what pieces. It is specified by [ (p, i) ], which indicates that point p of the board is occupied by the i'th piece. p can be either the index or the 2d coordinates of the point.
def __init__( self, board, occupation=[]): self.board = board if (occupation): if (isinstance(occupation[0][0], int)): self.occupation = dict(occupation) elif (isinstance(occupation[0][0], tuple) and len(occupation[0][0])==2): try: self.occupation = dict( [ (board.grids[o[0]], o[1]) for o in occupation ]) except KeyError, e: raise e, "Occupied point not on board." else: self.occupation = dict(occupation)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, params):\n position = recordtype(\"position\", [\"x\", \"y\", \"kind\"])\n coordinate = recordtype(\"coordinate\", [\"x\", \"y\"])\n\n self.width = params[\"m\"]\n self.height = params[\"n\"]\n self.count = 0\n self.pieces = params[\"pieces\"]\n\n self.board = []\n for _ in self.pieces:\n self.board.append(position(0, 0, 0))\n\n self.board_index = 0\n self.last_xy = []\n self.last_index = [0] * number_of_types\n\n for _ in range(number_of_types):\n coord_list = []\n for _ in range(len(self.pieces) + 1):\n coord_list.append(coordinate(0, 0))\n self.last_xy.append(coord_list)\n\n self.attacked_cols = [0] * self.width\n self.attacked_rows = [0] * self.height\n self.attacked_diag_l = [0] * (self.width + self.height)\n self.attacked_diag_r = [0] * (self.width + self.height)\n self.attacked_cells = [0] * ((self.width+4) * (self.height+4))\n\n self.king_rules = [\n coordinate(-1, 0), coordinate(1, 0), coordinate(0, -1), coordinate(0, 1),\n coordinate(-1, -1), coordinate(1, 1), coordinate(1, -1), coordinate(-1, 1)\n ]\n\n self.knight_rules = [\n coordinate(-2, -1), coordinate(-2, 1), coordinate(2, -1), coordinate(2, 1),\n coordinate(-1, -2), coordinate(-1, 2), coordinate(1, -2), coordinate(1, 2)\n ]", "def set_pieces(self):\n\n for i in range(len(self._game_board)):\n\n # Row 1\n if i == 0:\n for ii in range(len(self._game_board[i])):\n if ii == 0 or ii == 8:\n self._game_board[i][ii] = Chariot(\"black\", \"BCHA\")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Horse(\"black\", \" BH \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 2 or ii == 6:\n self._game_board[i][ii] = Elephant(\"black\", \" BE \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 3 or ii == 5:\n self._game_board[i][ii] = Advisor(\"black\", \" BA \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 4:\n self._game_board[i][ii] = General(\"black\", \" BG \")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 3\n if i == 2:\n for ii in range(len(self._game_board[i])):\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Cannon(\"black\", \"BCAN\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 4\n if i == 3:\n for ii in range(len(self._game_board[i])):\n if ii % 2 == 0:\n self._game_board[i][ii] = Soldier(\"black\", \"BSOL\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 7\n if i == 6:\n for ii in range(len(self._game_board[i])):\n if ii % 2 == 0:\n self._game_board[i][ii] = Soldier(\"red\", \"RSOL\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 8\n if i == 7:\n for ii in range(len(self._game_board[i])):\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Cannon(\"red\", \"RCAN\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 10\n if i == 9:\n for ii in range(len(self._game_board[i])):\n if ii == 0 or ii == 8:\n self._game_board[i][ii] = Chariot(\"red\", \"RCHA\")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Horse(\"red\", \" RH \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 2 or ii == 6:\n self._game_board[i][ii] = Elephant(\"red\", \" RE \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 3 or ii == 5:\n self._game_board[i][ii] = Advisor(\"red\", \" RA \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 4:\n self._game_board[i][ii] = General(\"red\", \" RG \")\n self._game_board[i][ii].update_location([i, ii])", "def solve( board, 
pieces, occupation):\n\n from heapq import heappush, heappop\n\n unoccupied = findUnoccupied( board, occupation)\n remainingpieces = range(len(pieces))\n\n searchq = []\n nbacktrack = 0\n\n while (unoccupied):\n nnheap = []\n # As a heuristic, we choose to first place pieces on points\n # with the least number of unoccupied neighbors.\n for i in unoccupied:\n p = board.positions[i]\n nn = countFreeNeighbors( p, board, occupation)\n heappush( nnheap, (nn, i))\n nn, pt = heappop( nnheap)\n if (nn==0): # No solution, back-track\n if (searchq):\n occupation, remainingpieces = searchq.pop()\n nbacktrack += 1\n print \"Backtracking for the %d'th time\" % nbacktrack\n unoccupied = findUnoccupied( board, occupation)\n continue\n else:\n break\n for ipc in remainingpieces:\n pc = pieces[ipc]\n for o in placePiece( board, occupation, board.positions[pt], pc):\n # A search node is defined by the occupation state and\n # the remaining pieces.\n searchq.append( (o, [i for i in remainingpieces if i != ipc]))\n if (searchq):\n occupation, remainingpieces = searchq.pop()\n unoccupied = findUnoccupied( board, occupation)\n else:\n break\n else:\n state = LonposState( board, occupation.items())\n state.show()\n return occupation\n\n # No solution for the state.\n print \"No solution!\"\n return None", "def with_np_pieces(self, np_pieces):\n if np_pieces is None:\n np_pieces = self.np_pieces\n return Board(self.width, self.depth, self.height, self.win_length, np_pieces)", "def __init__(self, board, position, player, piece_type):\n self.board = board\n self.position = position\n self.player = player\n self.piece_behavior = piece_type", "def make(self,state_board):\n\t\tstate_board[self.column][self.line] = self.couleur #place the piece\n\t\tdrawPiece((self.column,self.line),self.couleur) #draws it on the board\n\t\tfor pos in self.flips: #flips all the pieces in flips\n\t\t\tstate_board[pos[0]][pos[1]] = self.couleur\n\t\t\tdrawPiece(pos,self.couleur) #draws it on the board", "def __init__(self):\n self.board = {} # dict of (x,y) to PlacedTile\n self.board[(0,0)] = STARTING_PIECE", "def __init__(self, board = np.random.randint(2, size=(5, 5), dtype = np.uint8)): \n # Check for valid filetype for board\n if not isinstance(board, np.ndarray):\n raise NotImplementedError(\"Board must be an numpy.array.\")\n # Check for valid board size.\n if any(x < 2 for x in board.shape):\n raise NotImplementedError(\"Board state invalid! Must be at least 2x2.\")\n # Check for valid board cell entries.\n if not np.all(np.isin(board, [0,1])):\n raise NotImplementedError(\"Board state invalid! 
Must be filled with 0s and 1s.\")\n\n # Define lookup table for cell evaluation\n self.lookup = np.asarray([[0,0,0,1,0,0,0,0,0],[0,0,1,1,0,0,0,0,0]])\n\n # Define expanded board for easier border cell calculations\n expanded_shape = tuple(d+2 for d in board.shape)\n board_slice = (slice(1, -1),) * 2\n self.expanded_board = np.zeros(expanded_shape,dtype = np.uint8)\n self.expanded_board[board_slice] = board\n self.board = self.expanded_board[board_slice]", "def __init__(self, height, width):\n self.height, self.width = height, width\n self.board = self.create_board_matrix(height, width)\n self.refresh_rate = 0.3\n self.points = 0 # pieces successfully added\n self.level = 1", "def initialize_board():\n # Wipe current board\n for x in range(len(THE_BOARD.positions)):\n for y in range(len(THE_BOARD.positions)):\n THE_BOARD.positions[x][y] = ' '\n\n all_pieces = []\n\n # Pawns\n white_pawns = [Pawn('white', (6, i)) for i in range(len(THE_BOARD.positions[6]))]\n black_pawns = [Pawn('black', (1, i)) for i in range(len(THE_BOARD.positions[1]))]\n all_pieces.extend(white_pawns)\n all_pieces.extend(black_pawns)\n\n # Rooks\n rook1 = Rook('black', (0, 0))\n all_pieces.append(rook1)\n rook2 = Rook('black', (0, 7))\n all_pieces.append(rook2)\n rook3 = Rook('white', (7, 0))\n all_pieces.append(rook3)\n rook4 = Rook('white', (7, 7))\n all_pieces.append(rook4)\n\n # Knights\n knight1 = Knight('black', (0, 1))\n all_pieces.append(knight1)\n knight2 = Knight('black', (0, 6))\n all_pieces.append(knight2)\n knight3 = Knight('white', (7, 1))\n all_pieces.append(knight3)\n knight4 = Knight('white', (7, 6))\n all_pieces.append(knight4)\n\n # Bishops\n bishop1 = Bishop('black', (0, 2))\n all_pieces.append(bishop1)\n bishop2 = Bishop('black', (0, 5))\n all_pieces.append(bishop2)\n bishop3 = Bishop('white', (7, 2))\n all_pieces.append(bishop3)\n bishop4 = Bishop('white', (7, 5))\n all_pieces.append(bishop4)\n\n # King and Queen\n queen1 = Queen('black', (0, 4))\n all_pieces.append(queen1)\n queen2 = Queen('white', (7, 4))\n all_pieces.append(queen2)\n king1 = King('black', (0, 3))\n all_pieces.append(king1)\n king2 = King('white', (7, 3))\n all_pieces.append(king2)\n\n # Add every single piece to the board. 
Only then can they update their spaces threatened\n for piece in all_pieces:\n THE_BOARD.update(piece)\n THE_BOARD.update_all_spaces_threatened()", "def create(self):\n\n for i in range(8):\n # Create white pawns\n self.board[1][i] = Piece(\"pawn\", 1, i, 0)\n # Create black pawns\n self.board[6][i] = Piece(\"pawn\", 6, i, 1)\n\n # Create white rooks\n self.board[0][0] = Piece(\"rook\", 0, 0, 0)\n self.board[0][7] = Piece(\"rook\", 0, 7, 0)\n\n # Create black rooks\n self.board[7][0] = Piece(\"rook\", 7, 0, 1)\n self.board[7][7] = Piece(\"rook\", 7, 7, 1)\n\n # Create white knights\n self.board[0][1] = Piece(\"knight\", 0, 1, 0)\n self.board[0][6] = Piece(\"knight\", 0, 6, 0)\n\n # Create black knights\n self.board[7][1] = Piece(\"knight\", 7, 1, 1)\n self.board[7][6] = Piece(\"knight\", 7, 6, 1)\n\n # Create white bishop\n self.board[0][2] = Piece(\"bishop\", 0, 2, 0)\n self.board[0][5] = Piece(\"bishop\", 0, 5, 0)\n\n # Create black bishop\n self.board[7][2] = Piece(\"bishop\", 7, 2, 1)\n self.board[7][5] = Piece(\"bishop\", 7, 5, 1)\n\n # Create white queen and king\n self.board[0][3] = Piece(\"queen\", 0, 3, 0)\n self.board[0][4] = Piece(\"king\", 0, 4, 0)\n\n # Create black queen and king\n self.board[7][3] = Piece(\"queen\", 7, 3, 1)\n self.board[7][4] = Piece(\"king\", 7, 4, 1)", "def __init__(self, grid_size, num_pokemon):\n self._game_board = UNEXPOSED * (grid_size ** 2)\n self._num_pokemon = num_pokemon\n self._pokemon_location = self.generate_pokemons(grid_size)", "def place_piece(piece, px, py, pc):\n \n\n for i, j in piece:\n x = px + i\n y = py + j\n if not (0 <= x < BOARD_WIDTH):\n continue\n if not (0 <= y < BOARD_HEIGHT):\n continue\n board[y][x] = pc", "def __init__(self, width, height):\n\n self.WIDTH = width\n self.HEIGHT = height\n\n self.active_piece = None\n\n # 0,0 is defined as the top left\n self.board = [[0] * self.WIDTH for y in range(self.HEIGHT)]\n\n self.cleared_lines = 0\n self.score = 0", "def updated_board(board_w, board_h, piece_list, board, position):\n board_state = board.state\n new_board = Board(board_w, board_h, 1, piece_list, position)\n new_board.state = board_state\n return new_board", "def __init__(self, size):\n self.size = size\n self.grid = {}\n self.init_grid(size)\n self.current_direction = \"down\"\n # if not empty, should be 4 tuples representing the coordinates of the moving piece\n self.active_piece = None\n self.game_over = False", "def get_board_positions_pieces_OHE(board):\n information = str(board).replace(\"\\n\", \" \")\n one_hot_encoded_board_positions = np.zeros(shape = (8,8,12))\n x = 0\n y = 0\n for piece in information:\n if(piece == \" \"):\n continue\n one_hot_encoded_board_positions[y, x, :] = get_piece_OHE(piece)\n if(x <= 7):\n x += 1 \n if(x > 7):\n x = 0\n y += 1\n return one_hot_encoded_board_positions", "def __init__(self):\n # The starting counts are set to 0 and modified when the board is initiated.\n self.num_black_pieces = 0\n self.num_black_kings = 0\n self.num_white_pieces = 0\n self.num_white_kings = 0\n # Creates a new board and fills it with the appropriate pieces.\n self.board = self._initiate_board()\n self.moves = []", "def update_state(self):\n self.reset_state()\n for piece in self.pieces:\n coordinates = piece.get_block_positions()\n for coor in coordinates:\n x, y = coor\n self.state[y][x] = piece", "def __init__(self, boardDimensions, shipsAfloat):\r\n self.enemyBoard = [[BoardState.OPEN for j in range(boardDimensions)] for i in range(boardDimensions)]\r\n self.boardDimensions = boardDimensions\r\n 
self.shipsAfloat = shipsAfloat", "def initialize_board(self):\n self.board = np.zeros(shape=(BOARD_SIZE, BOARD_SIZE), dtype=np.int) # another way of defining board: [[for x in range(cm.BOARD_SIZE)] for x in range(cm.BOARD_SIZE)]\n center = int(BOARD_SIZE / 2)\n self.board[center-1][center-1] = self.board[center][center] = WHITE # place the board according to position\n self.board[center][center-1] = self.board[center-1][center] = BLACK\n self.black_piece = 2\n self.white_piece = 2", "def __init__(self):\n\n self._turn = 'blue'\n self._active_pieces = {'blue': [], 'red': []}\n self._game_state = 'UNFINISHED'\n self._board = [['' for j in range(10)] for i in range(9)]\n\n # add pieces to the board\n self.add_piece('red', Chariot, 'a1')\n self.add_piece('red', Elephant, 'b1')\n self.add_piece('red', Horse, 'c1')\n self.add_piece('red', Guard, 'd1')\n self.add_piece('red', Guard, 'f1')\n self.add_piece('red', Elephant, 'g1')\n self.add_piece('red', Horse, 'h1')\n self.add_piece('red', Chariot, 'i1')\n self.add_piece('red', General, 'e2')\n self.add_piece('red', Cannon, 'b3')\n self.add_piece('red', Cannon, 'h3')\n self.add_piece('red', Soldier, 'a4')\n self.add_piece('red', Soldier, 'c4')\n self.add_piece('red', Soldier, 'e4')\n self.add_piece('red', Soldier, 'g4')\n self.add_piece('red', Soldier, 'i4')\n self.add_piece('blue', Chariot, 'a10')\n self.add_piece('blue', Elephant, 'b10')\n self.add_piece('blue', Horse, 'c10')\n self.add_piece('blue', Guard, 'd10')\n self.add_piece('blue', Guard, 'f10')\n self.add_piece('blue', Elephant, 'g10')\n self.add_piece('blue', Horse, 'h10')\n self.add_piece('blue', Chariot, 'i10')\n self.add_piece('blue', General, 'e9')\n self.add_piece('blue', Cannon, 'b8')\n self.add_piece('blue', Cannon, 'h8')\n self.add_piece('blue', Soldier, 'a7')\n self.add_piece('blue', Soldier, 'c7')\n self.add_piece('blue', Soldier, 'e7')\n self.add_piece('blue', Soldier, 'g7')\n self.add_piece('blue', Soldier, 'i7')", "def initial_board():\n board = [OUTER] * 100\n for i in Othello.squares():\n board[i] = EMPTY\n # The middle four squares should hold the initial piece positions.\n board[44], board[45] = BLACK, WHITE\n board[54], board[55] = WHITE, BLACK\n return board", "def __init__(self):\n self._board = []\n for i in range(10):\n self._board.append([None for i in range(9)])\n self.place_pieces()", "def populate(self):\n counter = 0\n placers = [piece_class.Rook, piece_class.Knight, piece_class.Bishop, \n piece_class.Queen, piece_class.King, piece_class.Bishop, \n piece_class.Knight, piece_class.Rook, piece_class.Pawn, \n piece_class.Pawn, piece_class.Pawn, piece_class.Pawn, \n piece_class.Pawn, piece_class.Pawn, piece_class.Pawn, \n piece_class.Pawn]\n \n \n #Creates new piece objects\n for i in placers:\n self.board[counter] = (i(WHITE, piece_class.PIECEDICT[WHITE][i]))\n counter += 1\n \n counter = 48\n placers.reverse()\n placers[11], placers[12] = placers[12], placers[11]\n \n for i in placers:\n self.board[counter] = (i(BLACK, piece_class.PIECEDICT[BLACK][i]))\n counter += 1\n\n# self.board[11] = self.empty\n# self.board[12] = self.empty\n# self.board[57] = self.empty\n# self.board[58] = self.empty\n# self.board[5] = self.empty\n# self.board[61] = self.empty\n# self.board[59] = self.empty\n# self.board[55] = self.empty\n# self.board[54] = self.empty\n# self.board[53] = self.empty\n# self.board[52] = self.empty\n# self.board[51] = self.empty\n# self.board[50] = self.empty\n# self.board[49] = self.empty\n# self.board[48] = piece_class.Bishop(BLACK, 
piece_class.PIECEDICT[BLACK][piece_class.Bishop])\n# self.board[(40-16)] = piece_class.Rook(WHITE, piece_class.PIECEDICT[WHITE][piece_class.Rook])\n# self.board[(41-16)] = piece_class.Rook(WHITE, piece_class.PIECEDICT[WHITE][piece_class.Rook])\n## self.board[(41-7)] = piece_class.Bishop(WHITE, piece_class.PIECEDICT[WHITE][piece_class.Bishop])\n# self.board[56] = piece_class.King(BLACK, piece_class.PIECEDICT[BLACK][piece_class.King])\n \n# self.board[18] = piece_class.Rook(BLACK, piece_class.PIECEDICT[BLACK][piece_class.Rook])\n# self.board[21] = piece_class.Bishop(WHITE, piece_class.PIECEDICT[WHITE][piece_class.Bishop])\n# self.board[27] = piece_class.Bishop(BLACK, piece_class.PIECEDICT[BLACK][piece_class.Bishop])\n# self.board[36] = piece_class.Knight(BLACK, piece_class.PIECEDICT[BLACK][piece_class.Knight])\n# self.board[41] = piece_class.Rook(WHITE, piece_class.PIECEDICT[WHITE][piece_class.Rook])\n## self.board[32] = piece_class.Bishop(BLACK, piece_class.PIECEDICT[BLACK][piece_class.Bishop])\n# self.board[48] = piece_class.King(WHITE, piece_class.PIECEDICT[WHITE][piece_class.King])\n## self.board[59] = piece_class.King(BLACK, piece_class.PIECEDICT[BLACK][piece_class.King])\n# self.board[49] = piece_class.Pawn(WHITE, piece_class.PIECEDICT[WHITE][piece_class.Pawn])\n# self.board[50] = piece_class.Queen(BLACK, piece_class.PIECEDICT[BLACK][piece_class.Queen])\n# self.board[59] = piece_class.Bishop(BLACK, piece_class.PIECEDICT[BLACK][piece_class.Bishop])\n# self.board[52] = piece_class.Pawn(BLACK, piece_class.PIECEDICT[BLACK][piece_class.Pawn])\n# \n# del self.board[64:]\n\n return self.board", "def __init__(self, board=None, workers=None):\n if board:\n self._board = []\n for row in range(self.BOARD_SIZE):\n self._board.append([])\n for col in range(self.BOARD_SIZE):\n try:\n height = board[row][col]\n except IndexError:\n height = 0\n self._board[row].append(Building(height))\n else:\n self._board = [[Building() for col in range(self.BOARD_SIZE)]\n for row in range(self.BOARD_SIZE)]\n\n if workers:\n self._workers = workers\n else:\n self._workers = {}", "def init_board():\n\t# Generates a table 10*10 of 0s with -1 around and the initial state\n\t# of the board with 2 whites and 2 blacks in the middle\n\ttable = [[0 if i != 0 and i != 9 else -1 for i in range(10)] if j != 0 and j != 9 else [-1 for i in range(10)] for j in range(10)] #leaves a -1 line around the whole table of 0s\n\t#initial state is drawn and recorded\n\ttable[4][4] = 2\n\ttable[5][5] = 2\n\ttable[4][5] = 1\n\ttable[5][4] = 1\n\tdrawPiece((4,4),2)\n\tdrawPiece((5,5),2)\n\tdrawPiece((4,5),1)\n\tdrawPiece((5,4),1)\n\treturn table", "def __init__(self, piece_type, origin):\n self.origin = origin\n self.positions = self.get_piece_coordinates(piece_type)", "def create_manual_barrage_initial_state(\n spy_locations_list,\n scout_locations_list,\n miner_locations_list,\n sergeant_locations_list,\n lieutenant_locations_list,\n captain_locations_list,\n major_locations_list,\n colonel_locations_list,\n general_locations_list,\n marshall_locations_list,\n flag_locations_list,\n bomb_locations_list,\n specify_pieces_for_player=OUTSIDE_AGENT_PLAYER_ID):\n game_version = STRATEGO_ENV_BARRAGE_INTERFACE_CONFIG['version']\n game_version_config = VERSION_CONFIGS[game_version]\n board_shape = (game_version_config['rows'], game_version_config['columns'])\n procedural_env = StrategoProceduralEnv(*board_shape)\n\n # Verify inputs and fill a 2d ndarray with specified player piece values.\n if not (specify_pieces_for_player == 1 or 
specify_pieces_for_player == -1):\n raise ValueError(\"specify_pieces_for_player must be 1 or -1\")\n\n allowed_piece_rows_for_player = [0, 1, 2, 3] if specify_pieces_for_player == 1 else [6, 7, 8, 9]\n specified_player_initial_piece_map = np.zeros(shape=board_shape, dtype=INT_DTYPE_NP)\n\n manual_piece_locations = {\n SP.SPY: spy_locations_list,\n SP.SCOUT: scout_locations_list,\n SP.MINER: miner_locations_list,\n SP.SERGEANT: sergeant_locations_list,\n SP.LIEUTENANT: lieutenant_locations_list,\n SP.CAPTAIN: captain_locations_list,\n SP.MAJOR: major_locations_list,\n SP.COLONEL: colonel_locations_list,\n SP.GENERAL: general_locations_list,\n SP.MARSHALL: marshall_locations_list,\n SP.FLAG: flag_locations_list,\n SP.BOMB: bomb_locations_list\n }\n\n for piece_type, locations_list in manual_piece_locations.items():\n if len(locations_list) > 0 and \\\n (len(np.shape(locations_list)) != 2 or\n (len(np.shape(locations_list)) == 2 and np.shape(locations_list)[1] != 2)):\n raise ValueError(f\"Each locations list must be a list of 2d coordinates. Examples: [] or [[1,2], [2,5]].\\n\"\n f\"For {piece_type.name}, {locations_list} was passed.\")\n\n if len(locations_list) != game_version_config['piece_amounts'][piece_type]:\n allowed_piece_amounts = {pc_type.name: amt for pc_type, amt in game_version_config['piece_amounts'].items()}\n raise ValueError(f\"{len(locations_list)} {piece_type.name} piece locations were provided when \"\n f\"{game_version.name} requires the following piece amounts: \\n{allowed_piece_amounts}\")\n\n for location in locations_list:\n row, column = location\n if (not 0 <= column < board_shape[1]) or (row not in allowed_piece_rows_for_player):\n raise ValueError(f\"The out-of-range location {location} for {piece_type.name} was provided. \"\n f\"Locations are in the format, (row, column). \"\n f\"Rows take values in {allowed_piece_rows_for_player} for player {specify_pieces_for_player}. 
\"\n f\"Columns must be in the range [0, {board_shape[1]}].\")\n if specified_player_initial_piece_map[row, column] != 0:\n raise ValueError(f\"The location {location} was specified for more than one piece.\")\n\n # Set piece value for location\n specified_player_initial_piece_map[row, column] = piece_type.value\n\n # Grab a random human initialization for the non-specified player.\n # Human inits have been downloaded from the Gravon Archive (https://www.gravon.de/gravon/stratego/strados2.jsp)\n random_human_init_spec_str = np.random.choice(HUMAN_INITS)\n player_1_random_human_piece_map, player_2_random_human_piece_map = create_initial_positions_from_human_data(\n player1_string=random_human_init_spec_str, player2_string=random_human_init_spec_str,\n game_version_config=game_version_config)\n\n # Set obstacle locations\n obstacle_map = np.zeros(shape=board_shape, dtype=INT_DTYPE_NP)\n for obstacle_location in VERSION_CONFIGS[game_version]['obstacle_locations']:\n obstacle_map[obstacle_location] = 1.0\n\n # Create the initial state\n initial_state = procedural_env.create_initial_state(\n obstacle_map=obstacle_map,\n player_1_initial_piece_map=specified_player_initial_piece_map if specify_pieces_for_player == 1 else player_1_random_human_piece_map,\n player_2_initial_piece_map=specified_player_initial_piece_map if specify_pieces_for_player == -1 else player_2_random_human_piece_map,\n max_turns=game_version_config['max_turns'])\n\n return initial_state", "def __init__(self, board, turn):\n self.player = turn\n self.roll = self.roll_dice()\n #array of applied board states\n self.moves = []\n self.board = board\n self.generate_valid_moves()", "def _create_game_pieces(self):\n p1pieces = []\n \n list_of_player_1_coords = [(4, 2), (4, 3), (4, 5), (4, 6),\n (2, 4), (3, 4), (5, 4), (6, 4)]\n \n for coord in list_of_player_1_coords:\n new_piece = GamePiece(coord, 1)\n p1pieces.append(new_piece)\n \n p2pieces = []\n \n list_of_player_2_coords = [(0, 3), (0, 4), (0, 5), (1, 4),\n (8, 3), (8, 4), (8, 5), (7, 4),\n (3, 0), (4, 0), (5, 0), (4, 1),\n (3, 8), (4, 8), (5, 8), (4, 7)]\n \n for coord in list_of_player_2_coords:\n new_piece = GamePiece(coord, 2)\n p2pieces.append(new_piece)\n \n # \n new_piece = GamePiece((4, 4), 3)\n p1pieces.append(new_piece)\n \n return p1pieces + p2pieces", "def __init__(self, piece):\n self.piece = piece", "def __init__(self):\n self._board_area = [[\" \" for i in range(20)] for j in range(20)]\n\n # Starting setup for board includes these coordinates black, and their mirror white\n black_start = [(1, 2), (2, 2), (2, 1), (2, 3), (3, 2), (4, 1), (4, 3), (5, 2), (6, 1), (6, 3), (7, 1),\n (7, 2), (7, 3), (8, 1), (8, 2), (8, 3), (9, 1), (9, 2), (9, 3), (10, 1), (10, 2), (10, 3),\n (11, 1), (11, 3), (12, 1), (12, 2), (12, 3), (13, 1), (13, 3), (14, 2), (15, 1), (15, 3),\n (16, 2), (17, 1), (17, 2), (17, 3), (18, 2), (2, 6), (5, 6), (8, 6), (11, 6),\n (14, 6), (17, 6)]\n\n # Border points set for clearing out stones that move beyond the border\n self._border = set((0, i) for i in range(20)) | set((19, i) for i in range(20))\n self._border = self._border | set((i, 0) for i in range(20)) | set((i, 19) for i in range(20))\n\n # Fill black and white stones\n for coord in black_start:\n self._board_area[coord[0]][coord[1]] = \"B\"\n self._board_area[coord[0]][-coord[1] - 1] = \"W\"\n\n # Alphabetic indexing of board for alpha-numeric movement inputs\n self._locmap = dict(zip(\"abcdefghijklmnopqrst\", range(20)))", "def __init__(self):\n self.game_board = [' '] * 9\n self.size = 
len(self.game_board)\n self.move = 'X'\n self.player1 = None\n self.player2 = None\n self.current_player = None\n self.board_coords = {\n (1, 3): 0, (2, 3): 1, (3, 3): 2,\n (1, 2): 3, (2, 2): 4, (3, 2): 5,\n (1, 1): 6, (2, 1): 7, (3, 1): 8\n }\n\n self.winning_cases = [\n (0, 1, 2), (3, 4, 5), (6, 7, 8),\n (0, 3, 6), (1, 4, 7), (2, 5, 8),\n (0, 4, 8), (2, 4, 6)\n ]", "def __init__(self):\n\t\tself.current = Piece.EX\n\t\tself.board = [Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK]", "def test_start_game():\n board = Board(640, 640, 8)\n black_piece = GamePiece(0, 0, BLACK, 0)\n white_piece = GamePiece(0, 0, WHITE, 0)\n board.start_game()\n exp_list = []\n for x in range(8):\n for y in range(8):\n exp_list.append(((board.SPACE_SIZE/2) + (board.SPACE_SIZE * x),\n (board.SPACE_SIZE/2) + (board.SPACE_SIZE * y)))\n assert board.piece_location == exp_list\n exp_game_pieces = []\n for _i in range(8):\n current_list = []\n for _j in range(8):\n current_list.append(None)\n exp_game_pieces.append(current_list)\n exp_game_pieces[3][3] = white_piece\n exp_game_pieces[3][4] = black_piece\n exp_game_pieces[4][3] = black_piece\n exp_game_pieces[4][4] = white_piece\n for i in range(8):\n for j in range(8):\n if board.game_pieces[i][j] is None:\n assert exp_game_pieces[i][j] is None\n else:\n assert board.game_pieces[i][j].color == exp_game_pieces[i][j].color", "def set_pieces(self, board_layout_filename):\n\n def get_ctor(piece_type_str: str):\n \"\"\"gets the ctor function for the given piece type string\"\"\"\n if piece_type_str == \"PAWN\":\n return Pawn\n if piece_type_str == \"ROOK\":\n return Rook\n if piece_type_str == \"HORSE\":\n return Horse\n if piece_type_str == \"BISHOP\":\n return Bishop\n if piece_type_str == \"KING\":\n return King\n if piece_type_str == \"QUEEN\":\n return Queen\n\n def get_instance(klass, *args):\n return klass(*args)\n\n board_json = json.loads(open(board_layout_filename).read())\n\n white_pieces = self._pieces[PieceColor.WHITE]\n black_pieces = self._pieces[PieceColor.BLACK]\n\n for piece_json in board_json['WHITE']:\n x, y, name, piece_type = int(piece_json['x']), int(piece_json['y']), piece_json['name'], piece_json['piece_type']\n ctor = get_ctor(piece_type)\n piece = get_instance(get_ctor(piece_type), PieceColor.WHITE, Position(x, y), name)\n white_pieces[name] = piece\n self._rubrics[x][y] = piece\n\n for piece_json in board_json['BLACK']:\n x, y, name, piece_type = int(piece_json['x']), int(piece_json['y']), piece_json['name'], piece_json['piece_type']\n piece = get_ctor(piece_type)(PieceColor.BLACK, Position(x, y), name)\n black_pieces[name] = piece\n self._rubrics[x][y] = piece", "def ai_2(board: BoardState) -> BoardState:\n cur_piece = board.cpiece\n if cur_piece is None:\n board.cpiece_id = choose_none_winable_piece(board)\n else:\n board[choice(list(board.open_spots))] = board.cpiece_id\n board.cpiece_id = choose_none_winable_piece(board)\n\n if (board.cpiece_id is None) and not board.is_full:\n board.cpiece_id, _ = choice(list(board.unused_game_pieces))\n return board", "def __init__(self, board):\n self.board = board\n self.position = 0\n self.no_moves = 0 # Added number of moves\n self.adjustment = 0", "def generate():\n global BOARD\n next = [[0] * ROWS for _ in range(COLS)]\n # Loop through every spot in our 2D array and check spots neighbors\n for x in range(COLS):\n for y in range(ROWS):\n # Add up all the states in a 3x3 surrounding grid\n neighbors = 0\n for i in range(-1, 2):\n 
for j in range(-1, 2):\n nx = (x + i + COLS) % COLS\n ny = (y + j + ROWS) % ROWS\n neighbors += BOARD[nx][ny]\n # A little trick to subtract the current cell's state since\n # we added it in the above loop\n neighbors -= BOARD[x][y]\n # Rules of Life\n if BOARD[x][y] == 1 and neighbors < 2 : next[x][y] = 0 # Loneliness\n elif BOARD[x][y] == 1 and neighbors > 3 : next[x][y] = 0 # Overpopulation\n elif BOARD[x][y] == 0 and neighbors == 3: next[x][y] = 1 # Reproduction\n else: next[x][y] = BOARD[x][y] # Stasis\n # Next is now our board\n BOARD = next", "def __init__(self, board=None, rseed=None):\n\n self.height = 26\n self.playable_height = 20\n self.width = 10\n\n if board is None:\n self._board = np.array([[0 for i in range(self.width)]\n for i in range(self.height)])\n else:\n self._board = np.array(board)\n\n if rseed is None:\n rseed = datetime.now()\n\n random.seed(rseed)\n\n self.lines_cleared = 0\n self.score = 0\n self.dead = False\n self.held_piece = None\n self._hold_used = False\n\n self.cur_piece = None\n self.next_pieces = deque()\n self._pick_new_next()\n self._spawn_piece()\n self.ghost_piece_occupied = None\n self._generate_ghost_piece()", "def __init__(self, player):\n self._piece_type = 'pawn'\n self._value = 2 if player == \"white\" else -2\n self._summary = 'W-Pw' if player == \"white\" else 'B-Pw'\n\n self._directions = []\n if player == \"white\":\n self._directions.append([(-1, 1)])\n self._directions.append([(0, 1), (0, 2)])\n self._directions.append([(1, 1)])\n else:\n self._directions.append([(-1, -1)])\n self._directions.append([(0, -1), (0, -2)])\n self._directions.append([(1, -1)])", "def __init__(self, startingGameState):\n self.walls = startingGameState.getWalls()\n self.startingPosition = startingGameState.getPacmanPosition()\n top, right = self.walls.height - 2, self.walls.width - 2\n self.corners = ((1, 1), (1, top), (right, 1), (right, top))\n for corner in self.corners:\n if not startingGameState.hasFood(*corner):\n print('Warning: no food in corner ' + str(corner))\n self._expanded = 0 # DO NOT CHANGE; Number of search nodes expanded\n # Please add any code here which you would like to use\n # in initializing the problem\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n Mi espacio de estados consistirá en que cada estado será una tupla del tipo (pos, grid), donde:\n * pos es la posición en coordenadas (x,y) (como antes)\n * grid contendrá una grid 2x2 con la información relevante de la comida en las esquinas. 
Esto es:\n - En cada item de la grid habrá un true o un false, en función de si en esa esquina hay o no comida.\n - Por ejemplo, si la grid es:\n | True False |\n | True True |\n entonces significa que ya habremos comido la comida de la esquina (right,top)\n \"\"\"\n self.startingFood = startingGameState.getFood()\n self.cornersFood = game.Grid(2, 2) # Defino la matriz tipo grid de dimensión 2x2\n self.cornersFood[0][0] = self.startingFood[1][top] # Asigno manualmente cada valor a la grid\n self.cornersFood[0][1] = self.startingFood[right][top] # El problema es que yo enumero diferente la matriz\n self.cornersFood[1][0] = self.startingFood[1][1] # Es decir, a[0][0] es la esquina superior izquierda\n self.cornersFood[1][1] = self.startingFood[right][1]\n self.startFoodPosition = (self.startingPosition, self.cornersFood)", "def __init__(self, squares=None, ncols=8, nrows=8):\n self.ncols = ncols\n self.nrows = nrows\n\n if not squares:\n self.squares = dict((i, None) for i in xrange(ncols * nrows))\n\n # 0 begins as the top of the board, making it black\n for i in xrange(ncols * 3):\n row, col = i // ncols, i % ncols\n if row % 2 == col % 2:\n self.squares[i] = Piece(\"black\")\n # red would be the bottom 3 rows\n for i in xrange(ncols * (nrows - 3), ncols * nrows):\n row, col = i // ncols, i % ncols\n if row % 2 == col % 2:\n self.squares[i] = Piece(\"red\")", "def init_board(self):\n\n self.__board = dict()\n order = ['rook', 'knight', 'bishop', 'queen', 'king', 'bishop',\n 'knight', 'rook']\n for j, name in enumerate(order):\n\n self.__board[(0, j)] = ChessGame.Piece( name, ChessGame.WHITE)\n self.__board[(7, j)] = ChessGame.Piece( name, ChessGame.BLACK)\n self.__board[(1, j)] = ChessGame.Piece('pawn', ChessGame.WHITE)\n self.__board[(6, j)] = ChessGame.Piece('pawn', ChessGame.BLACK)\n\n self.__players = { ChessGame.WHITE: set(), ChessGame.BLACK: set() }\n for color in (ChessGame.BLACK, ChessGame.WHITE):\n self.__players[color] = {(x, y) for (x, y), piece in\n self.__board.iteritems() if piece.color == color }\n\n return", "def __init__(self,m,n):\n self.columns = m\n self.rows = n\n self.board = makeBoard(m,n)", "def ai_3(board: BoardState) -> BoardState:\n cur_piece = board.cpiece\n if cur_piece is not None:\n moved = False\n for (x,y) in board.open_spots:\n move = find_win_spot(cur_piece, board)\n if move:\n board[move] = board.cpiece_id\n moved = True\n break\n if not moved:\n board[choice(list(board.open_spots))] = board.cpiece_id\n board.cpiece_id = choose_none_winable_piece(board)\n else:\n board.cpiece_id = choose_none_winable_piece(board)\n\n if (board.cpiece_id is None) and not board.is_full:\n board.cpiece_id, _ = choice(list(board.unused_game_pieces))\n return board", "def __init__(self, FEN_state=\"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1\"):\n board, turn, castle_avail, en_passants, half_moves, full_moves = FEN_state.split()\n\n # contains all of the pieces\n self.board = list(map(State._parse_row, board.split('/')))\n\n # Whether the current board state is white to move or not (black to move)\n self.white_to_move = turn == 'w'\n\n # Which sides can be castled on\n self.available_castles = castle_avail\n\n # States where it is possible to move en passant, if any\n self.en_passants = '' if en_passants == '-' else en_passants\n\n # Number of half moves already made in the game.\n # Used to track 50 move repetition\n self.half_moves = int(half_moves)\n\n # Used the current full move, used to track for algebraic notation\n self.full_moves = int(full_moves)", "def 
setup(self):\n piece_order = ['ROOK','KNIGHT','BISHOP','QUEEN','KING','BISHOP','KNIGHT','ROOK']\n for row,colour in zip([0,7],['BLACK','WHITE']):\n for col,piece in enumerate(piece_order):\n self.board[row][col] = colour + '_' + piece\n \n for row,colour in zip([1,6],['BLACK','WHITE']):\n for i in range(8):\n self.board[row][i] = colour + '_' + 'PAWN'\n \n self.toplay = 'WHITE'", "def __init__(self, board, index):\n self.board = board\n self.index = index\n self._x, self._y = None, None", "def __init__(self, size, board):\n self.BoardSize = size #the size of the board\n self.CurrentGameBoard= board #the current state of the game board", "def piece_type(self, pos, board):\n piece = board[self.ind(pos)[0]][self.ind(pos)[1]]\n return piece", "def __init__(self, board_dim= DEFAULT_DIM):\r\n self.width = board_dim\r\n self.height = board_dim\r\n\r\n self.grid = np.array([[' '] * self.width for i in range(self.height)])\r\n self.num_checkers = 0 # keeps track of how many checkers have been added\r\n\r\n self.available_moves = [(row, col) for row in range(self.height) for col in range(self.width)]\r\n\r\n # Specify the winning condition based on the board's dimension\r\n if (self.width < 5):\r\n self.win_condition = self.width\r\n else:\r\n self.win_condition = 5", "def get_pawn_moves(self, state):\n pawn_moves = []\n\n if self.color == cc.WHITE_ACTIVE:\n forward_1 = add_vectors(self.coord, cc.V_UP)\n forward_2 = add_vectors(self.coord, cc.V_UP_2)\n attacks = get_crawler_moves(self.coord, cc.W_PAWN_CAPTURE_VECTORS)\n starting_rank = cc.RANK_2\n promo_rank = cc.RANK_8\n promo_pieces = cc.WHITE_PROMO\n enemy_set = cc.BLACK_PIECES\n elif self.color == cc.BLACK_ACTIVE:\n forward_1 = add_vectors(self.coord, cc.V_DOWN)\n forward_2 = add_vectors(self.coord, cc.V_DOWN_2)\n attacks = get_crawler_moves(self.coord, cc.B_PAWN_CAPTURE_VECTORS)\n starting_rank = cc.RANK_7\n promo_rank = cc.RANK_1\n promo_pieces = cc.BLACK_PROMO\n enemy_set = cc.WHITE_PIECES\n else:\n raise Exception(\"get_pawn_moves: Invalid Piece Color\")\n\n if validate_move(forward_1) and state.board[forward_1] == cc.NO_PIECE:\n if forward_1[0] == promo_rank:\n for p in promo_pieces:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_1, promo=p))\n else:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_1))\n if self.coord[0] == starting_rank and validate_move(forward_2) and state.board[forward_2] == cc.NO_PIECE:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_2, en_p=forward_1))\n\n for attack in attacks:\n if state.board[attack] in enemy_set:\n if attack[0] == promo_rank:\n for p in promo_pieces:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True, promo=p))\n else:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True))\n # Make sure Pawns can attack en_passant squares\n elif attack == state.en_passant:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True))\n\n return pawn_moves", "def __init__(self, python_board: list[list[int]] = None, red_active: bool = True) -> None:\n\n game_board = [[0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]]\n\n if python_board is not None:\n self.board_array = np.array(python_board)\n else:\n self.board_array = np.array(game_board)\n\n self.move_number = 0\n\n # Creating the kernels to use in a 2d convolution to check the board for a winner later\n across = np.array([[1, 1, 1, 1]])\n vertical = 
np.transpose(across)\n main_diagonal = np.eye(4, dtype=np.uint8)\n off_diagonal = np.fliplr(main_diagonal)\n self._detection_kernels_red = [across, vertical, main_diagonal, off_diagonal]\n self._detection_kernels_yellow = [kernel * -1 for kernel in self._detection_kernels_red]\n\n self._is_red_active = red_active\n\n # Matches moves to their indices in self._valid_moves, this order is very important\n # for optimising alpha-beta pruning\n self._valid_move_order = {3: 0, 2: 1, 4: 2, 5: 3, 1: 4, 0: 5, 6: 6}\n self._valid_moves = [3, 2, 4, 5, 1, 0, 6]\n self._column_to_row = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}\n\n self._win_state = None\n\n # This code reads in the hash keys for use in Zobrist hashing, for more information, see\n # opening_book_gen.py\n red_hash_keys = []\n with open('data/Zobrist_Hash_Keys/Zobrist_red_key.csv') as file:\n reader = csv.reader(file)\n for row in reader:\n red_hash_keys.append([int(r) for r in row])\n self._red_hash_keys = np.array(red_hash_keys)\n\n yellow_hash_keys = []\n with open('data/Zobrist_Hash_Keys/Zobrist_yellow_key.csv') as file:\n reader = csv.reader(file)\n for row in reader:\n yellow_hash_keys.append([int(r) for r in row])\n self._yellow_hash_keys = np.array(yellow_hash_keys)\n\n self.hash = 0", "def __init__(self, chess_board, pieces, window, update):\n import turtle\n\n self.board = chess_board\n self.pieces = pieces\n self.update = update\n self.is_piece_selected = False\n self.selected_row = -1\n self.selected_col = -1\n self.turn_color = \"white\"\n self.window = turtle.Screen()\n window.onclick(self.findHeuristic)", "def new_generation(board):\n # size = board.shape\n new = np.zeros(board.shape)\n for row in range(board.shape[0]):\n for col in range(board.shape[1]):\n if calculate_dead_alive(board, row, col):\n new[row][col] = 1\n else:\n new[row][col] = 0\n return new", "def is_in_check(self, player):\n # List of coords in board\n col = ['a','b','c','d','e','f','g','h','i'] # the columns\n a = []\n for i in range(10):\n a.append([j + str(i+1) for j in col])\n \n # Flatten the list\n board_coords = []\n for sublist in a:\n for coord in sublist:\n board_coords.append(coord)\n \n # getting each object in the board for a player\n pieces_coords = []\n pieces_left = []\n for row in range(10):\n for column in range(9):\n if self.get_board()[row][column] is not None and self.get_board()[row][column].get_color() == player.upper():\n # pieces left on the board for the player\n pieces_coords.append((row, column))\n pieces_left.append(self.get_board()[row][column])\n \n p_b_coord = (pieces_coords, board_coords)\n \n counter = 0 \n for piece_coord in pieces_coords: \n for board_coord in board_coords: \n translated_index = self.column_to_letter(piece_coord[1]) + str(piece_coord[0]) \n piece = self.get_piece_type(translated_index)\n if piece is not None:\n if piece.check_legal(translated_index, board_coord, self.get_board(), self.get_game_state()) == True:\n counter += 1\n print(counter)\n if counter == 0:\n self._current_state = upper(player) + '_WON'\n return True \n return False", "def gameOfLife(self, board) -> None:\n rows = len(board)\n cols = len(board[0])\n neighbours = [(-1, 1), (0, 1), (1, 1), (-1, 0), (1, 0), (-1, -1), (0, -1), (1, -1)]\n for row in range(rows):\n for col in range(cols):\n live_neighbour = 0\n for i, j in neighbours:\n new_row = row + i\n new_col = col + j\n if new_row >= 0 and new_row < rows and new_col >= 0 and new_col < cols and \\\n board[new_row][new_col] in [1, -1]:\n live_neighbour += 1\n if (live_neighbour < 2 or 
live_neighbour > 3) and board[row][col] == 1:\n board[row][col] = -1\n elif live_neighbour == 3 and board[row][col] == 0:\n board[row][col] = 2\n for row in range(rows):\n for col in range(cols):\n if board[row][col] == -1:\n board[row][col] = 0\n elif board[row][col] == 2:\n board[row][col] = 1", "def gameOfLife(self, board: List[List[int]]) -> None:\n self.board = copy.deepcopy(board)\n self.rows = len(self.board)\n self.cols = len(self.board[0])\n for i in range(self.rows):\n for j in range(self.cols):\n neighbors = self.count_neighbors(i, j)\n if board[i][j] == 1:\n if neighbors < 2 or neighbors > 3:\n board[i][j] = 0\n else:\n if neighbors == 3:\n board[i][j] = 1", "def serialize(board):\n bstate = np.zeros(shape=64, dtype=np.uint)\n\n pieces_map = {\"p\": 1, \"n\": 2, \"b\": 3, \"q\": 4, \"k\": 5, \"r\": 6}\n\n # General cases\n for i, piece in board.piece_map().items():\n piece = str(piece)\n piece_num = 0\n if piece.isupper():\n piece_num += 8\n piece = piece.lower()\n piece_num += pieces_map[piece]\n bstate[i] = piece_num\n\n # Special cases...\n # Castling\n for location, has_rights in [(0, board.has_queenside_castling_rights(True)),\n (7, board.has_kingside_castling_rights(True)),\n (63, board.has_kingside_castling_rights(False)),\n (63 - 7, board.has_queenside_castling_rights(False))]:\n if has_rights:\n bstate[location] += 1\n\n # En Passant\n ep_square = board.ep_square\n if ep_square is not None:\n bstate[ep_square] = 8\n\n bstate = bstate.reshape((8, 8))\n\n state = np.zeros(shape=(5, 8, 8), dtype=np.uint8)\n\n # Bitwise magic to convert everything into binary values\n state[0] = (bstate >> 0) & 1\n state[1] = (bstate >> 1) & 1\n state[2] = (bstate >> 2) & 1\n state[3] = (bstate >> 3) & 1\n\n state[4] = board.turn * 1.0\n\n return state", "def check_complete_board(start_pos, dim_square, board):\n change = False\n for row in range(8):\n for col in range(8):\n # Grab image on real board\n im = region_grabber((start_pos[0] + col * dim_square[0],\n start_pos[1] - (row + 1.0) * dim_square[1],\n start_pos[0] + (col + 1.0) * dim_square[0],\n start_pos[1] - row * dim_square[1]))\n\n # Check if piece corresponds with piece on board if there is a piece\n if piece_on_pos((row, col), board):\n obj = board[row][col]\n if (row + col) % 2 == 0: # Black background\n pos = imagesearcharea(obj.im_b, 0, 0, 0, 0, 0.9, im)\n if pos != [-1, -1]:\n continue\n else: # White background\n pos = imagesearcharea(obj.im_w, 0, 0, 0, 0, 0.9, im)\n if pos != [-1, -1]:\n continue\n\n # Else --> Go through every possible image\n if (row + col) % 2 == 0: # Black background\n # Pawn\n pos = imagesearcharea(\"Images/PWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PW\")\n continue\n pos = imagesearcharea(\"Images/PBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PB\")\n continue\n # Tower\n pos = imagesearcharea(\"Images/TWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TW\")\n continue\n pos = imagesearcharea(\"Images/TBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TB\")\n continue\n # Horse\n pos = imagesearcharea(\"Images/HWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HW\")\n continue\n pos = imagesearcharea(\"Images/HBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HB\")\n 
continue\n # Bishop\n pos = imagesearcharea(\"Images/BWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BW\")\n continue\n pos = imagesearcharea(\"Images/BBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BB\")\n continue\n # King\n pos = imagesearcharea(\"Images/KWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KW\")\n continue\n pos = imagesearcharea(\"Images/KBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KB\")\n continue\n # Queen\n pos = imagesearcharea(\"Images/QWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QW\")\n continue\n pos = imagesearcharea(\"Images/QBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QB\")\n continue\n board[row][col] = None\n else: # White background\n # Pawn\n pos = imagesearcharea(\"Images/PWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PW\")\n continue\n pos = imagesearcharea(\"Images/PBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PB\")\n continue\n # Tower\n pos = imagesearcharea(\"Images/TWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TW\")\n continue\n pos = imagesearcharea(\"Images/TBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TB\")\n continue\n # Horse\n pos = imagesearcharea(\"Images/HWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HW\")\n continue\n pos = imagesearcharea(\"Images/HBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HB\")\n continue\n # Bishop\n pos = imagesearcharea(\"Images/BWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BW\")\n continue\n pos = imagesearcharea(\"Images/BBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BB\")\n continue\n # King\n pos = imagesearcharea(\"Images/KWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KW\")\n continue\n pos = imagesearcharea(\"Images/KBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KB\")\n continue\n # Queen\n pos = imagesearcharea(\"Images/QWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QW\")\n continue\n pos = imagesearcharea(\"Images/QBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QB\")\n continue\n board[row][col] = None\n\n if change:\n pyautogui.moveTo(start_pos[0] + 4 * dim_square[0],\n start_pos[1] - 4 * dim_square[1], 0.2)\n\n return change", "def evaluateBoardState(self, board):\n\n \"\"\"\n These are the variables and functions for board objects which may be helpful when creating your Agent.\n Look into board.py for more information/descriptions of each, or to look for any other definitions which may help you.\n\n Board Variables:\n board.width \n board.height\n board.last_move\n board.num_to_connect\n board.winning_zones\n board.score_array \n board.current_player_score\n\n 
Board Functions:\n get_cell_value(row, col)\n try_move(col)\n valid_move(row, col)\n valid_moves()\n terminal(self)\n legal_moves()\n next_state(turn)\n winner()\n \"\"\"\n if self.id == 1:\n opponent_id = 2\n else:\n opponent_id = 1\n\n maxvalue = 100000\n minvalue = -maxvalue\n winner = board.winner()\n if winner == self.id:\n return maxvalue\n elif winner == opponent_id:\n return minvalue\n size_y = board.height\n size_x = board.width\n map_ = []\n num_to_connect = board.num_to_connect\n total_points = 0\n\n multiply_reachable = 1\n multiply_oddeven = 1\n # basically this function is calculating all the possible win positions\n # more pieces in a possible win position will be counted with more weights\n # a win position with X pieces in it will be counted as X^2 points\n # initialise the zones maps\n for i in range(size_y):\n map_.append([])\n for j in range(size_x):\n map_[i].append([])\n\n # Fill in the horizontal win positions\n for i in range(size_y):\n for j in range(size_x - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i][j + k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i][j + k] == self.id:\n points += len(board.winning_zones[j+k][i])\n if (self.id == 1 and i % 2 == 1) or (self.id == 2 and i%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and opponent_pieces_count == 0:\n if j - 1 >= 0 and board.board[i][j + 3] == 0 and board.board[i][j - 1] == 0 \\\n and board.try_move(j + 3) == i and board.try_move(j - 1) == i:\n return maxvalue\n elif j + 4 < size_y and board.board[i][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i][j + k] == 0 and board.try_move(j + k) == i:\n points *= multiply_reachable\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if j - 1 >= 0 and board.board[i][j + 3] == 0 and board.board[i][j - 1] == 0 \\\n and board.try_move(j + 3) == i and board.try_move(j - 1) == i:\n return minvalue\n elif j + 4 < size_y and board.board[i][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i][j + k] == 0 and board.try_move(j + k) == i:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the vertical win positions\n for i in range(size_x):\n for j in range(size_y - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[j + k][i] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[j + k][i] == self.id:\n points += len(board.winning_zones[i][j+k])\n if (self.id == 1 and (j+k) % 2 == 1) or (self.id == 2 and (j+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n points *= multiply_reachable\n # if opponent_pieces_count == 3 and self_pieces_count == 0:\n # points *= -1\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the forward diagonal win positions\n for i in range(size_y - num_to_connect + 1):\n for j in range(size_x - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i + k][j + k] == 
opponent_id:\n opponent_pieces_count += 1\n elif board.board[i + k][j + k] == self.id:\n points += len(board.winning_zones[j+k][i+k])\n if (self.id == 1 and (i+k) % 2 == 1) or (self.id == 2 and (i+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and opponent_pieces_count == 0:\n if i - 1 >= 0 and j - 1 >= 0 and board.board[i + 3][j + 3] == 0 and board.board[i - 1][j - 1] == 0 \\\n and board.try_move(j + 3) == i + 3 and board.try_move(j - 1) == i - 1:\n return maxvalue\n elif i + 4 < size_y and j + 4 < size_x and board.board[i + 4][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i + 4 and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i + k][j + k] == 0 and board.try_move(j + k) == i + k:\n points *= multiply_reachable\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if i - 1 >= 0 and j - 1 >= 0 and board.board[i + 3][j + 3] == 0 and board.board[i - 1][j - 1] == 0 \\\n and board.try_move(j + 3) == i + 3 and board.try_move(j - 1) == i - 1:\n return minvalue\n elif i + 4 < size_y and j + 4 < size_x and board.board[i + 4][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i + 4 and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i + k][j + k] == 0 and board.try_move(j + k) == i + k:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the backward diagonal win positions\n for i in range(size_y - num_to_connect + 1):\n for j in range(size_x - 1, num_to_connect - 1 - 1, -1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i + k][j - k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i + k][j - k] == self.id:\n points += len(board.winning_zones[j-k][i+k])\n if (self.id == 1 and (i+k) % 2 == 1) or (self.id == 2 and (i+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and self_pieces_count == 0:\n if board.board[i + 3][j - 3] == 0 and board.board[i - 1][j + 1] == 0 \\\n and board.try_move(j - 3) == i + 3 and board.try_move(j + 1) == i - 1:\n return maxvalue\n elif i + 4 < size_y and j - 4 >= 0 and board.board[i + 4][j - 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j - 4) == i + 4 and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i + k][j - k] == 0 and board.try_move(j - k) == i + k:\n points *= multiply_reachable\n\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if board.board[i + 3][j - 3] == 0 and board.board[i - 1][j + 1] == 0 \\\n and board.try_move(j - 3) == i + 3 and board.try_move(j + 1) == i - 1:\n return minvalue\n elif i + 4 < size_y and j - 4 >= 0 and board.board[i + 4][j - 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j - 4) == i + 4 and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i + k][j - k] == 0 and board.try_move(j - k) == i + k:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n return total_points", "def __init__(self, name, starting_player, *moves: List[Point]):\r\n self.name = name\r\n self.starting_player = starting_player\r\n self.moves = moves\r\n self.move_table = 
np.zeros((2, BOARD_HEIGHT * BOARD_WIDTH), dtype=np.int64) # player, square\r\n self.precompute_move_table()", "def __init__(self,state,player=WHITE):\n if(state==None):\n self.gameState = dict()\n for x in range(0,WIDTH):\n for y in range(0,HEIGHT):\n self.gameState[x,y] = EMPTY\n for x in range(0,WIDTH):\n self.gameState[x,BSTARTROW] = BLACK#Blacks starting row\n self.gameState[x,WSTARTROW] = WHITE#Whites starting row\n #whites.append(Board.pawn(Board.pos(x,WSTARTROW),WHITE))\n #blacks.append(Board.pawn(Board.pos(x,BSTARTROW),BLACK))\n else:\n self.gameState = state\n \n self.whoseTurn = player\n self.cachedWin = False # set to True in winFor() if\n self.cachedWinner = None", "def __init__(self, board_size=MAX_BOARD_SIZE, cell_size=MAX_CELL_SIZE, dead_color=DEAD, alive_color=ALIVE):\n self._board_size = board_size\n self._cell_size = cell_size\n self.dead_color = dead_color\n self.alive_color = alive_color\n\n self.board = []\n self.mode = 0", "def __init__(self, board_size=BOARD_SIZE, num_mines=NUM_MINES):\n\n self.board_size = board_size\n self.num_mines = num_mines\n self.board = place_mines(board_size, num_mines)\n self.my_board = np.ones((board_size, board_size), dtype=int) * CLOSED\n self.valid_actions = np.ones((self.board_size, self.board_size), dtype=np.bool)\n\n self.observation_space = spaces.Box(low=-2, high=9,\n shape=(self.board_size, self.board_size), dtype=np.int)\n self.action_space = spaces.MultiDiscrete([self.board_size, self.board_size])", "def __init__(self):\n\n self._board = list()\n self._palace_board_blue = ['d9', 'e8', 'e10', 'f9']\n self._palace_board_red = ['d2', 'e1', 'e3', 'f2']\n self._palace_diagonal_blue = ['d8', 'd10', 'e9', 'f8', 'f10']\n self._palace_diagonal_red = ['d1', 'd3', 'e2', 'f1', 'f3']\n self._board_columns = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']\n self._board_rows = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']\n self._general_position_blue = 'e9'\n self._general_position_red = 'e2'\n\n self.setup_board()", "def __init__(self, board_size=BOARD_SIZE, num_mines=NUM_MINES):\n\n self.board_size = board_size\n self.num_mines = num_mines\n self.board = place_mines(board_size, num_mines)\n self.my_board = np.ones((board_size, board_size), dtype=int) * CLOSED\n self.num_actions = 0\n\n self.observation_space = spaces.Box(low=-2, high=9,\n shape=(self.board_size, self.board_size), dtype=np.int)\n self.action_space = spaces.Discrete(self.board_size*self.board_size)\n self.valid_actions = np.ones((self.board_size * self.board_size), dtype=np.bool)", "def new_piece() -> dict:\n shape = random.choice(list(PIECES.keys()))\n # start the new piece above the board (i.e. 
y < 0)\n return {\n 'shape': shape,\n 'rotation': random.randint(0, len(PIECES[shape]) - 1),\n 'x': int(BOARDWIDTH / 2) - int(TEMPLATEWIDTH / 2),\n 'y': -2,\n 'color': PALETTE[shape]\n }", "def __init__(self, testBoard=None):\n if (testBoard == None):\n self.blackPieces = self.getStartingBlackPieces()\n self.whitePieces = self.getStartingWhitePieces()\n self.blackKing = Piece(\"black\", \"king\", 0, 4) #direct access to kings for\n self.whiteKing = Piece(\"black\", \"king\", 7, 4) #checkmate checks\n\n\n else:\n self.blackPieces = self.makeCustonBlackPieces(testBoard)\n self.whitePieces = self.makeCustonWhitePieces(testBoard)", "def __init__(self, piece_str, parent_game):\n self.piece_str = piece_str\n self._parent_game = parent_game\n self.rotation = 0\n self.last_move = None\n\n # sets the spawning location (compensates for O's shape by moving it forward one column)\n self.pos = [6, 4 if piece_str == 'O' else 3]", "def setup(self):\n self.board[(3, 3)] = -1\n self.board[(3, 4)] = -1\n self.board[(4, 3)] = 1\n self.board[(4, 4)] = 1\n\n self.stones_set = 4", "def __init__(self, board_dimensions: tuple, initial_position: list = None) -> None:\n assert len(board_dimensions) == 2, \"board dimensions must be 2 digit array\"\n assert all(\n [dim >= 0 for dim in board_dimensions]\n ), \"dimensions must be positive\"\n self.board_dimensions = board_dimensions\n if initial_position:\n assert type(initial_position) == list, \"Position must be length 2 list\"\n assert (\n len(initial_position) == 2\n ), \"Position must be a list of length 2 containing x and y coordinates where top left of the board is [0,0]\"\n assert (\n 0 <= initial_position[0] < self.board_dimensions[0]\n ), \"Invalid initial x position\"\n assert (\n 0 <= initial_position[1] < self.board_dimensions[1]\n ), \"invalid initial y position\"\n self.position = initial_position.copy()\n else:\n self.position = [\n np.random.randint(0, board_dimensions[0] - 1),\n np.random.randint(0, board_dimensions[1] - 1),\n ]", "def makeBoard(n):\n valid_positions = []\n for i in range(0, n):\n for j in range(0,n):\n valid_positions.append(Position(i,j))\n return valid_positions", "def setup_new_board(self):\n\n logger.info(u'setup_new_board()')\n\n self.squares = [[None for j in xrange(8)] for i in xrange(8)]\n \n self.black_checkers = [ch.Checker(u'black', self) for i in xrange(12)]\n self.white_checkers = [ch.Checker(u'white', self) for i in xrange(12)]\n\n u\"\"\" Place checkers in starting squares \"\"\"\n i = 0\n for row in xrange(3):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.white_checkers[i])\n i += 1\n\n i = 0\n for row in xrange(5, 8):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.black_checkers[i])\n i += 1", "def advance_board(self):\n # We can advance the board using a pretty simple convolution,\n # so we don't have to execute a lot of loops in python.\n # Of course, this probably won't be sufficient for extremely\n # large boards.\n self.num_steps += 1\n board = self.board\n cfilter = np.array([[1,1,1],[1,0,1],[1,1,1]], dtype=np.uint16)\n\n alive = board & CellTypes.alive > 0\n spawning = board & CellTypes.spawning > 0\n frozen = board & CellTypes.frozen > 0\n\n can_die = ~frozen & (\n convolve2d(board & CellTypes.preserving, cfilter) == 0)\n can_grow = ~frozen & (\n convolve2d(board & CellTypes.inhibiting, cfilter) == 0)\n\n num_neighbors = convolve2d(alive, cfilter)\n num_spawn = convolve2d(spawning, cfilter)\n 
spawn_prob = 1 - (1 - self.spawn_prob)**num_spawn\n has_spawned = coinflip(spawn_prob, board.shape)\n\n born_rule = np.zeros(9, dtype=bool)\n born_rule[list(self.born_rule)] = True\n dead_rule = np.ones(9, dtype=bool)\n dead_rule[list(self.survive_rule)] = False\n\n new_alive = (born_rule[num_neighbors] | has_spawned) & ~alive & can_grow\n new_dead = dead_rule[num_neighbors] & alive & can_die\n\n new_flags = np.zeros_like(board)\n color_weights = 1 * alive + 2 * spawning\n for color in CellTypes.colors:\n # For each of the colors, see if there are two or more neighbors\n # that have it. If so, any new cells (whether born or spawned)\n # will also get that color.\n has_color = board & color > 0\n new_color = convolve2d(has_color * color_weights, cfilter) >= 2\n new_flags += color * new_color\n indestructible = alive & (board & CellTypes.destructible == 0)\n new_flags += CellTypes.destructible * (convolve2d(indestructible, cfilter) < 2)\n\n board *= ~(new_alive | new_dead)\n board += new_alive * (CellTypes.alive + new_flags)", "def add_to_board(board: list, piece: dict) -> None:\n for x in range(TEMPLATEWIDTH):\n for y in range(TEMPLATEHEIGHT):\n if PIECES[piece['shape']][piece['rotation']][y][x] != BLANK:\n board[y + piece['y']][x + piece['x']] = piece['color']", "def gameOfLife(self, board: 'List[List[int]]') -> None:\n m, n = len(board), len(board[0])\n\n def calc(i, j):\n neighbors = [\n [i-1, j-1],[i-1,j],[i-1,j+1],\n [i, j-1],[i,j+1],\n [i+1, j-1],[i+1, j],[i+1,j+1]\n ]\n sum = 0\n for r,c in neighbors:\n if 0 <= r < m and 0 <= c < n:\n sum += (board[r][c] & 1)\n return sum\n\n for i in range(m):\n for j in range(n):\n status = calc(i, j)\n if board[i][j] == 1 and (status == 2 or status == 3):\n board[i][j] = 3\n else:\n if status == 3:\n board[i][j] = 2\n for i in range(m):\n for j in range(n):\n board[i][j] >>= 1", "def gameOfLife(self, board):\n n = len(board)\n m = len(board[0])\n DX = [0, 0, 1, -1, 1, 1, -1, -1]\n DY = [1, -1, 0, 0, 1, -1, 1, -1];\n for i in range(n):\n for j in range(m):\n cnt = 0\n for k in range(8):\n x = i + DX[k]\n y = j + DY[k]\n if x < 0 or x >= n or y < 0 or y >= m:\n continue\n cnt += board[x][y] & 1\n if (board[i][j] & 1) > 0:\n if cnt >= 2 and cnt <= 3:\n board[i][j] = 0b11\n elif cnt == 3:\n board[i][j] = 0b10\n for i in range(n):\n for j in range(m):\n board[i][j] >>= 1", "def __init__(self, pos, length, direction, board_size):\n self._pos = pos\n self._x_pos, self._y_pos = self._pos\n self._len = length\n self._dir = direction\n self._bs = board_size\n self._is_hit = False\n self._hit_coors = []\n self._coordinates = self.coordinates()", "def __init__(self):\n self.board = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n self.last_move = None", "def gameOfLife(self, board: List[List[int]]) -> None:\r\n self.board = board\r\n self.l = len(board)\r\n self.w = len(board[0])\r\n status = [[0] * self.w for _ in range(self.l)]\r\n for i in range(self.l):\r\n for j in range(self.w):\r\n status[i][j] = self.statusUpdate(board[i][j], self.countLivingNeighbor([i, j]))\r\n #print(\"prev: \", i, j ,board[i][j], \" count: \", self.countLivingNeighbor([i, j]), \" after:\", status[i][j])\r\n for i in range(self.l):\r\n for j in range(self.w):\r\n board[i][j] = status[i][j]", "def __init__(self, board=None):\n self.winner = None\n self.board = board or [self.__class__.EMPTY_POSITION_COUNTER] * 9", "def __init__(self, size, given_cells):\n self.ROWS = string.ascii_uppercase[:size ** 2]\n self.COLS = [str(i) for i in range(1, size ** 2)]\n self.size = size\n self.given_cells = 
given_cells\n self.board = self.create_board()\n self.squares = [utility.cross(i, j) for i in [self.ROWS[i:i + size] for i in range(0, len(self.ROWS), size)]\n for j in [self.COLS[i:i + size] for i in range(0, len(self.COLS), size)]]\n self.attach_neighbors()\n self.update_neighbor_values_by_given()\n print(\"Initial board:\")\n GUI.print_sudoku(self.board, self.size)", "def __init__(self, positions):\n self.positions_count = positions\n self.board = [0 for _ in range(self.positions_count)]", "def on_board(self, pos):\n i, j = pos\n return 0 <= i < COLS and 0 <= j < ROWS", "def __init__(self, colour):\n self.colour = colour\n self.board = Board()\n self.pieces = self.assign_pieces()\n self.strategy = Strategy()", "def gameOfLife(self, board: List[List[int]]) -> None:\n ds = [(-1, -1), (0, -1), (-1, 0), (1, 0), (0, 1), (1, 1), (1, -1), (-1, 1)]\n for i in range(0, len(board)):\n for j in range(0, len(board[i])):\n lnum = 0\n for k in range(0, len(ds)):\n x, y = ds[k]\n if 0 <= i + x < len(board) and 0 <= j + y < len(board[i]):\n s = board[i + x][j + y] & 1\n if s == 1:\n lnum += 1\n if board[i][j] == 1:\n if lnum < 2:\n board[i][j] |= 2\n elif 2 <= lnum <= 3:\n pass\n else:\n board[i][j] |= 2\n else:\n if lnum == 3:\n board[i][j] |= 2\n for i in range(0, len(board)):\n for j in range(0, len(board[i])):\n if board[i][j] > 1:\n board[i][j] = ~(board[i][j] & 1) & 1", "def __init__(self, \n nd = 2, \n goal = np.array([1.0,1.0]),\n state_bound = [[0,1],[0,1]],\n nA = 4,\n action_list = [[0,1],[0,-1],[1,0],[-1,0]],\n<<<<<<< HEAD:archive-code/puddleworld.py\n ngrid = [10.0,10.0],\n maxStep = 40):\n ngrid = [40, 40]\n x_vec = np.linspace(0,1,ngrid[0])\n y_vec = np.linspace(0,1,ngrid[1])\n for x in x_vec:\n for y in y_vec:\n if ~self.inPuddle([x,y]):\n puddle.append([x,y])\n # puddle is a closed loop \n outpuddlepts = np.asarray(puddle)\n \"\"\"\n\n\n # Horizontal wing of puddle consists of \n # 1) rectangle area xch1<= x <=xc2 && ych1-radius <= y <=ych2+radius\n # (xchi,ychi) is the center points (h ==> horizantal)\n # x, y = state[0], state[1]\n xch1, ych1 = 0.3, 0.7\n xch2, ych2 = 0.65, ych1\n radius = 0.1\n\n\n #Vertical wing of puddle consists of \n # 1) rectangle area xcv1-radius<= x <=xcv2+radius && ycv1 <= y <= ycv2\n # where (xcvi,ycvi) is the center points (v ==> vertical)\n xcv1 = 0.45; ycv1=0.4;\n xcv2 = xcv1; ycv2 = 0.8;\n\n # % 2) two half-circle at end edges of rectangle\n \n # POINTS ON HORIZANTAL LINES OF PUDDLE BOUNDARY\n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n puddle.append([xcv1-radius,ych1-radius])\n \n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n \n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n \n puddle.append([xcv1-radius,ych1+radius])\n\n\n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n\n # POINTS ON VERTICAL LINES OF PUDDLE BOUNDARY\n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1-radius,y])\n \n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1+radius,y])\n \"\"\"\n for y in np.arrange():\n puddle.append([])\n \n for y in np.arrange():\n puddle.append([])\n \"\"\"\n\n # HALF CIRCLES\n ngridTheta = 10\n thetaVec = np.linspace(0,pi,ngridTheta)\n\n for t in thetaVec:\n puddle.append([xch1+radius*np.cos(pi/2+t),ych1+radius*np.sin(pi/2+t)])\n\n for t in thetaVec:\n 
puddle.append([xch2+radius*np.cos(-pi/2+t),ych2+radius*np.sin(-pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xcv1+radius*np.cos(pi+t),ycv1+radius*np.sin(pi+t)])\n\n for t in thetaVec:\n puddle.append([xcv2+radius*np.cos(t),ycv2+radius*np.sin(t)])\n\n \n outpuddlepts = np.asarray(puddle)\n return outpuddlepts", "def gameOfLife(self, board: List[List[int]]) -> None:\n neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)]\n row = len(board)\n col = len(board[0])\n copyboard = copy.deepcopy(board)\n for i in range(row):\n for j in range(col):\n liven = 0\n for neighbor in neighbors:\n r = i + neighbor[0]\n c = j + neighbor[1]\n if (r>=0 and r<row) and (c>=0 and c<col) and (copyboard[r][c] == 1):\n liven += 1\n if copyboard[i][j]==1 and (liven<2 or liven>3):\n board[i][j] = 0\n if copyboard[i][j]==0 and liven == 3:\n board[i][j] =1", "def _initiate_board(self):\n grid = []\n for i in range(constant.BOARD_DIMENSION):\n # Starts each row\n current_row = []\n for j in range(constant.BOARD_DIMENSION):\n # Adds the pieces depending on the position\n if i < constant.ROWS_OF_PIECES:\n # Black pieces\n if (j + i) % 2 != 0:\n current_row.append(Piece(i, j, Player.black))\n self.num_black_pieces = self.num_black_pieces + 1\n else:\n current_row.append(None)\n\n elif i >= constant.BOARD_DIMENSION - constant.ROWS_OF_PIECES:\n # White pieces\n if (j + i) % 2 != 0:\n current_row.append(Piece(i, j, Player.white))\n self.num_white_pieces = self.num_white_pieces + 1\n else:\n current_row.append(None)\n\n else:\n current_row.append(None)\n\n grid.append(current_row)\n\n return grid", "def __init__(self, rows, cols, mines):\n self.rows = rows\n self.cols = cols\n self.mines = mines\n self.opened = 0\n self.game_won = False\n self.game_lost = False\n self.board = self.__init__minefield__()\n self.tiles = self.__init__tiles__()", "def search(self, piece_number):\n # Recursive search function.\n if piece_number == len(self.pieces):\n # Found!\n self.count += 1\n if self.height <= 4 and self.width <= 4:\n self.print_board()\n return\n\n kind = self.pieces[piece_number]\n\n # Load the last used position for this type of pieces to avoid generating\n # duplicated positions.\n last_index = self.last_index[kind]\n f_y = self.last_xy[kind][last_index].y\n f_x = self.last_xy[kind][last_index].x\n\n j = f_y\n while j < self.height:\n # Skip if row is occupied.\n if self.attacked_rows[j] == 0:\n i = f_x\n while i < self.width:\n # Skip if cell is occupied.\n if self.attacked_cells[(j + 2) * (self.width + 4) + (i + 2)] > 0:\n i += 1\n continue\n\n # Skip if column is occupied.\n if self.attacked_cols[i] > 0:\n i += 1\n continue\n\n # Skip if diagonals are occupied.\n if (self.attacked_diag_l[j + i] > 0) or \\\n (self.attacked_diag_r[j - i + self.width] > 0):\n i += 1\n continue\n\n # Skip if the current piece attacks already placed units.\n if self.is_attacking(j, i, kind):\n i += 1\n continue\n\n # Mark the board.\n self.place_piece(j, i, kind, 1)\n\n self.search(piece_number + 1)\n\n # Un-mark the board.\n self.place_piece(j, i, kind, -1)\n i += 1\n j += 1\n f_x = 0", "def __init__(self):\n self.board = [[0 for i in range(9)]]*9\n self.board = [[0, 0, 0, 0, 3, 0, 9, 0, 0],\n [0, 0, 3, 0, 8, 0, 0, 0, 7],\n [6, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 5, 8, 3, 6, 0, 0, 0, 0],\n [0, 1, 0, 8, 9, 4, 0, 6, 0],\n [0, 0, 0, 0, 2, 7, 8, 4, 0],\n [0, 0, 9, 0, 0, 0, 0, 0, 8],\n [7, 0, 0, 0, 4, 0, 6, 0, 0],\n [0, 0, 5, 0, 1, 0, 0, 0, 0]]", "def add_piece(self, pos, piece):\n self._board[self.ind(pos)[0]][self.ind(pos)[1]] 
= piece", "def makeState(*args,**kwargs):\n \n cells = []\n\n for item in args:\n #print item\n cells.append(item)\n \n newState = State(cells)\n #newState.printBoard()\n return newState", "def check_legal(self, cur_pos, new_pos, board, state):\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n piece = self.piece_type(cur_pos, board)\n\n if state == \"UNFINISHED\":\n if (new_row == cur_row + 3) and (new_col == cur_col + 2): #F5\n if board[cur_row + 1][cur_col] and board[cur_row + 2][cur_col + 1] is not None:\n print(\"hello 1 elephant\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"1for some reason it thinks the new pos has a color of the same piece\")\n return\n print(\"elephant moved down and right\")\n return True\n\n elif (new_row == cur_row - 3) and (new_col == cur_col - 2): #B1\n print(\"Hello im here\")\n # checking left and right are valid\n if board[cur_row - 1][cur_col] and board[cur_row - 2][cur_col - 1] is not None:\n print(\"horse attempted to move left and up the board\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n return\n print(\"e moved up and left\")\n return True\n\n elif (new_row == cur_row + 3) and (new_col == cur_col - 2): #\n # checking left and right are valid\n if board[cur_row + 1][cur_col] and board[cur_row + 2][cur_col - 1] is not None:\n print(\"hello e3\")\n return False\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n return False\n print(\"e moved down and right\")\n return True\n\n elif (new_row == cur_row - 3) and (new_col == cur_col + 2): #F1\n # checking left and right are valid\n if board[cur_row - 1][cur_col] and board[cur_row - 2][cur_col + 1] is not None:\n print(\"hello e4\")\n return False\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n return False\n print(\"Horse moved down and left 2\")\n return True\n #---------------------------------------------------------------------------------------------------------------\n # Check if the forwards and backwards is legal\n elif (new_row == cur_row - 2) and (new_col == cur_col + 3): #G2\n # checking left and right are valid\n if board[cur_row][cur_col + 1] and board[cur_row - 1][cur_col + 2] is not None:\n print(\"hello e5\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"bye 5e\")\n return\n print(\"it worked e5\")\n return True\n\n elif (new_row == cur_row - 2) and (new_col == cur_col - 3): #A2\n # checking left and right are valid\n if board[cur_row][cur_col - 1] and board[cur_row - 1][cur_col - 2] is not None:\n print(\"hello e6\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"bye 6e\")\n return\n print(\"it worked e6\")\n return True\n\n elif (new_row == cur_row + 2) and (new_col == cur_col + 3): #G6\n # checking left and right are valid\n if board[cur_row][cur_col + 1] and board[cur_row - 1][cur_col - 2] is not None:\n print(\"hello 7e\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"ebye 7\")\n return\n print(\"it worked e7\")\n return True\n\n elif (new_row 
== cur_row + 2) and (new_col == cur_col - 3): #A6\n # checking left and right are valid\n if board[cur_row][cur_col - 1] and board[cur_row + 1][cur_col - 2] is not None:\n print(\"hello 8\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"bye 8\")\n return\n print(\"it worked 8\")\n return True\n# else:\n # print(\"it actually never entered the if statement?\"\n #return False\n else:\n print(\"False\")\n return False", "def draw_pieces(screen, board):\n for row in range(DIMENSION):\n for col in range(DIMENSION):\n piece = board[row][col]\n # Check for empty square\n if piece != \"--\":\n screen.blit(IMAGES[piece], p.Rect(col * SQ_SIZE, row * SQ_SIZE, SQ_SIZE, SQ_SIZE))", "def __init__(self):\n\n self._length = 8\n self.board = []\n self.columns = \"ABCDEFGH\"\n for colNum in range(0, self._length):\n self.board.append([])\n for rowNum in range(0, self._length):\n self.board[colNum].append(Tile(colNum, rowNum))\n\n self.board[3][3].color = \"blue\"\n self.board[3][4].color = \"red\"\n self.board[4][3].color = \"red\"\n self.board[4][4].color = \"blue\"" ]
[ "0.6446085", "0.63221157", "0.6167907", "0.6051009", "0.6044088", "0.6041356", "0.5958236", "0.5945604", "0.5903196", "0.589366", "0.5844264", "0.5821776", "0.5812927", "0.5807781", "0.57963544", "0.5786014", "0.5778081", "0.5771436", "0.5771375", "0.5727038", "0.56882375", "0.5679382", "0.5670196", "0.56677043", "0.56610125", "0.5621925", "0.5593816", "0.55867386", "0.55835724", "0.55818385", "0.55786866", "0.5577297", "0.5569712", "0.55384517", "0.55345863", "0.54950356", "0.5483367", "0.54781973", "0.5469722", "0.5461524", "0.5457811", "0.54561615", "0.5443248", "0.5442358", "0.5427649", "0.54243594", "0.5415963", "0.5408021", "0.54053736", "0.54022807", "0.5386499", "0.5385063", "0.53846914", "0.53842366", "0.53831476", "0.53788596", "0.53754526", "0.53738976", "0.53636986", "0.5358552", "0.53558445", "0.5348701", "0.534801", "0.53416014", "0.53393614", "0.5325477", "0.53117085", "0.5299166", "0.52972126", "0.52899194", "0.5285695", "0.5279483", "0.5272826", "0.52685446", "0.52658176", "0.5264952", "0.52611685", "0.5260712", "0.52594715", "0.5258222", "0.5252633", "0.5252436", "0.52493054", "0.5246555", "0.5243941", "0.52409434", "0.524002", "0.5239579", "0.5234154", "0.5225482", "0.5225031", "0.52222323", "0.5221612", "0.52202386", "0.52178264", "0.5217414", "0.52100897", "0.52096796", "0.5207044", "0.5204691" ]
0.6202576
2
Use ASCII to illustrate the state of the Lonpos.
def show( self): def symbol( i): return i<0 and (i==-2 and ' ' or '0') or chr(ord('a') + i) X, Y = np.max( self.board.positions, 0) # -2 to indicate outside board. display = np.zeros( (X+1,Y+1), dtype=int) - 2 for x, y in self.board.positions: display[x, y] = -1 # -1 to indicate unoccupied for p, i in self.occupation.items(): x, y = self.board.positions[p] display[x, y] = i for x in xrange(X+1): s = ''.join( [ symbol( display[x, y]) for y in xrange(Y+1) ]) print s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self):\n s = \" position:\" + str(self.pos) + \"\\n\"\n s += \" heading: \" + str(self.heading) + \"\\n\"\n return s", "def __str__(self):\n tapeline = self.tape.format(\n self.index - 10, self.index + 11) + ' : state {}'.format(self.state)\n pointline = ' ' * 10 + '^' + ' ' * 11 + \\\n ' : index {}'.format(self.index)\n\n return tapeline + '\\n' + pointline", "def line(self):\n\t\treturn self.ESC+\"32m-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\"+self.ESC+\"0m\\r\\n\"", "def lammps_pos_text(structure, species):\n\n pos_text = \"\\n\"\n for count, (pos, spec) in enumerate(zip(structure.positions, species)):\n pos_text += f\"{count+1} {spec} {pos[0]} {pos[1]} {pos[2]}\\n\"\n return pos_text", "def banner_ascii():\n print(\"\")\n print(f\"\\n{RED} Steganography Tool{RESET}\")\n print(f\"{RED} Made By {RESET}\")\n print(f\"{RED} Ehthe Samul Islam Laskar USN:1DS16CS712 {RESET}\")\n print(f\"{RED} B Padma USN:1DS19CS420{RESET}\")\n print(f\"{RED} Nikhil D Kanyal USN:1DS17CS731{RESET}\")\n print(f\"{YELLOW}Type 'help' to see commands{RESET}\")", "def _repr_(self):\n return 'A line in the direction ' + repr(self.vector());", "def __str__(self):\n return self.get_ascii_trunk() + self.get_ascii_leaves()", "def lammps_pos_text_charged(structure, charges, species):\n\n pos_text = \"\\n\"\n for count, (pos, chrg, spec) in enumerate(\n zip(structure.positions, charges, species)\n ):\n pos_text += f\"{count+1} {spec} {chrg} {pos[0]} {pos[1]} {pos[2]}\\n\"\n return pos_text", "def basic_char_setup( self ):\n\t\tsmall_bar = 3 # number of points per bar\n\t\twide_bar = round(small_bar * 2.25,0) # 2.25 x small_bar\n\t\tdpl = 50 # dots per line 300dpi/6lpi = 50dpl\n\t\tself._nb = bytes( self.owner.PRINTER_ESC +\n\t\t\t\t\t\t ( '*c%02ia%ib0P' % (small_bar, self.bc_height*dpl) ) + \n\t\t\t\t\t\t self.owner.PRINTER_ESC + \n\t\t\t\t\t\t (\"*p+%02iX\" % small_bar) )\n\t\tself._wb = bytes( self.owner.PRINTER_ESC +\n\t\t\t\t\t\t ('*c%02ia%ib0P' % (wide_bar, self.bc_height*dpl) )+\n\t\t\t\t\t\t self.owner.PRINTER_ESC +\n\t\t\t\t\t\t ('*p+%02iX' % wide_bar ) )\n\t\tself._ns = bytes( self.owner.PRINTER_ESC + ( '*p+%02iX' % small_bar ) )\n\t\tself._ws = bytes( self.owner.PRINTER_ESC + ( '*p+%02iX' % wide_bar ) )\n \n\t\t# DONE nb = bc39_esc+\"*c\"+TRANSFORM(small_bar,'99')+\"a\"+Alltrim(STR(bc39_height*dpl))+\"b0P\"+bc39_esc+\"*p+\"+TRANSFORM(small_bar,'99')+\"X\"\n\t\t# DONE wb = bc39_esc+\"*c\"+TRANSFORM(wide_bar,'99')+\"a\"+Alltrim(STR(bc39_height*dpl))+\"b0P\"+bc39_esc+\"*p+\"+TRANSFORM(wide_bar,'99')+\"X\"\n\t\t# DONE ns = bc39_esc+\"*p+\"+TRANSFORM(small_bar,'99')+\"X\"\n\t\t# DONE ws = bc39_esc+\"*p+\"+TRANSFORM(wide_bar,'99')+\"X\"\n \n\t\t# adjust cusor position to start at top of line and return to bottom of line\n\t\tself._bc_start = bytes( self.owner.PRINTER_ESC + '*p-50Y' )\n\t\tself._bc_end = bytes( self.owner.PRINTER_ESC + '*p+50Y' )\n\t\t# DONE bc39_start = bc39_esc+\"*p-50Y\"\n\t\t# DONE bc39_END = bc39_esc+\"*p+50Y\"\n\n\t\t# setup the structure allowing to print the code codebar section for various LETTERS\n\t\tself._char39 = { u'1' : 'wb+ns+nb+ws+nb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'2' : 'nb+ns+wb+ws+nb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'3' : 'wb+ns+wb+ws+nb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'4' : 'nb+ns+nb+ws+wb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'5' : 'wb+ns+nb+ws+wb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'6' : 'nb+ns+wb+ws+wb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'7' : 'nb+ns+nb+ws+nb+ns+wb+ns+wb' , \n\t\t\t\t\t\t u'8' : 'wb+ns+nb+ws+nb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u'9' : 
'nb+ns+wb+ws+nb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u'0' : 'nb+ns+nb+ws+wb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u'A' : 'wb+ns+nb+ns+nb+ws+nb+ns+wb' , \n\t\t\t\t\t\t u'B' : 'nb+ns+wb+ns+nb+ws+nb+ns+wb' , \n\t\t\t\t\t\t u'C' : 'wb+ns+wb+ns+nb+ws+nb+ns+nb' , \n\t\t\t\t\t\t u'D' : 'nb+ns+nb+ns+wb+ws+nb+ns+wb' , \n\t\t\t\t\t\t u'E' : 'wb+ns+nb+ns+wb+ws+nb+ns+nb' , \n\t\t\t\t\t\t u'F' : 'nb+ns+wb+ns+wb+ws+nb+ns+nb' , \n\t\t\t\t\t\t u'G' : 'nb+ns+nb+ns+nb+ws+wb+ns+wb' , \n\t\t\t\t\t\t u'H' : 'wb+ns+nb+ns+nb+ws+wb+ns+nb' , \n\t\t\t\t\t\t u'I' : 'nb+ns+wb+ns+nb+ws+wb+ns+nb' , \n\t\t\t\t\t\t u'J' : 'nb+ns+nb+ns+wb+ws+wb+ns+nb' , \n\t\t\t\t\t\t u'K' : 'wb+ns+nb+ns+nb+ns+nb+ws+wb' , \n\t\t\t\t\t\t u'L' : 'nb+ns+wb+ns+nb+ns+nb+ws+wb' , \n\t\t\t\t\t\t u'M' : 'wb+ns+wb+ns+nb+ns+nb+ws+nb' , \n\t\t\t\t\t\t u'N' : 'nb+ns+nb+ns+wb+ns+nb+ws+wb' , \n\t\t\t\t\t\t u'O' : 'wb+ns+nb+ns+wb+ns+nb+ws+nb' , \n\t\t\t\t\t\t u'P' : 'nb+ns+wb+ns+wb+ns+nb+ws+nb' , \n\t\t\t\t\t\t u'Q' : 'nb+ns+nb+ns+nb+ns+wb+ws+wb' , \n\t\t\t\t\t\t u'R' : 'wb+ns+nb+ns+nb+ns+wb+ws+nb' , \n\t\t\t\t\t\t u'S' : 'nb+ns+wb+ns+nb+ns+wb+ws+nb' , \n\t\t\t\t\t\t u'T' : 'nb+ns+nb+ns+wb+ns+wb+ws+nb' , \n\t\t\t\t\t\t u'U' : 'wb+ws+nb+ns+nb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'V' : 'nb+ws+wb+ns+nb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'W' : 'wb+ws+wb+ns+nb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'X' : 'nb+ws+nb+ns+wb+ns+nb+ns+wb' , \n\t\t\t\t\t\t u'Y' : 'wb+ws+nb+ns+wb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'Z' : 'nb+ws+wb+ns+wb+ns+nb+ns+nb' , \n\t\t\t\t\t\t u'-' : 'nb+ws+nb+ns+nb+ns+wb+ns+wb' , \n\t\t\t\t\t\t u'.' : 'wb+ws+nb+ns+nb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u' ' : 'nb+ws+wb+ns+nb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u'*' : 'nb+ws+nb+ns+wb+ns+wb+ns+nb' , \n\t\t\t\t\t\t u'$' : 'nb+ws+nb+ws+nb+ws+nb+ns+nb' , \n\t\t\t\t\t\t u'/' : 'nb+ws+nb+ws+nb+ns+nb+ws+nb' , \n\t\t\t\t\t\t u'+' : 'nb+ws+nb+ns+nb+ws+nb+ws+nb' , \n\t\t\t\t\t\t u'%' : 'nb+ns+nb+ws+nb+ws+nb+ws+nb' }", "def print_pos(pos):\n # TO DO: EXCLUDE FIRST LINE\n s = \"%BLOCK POSITIONS_FRAC\\n\" + str(pos) + \"\\n%ENDBLOCK POSITIONS_FRAC\"\n return s", "def print_title():\r\n HANGMAN_ASCII_ART = \"\"\"welcome to the game hangman\r\n _ _ \r\n | | | | \r\n | |__| | __ _ _ __ __ _ _ __ ___ __ _ _ __ \r\n | __ |/ _` | '_ \\ / _` | '_ ` _ \\ / _` | '_ \\ \r\n | | | | (_| | | | | (_| | | | | | | (_| | | | |\r\n |_| |_|\\__,_|_| |_|\\__, |_| |_| |_|\\__,_|_| |_|\r\n __/ | \r\n |___/\r\n\"\"\"\r\n print(HANGMAN_ASCII_ART)", "def __repr__(self) -> str:\n return \"{}({!r}, {!r}, {!r})\".format(\n self.__class__.__name__,\n \"\".join(self.tape),\n self.blank_symbol,\n self.current_position,\n )", "def __repr__(self):\n string = \"Current state: \\n\"\n if self.state[0] == 0: # We're on the left side\n string += \"M: \"\n string += str(self.state[1]).ljust(10)\n string += \"M: \"\n string += str(TOTAL_NO_MISSIONARIES - self.state[1]).ljust(10)\n string += \"\\n\"\n\n string += \"C: \"\n string += str(self.state[2]).ljust(10)\n string += \"C: \"\n string += str(TOTAL_NO_CANNIBALS - self.state[2]).ljust(10)\n string += \"\\n\"\n\n string += \"Boat position: left\\n\"\n else: # We're on the right side\n string += \"M: \"\n string += str(TOTAL_NO_MISSIONARIES - self.state[1]).ljust(10)\n string += \"M: \"\n string += str(self.state[1])\n string += \"\\n\"\n\n string += \"C: \"\n string += str(TOTAL_NO_CANNIBALS - self.state[2]).ljust(10)\n string += \"C: \"\n string += str(self.state[2]).ljust(10)\n string += \"\\n\"\n\n string += \"Boat position: right\\n\"\n string += \"\\n\"\n return string", "def __str__(self):\r\n x, y, z = self.pos\r\n return self.label + 
f\" {x} {y} {z}\"", "def print_instructions(self):\n\t\tprint('\\n\\n==========================================================================')\n\t\tprint('==========================================================================\\n')\n\t\tprint('Welcome to Tic Tac Toe, the came you know and love. \\nThe rules are the same ones you know and love. \\nTo make a move just type the coordinates of the spot like so - row,column. \\nNo spaces please! Lets go ahead and start! Here is a picuter of the board with some coordinates just in case!\\n')\n\t\tprint('=====================')\n\t\tprint('|| 0,0 | 0,1 | 0,2 ||')\n\t\tprint(' -----------------')\n\t\tprint('|| 1,0 | 1,1 | 1,2 ||')\n\t\tprint(' -----------------')\n\t\tprint('|| 2,0 | 2,1 | 2,2 ||')\n\t\tprint('=====================')\n\t\tprint('\\n==========================================================================')\n\t\tprint('==========================================================================\\n\\n')", "def status(self):\n\n for index, x in enumerate(self.lot):\n print('|', end='')\n for spot, value in enumerate(x):\n if value == 1:\n print(\"|\", end='')\n if value == 2:\n print(\" |\", end='')\n if value == 3:\n print(\" |\", end='')\n if value == -1:\n print(\"X|\", end='')\n if value == -2:\n print(\"XXX|\", end='')\n if value == -3:\n print(\"XXXXX|\", end='')\n print()", "def __str__(self):\n state = ''\n state += ' '.join([str(x) for x in self.pos]) + ' '\n state += ''.join([str(x) + ' ' + str(y) + ' ' for x,\n y in zip(self.BU, self.BD)])\n for e in self.BF:\n state += ' '.join([str(x) for x in e])\n state += ' '\n state += ' '.join([str(x) for x in self.LU]) + ' '\n state += ' '.join([str(x) for x in self.LD]) + ' '\n\n return state", "def __repr__(self):\r\n return '(' + str(self.position) + ','+ str(self.left_cont) + ',' \\\r\n + str(self.right_cont) + ',' + self.line + ')'", "def __str__(self):\n return \"%s\\n\" % self.text + \" \" * self.col + \"^\"", "def exemple():\r\n\r\n case_1 = \"\\u25CC\"\r\n case_1 = u\"{}\".format(case_1)\r\n fourmi_1_1 = \"\\u22C0\"\r\n fourmi_1_1 = u\"{}\".format(fourmi_1_1)\r\n fourmi_2_1 = \"\\u21CA\"\r\n fourmi_2_1 = u\"{}\".format(fourmi_2_1)\r\n fourmi_3_1 = \"\\u25BC\"\r\n fourmi_3_1 = u\"{}\".format(fourmi_3_1)\r\n fourmi_1_2 = \"\\u22C0\"\r\n fourmi_1_2 = u\"{}\".format(fourmi_1_2)\r\n fourmi_2_2 = \"\\u21C8\"\r\n fourmi_2_2 = u\"{}\".format(fourmi_2_2)\r\n fourmi_3_2 = \"\\u25B2\"\r\n fourmi_3_2 = u\"{}\".format(fourmi_3_2)\r\n clods_1 = \"\\u2726\"\r\n clods_1 = u\"{}\".format(clods_1)\r\n clods_2 = \"\\u2737\"\r\n clods_2 = u\"{}\".format(clods_2)\r\n clods_3 = \"\\u2739\"\r\n clods_3 = u\"{}\".format(clods_3)\r\n \r\n print(term.move_xy(82,3) + term.white + 'DEPOT : ' + (case_1))\r\n print(term.move_xy(82,5) + term.white + 'Clods de niveau 1 : ' + (clods_1))\r\n print(term.move_xy(82,6) + term.white + 'Clods de niveau 2 : ' + (clods_2))\r\n print(term.move_xy(82,7) + term.white + 'Clods de niveau 3 : ' + (clods_3))\r\n print(term.move_xy(82,8) + term.white + 'Fourmis de niveau 1 : ' + (fourmi_1_1) + ' ' + (fourmi_1_2))\r\n print(term.move_xy(82,9) + term.white + 'Fourmis de niveau 2 : ' + (fourmi_2_1) + ' ' + (fourmi_2_2))\r\n print(term.move_xy(82,10) + term.white + 'Fourmis de niveau 3 : ' + (fourmi_3_1) + ' ' + (fourmi_3_2))\r\n print(term.move_xy(82,12) + term.white + 'Joueur 1 vous jouez en rouge.')\r\n print(term.move_xy(82,13) + term.white + 'Joueur 2 vous jouez en jaune.')", "def __str__(self):\n return \"hl(\" + str(self.point) + \",\" + str(self.angle) + 
\")\"", "def do_display_ascii(self, address):\n address = self.ParseAddressExpr(address)\n string = self.reader.ReadAsciiString(address)\n if string == \"\":\n print(\"Not an ASCII string at %s\" % self.reader.FormatIntPtr(address))\n else:\n print(\"%s\\n\" % string)", "def print( self, str, pos=None ):\n\t\tif pos:\n\t\t\tself.set_cursor( pos )\n\t\tself.write( str.encode(\"ASCII\") )", "def print_state(self):\n grid = [[\".\" for _ in range(self.width)] for _ in range(self.height)]\n #icons = [\"^\", \"/\", \">\", \"\\\\\", \"|\", \"/\", \"<\", \"\\\\\"] # NON-UNICODE, uncomment if problems\n icons = [chr(0x2191), chr(0x2197), chr(0x2192), chr(0x2198), \\\n chr(0x2193), chr(0x2199), chr(0x2190), chr(0x2196)]\n for robot in self.robots:\n grid[robot[1]][robot[0]] = icons[(robot[2]+robot[3]) % 8]\n for item in self.items:\n if item[2] == 1:\n grid[item[1]][item[0]] = \"O\"\n elif item[2] == 2:\n grid[item[1]][item[0]] = \"*\"\n print(\"-\"*(self.width+2))\n for i in range(self.height):\n print(\"|\", end=\"\")\n for j in range(self.width):\n print(grid[i][j], end=\"\")\n print(\"|\")\n print(\"-\"*(self.width+2))", "def display_state(self):\r\n\r\n print('\\n')\r\n print('>>CURRENT STATE')\r\n ct = 0\r\n for i in self.state:\r\n for j in i:\r\n if j == -1:\r\n val = 'X'\r\n else:\r\n val = str(ct)\r\n if len(val) == 1:\r\n print(' ' + val + ' ', end='')\r\n else:\r\n print(val + ' ', end='')\r\n ct += 1\r\n print('\\n')", "def __repr__(self):\n representantion = ''\n\n for i in range(3):\n for j in range(3):\n representantion += str(self.state[3 * i + j])\n\n if j == 2 and i != 2:\n representantion += '\\n'\n else:\n representantion += ' '\n\n return representantion", "def show(self):\n print('\\n'+'\\n'.join([' '.join([['.', 'O', 'X'][self.board[3*j + i]]\n for i in range(3)]) for j in range(3)]))", "def print_asmline(self,adr,mode,op_bytes, ins, op_str):\r\n MODE = \"T\" if mode else \"A\"\r\n line = (\r\n highlight(f\"{ins:<6} {op_str:<20}\", self.asm_hl, self.asm_fmt)\r\n .decode()\r\n .strip(\"\\n\")\r\n )\r\n if len(op_bytes) == 4:\r\n op_bytes = f\" {int(op_bytes, 16):04X} \"\r\n else:\r\n op_bytes = f\" {int(op_bytes, 16):08X} \"\r\n\r\n if self.baseAddr:\r\n print(\"\\n\" + MODE + self.color(\"YELLOW\", f\" {adr - self.baseAddr:08X} {adr:08X}\") + self.color(\"RED\", op_bytes) + line, end=\";\")\r\n else:\r\n print(\"\\n\" + MODE + self.color(\"YELLOW\",f\" {adr:08X}\") + self.color(\"RED\", op_bytes) + line, end=\";\")", "def __str__(self):\n line = ''\n line += self.board_state.__str__()\n line += self.move.__str__()\n line += '\\n'\n return line", "def __str__(self):\n return str(\"{0} {1} {2} {3}\".format(self.label, self.position[0], self.position[1], self.position[2]))", "def state(self) -> str:", "def lt_command(self):\n self.write(\n \"@SP\\nA=M-1\\nD=M\\n@NEG1\" + str(\n self.__label_num) + \"\\nD;JLT\\n@POS1\" + str(\n self.__label_num) +\n \"\\nD;JGE\\n(NEG1\" + str(\n self.__label_num) + \")\\n@SP\\nA=M-1\\nA=A-1\\nD=M\\n@POS2\" + str(\n self.__label_num) + \"\\nD;JGT\\n@CONT\"\n + str(self.__label_num) + \"\\n0;JMP\\n(POS1\" + str(\n self.__label_num) + \")\\n@SP\\nA=M-1\\nA=A-1\\nD=M\\n@NEG2\" +\n str(self.__label_num) + \"\\nD;JLT\\n@CONT\" + str(\n self.__label_num) + \"\\n0;JMP\\n(POS2\" + str(\n self.__label_num) + \")\\n@SP\"\n \"\\nA=M-1\\nA=A-1\\nM=0\\n@SP\\nM=M-1\\n@ENDLABEL\" + str(\n self.__label_num) + \"\\n0;JMP\\n(NEG2\" + str(\n self.__label_num) + \")\\n@SP\" +\n \"\\nA=M-1\\nA=A-1\\nM=-1\\n@SP\\nM=M-1\\n@ENDLABEL\" + str(\n self.__label_num) + 
\"\\n0;JMP\\n(CONT\" + str(\n self.__label_num) + \")\\n\"\n \"@SP\\nM=M-1\\nA=M\\nD=M\\n@SP\\nA=M-1\\nD=M-D\\n@TRUE\" + str(\n self.__label_num) + \"\\nD;JGE\\n@SP\\nA=M-1\\nM=-1\\n@ENDLABEL\" +\n str(self.__label_num) + \"\\n0;JMP\\n(TRUE\" + str(\n self.__label_num) + \")\\n@SP\\nA=M-1\\nM=0\\n(ENDLABEL\" +\n str(self.__label_num) + \")\\n\")", "def __repr__(self):\n return f\"Era '{self.text}' starting at {self.start}, ending at {self.end}\"", "def __str__(self):\n res = \"Pos 1: \" + str(self.s_pos) + \" Pos 2: \" + str(self.e_pos) + \" Dir: \" + str(self.dir)\n return res", "def _repr_(self):\n if self.is_multitape:\n pos = tuple(p for p, t in sorted(self.position, key=lambda x: x[1]))\n return 'multi-tape at %s' % (pos,)\n else:\n return 'tape at %s' % (self.position[0][0],)", "def status(self):\n str = \"%s\\n\\tpv %s\\n\" % (self.name,self.pvname)\n str += \"\\tcurrent position (user,dial): %f,%f\\n\" % (self.wm(),self.wm_dial())\n str += \"\\tuser limits (low,high) : %f,%f\\n\" % (self.get_lowlim(),self.get_hilim())\n try:\n str += \"\\tpreset position : %s\" % (self.presets.state())\n except AttributeError:\n pass\n return str", "def status(s):\n print(\"\\033 {}\".format(s))#print(\"\\033[1m{0}\\033[0m\".format(s))", "def display(self):\r\n\t\ts = self.options['space']\r\n\t\tv = self.level\r\n\t\tp = self.options['sep']\r\n\t\tt = self.options['tab']\r\n\t\tb = self.options['bullet']\r\n\t\tprint(v*t+b+s+self.abbrev+s+p+s+self.text)", "def head_plain():\n print (hair_buzz())\n print (eye_narrow())\n print (nose_triangle())\n print (mouth_smile())\n print (chin_plain())", "def __repr__(self):\n return \"{}: {}\".format(self.nodeid, self.lemma)", "def label(mi_, ma_):\n\treturn \"caractères Unicode des points de code {} à {}\".format(mi_, ma_)", "def __repr__(self):\r\n c = \"Player \" + self.checker + \" (\" + self.tiebreak + \", \" + str(self.lookahead) + \")\"\r\n return c", "def enlabel(mi_, ma_):\n\treturn \"Unicode characters from {} to {} codepoints\".format(mi_, ma_)", "def print_position(position):\n print('Packet Number # %s' % position)", "def __repr__(self):\n s = \"\"\n for y in range(0,HEIGHT):\n temp=\"\"\n for x in range(0,WIDTH):\n temp = temp+ str(self.gameState[x,y])\n s += temp+\"\\n\"\n return s", "def __repr__(self, state):\n print ' ',\n for w in range(len(state)+2):\n print \"___\",\n print '\\n'\n for x in state:\n print \"| \", x, \" |\"\n print ' ',\n for y in range(len(state)+2):\n print \"___\",\n print '\\n'\n return state", "def afficher_damier_ascii(infojeu):\n lignes = []\n lignes += list(\"Légende: 1=\"+ str(infojeu[\"joueurs\"][0][\"nom\"])+\n ', 2='+str(infojeu[\"joueurs\"][1][\"nom\"]) + \"\\n\")\n lignes += list(\" \"+\"-\"*35+\"\\n\")\n for i in range(1, 10):\n lignes += str(10-i) + \" | \"\n for j in range(1, 9):\n strplayer = \".\"\n if [j, 10-i] == infojeu[\"joueurs\"][0][\"pos\"]:\n strplayer = \"1\"\n elif [j, 10-i] == infojeu[\"joueurs\"][1][\"pos\"]:\n strplayer = \"2\"\n if [j+1, 10-i] in infojeu[\"murs\"][\"verticaux\"]:\n lignes += list(strplayer + \" | \")\n elif [j+1, 9-i] in infojeu[\"murs\"][\"verticaux\"]:\n lignes += list(strplayer + \" | \")\n else:\n lignes += list(strplayer + \" \")\n if [9, 10-i] == infojeu[\"joueurs\"][0][\"pos\"]:\n lignes += list(\"1 |\")\n elif [9, 10-i] == infojeu[\"joueurs\"][1][\"pos\"]:\n lignes += list(\"2 |\")\n else:\n lignes += list(\". 
|\")\n if i != 9:\n lignes += list(\"\\n |\")\n for k in range(1, 9):\n if i != 9:\n if [k, 10-i] in infojeu[\"murs\"][\"horizontaux\"]:\n lignes += list(\"----\")\n elif [k-1, 10-i] in infojeu[\"murs\"][\"horizontaux\"] and \\\n [k+1, 9-i] in infojeu[\"murs\"][\"verticaux\"]:\n lignes += list(\"---|\")\n elif [k-1, 10-i] in infojeu[\"murs\"][\"horizontaux\"]:\n lignes += list(\"--- \")\n elif [k+1, 9-i] in infojeu[\"murs\"][\"verticaux\"]:\n lignes += list(\" |\")\n else:\n lignes += list(\" \")\n if i != 9:\n if [8, 10-i] in infojeu[\"murs\"][\"horizontaux\"]:\n lignes += list(\"---|\")\n else:\n lignes += list(\" |\")\n lignes += list(\"\\n\")\n lignes += list(\"--|\"+ \"-\"*35+\"\\n\")\n lignes += list(\" | 1 2 3 4 5 6 7 8 9\")\n lignes = ''.join(lignes)\n print(lignes)", "def __str__(self):\n allowed = ['!', '@', '#', '$', '%', '^', '&', '*', '/', '.', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n returnstring = \"\"\n for row in self.positions:\n for char in row:\n if char.isupper() or char == 'r' or char in allowed:\n returnstring += \"| \" + char + \" \"\n else:\n returnstring += \"| \" + \"_\" + \" \"\n returnstring += \"\\n\"\n return returnstring", "def __getAsciiString(self):\n lines = []\n horizontalLine = ('-' * (26))\n lines.append(horizontalLine)\n for row in self.board:\n rowLine = '|'\n for col in row:\n if col == -1:\n col = 'O'\n if col == 0:\n col = '-'\n if col == 1:\n col = 'X'\n rowLine = rowLine + ' ' + col.__str__() + ' |'\n lines.append(rowLine)\n lines.append(horizontalLine)\n return '\\n'.join(lines)", "def _repr_(self):\n return 'A line in the direction ' + repr(self._representation_vector);", "def abdul(self):\n\t\tthismsg = \"\\r\\n\"+self.ESC+\"1;33m\"+self.A220+self.A220+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"1;30m\"+self.A220+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A220+self.A220+self.A220+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.A220+self.ESC+\"1;43m\"+self.A219+self.A178+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A223+self.A219+self.A219+self.A223+self.ESC+\"32m\"+self.A220+self.ESC+\"33m\"+self.A223+self.ESC+\"32m\"+self.A220+self.ESC+\"C\"+self.A254+self.ESC+\"33m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"1;30;43m\"+self.A176+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A219+self.ESC+\"1;43m\"+self.A177+self.A176+self.ESC+\"C\"+self.A176+self.ESC+\"C\"+self.ESC+\"37;40mSaga\"+self.ESC+\"Cof\"+self.ESC+\"Cthe\"+self.ESC+\"CRed\"+self.ESC+\"CDragon\"+self.ESC+\"C-\"+self.ESC+\"C\"+self.ESC+\"33mAbduls\"+self.ESC+\"CArmour \"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A223+\"\\r\\n\"\n\t\tthismsg += 
self.ESC+\"1;43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A219+self.A223+self.ESC+\"32m\"+self.A220+self.ESC+\"1;42m\"+self.A177+self.ESC+\"0;32m\"+self.A220+self.ESC+\"C\"+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"2C\"+self.ESC+\"0;33m\"+self.A223+self.A223+self.A223+self.ESC+\"30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"1;30;43m\"+self.A176+self.ESC+\"2C\"+self.ESC+\"0;33m\"+self.A223+self.A219+self.ESC+\"C\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"1;30;43m\"+self.A176+self.ESC+\"33m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A220+self.ESC+\"32m\"+self.A223+self.ESC+\"33m\"+self.A220+self.A219+self.A223+self.ESC+\"37m\"+self.A220+self.ESC+\"1;47m\"+self.A176+self.ESC+\"0m\"+self.A219+self.A223+self.ESC+\"1;30;47m\"+self.A176+self.A176+self.ESC+\"40m\"+self.A220+self.A220+self.ESC+\"0;33m\"+self.A223+self.ESC+\"30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A219+self.A219+self.A219+self.ESC+\"1;30;43m\"+self.A177+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1;30m\"+self.A223+self.ESC+\"4C\"+self.ESC+\"0;33mBehind\"+self.ESC+\"Cthe\"+self.ESC+\"Cdesk\"+self.ESC+\"Cof\"+self.ESC+\"Cthe\"+self.ESC+\"Carmour\"+self.ESC+\"Cshop\"+self.ESC+\"Cis\"+self.ESC+\"Can\\r\\n\"\n\t\tthismsg += self.ESC+\"1;43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A219+self.ESC+\"30;43m\"+self.A176+self.A177+self.ESC+\"C\"+self.ESC+\"37;40m\"+self.A223+self.ESC+\"1;47m\"+self.A177+self.A176+self.ESC+\"C\"+self.ESC+\"0m\"+self.A219+self.A219+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.A177+self.A178+self.ESC+\"C\"+self.ESC+\"0;30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A219+self.A219+self.ESC+\"1;30;43m\"+self.A178+self.ESC+\"C\"+self.ESC+\"40m\"+self.A223+self.A220+self.ESC+\"4C\"+self.ESC+\"0;33mamazingly\"+self.ESC+\"Cattractive\"+self.ESC+\"Clooking\"+self.ESC+\"Cfemale - she seems\\r\\n\"\n\t\tthismsg += self.ESC+\"1;43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A219+self.ESC+\"30;43m\"+self.A176+self.A177+self.ESC+\"33;40m\"+self.A223+self.ESC+\"1;37m\"+self.A220+self.ESC+\"47m\"+self.A177+self.A176+self.ESC+\"0m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"1;30m\"+self.A220+self.A220+self.A223+self.ESC+\"47m\"+self.A177+self.A178+self.ESC+\"C\"+self.ESC+\"0;30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A219+self.ESC+\"1;30;43m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"0;32m\"+self.A220+self.A254+self.ESC+\"3C\"+self.ESC+\"33mbusy, doing her mails but she\"+self.ESC+\"Casks\"+self.ESC+\"C\\\"\"+self.ESC+\"1mHow\\r\\n\"\n\t\tthismsg += 
self.ESC+\"43m\"+self.A219+self.ESC+\"0;33m\"+self.A223+self.A219+self.ESC+\"30;43m\"+self.A176+self.A177+self.ESC+\"C\"+self.ESC+\"1;37;47m\"+self.A178+self.ESC+\"40m\"+self.A222+self.A222+self.ESC+\"47m\"+self.A176+self.ESC+\"C\"+self.ESC+\"30m\"+self.A176+self.ESC+\"C\"+self.A177+self.ESC+\"40m\"+self.A220+self.ESC+\"47m\"+self.A178+self.ESC+\"40m\"+self.A223+self.A220+self.ESC+\"47m\"+self.A219+self.ESC+\"C\"+self.ESC+\"0;30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A223+self.ESC+\"32m\"+self.A220+self.A178+self.ESC+\"6C\"+self.ESC+\"1;33mmay\"+self.ESC+\"CI\"+self.ESC+\"Cbe\"+self.ESC+\"Cof\"+self.ESC+\"Cservice?\"+self.ESC+\"0;33m\\\"\\r\\n\"\n\t\tthismsg += self.ESC+\"1m\"+self.A220+self.ESC+\"0;33m\"+self.A223+self.ESC+\"C\"+self.A220+self.A220+self.ESC+\"C\"+self.ESC+\"1;37m\"+self.A223+self.ESC+\"47m\"+self.A178+self.ESC+\"0m\"+self.A220+self.ESC+\"1;47m\"+self.A177+self.ESC+\"0m\"+self.A220+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0m\"+self.A220+self.ESC+\"1;30m\"+self.A223+self.A223+self.A220+self.ESC+\"47m\"+self.A177+self.A178+self.ESC+\"C\"+self.ESC+\"0;30;43m\"+self.A177+self.ESC+\"33;40m\"+self.A223+self.ESC+\"32m\"+self.A220+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"1;33;43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A220+self.A223+self.ESC+\"C\"+self.A220+self.ESC+\"1;43m\"+self.A176+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1;37m\"+self.A223+self.ESC+\"0m\"+self.A220+self.ESC+\"2C\"+self.A220+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.A176+self.ESC+\"40m\"+self.A223+self.ESC+\"47m\"+self.A219+self.ESC+\"C\"+self.ESC+\"0;30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A220+self.ESC+\"32m\"+self.A223+self.ESC+\"1;30m\"+self.A220+self.ESC+\"7C\"+self.ESC+\"0;33m[\"+self.ESC+\"1mB\"+self.ESC+\"0;33m]\"+self.ESC+\"1muy\"+self.ESC+\"CArmour\\r\\n\"\n\t\tthismsg += self.ESC+\"43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A220+self.A223+self.ESC+\"1;37m\"+self.A220+self.A220+self.ESC+\"0m\"+self.A223+self.ESC+\"C\"+self.ESC+\"1;47m\"+self.A223+self.ESC+\"0m\"+self.A219+self.A220+self.A220+self.A220+self.A220+self.ESC+\"1;30;47m\"+self.A176+self.A177+self.ESC+\"40m\"+self.A220+self.ESC+\"0;33m\"+self.A223+self.A223+self.ESC+\"30;43m\"+self.A177+self.A176+self.ESC+\"1;40m\"+self.A219+self.ESC+\"7C\"+self.ESC+\"0;33m[\"+self.ESC+\"1mS\"+self.ESC+\"0;33m]\"+self.ESC+\"1mell\"+self.ESC+\"CArmour\\r\\n\"\n\t\tthismsg += self.ESC+\"43m\"+self.A219+self.ESC+\"0;33m\"+self.A219+self.ESC+\"30;43m\"+self.A176+self.A177+self.ESC+\"C\"+self.ESC+\"1;37;47m\"+self.A219+self.A178+self.ESC+\"40m\"+self.A220+self.ESC+\"47m\"+self.A177+self.A176+self.ESC+\"0m\"+self.A220+self.A220+self.A220+self.A220+self.A219+self.A220+self.A223+self.ESC+\"1;30m\"+self.A220+self.A220+self.A219+self.A219+self.A220+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1;30m\"+self.A219+self.ESC+\"7C\"+self.ESC+\"0;33m[\"+self.ESC+\"1mY\"+self.ESC+\"0;33m]\"+self.ESC+\"1mour\"+self.ESC+\"CStats\\r\\n\"\n\t\tthismsg += 
self.ESC+\"43m\"+self.A178+self.ESC+\"0;33m\"+self.A223+self.A220+self.A223+self.ESC+\"30;43m\"+self.A176+self.A223+self.ESC+\"1;37;40m\"+self.A223+self.A223+self.ESC+\"47m\"+self.A178+self.A177+self.A176+self.A176+self.ESC+\"0m\"+self.A219+self.A219+self.A223+self.ESC+\"1;30m\"+self.A220+self.A176+self.A177+self.A178+self.A223+self.A223+self.ESC+\"0;33m\"+self.A220+self.A219+self.ESC+\"1;30m\"+self.A219+self.ESC+\"7C\"+self.ESC+\"0;33m[\"+self.ESC+\"1mR\"+self.ESC+\"0;33m]\"+self.ESC+\"1meturn\"+self.ESC+\"Cto\"+self.ESC+\"CTown\\r\\n\"\n\t\tthismsg += self.ESC+\"43m\"+self.A177+self.ESC+\"0;33m\"+self.A219+self.A220+self.A219+self.A219+self.A223+self.ESC+\"32m\"+self.A220+self.ESC+\"1;42m\"+self.A176+self.ESC+\"0;32m\"+self.A220+self.ESC+\"C\"+self.ESC+\"33m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"C\"+self.A220+self.A220+self.A219+self.ESC+\"30;43m\"+self.A176+self.ESC+\"33;40m\"+self.A219+self.A219+self.ESC+\"1;30m\"+self.A223+self.ESC+\"0;33m\"+self.A220+self.A254+self.ESC+\"C\"+self.ESC+\"1;30m\"+self.A220+self.A223+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"33;43m\"+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A220+self.ESC+\"32m\"+self.A223+self.ESC+\"C\"+self.A178+self.A254+self.ESC+\"33m\"+self.A219+self.ESC+\"30;43m\"+self.A177+self.A176+self.ESC+\"33;40m\"+self.A223+self.A220+self.ESC+\"1;43m\"+self.A176+self.ESC+\"0;33m\"+self.A220+self.A223+self.A220+self.A223+self.A223+self.A220+self.ESC+\"3C\"+self.ESC+\"1;30m\"+self.A177+self.A220+self.ESC+\"2C\"+self.ESC+\"33m\"+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"1;30;43m\"+self.A176+self.ESC+\"33m\"+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A220+self.A220+self.ESC+\"32m\"+self.A254+self.ESC+\"33m\"+self.A223+self.A219+self.A219+self.A220+self.A223+self.A220+self.A220+self.A223+self.A223+self.ESC+\"1;30m\"+self.A220+self.A220+self.A219+self.ESC+\"2C\"+self.A220+self.A178+self.A220+self.ESC+\"C\"+self.ESC+\"33;43m\"+self.A177+self.ESC+\"2C\"+self.ESC+\"0m \"+self.ESC+\"3C\"+self.ESC+\"33m\"+self.A220+\"\\r\\n\"\n\t\tthismsg += 
self.A223+self.A223+self.A223+self.ESC+\"1;30m\"+self.A223+self.ESC+\"0;33m\"+self.A223+self.A223+self.ESC+\"1;30m\"+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1;30m\"+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1;30m\"+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1;30m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"1;30m\"+self.A223+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.A223+self.ESC+\"0m\\r\\n\"\n\t\treturn thismsg", "def one_leg(length, pos_m, pos_p):\n pos_m, mat_moves = one_player_leg(length, pos_m)\n if pos_p == pos_m:\n pos_p = 0\n pos_p, pat_moves = one_player_leg(length, pos_p)\n print('Mat:', end=' ')\n for move in mat_moves:\n print(move, end=' ')\n print('Pat:', end=' ')\n for move in pat_moves:\n print(move, end=' ')\n if pos_m == pos_p:\n pos_m = 0\n print('\\n')\n one_line_print(length, 'M', pos_m)\n one_line_print(length, 'P', pos_p)\n print('\\n')\n return pos_m, pos_p", "def one_line_print(length, player, pos):\n if pos == 0:\n print(player, 'in Home', end=' ')\n else:\n print('Home', end=' ')\n for i in range(1, length):\n if i == pos:\n print(player, end=' ')\n else:\n print('.', end=' ')\n if pos == length:\n print(player, 'in Finish')\n else:\n print('Finish')", "def showText(pos):\n\treturn OnscreenText( \\\n\t\ttext=\" \", \\\n\t\tstyle=1, fg=(0,0,0,1), pos=(-1.3, pos), \\\n\t\talign=TextNode.ALeft, scale = .06, mayChange = True)", "def msg(text):\n for line in text.splitlines():\n if JS.alignment == \"left\":\n print(demarkup(line))\n elif JS.alignment == \"center\":\n print(demarkup(line).center(get_terminal_size()[0] - 1))\n else:\n print(demarkup(line).rjust(get_terminal_size()[0] - 1))", "def get_instructions(self):\n return \"A non-negative whole number is chosen as the starting \\n\" \\\n \"valueby some neutral entity. In our case, a player will \\n\" \\\n \"choose it (i.e. through the use of input. The player whose \\n\" \\\n \"turn it is chooses some square of a positive whole number (\\n\" \\\n \"such as 1, 4, 9, 16, . . . ) to subtract from the \\n\" \\\n \"value, provided the chosen square is not larger. After \\n\" \\\n \"subtracting, we have a new value and the next player \\n\" \\\n \"chooses a square to ubtract from it. Play continues\\n\" \\\n \" to alternate between the two players until no moves are\\n\" \\\n \" possible. 
Whoever is about to play at that point loses!\"", "def __str__(self):\n out = \"{}.\".format(self.move_number)\n if self.white.san != \"\":\n out += \" \" + str(self.white)\n else:\n out += \"..\"\n if self.black.san != \"\":\n out += \" \" + str(self.black)\n if self.comment:\n out += \" {\" + self.comment + \"}\"\n return out", "def showInstructions():\n print(\"\"\"\n RPG Game\n ========\n Commands:\n go [direction]\n get [item]\n\n\t\"\"\")", "def print_line():\n print('+ - - - - + - - - - +'),", "def print_intro(self):\n \n print('Did you know mammals tend to have the shortest migration routes because walking takes more energy than flying or swimming?')", "def print_state(X):\n out = ''\n for coord in range(18):\n out += \"{0}\".format(STATE_VARS[coord])\n val = float(X[coord])\n out += \" {0: 2.4e}\\n\".format(val)\n\n print out", "def __str__(self):\n if self._active_player:\n def piece_to_index(piece):\n return (piece & 0xF)\n else:\n def piece_to_index(piece):\n return (piece & 0xE) | (0 if piece & 1 else 1)\n\n return '\\n'.join(map(\n lambda posY, row: ''.join(map(\n lambda posX, piece: self.EMOJI[\n piece_to_index(piece)\n if piece else\n 14 + ((posY + posX) % 2)],\n count(), row)),\n count(),\n self.board if self._active_player else reversed(\n [reversed(row) for row in self.board])))", "def __repr__(self) -> str:\n return f\"key:{self.key},pos:{self.pos},inside:{self.get_inside()},outside:{self.get_outside()}\"", "def print_chars(self):\n for v in voc.split('\\n'):\n pair = v.split(',')\n print(pair[0], pair[1], '\\t', self.epi.xsampa_list(pair[0]))", "def print_latex_state(Xs):\n\n out = ''\n iters = Xs.keys()\n iters.sort()\n\n for num in iters:\n out += ' & Iter {0} '.format(num)\n\n out += '\\\\\\\\ \\n'\n \n for coord in range(18):\n out += \"${0}$ \".format(STATE_VARS[coord])\n for num in iters:\n val = float(Xs[num][coord])\n out += \"& {0: 2.4e} \".format(val)\n out += '\\\\\\\\ \\n'\n\n print out", "def __str__(self):\n outstr = \"\"\n for i in range(3):\n for j in range(3):\n outstr += str(self.pos_to_num[(i, j)]) + \" \"\n outstr = outstr[:-1]\n outstr += \"\\n\"\n outstr += \"\\n\"\n return outstr", "def test_repr_format(self):\n t = OneHotEncode(3)\n assert t.repr_format(\"asfa\") == \"OneHotEncode(asfa)\"", "def __str__(self):\n local_s = 'F30A: '\n local_s += '\\n'\n return local_s", "def __str__(self):\n return \"Normal:\" + str(self.norm) + \"\\nColour:\" + self.colour", "def __repr__(self):\n return \"(\"+str(self.pos)+\",\"+str(self.color)+\")\"", "def __str__(self):\n return \"c(pos:\" + str(self.position) + \",rad:\" + str(self.radius) + \")\"", "def print_marks(self):\n\t\tSYMBOLS = {CLOSED: \".\", FLAG: \"x\", BOOM: \"#\", CLEAR: \" \"}\n\t\tfor y in range(self.height):\n\t\t\tfor x in range(self.width):\n\t\t\t\tm = self.marks[x][y]\n\t\t\t\tprint(SYMBOLS.get(m, m), end=\"\")\n\t\t\tprint(\"\")", "def nl():\n\tprint(\"\")", "def lowerPen(gcode):\r\n gcode.append(\"M300 S43\")\r\n #gcode.append(\"G0 Z0\")\r", "def __repr__(self):\n\t\tret = \"\"\n\t\tfor i, x in enumerate(self.squares):\n\n\t\t\tret += \"\\t\"\n\t\t\tfor j in range(32): ret += u\"\\u2015\"\n\t\t\tret += \"\\n\\t|\"\n\t\t\tfor y in x:\n\t\t\t\tret += str(y)\n\t\t\t\tret += \" | \"\n\n\t\t\tret += str(i+1) + \"\\n\"\n\n\t\tret += \"\\t\"\n\t\tfor i in range(32): ret += u\"\\u2015\"\n\t\tret += \"\\n \"\n\n\t\tfor l in self.letters:\n\t\t\tret += l+\" \"\n\t\treturn ret", "def __repr__(self):\r\n numLetters = self.numLetters\r\n S = ''\r\n S += 3*'\\n'\r\n S += ' '\r\n for i in 
range(numLetters):\r\n S += self.currentBoard[i] + ' '\r\n\r\n return S", "def show_status(self):\n color = (255, 255, 255)\n w, h = self.width, self.height\n x, y = self.pos_shift\n self.put_text(\"scale factor: %.2E\" % SCALE_FACTOR,\n color, (x, y))\n self.put_text(\"G: %.7E\" % G,\n color, (x, y + 25))\n self.put_text(\"number of objects: %d\" % len(self.phy.objects),\n color, (x, y + 50))\n self.put_text(\"x: %d\" % x,\n color, (w + x - 100, h + y - 50))\n self.put_text(\"y: %d\" % y,\n color, (w + x - 100, h + y - 25))", "def __str__(self, precision=2):\n return \"l(a=\" + str(round(self.slope, precision)) + \\\n \",b=\" + str(round(self.ordinate, precision)) + \")\"", "def test_repr_show(self):\n self.assertEquals(\n repr(self.t['CNNNN']),\n \"<Show Chaser Non-Stop News Network (CNNNN) (containing 2 seasons)>\"\n )", "def __str__(self) -> str:\n position = self.get_position()\n return f\"Baby at position ({position[0]}, {position[1]}) (row, col)\"", "def state_text(self, byte):\n c = chr(byte)\n if byte == telnet_IAC:\n self.next_fn = self.state_cmd\n self.telnet_cmd = []\n elif c in telnet_printable:\n self.inbuffer += c\n if self.termious:\n self.termious_hack(byte)", "def blueline(self):\n\t\treturn self.ESC+\"34m-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\"+self.ESC+\"0m\\r\\n\"", "def __str__(self):\n return self._str_hsp_header() + \"\\n\" + self._str_aln()", "def test_assembleNormal(self):\n self.assertEqual(irc.assembleFormattedText(A.normal[\"hello\"]), \"\\x0fhello\")", "def __str__(self):\n return str(self.t1)+\"<-->t1, \\t\"+str(self.t2)+\"<-->t2, \\t\"+str(self.phi)+\"<-->phi, \\t\"+str(self.m)+\"<-->m, \\t\"+str(self.t31)+\"<-->t31, \\t\"+str(self.t32)+\"<-->t32, \\n\"", "def get_instructions(self) -> str:\n instructions = \"Players take turns to occupy available positions \" \\\n \"on the \" \\\n \"board. Once half or more of a ley-line has been \" \\\n \"occupied\" \\\n \"one player, that ley-line is entirely captured by \" \\\n \"said player. 
The winner is the person who captures \" \\\n \"half\" \\\n \"or more of the ley-lines first.\"\n return instructions", "def __repr__(self):\r\n s = 'Player ' + self.checker + ' (' + self.tiebreak + ', ' + str(self.lookahead) + ')'\r\n return s", "def __str__(self):\n if self.__size != 0:\n [print(\"\") for i in range(0, self.__position[1])]\n for i in range(0, self.__size):\n [print(\" \", end=\"\") for j in range(0, self.__position[0])]\n [print(\"#\", end=\"\") for k in range(0, self.__size)]\n if i != self.__size - 1:\n print(\"\")\n return (\"\")", "def mostraCotxe(self):\n return str(self._l[0])+\" \"+str(self._l[1])+\" \"+str(self._l[2])+\" \"+str(self._l[3])", "def to_ascii(self):\n code = self.build()\n for i, line in enumerate(code):\n code[i] = line.replace('1', '|').replace('0', '_')\n return '\\n'.join(code)", "def loc(y,x):\n return '\\033[%s;%sH' % (str(y),str(x))", "def render(self, s):\n if s < self.num_states - 1:\n a = np.zeros((self.height, self.width), dtype=np.unicode_)\n # mark all locations as inaccessible first\n a[:, :] = u'█'\n # regular fields\n for c in self.locs:\n a[c[1], c[0]] = u'░'\n # mark exits\n for c in self.exitsP:\n a[c[1], c[0]] = 'E'\n for c in self.exitsN:\n a[c[1], c[0]] = 'F'\n # player character\n c = self.locs[s]\n a[c[1], c[0]] = u'☺'\n a = [' '.join(row) for row in a]\n a = '\\n'.join(reversed(a))\n return a\n else:\n return 'Game over!'", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def display(self):\n for r in range(1, self.size+1):\n print(\"+\" + (\"-+\"*self.size))\n print(\"|\", end=\"\")\n for c in range(1, self.size+1):\n print(self.gameState[r,c], end=\"\")\n print(\"|\",end=\"\")\n print()\n print(\"+\" + (\"-+\"*self.size))", "def snapshot(self):\n text = \"\"\n text += \"{}:\\n{}\\n\".format('chi', np.array2string(self.chi))\n return text", "def status(s: str):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def chess_map(self):\n print('<>\\t', end='')\n for i in range(1, self.mapX + 1): # 打印列坐标\n print(i, end='\\t')\n print()\n for i in range(self.mapY): # 打印行坐标\n print(chr(65 + i), end='\\t')\n for j in range(self.mapX): # 打印每行的棋子\n print(self.pos[i][j], '\\t', end='')\n print('') # 换行" ]
[ "0.6132644", "0.612283", "0.60547996", "0.60443765", "0.5874516", "0.5855971", "0.5843751", "0.5838602", "0.5808942", "0.58021694", "0.57527065", "0.57486653", "0.57406795", "0.5740448", "0.570328", "0.56801385", "0.56685424", "0.5661247", "0.56583273", "0.5644304", "0.5644119", "0.56376237", "0.562322", "0.56155396", "0.56116045", "0.5596847", "0.5596366", "0.5596172", "0.5587158", "0.557279", "0.5554731", "0.5541631", "0.5541356", "0.55203086", "0.5517462", "0.5508785", "0.5506857", "0.55048525", "0.55043674", "0.54988736", "0.54984456", "0.5497527", "0.5496397", "0.54962695", "0.5496246", "0.54810345", "0.5479969", "0.5471892", "0.54705733", "0.54654217", "0.54610515", "0.54552644", "0.5453589", "0.5452598", "0.5448162", "0.54473096", "0.5445564", "0.5441792", "0.54390323", "0.5429653", "0.54185987", "0.54157084", "0.541233", "0.5410837", "0.54100984", "0.540979", "0.54014575", "0.53979003", "0.5394679", "0.5387633", "0.53739583", "0.537176", "0.53660315", "0.5364161", "0.5362094", "0.5359863", "0.5358622", "0.53568304", "0.53554285", "0.53499055", "0.5345599", "0.5343466", "0.53401357", "0.53318584", "0.5330654", "0.5326314", "0.532506", "0.53221464", "0.532141", "0.5311156", "0.5306225", "0.530409", "0.529014", "0.529014", "0.529014", "0.529014", "0.5285595", "0.52854466", "0.5283312", "0.52795017" ]
0.5684829
15
Count unoccupied neighbors of a point.
def countFreeNeighbors( p, board, occupation): n = 0 for m in [0, 1]: for d in [-1, 1]: pn = [p[0], p[1]] pn[m] += d j = board.grids.get( tuple(pn), None) if (j is None): continue # Not a board point if (occupation.has_key( j)): continue # Occupied n += 1 return n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_neighboors(self, x: int, y: int) -> int :\n\n cpt : int = 0\n min_x : int = max(0, x - 1)\n max_x : int = min(x + 1, self.width-1)\n min_y : int = max(0, y - 1)\n max_y : int = min(y + 1, self.height-1)\n\n x_tmp : int\n y_tmp : int\n for x_tmp in range(min_x, max_x+1):\n for y_tmp in range(min_y, max_y+1):\n if self.is_alive(x_tmp, y_tmp) and not (x_tmp == x and y_tmp == y):\n cpt += 1\n return cpt", "def count_neighbors(self, row, col):\n neighbors = 0\n neighbors += self.get_cell_value(row - 1, col - 1)\n neighbors += self.get_cell_value(row - 1, col)\n neighbors += self.get_cell_value(row - 1, col + 1)\n neighbors += self.get_cell_value(row, col - 1)\n neighbors += self.get_cell_value(row, col + 1)\n neighbors += self.get_cell_value(row + 1, col - 1)\n neighbors += self.get_cell_value(row + 1, col)\n neighbors += self.get_cell_value(row + 1, col + 1)\n\n return neighbors", "def n_neighbors(self,n):\n return sum(1 for x in self.hex.get_neighbors_ring(n) if x is not None and x.is_occupied == 1)", "def count_neighbors(self, x, y):\n # IMPLEMENT ME\n # HINT: You do not have to use a for-loop for this method; just\n # if-statements will suffice. Also, you do not need to indent further\n # than two levels further than this comment.\n neighbours = 0\n if x > 0 and y > 0:\n if self.board[x-1][y-1] == \"x\":\n neighbours += 1\n if x > 0:\n if self.board[x-1][y] == \"x\":\n neighbours += 1\n if x > 0 and y < self.width - 1:\n if self.board[x-1][y+1] == \"x\":\n neighbours += 1\n if y > 0:\n if self.board[x][y-1] == \"x\":\n neighbours += 1\n if y < self.width - 1:\n if self.board[x][y+1] == \"x\":\n neighbours += 1\n if x < self.height - 1 and y > 0:\n if self.board[x+1][y-1] == \"x\":\n neighbours += 1\n if x < self.height - 1:\n if self.board[x+1][y] == \"x\":\n neighbours += 1\n if x < self.height - 1 and y < self.width - 1:\n if self.board[x+1][y+1] == \"x\":\n neighbours += 1\n return neighbours", "def _count_living_neighbors(self, cell: Cell) -> int:\n count = 0\n # borders of the area in which we are trying to find neighbors\n # Let's assume y axis directs downside and x axis directs to the left\n \n for x in range(cell.x - 1, cell.x + 2):\n for y in range(cell.y - 1, cell.y + 2):\n if cell.x == x and cell.y == y:\n continue\n if (x, y) in self.living_cells.keys():\n count += 1\n \n return count", "def count_alive_cells(self, x, y):\n\n # indices of surrounding cells.\n ul = max(y - 1, 0) # upper left\n ur = min(y + 2, self.f_shape[1]) # upper right\n bl = max(x - 1, 0) # bottom left\n br = min(x + 2, self.f_shape[0]) # bottom right\n\n # slice\n cells = self.cells[bl:br, ul:ur]\n n_cells = np.count_nonzero(cells)\n\n return n_cells - self.cells[x][y]", "def neighbors(self, point):\n # Sanity checks\n # Check that point has same number of dimensions as graph\n if not len(point) == len(self.dimensions):\n raise Exception(\"Point has \" + str(len(point)) + \" dimensions, Coordination Space has \" + \\\n str(len(self.dimensions)) + \" dimensions.\")\n\n point_and_neighbors = self.point_neighbors_recursion(point) # All neighbors, including point\n point_and_neighbors_set = set()\n for i in point_and_neighbors:\n point_and_neighbors_set.add(tuple(i))\n\n point_and_neighbors_set.remove(point) # Remove point\n neighbors = point_and_neighbors_set # Renaming for readability\n\n neighbors = filter(self.in_bounds, neighbors) # Remove points that are out-of-bounds\n neighbors = filter(self.passable, neighbors) # Remove points that are not neighbors\n\n return neighbors", "def 
neighbors(self, x):\n pass", "def _count_seen_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n count += 1 if _is_occupied(grid, row, col, dx, dy) else 0\n return count", "def get_neighbours_sum(self, x, y, current_point) -> int:\n return np.sum(self.grid_array[x-1:x+2, y-1:y+2]) - current_point", "def count_island(row, col, island):\n count = 0\n for i in range(row):\n for j in range(col):\n count = count + floodfill(i, j, row, col, island)\n return count", "def count_neighbor_mines(self, x, y):\n\t\treturn sum(self.mines[n][m] for (n, m) in self.get_valid_neighbors(x, y))", "def count_neighbour_mines(self, x, y):\n neighbour_mines = 0\n for _x in range(x - 1, x + 2):\n for _y in range(y - 1, y + 2):\n if is_valid(_x, _y):\n if is_mine(self.board, _x, _y):\n neighbour_mines += 1\n return neighbour_mines", "def count_neighbour_mines(self, x, y):\n neighbour_mines = 0\n for _x in range(x - 1, x + 2):\n for _y in range(y - 1, y + 2):\n if is_valid(_x, _y):\n if is_mine(self.board, _x, _y):\n neighbour_mines += 1\n return neighbour_mines", "def num_neighbors(self):\n return self._num_neighbors", "def checkNumNeighbors():", "def neighbor_count(A):\n sum2 = lambda A, B: map2(add, A, B)\n neighbors = ((-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1))\n return reduce(sum2,\n map(lambda d: rotate2(A, d[0], d[1]),\n neighbors))", "def get_count_life_neighbor(arr, x, y, max_x, max_y):\n\tres_count = 0\n\n\tif x > 0 and y > 0:\n\t\tif arr[y-1][x-1]:\n\t\t\tres_count += 1\n\n\tif y > 0:\n\t\tif arr[y-1][x]:\n\t\t\tres_count += 1\n\n\tif y > 0 and x < max_x:\n\t\tif arr[y-1][x+1]:\n\t\t\tres_count += 1\n\n\tif x > 0:\n\t\tif arr[y][x-1]:\n\t\t\tres_count += 1;\n\n\tif x < max_x:\n\t\tif arr[y][x+1]:\n\t\t\tres_count += 1\n\n\tif y < max_y and x > 0:\n\t\tif arr[y+1][x-1]:\n\t\t\tres_count += 1\n\n\tif y < max_y:\n\t\tif arr[y+1][x]:\n\t\t\tres_count += 1\n\n\tif y < max_y and x < max_x:\n\t\tif arr[y+1][x+1]:\n\t\t\tres_count += 1\n\n\treturn res_count", "def countNeighbors(row, col, A):\n h = len(A)\n w = len(A[0])\n count = 0\n for x in range(-1, 2, 1):\n for y in range(-1, 2, 1):\n if abs(x) + abs(y) != 0:\n count += A[row+x][col+y]\n return count", "def neighbor(board, x, y, n, m):\n deltas = (\n (-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1),\n )\n count = 0\n for dx, dy in deltas:\n xx = x + dx\n yy = y + dy\n if xx >= 0 and xx < n and yy >= 0 and yy < m and board[xx][yy] % 2 == 1:\n count += 1\n\n return count", "def get_neighbors(self, line, col):\n neighbors = 0\n for line_shift in [-1, 0, 1]:\n for col_shift in [-1, 0, 1]:\n if line_shift == 0 and col_shift == 0:\n continue # Do not count given cell\n # % connects left/right and up/down\n i = (line + line_shift) % self.lines\n j = (col + col_shift) % self.cols\n if self[i][j] == self.cell_state['alive']:\n neighbors += 1\n return neighbors", "def get_number_neighbours_of_cell(self, x_cell, y_cell):\n alive_neighbours = 0\n \n # neighbour indices\n x_indices = [x_cell-1, x_cell, x_cell+1]\n y_indices = [y_cell-1, y_cell, y_cell+1]\n\n\n #TODO: use functional programming ^^^^^^\n #x_indices = list(filter(lambda x: x < 0 and x > self.size[0], x_indices))\n #y_indices = list(filter(lambda y: y < 0 and y > self.size[1], y_indices))\n \n # correct indices for cell neighbours based on wrap_around_borders\n #TODO: this so far only works for x,y same size..\n if self.wrap_around_borders:\n for 
indices in [x_indices, y_indices]:\n if -1 in indices:\n indices.remove(-1)\n indices.append(self.board_size[0] - 1)\n if self.board_size[0] in indices:\n indices.remove(self.board_size[0])\n indices.append(0)\n else:\n for indices in [x_indices, y_indices]:\n if -1 in indices:\n indices.remove(-1)\n if self.board_size[0] in indices:\n indices.remove(self.board_size[0])\n\n # check each neighbour status and add to counter\n for x in x_indices:\n for y in y_indices:\n alive_neighbours = alive_neighbours + self.board_state[x][y]\n\n # dont count own value\n alive_neighbours = alive_neighbours - self.board_state[x_cell][y_cell]\n\n return alive_neighbours", "def _get_neighbors(size, point):\n i, j = point\n\n neighbors = [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]\n _valid_neighbor = lambda neighbor: all(0 <= x < size for x in neighbor)\n neighbors = list(filter(_valid_neighbor, neighbors))\n \n return neighbors", "def get_neighbors_count(self, atom):\n return self._graph.get_connected_vertices_count(atom)", "def countNeighbors(oldgen, x, y):\n temp = 1\n\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n\n # TODO: this needs rewritin to be more understandable\n if not (i == 0 and j == 0):\n count += int(oldgen[(x + i + WID) % WID][(y + j + HGT) % HGT])\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp += 1\n\n count -= int(oldgen[x][y])\n\n return count", "def countAdjacentFloorNodes(self, x, y):\n\t\treturn self.isFloor(x - 1, y) + self.isFloor(x + 1, y) + self.isFloor(x, y - 1) + self.isFloor(x, y + 1)", "def _get_neighbours(point):\n # Pull coords out of point.\n x = point[0]\n y = point[1]\n z = point[2]\n return ((x-1, y, z), (x+1, y, z), (x, y-1, z), (x, y+1, z), (x, y, z-1), (x, y, z+1))", "def _count_subset_neighbors(v, X):\n return len(set(v.neighbors).intersection(X))", "def get_neighbors(self, row, col):\n neighbors = set()\n for d in [-1,1]:\n if row+d >= 0 and row+d < self._height and \\\n (row+d,col) in self._empty_spaces:\n neighbors.add((row+d,col))\n if col+d >= 0 and col+d < self._width and \\\n (row,col+d) in self._empty_spaces:\n neighbors.add((row,col+d))\n return neighbors", "def checkDimension(neighbour, current_point):\n for i in range(3):\n delta = abs(neighbour[i] - current_point[i])\n if delta > 0:\n return i", "def _count_adj_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n if row - 1 >= 0:\n if col - 1 >= 0:\n count += 1 if grid[row - 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row - 1][col + 1] == '#' else 0\n count += 1 if grid[row - 1][col] == '#' else 0\n if row + 1 < len(grid):\n if col - 1 >= 0:\n count += 1 if grid[row + 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row + 1][col + 1] == '#' else 0\n count += 1 if grid[row + 1][col] == '#' else 0\n if col - 1 >= 0:\n count += 1 if grid[row][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row][col + 1] == '#' else 0\n return count", "def get_neighbors_of(cell, board):\n count = 0\n (x, y) = cell\n for cell in board:\n if cell == (x - 1, y - 1):\n count += 1\n elif cell == (x, y - 1):\n count += 1\n elif cell == (x + 1, y - 1):\n count += 1\n elif cell == (x - 1, y):\n count += 1\n elif cell == (x + 1, y):\n count += 1\n elif cell == (x - 1, y + 1):\n count += 1\n elif cell == (x, y + 1):\n count += 1\n elif cell == (x + 1, y + 1):\n count += 1\n return count", "def count_alive_neighbors(self, status):\n kernel = np.array(\n [[1, 1, 1],\n [1, 0, 1],\n [1, 1, 1]])\n\n count 
= convolve2d(status, kernel, mode='same', boundary=\"wrap\")\n return count", "def living_neighbors(self):\n neighborCount = 0\n for neighbor in self.__neighbors:\n if neighbor.get_living() == True:\n neighborCount += 1\n return neighborCount", "def count_ones(self):\r\n count = 0\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (self.cells[x][y] == 1):\r\n count = count + 1\r\n return count", "def num_black_neighbors(tile, tiles):\n return sum([tiles[add(tile, step)] for step in NEIGHBORS])", "def numNeighbors(minesSet, row_index, cols_index, num_cols, num_rows):\n mines = 0\n for j in np.arange(max(0, cols_index-1), min(num_cols-1, cols_index+1)+1):\n for i in np.arange(max(0, row_index-1), min(num_rows-1, row_index+1)+1):\n if ((i, j) in minesSet):\n mines+=1\n return mines", "def count_neighbor_flags(self, x, y):\n\t\treturn sum(self.marks[n][m] == FLAG for (n, m) in self.get_valid_neighbors(x, y))", "def neighbors(self, point: Point, collected_keys: t.Set[str]):\n unopened_doors = {\n point\n for point, door_name in self.doors.items()\n if door_name.lower() not in collected_keys\n }\n\n return self.points.intersection(point.neighbors()) - unopened_doors", "def get_neighbour(self, y, x):\n if [y, x] in self.mine_locations:\n return Minesweeper.BOMB\n count = 0\n # (x-1, y-1), (x, y-1), (x+1, y-1),\n # (x-1, y), (x, y), (x+1, y),\n # (x-1, y+1), (x, y+1), (x+1, y+1)\n for xe in range(x - 1, x + 2):\n for ye in range(y - 1, y + 2):\n if [ye, xe] in self.mine_locations:\n count += 1\n return str(count)", "def num_good_neighbors(self):\n return self._num_good_neighbors", "def neighbors8(point):\n x, y = point\n return ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1),\n (x + 1, y + 1), (x - 1, y - 1), (x + 1, y - 1), (x - 1, y + 1))", "def get_neighbours_count(self, cell: Position) -> int:\n possible_neighbours = self.get_neighbours(cell)\n return sum(self.is_alive(n) for n in possible_neighbours)", "def get_neighbors(self, cell, count):\n row, col = cell\n # get all the neighbors\n neighbors = set([(min(self.height - 1, max(row + i, 0)), min(self.width - 1, max(col + j, 0))) \n for i in range(-1, 2)\n for j in range(-1, 2)])\n\n for neighbor in deepcopy(neighbors):\n if neighbor in self.safes or neighbor == cell:\n neighbors.remove(neighbor)\n elif neighbor in self.mines:\n neighbors.remove(neighbor)\n count -= 1\n\n return neighbors, count", "def get_no_vertices(self):\r\n return len(self.__neighbours.keys())", "def point_neighbors_recursion(self, point):\n # Sanity checks\n if point is None:\n raise ValueError(\"Cannot operate on None\")\n\n neighbors = []\n # 1-dimension\n if len(point) == 1:\n neighbors.append([point[0] - 1]) # left\n neighbors.append([point[0]]) # current\n neighbors.append([point[0] + 1]) # right\n\n return neighbors\n\n # n-dimensional\n for sub_dimension in self.point_neighbors_recursion(point[1:]):\n neighbors.append([point[0] - 1] + sub_dimension) # left + (n-1)-dimensional combinations\n neighbors.append([point[0]] + sub_dimension) # current + (n-1)-dimensional combinations\n neighbors.append([point[0] + 1] + sub_dimension) # right + (n-1)-dimensional combinations\n\n return neighbors", "def island_perimeter(grid):\n total = 0\n for x in range(0, len(grid)):\n for y in range(0, len(grid[0])):\n if grid[x][y] == 1:\n if x == 0 or grid[x - 1][y] == 0:\n total += 1\n if x == len(grid) - 1 or grid[x + 1][y] == 0:\n total += 1\n if y == len(grid[0]) - 1 or grid[x][y + 1] == 0:\n total += 1\n if y == 0 or grid[x][y - 1] == 0:\n total += 1\n return 
total", "def neighbors(self, row, col):\n alive_around = 0\n for i in range(row -1, row + 2):\n for j in range(col - 1, col + 2):\n irow = i % self.row\n icol = j % self.col\n if (not (irow == row and icol == col)):\n if (self.now[irow, icol]):\n alive_around = alive_around + 1\n\n return alive_around", "def island_perimeter(grid):\n count = 0\n for j, r in enumerate(grid):\n for i, c in enumerate(r):\n if c == 1:\n if j == 0 or grid[j - 1][i] == 0:\n count += 1\n if i == 0 or grid[j][i - 1] == 0:\n count += 1\n if j == len(grid) - 1 or grid[j + 1][i] == 0:\n count += 1\n if i == len(r) - 1 or grid[j][i + 1] == 0:\n count += 1\n return count", "def numIslands(grid):\n # count to store each new island found\n count = 0\n # If the grid is empty, return 0\n if not grid:\n return count\n\n y_max = len(grid)\n x_max = len(grid[0])\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n dfs(grid, i, j)\n count += 1\n return count", "def flags_nearby(self, y, x):\n count = 0\n l = [[ye, xe] for xe in range(\n x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]\n for ye, xe in l:\n if xe >= self.x or ye >= self.y:\n continue\n if self.table_state[ye][xe] == Minesweeper.FLAG:\n count += 1\n return str(count)", "def count_neighbors(lights, r, c):\n neighbors = 0\n\n if r > 0 and c > 0: # 1\n neighbors += 1 if lights[r - 1][c - 1] == \"#\" else 0\n\n if r > 0: # 2\n neighbors += 1 if lights[r - 1][c] == \"#\" else 0\n\n if r > 0 and c < GRID_SIZE - 1: # 3\n neighbors += 1 if lights[r - 1][c + 1] == \"#\" else 0\n\n if c < GRID_SIZE - 1: # 4\n neighbors += 1 if lights[r][c + 1] == \"#\" else 0\n\n if r < GRID_SIZE - 1 and c < GRID_SIZE - 1: # 5\n neighbors += 1 if lights[r + 1][c + 1] == \"#\" else 0\n\n if r < GRID_SIZE - 1: # 6\n neighbors += 1 if lights[r + 1][c] == \"#\" else 0\n\n if r < GRID_SIZE - 1 and c > 0: # 7\n neighbors += 1 if lights[r + 1][c - 1] == \"#\" else 0\n\n if c > 0: # 8\n neighbors += 1 if lights[r][c - 1] == \"#\" else 0\n\n return neighbors", "def checkAmountOfNeighbors(self):\n cellsToDelete = []\n for cell in self.cells:\n if(cell.numOfNeighbor > 3 or cell.numOfNeighbor < 2 or (cell.numOfNeighbor == 2 and cell.dead == True)):\n cellsToDelete.append(cell)\n elif(cell.numOfNeighbor == 3 and cell.dead == True):\n cell.makeAlive()\n cell.numOfNeighbor = 0\n\n self.removeCells(cellsToDelete)", "def neighbours(t,p):\r\n neighbour = set()\r\n\r\n if p[t][1] != 0:\r\n neighbour.add(tuple(p[t][1]))\r\n if p[t][2] != 0:\r\n neighbour.add(tuple(p[t][2]))\r\n if p[t][3] != 0:\r\n neighbour.add(tuple(p[t][3]))\r\n if p[t][4] != 0:\r\n neighbour.add(tuple(p[t][4]))\r\n \r\n return neighbour", "def alive_neighbors(live_coords, coord):\n if not live_coords or not coord:\n return False\n x, y = coord\n neighbors = [[(x - 1), y], [(x - 1), (y - 1)], [(x - 1), (y + 1)],\n [(x + 1), y], [(x + 1), (y - 1)], [(x + 1), (y + 1)],\n [x, (y - 1)], [x, (y + 1)]]\n intersection = [value for value in neighbors if value in live_coords]\n return len(intersection)", "def get_num_mines_around_position(self, x, y):\n mines = 0\n for row in range(y-1, y+2):\n for col in range(x-1, x+2):\n if row >= 0 and col >= 0 and row < len(self.mine_map) and col < len(self.mine_map[row]): # Don't check spaces that are outside of the array\n if self.mine_map[row][col]:\n mines += 1\n return mines", "def count_neighbor_mines(self, i, j):\n n_neighbor_mines = -1\n if not self.mines[i, j]:\n n_neighbor_mines = np.count_nonzero(\n self.mines[(i-1 if i > 0 else 0):i+2, (j-1 if 
j > 0 else 0):j+2])\n return n_neighbor_mines", "def get_neighbors(point):\n pt = point.copy()\n output= [point.copy() for i in range(4)]\n output[0:2] = map(Point.setY, output[0:2], [pt.getY()+ i for i in range(-1,2,2)])\n output[2:4]= map(Point.setX, output[2:4], [pt.getX()+ i for i in range(-1,2,2)])\n return output", "def island_perimeter(grid):\n count = 0\n for row in grid:\n size = len(row)\n row.insert(0, 0)\n row.append(0)\n grid.insert(0, [0 for x in range(size + 2)])\n grid.append([0 for x in range(size + 2)])\n\n for e, row in enumerate(grid):\n for i, num in enumerate(row):\n if num == 1:\n if grid[e][i - 1] != 1:\n count += 1\n if grid[e][i + 1] != 1:\n count += 1\n if grid[e - 1][i] != 1:\n count += 1\n if grid[e + 1][i] != 1:\n count += 1\n return count", "def island_perimeter(grid):\n\n counter = 0\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n if (grid[i][j] == 1):\n if ((j + 1) == len(grid[i]) or (grid[i][j + 1] == 0)):\n counter += 1\n if ((j - 1) < 0 or (grid[i][j - 1] == 0)):\n counter += 1\n if ((i + 1) == len(grid) or (grid[i + 1][j] == 0)):\n counter += 1\n if ((i - 1) < 0 or (grid[i - 1][j] == 0)):\n counter += 1\n return counter", "def neighbours(x, y):\n n = []\n for c in ((y-1, x-1), (y-1, x), (y-1, x+1), (y, x-1), (y, x+1), (y+1, x-1), (y+1, x), (y+1, x+1)):\n n.append(c)\n return n", "def getAdjacentCount(grid, x, y, X, Y, char):\n count = 0\n try{\n if x == 0:\n\n if y == 0:\n\n if x == X-1:\n\n if y == Y-1:\n }", "def count_islands(matrix):\n visited = init_visited(matrix)\n num_islands = 0\n for i in range(len(matrix)):\n for j in range(len(matrix)):\n if matrix[i][j] and not visited[i][j]:\n check_neighbours(matrix, (i, j), visited)\n num_islands += 1\n # print(visited)\n return num_islands", "def test_count_neighbors(self):\n m, n = 5, 5\n k, p = 0.2, 0.7\n agents = [ConwayAgent(ii, ii & 0x1 == 1) for ii in range(m * n)]\n C = ConwayModel(m, n, k, p, agents)\n\n to_count = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])\n expected = np.array([[1, 1, 2], [2, 3, 1], [0, 2, 1]])\n result = C.count_neighbors(to_count)\n self.assertTrue(np.all(expected == result))", "def count_constellations(points):\n\n num_points = len(points)\n edges = np.zeros((num_points, num_points), np.bool)\n for i in range(num_points):\n edges[i, i] = True\n point_i = points[i]\n for j in range(i+1, num_points):\n edges[i, j] = (point_i - points[j]) <= THRESHOLD\n edges[j, i] = edges[i, j]\n\n visited = set()\n constellations = []\n for i in range(num_points):\n if i in visited:\n continue\n\n constellations.append(build_constellation(edges, i, visited))\n\n return len(constellations)", "def nr_points(self):\n return len(self.x)", "def number_active_neighbors(graph, node):\n return np.sum(\n [\n graph.nodes[neighbor_idx][\"agent\"].active\n for neighbor_idx in graph[node].keys()\n ]\n )", "def neighbors(self):\n return self.mesh.neighbors()", "def neighbours(box, kps):\n box_duplicate = box.unsqueeze(2).repeat(1, 1, len(kps.t())).transpose(0, 1)\n kps_duplicate = kps.unsqueeze(1).repeat(1, len(box), 1)\n\n xmin = kps_duplicate[0].ge(box_duplicate[0])\n ymin = kps_duplicate[1].ge(box_duplicate[1])\n xmax = kps_duplicate[0].le(box_duplicate[2])\n ymax = kps_duplicate[1].le(box_duplicate[3])\n\n nbr_onehot = torch.mul(torch.mul(xmin, ymin), torch.mul(xmax, ymax)).t()\n n_neighbours = nbr_onehot.sum(dim=1)\n\n return nbr_onehot, n_neighbours", "def get_neighbours(self):\n return self.points_to.keys()", "def neighbors(i , j) :\n ns = []\n # vector de direction\n dx = [+1, +1, 
0, 1]\n dy = [0, +1, 1, -1]\n for d in range(4) :\n ns.append((i + dx[d], j + dy[d]))\n #remove neagative element\n ns = [i for i in ns if i[0] >= 0 and i[1] >= 0]\n return ns", "def fill_count(nid):\n n_edges = G.subgraph(G.neighbors(nid)).number_of_edges()\n deg = G.degree[nid]\n n_fill = deg*(deg-1)//2 - n_edges\n return n_fill", "def approximate_neighbors_count(self) -> Optional[int]:\n return pulumi.get(self, \"approximate_neighbors_count\")", "def check_neighbours(self, grid):\n if self.bomba:\n self.bombs_around = -1\n return\n\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n\n if neighbor.bomba:\n total += 1\n \n self.bombs_around = total", "def __get_total_neighbors(shape):\n from .util import prod\n\n ndim = len(shape)\n\n # Count the bulk of the pixels in the core\n core_n_pixels = prod(x-2 for x in shape)\n core_n_neighbors = 3**ndim-1\n count = core_n_pixels * core_n_neighbors\n\n # Go through pixels that are along planes/edges/corners\n # The number of neighbors is missing n_axes+1 axes\n n_axes = arange(ndim)\n n_neighbors = core_n_neighbors - ((1<<n_axes) * 3**(ndim-n_axes-1)).cumsum()\n for inds in axes_combinations(ndim):\n n_pixels = core_n_pixels // prod(shape[i]-2 for i in inds)\n count += (1<<len(inds)) * n_pixels * n_neighbors[len(inds)-1]\n\n return count", "def propagate(possible: np.array, count: ma.array, where: ma.array) -> int:\n while np.equal(count, 1, out=where).any():\n i, j = _neighbors[:, where, :]\n _, k = possible[where, :].nonzero()\n possible[i, j, k[:, np.newaxis]] = False\n if not possible.sum(axis=2, out=count).all():\n return -1 # site with 0 possibility => infeasibility\n count[where] = ma.masked # avoid repetitive work\n return count.count()", "def grid_point_count(self):\n return pytools.product(self.grid_point_counts())", "def count_neighbor_flags(self, i, j):\n return np.count_nonzero(self.flags[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])", "def cell_neighbours(self, x, y):\n if self.maze_map[y][x]:\n return set()\n neighbours = set()\n for (direction, ((i, j), dummy)) in MazeGraph.DIRECTIONS.items():\n xi, yj = (x + i) % self.width, (y + j) % self.height\n if not self.maze_map[yj][xi]:\n neighbours.add((direction, (xi, yj)))\n return neighbours", "def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours", "def neighbor_edge(self, neighborhood_node):\n Temp = 0\n for node1 in neighborhood_node:\n for node2 in neighborhood_node:\n if(self.Adjmatrix[node1, node2] == 1):\n Temp += 1\n return Temp", "def find_neighbors(self):\n x, y = self.position\n\n for i in range(3):\n for j in range(3):\n try:\n self.neighbors.append(self.stitches[(x - 1 + i, y - 1 + j)].position)\n except:\n pass\n\n # this cell will be added by default so we must delete at the end\n self.neighbors.remove(self.position)", "def get_neighbours(self, coords):\n\n\t dxdy = [(-1,-2),(0,-2),(1,-2),(-2,-1),(-1,-1),(0,-1),(1,-1),(2,-1),\n\t (-2,0),(-1,0),(1,0),(2,0),(-2,1),(-1,1),(0,1),(1,1),(2,1),\n\t (-1,2),(0,2),(1,2),(0,0)]\n\t neighbours = []\n\t for dx, dy in dxdy:\n\t neighbour_coords = coords[0] + dx, coords[1] + dy\n\t if not (0 <= neighbour_coords[0] < 
self.nx and\n\t 0 <= neighbour_coords[1] < self.ny):\n\t # We're off the grid: no neighbours here.\n\t continue\n\t neighbour_cell = self.cells[neighbour_coords]\n\t if neighbour_cell is not None:\n\t # This cell is occupied: store this index of the contained point.\n\t neighbours.append(neighbour_cell)\n\t return neighbours", "def main():\n row, col, island = make_matrix()\n print(count_island(row, col, island))", "def numOfLiveNeighbors(self):\n return len(list(filter(lambda x: x.isAlive(), self._neighbors)))", "def get_neighbours(coords):\n\n dxdy = [(-1,-2),(0,-2),(1,-2),(-2,-1),(-1,-1),(0,-1),(1,-1),(2,-1),\n (-2,0),(-1,0),(1,0),(2,0),(-2,1),(-1,1),(0,1),(1,1),(2,1),\n (-1,2),(0,2),(1,2),(0,0)]\n neighbours = []\n for dx, dy in dxdy:\n neighbour_coords = coords[0] + dx, coords[1] + dy\n if not (0 <= neighbour_coords[0] < nx and\n 0 <= neighbour_coords[1] < ny):\n # We're off the grid: no neighbours here.\n continue\n neighbour_cell = cells[neighbour_coords]\n if neighbour_cell is not None:\n # This cell is occupied: store this index of the contained point.\n neighbours.append(neighbour_cell)\n return neighbours", "def neighborhood(index, npoints, maxdist=1):\n return [index + i for i in range(-maxdist, maxdist + 1)\n if i != 0 and 0 <= index + i <= npoints - 1]", "def num_points_in_circle(d):\n return 6 * d if d > 0 else 1", "def island_perimeter(grid):\n perimeter = 0\n for x in range(len(grid)):\n for y in range(len(grid[x])):\n if grid[x][y] == 1:\n if x == 0:\n perimeter += 1\n elif grid[x - 1][y] == 0:\n perimeter += 1\n if y == 0:\n perimeter += 1\n elif grid[x][y - 1] == 0:\n perimeter += 1\n if x == len(grid) - 1:\n perimeter += 1\n elif grid[x + 1][y] == 0:\n perimeter += 1\n if y == len(grid[0]) - 1:\n perimeter += 1\n elif grid[x][y + 1] == 0:\n perimeter += 1\n return perimeter", "def perimeter(points):\n return sum(get_distances(points))", "def update_neighbors(self):\n neighbors = []\n for i in range(-1, 2):\n for j in range(-1, 2):\n if (i, j) == (0, 0):\n continue\n try:\n y, x = self.loc[0]+i, self.loc[1]+j\n neighbor = self.board.array[y, x]\n if neighbor > 0:\n neighbors.append(neighbor)\n except:\n continue\n \n self.neighbors = neighbors", "def misplaced_nodes(puzzle):\n\n\tcount = 0\n\tfor i in range(puzzle.dimension):\n\t\tfor j in range(puzzle.dimension):\n\t\t\tif (puzzle.board[i][j] != puzzle.final_state[i][j] and puzzle.board[i][j] != 0): count += 1\n\n\treturn count", "def degree(self):\n return len(self._neighbors)", "def degree( self, n ):\n return len(self._G.neighbors(n))", "def check_neighbours(coordinates):\n x_coord = coordinates[0]\n y_coord = coordinates[1]\n coordinates_value = 0\n for x_move in [-1, 0, 1]:\n x = x_coord + x_move\n for y_move in [-1, 0, 1]:\n y = y_coord + y_move\n try:\n value = grid[(x,y)]\n coordinates_value += value\n except KeyError:\n pass\n\n grid[coordinates] = coordinates_value\n # print(coordinates_value)\n return coordinates_value", "def countOccupied(data):\n\tcounter = 0\n\n\t# loop through rows and columns and\n\t# count the number of '#'s\n\tfor r in range(len(data)):\n\t\tfor c in range(len(data[r])):\n\t\t\tif data[r][c] == '#':\n\t\t\t\tcounter += 1\n\n\treturn counter", "def get_neighbours(point, grid):\n # possible movements (diagonally is impossible)\n dy, dx = [-1, 0, 1, 0], [0, 1, 0, -1]\n\n neighbours = []\n for i in range(4):\n y, x = point[0] + dy[i], point[1] + dx[i]\n\n # skip if not within maze's bounds (NOT actually needed since there is a \"#\" barrier around the maze)\n # if not (0 <= x < len(grid) 
and 0 <= y < len(grid[0])):\n # continue\n\n point_type = grid[y][x]\n if point_type == \"#\": # skip if wall\n continue\n neighbours.append((y, x))\n\n return neighbours", "def get_erosion_level(self, point: Point) -> int:\n return (self.grid[point] + self.depth) % 20183", "def neighbors(self, u):\r\n return filter(lambda v: self.getCapacity((u,v)) > 0, self.adjacent[u])", "def no_neighbour(x: int, y: int) -> bool:\r\n if not wall_check(x, y-1, False):\r\n if example[x, y-1] == 0:\r\n return False\r\n if not wall_check(x, y+1, False):\r\n if example[x, y+1] == 0:\r\n return False\r\n if not wall_check(x+1, y, False):\r\n if example[x+1, y] == 0:\r\n return False\r\n if not wall_check(x-1, y, False):\r\n if example[x-1, y] == 0:\r\n return False\r\n return True" ]
[ "0.7307373", "0.7237684", "0.7203775", "0.71407646", "0.6955501", "0.68983823", "0.6883159", "0.6826486", "0.6824896", "0.6804748", "0.6788591", "0.67754936", "0.67619663", "0.67619663", "0.67333233", "0.6709194", "0.66639596", "0.66181695", "0.657233", "0.65499747", "0.65250754", "0.64985716", "0.64570016", "0.6450929", "0.6436147", "0.64351785", "0.6386303", "0.6379967", "0.6366813", "0.6363727", "0.63401496", "0.63311183", "0.6300116", "0.62860185", "0.6267197", "0.626156", "0.62395823", "0.6235121", "0.62263644", "0.619141", "0.6171729", "0.6171327", "0.61516565", "0.6145341", "0.61263984", "0.61156887", "0.6097483", "0.60821694", "0.6079117", "0.6074856", "0.60655946", "0.6057587", "0.60561085", "0.60373294", "0.6027829", "0.6015585", "0.6015287", "0.598593", "0.59858733", "0.5975241", "0.5937704", "0.5934518", "0.59342134", "0.59309316", "0.5920006", "0.5900962", "0.5899489", "0.58826494", "0.58666116", "0.5865556", "0.58583575", "0.58544457", "0.5848388", "0.584371", "0.58435655", "0.58263856", "0.5822116", "0.58211565", "0.5803452", "0.58026886", "0.5796176", "0.5792225", "0.5788834", "0.5782739", "0.57784647", "0.57702386", "0.5765865", "0.5760473", "0.5748407", "0.5747776", "0.5743547", "0.57433677", "0.5741363", "0.5736615", "0.5734189", "0.57317114", "0.5728909", "0.5722576", "0.57167804", "0.5713444" ]
0.7302581
1
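The positive document for this record (countFreeNeighbors) is written with Python 2 idioms such as dict.has_key. For reference only, a minimal Python 3 sketch of the same neighbour count follows; the board.grids mapping and the occupation dict interface are assumptions carried over from the record, and the name count_free_neighbors is hypothetical, not part of the dataset.

def count_free_neighbors(p, board, occupation):
    # Count the orthogonal neighbours of point p that are board points
    # and are not yet occupied. board.grids maps (x, y) tuples to position
    # indices (assumed interface); occupation maps occupied indices to contents.
    free = 0
    for axis in (0, 1):
        for delta in (-1, 1):
            pn = list(p)
            pn[axis] += delta
            j = board.grids.get(tuple(pn))
            if j is None:          # not a point on the board
                continue
            if j in occupation:    # already occupied
                continue
            free += 1
    return free

Called as count_free_neighbors((x, y), board, occupation), it returns a value between 0 and 4, matching the Python 2 original above.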
Find unoccupied positions on the board.
def findUnoccupied( board, occupation): return [ j for j in xrange(len(board.positions)) if not occupation.has_key(j) ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def free_positions(self):\n positions = []\n for i in range(self.grid_size):\n for j in range(self.grid_size):\n if self.grid[i][j] == 0:\n positions.append((i, j))\n if positions == []:\n raise GameException('Game Over. No free position left.')\n return positions", "def board_empty_positions(self, x, y):\n board = self.boards[x][y]\n coords = [(x, y, i, j) for (i, j) in board.empty_squares]\n return self.coords_to_positions(coords)", "def find_unsettled_spot(self):\n\t\tfor i in range(9):\n\t\t\tfor j in range(9):\n\t\t\t\tif self.grid[i][j] == 0:\n\t\t\t\t\treturn i, j\n\t\treturn", "def filled_positions(self):\n return [x for x in assignable_positions if self.grid[x][0]]", "def find_empty(self):\n num_rows = len(self.board)\n num_cols = len(self.board[0])\n\n for i in range(num_rows):\n for j in range(num_cols):\n if self.board[i][j] == 0:\n return (i, j)", "def findEmpty(grid):\n for x in range(len(grid.board)):\n for y in range(len(grid.board[0])):\n if grid.board[x][y] == 0:\n return [x,y]", "def available_positions(self):\n available_positions = []\n for i in range(self.positions_count):\n if self.board[i] == 0:\n available_positions.append(i+1)\n return available_positions", "def empty_spots(self):\n\t\tret = []\n\t\tfor i in range(0, self.size):\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tif(self.grid[i][j] == self.terminal):\n\t\t\t\t\tret.append((i,j))\n\t\treturn ret", "def available_positions(self):\n if len([x for x in self.grid.values() if x[0] != None]) < 13:\n return [x for x in assignable_positions if self.grid[x][1] == \"---\"]\n else:\n return []", "def remove_filled_positions(self, positions, board):\n\n new_positions = []\n for p in positions:\n if board.check_move(p[0], p[1]):\n new_positions.append(p)\n return new_positions", "def get_positions(self):\r\n null_pos, black_pos, white_pos = set(), set(), set()\r\n for pos in BOARD_POSITIONS:\r\n if self.state[pos[0]][pos[1]] == 0:\r\n null_pos.add(pos)\r\n elif self.state[pos[0]][pos[1]] == 1:\r\n black_pos.add(pos)\r\n else:\r\n white_pos.add(pos)\r\n return null_pos, black_pos, white_pos", "def get_empty_board_indecies(self):\n empty_indecies = []\n for row_num in range(len(self.board)):\n for col_num in range(len(self.board)):\n if self.board[row_num][col_num] and self.board[row_num][col_num].state == PegState.EMPTY:\n empty_indecies.append((row_num, col_num))\n return empty_indecies", "def get_empty_positions(self):\n\n empty_positions = []\n\n for i in range(self._dimension):\n for j in range(self._dimension):\n if self._board[i][j] == ' ':\n empty_positions.append((i, j))\n\n return empty_positions", "def _find_empty_cell(self):\n\n for r, row in enumerate(self._board):\n for c, cell in enumerate(row):\n if cell is None:\n return r, c", "def get_played_positions(board):\n return np.argwhere(board.state != -1)", "def get_unknown_neighbours(self, row, col):\n return [cell for cell in self.get_neighbours(row, col) if cell.state == None ]", "def find_empty_space(self, state):\r\n for i in range(3):\r\n for j in range(3):\r\n if state[i][j] == 0:\r\n return (i, j)", "def available_spots(self):\n occupied_tiles = self.board.keys()\n neighbors = lambda x, y: ((x+1, y), (x-1, y), (x, y+1), (y, y-1))\n tiles_near_occupied = set(neighbor for tile in occupied_tiles\n for neighbor in neighbors(*tile))\n unnoccupied_titles_near_occupied = tiles_near_occupied - set(occupied_tiles)\n return unnoccupied_titles_near_occupied", "def find_empty(self):\n min_num_choices = 10\n ret_x, ret_y = (-1, -1)\n\n for x in range(0, 9):\n 
for y in range(0, 9):\n if self.field[x][y] != -1:\n continue\n\n if (min_num_choices > len(self.choices[x][y])):\n min_num_choices = len(self.choices[x][y])\n ret_x = x\n ret_y = y\n\n return (ret_x, ret_y)", "def unoccupied(self):\n self.is_occupied = 0\n for hex in self.fon:\n hex.remove_neighbor()\n hex.set_quality()", "def find_excited_locations(self):\n return np.asarray(np.where(self._grid == 8)).T", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def opponentBoarderPosition(self, gameState):\n if self.red:\n i = self.midWidth\n else:\n i = self.midWidth - 1\n boudaries = [(i,j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not gameState.hasWall(i[0],i[1]):\n validPositions.append(i)\n return validPositions", "def find_empty(puzzle):\r\n empty_squares = []\r\n for y in range(len(puzzle.squares)):\r\n for x in range(len(puzzle.squares[0])):\r\n if puzzle.squares[y][x].is_editable() is True:\r\n empty_squares.append((x, y))\r\n return empty_squares", "def _get_neighbours(self, position):\n grid = self._grid\n x, y = position\n neighbours = []\n offsets = [(0,1),(1,0),(0,-1),(-1,0)]\n shuffle(offsets)\n for offset in offsets:\n i, j = offset\n position = (x + i, y + j)\n if grid.valid_position(position) and position not in self.shots:\n neighbours.append(position)\n return neighbours", "def legalMoves(self):\n return [c for c in range(self.getWidth()) if len([r for r in range(self.getHeight()) if self.cell[c][r]==EMPTY])>0 ]", "def unsolved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_unsolved_cells())", "def unsolved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_unsolved_cells())", "def get_occupied_tiles(self):\r\n occupied = np.zeros(self.searchenv.conv.num_tiles)\r\n #Convert current state (positions of agents) to tile indices\r\n tiles = self.searchenv.conv.state_to_tile(self.searchstate.positions)\r\n valid_tiles = tiles[self.searchstate.actives == 1]\r\n occupied[valid_tiles] = 1\r\n return occupied", "def find_empty_cells(gr):\n l = list()\n for i in range(0,9):\n for j in range(0,9):\n if(gr[i][j] == 0):\n l.append([i, j])\n return l", "def searchDeadEnd(self):\n boundaries = []\n if not self.red:\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i, j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not (i[0], i[1]) in self.walls:\n validPositions.append(i)\n\n dangerPos = []\n\n toExpand = self.scanmap.twoEntryPoints()\n for (x,y) in toExpand:\n adjacent = self.scanmap.adjacentValidPoints(x, y)\n if not (x,y) in dangerPos:\n for (u, w) in adjacent:\n visited = []\n visited.append((x, y))\n safe = False\n danger = False\n DFS = util.Stack()\n DFS.push((u,w))\n while not safe and not danger:\n (i,j) = DFS.pop()\n visited.append((i,j))\n adjacents = self.scanmap.adjacentValidPoints(i,j)\n for position in adjacents:\n if not position in visited:\n DFS.push(position)\n if DFS.isEmpty():\n danger = True\n dangerPos = list(set(dangerPos) | set(visited))\n\n if (i,j) in validPositions:\n safe = True\n oneEntry = self.scanmap.oneEntryPoints()\n dangerPos = list(set(oneEntry).union(set(dangerPos)))\n dangerPos.sort()\n return dangerPos", "def find_empty_space(board: list) -> tuple:\n board_length = len(board)\n 
for i in range(board_length):\n for j in range(board_length):\n if board[i][j] == 0:\n return (i,j)", "def find_moves(self):\n\n from itertools import product\n free_position = self.find_free()\n return [list(free_position+i) for i in [[0,1],[1,0],[-1,0],[0,-1]] if tuple(i+free_position) in product(range(self.size),repeat=2)]", "def get_legal_moves(self):\n moves = []\n if self.player_locations[self.whose_turn] is None:\n return self.get_blank_locations()\n matrix = [(1,0), (-1,0), (0,1), (0,-1), (1,1), (1,-1), (-1, 1), (-1,-1)]\n\n for dx, dy in matrix:\n x,y = self.player_locations[self.whose_turn]\n while x+dx <= xdim and x+dx >= 0 and y+dy <= ydim and y+dy >= 0:\n x = x+dx\n y = y+dx\n if self.board[x][y] : break\n moves.append((x,y))\n return moves", "def get_unhindered_positions(self, endposition):\n current_position = self.position\n potential_positions = { \n 'diag1' : [],\n 'diag2' : [],\n 'diag3' : [],\n 'diag4' : []\n }\n space_down = current_position[0]-1\n space_up = self.ncols-current_position[0]\n space_right = self.nrows - (ord('H')-ord(current_position[1]))\n space_left = ord(current_position[1]) - ord('A')\n\n for i in range(1, space_down+1):\n diag1 = (current_position[0]-i, chr(ord(current_position[1])+i))\n diag2 = (current_position[0]-i, chr(ord(current_position[1])-i))\n if self.pos_within_bounds(diag1):\n potential_positions['diag1'].append(diag1)\n if self.pos_within_bounds(diag2):\n potential_positions['diag2'].append(diag2)\n\n for i in range(1, space_up+1):\n diag3 = (current_position[0]+i, chr(ord(current_position[1])+i))\n diag4 = (current_position[0]+i, chr(ord(current_position[1])-i))\n if self.pos_within_bounds(diag3):\n potential_positions['diag3'].append(diag3)\n if self.pos_within_bounds(diag4):\n potential_positions['diag4'].append(diag4)\n \n for direction, square in potential_positions.items():\n if tuple(endposition) in square:\n return potential_positions[direction]", "def get_all_pieces(self):\n occupied = []\n for pieces in self.piece_locs.values():\n occupied += pieces\n return occupied", "def is_unoccupied(self, row, col):\n return self.maze[row][col] is EMPTY", "def get_all_positions(board, white_turn):\n list = []\n for row in range(8):\n for col in range(8):\n # White\n if white_turn and white_piece_on_pos((row, col), board):\n obj = board[row][col]\n if type(obj) is Pawn:\n for valid_pos in valid_positions_pawn_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Tower:\n for valid_pos in valid_positions_tower_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Bishop:\n for valid_pos in valid_positions_bishop_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Horse:\n for valid_pos in valid_positions_horse_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Queen:\n for valid_pos in valid_positions_queen_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is King:\n for valid_pos in valid_positions_king_white((row, col), board):\n list.append(((row, col), valid_pos))\n # Black\n elif (not white_turn) and black_piece_on_pos((row, col), board):\n obj = board[row][col]\n if type(obj) is Pawn:\n for valid_pos in valid_positions_pawn_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Tower:\n for valid_pos in valid_positions_tower_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Bishop:\n for valid_pos in 
valid_positions_bishop_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Horse:\n for valid_pos in valid_positions_horse_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Queen:\n for valid_pos in valid_positions_queen_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is King:\n for valid_pos in valid_positions_king_black((row, col), board):\n list.append(((row, col), valid_pos))\n return list", "def check_for_moves(self) -> list:\r\n avail_moves = []\r\n for x in range(self.size):\r\n for y in range(self.size):\r\n if self.tags[x][y] is None:\r\n avail_moves.append((x, y))\r\n return avail_moves", "def get_empty_cells(state):\n cells = []\n for row_index, row in enumerate(state.board):\n for col_index, cell in enumerate(row):\n if cell == 0:\n cells.append([row_index, col_index])\n return cells", "def bosonic_cells(self):\n cells = self.cells()\n fermionic_cells = self.fermionic_cells()\n coords = [x for x in cells if x not in fermionic_cells]\n return coords", "def moles(board):\n return (pos for pos in range(1, length+1) if at(board, pos))", "def find_neighbors(self):\n x, y = self.position\n\n for i in range(3):\n for j in range(3):\n try:\n self.neighbors.append(self.stitches[(x - 1 + i, y - 1 + j)].position)\n except:\n pass\n\n # this cell will be added by default so we must delete at the end\n self.neighbors.remove(self.position)", "def findImmediateNeighbours(self):\n immediateNeighbours = []\n\n if self.xCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate - 1, self.yCoordinate))\n\n if self.xCoordinate + 1 < 395:\n immediateNeighbours.append(PixelPosition(self.xCoordinate + 1, self.yCoordinate))\n\n if self.yCoordinate + 1 < 500:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate + 1))\n\n if self.yCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate - 1))\n\n return immediateNeighbours", "def position_surroundings(self, neighbour_pos, missed_value):\n pos = []\n for x, y in neighbour_pos:\n position = self._square_matrix[x][y].get_pos_from_number(missed_value)\n if position:\n pos.append(position)\n return pos", "def get_unhindered_positions(self, endposition):\n current_position = self.position\n potential_positions = potential_positions = {\n 'left' : [], \n 'right' : [],\n 'up' : [], \n 'down' : []\n }\n space_down = current_position[0]-1\n space_up = self.ncols-current_position[0]\n space_right = self.nrows - (ord('H')-ord(current_position[1]))\n space_left = ord(current_position[1]) - ord('A')\n\n for i in range(1, space_down+1):\n pos = (current_position[0]-i, current_position[1])\n if self.pos_within_bounds(pos):\n potential_positions['down'].append(pos)\n\n for i in range(1, space_up+1):\n pos = (current_position[0]+i, current_position[1])\n if self.pos_within_bounds(pos):\n potential_positions['up'].append(pos)\n \n for i in range(1, space_left+1):\n pos = (current_position[0], chr(ord(current_position[1])-i))\n if self.pos_within_bounds(pos):\n potential_positions['left'].append(pos)\n\n for i in range(1, space_right+1):\n pos = (current_position[0], chr(ord(current_position[1])+i))\n if self.pos_within_bounds(pos):\n potential_positions['right'].append(pos)\n\n for direction, square in potential_positions.items():\n if tuple(endposition) in square:\n return potential_positions[direction]", "def open_spots(self):\n ret = []\n for i in range(1,25):\n if 
self.nodes[i].piece == None:\n ret.append(i)\n return ret", "def find_valid_posse(board: 'List') -> 'List':\n for i, a in enumerate(board):\n for j, b in enumerate(board):\n if j != i:\n for k, c in enumerate(board):\n if k not in (i, j) and \\\n is_valid_posse((a, b, c)):\n # print((i, j, k))\n return [a, b, c]", "def get_valid_moves(self):\r\n validMoves = []\r\n\r\n for x in range(BOARD_SIZE):\r\n for y in range(BOARD_SIZE):\r\n pos = np.array([x,y])\r\n if self.board[pos[0],pos[1]] == 0:\r\n if(self.update_board(pos,_testing=True)):\r\n validMoves.append(pos)\r\n\r\n return validMoves", "def find_blank_cell(self, board: list):\n cells = {}\n for i in range(9): # Iterate over rows\n for j in range(9): # Iterate over columns\n if board[i][j] == 0:\n cells[str(i) + ' ' + str(j)] = self.count_numbers(board, j, i)\n m = max(cells.values())\n for k in cells:\n if cells[k] == m:\n s = k.split()\n x, y = int(s[1]), int(s[0])\n return x, y", "def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours", "def get_legal_moves(self, color):\n moves = [] # stores the legal moves.\n # Get all the squares with pieces of the given color.\n for x in range(self.n):\n for y in range(self.n):\n if self[x][y]==0:\n moves.append((x,y))\n return moves", "def checked_positions():\n for base_position in chain([me.shipyard], me.get_dropoffs()):\n x_shipyard = base_position.position.x\n y_shipyard = base_position.position.y\n for x in range(-search_range, search_range):\n for y in range(-search_range, search_range):\n yield hlt.Position(\n x=x_shipyard + x,\n y=y_shipyard + y)", "def _computeNoisyPositions(self, state):\n positions = state.getGhostPositions()\n w = self.args.w\n w2 = 2*w+1\n div = float(w2 * w2)\n new_positions = []\n for p in positions:\n (x, y) = p\n dist = util.Counter()\n for i in range(x - w, x + w + 1):\n for j in range(y - w, y + w + 1):\n dist[(i, j)] = 1.0 / div\n dist.normalize()\n new_positions.append(util.chooseFromDistribution(dist))\n return new_positions", "def _computeNoisyPositions(self, state):\n positions = state.getGhostPositions()\n w = self.args.w\n w2 = 2*w+1\n div = float(w2 * w2)\n new_positions = []\n for p in positions:\n (x, y) = p\n dist = util.Counter()\n for i in range(x - w, x + w + 1):\n for j in range(y - w, y + w + 1):\n dist[(i, j)] = 1.0 / div\n dist.normalize()\n new_positions.append(util.chooseFromDistribution(dist))\n return new_positions", "def boarderPosition(self, gameState):\n if gameState.isOnRedTeam(self.index):\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i,j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not gameState.hasWall(i[0],i[1]):\n validPositions.append(i)\n return validPositions", "def get_legal_moves(self, board):\n moves = set()\n capture_moves = set()\n for move in self.pot_moves:\n target_row = self.field_row + move[0]\n target_col = self.field_col + move[1]\n if not (target_row > 7 or target_row < 0 or target_col > 7 or target_col < 0):\n if board.status[target_row, target_col] == 0:\n moves.add(move)\n if board.status[target_row, target_col] * self.color_value < 0:\n capture_moves.add(move)\n self.legal_moves = moves\n self.legal_capture_moves = capture_moves", "def get_legal_moves(self, board):\n moves = 
set()\n capture_moves = set()\n for move in self.pot_moves:\n target_row = self.field_row + move[0]\n target_col = self.field_col + move[1]\n if self.path_clear(board, move, target_row, target_col):\n if board.status[target_row, target_col] * self.color_value == 0:\n moves.add(move)\n if board.status[target_row, target_col] * self.color_value < 0:\n capture_moves.add(move)\n self.legal_moves = moves\n self.legal_capture_moves = capture_moves", "def get_valid_locations(self, board):\n valid_locations = []\n for col in range(self._COLUMNCOUNT):\n try:\n if validate_column(board, col):\n valid_locations.append(col)\n except InvalidColumn:\n pass\n return valid_locations", "def outputPositionsWithoutExtraSpace(board):\n n = len(board)\n if n == 0:\n return []\n\n m = len(board[0])\n if m == 0:\n return []\n\n has_first_row_zero = False\n has_first_col_zero = False\n\n for i in range(0, n):\n if not board[i][0]:\n has_first_col_zero = True\n break\n\n for j in range(0, m):\n if not board[0][j]:\n has_first_row_zero = True\n break\n\n for i in range(1, n):\n for j in range(1, m):\n if not board[i][j]:\n A[i][0] = A[0][j] = 0\n \n for i in range(1, n):\n if not board[i][0]:\n continue\n\n for j in range(1, m):\n board[i][j] = 0\n\n for j in range(1, m):\n if not board[0][j]:\n continue\n\n for i in range(1, n):\n board[i][j] = 0\n\n if has_first_row_zero:\n for j in range(0, m):\n board[0][j] = 0\n\n if has_first_col_zero:\n for i in range(0, n):\n board[i][0] = 0", "def get_empty_squares(self):\n empty = []\n for row in range(self._dim):\n for col in range(self._dim):\n if self._board[row][col] == EMPTY:\n empty.append((row, col))\n return empty", "def known_safes(self):\n #if the bomb count is zero\n if(self.count == 0):\n #all spaces are safe, which returns all spots that are safe\n return (self.cells)\n else:\n return set()", "def actions(board):\n avail_moves = set()\n\n for i in range(3):\n for j in range(3):\n if board[i][j] == EMPTY:\n avail_moves.add((i,j))\n \n if len(avail_moves) == 0:\n return 0\n\n return avail_moves", "def find_empty(grid):\n for i in range(LEN_GRID):\n for j in range(LEN_GRID):\n if grid[i][j] == 0:\n return (i, j) # row, col\n return None", "def get_empty_cells(board):\n empty_cells = [idx for idx, e in enumerate(board) if e == ' ']\n return empty_cells", "def iter_unsolved_cells(self):\n\t\treturn (\n\t\t\tcell for cell in\n\t\t\tself._cells\n\t\t\tif not cell.value()\n\t\t)", "def occupied_cells(self):\n\n for lm in self.landmarks:\n if self.cell_size < 1:\n # expand the range the landmark exists\n lm_x_range = np.arange(lm[0]-self.R, lm[0]+self.R, self.cell_size)\n lm_y_range = np.arange(lm[1]-self.R, lm[1]+self.R, self.cell_size)\n\n # loop through expanded ranges and compute grid positions\n for lm_x in lm_x_range:\n for lm_y in lm_y_range:\n\n row, col = self.cell_index([lm_x, lm_y])\n\n # apply cost of occupied cell\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass\n\n else:\n # apply cost of occupied cell\n row, col = self.cell_index(lm)\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass", "def findEmptyCell(board: str, threeInRow: Tuple[int, int, int]) -> int:\n emptyCells = [\n index for index in threeInRow if isAvailable(board, index)]\n return choice(emptyCells)", "def dead_boards(self):\n return [(i, j) for i in range(self.SIZE) for j in range(self.SIZE)\n if self.boards[i][j].state != State.IN_PROGRESS]", "def get_empty_cells(grid):\n empty = []\n for j,row in enumerate(grid):\n for i,val in enumerate(row):\n if not 
val:\n empty.append((j,i))\n return empty", "def get_empty_cells(grid):\n\tempty = []\n\tfor j,row in enumerate(grid):\n\t\tfor i,val in enumerate(row):\n\t\t\tif not val:\n\t\t\t\tempty.append((j,i))\n\treturn empty", "def get_available_moves(self, board):\n available_moves = []\n for fieldx in range(len(board)):\n column = []\n for fieldy in range(len(board)):\n legit_move = board[self.posy][self.posx].is_valid_move(board, fieldx, fieldy)\n column.append(legit_move)\n available_moves.append(column)\n return available_moves", "def get_empty_tiles(self) -> List[Point]:\n\t\tempty_tiles = []\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tif self.tiles[x][y] == 0:\n\t\t\t\t\tempty_tiles.append(Point(x,y))\n\t\treturn empty_tiles", "def get_flagged_neighbours(self, row, col):\n return [cell for cell in self.get_neighbours(row, col) if cell.state == 'X']", "def actions(board):\n available = set()\n\n for i in range(3):\n for j in range(3):\n if board[i][j] == EMPTY:\n available.add((i, j))\n\n return available", "def xy_occupied(xy, board):\n return True if board[xy[0]][xy[1]] else False", "def get_move_list(self):\n return [\n tuple(x) for x in np.argwhere(self.board == HexBoard.EMPTY).tolist()\n ]", "def iter_unsolved_cells(self) -> Iterable[Cell]:\n\t\treturn (\n\t\t\tcell\n\t\t\tfor cell in self\n\t\t\tif not cell.value()\n\t\t)", "def __FreeTiles(self, grid, log=False):\n\n x_pos, _ = np.where(grid == 0)\n return len(x_pos)", "def find_empty_squares(board):\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == 0:\n return (i,j) #row , column\n\n #if there are no blank squres\n return None", "def get_legal_moves(self, board):\n moves = set()\n capture_moves = set()\n if not (self.field_row*self.color_value == 1 or self.field_row*self.color_value == -6):\n self.pot_moves = {(1*self.color_value, 0)}\n\n for move in self.pot_moves:\n target_row = self.field_row + move[0]\n target_col = self.field_col + move[1]\n if self.path_clear(board, move, target_row, target_col):\n if board.status[target_row, target_col] * self.color_value == 0:\n moves.add(move)\n\n for move in self.pot_capture_moves:\n target_row = self.field_row + move[0]\n target_col = self.field_col + move[1]\n if self.path_clear(board, move, target_row, target_col):\n if board.status[target_row, target_col] * self.color_value < 0:\n capture_moves.add(move)\n self.legal_moves = moves\n self.legal_capture_moves = capture_moves", "def legal_moves(self, player, board):\r\n #go through the whole board and check whether the piece is on the board or not\r\n #num/row size - num%col == num2/row size - num@%col\r\n #num/row size + num%col\r\n moves = list()\r\n opp = self.opponent(player)\r\n #print(board)\r\n for i in self.squares():\r\n if board[i] == core.EMPTY:\r\n for d in core.DIRECTIONS:\r\n endPt = self.find_bracket(i, player, board, d)\r\n if endPt!= None:\r\n moves.append(i)\r\n break\r\n\r\n return moves", "def moves(self):\n\n # define a full range, which we can compare against columns,\n # rows, or blocks. 
they're all the same when stored as sets.\n line = set(range(1, 10))\n moves = []\n\n # iterate every cell on the board\n for row in range(0, 9):\n for col in range(0, 9):\n\n # ignore this cell if it's already filled\n i = self._index(col, row)\n if self.data[i] is not None:\n continue\n\n # fetch the adjacent cells\n row_values = set(self._row(row))\n col_values = set(self._column(col))\n bck_values = set(self._block(col, row))\n\n # subtract the values present in the adjacent cells\n # (since this cell *can't* be of any of those values),\n # to leave the list of possibilities for this cell\n missing = line.difference(row_values, col_values, bck_values)\n\n # if there's only *one* possibility, we've found the\n # solution to this cell\n if len(missing) == 1:\n moves.append((col, row, missing.pop()))\n\n return moves", "def get_moves(self):\n grid = self.model.grid\n # List of agents we can't overlap with\n no_overlap = [\"wall\", \"human\", \"zombie\"]\n\n if self.agent_type == \"zombie\" or \\\n (\"AvoidingZombie\" not in self.states and os.environ[\"mode\"] == \"5\"):\n no_overlap.append(\"road\")\n\n # Always give the option to stay on your current location(stand still)\n all_cells = self.neighbors()\n free_cells = [self.pos]\n\n # Get rid of cells that we may not move to by iterating through all\n # cells next to the agent, and only adding non-occupied cells\n for cell in all_cells:\n cell_occupied = False\n x, y = cell.pos\n # If there are agents in the current cell, and we are not allowed\n # to overlap with any of those agents, the cell is occupied.\n # Only add cells which are not occupied.\n if not grid.is_cell_empty((x, y)):\n for agent in grid[x][y]:\n if agent.agent_type in no_overlap:\n cell_occupied = True\n break\n if not cell_occupied:\n free_cells.append((x, y))\n return free_cells", "def available_moves(self):\n moves = []\n for x, y in self.available_boards:\n moves.extend([self.to_position(x, y, i, j) for (i, j)\n in self.boards[x][y].empty_squares])\n return moves", "def find_empty(board):\n for ii in range(len(board)):\n for jj in range(len(board[ii])):\n if board[ii][jj] == 0:\n print('Empty: ', (jj , ii)) # column, row\n return jj, ii # column, row\n return None", "def get_valid_neighbors(self, x, y):\n\t\tx_1, x_2 = max(x-1, 0), min(x+1, self.width-1)\n\t\ty_1, y_2 = max(y-1, 0), min(y+1, self.height-1)\n\t\treturn [(n, m) for n in range(x_1, x_2+1) \n\t\t for m in range(y_1, y_2+1) if x != n or y != m]", "def random_empty_position(current_animal, grid_size, all_animals):\n all_neighbors = list_neighbors(current_animal.row, current_animal.col,\n grid_size)\n\n occupied = set()\n for x in all_animals:\n occupied.add((x.row, x.col))\n\n neighbors = []\n for x in all_neighbors:\n if x not in occupied:\n neighbors.append(x)\n\n if len(neighbors) == 0:\n return None\n\n row, col = my_random_choice(neighbors)\n return (row, col)", "def _check_occupied(self, col, row):\n if self.board[row - 1][col - 1] == EMPTY:\n return False\n else:\n return True", "def get_empty_cells(self):\n empty_cells = []\n for cell_row in self.board:\n for current_cell in cell_row:\n if current_cell is not None:\n if current_cell.get_cell_state() == 0:\n empty_cells.append(current_cell)\n return empty_cells", "def getSimbadPositions(identifier):\n\treturn base.caches.getSesame(\"simbad\").getPositionFor(identifier)", "def get_legal_moves(self, pos: Position, game_board: GameBoard) -> PossibleMoveSet:\n pass", "def unstuck(self):\n mask = Map.current_map.mask\n \n x_max, y_max = 
mask.get_size()\n orig_x, orig_y = round(self.x), round(self.y)\n x, y = orig_x , orig_y\n unstuck_aggr = COLLISION_UNSTUCK_AGGRESSION\n \n # Vertical check for any open spots we could put the entity on...\n while y > 0:\n if not mask.get_at((x, y)):\n self.y = y\n self.vy = -unstuck_aggr\n return\n y -= unstuck_aggr\n y = orig_y\n while y < y_max:\n if not mask.get_at((x, y)):\n self.y = y\n self.vy = unstuck_aggr\n return\n y += unstuck_aggr\n y = orig_y\n \n # Horizontal spots?\n while x > 0:\n if not mask.get_at((x, y)):\n self.x = x\n self.vx = -unstuck_aggr\n return\n x -= unstuck_aggr\n x = orig_x\n while x < x_max:\n if not mask.get_at((x, y)):\n self.x = x\n self.vx = unstuck_aggr\n return\n x += unstuck_aggr\n x = orig_x\n \n # Diagonal spots\n while x > 0 and y > 0:\n if not mask.get_at((x, y)):\n self.x, self.y = x, y\n self.vx, self.vy = -unstuck_aggr, -unstuck_aggr\n return\n x, y = x - unstuck_aggr, y - unstuck_aggr\n x, y = orig_x, orig_y\n while x < x_max and y < y_max:\n if not mask.get_at((x, y)):\n self.x, self.y = x, y\n self.vx, self.vy = unstuck_aggr, unstuck_aggr\n return\n x, y = x + unstuck_aggr, y + unstuck_aggr\n x, y = orig_x, orig_y\n while x > 0 and y < y_max:\n if not mask.get_at((x, y)):\n self.x, self.y = x, y\n return\n x, y = x - unstuck_aggr, y + unstuck_aggr\n x, y = orig_x, orig_y\n while x < x_max and y > 0:\n if not mask.get_at((x, y)):\n self.x, self.y = x, y\n return\n x, y = x + unstuck_aggr, y - unstuck_aggr\n x, y = orig_x, orig_y\n \n # All right, I officially give up now.\n print(\"Couldn't unstuck object!\")", "def get_empty_slots(self):\n slots = np.reshape(range(0, self.size * self.size), (self.size, self.size))\n\n return slots[~self.tiles_taken]", "def available_moves(board_state):\n for x, y in itertools.product(range(len(board_state)), range(len(board_state[0]))):\n if board_state[x][y] == 0:\n yield (x, y)", "def find_empty(game_board):\n for row in range(len(game_board)):\n for col in range(len(game_board[row])):\n if len(game_board[row][col]) == 2:\n return row, col\n for row in range(len(game_board)):\n for col in range(len(game_board[row])):\n if len(game_board[row][col]) >= 3:\n return row, col\n\n return None", "def get_neighbors(self, row, col):\n neighbors = set()\n for d in [-1,1]:\n if row+d >= 0 and row+d < self._height and \\\n (row+d,col) in self._empty_spaces:\n neighbors.add((row+d,col))\n if col+d >= 0 and col+d < self._width and \\\n (row,col+d) in self._empty_spaces:\n neighbors.add((row,col+d))\n return neighbors", "def filter_occupied(board_state):\n my_list = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n for i in range(0, 9):\n if board_state[i] != \" \":\n my_list.remove(str(i+1))\n return my_list" ]
[ "0.74757147", "0.73623365", "0.7305674", "0.72659814", "0.7136726", "0.6983209", "0.6902076", "0.69016534", "0.68626094", "0.6858928", "0.67981094", "0.67022055", "0.66423976", "0.65927297", "0.6580229", "0.6564762", "0.65628403", "0.6559673", "0.654521", "0.65293694", "0.65235007", "0.6506315", "0.6506315", "0.6506315", "0.64854264", "0.64846927", "0.64595026", "0.64592546", "0.64553624", "0.64553624", "0.64461726", "0.643557", "0.6427243", "0.6426077", "0.64235425", "0.639974", "0.63861364", "0.63672537", "0.63623637", "0.635806", "0.6355539", "0.6344593", "0.62697953", "0.62643117", "0.62637234", "0.62618685", "0.6258552", "0.62535083", "0.6251065", "0.62420857", "0.6237177", "0.6227483", "0.6219008", "0.6214004", "0.62120855", "0.6206813", "0.6206813", "0.6204388", "0.619606", "0.618115", "0.6177295", "0.61625636", "0.61530787", "0.61476845", "0.6145243", "0.6112083", "0.6110005", "0.6102245", "0.6097772", "0.60929257", "0.6092573", "0.6089145", "0.60826534", "0.6081642", "0.6075018", "0.6071512", "0.6068478", "0.6058106", "0.60529864", "0.6045663", "0.60380167", "0.6028977", "0.6024179", "0.60202754", "0.6019365", "0.60160774", "0.6005594", "0.59925056", "0.59806406", "0.5975549", "0.5971632", "0.5971581", "0.59693307", "0.5952756", "0.5943348", "0.59422433", "0.59263015", "0.5906373", "0.58999056", "0.58969843" ]
0.77521825
0
Use a depth-first search to solve the Lonpos puzzle.
def solve( board, pieces, occupation):
    from heapq import heappush, heappop

    unoccupied = findUnoccupied( board, occupation)
    remainingpieces = range(len(pieces))
    searchq = []
    nbacktrack = 0

    while (unoccupied):
        nnheap = []
        # As a heuristic, we choose to first place pieces on points
        # with the least number of unoccupied neighbors.
        for i in unoccupied:
            p = board.positions[i]
            nn = countFreeNeighbors( p, board, occupation)
            heappush( nnheap, (nn, i))
        nn, pt = heappop( nnheap)

        if (nn==0):
            # No solution, back-track
            if (searchq):
                occupation, remainingpieces = searchq.pop()
                nbacktrack += 1
                print("Backtracking for the %d'th time" % nbacktrack)
                unoccupied = findUnoccupied( board, occupation)
                continue
            else:
                break

        for ipc in remainingpieces:
            pc = pieces[ipc]
            for o in placePiece( board, occupation, board.positions[pt], pc):
                # A search node is defined by the occupation state and
                # the remaining pieces.
                searchq.append( (o, [i for i in remainingpieces if i != ipc]))

        if (searchq):
            occupation, remainingpieces = searchq.pop()
            unoccupied = findUnoccupied( board, occupation)
        else:
            break
    else:
        state = LonposState( board, occupation.items())
        state.show()
        return occupation

    # No solution for the state.
    print("No solution!")
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n util.raiseNotDefined()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n #Creamos las estructuras de datos necesarias (stack y set)\n openNodes = util.Stack()\n closedNodes = set([])\n\n #Guardamos el nodo inicial\n node = Node(problem.getStartState(), '', 0, None)\n\n #Metemos el nodo en la pila\n openNodes.push(node)\n\n #Iteramos para cada nodo de la pila\n while True:\n if openNodes.isEmpty():\n break #ERROR: throw exception\n else :\n #Sacamos el nodo de arriba de la pila\n node = openNodes.pop()\n if problem.isGoalState(node.name):\n break\n else: #Expandimos los nodos sucesores del nodo n si no estan en closed\n for successor in problem.getSuccessors(node.name):\n n, p, c = successor\n succNode = Node(n, p, c, node)\n if nodeIsClosed(succNode, closedNodes) is False:\n #Metemos al sucesor en la pila\n openNodes.push(succNode)\n #Metemos el nodo n en closed\n closedNodes.add(node)\n\n #Devolvemos el camino al Goal\n return findPath(node)", "def iterativeDeepeningSearch(problem):\n from util import Stack\n \n for max_depth in range(0, 10000000):\n # print max_depth\n st = Stack()\n mapper = {}\n mapper[(problem.getStartState(), 0)] = None #map of (childpos, depth): (parentpos, direction, depth)\n st.push((problem.getStartState(), 0)) # stack of ((x,y) , depth)\n\n while not(st.isEmpty()):\n vertex = st.pop() #( (x,y) , depth )\n depth = vertex[1]\n\n if (problem.isGoalState(vertex[0])):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0], tup[2]\n l.reverse()\n print \"max_depth: \", max_depth\n print l\n return l\n\n else:\n n_depth = depth + 1 # new depth\n if n_depth < max_depth:\n neigh = problem.getSuccessors(vertex[0])\n # neigh.reverse()\n for child in neigh:\n if (child[0], n_depth) not in mapper:\n st.push((child[0], n_depth))\n mapper[(child[0], n_depth)] = (vertex[0], child[1], depth)", "def depthFirstSearch(problem):\r\n\t\"*** YOUR CODE HERE ***\"\r\n\r\n\tfrontera = util.Stack()\r\n\testadoInicial= problem.getStartState()\r\n\tfrontera.push((estadoInicial, [],0))\r\n\tvisitados=[]\r\n\tvisitados.append(estadoInicial)\r\n\r\n\twhile not(frontera.isEmpty()):\r\n\t\t(estado, camino, costo) =frontera.pop()\r\n\t\tif(problem.isGoalState(estado)):\r\n\t\t\tbreak\r\n\r\n\t\tsucesores=problem.getSuccessors(estado)\r\n\t\tfor sucesor in sucesores:\r\n\t\t\tif sucesor[0] not in visitados:\r\n\t\t\t\tfrontera.push((sucesor[0], camino + [sucesor[1]], costo + sucesor[2]))\r\n\t\t\t\tvisitados.append(sucesor[0])\r\n\tprint ('Cantidad de nodos en memoria: {}').format(len(frontera.list))\r\n\treturn camino", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n \n st = Stack()\n mapper = {}\n mapper[problem.getStartState()] = None\n\n st.push(problem.getStartState())\n while not(st.isEmpty()):\n vertex = st.pop()\n \n if (problem.isGoalState(vertex)):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0]\n l.reverse()\n print l\n return l\n\n else:\n neigh = problem.getSuccessors(vertex)\n # neigh.reverse()\n # neigh.sort()\n for child in neigh:\n if child[0] not in mapper:\n st.push(child[0])\n mapper[child[0]] = (vertex, child[1])\n # print mapper\n \n # visited = []\n # p = dfsRecursive(problem, problem.getStartState(), st, visited, [])\n # return p\n \n # pathfind = {}\n # 
st.push(problem.getStartState())\n # iterative approach:\n # while (not st.isEmpty()):\n # point = st.pop() # (x,y)\n # if problem.isGoalState(point):\n # # print point\n # print pathfind\n # # print visited\n # elif (not (point in visited)):\n # visited.append(point)\n # # print pathfind, '\\n'\n # print visited, '\\n'\n # for child in problem.getSuccessors(point):\n # st.push(child[0])\n # pathfind[child[0]] = point #this preemptively adds!\n # util.raiseNotDefined()", "def depthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Stack() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***\"\n # Initialize a stack\n open = util.Stack()\n\n # Retrieve the init state\n initState = (problem.getStartState(), ['Stop'], 0)\n open.push(initState)\n closed = []\n\n while not open.isEmpty():\n currState = open.pop()\n currPos = currState[0]\n currPath = currState[1]\n currCost = currState[2]\n\n if problem.isGoalState(currPos):\n return currPath[1:]\n else:\n closed.append(currPos)\n if currState not in closed:\n successors = problem.getSuccessors(currPos)\n if len(successors) > 0:\n for each in successors:\n if each[0] not in closed:\n temp = (each[0], currPath+[each[1]], currCost+each[2])\n open.push(temp)\n return False", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n \"\"\"\n ALGORITH FOR DFS\n \n function graph-search(problem, fringe) retuen a sloution or failure\n \n closed <-- an empty set\n fringe <-- insert (make-node (initial-state [problem]), fringe)\n \n loop do :\n if fringe is empty then return failure\n node <-- Remove-front (fringe)\n if goal-test (problem, state[node]) then return node\n if state[node] is not in closed then \n add STATE[node] to closed\n for child-node in EXPAND(STATE[node],problem) do\n fringe <-- Insert (child-node, fringe)\n end\n end\n \"\"\"\n\n templist=[]\n explored = set()\n fringe = util.Stack()\n #print \"the stat node is : \", problem.getStartState()\n\n fringe.push((problem.getStartState(),templist))\n while (not fringe.isEmpty()):\n (currentNode,currDir) = fringe.pop()\n # print \"Pacman is 
currently at : \", currentNode\n if problem.isGoalState(currentNode):\n # print \" Goal State Found : \", currentNode\n pathToGoal = currDir\n break\n if not (currentNode in explored):\n # print \"Adding current node to explored\"\n explored.add(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n # print \"child node : \", childNode , \" is added \"\n fringe.push((childNode[0],currDir+[childNode[1]]))\n\n return pathToGoal", "def iterativeDeepeningSearch(problem):\n \"*** YOUR CODE HERE FOR TASK 1 ***\"\n\n # Retrieve the init state\n # state model ( (position, depth), path, cost)\n initState = ( (problem.getStartState(), 1) , ['Stop'], 0)\n limit = 1\n while True:\n # Initialization each iteration\n open = util.Stack()\n open.push(initState)\n closed = {}\n\n while not open.isEmpty():\n currState = open.pop()\n currPos = currState[0][0]\n currDepth = currState[0][1]\n currPath = currState[1]\n currCost = currState[2]\n\n closed[currPos] = currCost\n if currDepth <= limit:\n successors = problem.getSuccessors(currPos)\n if len(successors) > 0:\n nextDepth = currDepth + 1\n for each in successors:\n nextCost = currCost + each[2]\n nextPath = currPath + [each[1]]\n if each[0] not in closed.keys() or nextCost < closed[each[0]]:\n temp = ( (each[0], nextDepth), nextPath, nextCost)\n open.push(temp)\n if problem.isGoalState(temp[0][0]):\n return nextPath[1:]\n limit += 1", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n from game import Directions\n visited = set() # unique elements\n state = problem.getStartState()\n #returns starting agent's position\n waiting_list = util.Stack()\n # LIFO\n # last in first out\n # parents = collections.defaultdict(collections.UserDict)\n parents = {}\n #dictionary\n sequence = []\n #LIFO\n for action in problem.getSuccessors(state):\n # in order to push full-state values\n waiting_list.push(action)\n # enumarating tuple\n\n while not waiting_list.isEmpty():\n state = waiting_list.pop()\n \n visited.add(state[0])\n # node is visited and we wont visit those nodes\n \n for substate in problem.getSuccessors(state[0]):\n # take a look to successors of current node\n \n if substate[0] not in visited:\n # if not in visited \n # saving parents\n parents[substate[0]]={'parent':state} \n # generate new node\n waiting_list.push(substate)\n # push to stack\n if problem.isGoalState(substate[0]): \n target_state = substate \n #finding wayback\n\n\n while target_state[0] in parents.keys():\n temp=parents[target_state[0]]['parent']\n sequence.append(target_state[1])\n target_state = temp\n sequence.append(target_state[1])\n return sequence[::-1]", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe = util.Stack()\n return GraphSearch(problem, 'dfs').search(fringe)", "def solve(problem):\n\n # *** YOUR CODE HERE ***\n\n # The core of Iterative Deepening Search are iterations of Depth Limited\n # Search with given increasing depth.\n\n # A recursive version of Depth Limited Search\n def depth_limited_search(problem, limit):\n \"\"\"\n Return a list of nodes we traversed (or None).\n :param problem: the starting set up.\n :param limit: a given numeric depth limit.\n :return: a list of nodes.\n \"\"\"\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if 
problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path\n\n import sys\n for depth in range(sys.maxsize): # depth from 0 to infinity\n print(\"Lower-bound of the optimal cost is {}\".format(depth))\n res2 = depth_limited_search(problem, depth)\n if res2 is not None:\n action_list = list()\n for move in res2:\n action_list.append(move[1]) # recall index 0 is the parent\n # do not forget a None returned in iteration 0 (with depth 0)\n action_list.remove('None')\n return action_list", "def sudokuDepthFirstSearch(problem):\n\n def convertStateToHash(values):\n \"\"\" \n values as a dictionary is not hashable and hence cannot be used directly in the explored/visited set.\n This function changes values dict into a unique hashable string which can be used in the explored set.\n You may or may not use this\n \"\"\"\n l = list(sorted(values.items()))\n modl = [a+b for (a, b) in l]\n return ''.join(modl)\n\n ## YOUR CODE HERE\n root_node = Node(problem.getStartState(), [], 0, None, 0)\n frontier = util.Stack()\n frontier.push(root_node)\n explored = []\n\n while not(frontier.isEmpty()):\n node_to_explore = frontier.pop()\n\n if problem.isGoalState(node_to_explore.state):\n return node_to_explore.state\n else:\n copy_state = node_to_explore.state.copy()\n \n if convertStateToHash(copy_state) not in explored:\n\t explored.append(convertStateToHash(copy_state))\n\t successors_state = problem.getSuccessors(copy_state)\n\t if len(successors_state) > 0:\n\t\t for state_action_cost in successors_state:\n\t\t if convertStateToHash(state_action_cost[0]) in explored:\n\t\t continue\n\t\t else:\n\t\t frontier.push(Node(state_action_cost[0], state_action_cost[1], node_to_explore.path_cost + 1, node_to_explore, node_to_explore.depth + 1))\n\n return False\n # util.raiseNotDefined()", "def solve(self):\n self.left -= len(self.nodes)\n \n def depths(x,depth = 0):\n depth+=1\n for y in self.graph[x]:\n if y in self.nodes:\n self.nodes.remove(y)\n depth = depths(y,depth)\n return depth\n \n while len(self.nodes):\n x = self.nodes.pop()\n self.firstGen.append(depths(x))\n #print self.graph\n #print self.nodes\n #print self.firstGen", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n Pilha_Caminho = Stack()\n Pilha_Estados = Stack()\n Caminho = []\n Visitados = []\n\n Pilha_Caminho.push(Caminho) # empilha caminho (vazio, no começo)\n Pilha_Estados.push(problem.getStartState()) # empilha estado inicial\n\n while (Pilha_Caminho.isEmpty() == False and Pilha_Estados.isEmpty() == False):\n Caminho_Andado = Pilha_Caminho.pop() # atualiza caminho\n Estado_Atual = Pilha_Estados.pop() # atualiza estado\n if problem.isGoalState(Estado_Atual): # caso estado atual seja o desejado,\n return Caminho_Andado # retorna o caminho total\n if Estado_Atual not in Visitados: # caso estado atual não tenha sido visitado\n Visitados.append(Estado_Atual) # marca estado como visitado\n for Sucessor in 
problem.getSuccessors(Estado_Atual): # busca sucessores\n if Sucessor[0] not in Visitados: # caso sucessor não tenha sido visitado\n Pilha_Caminho.push(Caminho_Andado + [Sucessor[1]]) # atualiza caminho total na pilha\n Pilha_Estados.push(Sucessor[0]) # atualiza estado\n return", "def depthFirstSearch(problem):\n\n\n no = problem.getStartState()\n if (problem.isGoalState(no)):\n return []\n \n pilha = util.Stack()\n pilha.push((no, []))\n \n explorados = []\n \n while not pilha.isEmpty():\n (no, caminho) = pilha.pop()\n \n if problem.isGoalState(no):\n return caminho\n \n explorados.append(no)\n for filho in problem.getSuccessors(no):\n if (filho[0] not in explorados):\n pilha.push((filho[0], caminho + [filho[1]]))\n\n return []", "def sudokuDepthFirstSearch(problem):\n\n def convertStateToHash(values):\n \"\"\" \n values as a dictionary is not hashable and hence cannot be used directly in the explored/visited set.\n This function changes values dict into a unique hashable string which can be used in the explored set.\n You may or may not use this\n \"\"\"\n l = list(sorted(values.items()))\n modl = [a+b for (a, b) in l]\n return ''.join(modl)\n\n # YOUR CODE HERE\n frontier = util.Stack()\n explored = set()\n initialState = problem.getStartState()\n frontier.push(initialState)\n while not frontier.isEmpty():\n choice = frontier.pop()\n if convertStateToHash(choice) not in explored:\n if problem.isGoalState(choice):\n return choice\n successors = problem.getSuccessors(choice)\n for successor in successors:\n frontier.push(successor[0])\n explored.add(convertStateToHash(choice))\n # util.raiseNotDefined()", "def depthFirstSearch(problem):\n container = util.Stack() \n return depthOrBreadthFirstSearch(problem, container)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n start = problem.getStartState()\n stack = util.Stack() # stack to keep track of frontier nodes where pacman has move\n stack.push(start)\n explored = set() # to keep track of explored areas\n route = []\n\n while not stack.isEmpty():\n current_position = stack.pop()\n explored.add(current_position)\n\n if problem.isGoalState(current_position):\n break\n for each in problem.getSuccessors(current_position):\n if each[0] not in explored: # x,y coordinates of positions we haven't visited are pushed onto stack\n # print(each)\n stack.push(each[0])\n route.append((current_position, each[0], each[1])) # record of movements to rebuild path (from,to,how)\n\n x = len(route)\n while x - 1 != 0: # loop clears out actions that dont come from previous position\n if route[x - 1][0] != route[x - 2][1]: # starts from goal and works backwards\n route.remove(route[x - 2])\n x = len(route)\n else:\n x -= 1\n # print(route)\n return [action[2] for action in route]", "def solveL(self,level=-1) :\n for g in self.L() :\n if level >= 0 :\n printIndent('%s' % (g,),level=level)\n print \n if g.solveR(level=(level+1) if level>=0 else level) is None : \n return g\n return None", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n visited_nodes = []\n start_node = problem.getStartState()\n visited_nodes.append(start_node)\n curr_node = start_node\n q = util.Queue()\n directions = util.Queue()\n q.push(curr_node)\n goal_found = problem.isGoalState(curr_node)\n\n while not goal_found:\n nxt_node_list = problem.getSuccessors(curr_node)\n nxt_node_found = False\n\n # Check if a child can be found which has not been visited\n for node in nxt_node_list:\n nxt_node = node[0]\n move = node[1]\n if nxt_node not in visited_nodes:\n 
nxt_node_found = True # mark that a child node has been found\n q.push(nxt_node) # add the node in the tree\n directions.push(move) # add the direction\n visited_nodes.append(nxt_node) # mark the node as visited\n break\n\n # If child not found, go to parent\n if not nxt_node_found:\n q.list.pop(0)\n directions.list.pop(0)\n\n if q.isEmpty(): break\n\n curr_node = q.list[0]\n goal_found = problem.isGoalState(curr_node)\n\n final_moves = []\n while not directions.isEmpty():\n final_moves.append(directions.pop())\n \n return final_moves\n #util.raiseNotDefined()", "def depth_first_solve(puzzle):\n # instantiating stack using new PuzzleNode (root)\n root = PuzzleNode(puzzle)\n stack = deque([root])\n tracker = deque([])\n\n # while puzzle still has moves to make (or is not solved yet)\n while len(stack) > 0:\n\n current = stack.pop() # update current node\n\n # if child node is solved, return it\n if current.puzzle.is_solved():\n return newNode\n\n if current not in tracker:\n\n tracker.append(current)\n extensions = current.puzzle.extensions() # gather moves to make\n\n # loop through extensions (depth)\n for i in range(len(extensions)):\n newNode = PuzzleNode(extensions[i], [], current)\n current.children.append(newNode) # add as child of current node\n if newNode not in tracker:\n stack.append(newNode) # add to stack\n\n\n\n return None # no solution was found", "def search(start):\n\n '''\n Create a class named nodeClass which contains 4 elements: \n state: The puzzle object containing the puzzle board at the node \n misplaced: num of misplaced tiles\n depth: depth of the node in the tree \n prev: parent node\n '''\n nodeClass = namedtuple('nodeClass', 'state, misplaced, depth, prev')\n\n #instantiate object from class creating the root node\n node = nodeClass(start, 0, 0, None)\n\n #stores the nodes that are going to be explored. 
\n #the node with lower f-score is explored first\n frontier = q.PriorityQueue()\n frontier.put((0,node))\n\n # frontier_set keep track of the nodes in the frontier queue\n frontier_set = {node}\n #contains the board states already explored\n explored_states = set()\n for ite in range(1,max_iterations+2):#while True:\n #Retrieve the node in the frontier with lowest value\n node = frontier.get()[1]\n\n #get the puzzle board obj from the node object\n state = node.state\n\n #Check if the game has ben solved\n if state.solved or ite==max_iterations:\n Result = namedtuple('Result', 'board, depth, nodesExpanded, max_depth, isSolved')\n return Result(state, node.depth, ite, max(no.depth for no in frontier_set), state.solved)\n\n # expanded nodes are added to explored set\n explored_states.add(state)\n\n #EXPANDING\n for mov in state.possible_moves:\n new_state=state.move(mov)\n new_node = nodeClass(new_state, new_state.score,\n node.depth + 1, node)\n\n #compute f-score of the node\n f_score=new_state.score + new_node.depth\n\n if new_state not in explored_states and new_node not in frontier_set:\n frontier.put((f_score,new_node))\n frontier_set.add(new_node)", "def depthFirstSearch(problem):\n #print \"Start:\", problem.getStartState()\n #print \"Is the start a goal?\", problem.isGoalState(problem.getStartState())\n #print \"Start's successors:\", problem.getSuccessors(problem.getStartState())\n \n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n\n #created a frontier Stack for DFS\n #Here the stack acts as a LIFO stack\n neighbourNodes = util.Stack()\n #created a list of moves which will be returned in then end\n moves = []\n #pushed the start node and empty moves list, onto the frontier stack\n neighbourNodes.push((problem.getStartState(),moves))\n #this is a set of nodes which have been seen, to avoid adding nodes already visited \n seenNodes = set()\n #condition evaluated based on the existence of elements in the frontier stack\n while not neighbourNodes.isEmpty():\n #last node in the stack is popped and its state and action is stored\n poppedNodeState, poppedNodeAction = neighbourNodes.pop()\n #condition to check if the node is already been visited\n if(poppedNodeState in seenNodes):\n #if yes then it just skips the iteration using the continue statement\n continue\n #condition to check if the current node is the goal node\n if problem.isGoalState(poppedNodeState):\n #if yes then return the action or moves to be performed list\n return poppedNodeAction\n #if not visited before then node is added to the seenNodes set\n seenNodes.add(poppedNodeState)\n #loop to parse the successor nodes and check and add them to the frontier stack\n for state, action, cost in problem.getSuccessors(poppedNodeState):\n #checking if the successor node has already been visited before\n if(state in seenNodes):\n #if yes then it skips that node\n continue\n #else it adds that successor along with it action appeneded with the already existing actions\n neighbourNodes.push((state, poppedNodeAction+[action]))\n #the list of moves if finally returned\n return moves\n #util.raiseNotDefined()", "def depth_first_search(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = []\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.append(state)\r\n\r\n while(len(fringe) > 0):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if 
problem.is_goal_state(state):\r\n break\r\n\r\n #states = problem.get_successors(state)\r\n for stat in states:\r\n if stat[0] not in path and stat[0] not in fringe:\r\n fringe.append(stat[0])\r\n\r\n while(True):\r\n if state == problem.get_start_state():\r\n break\r\n for key, val in acts.items():\r\n for va in val: #( x, y, z)\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n\r\n final.reverse()\r\n\r\n return final", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # util.raiseNotDefined()\n\n # print(\"Start:\", problem.getStartState())\n # print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n # print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n\n # Initialize a frontier, and push the initial state into the frontier\n frontier = util.Stack()\n frontier.push([(problem.getStartState(), 'move', 0)])\n # Initialize a explored set to store the visited nodes\n exploredSet = set()\n\n # Check the content of frontier\n while not frontier.isEmpty():\n stateList = list()\n stateList = frontier.pop()\n # print (stateList)\n # What we focus on is the next state, not the (previous state + next state), so we should take the last element\n nextState = stateList[len(stateList) - 1]\n # Check the current state is goal or not\n if problem.isGoalState(nextState[0]):\n # Initial a path, which is the way to the goal state\n path = list()\n for eachMove in stateList:\n path.append(eachMove[1])\n # If the initial state is the goal state, there's no need to explore other nodes, so that's called special condition\n if len(path) == 1:\n return path[0]\n # This is the normal condition, we should convey the path except the first one, because we haven't define what's \"move\"\n else:\n return path[1:]\n # If this is a state which we don't visit, add it to the explored set(this is called GSA)\n if not nextState[0] in exploredSet:\n exploredSet.add(nextState[0])\n # Give me your child nodes\n for childState in problem.getSuccessors(nextState[0]):\n nextStateList = stateList[:]\n # we focus on the path, so we have to record the every move from the initial state to the current one\n nextStateList.append(childState)\n frontier.push(nextStateList)\n\n # Or maybe there's no way to the goal state\n else:\n return \"There's no way.\"", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n #Stack to hold the node that have been visited along with the path taken from the start node to reach that node.\n stack = Stack()\n #Set to hold the node explored.\n explorednode = set()\n #Get the start node.\n startnode = problem.getStartState()\n #Push the starting node on the Stack along with an empty set to know the direction in order to reach the node.\n stack.push((startnode,[]))\n #Loop till the stack is empty\n while stack.isEmpty() is not True:\n #Pop the currentnode and the direction from the stack\n currentnode, direction = stack.pop()\n #We will now add the node to set of explored node.\n explorednode.add(currentnode)\n #If the node is the goal. 
We made it!!\n if problem.isGoalState(currentnode):\n #print currentnode, direction\n #The direction holds the way to reach till the goal from the start node.\n #print direction\n return direction\n #Loop for each successor(child) of the current node.\n for (successor, action, stepCost) in problem.getSuccessors(currentnode):\n #If the successor(child) is not explored\n if successor not in explorednode:\n #Add the successor to the stack along with the path to reach it.\n stack.push((successor, direction + [action]))", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n stack = util.Stack() # stack for searshing the graph\n visited = [] # Keep track of visited nodes\n start =problem.getStartState() # The start node\n stack.push((start, [])) # the sart state and empty path list is pushed to the stack\n \n while stack:\n (vrtx, path) = stack.pop() # Pop tfrom the stack , vrtx: the poped node for expantion.\n if vrtx not in visited: # if the node is visited alraedy \n if problem.isGoalState(vrtx):\n return [p[1] for p in path]\n visited.append(vrtx)\n for successor in problem.getSuccessors(vrtx):\n stack.push((successor[0], path+[successor]))\n util.raiseNotDefined()", "def depth_first_search(problem):\n fringe = util.Stack()\n return general_search(problem, fringe)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n '''\n print \"Start:\", problem.getStartState()\n print \"Is the start a goal?\", problem.isGoalState((2,2))\n print \"Start's successors:\", problem.getSuccessors((1,1))\n suc=problem.getSuccessors(problem.getStartState())\n actionList=[]\n stateList=[]\n import random\n randomNum=random.randrange(0,len(suc),1)\n \n \n print len(suc)\n #for i in range(1000):\n while not problem.isGoalState(suc[randomNum][0]):\n\tprint randomNum\n\trandomNum=random.randrange(0,len(suc),1)\n\trandomAction=suc[randomNum][1]\n\t\n \t#print randomNum\n\tif suc[randomNum][0] not in stateList:\n\t\tstateList.append(suc[randomNum][0])\n\t\tactionList.append(randomAction)\n \t\tsuc=problem.getSuccessors(suc[randomNum][0]) \n \n #actionList.append(suc[randomNum][0])\n #if kiki==0:\n print actionList\n \n return actionList\n\n\n #util.raiseNotDefined()\n '''\n return DFS(problem,problem.getStartState(),[])", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n frontier = util.Stack()\n start_node = problem.getStartState()\n\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,[]))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n explored.add(node[0])\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1][:]\n actions.append(action)\n new_node = (nextState, actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state #state of the game\n self.parent = parent #parent of the node\n self.action = action #action that led to that node\n self.pathCost = pathCost #total cost of tha path until that node\n\n def solution(self): #return the path to the goal node\n path = [] #path is a list of actions\n tempNode = self #temp node is the goal node\n while tempNode.state != problem.getStartState(): #until we get to the initial node\n path.insert(0, 
tempNode.action) #insert at the start of the list\n tempNode = tempNode.parent #go to the parent of the node\n return path #return list of actions\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost #total cost is the total cost of the parent + the cost of the last action\n child = Node(successor, parent, action, pathCost) #create new child node\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0) #create initial node with start state and no parent\n if problem.isGoalState(initialNode.state):\n return initialNode.solution()\n\n frontier = util.Stack() #dfs uses a stack\n frontier.push(initialNode) #insert initial node to the stack\n explored = set() #explored nodes are added to a set\n\n while not frontier.isEmpty(): #while stack is not empty\n nextNode = frontier.pop() #extract the last node entered\n explored.add(nextNode.state) #add the state of the node to the explored set\n for successor, action, stepCost in problem.getSuccessors(nextNode.state): #for every successor create a new child\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored and child not in frontier.list: #if child is not already explored or is not in the stack\n if problem.isGoalState(child.state): # if node is goal node we return the path of actions\n return child.solution()\n frontier.push(child) #insert it into the stack\n\n return [] #if stack is empty\n util.raiseNotDefined()", "def depthFirstSearch(problem):\n \n from game import Directions\n North = Directions.NORTH\n South = Directions.SOUTH\n East = Directions.EAST\n West = Directions.WEST \n \n pathDict = {}\n visited = set()\n #visited start\n visited.add(problem.getStartState())\n #initial successors\n successor = problem.getSuccessors(problem.getStartState())\n for initSucc in successor:\n pathDict[initSucc[0]] = [initSucc[1]]\n #loop\n while (1):\n #if fringe = null, return failure\n if (len(successor) == 0):\n print \"Fringe is empty\"\n return util.raiseNotDefined()\n #(v, path) = fringe.pop\n succLocation = successor[0][0]\n succDirection = successor[0][1]\n del successor[0]\n #if isGoal = true, return path\n if problem.isGoalState(succLocation):\n return pathDict[succLocation]\n #if visited = false\n if succLocation not in visited:\n #visited = true\n visited.add(succLocation)\n #L = expand(v,path)\n tempSuccList = problem.getSuccessors(succLocation)\n #Fringe <- L\n for succ in reversed(tempSuccList):\n successor.insert(0,succ)\n pathDict[succ[0]] = []\n pathDict[succ[0]].extend(pathDict[succLocation])\n pathDict[succ[0]].append(succ[1])", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # Frontier stored in a Stack\n frontier = util.Stack()\n\n # Visited states stored in a list\n visitedStates = []\n\n # Format of each element: (current coordinates, [path taken to get there]) \n frontier.push((problem.getStartState(), []))\n\n # while there are still states to explore\n while not frontier.isEmpty():\n \n # store the current state and path in separate variables\n currentState, pathTaken = frontier.pop()\n\n # for skipping states that have already been visited\n if currentState in visitedStates:\n continue\n\n # for returning the correct path to the goal state upon discovering it\n if problem.isGoalState(currentState):\n return pathTaken\n\n # count the current state as \"visited\"\n visitedStates.append(currentState)\n\n # for each successor state, check whether they have already been visited. 
if not, add their coordinates to the frontier, and append their respective direction to the path list\n for coordinates, direction, cost in problem.getSuccessors(currentState):\n\n if coordinates not in visitedStates:\n \n frontier.push((coordinates, pathTaken + [direction]))\n\n\n util.raiseNotDefined()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n\n frontier = Stack()\n explored = []\n actions = []\n\n class node:\n def __init__(self, path, dad, action):\n self.path = path\n self.dad = dad\n self.action = action\n\n start = node(problem.getStartState(),'','')\n frontier.push(start)\n\n while frontier.isEmpty() == False:\n path = frontier.pop()\n successors = problem.getSuccessors(path.path)\n explored.append(path)\n for vertex in successors:\n achou = False\n for path_ex in explored:\n if vertex[0] == path_ex.path:\n achou = True\n if achou == False:\n successor = node(vertex[0],path.path,vertex[1])\n frontier.push(successor)\n if problem.isGoalState(successor.path):\n while len(explored) > 0:\n ant = explored.pop()\n if ant.path == successor.dad:\n actions.append(successor.action)\n successor = ant\n actions.reverse()\n return actions", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # initialize frontier using initial state of problem\n current_state = problem.getStartState()\n frontier = util.Stack()\n frontier.push(current_state)\n\n # initialize explored set to be empty\n explored_set = []\n\n # a dictionary to save how to get to certain states from initial state\n actions_list = {current_state:[]}\n\n # loop while we still have unexplored nodes\n while not frontier.isEmpty():\n\n # choose a leaf node and remove it from frontier\n leaf_node = frontier.pop()\n\n # return the solution if it is the goal state\n if problem.isGoalState(leaf_node):\n return actions_list[leaf_node]\n\n # add the node to explored set\n explored_set.append(leaf_node)\n\n # expand the chosen node\n # and add to the frontier if not in frontier and explored set\n for successor in problem.getSuccessors(leaf_node):\n child, action, _ = successor\n if child not in explored_set and child not in frontier.list:\n frontier.push(child)\n actions_list[child] = actions_list[leaf_node] + [action]\n else:\n # search through all but still can't find a solution -> failed!\n return 'failure'", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n\n result = []\n st = util.Stack()\n visited = set([])\n\n current = problem.getStartState()\n st.push((current, \"\", 0))\n\n while not st.isEmpty():\n while st.top()[0] in visited:\n st.pop()\n result.pop()\n\n current = st.top()\n visited.add(current[0])\n\n if current[1] != \"\":\n result.append(current[1])\n\n if problem.isGoalState(current[0]):\n break\n\n for each in problem.getSuccessors(current[0]):\n if each[0] not in visited:\n st.push(each)\n\n path = []\n for each in result:\n if each == \"South\":\n path.append(s)\n elif each == \"West\":\n path.append(w)\n elif each == \"North\":\n path.append(n)\n else:\n path.append(e)\n\n return path\n util.raiseNotDefined()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #util.Stack() = LIFO for DFS\n #travel down path until end of line unlike BFS, backtrack until there is another path\n\n visited = []\n\n frontier = util.Stack()\n frontier.push( (problem.getStartState(), []) ) \n\n while not frontier.isEmpty():\n node,actions = 
frontier.pop()\n\n if problem.isGoalState(node):\n return actions\n\n visited.append(node)\n\n for coord,direction,cost in problem.getSuccessors(node):\n if not coord in visited:\n frontier.push((coord, actions + [direction]))\n\n return []", "def depthFirstSearch(problem):\n marcado = set()\n pilha = util.Stack()\n pilha.push((problem.getStartState(), []))\n while not pilha.isEmpty():\n posicao, movimento = pilha.pop()\n if problem.isGoalState(posicao):\n return movimento\n if posicao in marcado:\n continue\n marcado.add(posicao)\n candidatos = problem.getSuccessors(posicao)\n for candidato, acao, custo in candidatos:\n pilha.push((candidato, movimento + [acao]))\n return []", "def solve(self, mode):\n if mode == 'depth-first':\n print(\"Solving maze with depth-first algorithm\")\n frontier = StackFrontier()\n elif mode == 'breadth-first':\n print(\"Solving maze with breadth-first algorithm\")\n frontier = QueueFrontier()\n else:\n raise Exception(f\"{mode} mode doesn't exist (yet)!\")\n\n time_start = time.time()\n start = Node(self.start, None, None)\n frontier.add(start)\n\n while True:\n if frontier.is_empty():\n raise Exception(\"There is no solution for this maze.\")\n\n node = frontier.remove()\n if node.state == self.target:\n time_end = time.time()\n time_duration = time_end - time_start\n print(f'Solution found in {time_duration}s, explored {len(self.explored_nodes)} nodes'\n '\\n' 'Retracing path')\n cells = []\n actions = []\n\n while node.parent is not None:\n cells.append(node.state)\n actions.append(node.action)\n node = node.parent\n\n cells.reverse()\n actions.reverse()\n self.solution = (cells, actions)\n return\n\n self.explored_nodes.add(node.state)\n for action, state in self.get_neighbors(node.state):\n if not frontier.contains(state) and state not in self.explored_nodes:\n child = Node(state, node, action)\n frontier.add(child)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n startState=problem.getStartState()\n currentLocation = startState \n\n #for GSA implementation\n exploredStates = []\n exploredStates.append(startState)\n \n #To transform the graph to stack for better access in DFS\n frontierStack = util.Stack()\n for frontier in problem.getSuccessors(startState):\n frontierRoute = frontier + (frontier[1],)\n frontierStack.push(frontierRoute)\n\n currentRoute = []\n\n #start DFS\n while not(frontierStack.isEmpty()):\n currentStage = frontierStack.pop()\n currentState = currentStage[0]\n currentRoute = currentStage[3]\n\n if problem.isGoalState(currentState): \n break\n if currentState not in exploredStates:\n for frontier in problem.getSuccessors(currentState):\n if frontier[0] not in exploredStates:\n nextRoute = currentRoute + \",\" + frontier[1]\n frontierRoute = frontier + (nextRoute,)\n frontierStack.push(frontierRoute)\n exploredStates.append(currentState)\n \n return currentRoute.split(\",\")\n\n util.raiseNotDefined()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringeList = util.Stack()\n print \"fringeList\",fringeList\n closedList = {str(problem.getStartState()): ([])} #Hash Map to maintain state to path\n print \"closed list:\", closedList\n isGoalStateArrived = False\n\n # Push start state into fringeList\n fringeList.push((problem.getStartState()))\n\n while not isGoalStateArrived and not fringeList.isEmpty():\n currentNode = fringeList.pop()\n print \"currentNode\",currentNode\n currentNodePath = closedList[str(currentNode)]\n print \"currentNodepath:\",currentNodePath\n # Explore children\n 
childrenOfCurrentNode = problem.getSuccessors(currentNode)\n print \"childrenOfCurrentNode:\",childrenOfCurrentNode\n for childNode in childrenOfCurrentNode:\n if str(childNode[0]) not in closedList:\n path = copy.copy(currentNodePath)\n path.append(childNode[1])\n print \"child [0] %s, child [1] %s\", childNode[0],childNode[1]\n print \"path \", path\n fringeList.push(childNode[0])\n closedList[str(childNode[0])] = path # Put parent node in closed List\n if problem.isGoalState(childNode[0]):\n isGoalStateArrived = True\n goalState = childNode[0]\n break\n\n if isGoalStateArrived:\n #print closedList[str(problem.getStartState())]\n return closedList[str(goalState)]\n \"util.raiseNotDefined()\"", "def search(board:Board, max_depth=3) -> DiGraph:\n\n n = 0 # node label which also serves as a node counter\n depth = 0\n \n G = nx.DiGraph()\n G.add_node(0, winner=None, player=0, board=board.state, board_p = board.display)\n \n # First branch in look ahead\n newleavelist=[]\n parent_node = n\n parent_board = Board(G.nodes[n]['board'][0], G.nodes[n]['board'][1])\n\n for move in ALL_MOVES:\n moves_available = parent_board.moves_available(player=0)\n if move not in moves_available:\n continue\n \n # Do move\n new_board = parent_board.update_board(Move(player=0, move=move))\n \n # Add move node to graph\n n=n+1\n G.add_node(n, winner=new_board.is_winner, player=1, board=new_board.state, board_p = new_board.display)\n G.add_edge(parent_node, n, move=move)\n if new_board.is_winner:\n continue\n newleavelist.append(n)\n \n depth=1\n # subsequent branches\n while depth < max_depth:\n leavelist = newleavelist[:]\n newleavelist = []\n for leave in leavelist: \n # Get parent board\n parent_board = Board(G.nodes[leave]['board'][0], G.nodes[leave]['board'][1])\n for move in ALL_MOVES:\n moves_available = parent_board.moves_available(player=depth%2)\n if move not in moves_available:\n continue\n # Do move\n new_board = parent_board.update_board(Move(player=depth%2, move=move))\n # Add move node to graph\n n=n+1\n G.add_node(n, winner=new_board.is_winner, player=1-depth%2, \n board=new_board.state, board_p=new_board.display)\n G.add_edge(leave, n, move=move)\n if new_board.is_winner:\n continue\n \n newleavelist.append(n)\n depth=depth+1\n return G", "def graph_search(problem, open_nodes):\n explored = [problem.initial]\n open_nodes.append(Node(problem.initial))\n while len(open_nodes) > 0:\n node = open_nodes.pop()\n if problem.goal_test(node.state):\n #print \"Path cost: %d\" % node.path_cost\n print 'Broj poteza: ' + str(len(node.solution())-1)\n return node.solution()\n for child in node.expand(problem):\n if child.state not in explored:\n open_nodes.append(child)\n explored.append(child.state)\n return None", "def Find_Path(self):\n closed_nodes_map = [] # map of closed (tried-out) nodes\n open_nodes_map = [] # map of open (not-yet-tried) nodes\n dir_map = [] # map of directions\n row = [0] * self.n\n for i in range(self.m): # create 2d arrays\n closed_nodes_map.append(list(row))\n open_nodes_map.append(list(row))\n dir_map.append(list(row))\n \n pq = [[], []] # priority queues of open (not-yet-tried) nodes\n pqi = 0 # priority queue index\n # create the start node and push into list of open nodes\n n0 = node(self.xStart, self.yStart, 0.0, 0.0)\n n0.updatePriority(self.xFinish, self.yFinish)\n heappush(pq[pqi], n0)\n open_nodes_map[self.yStart][self.xStart] = n0.priority # mark it on the open nodes map\n \n # A* search\n while len(pq[pqi]) > 0:\n # get the current node w/ the highest priority\n # from the 
list of open nodes\n n1 = pq[pqi][0] # top node\n n0 = node(n1.xPos, n1.yPos, n1.distance, n1.priority)\n x = n0.xPos\n y = n0.yPos\n heappop(pq[pqi]) # remove the node from the open list\n open_nodes_map[y][x] = 0\n # mark it on the closed nodes map\n closed_nodes_map[y][x] = 1\n \n # quit searching when the goal state is reached\n if x == self.xFinish and y == self.yFinish:\n # Generate the path from finish to start by following the \n # directions.\n return self.Reconstruct_Path(dir_map)\n \n # generate moves (child nodes) in all possible directions\n for i in range(self.num_directions):\n new_x = x + self.dx[i]\n new_y = y + self.dy[i]\n Flag=True\n if not (new_x < 0 or new_x > self.n-1 or new_y < 0 or new_y > self.m - 1\n or self.MAP[new_y][new_x] == 1 or closed_nodes_map[new_y][new_x] == 1):\n # Check to see if the extended path runs through any obstacles\n if (abs(self.dx[i])>1 or abs(self.dy[i])>1):\n # Need to check that the path does not pass an object\n JumpCells=2*max(abs(self.dx[i]),abs(self.dy[i]))-1\n for K in range(1,JumpCells):\n YPOS=int(round(K*1.0*self.dy[i]/JumpCells))\n XPOS=int(round(K*1.0*self.dx[i]/JumpCells))\n if (self.MAP[y+YPOS][x+XPOS]==1):\n Flag=False\n if Flag: \n # generate a child node\n m0 = node(new_x, new_y, n0.distance, n0.priority)\n m0.calc_cost(self.dx[i], self.dy[i])\n m0.updatePriority(self.xFinish, self.yFinish)\n # if it is not in the open list then add into that\n if open_nodes_map[new_y][new_x] == 0:\n open_nodes_map[new_y][new_x] = m0.priority\n heappush(pq[pqi], m0)\n # mark its parent node direction\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n elif open_nodes_map[new_y][new_x] > m0.priority:\n # update the priority info\n open_nodes_map[new_y][new_x] = m0.priority\n # update the parent direction info\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n # replace the node\n # by emptying one pq to the other one\n # except the node to be replaced will be ignored\n # and the new node will be pushed in instead\n while not (pq[pqi][0].xPos == new_x and pq[pqi][0].yPos == new_y):\n heappush(pq[1 - pqi], pq[pqi][0])\n heappop(pq[pqi])\n heappop(pq[pqi]) # remove the wanted node\n # empty the larger size pq to the smaller one\n if len(pq[pqi]) > len(pq[1 - pqi]):\n pqi = 1 - pqi\n while len(pq[pqi]) > 0:\n heappush(pq[1-pqi], pq[pqi][0])\n heappop(pq[pqi]) \n pqi = 1 - pqi\n heappush(pq[pqi], m0) # add the better node instead\n return '','' # no route found", "def depthFirstSearch(problem):\n\n\n \"*** YOUR CODE HERE ***\"\n st = util.Stack()\n strt = problem.getStartState()\n st.push(strt) \n visited = []\n came_from ={}\n came_from [strt] =(None,None)\n\n while not st.isEmpty():\n state = st.pop()\n if state in visited :\n continue\n visited.append(state)\n if problem.isGoalState(state) :\n break\n nodes = problem.getSuccessors(state)\n for (successor,action,cost) in nodes:\n if successor not in visited :\n st.push(successor)\n came_from[successor] = (state , action) \n \n # exit while\n actions = []\n while(state != strt) :\n (parent,action) =came_from[state]\n state = parent\n actions.append(action)\n actions.reverse()\n return actions", "def graph_search(problem, open_nodes):\n explored = [problem.initial]\n open_nodes.append(Node(problem.initial))\n while len(open_nodes) > 0:\n node = open_nodes.pop()\n if problem.goal_test(node.state):\n #print \"Path cost: %d\" % node.path_cost\n return node.solution()\n for child in node.expand(problem):\n if child.state not in explored:\n 
open_nodes.append(child)\n explored.append(child.state)\n return None", "def solve_tour(self):\n\t\tboard = [[-1 for _ in range(self.N)]for _ in range(self.N)]\n\t\tboard[0][0] = 0\n\n\t\tz = self.find_tour(board, 0, 0, 1)\n\t\tif z:\n\t\t\tfor i in range(self.N):\n\t\t\t\tfor j in range(self.N):\n\t\t\t\t\tself.solution.append(board[i][j])\n\t\t\tprint board\n\t\t\treturn self.solution\n\t\t\t\t\n\t\telse:\n\t\t\tprint(\"No solution\")", "def search(values):\n \"Using depth-first search and propagation, try all possible values.\"\n ## Used the provided solutions to be sure that my implementation of diagonals and\n ## Twins is ok\n\n # First, reduce the puzzle using the previous function\n values = reduce_puzzle(values)\n if values is False:\n return False ## Failed earlier\n if all(len(values[s]) == 1 for s in boxes):\n return values ## Solved!\n # Choose one of the unfilled squares with the fewest possibilities\n n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\n # Now use recurrence to solve each one of the resulting sudokus, and\n for value in values[s]:\n new_sudoku = values.copy()\n new_sudoku[s] = value\n attempt = search(new_sudoku)\n if attempt:\n return attempt", "def df_search(grid, level):\n states_we_have_seen_before = Set(grid)\n\n def recur(inner_grid, itter, level):\n counter = 0\n next_states = Set()\n\n for gg in legal_moves(inner_grid):\n if gg not in states_we_have_seen_before:\n states_we_have_seen_before.add(gg)\n next_states.add(gg)\n\n for t in next_states:\n if match_level(t, level):\n return (size * size * size - itter, t)\n\n if itter > 0:\n for t in next_states:\n r = recur(t, itter - 1, level)\n if r:\n return r\n return None\n\n return recur(grid, size * size * size, level)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n mystack = util.Stack()\n startNode = (problem.getStartState(), '', 0, [])\n mystack.push(startNode)\n visited = set()\n while mystack :\n node = mystack.pop()\n state, action, cost, path = node\n if state not in visited :\n visited.add(state)\n if problem.isGoalState(state) :\n path = path + [(state, action)]\n break;\n succNodes = problem.expand(state)\n for succNode in succNodes :\n succState, succAction, succCost = succNode\n newNode = (succState, succAction, cost + succCost, path + [(state, action)])\n mystack.push(newNode)\n actions = [action[1] for action in path]\n del actions[0]\n return actions", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # current path stack\n path_stack = util.Stack()\n action_stack = util.Stack()\n path_stack.push(problem.getStartState())\n\n # visited (so don't )\n visited = []\n visited.append(problem.getStartState())\n\n i = 0\n while not path_stack.isEmpty():\n\n # check goal state\n if problem.isGoalState(path_stack.list[-1]): # check if goal\n return action_stack.list\n\n # get next possible state (choose first in list)\n successors = problem.getSuccessors(path_stack.list[-1])\n forward=False\n for successor in successors:\n ss,aa,_ = successor\n if ss not in visited:\n\n path_stack.push(ss)\n action_stack.push(aa)\n visited.append(ss) # you don't pop visited\n forward=True\n break\n\n # backtrack\n if forward==False:\n path_stack.pop()\n action_stack.pop()\n\n i+=1\n #if i==25:\n # import pdb; pdb.set_trace()\n #print(path_stack.list)", "def solve(self, board: List[List[str]]) -> None:\n m = len(board)\n if m == 0:\n return\n n = len(board[0])\n plus = m * n # 设定虚拟代表\n disjoint = [i for i in range(m * n + 1)] # 并查集尺寸增加1\n for i in [0, m - 1]:\n for j 
in range(n):\n if board[i][j] == \"O\":\n disjoint[i * n + j] = plus # 边界O指向虚拟代表\n for j in [0, n - 1]:\n for i in range(m):\n if board[i][j] == \"O\":\n disjoint[i * n + j] = plus # 边界O指向虚拟代表\n\n def find_root(i, j, disjoint, n):\n idx = i * n + j\n root = idx\n while disjoint[root] != root:\n root = disjoint[root]\n while disjoint[idx] != root:\n temp = idx\n idx = disjoint[idx]\n disjoint[temp] = root\n return root\n\n for i in range(m):\n for j in range(n):\n if board[i][j] == \"O\":\n root = find_root(i, j, disjoint, n)\n for dx, dy in [(1, 0), (0, 1)]:\n x, y = i + dx, j + dy\n if 0 <= x < m and 0 <= y < n and board[x][y] == \"O\":\n root_neibor = find_root(x, y, disjoint, n)\n if root == plus or root_neibor == plus:\n disjoint[root] = plus\n disjoint[root_neibor] = plus\n else:\n disjoint[root_neibor] = root\n for i in range(m):\n for j in range(n):\n if board[i][j] == \"O\":\n if find_root(i, j, disjoint, n) != plus:\n board[i][j] = \"X\"", "def depthFirstSearch(problem):\n\t#print(\"Start:\", problem.getStartState())\n\t#print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n\t#print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n\t\n\n\t\"*** YOUR CODE HERE ***\"\n\n\t# Create the stack, and visited array to keep track of visited nodes.\n\tdfsStack = util.Stack()\n\tvisited = []\n\t# Get the first state in the graph, push to the stack\n\tfirst = problem.getStartState()\n\tdfsStack.push([first, [], 0])\n\n\t# While the stack is not empty, pop the first node from the stack, and check if that state\n # is the goal state. If so, return the actions for that node. Otherwise, append that state\n # to the visited array, get its successors, and push them to the stack.\n\twhile not dfsStack.isEmpty():\n\t\tNewNode = dfsStack.pop()\n\t\tif((problem.isGoalState(NewNode[0]) == True)):\n\t\t\treturn NewNode[1]\n\t\tif(NewNode[0] not in visited):\n\t\t\tvisited.append(NewNode[0])\n\t\t\tfor NextNode in problem.getSuccessors(NewNode[0]):\n\t\t\t\tif NextNode[0] not in visited:\n\t\t\t\t\tdfsStack.push((NextNode[0], NewNode[1] + [NextNode[1]], NextNode[2]))", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n startState = problem.getStartState()\n visitedNodes = []\n actions = []\n fringe = util.Stack()\n cost = 0 \n if (problem.isGoalState(startState) == True):#if startState is the goalState\n return actions\n else :\n # Data Type Format : (currentState,actions,cost) based on errors I got :\\\n fringe.push((startState,actions,cost))\n while (fringe.isEmpty() == False) :\n currentState , actions , cost = fringe.pop()\n if(problem.isGoalState(currentState)):\n return actions\n \n elif ((currentState in visitedNodes) == False ):\n visitedNodes.append(currentState)\n currentNodeSuccessors = problem.getSuccessors(currentState)\n for node in currentNodeSuccessors :\n state , action , cost = node\n if ( (state in visitedNodes) == False ):\n newNode = (state , actions + [action] , cost)\n fringe.push(newNode)\n \n util.raiseNotDefined()", "def solve(self, repository, verbose=0):\n self.max_depth = 0\n solutions = []\n for solution in self.solve_all(repository, verbose):\n solutions.append(solution)\n return solutions", "def depthFirstSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n \n visitedlist = []\n st = Stack()\n outputlist = []\n st.push(problem.getStartState())\n visitedlist.append(problem.getStartState())\n recurseDFS(st,problem,visitedlist)\n 
if st.isEmpty():\n print \"No Path exist\"\n else:\n while not st.isEmpty():\n value = st.pop()\n if len(value) == 2:\n continue\n if value[1] == 'South':\n outputlist.append(s)\n elif value[1] == 'North':\n outputlist.append(n)\n elif value[1] == 'East':\n outputlist.append(e)\n elif value[1] == 'West':\n outputlist.append(w)\n \n return outputlist[::-1]", "def solve(self):", "def run(self, max_depth):\n while len(self.stack) > 0:\n state = self.get_next_state()\n\n if state.is_solution():\n self.solutions.append(state.moves)\n\n if len(state.moves) < max_depth:\n self.create_children(state)\n\n self.archive[state.get_tuple()] = len(state.moves)\n\n # sort solutions best to worst\n self.solutions.sort(key=len)\n\n if self.solutions:\n return self.solutions[0]\n\n print(\"This depth is not sufficient.\")\n return []", "def dpsearch(points,k):\n\t#M = k\n\tpoints = np.sort(points,axis=0)\n\tL = len(points)\n\tM = k\n\tT = list(np.zeros(M+1,dtype='int'))\n\tT[0] = 0\t#first threshold is by default always set to index 0 in trellis graph.\n\tT[M] = L \t#last threshold is by default always set to last number in input points.\n\ttrellis_value = np.full((M+1,L+1),np.inf)\n\ttrellis_backpointer = np.full((M+1,L+1),np.inf)\n\n\t# Stage 1: m=1\t\n\tfor l in range(1,L-M+2):\n\t\ttrellis_value[1][l] = ((l-0)/float(L))*np.var(points[0:l])\n\t\ttrellis_backpointer[1][l] = 0\n\n\t\n\tif(M>2):\n\t\t# Stage 2: m=2 to m=M-1\n\t\tfor m in range(2,M):\n\t\t\tfor l in range(m,L-M+m+1):\n\t\t\t\t#finding optimal path\n\t\t\t\tJ_min = np.inf\n\t\t\t\tJ_temp = np.inf\n\t\t\t\tfor i in range(m-1,l):\n\t\t\t\t\tJ_temp = trellis_value[m-1][i] + ((l-i)/float(L))*np.var(points[i:l])\n\t\t\t\t\tif J_temp < J_min:\n\t\t\t\t\t\tJ_min = J_temp\n\t\t\t\t\t\tptr = i\n\t\t\t\t\n\t\t\t\ttrellis_value[m][l],trellis_backpointer[m][l] = J_min,ptr\n\t\t\t\t\n\n\t# Stage 3: m=M\n\tm = M\n\tl = L\n\t#finding optimal path\n\tJ_min = np.inf\n\tJ_temp = np.inf\n\tfor i in range(m-1,l):\n\t\tJ_temp = trellis_value[m-1][i] + ((l-i)/float(L))*np.var(points[i:l])\n\t\tif J_temp < J_min:\n\t\t\tJ_min = J_temp\n\t\t\tptr = i\n\n\t\n\ttrellis_value[M][L] = J_min\n\ttrellis_backpointer[M][L] = ptr\n\t\n\t\n\t# Backtracking\n\tl = L\n\tm = M\n\twhile m>=2:\n\t\tT[m-1] = int(trellis_backpointer[m][l])\n\t\tl = int(trellis_backpointer[m][l])\n\t\tm = m - 1\n\n\t#Assign cluster labels\n\tlabels = np.full(len(points),0)\n\tj = T[0]\n\tcounter = 0\n\tfor i in range(1,k+1):\n\t\tlabels[j:T[i]] = counter\n\t\tj = T[i]\n\t\tcounter += 1\n\n\n\treturn labels,T", "def dfsl(board, depth_limit):\n # base cases\n if all(not piece.alive for piece in board.black_pieces):\n # goal! start building a path\n return []\n\n elif depth_limit == 0:\n # no path found to goal with this depth limit\n return None\n\n # recursive case: try all possible moves for all remaining pieces\n remaining_pieces = [p for p in board.white_pieces if p.alive]\n for piece in remaining_pieces:\n for newpos in piece.moves():\n oldpos = piece.pos\n eliminated_pieces = piece.makemove(newpos)\n result = dfsl(board, depth_limit-1)\n piece.undomove(oldpos, eliminated_pieces)\n\n if result is not None:\n # recursively found a sequence of moves to a goal state! 
hooray!\n # continue building the (reversed) sequence on the way back up\n result.append((piece, newpos))\n return result\n # otherwise, continue searching\n\n # no sequence found using any possible move (with this depth limit)\n return None", "def solve_puzzle(self):\n\n # for each word in the words list\n # ...for each row in the game board\n # ......for each column in each row\n for word in self.words:\n for y, row in enumerate(self.board):\n for x, col in enumerate(row):\n \n # for each direction\n # try to find a word in said direction\n for dir in self.directions:\n self.scan_word(word, y, x, dir)", "def depthFirstSearch(problem):\n #\"*** YOUR CODE HERE ***\"\n\n \"\"\"\n Pseudocode:\n function G RAPH-S EARCH ( problem) returns a solution, or failure\n initialize the frontier using the initial state of problem\n initialize the explored set to be empty\n loop do\n if the frontier is empty then return failure\n choose a leaf node and remove it from the frontier\n if the node contains a goal state then return the corresponding solution\n add the node to the explored set\n expand the chosen node, adding the resulting nodes to the frontier\n only if not in the frontier or explored set\n\n \"\"\"\n frontier = util.Stack()\n #print 'Create frontier'\n initial_node = node(problem.getStartState(), 0, [], 0)#(state,depth,path_actions,path_cost)\n frontier.push(initial_node)\n #print 'Push ',repr(initial_node.state)\n frontierSet = set([initial_node.state])\n explored = set() #initialize the explored set to be empty\n\n while True:\n if frontier.isEmpty() == True: raise Exception, \"The frontier was emptied\"#if the frontier is empty then return failure\n currNode = frontier.pop()#HERE1\n frontierSet.remove(currNode.state)\n #print 'Remove',repr(currNode.state)\n #print 'State: ' + repr(currNode.state) + '. Depth: ' + repr(currNode.depth) + '. Path Cost: ' + repr(currNode.path_cost) + '. Path Actions: ' + repr(currNode.path_actions) + '.\\n'\n if problem.isGoalState(currNode.state) == True:\n print 'Goal reached!'\n return currNode.path_actions\n explored.add(currNode.state)\n for succ in problem.getSuccessors(currNode.state):\n #print 'Succ: ',repr(succ[0])\n succNode = node(succ[0], currNode.depth + 1, currNode.path_actions + [succ[1],], currNode.path_cost + succ[2])\n if (succNode.state not in explored):\n # Si hacemos estas verificaciones entonces cuando se encuentra que un estado que se quiere expandir ya esta en la frontera\n # eliminamos ese estado de la frontera y lo expandimos ahora. Osea, damos prioridad a los nodos nuevos\n if(succNode.state in frontierSet):\n # Recurso'i:\n for frontierNode in frontier.list:\n if frontierNode.state == succNode.state:\n frontier.list.remove(frontierNode)\n frontierSet.remove(frontierNode.state)\n # if ((succNode.state not in explored) and (succNode.state not in frontierSet)): \n # Alternativa segun el libro. 
Lo que se hace es que se da prioridad a los nodos viejos.\n\n # Aca no verificaba si ya esta en la frontera porque alteraba el orden en el que se visitan los nodos.\n # Por ejemplo cuando esta pendiente (se genero pero no se expandio) un hijo con un estado,\n # pero en un nivel mas profundo se vuelve a generar el mismo estado y se tiene que expandir.\n # Si seguimos el DFS creo que tendriamos que expandir ese nodo ahi y no en la primera llamada donde quedo pendiente.\n \n frontier.push(succNode)\n #print 'Push ',repr(succNode.state)\n frontierSet.add(succNode.state)\n\n #util.raiseNotDefined()", "def solve(self, debug: bool = False) -> np.ndarray:\n coords = self.start\n priority_queue = [(0, 0, coords, [coords])] # Loss must go first!\n heapq.heapify(priority_queue)\n\n while priority_queue:\n _, distance_, coords, path = heapq.heappop(priority_queue)\n\n neighbors = self.neighbors(coords)\n # all_losses = [distance_ + self.heuristic(n) for n in neighbors]\n all_losses = [self.heuristic(n) for n in neighbors]\n\n not_visited = [n for n in neighbors if n in set(neighbors) - set(path)] # Preserve order\n losses = [all_losses[i] for i in range(len(all_losses))\n if neighbors[i] in not_visited]\n\n for loss, neighbor in [(loss, n) for loss, n in sorted(zip(losses, not_visited))]:\n if neighbor == self.finish:\n coords = neighbor\n self.path = np.array(path + [neighbor])\n priority_queue = []\n break\n else:\n # heapq.heappush(priority_queue, (distance_ + 1 + loss, distance_ + 1, neighbor, path + [neighbor]))\n heapq.heappush(priority_queue, (loss, distance_ + 1, neighbor, path + [neighbor]))\n\n if debug:\n print(coords)\n # print(neighbors)\n # print(losses)\n print([(loss, n) for loss, n in sorted(zip(losses, not_visited))])\n\n if coords != self.finish:\n if debug:\n print(coords, self.finish)\n\n raise NoSolutionsError('No solution has been found.')\n else:\n print('Solution has been found!')\n\n return self.path", "def depthFirstSearch(problem):\n #Initializing variables\n fringe = util.Stack()\n #Creating visited list\n visited = []\n #Pushing start state to Stack\n fringe.push((problem.getStartState(), []))\n #Adding start state to visited list\n visited.append(problem.getStartState())\n \n #Popping point from the stack\n while fringe.isEmpty() == False:\n state, actions = fringe.pop()\n #Getting successor nodes\n for next in problem.getSuccessors(state):\n newstate = next[0]\n newdirection = next[1]\n #Pushing successor nodes to the stack and appending to visited\n if newstate not in visited:\n if problem.isGoalState(newstate):\n return actions + [newdirection] \n else:\n fringe.push((newstate, actions + [newdirection]))\n visited.append(newstate)\n\n util.raiseNotDefined()", "def solveR(self,level=-1) :\n for g in self.R():\n if level>=0 :\n printIndent('%s' % (g,),level=level)\n print \n if g.solveL(level=(level+1) if level>=0 else level) is None :\n return g\n return None", "def breadthFirstSearch(problem):\r\n\t\"*** YOUR CODE HERE ***\"\r\n\tfrom game import Directions\r\n\t#i = 0\r\n\tfrontera=util.Queue()\r\n\testadoInicial= problem.getStartState()\r\n\tfrontera.push((estadoInicial, [],0))\r\n\tvisitados=[]\r\n\tvisitados.append(estadoInicial)\r\n\r\n\twhile not(frontera.isEmpty()):\r\n\t\t(estado, camino, costo) =frontera.pop()\r\n\t\tif(problem.isGoalState(estado)):\r\n\t\t\tbreak\r\n\r\n\t\tsucesores=problem.getSuccessors(estado)\r\n\t\tfor sucesor in sucesores:\r\n\t\t\t#i = i+1\r\n\t\t\t#print (i)\r\n\t\t\tif sucesor[0] not in 
visitados:\r\n\t\t\t\tfrontera.push((sucesor[0], camino + [sucesor[1]], costo + sucesor[2]))\r\n\t\t\t\tvisitados.append(sucesor[0])\r\n\tprint ('Cantidad de nodos en memoria: {}').format(len(frontera.list))\r\n\treturn camino", "def search(board):\n depth = 0\n while True:\n result = depth_first(board, depth)\n if result:\n return result\n else:\n depth += 1", "def graph_search(problem, verbose=False, debug=False):\r\n \r\n # PriorityQueue should be used to maintain the order of the queue.\r\n frontier = PriorityQueue()\r\n \r\n frontier.append(Node(problem, problem.initial))\r\n \r\n current_node = frontier.pop()\r\n \r\n p = True\r\n #depth first search\r\n if current_node.expand(current_node.problem)[0].g < 0:\r\n \r\n frontier = deque()\r\n frontier.append(Node(problem, problem.initial))\r\n #breadth first search\r\n elif current_node.expand(current_node.problem)[0].h < 2:\r\n \r\n p = False\r\n frontier = deque()\r\n frontier.append(Node(problem, problem.initial))\r\n #manhattan\r\n else:\r\n \r\n frontier.append(current_node)\r\n\r\n f_hash = Explored()\r\n f_hash.add(problem.initial.state_tuple())\r\n done = False\r\n n_explored = 0\r\n explored = Explored()\r\n\r\n #graph_search\r\n while not done:\r\n \r\n if p:\r\n current_node = frontier.pop()\r\n else:\r\n current_node = frontier.popleft()\r\n explored.add(current_node.state.state_tuple())\r\n n_explored = n_explored + 1 #inc the number of explored nodes\r\n\r\n if current_node.state.solved():\r\n path = current_node.path()\r\n done = True\r\n return path, n_explored\r\n #if not found in the tree return none and number of nodes explored\r\n else:\r\n \r\n for child in current_node.expand(current_node.problem):\r\n if not explored.exists(child.state.state_tuple()) and not \\\r\n f_hash.exists(child.state.state_tuple()):\r\n frontier.append(child)\r\n f_hash.add(child)\r\n done = len(frontier) == 0\r\n\r\n return None, n_explored", "def solve(grid):\n puzzle_dict = grid_values(grid)\n return search(puzzle_dict)", "def depth_fs(start: Vector2D, goal: Vector2D, grid: Scene, *args) -> (list, list):\n frontier = Stack()\n prev_node = dict()\n explored = []\n\n frontier.put(start)\n prev_node[start] = None\n\n while not frontier.empty():\n current = frontier.get()\n \n if current == goal:\n return (reconstruct_path(goal, prev_node), explored[1:]) # [1:] to remove start from list\n\n grid.set_cell(current, Cell(val = CellType.searched))\n explored.append(current)\n\n for neighbor in grid.get_unexplored_neighbors(current):\n prev_node[neighbor] = current\n frontier.put(neighbor)\n\n # grid.set_cell(neighbor, Cell(val = CellType.searched))\n \n # If frontier empty but goal was never reached, no solution was found\n return ([], explored[1:]) # [1:] to remove start from list", "def problem2(self, s):\n \n points = self.neighbor(100, 10, s.exhaustive_search)\n points += self.neighbor(10, 100, s.exhaustive_search)\n points += 1\n\n _testDriver.get_code(s.exhaustive_search)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n\n return points", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def depthFirstSearch(problem):\n stack = Stack()\n\n visited = []\n parent_dict = dict()\n start_state = problem.getStartState()\n stack.push(start_state)\n current_path = []\n actions_dict = dict()\n 
final_actions = []\n flag = False\n\n if problem.isGoalState(problem.getStartState()):\n return []\n\n while not stack.isEmpty():\n current_state = stack.pop()\n current_path.append(current_state)\n visited.append(current_state)\n\n if problem.isGoalState(current_state):\n break\n\n successors = problem.getSuccessors(current_state)\n\n for s in successors:\n flag = False\n if s[0] not in visited:\n stack.push(s[0])\n parent_dict[s[0]] = current_state\n actions_dict[(current_state, s[0])] = s[1]\n flag = True\n\n\n\n if not successors and not stack.isEmpty() or flag is False:\n current_state = stack.pop()\n while current_path[-1] != parent_dict[current_state]:\n current_path.pop()\n stack.push(current_state)\n\n for i in range(len(current_path)-1):\n final_actions.append(actions_dict[current_path[i],current_path[i+1]])\n\n\n return final_actions", "def depthFirstSearch(problem):\n\n # Initialization\n startState = problem.getStartState()\n # print \"Start:\", startState\n\n if problem.isGoalState(startState):\n return [] # No action needed\n\n route = util.Stack()\n closed = set([startState])\n stack = util.Stack() # DFS use stack\n\n # print problem.getSuccessors(startState)\n \n for successor in problem.getSuccessors(startState):\n # Use list(old_list) to make a copy of current route\n stack.push((successor, list(route.list)))\n \n # Tree search\n while not stack.isEmpty():\n #print stack.list\n ((currentState, action, cost), route.list) = stack.pop()\n\n if currentState in closed:\n continue # Skip the residue of expanded states in the stack\n\n # print \"Go \", action\n # print \"In \", currentState\n route.push(action)\n\n if problem.isGoalState(currentState): # Check for goal condition\n # print route.list\n # util.pause()\n return route.list # Return the route\n \n # Current state is not goal state\n closed.add(currentState)\n for successor in problem.getSuccessors(currentState):\n if successor[0] in closed:\n # print \"-Closed \", successor\n continue # this state is already expanded\n \n # print \"-Open \", successor\n # Use list(old_list) to make a copy of current route\n stack.push((successor, list(route.list)))", "def solve(self):\n # Use a trivial tour (1-2-3-...-N-1) to set the global upper bound.\n tour = list(range(self._N))\n upper_bound = sum([self._G[i][(i + 1) % self._N] for i in range(self._N)])\n trace = []\n\n # Start from a configuration with a single vertex.\n frontier = [BranchAndBoundConfiguration(self._G, self._N, [0], LOWER_BOUND_METHOD)]\n\n # Set the start time.\n start_time = time.time()\n\n # Branch and bound until the frontier set is empty or the time has expired.\n while frontier and (time.time() - start_time) < self._cutoff_time:\n # Fetch the most promising configuration.\n config = heappop(frontier)\n\n # Expand configuration by appending a vertex to the path.\n for v in range(self._N):\n try:\n expanded_config = config.expand(v)\n except ValueError:\n # Expanded configuration is not valid.\n continue\n if expanded_config.is_solution():\n # Update the global upper bound, if needed.\n this_solution = expanded_config.get_cycle_cost()\n if this_solution < upper_bound:\n # Log it.\n trace.append((time.time() - start_time, this_solution))\n # Update the best solution.\n upper_bound = this_solution\n tour = list(expanded_config.get_path())\n elif expanded_config.get_lower_bound() < upper_bound:\n # Add to the frontier set.\n heappush(frontier, expanded_config)\n return (upper_bound, [self._index_to_id[v] for v in tour], trace)", "def solve(grid):\n\n return 
search(grid_values(grid))", "def solve(grid):\n\n return search(grid_values(grid))", "def minimax_endgame_search(state, maximize=True) :\n global depth;\n depth=0\n path=[]\n paths=[]\n _path, _score = get_minimax_score(state, maximize, path, paths,INF,always_zero)\n\n return [_path, _score, len(paths)]", "def dfs(pos, dis):\n global ans\n if pos == e:\n ans = dis - 1 if not ans or dis < ans else ans\n return\n\n # Backtracking\n if ans and dis > ans:\n return\n\n # Check the point visited\n visited[pos[0]][pos[1]] = 1\n for i in range(4):\n ny = pos[0] + dy[i]\n nx = pos[1] + dx[i]\n if 0 <= ny < N and 0 <= nx < N:\n # If the new point is not wall and not visited\n if maze[ny][nx] != 1 and not visited[ny][nx]:\n dfs([ny, nx], dis + 1)\n visited[pos[0]][pos[1]] = 0", "def search(self) -> int:\n # crete node list\n for x in range(self.n):\n for y in range(self.n):\n if not self.grid[y][x] == 0:\n self.all_nodes.append((x, y))\n # recursively create paths\n i = 0\n paths = [[(0, 0)]]\n while i < self.n * self.n:\n paths = self.generate_paths(paths)\n if isinstance(paths, int):\n return paths\n i += 1\n\n return -1", "def dfs_search(board):\n goalcount = 0\n fringe = deque([])\n count = 0\n fringe.append(board)\n while(True):\n if len(fringe) is 0:\n print(\"Empty Fringe\")\n return\n n = fringe.pop()\n # print(n)\n goalcount = goalcount + 1\n if n.goal_test():\n print goalcount\n print count\n return\n column = n.get_next_unassigned_var()\n for val in n.choices[column]:\n count = count+1\n child = nQueens(copy.deepcopy(n.state), copy.deepcopy(n.choices), copy.deepcopy(n.n), n)\n child.assign(column, val)\n fringe.append(child)", "def search(self):\n open_set = set()\n closed_set = set()\n open_set.add(self.start_node)\n\n # loop through all nodes until open set is empty to build neighbor map\n while open_set:\n current_node = open_set.pop()\n closed_set.add(current_node)\n for removed_cells, score, next_status in current_node.find_next_moves():\n open_status_set = [i.status for i in open_set]\n closed_status_set = [i.status for i in closed_set]\n if next_status in open_status_set:\n index = open_status_set.index(next_status)\n node = list(open_set)[index]\n elif next_status in closed_status_set:\n index = closed_status_set.index(next_status)\n node = list(closed_set)[index]\n else:\n node = PopstarsNode(next_status)\n open_set.add(node)\n node.parents.append(current_node)\n current_node.children[node].append(\n (score, removed_cells, True))\n current_node.update_parents()\n max_score = []\n for i in self.start_node.children:\n max_score += self.start_node.children[i]\n return max(max_score)[0]", "def solve(self):\n ...", "def print_solution(state1, number_nodes_expanded, goal_state, state2 = None):\n\n\tif state2 != None:\n\t\ttotal_depth = state1.depth + state2.depth\n\telse:\n\t\ttotal_depth = state1.depth\n\t\tprint(\"Solution found at depth: \" + str(total_depth))\n\n\tdimensions = int(math.sqrt(total_depth)) + 1\n\n\tfig = plt.figure(figsize=[4 * dimensions, 4 * dimensions])\n\n\tstate1.print_path(fig, dimensions, state1.depth + 1)\n\n\tif state2 != None:\n\t\tstate2.parent.print_path_reserse(fig, dimensions, state1.depth + 2)\n\t\tmiddle_depth = state1.depth\n\t\tfound = False\n\t\twhile True:\n\t\t\tif state1.check_solution(goal_state):\n\t\t\t\tmiddle_depth = state1.depth\n\t\t\t\tfound = True\n\t\t\t\t#check if the solution can still be find in previous nodes\n\t\t\t\tstate1 = state1.parent\n\t\t\telse:\n\t\t\t\tif state1.parent == 
None:\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tstate1 = state1.parent\n\n\t\tstate2 = state2.parent\n\t\twhile not(found):\n\t\t\tif state2.check_solution(goal_state):\n\t\t\t\tmiddle_depth += 1\n\t\t\t\tfound = True\n\t\t\telse:\n\t\t\t\tmiddle_depth += 1\n\t\t\t\tstate2 = state2.parent\n\t\t\n\t\tprint(\"Solution found at depth: \" + str(middle_depth))\n\t\tplt.show()\n\t\treturn middle_depth\n\telse:\n\t\tplt.show()\n\t\treturn None", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n\n templist=[]\n explored = set()\n fringe = util.PriorityQueue()\n # state, list of directions till now and the cost is pushed in the stack\n # so that algorithm can explore the node with lowest cost first\n fringe.push((problem.getStartState(),templist),1)\n\n while (not fringe.isEmpty()):\n (currentNode,currDir) = fringe.pop()\n\n if problem.isGoalState(currentNode):\n pathToGoal = currDir\n break\n if not (currentNode in explored):\n explored.add(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n # total cost is cost till now plus cost to the child node\n totalCost = childNode[2]+problem.getCostOfActions(currDir)\n fringe.push((childNode[0],currDir+[childNode[1]]),totalCost)\n\n\n\n\n return pathToGoal;", "def breadthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Queue() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n q_p=util.PriorityQueue()\n #nodes=[]\n visited=[]\n \n node=dict()\n start=problem.getStartState()\n node['parent']=None\n node['direction']=None\n node['state']=start\n node['cost']=0\n \n q_p.push(node,node['cost'])\n lis.append(node)\n \n while q_p.isEmpty()!=True:\n node=q_p.pop()\n print node\n state=node['state']\n visited.append(state)\n #lis.append(node)\n if problem.isGoalState(state):\n print \"found\"\n return getPath(problem,node)\n \n suc=problem.getSuccessors(state)\n if suc ==None:\n continue \n for child in suc:\n if child[0] not in visited:\n childnode={}\n childnode['parent']=state\n childnode['direction']=child[1]\n childnode['state']=child[0]\n childnode['cost']=node['cost']+1\n 
q_p.push(childnode,childnode['cost'])\n lis.append(childnode)\n \n\n \n\n\n\n\n \n\n \n \n #util.raiseNotDefined()", "def solve(grid):\n return search(grid_values(grid))", "def solve(grid):\n return search(grid_values(grid))", "def solve(self):\n return breadth_first_search(self) + [self.goal_url]", "def solve(moves_from, positions=None):\n if positions == None:\n positions = list(moves_from.keys())\n else:\n positions = list(positions)\n\n nimbers = dict()\n\n for word in positions:\n nimbers[word] = mex(nimbers[move] for move in moves_from[word])\n\n print(\"Pruning strategy...\")\n strategy = dict()\n for word in positions:\n if not moves_from[word]:\n strategy[word] = []\n continue\n\n if nimbers[word] == 0:\n # player moving has lost. might move anywhere to be difficult\n children = [strategy[move] for move in moves_from[word]]\n strat = set()\n for child in children:\n strat.update(child)\n strat = sorted(strat, key=lambda x: (len(x), x))\n else:\n # move to a zero\n children = [[move] + strategy[move] for move in moves_from[word] if nimbers[move] == 0]\n assert children\n # choose shortest to remember\n strat = min(children, key=len)\n\n strategy[word] = strat\n\n return nimbers, strategy", "def depth_first_graph_search(problem):\n\n\tfrontier = [(Node(problem.initial))] # Stack (implemented as a list)\n\n\texplored = set()\n\twhile frontier:\n\t\tnode = frontier.pop()\n\t\tif problem.goal_test(node.state):\n\t\t\treturn node\n\t\texplored.add(node.state)\n\t\tfrontier.extend(child for child in node.expand(problem)\n\t\t\t\t\t\tif child.state not in explored and child not in frontier)\n\treturn None", "def dfs( self ):\n\n #print self.state; \n #print self.visited;\n SearchProblem.stateVisited= SearchProblem.stateVisited+1 \n \n if self.stop: # check class variable and stop searching...\n return;\n\n for action in self.edges(): # consider each edge leading out of this node\n\n action.destination.path = self.path + str(action.label); \n # get the label associated with the\n # action and append it to the path\n # string\n\n action.destination.visited = self.visited.copy();\n # make copy of source node's visited set\n # and use it as destination node's\n # visited set\n\n action.destination.visited.add( repr(action.destination.state) );\n\n if action.destination.is_target(): \n # check if destination of edge is target node\n action.destination.target_found(); # perform target found action\n if not self.continue_search(): # stop searching if not required\n SearchProblem.stop = True; # set class variable to record that we\n break; # are done\n\n if repr(action.destination.state) in self.visited:\n continue; # skip if we've visited this one before\n\n action.destination.dfs(); # resume recursive search ", "def pfd_solve (r, w) :\n\tglobal targets\n\ta = [0, 0]\n\tpfd_initialize(r, a)\n\ttargets_array = []\n\tpfd_find_first_target()\n\t\n\tresultArr = []\n\n\twhile len(targets) > 0:\n\t\ttarget = heapq.heappop(targets)\n\t\tresultArr.append(target+1)\n\t\tnew_targets = pfd_clear(target)\n\n\t\tfor i in new_targets:\n\t\t\tdependencies_list[i]-=1\n\t\t\tif dependencies_list[i] == 0:\n\t\t\t\theapq.heappush(targets,i)\n\t\t\t\t\n\t#Prints the result\n\tfor i in xrange(len(resultArr)) :\n\t print resultArr[i],", "def cuckoo_search(n=None, nd=None, Lb=None, Ub=None, pa=None):\n\tif n is None:\n\t\tn =25\n\n\tif nd is None:\n\t\tnd=21\n\n\tif Lb is None:\n\t\tLb = np.ones(nd)*0\n\tif Ub is None:\n\t\tUb = np.ones(nd)*5\n\n\tif pa is None:\n\t\tpa = 0.25\n\n\t# creation of the list for parameter 
pairs \n\t\n\tstep = 1\n\n # initialization of the nests\n\tnests = np.zeros((n,nd))\n\tfor i in range(n):\n\t\tnests[i,:] = Lb + (Ub-Lb)*np.random.rand(len(Lb))\n\n\tfitness = 10**10 * np.ones((n,1))\n\tbest_nest, fmin, nest, fitness, N_iter = single_cuckoo_search(nests,fitness,Lb,Ub,pa,step) \n\n\treturn best_nest, fmin, nest, fitness, N_iter", "def solve(self):\n pass" ]
[ "0.70110327", "0.6945343", "0.67388815", "0.6720618", "0.6668133", "0.66212654", "0.65762824", "0.65408486", "0.65394676", "0.65213966", "0.65150017", "0.6503912", "0.64855695", "0.64802074", "0.64769286", "0.6470355", "0.64611316", "0.6455134", "0.64195275", "0.6398742", "0.63514024", "0.6341915", "0.6335996", "0.62871116", "0.6263344", "0.6242594", "0.6218089", "0.62147874", "0.62140924", "0.62097526", "0.619691", "0.61652595", "0.61602604", "0.6125032", "0.61224145", "0.6104371", "0.6094363", "0.60426503", "0.6008636", "0.5977009", "0.5950024", "0.5939261", "0.5938529", "0.5924662", "0.59013176", "0.59005386", "0.58679295", "0.58541906", "0.5844866", "0.5825019", "0.58184904", "0.58052933", "0.58040667", "0.5800417", "0.5782879", "0.57774574", "0.5762074", "0.57526106", "0.5744355", "0.5711005", "0.5702246", "0.5693935", "0.5679415", "0.5671529", "0.56555015", "0.56550616", "0.5620959", "0.5604264", "0.5603584", "0.5600197", "0.55987906", "0.5589623", "0.5587703", "0.55859077", "0.55859077", "0.55859077", "0.55799603", "0.5577402", "0.5563055", "0.556261", "0.556261", "0.55550724", "0.55469656", "0.5544631", "0.5544464", "0.5541206", "0.55041516", "0.5498747", "0.5494732", "0.54931796", "0.5493111", "0.54764223", "0.54764223", "0.5460992", "0.5460474", "0.5459811", "0.5456625", "0.54536694", "0.5452677", "0.54514176" ]
0.6093073
37
Returns a QuerySet containing only available instances (i.e. not selected previously)
def available(self, include_qs = None, include_obj = None): qs = self.all() available_qs = self.all() for obj in qs: if include_qs: if not obj.available and include_qs.filter(id=obj.id).count() == 0: available_qs = available_qs.exclude(id=obj.id) elif include_obj: if not obj.available and obj.id != include_obj.id: available_qs = available_qs.exclude(id=obj.id) else: if not obj.available: available_qs = available_qs.exclude(id=obj.id) return available_qs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queryset(self):\n return NoneToEmptyQuerySet(self.model, using=self._db)", "def get_queryset(self):\n # the filter says that it only returns those w/ the pub_date\n # less or equal to timezone.now() (earlier or now)\n questions = Question.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')\n\n excludes = []\n for question in questions:\n if not question.choice_set.all().exists():\n excludes.append(question.id)\n \n return Question.objects.exclude(pk__in=excludes).filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')[:5]", "def get_queryset(self):\n return self._get_base_queryset().filter(deleted__isnull=True)", "def get_inititially_selected_queryset(self):\n return self.model.objects.none()", "def get_queryset(self):\n # Check if the parameter assigned_only is on the request\n assigned_only = bool(\n int(self.request.query_params.get('assigned_only', 0))\n )\n # Make copy of queryset so we do not modify the original\n queryset = self.queryset\n # If the parameter was passed filter on the book not\n # being specified\n if assigned_only:\n queryset = queryset.filter(book__isnull=False)\n\n # Remove duplicates\n return queryset.filter(\n user=self.request.user\n ).order_by('-name').distinct()", "def get_queryset(self):\n assigned_only = bool(\n int(self.request.query_params.get('assigned_only', 0))\n )\n queryset = self.queryset\n if assigned_only:\n queryset = queryset.filter(recipe__isnull=False)\n\n return queryset.filter(user=self.request.user).order_by('-name').distinct()", "def queryset(self, request, queryset):\n if self.value() == 'ignore':\n return queryset.filter(ignored_at__isnull=False)\n if self.value() == 'active':\n return queryset.filter(ignored_at=None)", "def non_hidden(self):\n return self.filter(hidden=False)", "def non_hidden(self):\n return self.filter(hidden=False)", "def available_list(cls, num):\n return cls.objects.filter(status=0)[:num]", "def get_queryset(self):\n assigned_only = bool(\n int(self.request.query_params.get('assigned_only', 0))\n )\n queryset = self.queryset\n if assigned_only:\n queryset = queryset.filter(recipe__isnull=False)\n\n return queryset.filter(user=self.request.user).order_by('-name')", "def get_queryset(self):\n queryset = super().get_queryset()\n return queryset.available_for_user(self.request.user)", "def get_queryset(self):\n return Person.objects.filter(expiry_date__gt=timezone.now())", "def get_queryset(self):\n return Question.objects.filter(publication_date__lte=timezone.now())", "def filter(cls, *args, **kwargs) -> models.QuerySet:\n return cls.objects.filter(*args, **kwargs)", "def get_queryset(self):\n q_kwargs = {\n \"awardsearch__latest_transaction__isnull\": True,\n \"date_signed__isnull\": True,\n \"total_obligation__isnull\": True,\n }\n\n return super(AwardManager, self).get_queryset().filter(~Q(**q_kwargs))", "def get_queryset(self):\n return self.model.objects.all()", "def usable(self):\n return self.exclude(Q(location=None) | Q(ipv4=None) | Q(ipv6=None))", "def visible(self):\n return self.get_queryset().filter(\n record_status=self.model.ACTIVE, merged_with=None)", "def get_queryset(self):\n qs = self.queryset.filter(expiry_date__gt=timezone.now())\n if not self.request.user.groups.filter(name=REGISTRIES_VIEWER_ROLE).exists():\n qs = qs.filter(Q(applications__current_status__code='A'),\n Q(applications__removal_date__isnull=True))\n return qs", "def visible(self, **kwargs):\r\n return self.filter(is_deleted=False, **kwargs)", "def get_queryset(self):\n if getattr(self, 
'use_this_queryset', None):\n return self.use_this_queryset\n return self.model().objects.all()", "def queryset(cls):\n return cls.model._default_manager.all()", "def queryset(self):\n if current_user.is_authenticated:\n return Question.query.filter(User.rooms.any(User.id == current_user.id))\n if session.get(\"rooms\") is not None:\n return Question.query.filter(Room.id.in_(session.get(\"rooms\")))\n return Question.query.filter(sql.false())", "def all(self):\n return self.filter()", "def all(self):\n return self.filter()", "def all(self):\n return self.filter()", "def get_queryset(self):\n\t\treturn Question.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n\t\treturn Question.objects.filter(pub_date__lte=timezone.now())", "def queryset(self, request, queryset):\n if self.value() == 'syndicated':\n return queryset.filter(syndicated_at__isnull=False)\n if self.value() == 'ready_to_syndicate':\n return queryset.filter(ignored_at=None,\n syndicated_at=None,\n longitude_position__isnull=False,\n merchantwebsite__deleted_at=None,\n merchantdoc__deleted_at=None,\n ).distinct()\n if self.value() == 'ignore':\n return queryset.filter(ignored_at__isnull=False)", "def available_qs(self):\n correct_qs_ids = self.tasks.filter(answer=F('question__solution')).values_list('question__id', flat=True)\n return Q.objects.exclude(id__in=correct_qs_ids)", "def get_unlabelled_documents_queryset(self):\n queryset = self.get_queryset()\n\n # Retrieve labelled IDs\n labelled_ids = self.get_labelled_documents_queryset()\\\n .values_list('document_id', flat=True)\n\n return queryset.exclude(pk__in=labelled_ids)", "def get_queryset(self):\n self.object = self.get_object()\n return self.object.friends.all().exclude(user=self.object.user)", "def get_query_set(self):\n return super(PublishedManager, self).get_query_set().filter(is_published=True)", "def get_queryset(self):\r\n return self.model.objects.all()", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now())", "def load_all_queryset(self):\n return self.get_model()._default_manager.all()", "def qs(self) -> MIZQuerySet:\n if isinstance(self, type):\n raise TypeError(\n f\"Calling qs() from class level is prohibited. 
Use {self.__name__}.objects instead.\"\n )\n # noinspection PyUnresolvedReferences\n return self._meta.model.objects.filter(pk=self.pk)", "def get_queryset(self):\n if self.queryset is None:\n return self.model.objects.all()\n return self.queryset", "def queryset(self, request, queryset):\n if self.value() == 'Y':\n return queryset.exclude(cwr_exports__count=0)\n elif self.value() == 'N':\n return queryset.filter(cwr_exports__count=0)", "def get_queryset(self):\n return Article.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self, **kwargs):\n # if getattr(self.view, 'deleted_obj_lookup', False) and self.view.queryset is None and self.view.model:\n if getattr(self.view, 'deleted_obj_lookup', False) or self.request.GET.get('deleted_obj_lookup', None):\n return self.view.model._default_manager.all_with_deleted().filter(**kwargs)\n return self.super.get_queryset(**kwargs)", "def get_queryset(self):\n form = self.form_class(self.request.GET)\n if form.is_valid():\n object_list = Book.objects.filter(\n self._create_filter_object(form.cleaned_data)\n )\n return object_list\n return Book.objects.all()", "def untracked(self):\n\n # Getting all scopes.\n day_scope = get_scope_by_name(Scope.DAY.value)()\n week_scope = get_scope_by_name(Scope.WEEK.value)()\n month_scope = get_scope_by_name(Scope.MONTH.value)()\n year_scope = get_scope_by_name(Scope.YEAR.value)()\n\n return self.exclude(\n Q(scope=day_scope.name, track_events__created__date__range=(day_scope.start, day_scope.end)) |\n Q(scope=week_scope.name, track_events__created__date__range=(week_scope.start, week_scope.end)) |\n Q(scope=month_scope.name, track_events__created__date__range=(month_scope.start, month_scope.end)) |\n Q(scope=year_scope.name, track_events__created__date__range=(year_scope.start, year_scope.end))\n )", "def get_queryset(self):\n return Question.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')[:1000]", "def get_queryset(self):\n return Page.objects.active()", "def get_query_set(self):\r\n return super(TopLevelManager, self).get_query_set().filter(parent=None, hidden=False)", "def all(cls):\n return cls.where()", "def get_pending_instances(self):\n return [instance for instance in self.instances.itervalues()\n if InstanceState.REQUESTED <= instance.state < InstanceState.RUNNING]", "def excluded(cls):\n return []", "def _queryset(self):\n return self.type.objects.filter(id__in=self.ids)", "def queryset(self, request, queryset):\n if self.value() == '1':\n return queryset.exclude(moyen_id=24)\n if self.value() == '0':\n return queryset.filter(moyen_id=24)", "def get_queryset(self):\n qs = AllowedTag.objects.filter(enabled=True)\n if self.q:\n qs = qs.filter(name__istartswith=self.q)\n return qs.order_by('name')", "def get_queryset(self):\n return Post.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Post.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Post.objects.filter(published_date__isnull=True).order_by('created_date')", "def filter_queryset(self, request, queryset, view):\n instance_id = request.query_params.get(\"instance\")\n\n if instance_id:\n int_or_parse_error(\n instance_id,\n \"Invalid value for instance_id. 
It must be a positive integer\",\n )\n\n instance = get_object_or_404(Instance, pk=instance_id)\n queryset = queryset.filter(instance=instance)\n\n return queryset", "def get_valid_types():\n return BaseUsage.objects.exclude(\n pk__in=UsageType.objects.filter(usage_type='SU')\n )", "def get_queryset(self):\n qs = self.queryset\n if not self.request.user.groups.filter(name=REGISTRIES_VIEWER_ROLE).exists():\n qs = qs.filter(\n Q(applications__current_status__code='A'),\n Q(applications__removal_date__isnull=True))\n if self.kwargs.get('activity') == 'drill':\n qs = qs.filter(registries_activity='DRILL')\n if self.kwargs.get('activity') == 'install':\n qs = qs.filter(registries_activity='PUMP')\n return qs", "def get_queryset(self):\n return Question.objects.order_by('-pub_date')[:20]", "def filter(self, **search_terms):\n conditions = \" AND \".join(\n [f\"{term} = :{term}\"\n for term, value in search_terms.items()\n if value is not None]\n ).strip()\n\n if conditions:\n conditions = f\"WHERE {conditions}\"\n\n instances = self.db.query(f\"\"\"\n SELECT * from {self.table}\n {conditions}\n \"\"\", **search_terms).all(as_dict=True)\n\n return [\n self.model(**instance)\n for instance in instances\n ]", "def get_queryset(self):\n return self.queryset.filter(theme__contest__publish_date__lte=timezone.now())", "def prune(self):\n target_user_ids = self.get_queryset().values_list('id', flat=True)\n exclude_user_ids = SentDrip.objects.filter(date__lt=conditional_now(),\n drip=self.drip_model,\n user__id__in=target_user_ids)\\\n .values_list('user_id', flat=True)\n self._queryset = self.get_queryset().exclude(id__in=exclude_user_ids)", "def GetQNoAnnotations(cls):\n return models.Q(annotationstable__isnull=True)", "def index_queryset(self, using=None):\n return self.get_model().objects.filter(created_at__lte=datetime.datetime.now())", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')", "def get_queryset(self):\n return Question.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')[:10]", "def get_queryset(self):\n return Question.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')[:10]", "def index_queryset(self, using=None):\n return self.get_model().objects.filter(pubdate__lte=datetime.datetime.now())", "def exclude_object(qs, obj):\n return qs.exclude(pk=obj.pk)", "def distinct(self):\n return DistinctQuery(self)", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now()).order_by(\n \"-pub_date\"\n )[:5]", "def get_queryset(self):\n return get_user_model().objects.none()", "def get_queryset(self):\n if hasattr(self, 'revision_model'):\n return self.revision_model.objects\n raise NotImplementedError()", "def get_queryset(self):\n\t\treturn Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]", "def get_queryset(self):\n\t\treturn Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]", "def index_queryset(self, using=None):\n return self.get_model().objects.filter(pub_date__lte=datetime.datetime.now())", "def index_queryset(self):\n return self.get_model().objects.filter(active=True)", "def get_non_selected(self):\n\n obj_list = self.get_list()\n\n for sel in self.get_selected():\n obj_list.remove(sel)\n\n return obj_list", "def get_queryset(self):\n return Question.objects.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')", "def get_queryset(self):\n entries = self.model_cls.published.none()\n\n if 
self.request.GET:\n self.pattern = self.request.GET.get('q', '')\n if len(self.pattern) < 3:\n self.error = _('The pattern is too short')\n else:\n query_parsed = QUERY.parseString(self.pattern)\n entries = self.model_cls.published.filter(\n query_parsed[0]).distinct()\n if not entries:\n self.error = _(\"No entries found with %s pattern\" % self.pattern)\n else:\n self.error = _('No pattern to search found')\n\n return entries", "def filter_queryset(self, request, queryset, view):\n if request.user.is_anonymous:\n return queryset.filter(Q(shared_data=True))\n return queryset", "def distinct(self):\n qs = copy(self)\n qs._distinct = True\n return qs", "def get_non_inheriting_objects(self):\n return get_non_inheriting_objects(self)", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]", "def get_queryset(self):\n return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]" ]
[ "0.6884471", "0.68695015", "0.67739236", "0.67603797", "0.671835", "0.65459937", "0.6483099", "0.64685464", "0.64685464", "0.6414166", "0.6411734", "0.6331146", "0.6330862", "0.6273209", "0.6255611", "0.6248718", "0.62450546", "0.6231552", "0.6221019", "0.6218501", "0.62098354", "0.62066174", "0.61992687", "0.61856896", "0.6172777", "0.6172777", "0.6172777", "0.6162526", "0.6162526", "0.6139381", "0.61232966", "0.6119052", "0.61182606", "0.6112199", "0.6102962", "0.60846615", "0.60846615", "0.60846615", "0.60846615", "0.60846615", "0.60846615", "0.60846615", "0.60846615", "0.60846615", "0.60846615", "0.60846615", "0.60846615", "0.60846615", "0.60846615", "0.60846615", "0.60776484", "0.60737073", "0.605767", "0.60575837", "0.6009522", "0.6004024", "0.5999803", "0.59913754", "0.5968496", "0.5968313", "0.5955729", "0.5947632", "0.5941362", "0.5919979", "0.59111005", "0.59050846", "0.58999246", "0.58949673", "0.58949673", "0.58746326", "0.5873379", "0.58702356", "0.5841108", "0.58301973", "0.582092", "0.5819595", "0.58069843", "0.5803303", "0.5787375", "0.57866234", "0.5785252", "0.5785252", "0.57796824", "0.5774916", "0.5774665", "0.57727027", "0.57702416", "0.5760314", "0.5759749", "0.5759749", "0.5757947", "0.5757127", "0.57557136", "0.57509017", "0.57470477", "0.5745066", "0.5738385", "0.5734691", "0.57260233", "0.57260233" ]
0.6682215
5
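A minimal usage sketch for the available() manager method shown in the record above (the query "Returns a QuerySet containing only available instances (i.e. not selected previously)"). Everything except the available() call and its include_obj/include_qs keyword arguments is an illustrative assumption: the app path myapp, the Slot and Booking models, and the slot field are hypothetical names, not taken from the dataset.

    from myapp.models import Slot, Booking   # hypothetical app and models

    free_slots = Slot.objects.available()                        # instances not yet selected by any related object
    booking = Booking.objects.get(pk=1)                          # assumed existing record that holds a slot
    editable = Slot.objects.available(include_obj=booking.slot)  # keep the booking's own slot in the result set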
Determines whether the model instance has already been selected in a related field (ManyToManyField, OneToOneField).
def available(self): fields = self._meta.get_fields() for field in fields: if isinstance(field, models.ManyToManyRel): attr = field.get_accessor_name() if getattr(self, attr).count() > 0: return False elif isinstance(field, models.OneToOneRel): attr = field.get_accessor_name() if getattr(self, attr, None): return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relation_exists(cls, model):\n return bool(cls.get_related_field(model)\n or cls.get_reverse_related_field(model))", "def isRelated(self):\n return len(self.user_storage.all()) > 0", "def has_field(self, field):\n return field in self.extra_fields", "def contains(self, obj):\n self._not_support_combined_queries(\"contains\")\n if self._fields is not None:\n raise TypeError(\n \"Cannot call QuerySet.contains() after .values() or .values_list().\"\n )\n try:\n if obj._meta.concrete_model != self.model._meta.concrete_model:\n return False\n except AttributeError:\n raise TypeError(\"'obj' must be a model instance.\")\n if obj.pk is None:\n raise ValueError(\"QuerySet.contains() cannot be used on unsaved objects.\")\n if self._result_cache is not None:\n return obj in self._result_cache\n return self.filter(pk=obj.pk).exists()", "def _is_selected ( self, object ):\n if hasattr(object, 'model_selection') \\\n and object.model_selection is not None:\n return True\n return False", "def exists(self):\n new_qs = self._copy_self()\n new_qs.max_items = 1\n return new_qs.count(page_size=1) > 0", "def _optimize_field_by_name(self, store: QueryOptimizerStore, model, selection, field_def) -> bool:\n name = self._get_name_from_field_dev(field_def)\n if not (model_field := self._get_model_field_from_name(model, name)):\n return False\n _logger.info('_optimize_field_by_name %r %r', name, model_field)\n if self._is_foreign_key_id(model_field, name):\n # ToDo: check if this works - i write resolvers for this\n store.only(name)\n return True\n if model_field.many_to_one or model_field.one_to_one:\n # ForeignKey or OneToOneField\n field_store = self._optimize_gql_selections(\n selection.selections,\n self._get_type(field_def),\n )\n store.select_related(name, field_store)\n return True\n if model_field.one_to_many or model_field.many_to_many:\n field_store = self._optimize_gql_selections(\n selection.selections,\n self._get_type(field_def),\n )\n if isinstance(model_field, ManyToOneRel):\n field_store.only(model_field.field.name)\n related_queryset = model_field.related_model.objects.all()\n _logger.info('_optimize_field_by_name many relation %r %r', model, name)\n store.prefetch_related(name, field_store, related_queryset)\n return True\n if not model_field.is_relation:\n store.only(name)\n return True\n return False", "def _filter_m2m(self, field):\n if isinstance(field, models.ManyToManyField):\n if self._join_allowed(field.model, field.rel.to, field):\n return field", "def __contains__(self, key):\n if isinstance(key, Model):\n key = key.get_id()\n return (str(key) in self.get_models())", "def is_m2m_set(self, int_model, model1, model2):\n for m2m in model1._meta.many_to_many:\n if m2m.rel.to == model2 and m2m.rel.through == int_model:\n return True\n for m2m in model2._meta.many_to_many:\n if m2m.rel.to == model1 and m2m.rel.through == int_model:\n return True\n return False", "def table_contains_elements(self, model):\n if model.objects.count() > 1:\n return True\n return False", "def exists(self, value=None):\n try:\n if not value:\n value = self.get()\n except AttributeError:\n # If the instance is deleted, the _pk attribute doesn't exist\n # anymore. 
So we catch the AttributeError to return False (this pk\n # field doesn't exist anymore) in this specific case\n return False\n else:\n return self.connection.sismember(self.collection_key, value)", "def has_item(self, item):\n return item in self.set", "def has_item(self, item):\n return item in self.set", "def exists(self):\n return self.obj is not None", "def __contains__(self, obj):\n if isinstance(obj, self):\n query = self.where(**obj.data).select()\n result = query.execute()\n if result.count:\n return True\n return False", "def has_add_permissions(self):\n queryset = self.model.objects.all()\n if hasattr(queryset, 'has_add_permissions'):\n return queryset.has_add_permissions( PyFormsMiddleware.user() )\n else:\n return True", "def is_one(self) -> bool:\n return self.field.one == self", "def has_field(cls, field) -> bool:\n try:\n cls._meta.get_field(field)\n return True\n except models.FieldDoesNotExist:\n return False", "def test_annotation(self):\n queryset = OtherModel.objects.annotate(\n has_related=Exists(QuerySetModel.objects.filter(other_id=OuterRef('pk')))\n )\n self.assertEqual(queryset.count(), 1)\n self.assertFalse(queryset[0].has_related)", "def _filter_related_m2m(self, rel):\n field = rel.field\n if isinstance(field, models.ManyToManyField):\n if self._join_allowed(rel.parent_model, rel.model, field):\n return rel", "def include_related(request):\n query_param_value = request.GET.get(\"include_related\")\n return query_param_value in [\"true\", \"True\"]", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == 'data_collection' or \\\n obj2._meta.app_label == 'data_collection':\n return True\n return None", "def contains_field(cls, field_id):\n return field_id in cls.__data", "def IsDocumentRelated(self, *args, **kwargs):\n pass", "def is_initially_selected(self, value):\n return value in self._get_selected_values_set()", "def has_siblings(self):\n return self.siblings.exclude(pk=self.pk).exists()", "def in_(self, other: Any) -> NoReturn:\n raise NotImplementedError(\n \"in_() not yet supported for \"\n \"relationships. 
For a simple \"\n \"many-to-one, use in_() against \"\n \"the set of foreign key values.\"\n )", "def __eq__(self, other):\n return (other is not None and\n self.field_name == other.field_name and\n self.field_type is other.field_type and\n dict.__eq__(self.field_attrs, other.field_attrs) and\n self.related_model == other.related_model)", "def hasField(self) -> bool:\n return bool(self.__field)", "def __eq__(self, other):\n return (self.field_name == other.field_name and\n self.field_type is other.field_type and\n dict.__eq__(self.field_attrs, other.field_attrs) and\n self.related_model == other.related_model)", "def in_search_queryset(self, instance_id, index='_all'):\n return self.get_search_queryset(index=index).filter(pk=instance_id).exists()", "def has_item(self, item: Inventory) -> bool:\n return (item.pk,) in self.orderitem_set.values_list('item')", "def allow_relation(self, obj1, obj2, **hints):\n\n result = (obj1._meta.model_name in DefaultRouting.defaultModels and \n obj2._meta.model_name in DefaultRouting.defaultModels)\n return result", "def has(self, target):\r\n return target in self.by_target", "def _is_valid_field(self, field, allow_m2m=True):\r\n try:\r\n self.query.setup_joins(field.split(LOOKUP_SEP), self.query.get_meta(), self.query.get_initial_alias(), False, allow_m2m, True)\r\n return True\r\n except FieldError:\r\n return False", "def __contains__(self, item):\n\n if self.is_view:\n return item in self._view\n return item in self._storage", "def __contains__(self, val):\n return val in self.ids or super().__contains__(val)", "def __contains__(self, fieldname):\r\n return fieldname in self._by_name", "def exists1(self, cls, **attr):\n nodes = getattr(self.graph, getattr(models, cls).element_plural).query(**attr).all()\n return len(nodes) > 0", "def relevant():\n query = (self.query[exp[\"ids\"][0]]\n if exp[\"object_name\"] == \"__previous__\" else exp)\n return object_class.id.in_(\n RelationshipHelper.get_ids_related_to(\n object_class.__name__,\n query[\"object_name\"],\n query[\"ids\"],\n )\n )", "def _optimize_field_by_hints(self, store: QueryOptimizerStore, selected_field, field_def) -> bool:\n if not (optimization_hints := getattr(field_def, 'optimization_hints', None)):\n return False\n args = selected_field.arguments\n self._add_optimization_hints(optimization_hints.select_related(*args), store.select_list)\n self._add_optimization_hints(optimization_hints.prefetch_related(*args), store.prefetch_list)\n if store.only_list is not None:\n self._add_optimization_hints(optimization_hints.only(*args), store.only_list)\n return True", "def belongs_to_user(self) -> bool:\n return flask.g.user is not None and flask.g.user.id == getattr(\n self, 'user_id', False\n )", "def formfield_for_manytomany(self, db_field, request, **kwargs):\n if db_field.name == 'purposes':\n kwargs['widget'] = CheckboxSelectMultiple()\n\n return super(PrescriptionAdmin, self).formfield_for_manytomany(\n db_field, request, **kwargs)", "def formfield_for_manytomany(self, db_field, request, **kwargs):\n if db_field.name == \"professor\" and not request.user.is_superuser:\n kwargs[\"queryset\"] = Professor.objects.filter(faculty_id=request.user.profile.faculty.id)\n\n return super(ExamAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)", "def IsSelected(self, item):\r\n\r\n return item.IsSelected()", "def is_duplicate(self, **kwargs):\n return len(list(self.c.select(**kwargs))) > 0", "def solved(self):\n return GOAL_VEHICLE in self.vehicles", "def has_view_permissions(self, 
obj):\n queryset = self.model.objects.filter(pk=obj.pk)\n if hasattr(queryset, 'has_view_permissions'):\n return queryset.has_view_permissions( PyFormsMiddleware.user() )\n else:\n return True", "def has(self, target):\n return target in self.by_target", "def _filter_related_one2one(self, rel):\n field = rel.field\n if isinstance(field, models.OneToOneField):\n if self._join_allowed(rel.parent_model, rel.model, field):\n return rel", "def is_associated(self, community, member):\n if __debug__:\n from community import Community\n assert isinstance(community, Community)\n assert isinstance(member, Member)\n return (community.cid, member) in self._associations", "def allow_relation(self, obj1, obj2, **hints):\n\t\tif obj1._meta.app_label == 'product' or \\\n\t\t obj2._meta.app_label == 'product':\n\t\t return True\n\t\treturn None", "def __bool__(self):\n return self.taxonomy.exists", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == 'researcherquery' and obj2._meta.app_label == 'researcherquery':\n return True\n return None", "def exists(self):\n return bool(self.get())", "def is_many(self) -> bool: # pragma: no cover\n pass", "def _is_foreign_key(self, key):\n return self._in_keys(key, self._foreign_keys)", "def _persists_for(self, mapper: Mapper[Any]) -> bool:\n\n return (\n self.key in mapper.relationships\n and mapper.relationships[self.key] is self\n )", "def select_all_is_allowed(self):\n select_all_max_items = self.get_select_all_max_items()\n if select_all_max_items is None:\n return True\n else:\n return select_all_max_items > self.get_total_number_of_items_in_queryset()", "def allow_relation(self, obj1, obj2, **hints):\n\n result = False\n if not (obj1._meta.model_name in GeoSpatialRouting.includedModels and \n obj2._meta.model_name in GeoSpatialRouting.includedModels) :\n result = None\n return result", "def has_fields(model, *fields):\n meta = model._meta\n\n try:\n for field in fields:\n meta.get_field(field)\n except FieldDoesNotExist:\n return False\n return True", "def check_field_existence(self, model_name, field_name):\n model_id = self.env['ir.model'].sudo().search([('model', '=', model_name)], limit=1)\n\n if self.env['ir.model.fields'].sudo().search_count([('name', '=', field_name), ('model_id', '=', model_id.id)]) == 0:\n raise BadRequest(\"L'une des conditions concerne un champ qui n'existe pas\")\n\n return True", "def _join_allowed(self, source, target, field=None):\n join = (source, target)\n\n # No circles\n if target == source:\n return False\n\n # Prevent join to excluded models\n if target in self.excluded_models:\n return False\n\n # Never go back through the root\n if target == self.root_model:\n return False\n\n # Apply excluded joins if any\n if join in self._excluded_joins:\n _field = self._excluded_joins[join]\n if not _field:\n return False\n elif _field and _field == field:\n return False\n\n # Check if the join is allowed by a required rule\n for (_source, _target), _field in self._required_joins.items():\n if _target == target:\n if _source != source:\n return False\n\n # If a field is supplied, check to see if the field is allowed\n # for this join.\n if field and _field and _field != field:\n return False\n\n return True", "def belongs_to(self, group):\n return self in group.users", "def __contains__(self, instance: object) -> bool:\n try:\n state = attributes.instance_state(instance)\n except exc.NO_STATE as err:\n raise exc.UnmappedInstanceError(instance) from err\n return self._contains_state(state)", "def 
allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == self.app_label or \\\n obj2._meta.app_label == self.app_label:\n return True\n return None", "def is_displayed(self, unit):\n try:\n field_is_displayed = getattr(self.unit.get_model_name()+'_is_displayed')\n if field_is_displayed:\n return field_is_displayed(unit)\n except AttributeError:\n pass\n if not self.displayed and not self.excluded:\n return True\n elif self.displayed and self.excluded:\n return unit.get_model_name() in self.displayed \\\n and unit.get_model_name() not in self.excluded\n elif self.excluded:\n return unit.get_model_name() not in self.excluded\n elif self.displayed:\n return unit.get_model_name() in self.displayed\n else:\n return True", "def has_componente(self, persona):\n return True if persona.pk in self.pks_componenti else False", "def is_related_to(self, entity, pure=False):\n if type(entity) not in [list, tuple, set]:\n entity = [entity]\n entity = [str(e) for e in entity]\n out = []\n for attribute in self._class_linkables:\n value = self._attr_get_name(attribute)\n if type(value) == list:\n out += [str(v) in entity for v in value]\n elif value is None:\n continue\n else:\n out.append(str(value) in entity)\n if pure:\n return all(out)\n else:\n return any(out)", "def is_field_requested(self, field_name):\n if self.limit_fields:\n request = self.get_request()\n assert request, \"request can't be None in limit_fields mode\"\n requested_fields = self.get_requested_field_names(request)\n return field_name in requested_fields\n else:\n # always return field if limit_fields flag set to False\n return True", "def has_items(self):\r\n return self.orderitem_set.exists() # pylint: disable=E1101\r", "def allow_relation(self, obj1, obj2, **hints):\r\n if obj1._meta.app_label == self.APP_LABEL or obj2._meta.app_label == self.APP_LABEL:\r\n return True\r\n return None", "def has_relation(\n self, source: Tuple[str, str], target: Tuple[str, str], relation: str\n ) -> bool:\n res = self.get_relations(source, target, relation, limit=1)\n if res:\n return True\n else:\n return False", "def has_voted(self, user):\n return user.choice_set.filter(vote=self).exists()", "def is_choice(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs()) is not None", "def allow_relation(self, obj1, obj2, **hints):\n\n if obj1._state.db == obj2._state.db:\n return True\n return False", "def exists(self):\n try:\n key = self.key\n except DoesNotExist:\n \"\"\"\n If the object doesn't exists anymore, its PK is deleted, so the\n \"self.key\" call will raise a DoesnotExist exception. 
We catch it\n to return False, as the field doesn't exists too.\n \"\"\"\n return False\n else:\n return self.connection.exists(key)", "def exists(self):\n\n return self.ids[-1] is not None", "def exists(cls, ko):\n if isinstance(ko, BagDocument):\n return ko._key in cls._dbag\n else:\n return ko in cls._dbag", "def allow_relation(self, obj1, obj2, **hints):\n if (obj1._meta.app_label == obj2._meta.app_label):\n return True\n else:\n return None", "def is_country_selection_criteria_field_present_in_vendor_profile_page(self):\n return self.is_specific_selection_criteria_filter_present(self.vendor_profile_page_div_id, self.country_label_name)", "def __contains__(self, item):\n if item == self.profile_id:\n return True", "def formfield_for_manytomany(self, db_field, request, **kwargs):\n\n if db_field.name == \"professor\" and not request.user.is_superuser:\n kwargs[\"queryset\"] = Professor.objects.filter(faculty_id=request.user.profile.faculty.id)\n\n return super(MaterialAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)", "def has_step_field(self, field) -> bool:\n if not self._step:\n return False\n return field in self._step", "def __contains__(self, item):\n return item in self._fetch()", "def has_changed(self):\n has_changed = forms.ModelForm.has_changed(self)\n return bool(self.initial or has_changed)", "def has_changed(self):\n has_changed = forms.ModelForm.has_changed(self)\n return bool(self.initial or has_changed)", "def has_changed(self):\n has_changed = forms.ModelForm.has_changed(self)\n return bool(self.initial or has_changed)", "def is_country_selection_criteria_field_present_in_compare_price_list_pop_up(self):\n return self.is_specific_selection_criteria_filter_present(self.compare_price_list_pop_up_div_id, self.country_label_name)", "def allow_relation(self, obj1, obj2, **hints):\n return self._route_by_model_type(obj1) == self._route_by_model_type(obj2)", "def test_item_is_related_to_list(self) -> None:\n list_ = List.objects.create()\n item = Item()\n item.list = list_\n item.save()\n self.assertIn(item, list_.item_set.all())", "def has_id_field(class_or_instance: Any) -> bool:\n return hasattr(class_or_instance, _ID_FIELD_NAME)", "def __contains__(self, obj):\n return obj in self.actors", "def formfield_for_manytomany(self, db_field, request, **kwargs):\n if db_field.name == \"professor\" and not request.user.is_superuser:\n kwargs[\"queryset\"] = Professor.objects.filter(faculty_id=request.user.profile.faculty.id)\n\n return super(TaskAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)", "def hasMetaModel(self, metaModel):\r\n if self.getClass() == metaModel: return 1\t\t\t\t# if the meta model is the actual class\r\n for mmodels in self.mergedASG:\t\t\t\t\t# else check the merged meta-models\r\n if mmodels.getClass() == metaModel: return 1\r\n return 0", "def has_fields(class_or_instance):\n return hasattr(class_or_instance, _FIELDS)", "def _filter_one2one(self, field):\n if isinstance(field, models.OneToOneField):\n if self._join_allowed(field.model, field.rel.to, field):\n return field", "def contains(self, x: object):\n return x in self.items", "def __contains__(self, name):\n return name in set(self)" ]
[ "0.6742698", "0.6351571", "0.6249839", "0.6177484", "0.594861", "0.58511186", "0.58348316", "0.5823608", "0.5715787", "0.56835854", "0.5671056", "0.5618836", "0.5598657", "0.5598657", "0.55803764", "0.55689776", "0.5560058", "0.55562407", "0.551438", "0.55038196", "0.54985", "0.54815364", "0.54295796", "0.5426876", "0.54227006", "0.5397911", "0.5394847", "0.53613305", "0.5352232", "0.5341833", "0.53153884", "0.53138244", "0.529909", "0.5285754", "0.52839637", "0.52807474", "0.52707636", "0.5267501", "0.52592057", "0.525849", "0.52518076", "0.5243299", "0.5231033", "0.5215929", "0.52140856", "0.5206491", "0.52063644", "0.519721", "0.5197167", "0.5194406", "0.5179496", "0.5173344", "0.5160901", "0.5160441", "0.5159656", "0.51576096", "0.51515925", "0.51445186", "0.51320237", "0.5125677", "0.51239735", "0.5122955", "0.5121526", "0.5116497", "0.511559", "0.51118815", "0.511173", "0.5090428", "0.5086558", "0.5083573", "0.5082484", "0.5081689", "0.50808555", "0.50784785", "0.5074368", "0.50733894", "0.50677586", "0.5066228", "0.5066053", "0.50628006", "0.5062504", "0.50510913", "0.50480795", "0.5041609", "0.5039106", "0.5037081", "0.50361764", "0.50361764", "0.50361764", "0.50257814", "0.50199574", "0.50198966", "0.5018034", "0.5017374", "0.500496", "0.5004755", "0.5004493", "0.50033593", "0.4997818", "0.49977088" ]
0.66686696
1
outputs the A vector
def getA(self):
    return self.theta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def A(self):\n return self._representation_vector[1:]", "def return_vec(self) :\r\n y_vec = np.concatenate((self.x_vec,self.v_vec))\r\n return y_vec", "def print_vector(self):\n print self.x, self.y, self.z", "def v(self) -> np.ndarray:\n return self.A[1:] if self.scalar_vector else self.A[:3]", "def __call__(self):\n return self._vector", "def as_vector(self) -> np.ndarray:\n psi = self.A[0]\n for i in range(1, len(self.A)):\n psi = merge_mps_tensor_pair(psi, self.A[i])\n assert psi.ndim == 3\n assert psi.shape[1] == 1 and psi.shape[2] == 1\n return psi.reshape(-1)", "def vector(self):\n \n v_list = Householder.triangle_operation(self)[1]\n \n return(v_list)", "def vector(self):\n return self.__vector", "def __call__(self):\n return self._representation_vector", "def AsVector(self) -> BaseVector:", "def vec(self):\r\n\r\n xv = np.arange(self.dx / 2, self.lx, self.dx)\r\n yv = np.arange(-self.ly / 2 + self.dy / 2, self.ly / 2, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz, self.dz)\r\n\r\n if self.ox != 0:\r\n xv = np.arange(self.ox, self.lx + self.ox, self.dx)\r\n yv = np.arange(self.oy, self.ly + self.oy, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz, self.dz)\r\n\r\n return xv, yv, zv", "def AsVector(self) -> ngsolve.la.BaseVector:", "def getPrincipalVectors(A): #\n VT=np.linalg.eig(np.matmul(A.T,A))\n sort = sorted(zip(VT[0],VT[1].T.tolist()),reverse=True)\n values,vectors = zip(*sort)\n return vectors,values", "def voigt(a):\n A = np.zeros(shape=(6, 1))\n A[0] = a[0, 0]\n A[1] = a[1, 1]\n A[2] = a[2, 2]\n A[3] = a[1, 2]\n A[4] = a[0, 2]\n A[5] = a[0, 1]\n return A", "def cal_list_a(self):\r\n \r\n list_a = np.array([])\r\n \r\n self.cal_et()\r\n #Para cada vector configuración binaria que etiqueta al elemento del vector A obtenemos un numero decimal sobre el número de elemento, a partir de aqui podemos hacer el calculo de la propagacion\r\n list_a = np.array([])\r\n for binn in self.et:\r\n dec_a = numListar(binn)\r\n dec_a = int(str(dec_a), 2)\r\n list_a = np.append(list_a, [dec_a]) \r\n return list_a", "def __repr__(self):\n x, y, z = self._ar\n return \"<Vector %.2f, %.2f, %.2f>\" % (x, y, z)", "def v_o(A,vd):\n return A*vd", "def get_vector(self): \n #print(self.state)\n '''\n print(\"\"\"\n Price {}\n Last Price {}\n Last Period Transaction {}\n Last Transaction {}\n Las Value {}\n Last day {}\n Last hour {}\n Last minute {}\n --------------\n Balance {}\n Bag {}\n \"\"\".format(\n self.state['price'],\n self.states[-1]['price'],\n self.states[-1]['transaction'],\n self.transactions[-1]['transaction'],\n self.value,\n self.state['day'],\n self.state['hour'],\n self.state['minute'], \n self.balance, \n self.bag, \n )) \n ''' \n self.state_vector = np.array([\n self.state['price'],\n self.states[-1]['price'],\n self.states[-1]['transaction'],\n self.transactions[-1]['transaction'],\n self.value,\n self.state['day'],\n self.state['hour'],\n self.state['minute'],\n ])\n\n return self.state_vector", "def vec_node(self):\r\n\r\n xv = np.arange(self.ox, self.lx + self.ox + self.dx, self.dx)\r\n yv = np.arange(self.oy, self.ly + self.oy + self.dy, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz + self.dz, self.dz)\r\n\r\n return xv, yv, zv", "def A(self):\n return self._A", "def take_vec(self):\n vec = aux.vec(self.numbers)\n\n return vec", "def vec_z(self):\t\t\t\r\n if self.oz != 0:\r\n ov = self.oz\r\n lv = self.self.lz + self.oz\r\n else:\r\n ov = self.dz / 2\r\n lv = self.lz\r\n\r\n zv = \"\"\r\n for num in np.arange(ov, lv, self.dz):\r\n zv += str(num) + 
\" \"\r\n\r\n return zv", "def project_vectors_ab(a, b):\n # print('dot = ', np.dot(a,b))\n # print('norm = ', np.linalg.norm(b))\n return np.dot(a, b) / np.linalg.norm(b)", "def vector(self) -> np.ndarray:\n link_vectors = [link.vector for link in self.links]\n v = np.array(link_vectors).ravel()\n return v", "def vector(self):\n return self.q[1:4]", "def vector(self):\n return self._representation_vector", "def getA(self):\n\t\treturn self.a", "def __str__(self):\n s = \"\"\n for v in self.vectors:\n s += str(v) + \"\\n\"\n return s", "def call_single_vec(self, input_value):\n _, eigVectors = self.getEigen(input_value)\n return eigVectors[:,:,-1]", "def vector_space(a, alpha):\n x, y = meshgrid(linspace(-2, 2, num=20), linspace(-2, 2, num=20))\n fx, fy = stuartLandau([x, y], a, alpha)\n gx, gy = noiseFunction([x, y])\n plt.quiver(x, y, fx + gx, fy + gy, color='red')\n plt.xlabel('x')\n plt.ylabel('y')\n plt.show()", "def out(self, inputs):", "def schreier_vector(self, alpha):\n n = self.degree\n v = [None]*n\n v[alpha] = -1\n orb = [alpha]\n used = [False]*n\n used[alpha] = True\n gens = self.generators\n r = len(gens)\n for b in orb:\n for i in range(r):\n temp = gens[i]._array_form[b]\n if used[temp] is False:\n orb.append(temp)\n used[temp] = True\n v[temp] = i\n return v", "def numpy_vector(self):\n pass", "def onBase():\n return (vector(1, 0, 0), vector(0, 1, 0), vector(0, 0, 1))", "def _outputs_vector(self, symbol):\n out_iter = (self._output_logprob(sj, symbol) for sj in self._states)\n return np.fromiter(out_iter, dtype=np.float64)", "def getOutputVector(self, data = None):\n\t\treturn self.loader.getOutputVector(data)", "def convertOutput2Vec(self, vec):\r\n\t\treturn self.convert2Vec(self.getOutput(), vec)", "def el2rv(mu,a,e,i,capom,om,f):\n\n prec = 1.0e-13 #user can change this if more precision needed (just runs slower)\n\n #compute the unit vector\n u = om + f\n xhat = np.cos(u)*np.cos(capom) - np.cos(i)*np.sin(capom)*np.sin(u)\n yhat = np.cos(u)*np.sin(capom) + np.cos(i)*np.cos(capom)*np.sin(u)\n zhat = np.sin(i)*np.sin(u)\n\n #compute the angular momentum vector (unit vector)\n hx = np.sin(capom)*np.sin(i)\n hy = -np.cos(capom)*np.sin(i)\n hz = np.cos(i)\n\n #assuming not parabolic, here the magnitudes of the vectors\n r = a * (1.0 - e*e) / (1.0 + e*np.cos(f))\n h = ( mu*a*(1.0 - e*e) )**0.5\n\n #position vectors\n x = r * xhat\n y = r * yhat\n z = r * zhat\n\n #compute components of vector theta hat\n thx = hy * zhat - hz * yhat\n thy = hz * xhat - hx * zhat\n thz = hx * yhat - hy * xhat\n\n #obtain the velocity vector's components and calculate v\n thdot = h/(r*r)\n rdot = e*mu*np.sin(f)/h\n\n vx = r * thdot * thx + rdot * xhat\n vy = r * thdot * thy + rdot * yhat\n vz = r * thdot * thz + rdot * zhat\n\n return x,y,z", "def out(self):\n return self.ag.output()", "def _vec(A):\n N, m, n = A.shape\n return A.reshape((N, m*n, 1), order='F')", "def vec(self):\n return np.matrix(self.val.ravel()).transpose()", "def vec_x(self):\t\r\n if self.ox != 0:\r\n ov = self.ox\r\n lv = self.self.lx + self.ox\r\n else:\r\n ov = self.dx / 2\r\n lv = self.lx\r\n\r\n xv = \"\"\r\n for num in np.arange(ov, lv, self.dx):\r\n xv += str(num) + \" \"\r\n\r\n return xv", "def evaluate_as_vector(self, chain_state): \n def vector_representation(n, ordering, it):\n return self.mapping.subspace(zip(ordering,it))\n return self._evaluate(vector_representation, chain_state)", "def xvec(self):\n return np.array([self.x, self.y])", "def V_vect(self, points):\n return 
self.A_conf*norm(points)*self.isOutside(points)", "def a(self):\r\n return self.__a", "def __repr__(self):\n return \"Vector(\" + str(self.x) + \")\"", "def a(self):\n return self._a", "def a(self):\n return self._a", "def a(self):\n return self._a", "def vector_line(self):\n assert len(self.xcoords) == 2\n diff_x = self.xcoords[1] - self.xcoords[0]\n diff_z = self.zcoords[1] - self.zcoords[0]\n vec = np.hstack((diff_x, diff_z))\n return vec", "def v(self) -> np.ndarray:\n return self.array[:, 1:] if self.scalar_vector else self.array[:, :3]", "def result(self):\n return self.a", "def print_vectors(self):\n print(\"Vectors:\")\n for name, vector in self.get_vectors():\n self.print_vector(name, vector.items)", "def array(self):", "def Test(vectors, distance_alogrithm):\n\n lista1, distancia1 = iterative_func_ordered_1(vectors, distance_alogrithm)\n\n print(' ')\n print(f\"Distancia IFO1: {distancia1}\")\n print(\"Mejor Lista IFO1:\")\n\n print(' ')\n for vector in lista1:\n print(vector)", "def azs (a):\r\n zscores = []\r\n for item in a:\r\n zscores.append(z(a,item))\r\n return N.array(zscores)", "def articulate(self, ar_in):\n #pdb.set_trace()\n ar_out = ar_in * 2 - 1\n ar_out[:, 0:2] *= N.random.beta(self.alpha, self.beta, (4, 2))\n #ar_out[:,0:2] += N.random.normal(0,0.001)\n ar_out = 0.5 * ar_out + 0.5\n return ar_out", "def z(self) -> float:\n return self.A[3] if self.scalar_vector else self.A[2]", "def feed(self, vector):\n return vector", "def acc_b_v(self):\r\n return self._acc_b_v", "def acc_visc(j,rA,vA,mA,rhoA,PA,hA,dW=kernel.dW_M4):\n assert rA.shape[0] == vA.shape[0] == mA.shape[0] == rhoA.shape[0] == hA.shape[0], \"arrays are not matched\"\n N = len(mA)\n c_j = c_gas(j,rhoA,PA)\n\n tot = 0\n for i in range(N):\n if i != j:\n\n r_ij = rA[j,:] - rA[i,:]\n r_ij1 = np.linalg.norm(r_ij)\n v_ij = vA[j,:] - vA[i,:]\n m_i = mA[i]\n c_i = c_gas(i,rhoA,PA)\n c_ij = 0.5 * (c_i + c_j)\n h_ij = 0.5 * (hA[i] + hA[j])\n rho_ij = 0.5 * (rhoA[i] + rhoA[j])\n\n c = np.dot(v_ij,r_ij)\n mu_ij = ( c * h_ij ) / ( r_ij1**2 + 0.01*h_ij**2 )\n\n a = ( -alpha * mu_ij * c_ij + beta * mu_ij**2 ) / rho_ij\n b = 0\n Pi_ij = a*dm.heavi(-c) + b*dm.heavi(c)\n\n # if Pi_ij == 0:\n # print(\"i,j:\",i,j)\n # print(\"c:\",c)\n # print(\"c_ij\",c_ij)\n # print(\"\")\n # assert Pi_ij != 0\n\n tot += m_i * h_ij**(-4) * Pi_ij * dW(r_ij1,h_ij) * (r_ij/r_ij1)\n\n return - tot", "def _asvector(self, arr):\n result = self._moveaxis(arr, [-2, -1], [0, 1])\n return self.domain.element(result)", "def tovector(self):\n return np.hstack(self.values())", "def strain2disp(e, A):\n l = A.shape\n u = np.zeros(shape=l)\n for i in range(l[0]):\n u[i, 0] = e[0, 0] * A[i, 0] + e[0, 1] * A[i, 1]\n u[i, 1] = e[1, 1] * A[i, 1] + e[1, 2] * A[i, 2]\n u[i, 2] = e[2, 2] * A[i, 2] + e[0, 2] * A[i, 0]\n return u", "def _repr_(self):\n return 'A ray in the direction ' + repr(self.vector());", "def algi(C):\n return np.array([ C[0,2], C[1,2], C[1,0] ])", "def at( self, ang ):\n ang = asarray(ang)[:,newaxis]\n tw = ang * self.tw\n A = [identity(4)]\n for twi in tw:\n M = seToSE(twi)\n A.append(dot(A[-1],M))\n return A", "def to_abivars(self):", "def __AOD04(self):\n\n assert self.__lmax <= 100\n\n k = np.zeros(101)\n\n k[0] = 0\n k[1] = 0\n k[2] = -0.308\n k[3] = -0.195\n k[4] = -0.132\n k[5] = -0.103\n k[6] = -0.089\n k[7] = -0.082\n k[8] = -0.078\n k[9] = -0.073\n\n for i in range(10, 18):\n k[i] = -(0.682 + 0.27 * (i - 10) / 8) / i\n for i in range(18, 32):\n k[i] = -(0.952 + 0.288 * (i - 18) / 14) / i\n for i in range(32, 56):\n k[i] = 
-(1.24 + 0.162 * (i - 32) / 24) / i\n for i in range(56, 101):\n k[i] = -(1.402 + 0.059 * (i - 56) / 44) / i\n\n # print(k[0:(self.__Nmax+1)])\n\n return k[0:(self.__lmax + 1)]", "def v(self):\n return self._v", "def v(self):\n return self._v", "def get_vectors(self):\n return self.vecs[:]", "def as_vector(self):\n return self.pdm.as_vector()", "def c(self) -> np.ndarray:\n return self._vector[10:12]", "def get_a(self):\n return 0.5*-G*self._mm*self._sm/self.geten()", "def Log_OB_S1_vec(xref,x):\n\n nX = np.shape(x)\n\n m = nX[0]\n n = nX[1]\n t = nX[2]\n\n G = np.zeros((n,t))\n Gv = np.zeros((m,n,t))\n\n for r in range(t):\n\n # Correct for permuations\n\n Xout = dp(x[:,:,r])\n\n for q in range(n):\n\n a = np.sum(Xout[:,q]*xref[:,q,r])/np.sqrt(np.sum(xref[:,q,r]**2)*np.sum(Xout[:,q]**2)) # Should have unit L2 norm\n G[q,r] = np.arccos(a) # Computing the angles\n if a > 1:\n a = 1\n if a < -1:\n a = -1\n\n v = Xout[:,q] - a*xref[:,q,r]\n Gv[:,q,r] = v / (1e-24 + np.linalg.norm(v)) # Unit vector in the tangent subspace\n\n return G,Gv", "def CreateVector(self) -> BaseVector:", "def Cvec(self):\n return vec(self.xc, self.yc)", "def V(self):\n return self._V", "def V(self):\n return self._V", "def V(self):\n return self._V", "def f_v(_a, _vs, _Ps, _Ps0): # _aはスカラ, _vsはベクトル, _Ps, _Ps0は3行2列の行列\n center_pos = _Ps[0]\n center_pos_0 = _Ps0[0]\n idx_iter = Index_iterator(1, 8)\n #中心点から各点へのベクトル\n x = []\n x0 = []\n for p in (_Ps):\n x.append(p - center_pos)\n for p in _Ps(_Ps0):\n x0.append(p - center_pos_0)\n\n x01 = (_Ps[1]-center_pos) \n x02 = (_Ps[2]-center_pos) \n x03 = (_Ps[3]-center_pos) \n x04 = (_Ps[4]-center_pos) \n x05 = (_Ps[5]-center_pos) \n x06 = (_Ps[6]-center_pos) \n x07 = (_Ps[7]-center_pos) \n x08 = (_Ps[8]-center_pos)\n print('p_id', center_pos, end='\\t')\n print('x01:', x01, end=\"\\t\")\n print('x03:', x03, end=\"\\t\")\n print('x05:', x05, end=\"\\t\")\n print('x07:', x07)\n x001 = (_Ps0[1]-_Ps0[0]) \n x002 = (_Ps0[2]-_Ps0[0]) \n x003 = (_Ps0[3]-_Ps0[0]) \n x004 = (_Ps0[4]-_Ps0[0]) \n x005 = (_Ps0[5]-_Ps0[0]) \n x006 = (_Ps0[6]-_Ps0[0]) \n x007 = (_Ps0[7]-_Ps0[0]) \n x008 = (_Ps0[8]-_Ps0[0]) \n \n #中心点周りの面の面積\n def calc_area(j,k,l):\n s = LA.norm(np.cross(x[j],x[k]))/2 \\\n + LA.norm(np.cross(x[k],x[l]))/2\n return s\n\n s = []\n s0 = []\n hen = [1,3,5,7]\n for i in range(4):\n j,k,l = [n for n in idx_iter.get_indexes(start_idx=hen[i], 3)]\n s[i] = calc_area(j,k,l)\n s0[i] = calc_area(j,k,l)\n\n # s0123 = LA.norm(np.cross(x[1],x[2]))/2\\\n # +LA.norm(np.cross(x[2],x[3]))/2\n # s4367 = LA.norm(np.cross(x[3],x[4]))/2\\\n # +LA.norm(np.cross(x[4],x[5]))/2\n # s4785 = LA.norm(np.cross(x[5],x[6]))/2\\\n # +LA.norm(np.cross(x[6],x[7]))/2\n # s4521 = LA.norm(np.cross(x[7],x[8]))/2\\\n # +LA.norm(np.cross(x[8],x[1]))/2\n # s04103 = LA.norm(np.cross(x0[1],x0[2]))/2\\\n # +LA.norm(np.cross(x0[2],x0[3]))/2\n # s04367 = LA.norm(np.cross(x0[3],x0[4]))/2\\\n # +LA.norm(np.cross(x0[4],x0[7]))/2\n # s04785 = LA.norm(np.cross(x0[7],x0[8]))/2\\\n # +LA.norm(np.cross(x0[8],x0[5]))/2\n # s04521 = LA.norm(np.cross(x0[5],x0[2]))/2\\\n # +LA.norm(np.cross(x0[2],x0[1]))/2\n \n #各方向への平均面積(ここだけ反時計回り順で設定してる)\n S_iminus = (s[1] + s[2]) / 2 #43方向\n S_Jminus = (s[1] + s[4]) / 2 #41方向\n S_iplus = (s[3] + s[4]) / 2 #45方向\n S_Jplus = (s[3] + s[2]) / 2 #47方向\n S_iminus0 = (s0[1] + s0[2]) / 2 #43方向\n S_Jminus0 = (s0[1] + s0[4]) / 2 #41方向\n S_iplus0 = (s0[3] + s0[4]) / 2 #45方向\n S_Jplus0 = (s0[3] + s0[2]) / 2 #47方向\n # 各方向への厚み\n h_iminus = h_0 / ((poisson/(1-poisson) * (S_iminus - S_iminus0) / S_iminus0) + 1) 
#43方向\n h_Jminus = h_0 / ((poisson/(1-poisson) * (S_Jminus - S_Jminus0) / S_Jminus0) + 1) #41方向\n h_iplus = h_0 / ((poisson/(1-poisson) * (S_iplus - S_iplus0) / S_iplus0) + 1) #45方向\n h_Jplus = h_0 / ((poisson/(1-poisson) * (S_Jplus - S_Jplus0) / S_Jplus0) + 1) #47方向\n # 各断片の重心\n g = []\n kado = [2,4,6,8]\n hen = [1,3,5,7]\n for i in range(len(kado)):\n _kado = kado[i]\n _hen1, _ = [idx for idx in idx_iter.get_indexes_reverse(_kado, 2)]\n _hen2, _ = [idx for idx in idx_iter.get_indexes(_kado, 2)]\n _hen = [_hen1, _hen2]\n _g1 = (center_pos + _Ps[_kado] + _Ps[_hen1])/3\n _g2 = (center_pos + _Ps[_kado] + _Ps[_hen2])/3\n g.append([_g1, _g2])\n\n g401 = (center_pos + _Ps[0] + _Ps[1]) / 3\n g430 = (center_pos + _Ps[3] + _Ps[0]) / 3\n g436 = (center_pos + _Ps[3] + _Ps[6]) / 3\n g467 = (center_pos + _Ps[6] + _Ps[7]) / 3\n g478 = (center_pos + _Ps[7] + _Ps[8]) / 3\n g485 = (center_pos + _Ps[8] + _Ps[5]) / 3\n g452 = (center_pos + _Ps[5] + _Ps[2]) / 3\n g421 = (center_pos + _Ps[2] + _Ps[1]) / 3\n g0401 = (_Ps0[4] + _Ps0[0] + _Ps0[1]) / 3\n g0430 = (_Ps0[4] + _Ps0[3] + _Ps0[0]) / 3\n g0436 = (_Ps0[4] + _Ps0[3] + _Ps0[6]) / 3\n g0467 = (_Ps0[4] + _Ps0[6] + _Ps0[7]) / 3\n g0478 = (_Ps0[4] + _Ps0[7] + _Ps0[8]) / 3\n g0485 = (_Ps0[4] + _Ps0[8] + _Ps0[5]) / 3\n g0452 = (_Ps0[4] + _Ps0[5] + _Ps0[2]) / 3\n g0421 = (_Ps0[4] + _Ps0[2] + _Ps0[1]) / 3\n \n # 各断片面積\n triangle_area = []\n kado = [2,4,6,8]\n for i in range(len(kado)):\n j, k = [idx for idx in idx_iter.get_indexes_reverse(kado[i], 1)]\n _s1 = LA.norm(np.cross(x[j],x[k]))/2\n j, k = [idx for idx in idx_iter.get_indexes(kado[i], 1)]\n _s2 = LA.norm(np.cross(x[j],x[k]))/2\n triangle_area.append([_s1, _s2])\n\n s410 = LA.norm(np.cross(x[1],x[2]))/2\n s403 = LA.norm(np.cross(x[2],x[3]))/2\n s436 = LA.norm(np.cross(x[3],x[4]))/2\n s467 = LA.norm(np.cross(x[4],x[5]))/2\n s478 = LA.norm(np.cross(x[5],x[6]))/2\n s485 = LA.norm(np.cross(x[6],x[7]))/2\n s452 = LA.norm(np.cross(x[7],x[8]))/2\n s421 = LA.norm(np.cross(x[8],x[1]))/2\n s0410 = LA.norm(np.cross(x0[1],x0[2]))/2\n s0403 = LA.norm(np.cross(x0[2],x0[3]))/2\n s0436 = LA.norm(np.cross(x0[3],x0[4]))/2\n s0467 = LA.norm(np.cross(x0[4],x0[5]))/2\n s0478 = LA.norm(np.cross(x0[5],x0[6]))/2\n s0485 = LA.norm(np.cross(x0[6],x0[7]))/2\n s0452 = LA.norm(np.cross(x0[7],x0[8]))/2\n s0421 = LA.norm(np.cross(x0[8],x0[1]))/2\n # 四角の重心\n\n center_g_square = []\n for i in range(len(g)):\n _g = (triangle_area[i][0]*g[i][0] + triangle_area[i][1]*g[i][1])/(triangle_area[i][0] + triangle_area[i][1])\n center_g.append(_g)\n g4103 = (s410*g401 + s403*g430) / (s410 + s403)\n g4367 = (s436*g436 + s467*g467) / (s436 + s467)\n g4785 = (s478*g478 + s485*g485) / (s478 + s485)\n g4521 = (s452*g452 + s421*g421) / (s452 + s421)\n g04103 = (s0410*g0401 + s0403*g0430) / (s0410 + s0403)\n g04367 = (s0436*g0436 + s0467*g0467) / (s0436 + s0467)\n g04785 = (s0478*g0478 + s0485*g0485) / (s0478 + s0485)\n g04521 = (s0452*g0452 + s0421*g0421) / (s0452 + s0421)\n # 各重心間の距離\n Lj82 = LA.norm(g4521 - g4103)\n Lj24 = LA.norm(g4103 - g4367)\n Lj46 = LA.norm(g4367 - g4785)\n Lj68 = LA.norm(g4785 - g4521)\n \n # ひずみ\n eps_i41 = (LA.norm(x01) - LA.norm(x041)) / LA.norm(x041)\n eps_J41 = (LA.norm(g4521 - g4103) - LA.norm(g04521 - g04103)) / LA.norm(g04521 - g04103)\n eps_i43 = (LA.norm(x03) - LA.norm(x043)) / LA.norm(x043)\n eps_J43 = (LA.norm(g4103 - g4367) - LA.norm(g04103 - g04367)) / LA.norm(g04103 - g04367)\n eps_i47 = (LA.norm(x01) - LA.norm(x041)) / LA.norm(x041)\n eps_J47 = (LA.norm(g4367 - g4785) - LA.norm(g04367 - g04785)) / LA.norm(g04367 - 
g04785)\n eps_i45 = (LA.norm(x01) - LA.norm(x041)) / LA.norm(x041)\n eps_J45 = (LA.norm(g4785 - g4521) - LA.norm(g04785 - g04521)) / LA.norm(g04785 - g04521)\n # 張力\n F_T1 = (young_modulus * h_Jminus * Lj82 * (eps_i41 + poisson * eps_J41) / (1 - poisson**2))*x01/LA.norm(x01)\n F_T3 = (young_modulus * h_iminus * Lj24 * (eps_i43 + poisson * eps_J43) / (1 - poisson**2))*x03/LA.norm(x03)\n F_T5 = (young_modulus * h_Jplus * Lj46 * (eps_i47 + poisson * eps_J47) / (1 - poisson**2))*x05/LA.norm(x05)\n F_T7 = (young_modulus * h_iplus * Lj68 * (eps_i45 + poisson * eps_J45) / (1 - poisson**2))*x07/LA.norm(x07)\n # せん断ひずみ\n gamma513 = (math.acos((np.dot(x07,x01))/(LA.norm(x07)*LA.norm(x01))) - math.acos((np.dot(x045,x041))/(LA.norm(x045)*LA.norm(x041)))\\\n + math.acos((np.dot(x03,x01))/(LA.norm(x03)*LA.norm(x01))) - math.acos((np.dot(x043,x041))/(LA.norm(x043)*LA.norm(x041))))/2\n gamma137 = (math.acos((np.dot(x01,x03))/(LA.norm(x01)*LA.norm(x03))) - math.acos((np.dot(x041,x043))/(LA.norm(x041)*LA.norm(x043)))\\\n + math.acos((np.dot(x03,x05))/(LA.norm(x03)*LA.norm(x05))) - math.acos((np.dot(x043,x047))/(LA.norm(x043)*LA.norm(x047))))/2\n gamma375 = (math.acos((np.dot(x05,x03))/(LA.norm(x05)*LA.norm(x03))) - math.acos((np.dot(x047,x043))/(LA.norm(x047)*LA.norm(x043)))\\\n + math.acos((np.dot(x07,x05))/(LA.norm(x07)*LA.norm(x05))) - math.acos((np.dot(x045,x047))/(LA.norm(x045)*LA.norm(x047))))/2\n gamma751 = (math.acos((np.dot(x05,x07))/(LA.norm(x05)*LA.norm(x07))) - math.acos((np.dot(x047,x045))/(LA.norm(x047)*LA.norm(x045)))\\\n + math.acos((np.dot(x07,x01))/(LA.norm(x07)*LA.norm(x01))) - math.acos((np.dot(x045,x041))/(LA.norm(x045)*LA.norm(x041))))/2\n # せん断力\n F_S41 = ((young_modulus * h_Jminus * LA.norm(x01) * gamma513)/(2 * (1 + poisson)))*x01/LA.norm(x01)\n F_S43 = ((young_modulus * h_Jminus * LA.norm(x03) * gamma137)/(2 * (1 + poisson)))*x03/LA.norm(x03)\n F_S47 = ((young_modulus * h_Jminus * LA.norm(x05) * gamma375)/(2 * (1 + poisson)))*x05/LA.norm(x05)\n F_S45 = ((young_modulus * h_Jminus * LA.norm(x07) * gamma751)/(2 * (1 + poisson)))*x07/LA.norm(x07)\n \n # J方向の曲げ力\n n_j_cross = np.cross(x05, x01)\n if any(n_j_cross):\n n_J = n_j_cross/LA.norm(n_j_cross)\n else: \n\n l_Jalfa = LA.norm(_Ps[1] - _Ps[7])\n cos_Jalfa = (LA.norm(x01)**2 + LA.norm(x05)**2 - l_Jalfa**2) / (2 * LA.norm(x01) * LA.norm(x05))\n if cos_Jalfa > 1.0:\n cos_Jalfa = 1.0\n elif cos_Jalfa < -1.0:\n cos_Jalfa = -1.0\n sin_Jalfa = math.sqrt(1 - cos_Jalfa**2)\n CJa2 = math.sqrt((cos_Jalfa + 1)/2)\n SJa2 = math.sqrt((1 - cos_Jalfa)/2)\n zJC = (_Ps[7][2]-_Ps[1][2])/(_Ps[7][0]-_Ps[1][0]) * (center_pos[0]-_Ps[1][0]) + _Ps[1][2] #曲げ力の方向の場合わけに必要\n if center_pos[2] > zJC:\n e_j = np.dot(np.array([[CJa2 + (n_J[0]**2) * (1 - CJa2), n_J[0] * n_J[1] * (1 - CJa2) + n_J[2] * SJa2, n_J[0] * n_J[2] * (1 - CJa2) - n_J[1] * SJa2],\\\n [n_J[1] * n_J[0] * (1 - CJa2) - n_J[2] * SJa2, CJa2 + (n_J[1]**2) * (1 - CJa2), n_J[1] * n_J[2] * (1 - CJa2) + n_J[0] * SJa2],\\\n [n_J[2] * n_J[0] * (1 - CJa2) + n_J[1] * SJa2, n_J[2] * n_J[1] * (1 - CJa2) - n_J[0] * SJa2, CJa2 + (n_J[2]**2) * (1 - CJa2)]]), (_Ps[7] - center_pos)/LA.norm(_Ps[7] - center_pos))\n else:\n e_j = np.dot(np.array([[CJa2 + (n_J[0]**2) * (1 - CJa2), n_J[0] * n_J[1] * (1 - CJa2) - n_J[2] * SJa2, n_J[0] * n_J[2] * (1 - CJa2) + n_J[1] * SJa2],\\\n [n_J[1] * n_J[0] * (1 - CJa2) + n_J[2] * SJa2, CJa2 + (n_J[1]**2) * (1 - CJa2), n_J[1] * n_J[2] * (1 - CJa2) - n_J[0] * SJa2],\\\n [n_J[2] * n_J[0] * (1 - CJa2) - n_J[1] * SJa2, n_J[2] * n_J[1] * (1 - CJa2) + n_J[0] * SJa2, CJa2 + (n_J[2]**2) * 
(1 - CJa2)]]), (_Ps[7] - center_pos)/LA.norm(_Ps[7] - center_pos))\n d_etha_J = (2 * sin_Jalfa / l_Jalfa) - (2 * math.sqrt(1 - np.dot(x041,x047)**2/(LA.norm(x041)*LA.norm(x047))**2)/(LA.norm(x041 - x047)))\n\n n_i = np.cross(x07,x03)/LA.norm(np.cross(x03,x07)) \n cos_ialfa = np.dot(x03,x07) / (LA.norm(x03) * LA.norm(x07))\n sin_ialfa = math.sqrt(1 - cos_ialfa**2)\n Cia2 = math.sqrt((cos_ialfa + 1)/2)\n Sia2 = math.sqrt((1 - cos_ialfa)/2)\n ziC = (_Ps[5][2]-_Ps[3][2])/(_Ps[5][0]-_Ps[3][0]) * (center_pos[0]-_Ps[3][0]) + _Ps[3][2]\n if center_pos[2] > ziC:\n e_i = np.dot(np.array([[Cia2 + (n_i[0]**2) * (1 - Cia2), n_i[0] * n_i[1] * (1 - Cia2) + n_i[2] * Sia2, n_i[0] * n_i[2] * (1 - Cia2) - n_i[1] * Sia2],\\\n [n_i[1] * n_i[0] * (1 - Cia2) - n_i[2] * Sia2, Cia2 + (n_i[1]**2) * (1 - Cia2), n_i[1] * n_i[2] * (1 - Cia2) + n_i[0] * Sia2],\\\n [n_i[2] * n_i[0] * (1 - Cia2) + n_i[1] * Sia2, n_i[2] * n_i[1] * (1 - Cia2) - n_i[0] * Sia2, Cia2 + (n_i[2]**2) * (1 - Cia2)]]), (_Ps[7] - center_pos)/LA.norm(_Ps[7] - center_pos))\n else:\n e_i = np.dot(np.array([[Cia2 + (n_i[0]**2) * (1 - Cia2), n_i[0] * n_i[1] * (1 - Cia2) - n_i[2] * Sia2, n_i[0] * n_i[2] * (1 - Cia2) + n_i[1] * Sia2],\\\n [n_i[1] * n_i[0] * (1 - Cia2) + n_i[2] * Sia2, Cia2 + (n_i[1]**2) * (1 - Cia2), n_i[1] * n_i[2] * (1 - Cia2) - n_i[0] * Sia2],\\\n [n_i[2] * n_i[0] * (1 - Cia2) - n_i[1] * Sia2, n_i[2] * n_i[1] * (1 - Cia2) + n_i[0] * Sia2, Cia2 + (n_i[2]**2) * (1 - Cia2)]]), (_Ps[5] - center_pos)/LA.norm(_Ps[5] - center_pos))\n d_etha_i = (2 * sin_ialfa / LA.norm(x07 - x03)) - (2 * math.sqrt(1 - np.dot(x043,x045)**2/(LA.norm(x043)*LA.norm(x045))**2)/(LA.norm(x043 - x045)))\n\n\n l_J = (Lj20 + Lj06 + Lj68 + Lj82) / 4\n h = (h_iminus + h_iplus + h_Jminus + h_Jplus) / 4\n I = (l_J * h**3) / 12\n M_i = (young_modulus * I * (d_etha_i + poisson * d_etha_J)/(1 - poisson**2))\n M_J = (young_modulus * I * (d_etha_J + poisson * d_etha_i)/(1 - poisson**2))\n #曲げ力\n F_Bi = M_i / LA.norm(x03) + M_i / LA.norm(x07) * e_i\n F_BJ = M_J / LA.norm(x01) + M_J / LA.norm(x05) * e_j\n #空気力\n # S = (S_iminus + S_iplus + S_Jminus + S_Jplus) / 4\n # F_A = p * S\n F_A = np.array([0.0, 0.0, -0.1]) * _a\n\n # 運動方程式(支配方程式)\n S_0 = (S_iminus0 + S_iplus0 + S_Jminus0 + S_Jplus0) / 4\n F_T = F_T41 + F_T43 + F_T45 + F_T47\n F_S = F_S41 + F_S43 + F_S45 + F_S47\n F_B = F_Bi + F_BJ\n return (F_T + F_S + F_B + F_A) / (rho * h_0 * S_0) - c * _vs", "def __rmul__(self,a):\n return Vector(self.x*a,self.y*a)\n pass", "def output(self):\n if self.real != 1:\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n else:\n angle = 0.\n xyz = self.pure\n result = \"%g %g %g %g %g %g %g\" % (self.trans[0], self.trans[1], self.trans[2],\n xyz[0], xyz[1], xyz[2], angle)\n return result", "def auxiliary_trail_vector(self):\n return self.attributes[\"_aux_vector\"]", "def ac(self):\n return np.array(self['gen'], dtype=np.float32)", "def vec(ang, bl=1):\n return [bl * np.cos(ang), bl * np.sin(ang)]", "def axialv(a):\n return 0.5 * np.array([a[2, 1] - a[1, 2], a[0, 2] - a[2, 0], a[1, 0] - a[0, 1]])", "def _repr_(self):\n return 'A vertex at ' + repr(self.vector());", "def vec(self) -> Vec:\n return Vec(self.x, self.y, self.z)", "def xyz(self) -> np.ndarray:\n return self._vector[0:3]", "def lca(self, v, w):", "def acoeff(self):\n return np.dot(self.mmi,np.dot(self.mmatrix.T,self.bvec))", "def get_V(self):\n return self.V", "def x(self) -> float:\n return self.A[1] if self.scalar_vector else self.A[0]", "def alive_vector(self):\n return 
self._alive_vector", "def alom():\n #\n # this is the alpha\n inlist = list(\"begin\") # change data into a list element\n outlist[0:5] = inlist # place data in the list in the correct place\n # print(\"\".join(outlist)) # see result\n #\n # this is the omega\n inlist = list(\"end\")\n #\n # change data into a list element\n outlist[1247:1250] = inlist # place data in the list in the correct place\n outstr = \"\".join(outlist)\n print(outstr)\n print(len(outstr))\n # of = open(\"workfile\", \"w\")\n # of.write(outstr)", "def acc_a(self):\n return self._acc_a", "def stateVector(self):\n simulator=Aer.get_backend('statevector_simulator')\n result=execute(self.circuit,backend=simulator).result()\n statevector=result.get_statevector(decimals=4) #\"decimals=4\" doesn't work in version 0.20.0 \n return statevector.tolist()", "def _as_vector(self):\n return np.hstack([self.global_parameters, self.weights])" ]
[ "0.6733412", "0.673054", "0.6519931", "0.64849705", "0.64376384", "0.6289511", "0.62315416", "0.62300235", "0.62070984", "0.61930376", "0.61860853", "0.61663204", "0.61269957", "0.609983", "0.6096781", "0.60403943", "0.60208887", "0.5975669", "0.5926071", "0.5925174", "0.58986276", "0.58929133", "0.58813846", "0.58724606", "0.5870889", "0.58696616", "0.585583", "0.58544815", "0.5849832", "0.584903", "0.58477765", "0.58319473", "0.583183", "0.5817809", "0.5813953", "0.58111423", "0.58010495", "0.5793006", "0.578254", "0.5773826", "0.5769524", "0.5761977", "0.57360274", "0.5718752", "0.5713716", "0.5705241", "0.57028335", "0.56614816", "0.56614816", "0.56614816", "0.565132", "0.5646905", "0.56430143", "0.563033", "0.5619967", "0.5606501", "0.5601431", "0.5584658", "0.5584657", "0.55835587", "0.5582152", "0.55652803", "0.55633616", "0.5550791", "0.55437607", "0.5534024", "0.55272377", "0.5520675", "0.55142325", "0.5503813", "0.550301", "0.550301", "0.5492151", "0.54916066", "0.5486385", "0.5485708", "0.54853576", "0.54841423", "0.5479576", "0.5477218", "0.5477218", "0.5477218", "0.5472702", "0.5468911", "0.5456425", "0.5455788", "0.5453193", "0.5451944", "0.54512864", "0.54485774", "0.5445383", "0.5440352", "0.54243493", "0.5422566", "0.5422086", "0.54198873", "0.5411846", "0.5410281", "0.54035974", "0.540063", "0.5386392" ]
0.0
-1
outputs the noise or bias
def getB(self):
    return self.error
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def noise(self, stddev):\n #add noise to weights\n pass", "def noise(self, freq: int, /) -> None:", "def add_noise(self, data):", "def noisePreset() :\n s.noisePreset()", "def noiseReduction(self):\n pass", "def white_noise():\n return random.randint(-32767, 32767)", "def noise(self):\n return self._noise", "def brown_noise():\n # TODO: try different values of BROWN_FACTOR\n # ... just seems to make it noisier or quieter - no change in freq\n global brown_val\n if brown_val > 32767:\n brown_val = brown_val - abs(white_noise()) / BROWN_FACTOR\n elif brown_val < -32767:\n brown_val = brown_val + abs(white_noise()) / BROWN_FACTOR\n else:\n brown_val = brown_val + white_noise() / BROWN_FACTOR\n return int(brown_val)", "def noise(self, xs, ys):\n raise NotImplementedError", "def noise(self):\n # Extract parameters\n pzs = self.params[0]\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n return 1.0 / ngals", "def generate_noise_vector(self, ):\n self.noise.resize_(\n self.batch_size, int(self.opt.nz), 1, 1).normal_(0, 1)\n self.noisev = Variable(self.noise) # TODO: Add volatile=True???", "def addNoise (image,noise_type=\"gauss\",var = .01):\n row,col,ch= image.shape\n if noise_type == \"gauss\": \n mean = 0.0\n #var = 0.001\n sigma = var**0.5\n gauss = np.array(image.shape)\n gauss = np.random.normal(mean,sigma,(row,col,ch))\n gauss = gauss.reshape(row,col,ch)\n #print(gauss)\n noisy = image + gauss*255\n return noisy.astype('uint8')\n elif noise_type == \"s&p\":\n s_vs_p = 0.5\n amount = 0.09\n out = image\n # Generate Salt '1' noise\n num_salt = np.ceil(amount * image.size * s_vs_p)\n coords = [np.random.randint(0, i - 1, int(num_salt))\n for i in image.shape]\n out[coords] = 255\n # Generate Pepper '0' noise\n num_pepper = np.ceil(amount* image.size * (1. 
- s_vs_p))\n coords = [np.random.randint(0, i - 1, int(num_pepper))\n for i in image.shape]\n out[coords] = 0\n return out\n elif noise_type == \"poisson\":\n vals = len(np.unique(image))\n vals = 2 ** np.ceil(np.log2(vals))\n noisy = np.random.poisson(image * vals) / float(vals)\n return noisy\n elif noise_type ==\"speckle\":\n gauss = np.random.randn(row,col,ch)\n gauss = gauss.reshape(row,col,ch) \n noisy = image + image * gauss\n return noisy\n else:\n return image", "def add_noise(self):\n self.noise = torch.normal(0.5, .2, self.state.shape).double()\n self.noise *= torch.sqrt(2 *\n self.vars['T']*torch.tensor(self.vars['dt']))", "def pink_brown():\n return pink_noise() + brown_noise()", "def apply_noise(self, input):\n mask = np.random.binomial(1, 1-self.noise_prob, len(input)) \n noisy_input = mask * input\n \n return noisy_input", "def get_noisy_output_of_system(self, y_without_noise):\n # There were some problems with copying the array data so I just wrote a copy command for every single line\n if self.bOutputNoise:\n if np.size(y_without_noise, 0) == 3:\n y_with_noise = np.zeros(3)\n y_with_noise[0] = y_without_noise[0] + np.random.normal(0, np.sqrt(self.p_var), 1)[0]\n y_with_noise[1] = y_without_noise[1] + np.random.normal(0, np.sqrt(self.e_var), 1)[0]\n y_with_noise[2] = y_without_noise[2] + np.random.normal(0, np.sqrt(self.lamb_var), 1)[0]\n elif np.size(y_without_noise, 0) == 5:\n y_with_noise = np.zeros(5)\n y_with_noise[0] = y_without_noise[0] + np.random.normal(0, np.sqrt(self.p_var), 1)[0]\n y_with_noise[1] = y_without_noise[1] + np.random.normal(0, np.sqrt(self.e_var), 1)[0]\n y_with_noise[2] = y_without_noise[2] + np.random.normal(0, np.sqrt(self.lamb_var), 1)[0]\n y_with_noise[3] = y_without_noise[3] + np.random.normal(0, np.sqrt(self.f_var), 1)[0]\n y_with_noise[4] = y_without_noise[4] + np.random.normal(0, np.sqrt(self.b_var), 1)[0]\n else:\n y_with_noise = y_without_noise\n return y_with_noise", "def noise(self) -> Sequence:\n\n return self._noise", "def add_noise(self, u):\n noise = torch.randn_like(u)\n noise[:, :, :3] = noise[:, :, :3] * self.imu_std[0]\n noise[:, :, 3:6] = noise[:, :, 3:6] * self.imu_std[1]\n\n # bias repeatability (without in run bias stability)\n b0 = self.uni.sample(u[:, 0].shape).cuda()\n b0[:, :, :3] = b0[:, :, :3] * self.imu_b0[0]\n b0[:, :, 3:6] = b0[:, :, 3:6] * self.imu_b0[1]\n u = u + noise + b0.transpose(1, 2)\n return u", "def noise(self):\r\n if self.buffer_offset + self.frames_per_buffer - 1 > self.x_max:\r\n #relleno con ceros al final si es necesario\r\n xs = np.arange(self.buffer_offset, self.x_max)\r\n tmp = np.random.random_sample(len(xs)) #ruido\r\n out = np.append(tmp, np.zeros(self.frames_per_buffer-len(tmp)))\r\n else:\r\n xs = np.arange(self.buffer_offset,\r\n self.buffer_offset + self.frames_per_buffer)\r\n out = np.random.random_sample(len(xs))\r\n self.buffer_offset += self.frames_per_buffer\r\n return out", "def reset_noise(self):\n self.advantage_hidden_layer.reset_noise()\n self.advantage_layer.reset_noise()\n self.value_hidden_layer.reset_noise()\n self.value_layer.reset_noise()", "def _get_noise(self, shape, dtype=None):", "def add_noise(self):\n self.noise = np.random.poisson(lam=self.lam, size=self.image.shape)\n self.image += self.noise\n return", "def add_noise(self):\n self.noise = np.random.poisson(lam=self.lam, size=self.im.shape)\n self.im += self.noise\n return", "def image(self):\n\n return self.signal + self.noise", "def noise(self, noise):\n\n self._noise = noise", "def prob_3_6(self):\n \n ###### 
START CODE HERE ######\n\n\n ###### END CODE HERE ######\n pass\n \n ###### return addNoiseImg ######", "def test_noise(self, lang):\n\n lang_id = self.params.lang2id[lang]\n sent1, len1 = self.get_batch('encdec', lang, None)\n sent1 = sent1.transpose_(0, 1)\n print(sent1.shape)\n print(\"sent1 before noise is \")\n print(sent1)\n print(\"len1 before noise is \")\n print(len1)\n\n sent1, len1 = self.add_noise(sent1, len1, lang_id)\n\n print('sent1 after noise for ' + lang + ' is')\n print(sent1)\n print('len1 for ' + lang + \" is \")\n print(len1)", "def _get_noise(self, shape, dtype=None):\n return np.random.normal(self._bias, self._scale, shape).astype(dtype)", "def noise_generator(n, mean, std, fractindex):\n if fractindex not in VALID_FRACT:\n raise ValueError(\"results: status must be one of %r.\" % VALID_FRACT)\n \n stdev = std\n \n b = 2*fractindex-1\n print('beta: ', b)\n \n bdis = np.zeros(n)\n\n bdis[0] = 1\n for i in range(1,n):\n bdis[i] = bdis[i-1] * (0.5 * b + (i-1))/i # note that b is the shape parementer (b)\n\n plt.plot(bdis)\n plt.show\n\n wnt = np.random.normal(mean, stdev, size = n)\n print('WhiteNoise Stdev: ', np.std(wnt))\n plt.plot(wnt)\n plt.show()\n\n bdis_freq = np.fft.fft(bdis)\n wnt_freq = np.fft.fft(wnt)\n\n bdis_freq = bdis_freq[1:n+1]\n wnt_freq = wnt_freq[1:n+1]\n\n freq_total = bdis_freq * wnt_freq\n \n NumUniquePts = n/2 + 1\n NumUniquePts = int(NumUniquePts)\n j = np.arange(1, NumUniquePts)\n \n if fractindex > 1.0:\n j = j\n elif fractindex <= 1.0:\n j = j**0.5\n \n ft_half1 = freq_total[1:NumUniquePts]/j\n\n real = np.real(freq_total[1:NumUniquePts+1])\n real = np.flip(real, axis=0)\n\n imaginary = np.imag(freq_total[1:NumUniquePts+1])\n imaginary = np.flip(imaginary, axis=0)\n imaginary = 1j * imaginary\n\n ft_half2 = real - imaginary\n\n ft = np.hstack((ft_half1, ft_half2))\n \n x = np.fft.ifft(ft)\n x = np.real(x[:n])\n\n mean_diff = mean - np.mean(x)\n x = mean_diff + x\n print(np.mean(x))\n print(np.std(x))\n plt.plot(x)\n plt.show()\n \n return x", "def add_noise(self, noise):\n if noise > 0.0:\n for key in self.counts:\n self.counts[key] *= 1.0 + noise * np.random.random_sample()", "def _addNoise(self):\n self.dispNoise = self.dispRaw.copy()\n self.dispNoise[:, 0] += self.sigmaEast * numpy.random.randn(self.numStations)\n self.dispNoise[:, 1] += self.sigmaNorth * numpy.random.randn(self.numStations)\n self.dispNoise[:, 2] += self.sigmaUp * numpy.random.randn(self.numStations)\n return", "def _get_noise(self, shape, dtype=None):\n return np.full(shape, self._bias, dtype)", "def add_noise(image):\n image += 10e-10 * np.random.randn(image.shape[0], image.shape[1], 1)\n \n return image", "def addNoise(pure,snr):\r\n watts = pure**2\r\n # Calculate signal power and convert to dB \r\n sig_avg_watts = np.mean(watts)\r\n sig_avg_db = 10 * np.log10(sig_avg_watts)\r\n # Calculate noise according to [2] then convert to watts\r\n noise_avg_db = sig_avg_db - snr\r\n noise_avg_watts = 10 ** (noise_avg_db / 10)\r\n # Generate an sample of white noise\r\n mean_noise = 0\r\n noise = np.random.normal(mean_noise, np.sqrt(noise_avg_watts), len(watts))\r\n \r\n return pure+noise", "def ternary_noise(N_stimuli, Nx, Ny):\n return np.random.randint(-1, 2, size=(N_stimuli, Nx, Ny))", "def add_weight_noise(self, std):\n with torch.no_grad():\n param_vector = parameters_to_vector(self.parameters())\n normal_dist = torch.distributions.Normal(loc=torch.tensor([0.0]), scale=torch.tensor([std]))\n noise = normal_dist.sample(param_vector.size())\n if self.device_id >= 0:\n 
noise = noise\n param_vector.add_(noise[0])\n vector_to_parameters(param_vector, self.parameters())", "def exact_noise_value(d=10, b=3):\n return (d-b)/(d - 1)", "def noise(self):\n # Extract parameters\n pzs = self.params[0]\n # retrieve number of galaxies in each bins\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n if isinstance(self.config[\"sigma_e\"], list):\n sigma_e = np.array([s for s in self.config[\"sigma_e\"]])\n else:\n sigma_e = self.config[\"sigma_e\"]\n return sigma_e ** 2 / ngals", "def awgn(input, noise_std):\n\tif not isinstance(noise_std, (list, tuple)):\n\t\tsigma = noise_std\n\telse: # uniform sampling of sigma\n\t\tsigma = noise_std[0] + \\\n\t\t (noise_std[1] - noise_std[0])*torch.rand(len(input),1,1,1, device=input.device)\n\treturn input + torch.randn_like(input) * (sigma/255)", "def awgn(input, noise_std):\n\tif not isinstance(noise_std, (list, tuple)):\n\t\tsigma = noise_std\n\telse: # uniform sampling of sigma\n\t\tsigma = noise_std[0] + \\\n\t\t (noise_std[1] - noise_std[0])*torch.rand(len(input),1,1,1, device=input.device)\n\treturn input + torch.randn_like(input) * (sigma/255)", "def reset(self):\n self.noise.reset()", "def model_noise(self, model, model_res=None, num_observations=1):\n\n raise NotImplementedError", "def _sample_noise(self) -> np.ndarray:\n return np.random.randn(self.actor_action_size)", "def noiseAtten(atten) :\n s.noiseAtten(atten)", "def get_estimated_noise(self):\n raise NotImplementedError('Abstract Method.')", "def gen_noise(sample_size, latent):\r\n\treturn Variable(torch.randn(sample_size, latent))", "def getRxNoise(self):\n \n return self.rx_noise", "def get_noise(self):\n\n n = self.qubic.get_noise().ravel()\n n = np.r_[n, self.planck.get_noise().ravel()]\n\n return n", "def sample_posterior(self):\n \n# print (\"SAMPLING FROM LINEAR SIMILARITY VB\")\n if (self.posterior_mean == False):\n self.weight = Vil.sample_posterior(self.mu_weight, Vil.softplus(self.rho_weight))\n self.bias = Vil.sample_posterior(self.mu_bias, Vil.softplus(self.rho_bias))\n# print (self.bias)\n else:\n self.weight.data = self.mu_weight.data\n self.bias.data = self.mu_bias.data", "def out(input_lst, weight_lst, bias):\r\n return 1 / (1 + math.exp(-1 * net(input_lst, weight_lst, bias)))", "def __call__(self, input: torch.Tensor) -> torch.Tensor:\n # Get noise\n noise = self.mean + torch.randn_like(input) * self.std\n # Apply nose to image\n input = input + noise\n return input", "def pink_noise():\n global curr_tick\n octave = octave_lookup[curr_tick]\n curr_noise[octave] = int(white_noise() / (5-octave))\n curr_tick += 1\n if curr_tick >= len(octave_lookup):\n curr_tick = 0\n return sum(curr_noise)", "def ICA_Denoise(Y, ica_model, noise_std):\n\n # TODO: YOUR CODE HERE", "def add_noise(Y, sigma):\r\n return Y + np.random.normal(0, sigma, Y.shape)", "def add_noise(image, noise, rate=0.05):\n\n if noise == \"gaussian\":\n row, col = image.shape\n var = ndimage.laplace(image).var()\n sigma = (var*rate) ** 0.5\n print(var, sigma)\n gauss = np.random.normal(loc=0, scale=sigma, size=(row, col)) * rate\n noisy = image + gauss\n # noisy = image + gauss\n return noisy\n\n elif noise == \"salt_pepper\":\n output = image.copy()\n black = 0\n white = 255\n probs = np.random.random(image.shape[:2])\n output[probs < (rate / 2)] = black\n output[probs > 1 - (rate / 2)] = white\n\n return output\n\n else:\n return image", "def noiseless_function(x):\n return 1/(1+np.exp(-x+5))-0.5", "def _apply_observation_noise(self, state: RobotState,\n config: 
RobotGroupConfig):\n if config.sim_observation_noise is None or self.random_state is None:\n return\n\n # Define the noise calculation.\n def noise(value_range: np.ndarray):\n amplitude = config.sim_observation_noise * np.ptp(\n value_range, axis=1)\n return amplitude * self.random_state.uniform(\n low=-0.5, high=0.5, size=value_range.shape[0])\n\n if config.qpos_range is not None:\n state.qpos += noise(config.qpos_range)\n if config.qvel_range is not None:\n state.qvel += noise(config.qvel_range)", "def generator(noise, params):\n hidden_layer = tf.nn.relu(tf.add(tf.matmul(noise, weights['gen_hidden1']), biases['gen_hidden1']))\n gen_data = tf.nn.sigmoid(tf.add(tf.matmul(hidden_layer, weights['gen_out']), biases['gen_out']))\n print('Generate data done.')\n return gen_data", "def get_bias(self):", "def _sample_new_noise(self, *, tf_sess=None):\n if self.framework == \"tf\":\n tf_sess.run(self.tf_sample_new_noise_op)\n elif self.framework == \"tf2\":\n self._tf_sample_new_noise_op()\n else:\n for i in range(len(self.noise)):\n self.noise[i] = torch.normal(\n mean=torch.zeros(self.noise[i].size()), std=self.stddev\n ).to(self.device)", "def noiseon(delay=2.0, reference=False, subarray=DEFAULT) :\n multiSubarray('noiseSource', subarray, True, reference)\n multiSubarray('rfPower', subarray, False)\n sleep(delay) # Temporary - to allow for delay in correlator", "def random(self):\n self.img[:, :] = np.random.random(\n (self.l_i, self.l_i)).astype('float32')\n self.img_name = 'white_noise'", "def gnoise(mag, sigma, mu):\n noise = np.random.normal(mu,sigma,n)\n mag = mag + noise\n return mag, noise", "def make_noise(self, num):\n return np.random.randn(num, self.seq_length, self.noise_dim)", "def writeNoise(self):\n\n if (self.noise_file == None or self.noise_file == \"\"):\n return\n ofname = self.noise_file\n ofh = open(ofname,'w')\n\n # these have to be there as long as we've read the FAST file already\n ## not true: we don't store these in the dict.\n have_data = False\n if (\"TipRad\" in self.fstDict and 'TowerHt' in self.fstDict and 'Twr2Shft' in self.fstDict):\n tiprad = self.fstDict['TipRad']\n towerht = self.fstDict['TowerHt']\n twr2shft = self.fstDict['Twr2Shft']\n have_data = True\n\n for line in self.lines_noise:\n if (have_data and line.find('Observer location') >= 0):\n xdist = -1.0 * (tiprad + (towerht + twr2shft))\n ofh.write('{:.1f} 0.0 0.0'.format(xdist))\n ofh.write(' (x,y,z) Observer location in tower-base coordinate system. 
Use -(RotRad+HubHt)\\n')\n else:\n ofh.write(line)\n ofh.close()", "def __call__(self, wav):\n beg_i = 0\n end_i = wav.shape[0]\n sel_noise = self.load_noise(self.sample_noise())\n if len(sel_noise) < len(wav):\n # pad noise\n P = len(wav) - len(sel_noise)\n sel_noise = np.pad(sel_noise, (0, P))\n # mode='reflect').view(-1).data.numpy()\n T = end_i - beg_i\n # TODO: not pre-loading noises from files?\n if len(sel_noise) > T:\n n_beg_i = np.random.randint(0, len(sel_noise) - T)\n else:\n n_beg_i = 0\n noise = sel_noise[n_beg_i:n_beg_i + T]\n # randomly sample the SNR level\n snr = random.choice(self.snr_levels)\n K, Ex, En = self.compute_SNR_K(wav, noise, snr)\n scaled_noise = K * noise\n if En > 0:\n noisy_wav = wav + scaled_noise\n noisy_wav = self.norm_energy(noisy_wav, Ex)\n else:\n noisy_wav = wav\n return noisy_wav", "def _gaussian_for_learn_denosing_model(image):\n return add_gaussian_noise(image, 0, 0.2)", "def make_noisy_images(image):\r\n return apply_poisson_noise(image, random_state=12345)", "def apply_noise(input_image, noise_level):\n n,m = input_image.shape\n # generate binomial noise\n ksi = np.random.binomial(size=n*m, n=1, p=noise_level).reshape(input_image.shape)\n noised_image = ksi^input_image\n return noised_image", "def _make_noisy(x, the_noise):\n noise_sample = the_noise[np.random.choice(the_noise.shape[0],\n x.shape[0],\n replace=False)]\n return x + noise_sample", "def predict_w_noise(self, xs, stochastic=True, **kwargs):\n raise NotImplementedError", "def get_noise(n_samples, noise_dims):\n return torch.randn(n_samples, noise_dims).to(device)", "def _calc(self):\r\n tot_sum: float = 0 # ? total sum of the noise values\r\n max_amp: float = 0 # ? keep the sum in [0,1]\r\n amp: float = 1.0 # ? amplitude of each noise value\r\n freq: float = 1.0 # ? frequency for getting the detailed noise\r\n\r\n # for each octave we twice the frequency and multiply the amplitude \r\n # by persistance to get the detailed noise value\r\n # to keep the final sum value in the range [0, 1] we keep track of the \r\n # max amplitude (sum of all the amplitudes)\r\n for octave in range(self.octaves):\r\n noise_obj = PerlinNoise(self.inp_x*freq, self.inp_y*freq, self.inp_z*freq)\r\n # ? 
multiply the noise value by the amplitude\r\n tot_sum += noise_obj.val() * amp\r\n max_amp += amp\r\n\r\n amp *= self.persist\r\n freq *= 2.0 # double the freq each iteration\r\n\r\n # value is in the range [0,1]\r\n self.value = tot_sum / max_amp", "def generate_with_noise(self, noise):\n with tf.compat.v1.Session() as sess:\n self.saver.restore(sess, \"data/\" + self.name + \".ckpt\")\n return self.X_fake.eval(feed_dict={self.z: noise, self.is_training: False})", "def generate_fake_noise(inputs, size):\n return np.random.normal(-0.0289923828125, 1.9391296947313124, (inputs, size)).astype(np.float32)", "def addNoise(x,y,noise):\n n = len(x)\n theta = np.random.random(n)*(2*np.pi) # random angle in [0,2*pi[\n d = np.random.random(n)*noise # random amplitude in [0,noise[\n x += np.cos(theta)*d\n y += np.sin(theta)*d", "def sample_noise(batch_size, dim):\n temp = torch.rand(batch_size, dim) + torch.rand(batch_size, dim)*(-1)\n\n return temp", "def sim_brown_noise(N):\n return np.cumsum(np.random.randn(N))", "def addNoise(data, amp, scale):\n lfnData = addLFNoise(data, amp, scale)\n noisyData = addHFNoise(hfnData, amp)\n\n return noisyData", "def addNoise(img, sigma=2.0, mean=0):\n img2 = np.random.normal(mean, sigma, size=img.shape)\n\n img2 += img\n img2 = np.uint8(img2.clip(0, 255))\n return img2", "def addNoise(self, sigma=1.0):\n noise = numpy.random.normal(loc=0, scale=sigma, size=(self.ny, self.nx))\n self.image += noise\n return", "def sample(self):\n self.dev.write(1, 'S')", "def get_read_noise(self):\n\n read_noise_adu = self.ccd.read_noise / self.ccd.gain\n return numpy.random.normal(scale=read_noise_adu, size=self.image.shape)", "def generate_samples(self,n_samples=100):\n rnd_input=torch.randn((n_samples,self._reparam_nodes[1]))\n zeta=rnd_input \n # rnd_input=torch.where((rnd_input>0.5),torch.ones(rnd_input.size()),torch.zeros(rnd_input.size()))\n # print(rnd_input) \n # output, mu, logvar, zeta=self.forward(rnd_input)\n # mu = self._reparam_layers['mu'](rnd_input)\n # logvar = self._reparam_layers['var'](rnd_input)\n # zeta = self.reparameterize(mu, logvar)\n output = self.decoder.decode(zeta)\n return output", "def get_estimated_noise(self):\n return self.gp_core.noise_var", "def bic(self, tmin=None, tmax=None):\n noise = self.ml.noise(tmin=tmin, tmax=tmax)\n n = noise.size\n nparam = len(self.ml.parameters[self.ml.parameters.vary == True])\n bic = -2.0 * np.log(sum(noise ** 2.0)) + nparam * np.log(n)\n return bic", "def sample_noise(batch_size, dim):\n noise = torch.rand(batch_size, dim) * 2 - 1\n return noise.view(batch_size, dim, 1, 1)", "def make_noise(self, num):\n return np.random.randn(num, self.seq_length + 2 * self.seq_pad,\n self.noise_dim)", "def __init__(self, n_in, n_out):\n self.W = np.random.randn(n_in, n_out) * 0.1\n self.b = np.zeros(n_out)", "def sample(self, n, requires_grad=False, return_mu=False, return_both=False):\n h = torch.randn((n, self.noise_dim)).to(next(self.parameters()).device)\n if requires_grad:\n h.requires_grad_()\n x_mu = self.g(h)\n x = x_mu + torch.randn_like(x_mu) * self.logsigma.exp()\n if return_both:\n return x_mu, x, h\n if return_mu:\n return x_mu, h\n else:\n return x, h", "def decay_noise_ampl(self):\n \n self.noise_ampl *= self.noise_ampl_decay", "def _sample(self, rnn_output, temperature):\n pass", "def noise_data(self, x):\n return x + np.random.normal(size=x.shape)", "def _get_noise(self, shape, dtype=None):\n return np.random.uniform(self._low, self._high, shape).astype(dtype)", "def bias_prior(self):", "def make_noise(self, 
signal_only):\n\n #print >> sys.stdout, \"generating noise...\"\n\n if signal_only:\n\n # the noise is just a time series of zeros\n \n self.td_noise = pycbc.types.timeseries.TimeSeries(\n initial_array=np.zeros(self.duration/self.delta_t),\n delta_t=self.delta_t, epoch=self.epoch)\n\n else:\n # Generate noise \n self.assign_noise_curve()\n\n # Generate time-domain noise\n # XXX: minimum duration seems to be 1 second. I'll hack around this by\n # reducing the 1 second to the desired duration\n tmplen=max(self.duration,1.0)/self.delta_t\n self.td_noise = pycbc.noise.noise_from_psd(int(tmplen), self.delta_t,\n self.psd, seed=self.seed)\n\n self.td_noise = \\\n pycbc.types.TimeSeries(self.td_noise.data[:self.duration/self.delta_t],\n delta_t=self.delta_t)\n\n # XXX not sure if this is a good idea...\n self.td_noise.start_time = float(self.epoch)\n\n self.fd_noise = self.td_noise.to_frequencyseries()", "def softing_noise(image, kn):\n\n s_noise = cv2.GaussianBlur(image, (kn, kn), 0)\n\n return s_noise", "def gen_noise(num_signals, sig_len):\n\n r_noise = np.random.normal(0, 1, (num_signals, sig_len))\n c_noise = np.random.normal(0, 1, (num_signals, sig_len)) * 1j\n noise = np.add(r_noise, c_noise) / np.sqrt(2)\n return noise/(np.var(noise, axis=1)**.5)[:, None]", "def add_noise(image, shot_noise=0.01, read_noise=0.0005):\n variance = image * shot_noise + read_noise\n noise = torch.FloatTensor(image.shape).normal_().to(image.device)*variance.sqrt()\n return image + noise", "def get_estimated_noise(self):\n return self.gp_core.likelihood.noise.item()", "def fill_noise(x, noise_type):\n if noise_type == 'u':\n x.uniform_()\n elif noise_type == 'n':\n x.normal_() \n else:\n assert False" ]
[ "0.72268444", "0.6808519", "0.67288476", "0.6671144", "0.6670958", "0.6571604", "0.6559497", "0.65078646", "0.6505491", "0.64908093", "0.6452366", "0.63892174", "0.6370349", "0.6341129", "0.63222975", "0.6292225", "0.62755954", "0.6267123", "0.6263561", "0.6238834", "0.62096435", "0.61855334", "0.6180751", "0.6179022", "0.61459005", "0.6131687", "0.6130845", "0.61302924", "0.6127228", "0.6120282", "0.6105267", "0.61024404", "0.6100136", "0.60927075", "0.6021739", "0.60025454", "0.5981375", "0.5981372", "0.59636396", "0.59636396", "0.5929213", "0.58947116", "0.5887537", "0.58869064", "0.5880207", "0.58759314", "0.5872326", "0.5869848", "0.5869171", "0.58503664", "0.58410096", "0.5836054", "0.58294606", "0.58214617", "0.58058155", "0.57986563", "0.57971907", "0.5793166", "0.57752156", "0.57647794", "0.57485926", "0.57426697", "0.57409114", "0.5718715", "0.570918", "0.5706601", "0.5704779", "0.5698455", "0.5693626", "0.56865466", "0.5675927", "0.5667625", "0.56593037", "0.56524384", "0.565064", "0.5644178", "0.5639042", "0.5636204", "0.5624671", "0.5622015", "0.5619795", "0.5602995", "0.5586794", "0.55820596", "0.55808234", "0.55766934", "0.5571404", "0.55701923", "0.5548474", "0.5541302", "0.55336255", "0.5526193", "0.5518413", "0.55118245", "0.5505959", "0.55039823", "0.5502578", "0.5494392", "0.5485046", "0.54790395", "0.54769117" ]
0.0
-1
outputs the covariance matrix
def getCovarianceMatrix(self):
    #ypost = np.dot ( self.getA().T, self.priorX )

    theta = np.mat ( self.getA() )
    Xm = np.mat ( self.priorX )

    ypost = Xm * theta
    yprior = self.priorY
    error = ypost - yprior
    #error = error - np.mean ( error, axis = 0 )
    return np.dot ( error.T, error )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def covariance_matrix(self):\n\n self._order_observations()\n self.cov_matrix = self._compute_covariance_matrix(\n self.list_observations, self.list_observations)\n\n self.cov_matrix += np.diag(np.array([self.noise] * self.n_observation))\n\n return self.cov_matrix", "def covariance_matrix(self):\n\n cov_filename = self.covariance_filename\n cov_press, cov_data = self._co_star_read(cov_filename)\n\n # \"Fix\" covariances that are not positive definite\n if not np.all(np.linalg.eigvals(cov_data) > 0):\n warnings.warn(\"Covariance matrix for species {} is not positive definite, modifying eigenvals\".format(self.species))\n\n # Get eigen values and vector from matrix\n eigval, eigvec = np.linalg.eig(cov_data)\n\n # Find negative eigen values and set to the media\n eigval[np.where(eigval < 0)] = np.median(eigval)\n\n # Reconstruct matrix with modified eigen values\n cov_data = eigvec @ np.diag(eigval) @ np.linalg.inv(eigvec)\n\n return cov_data", "def covariance(data_matrix):\n return np.asmatrix(np.cov(data_matrix, rowvar=0))", "def get_cov_matrix_outputs(self):\n cov = numpy.diag(numpy.zeros(self.get_num_measured_outputs()))\n i = 0\n for o in self.outputs:\n if o.is_measured_output():\n cov[i,i] = o.get_covariance()\n i += 1\n return cov", "def getCovarianceNoiseMatrix(self):\n return np.dot ( self.getB().T, self.getB() )", "def cov(self):\n cov_ = np.dot(self.weights * self.demeaned.T, self.demeaned)\n cov_ /= self.sum_weights - self.ddof\n return cov_", "def get_cov_matrix_states(self):\n cov = numpy.diag(numpy.zeros(self.get_num_variables()))\n i = 0\n for v in self.variables:\n cov[i,i] = v.get_covariance()\n i += 1\n return cov", "def get_cov_matrix_parameters(self):\n cov = numpy.diag(numpy.zeros(self.get_num_parameters()))\n i = 0\n for p in self.parameters:\n cov[i,i] = p.get_covariance()\n i += 1\n return cov", "def cov(self):\n E_x = Sample.mean(self)\n Std_x = Sample.std(self)\n cov = Std_x/E_x\n return(cov)", "def covariance(mtrx):\r\n\r\n # Average column of matrix\r\n T = np.transpose(mtrx)\r\n ave = np.zeros(len(mtrx))\r\n mtrx = np.asarray(mtrx)\r\n\r\n if isinstance(mtrx, np.ndarray):\r\n ave = average(T)\r\n\r\n for col in T:\r\n if type(mtrx) == list:\r\n # If data isn't standardized\r\n ave += np.asarray(col)\r\n\r\n\r\n if len(mtrx[0]) > len(mtrx):\r\n for moreRows in range(len(mtrx[0]), len(mtrx)):\r\n mtrx[moreRows] = np.asarray(mtrx[moreRows])\r\n\r\n ave /= len(mtrx[0])\r\n\r\n\r\n phi = T - ave\r\n # Covariance matrix\r\n return np.dot(np.transpose(phi), phi)", "def _mn_cov_ ( self , size = -1 , root = False ) :\n #\n if size <= 0 : size = len ( self )\n size = min ( size , len ( self ) ) \n #\n from array import array\n matrix = array ( 'd' , [ 0 for i in range(0, size * size) ] )\n self.mnemat ( matrix , size )\n #\n import ostap.math.linalg\n from ostap.core.core import Ostap \n mtrx = Ostap.Math.SymMatrix ( size )() \n for i in range ( 0 , size ) :\n for j in range ( i , size ) : \n mtrx [ i , j ] = matrix [ i * size + j ]\n \n return mtrx", "def covariance(self):\n return self._covariance", "def covariance_matrix(self,x,y,names=None,cov=None):\n if not isinstance(x,np.ndarray):\n x = np.array(x)\n if not isinstance(y,np.ndarray):\n y = np.array(y)\n assert x.shape[0] == y.shape[0]\n\n if names is not None:\n assert x.shape[0] == len(names)\n c = np.zeros((len(names),len(names)))\n np.fill_diagonal(c,self.contribution)\n cov = Cov(x=c,names=names)\n elif cov is not None:\n assert cov.shape[0] == x.shape[0]\n names = cov.row_names\n c = 
np.zeros((len(names),1)) + self.contribution\n cont = Cov(x=c,names=names,isdiagonal=True)\n cov += cont\n\n else:\n raise Exception(\"Vario2d.covariance_matrix() requires either\" +\n \"names or cov arg\")\n rc = self.rotation_coefs\n for i1,(n1,x1,y1) in enumerate(zip(names,x,y)):\n dx = x1 - x[i1+1:]\n dy = y1 - y[i1+1:]\n dxx,dyy = self._apply_rotation(dx,dy)\n h = np.sqrt(dxx*dxx + dyy*dyy)\n\n h[h<0.0] = 0.0\n h = self._h_function(h)\n if np.any(np.isnan(h)):\n raise Exception(\"nans in h for i1 {0}\".format(i1))\n cov.x[i1,i1+1:] += h\n for i in range(len(names)):\n cov.x[i+1:,i] = cov.x[i,i+1:]\n return cov", "def get_covariance(self):\n x = self.particles[:, 0]\n y = self.particles[:, 1]\n X = np.stack((x, y), axis=0)\n return np.cov(X)", "def calcCovarianceMatrix(data):\n # Create covariance matrix and array to store the mean values for x_mean, y_mean, z_mean\n C = np.zeros((data.shape[1], data.shape[1]))\n mean_xyz = []\n # Calculate all mean values\n for i in range(0, data.shape[1]):\n mean_xyz.append(data[:,i].mean())\n mean_xyz = np.array(mean_xyz)\n # Check whether dimensions agree \n if data[:,0].size != data[:,1].size or data[:,0].size != data[:,2].size:\n print \"X, Y and Z must be of same dimensions.\"\n else:\n # For each row in covariance matrix C\n for i in range(0, C.shape[0]):\n # For each column in covariance matrix C\n for j in range(0, C.shape[1]):\n C[i,j] = 0\n # For each point in the dataset, access x, y, z-values\n for point in data:\n # For each point, access x,y and z in all combinations (xx, xy, xz, yx, yy, yz etc)\n C[i][j] = C[i][j] + (point[i]-mean_xyz[i])*(point[j]-mean_xyz[j])\n # Divide by the total number of points \n C = (1.0/data.shape[0]) * C\n return C", "def get_cov_matrix_state_pars(self):\n cov = numpy.diag(numpy.zeros(self.get_num_variables() + self.get_num_parameters()))\n i = 0\n for v in self.variables:\n cov[i,i] = v.get_covariance()\n i += 1\n for p in self.parameters:\n cov[i,i] = p.get_covariance()\n i += 1\n return cov", "def compute_covariance_matrix(Xs, sigma_2):\n m, d = Xs.shape\n t1 = np.reshape(np.tile(Xs, m), (m, m, d))\n t2 = np.reshape(np.tile(Xs, (m, 1)), (m, m, d))\n K1 = np.linalg.norm(t1 - t2, axis=2)\n coeff = 0.1\n Sigma = np.ones((m, m)) - coeff*K1\n return Sigma", "def covariance (x, y):\n n = len(x)\n return dot(de_mean(x), de_mean(y))/(n-1)", "def covariance(x, y):\n n = len(x)\n return dot(de_mean(x), de_mean(y)) / (n - 1)", "def _get_variance_covariance_table(self):\n\n # variance-covariance matrix\n res = self._model.fit()\n X = self._model.exog\n x_prime_x_inverse = np.linalg.inv(np.matmul(X.transpose(), X))\n var_cov_matrix = res.mse_resid * x_prime_x_inverse\n var_cov_table = SimpleTable(data=var_cov_matrix,\n headers=self._model.exog_names,\n stubs=self._model.exog_names,\n title='Variance-covariance matrix')\n\n return var_cov_table", "def build_covariance(self):\n raise RuntimeError(\"Your Gaussian covariance code needs to \"\n \"over-ride the build_covariance method so it knows how to \"\n \"load the data covariance (or set constant_covariance=False and \"\n \"over-ride the extract_covariance method)\")\n\n #using info in self.options,\n #like filenames etc,\n #build covariance", "def cov_matrix(X, mu):\n m, n = X.shape\n X_minus_mu = X - mu\n sigma = (1 / m) * (X_minus_mu.T).dot(X_minus_mu)\n\n return sigma", "def cov(self):\n return self._cov", "def information_matrix(self):\n return self._cov.inv()", "def covariance(G, variables = [], conditionants = []):\n return parameters(G, variables = variables, 
\n conditionants = conditionants )[\"cov\"]", "def cov(self):\n return self.cond_proba.cov", "def covariance_matrix(self,x,y,names=None,cov=None):\n if not isinstance(x,np.ndarray):\n x = np.array(x)\n if not isinstance(y,np.ndarray):\n y = np.array(y)\n assert x.shape[0] == y.shape[0]\n\n if names is not None:\n assert x.shape[0] == len(names)\n c = np.zeros((len(names),len(names)))\n np.fill_diagonal(c,self.nugget)\n cov = Cov(x=c,names=names)\n elif cov is not None:\n assert cov.shape[0] == x.shape[0]\n names = cov.row_names\n c = np.zeros((len(names),1))\n c += self.nugget\n cont = Cov(x=c,names=names,isdiagonal=True)\n cov += cont\n\n else:\n raise Exception(\"GeoStruct.covariance_matrix() requires either \" +\n \"names or cov arg\")\n for v in self.variograms:\n v.covariance_matrix(x,y,cov=cov)\n return cov", "def getCovMatrix(self, caliStep, weights):\n\n Sigma = np.zeros([self.numObs, self.numObs])\n # scale observation data with normalized variance parameter to get covariance matrix\n for i in range(self.numObs):\n # use smaller weights for higher precision\n if self.scaleCovWithMax:\n Sigma[i, i] = self.sigma * weights[i] * max(self.obsData[:, i]) ** 2\n else:\n Sigma[i, i] = self.sigma * weights[i] * self.obsData[caliStep, i] ** 2\n return Sigma", "def cov(self, decomposed=False):\n if decomposed:\n return self._R, self._S\n else:\n return np.copy(self._C)", "def build_inverse_covariance(self):\n return np.linalg.inv(self.cov)", "def covariance(x, y):\n n = len(x)\n return dot(deviations_from_mean(x), deviations_from_mean(y))/ (n - 1)", "def correlation_matrix(self):\n correlation_matrix = self.model.covariance.copy()\n sigmaD = np.sqrt(np.diag(correlation_matrix))\n for ii in range(correlation_matrix.shape[0]):\n for jj in range(correlation_matrix.shape[1]):\n correlation_matrix[ii, jj] /= sigmaD[ii] * sigmaD[jj]\n return correlation_matrix", "def get_cov(self):\n\n if self._cov is not None:\n return self._cov\n\n names = ['ra', 'dec', 'parallax', 'pmra', 'pmdec']\n\n C = np.zeros((6,6))\n\n # pre-load the diagonal\n for i,name in enumerate(names):\n full_name = \"{}_error\".format(name)\n C[i,i] = self._data[full_name]**2\n\n for i,name1 in enumerate(names):\n for j,name2 in enumerate(names):\n if j <= i:\n continue\n full_name = \"{}_{}_corr\".format(name1, name2)\n C[i,j] = self._data[full_name] * np.sqrt(C[i,i]*C[j,j])\n C[j,i] = self._data[full_name] * np.sqrt(C[i,i]*C[j,j])\n\n if self._rv_err is not None:\n C[5,5] = self._rv_err**2\n\n self._cov = C\n return self._cov", "def _getCovMat(self, cov_expr):\n # store the expression\n self.expr = cov_expr\n # create a PETSC matrix for cov_mat\n cov_mat = PETSc.Mat().create()\n cov_mat.setType('aij')\n cov_mat.setSizes(self.domain.getNodes(), self.domain.getNodes())\n cov_mat.setUp()\n\n # scalar valued function is evaluated in this variable\n cov_ij = np.empty((1), dtype=float)\n # the points to evalute the expression\n xycor = np.empty((4), dtype=float)\n\n print '---------------------------'\n print '---------------------------'\n print ' Building Covariance Matrix'\n print '---------------------------'\n print '---------------------------'\n # Loop through global nodes and build the matrix for i < j because of\n # symmetric nature.\n for node_i in range(0, self.domain.getNodes()):\n # global node node_i\n for node_j in range(node_i, self.domain.getNodes()):\n # global node node_j\n temp_cov_ij = 0\n for elem_i in self.node_to_elem[node_i]:\n # elem_i : element attached to node_i\n # x1 : x co-ordinate of the centroid of 
element elem_i\n x1 = self.c_centroid_array[elem_i].x()\n # y1 : x co-ordinate of the centroid of element elem_i\n y1 = self.c_centroid_array[elem_i].y()\n for elem_j in self.node_to_elem[node_j]:\n # elem_j : element attached to node_j\n # x2 : x co-ordinate for the centroid of element elem_j\n x2 = self.c_centroid_array[elem_j].x()\n # y2 : y co-ordinate for the centroid of element elem_j\n y2 = self.c_centroid_array[elem_j].y()\n xycor[0] = x1\n xycor[1] = x2\n xycor[2] = y1\n xycor[3] = y2\n # evaluate the expression\n cov_expr.eval(cov_ij, xycor)\n if cov_ij[0] > 0:\n temp_cov_ij += (1.0 / 3) * (1.0 / 3) * \\\n cov_ij[0] * \\\n self.c_volume_array[elem_i] * \\\n self.c_volume_array[elem_j]\n\n cov_mat.setValue(node_i, node_j, temp_cov_ij)\n cov_mat.setValue(node_j, node_i, temp_cov_ij)\n cov_mat.assemblyBegin()\n cov_mat.assemblyEnd()\n print '---------------------------'\n print '---------------------------'\n print ' Finished Covariance Matrix'\n print '---------------------------'\n print '---------------------------'\n\n return cov_mat", "def compute_cov(self):\n subject = copy.copy(self.signals)\n subject -= subject.mean(axis=0)\n if self.standardize:\n subject = subject / subject.std(axis=0) # copy on purpose\n\n n_samples = subject.shape[0]\n self.cov_ = np.dot(subject.T, subject) / n_samples\n return self", "def build_covariance(self):\n raise RuntimeError(\"Internal cosmosis error in SingleValueGaussianLikelihood\")", "def solutionCovariance(self):\n return self.standardError2()*self.AtAinv", "def covariance(self,pt0,pt1):\n #raise Exception()\n cov = self.nugget\n for vario in self.variograms:\n cov += vario.covariance(pt0,pt1)\n return cov", "def get_covariance(self):\n ...", "def calculate_covariance_matrix(X, Y=None):\n\tif Y is None:\n\t\tY = X\n\tn_samples = np.shape(X)[0]\n\tcovariance_matrix = (1 / (n_samples-1)) * (X - X.mean(axis=0)).T.dot(Y - Y.mean(axis=0))\n\treturn np.array(covariance_matrix, dtype=float)", "def calculate_covariance_matrix(X, Y=None):\n if Y is None:\n Y = X\n n_samples = np.shape(X)[0]\n covariance_matrix = (1 / (n_samples - 1)) * (\n X - X.mean(axis=0)).T.dot(Y - Y.mean(axis=0))\n\n return np.array(covariance_matrix, dtype=float)", "def mpi_cov(data):\n m = mpi_mean(data)\n data_centered = data - m\n cov_local = dot(data_centered.T, data_centered)\n covmat = np.empty_like(cov_local)\n mpi.COMM.Allreduce(cov_local, covmat)\n num_data = mpi.COMM.allreduce(data.shape[0])\n covmat /= float(num_data)\n return covmat", "def print_latex_cov(P):\n out = ''\n # Print out header with state variables\n for var in STATE_VARS:\n out += \" & ${0:9s}$ \".format(var)\n\n out += '\\\\\\\\ \\n'\n\n\n # Print out correlation / covariance matrix \n for row in range(18):\n out += \"${0:3s}$ \".format(STATE_VARS[row])\n for col in range(18):\n # Print correlations on lower diagnal\n if col < row:\n out += \" & {0: 2.2f} \".format(float(P[row,col]/(sqrt(P[row,row]) * sqrt(P[col,col]) )))\n # Highlight variances in blue\n elif row == col:\n out += \" & {0: 2.2e} \".format(float(P[row,col]))\n else:\n out += \"& {0: 2.2e} \".format(float(P[row,col]))\n out += '\\\\\\\\ \\n'\n\n print out", "def test__get_covariance(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n\n expected_covariance = np.array([\n [1., -0.01261819, -0.19821644],\n [-0.01261819, 1., -0.16896087],\n [-0.19821644, -0.16896087, 1.]\n ])\n\n # Run\n covariance = copula._get_covariance(self.data)\n\n # Check\n assert np.isclose(covariance, 
expected_covariance).all().all()", "def _compute_total_covariance_matrix(self) -> tf.Tensor:\n total_covariance_matrix = self.total_c_phi\\\n + tf.matmul(self.s_matrix_inv,\n tf.matmul(self.t_matrix, self.s_matrix_inv))\n return total_covariance_matrix", "def get_traj_cov(self):\n return np.dot(self._Phi.T, np.dot(self._sigma_W, self._Phi))", "def covariance(self, x1, x2, lengths):\n z = self.dist(x1, x2, lengths)\n return (self.a**2) * exp(-0.5*z)", "def muscovite():\n\n rho = 2834.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 181.; C[0,1] = 48.8; C[0,2] = 25.6; C[0,3] = 0.; C[0,4] = -14.2; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 178.4; C[1,2] = 21.2; C[1,3] = 0.; C[1,4] = 1.1; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 58.6; C[2,3] = 0.; C[2,4] = 1.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 16.5; C[3,4] = 0.; C[3,5] = -5.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 19.5; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 72.\n\n return C, rho", "def calculate_covariance(self, x):\n # tx = self.reshape_tensor2d(x)\n # Calcualte the covariance\n # tx_mean = K.mean(tx, axis=0)\n # return tx_mean\n # tx_normal = tx - tx_mean\n # return tx_normal\n # tx_cov = K.dot(tx_normal.T, tx_normal) / (self.cols * self.rows - 1)\n # return tx_cov\n raise DeprecationWarning(\"deprecated, should use calculate_pre_cov to do 4D direct computation\")", "def get_covariances(songs=songs_df, method='corrcoef', save=False, out_as='png'):\n # Make a matrix out of the rankings.\n songs_mat = np.matrix(songs['ranks'].tolist())\n\n if method == 'corrcoef':\n covariance_matrix = np.corrcoef(songs_mat)\n elif method == 'cov':\n covariance_matrix = np.cov(songs_mat)\n else:\n return \"Please choose 'cov' or 'corrcoeff' for method.\"\n\n mask = np.zeros_like(covariance_matrix)\n mask[np.triu_indices_from(mask)] = True\n sns.heatmap(covariance_matrix, mask=mask, cmap='RdBu_r')\n\n prefix = 'Normalized' if method == 'corrcoef' else ''\n plt.title(prefix + ' Covariance Matrix for Song Rankings Through Time',\n weight='bold')\n plt.xticks(np.arange(0, 560, 25))\n plt.yticks(np.arange(0, 560, 25))\n plt.xlabel('Song Number', weight='bold')\n plt.ylabel('Song Number', weight='bold')\n if save:\n if out_as == 'pdf':\n plt.savefig(fig_path + method + '_matrix.pdf')\n elif out_as == 'png':\n plt.savefig(fig_path + method + '_matrix.png', dpi=300)\n else:\n print \"Not saving; please choose 'png' or 'pdf' for 'out_as'\"\n plt.show()\n return covariance_matrix", "def stat_cov(stat_err,stat_der):\n # cast as numpy arrays\n stat_err = np.array(stat_err)\n stat_der = np.array(stat_der)\n # square the error for the variance\n stat_err2 = np.power(stat_err,2)\n # square the derivatives\n stat_der2 = np.power(stat_der,2)\n # sum of (derivative * error)^2\n cy = np.diag((stat_err2*stat_der2).sum(axis=0))\n \n # return the diagonal matrix\n return cy", "def covariance(x, mean_x, y, mean_y):\r\n \r\n covar = 0.0\r\n for i in range(len(x)):\r\n covar += (x[i] - mean_x) * (y[i] - mean_y)\r\n return covar", "def print_covariance(P):\n def b(string):\n \"\"\"\n Turns a given string blue.\n\n \"\"\"\n return \"\\033[94m{0}\\033[0m\".format(string)\n\n out = \" \"\n # Print out header with state variables\n for var in STATE_VARS:\n out += \"{0:9s} \".format(var)\n\n # Print out correlation / covariance matrix \n for row in range(18):\n out += \"\\n{0:3s} \".format(STATE_VARS[row])\n for col in 
range(18):\n # Print correlations on lower diagnal\n if col < row:\n out += \"{0: 2.2f}, \".format(float(P[row,col]/(sqrt(P[row,row]) * sqrt(P[col,col]) )))\n # Highlight variances in blue\n elif row == col:\n out += \"{0: 2.2e}, \".format(float(P[row,col]))\n else:\n out += \"{0: 2.2e}, \".format(float(P[row,col]))\n \n out += \"\\n\"\n\n print out", "def FormCovarianceMatrix(mat):\n nPts = mat.shape[0]\n sumVect = sum(mat)\n sumVect /= float(nPts)\n for row in mat:\n row -= sumVect\n return numpy.dot(numpy.transpose(mat),mat)/(nPts-1)", "def set_cov(self):\n v_mpart = self.d_vars['MPart']\n n_mpart = len(v_mpart)\n for p in combinations_with_replacement(range(n_mpart), 2):\n self.add_parameter('Cov', p[0], p[1])\n\n m_cov = np.zeros((n_mpart, n_mpart))\n return m_cov", "def FormCovarianceMatrix(mat):\n nPts = mat.shape[0]\n sumVect = sum(mat)\n sumVect /= float(nPts)\n for row in mat:\n row -= sumVect\n return numpy.dot(numpy.transpose(mat), mat) / (nPts - 1)", "def get_cov(self, npar=None, **args):\n return get_par(self, dummy='cov_mat', npar=npar, **args)", "def rand_cov():\n c = uniform(-1, 1)\n return [[uniform(0, 1), c], [c, uniform(0, 1)]]", "def _gp_cov_matrix(Nt, snr2, clen2):\n f = lambda x: np.exp(-(x**2)/clen2)\n C = snr2 * f(np.arange(Nt))\n C[0] += 1 # noise\n return scipy.linalg.toeplitz(C)", "def _covariance_matrix_prob_v1(self, merged_df, prob_vector):\n total_cov = merged_df.groupby(CYCLE_LABEL, as_index=True).cov()\n cov_matrix = 0\n for i in range(5):\n cov_matrix += total_cov.loc[i, :] * prob_vector[:, i]\n return cov_matrix", "def covariance_regularization(self):\n return self._covariance_regularization", "def covariance_type(self):\n return self._covariance_type", "def covariance(self, point_one, point_two):\n raise NotImplementedError(\"C++ wrapper currently does not support computing covariance quantities.\")", "def get_process_covariance_matrix(dt):\n # a = np.array([\n # [0.25 * dt ** 4, 0.5 * dt ** 3, 0.5 * dt ** 2],\n # [0.5 * dt ** 3, dt ** 2, dt],\n # [0.5 * dt ** 2, dt, 1]\n # ])\n\n a = np.array([\n [dt ** 6 / 36., dt ** 5 / 24., dt ** 4 / 6.],\n [dt ** 5 / 24., 0.25 * dt ** 4, 0.5 * dt ** 3],\n [dt ** 4 / 6., 0.5 * dt ** 3, dt ** 2]\n ])\n return a", "def calc_cov(self, array_x, array_y):\n cov = np.empty([len(array_x),len(array_x)], dtype = float) # initialize an empty 16*16 matrix (16 pressure levels)\n for x in range(len(array_x)):\n for y in range(len(array_y)):\n entry = array_x[x] * array_y[y]\n cov[x,y] = entry\n return cov", "def cov(m, y=None, rowvar=1, bias=0):\n\n X = array(m, ndmin=2, dtype=float)\n if X.shape[0] == 1:\n rowvar = 1\n if rowvar:\n axis = 0\n tup = (slice(None),newaxis)\n else:\n axis = 1\n tup = (newaxis, slice(None))\n\n\n if y is not None:\n y = array(y, copy=False, ndmin=2, dtype=float)\n X = concatenate((X,y),axis)\n\n X -= X.mean(axis=1-axis)[tup]\n if rowvar:\n N = X.shape[1]\n else:\n N = X.shape[0]\n\n if bias:\n fact = N*1.0\n else:\n fact = N-1.0\n\n if not rowvar:\n return (dot(X.T, X.conj()) / fact).squeeze()\n else:\n return (dot(X, X.T.conj()) / fact).squeeze()", "def _compute_covariance_matrix(self, list_obs_1, list_obs_2):\n\n assert isinstance(list_obs_1, list)\n assert isinstance(list_obs_2, list)\n\n cov_matrix = np.zeros((len(list_obs_1), len(list_obs_2)))\n cov_matrix_flat = [\n (i, j, self.covariance(xi, yj))\n for (i, xi) in enumerate(list_obs_1)\n for (j, yj) in enumerate(list_obs_2)\n ]\n for coord_value in cov_matrix_flat:\n cov_matrix[coord_value[:2]] = coord_value[2]\n\n return cov_matrix", "def 
covariance(x, y):\n x_mean = mean(x)\n y_mean = mean(y)\n diff_x_mean = (x_i - x_mean for x_i in x)\n diff_y_mean = (y_i - y_mean for y_i in y)\n sum_xy_diff = sum(a * b for a, b in zip(diff_x_mean, diff_y_mean))\n return sum_xy_diff / (len(x) - 1)", "def compute_measurement_covariance(jacobian, oldCovariance, sigmaObservation): \n\n return None", "def sys_cov(sys_err,sys_der,cov_mat=None):\n\n if cov_mat is not None:\n lcm = len(cov_mat)\n N = lcm + len(sys_err)\n cx = np.zeros((N,N))\n # plug in matrix at the leading rows and cols\n cx[0:lcm,0:lcm] = cov_mat\n \n # cast as numpy arrays\n sys_err = np.array(sys_err)\n sys_der = np.array(sys_der)\n # square the error for the variance and diagonalize\n sys_err2 = np.diag(np.power(sys_err,2))\n # determine the final input covariance matrix\n if cov_mat is None:\n cx = sys_err2\n else:\n cx[lcm:,lcm:] = sys_err2\n # sandwich rule, matrix algebra to get outgoing covariance\n # (@ symbol is matrix dot product)\n cy = sys_der.T@cx@sys_der\n \n # return NxN matrix (N = len(derivatives))\n return cy", "def se(self):\n return np.sqrt(self.scaled_vcov().diagonal().T)", "def covfunc(self, theta, d): \n sigmaf, l = theta[:2]\n xxl = np.sum((d/l)**2, axis=1)\n covariance = sigmaf**2 * np.exp(-xxl/2.)\n return covariance", "def empirical_covariance(system, excitation, m):\n observations = [system() @ excitation() for _ in range(m)]\n return np.cov(np.array(observations).T)", "def test_covar_on_vectors(self):\n input_file = self.get_file(\"vector.csv\")\n vect_schema = [(\"items\", sparktk.dtypes.vector(400))]\n\n # create a frame and covariance matrix\n cov_frame = self.context.frame.import_csv(input_file,\n schema=vect_schema)\n cov_matrix = cov_frame.covariance_matrix(['items'])\n\n # call numpy to get numpy result\n numpy_result = list(numpy.cov(list(cov_frame.take(cov_frame.count()).data),\n rowvar=False))\n\n # convert the frame rows into lists for ease of comparison\n sparktk_flat = list(numpy.array(cov_matrix.take(cov_matrix.count())).flat)\n numpy_flat = list(numpy.array(numpy_result).flat)\n\n # finally compare the expected results with those resturned by sparktk\n numpy.testing.assert_almost_equal(sparktk_flat, numpy_flat)", "def get_covariance(data_array):\n number_of_data = len(data_array)\n number_of_features = len(data_array[0]) if number_of_data != 0 else 0\n covariance_matrix = numpy.zeros([number_of_features, number_of_features])\n mean = numpy.zeros(number_of_features)\n for data in data_array:\n numpy.add(mean, data, mean)\n numpy.divide(mean, number_of_data, mean)\n for i in xrange(number_of_features):\n temp = numpy.zeros(number_of_data)\n for j in xrange(number_of_data):\n temp[j] = data_array[j][i] - mean[i]\n covariance_matrix[i][i] = max(numpy.dot(temp, temp.transpose()), 0.00001) / number_of_data\n return covariance_matrix, mean", "def compute_covariance_matrix1d(Xs):\n m, d = Xs.shape\n t1 = np.reshape(np.tile(Xs, m), (m, m, d))\n t2 = np.reshape(np.tile(Xs, (m, 1)), (m, m, d))\n K1 = np.abs(t1 - t2)\n K1 = np.reshape(K1, (m, m))\n coeff = 1.0\n Sigma = np.ones((m, m)) - coeff*K1\n return Sigma", "def get_cov_mat(samples_dir):\n if not os.path.isdir(samples_dir):\n print(\"Samples directory not found: \", samples_dir)\n exit()\n\n player_inputs = np.array((load_matrix_from_raw_samples(samples_dir, scaling_factor=100),)).T\n n_players = player_inputs.shape[1]\n aggr_cov_mat = np.zeros((n_players, n_players), dtype=\"complex128\")\n for n_avg in range(player_inputs.shape[0]):\n aggr_cov_mat += player_inputs[n_avg] @ 
player_inputs[n_avg].conjugate().T\n\n cov_mat = aggr_cov_mat / player_inputs.shape[0]\n return cov_mat", "def cov(xdata, ydata, mx=None, my=None):\n n, s = _SP(xdata, mx, ydata, my)\n if n > 1:\n return s/(n-1)\n else:\n raise ValueError('sample covariance requires at least two points')", "def acov (x,y, dimension=None,keepdims=0):\r\n if dimension == None:\r\n x = N.ravel(x)\r\n y = N.ravel(y)\r\n dimension = 0\r\n xmn = amean(x,dimension,1) # keepdims\r\n xdeviations = x - xmn\r\n ymn = amean(y,dimension,1) # keepdims\r\n ydeviations = y - ymn\r\n if type(dimension) == ListType:\r\n n = 1\r\n for d in dimension:\r\n n = n*x.shape[d]\r\n else:\r\n n = x.shape[dimension]\r\n covar = N.sum(xdeviations*ydeviations)/float(n-1)\r\n return covar", "def create_covariance_matrix(cls, coordinates):\n number_of_conformations = coordinates.shape[0]\n number_of_atoms = coordinates.shape[1]\n coordinates_per_conformation = number_of_atoms * 3\n covariance_matrix = numpy.zeros((coordinates_per_conformation, coordinates_per_conformation))\n coordinates = coordinates.reshape((number_of_conformations, coordinates_per_conformation))\n # Mean structure\n mean = coordinates.mean(0)\n # Changed for efficiency\n for coords in coordinates:\n deviations = coords - mean\n covariance_matrix += numpy.outer(deviations, deviations)\n return covariance_matrix / number_of_conformations", "def make_covariance_matrix(points, kernel):\n\n dim = len(points)\n p1 = np.reshape(points, (dim, 1, -1))\n p2 = np.reshape(points, (dim, -1, 1))\n\n return kernel(p1, p2)", "def plotCov(mu, C, axis):\n xx1 = [mu[0,0]-0.2,mu[0,0]+0.2]\n xx2 = [mu[1,0]-0.2,mu[1,0]+0.2]\n X1,X2 = meshgrid(linspace(xx1[0],xx1[1],50), linspace(xx2[0],xx2[1],50))\n f = zeros(shape(X1))\n Cinv = linalg.inv(C)\n CinvDet = linalg.det(Cinv)\n if CinvDet > 10**15:\n print \"The following covariance could not be plotted\"\n print \"Prec:\",Cinv\n print \"Det: \",CinvDet\n return\n\n for yi in range(shape(X1)[0]):\n for xi in range(shape(X1)[1]):\n x = mat([X1[yi,xi],X2[yi,xi]]).T - mu\n f[yi,xi] = x.T * Cinv * x\n\n axis.contour(X1,X2,f,[2.3,4.61,6.18,9.21,11.8])", "def cov_to_corr(cy):\n \n N = len(cy)\n \n corr = np.zeros((N,N))\n \n sd = np.sqrt(np.diag(cy))\n \n sdinv = np.diag(1/sd)\n \n corr = np.dot(np.dot(sdinv,cy),sdinv)\n \n #print(np.shape(corr))\n return corr", "def _compute_covariance(self, lc1, lc2):\n return np.cov(lc1.counts, lc2.counts)[0][1]", "def filter_covariance(cov_matrix, uvd=None, return_diag_as_uvdata=True, **array_kwargs):\n raise NotImplementedError(\"This function has not yet been written.\")", "def covariance_from_internal(internal_values, constr):\n chol = chol_params_to_lower_triangular_matrix(internal_values)\n cov = chol @ chol.T\n return cov[np.tril_indices(len(chol))]", "def sigma(self):\n with ops.name_scope(self.name):\n return self._cov.to_dense()", "def extract_inverse_covariance(self, block):\n return np.linalg.inv(self.cov)", "def covariance(x, mu_x, y, mu_y, pdf):\n if pdf.shape[0] != x.shape[0] or pdf.shape[1] != y.shape[0]:\n print(\"Error, mesh size does not match x and y\")\n n_x = x.shape[0]\n n_y = y.shape[0]\n cov_int = 0\n p_of_x = np.zeros(n_x)\n for i in range(0, n_x):\n for j in range(1, n_y):\n delta_y = y[j] - y[j - 1]\n p_of_x[i] += (\n delta_y\n / 2.0\n * ((y[j] - mu_y) * pdf[i, j] + (y[j - 1] - mu_y) * pdf[i, j - 1])\n )\n if i > 0:\n delta_x = x[i] - x[i - 1]\n cov_int += (\n delta_x\n / 2.0\n * ((x[i] - mu_x) * p_of_x[i] + (x[i - 1] - mu_x) * p_of_x[i - 1])\n )\n return cov_int", "def covariance(self, 
param1: list, param2: list) -> float:\n assert len(param1) == len(param2), \"Parameter lists must be of the same length.\"\n \n n = len(param1)\n \n mean1 = np.mean(param1)\n mean2 = np.mean(param2)\n \n arr1 = np.array(param1)\n arr2 = np.array(param2)\n \n arr1_diff = arr1 - mean1\n arr2_diff = arr2 - mean2\n \n multiplied = arr1_diff * arr2_diff\n sumMultiplied = sum(multiplied)\n covar = sumMultiplied/(n - 1.0)\n \n return covar", "def covariance(self, cond, include_obs=False, parametric_only=False, pad=1e-8):\n X1 = self.standardize_input_array(cond)\n m = X1.shape[0]\n\n Kstar = self.get_query_K(X1)\n if not parametric_only:\n tmp = self.Kinv_sp_tri * Kstar\n qf = np.dot(Kstar.T, tmp)\n k = self.kernel(X1,X1, identical=include_obs)\n gp_cov = k - qf\n else:\n gp_cov = np.zeros((m,m))\n\n R = self.query_R\n tmp = np.dot(self.invc, R)\n mean_cov = np.dot(tmp.T, tmp)\n gp_cov += mean_cov\n\n gp_cov += pad * np.eye(gp_cov.shape[0])\n return gp_cov", "def get_cov_re(self):\n pa = self._params[self.k_fe:]\n\n cov_re = np.zeros((self.k_re, self.k_re))\n cov_re[self._ix] = pa\n if self.use_sqrt:\n cov_re = np.dot(cov_re, cov_re.T)\n else:\n cov_re = (cov_re + cov_re.T) - np.diag(np.diag(cov_re))\n\n return cov_re", "def cov(self) -> 'DataFrame':\n if self._is_string():\n raise TypeError('DataFrame consists only of strings. Must have int, float, '\n 'or bool columns')\n\n x: ndarray = self._values_number()\n if x.dtype.kind == 'i':\n x0: ndarray = x[0]\n x_diff: ndarray = x - x0\n Exy: ndarray = (x_diff.T @ x_diff)\n Ex: ndarray = x_diff.sum(0)[np.newaxis, :]\n ExEy: ndarray = Ex.T @ Ex\n counts: Union[int, ndarray] = len(x)\n else:\n x0 = _math.get_first_non_nan(x)\n x_diff = x - x0\n x_not_nan: ndarray = (~np.isnan(x)).astype(int)\n\n x_diff_0: ndarray = np.nan_to_num(x_diff)\n counts = (x_not_nan.T @ x_not_nan)\n Exy = (x_diff_0.T @ x_diff_0)\n Ex = (x_diff_0.T @ x_not_nan)\n ExEy = Ex * Ex.T\n\n with np.errstate(invalid='ignore'):\n cov: ndarray = (Exy - ExEy / counts) / (counts - 1)\n\n new_data: Dict[str, ndarray] = {'f': np.asfortranarray(cov)}\n new_column_info: ColInfoT = {'Column Name': utils.Column('S', 0, 0)}\n new_columns: ndarray = np.empty(x.shape[1] + 1, dtype='O')\n new_columns[0] = 'Column Name'\n\n i: int = 0\n for col, dtype, loc in self._col_info_iter(): # type: str, str, int\n if dtype not in 'ifb':\n continue\n new_column_info[col] = utils.Column('f', i, i + 1)\n new_columns[i + 1] = col\n i += 1\n new_data['S'] = np.asfortranarray(new_columns[1:])[:, np.newaxis]\n return self._construct_from_new(new_data, new_column_info,\n np.asarray(new_columns, dtype='O'))", "def cpca_cov(sigma2, d, old=False):\n\n n_ = sigma2.shape[0]\n k_ = d.shape[0]\n i_n = np.eye(n_)\n lambda2_d = np.empty((n_, 1))\n e_d = np.empty((n_, n_))\n\n # Step 0. initialize constraints\n m_ = n_ - k_\n a_n = np.copy(d)\n\n for n in range(n_):\n # Step 1. orthogonal projection matrix\n p_n = [email protected](a_n@a_n.T)@a_n\n\n # Step 2. conditional dispersion matrix\n s2_n = p_n @ sigma2 @ p_n\n\n # Step 3. conditional principal directions/variances\n e_d[:, [n]], lambda2_d[n] = pca_cov(s2_n, 1)\n\n # Step 4. 
Update augmented constraints matrix\n if n+1 <= m_-1:\n a_n = np.concatenate((a_n.T, sigma2 @ e_d[:, [n]]), axis=1).T\n elif m_ <= n+1 <= n_-1:\n a_n = (sigma2 @ e_d[:, :n+1]).T\n\n return e_d, lambda2_d.squeeze()", "def covariance2d(self, P_x, P_y, P_z, P_x_dot, P_y_dot, P_z_dot, P_x_ddot, P_y_ddot, P_z_ddot):\n cov_matrix = numpy.array([[P_x, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, P_y, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, P_z, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, P_x_dot, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, P_y_dot, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, P_z_dot, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, P_x_ddot, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, P_y_ddot, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, P_z_ddot]])\n return cov_matrix", "def _dgp_cov_matrix(Nt, snr2=100, clen2=1):\n f = lambda x: np.exp(-(x**2)/clen2)\n C = snr2 * (2*f(np.arange(Nt)) - f(1+np.arange(Nt))- f(-1+np.arange(Nt)))\n C[0] += 2 + 0.01 # noise, add a small number to regularize\n C[1] += -1\n return scipy.linalg.toeplitz(C)", "def get_thermal_covariance(self):\n cov = []\n for var in self.noise_power_variance:\n cov.append(np.diag(var * np.ones(len(self.eta))))\n\n return cov", "def extract_covariance(self, block):\n raise RuntimeError(\"You need to implement the method \"\n \"'extract_covariance' if you set constant_covariance=False \"\n \"in a gaussian likelihood\")", "def estimateCovariance(df):\n import numpy as np\n m = df.select(df['scaledFeatures']).map(lambda x: x[0]).mean()\n dfZeroMean = df.select(df['scaledFeatures']).map(lambda x: x[0]).map(lambda x: x-m) # subtract the mean\n\n return dfZeroMean.map(lambda x: np.outer(x,x)).sum()/df.count()", "def var(self):\n return np.diag(self.covar)" ]
[ "0.7898988", "0.7637602", "0.7582342", "0.74682164", "0.73953617", "0.7171067", "0.71467644", "0.6950389", "0.6944435", "0.6932731", "0.6891629", "0.68414915", "0.68384755", "0.68313056", "0.68222475", "0.67440724", "0.6723901", "0.67163885", "0.6713542", "0.6699285", "0.6695643", "0.6692742", "0.66895354", "0.66498405", "0.66394657", "0.6636903", "0.66306", "0.6583718", "0.6567032", "0.6554199", "0.655379", "0.6551523", "0.6543604", "0.6537461", "0.6512904", "0.6495544", "0.6488788", "0.64684993", "0.644301", "0.6431153", "0.6420915", "0.64204514", "0.64005595", "0.6388857", "0.63708776", "0.637002", "0.6363111", "0.6338144", "0.633727", "0.6327009", "0.6316813", "0.63040614", "0.6292601", "0.6248535", "0.6238123", "0.6230477", "0.6202671", "0.6173788", "0.6167742", "0.61534095", "0.6117907", "0.61112094", "0.61093384", "0.60984516", "0.6097667", "0.608882", "0.60831034", "0.6083024", "0.60727865", "0.6055751", "0.60498744", "0.60378796", "0.6031152", "0.6029848", "0.6025811", "0.6014806", "0.6013841", "0.6013014", "0.59985614", "0.59914106", "0.59898776", "0.5978867", "0.5970983", "0.59646547", "0.59604275", "0.5950335", "0.59228545", "0.59190845", "0.5917381", "0.59169394", "0.5901627", "0.5893692", "0.58873945", "0.58794945", "0.5870862", "0.5868747", "0.58528084", "0.58274853", "0.5826423", "0.5823237" ]
0.7279606
5
outputs the noise covariance matrix, R
def getCovarianceNoiseMatrix(self):
    return np.dot ( self.getB().T, self.getB() )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def covariance_matrix(self):\n\n self._order_observations()\n self.cov_matrix = self._compute_covariance_matrix(\n self.list_observations, self.list_observations)\n\n self.cov_matrix += np.diag(np.array([self.noise] * self.n_observation))\n\n return self.cov_matrix", "def covariance(data_matrix):\n return np.asmatrix(np.cov(data_matrix, rowvar=0))", "def process_noise_cov(self, dt=0.0):\n raise NotImplementedError", "def process_noise_cov(self, dt=0.0):\n raise NotImplementedError", "def process_noise_cov(self, dt=0.0):\n raise NotImplementedError", "def covariance_matrix(self):\n\n cov_filename = self.covariance_filename\n cov_press, cov_data = self._co_star_read(cov_filename)\n\n # \"Fix\" covariances that are not positive definite\n if not np.all(np.linalg.eigvals(cov_data) > 0):\n warnings.warn(\"Covariance matrix for species {} is not positive definite, modifying eigenvals\".format(self.species))\n\n # Get eigen values and vector from matrix\n eigval, eigvec = np.linalg.eig(cov_data)\n\n # Find negative eigen values and set to the media\n eigval[np.where(eigval < 0)] = np.median(eigval)\n\n # Reconstruct matrix with modified eigen values\n cov_data = eigvec @ np.diag(eigval) @ np.linalg.inv(eigvec)\n\n return cov_data", "def rand_cov():\n c = uniform(-1, 1)\n return [[uniform(0, 1), c], [c, uniform(0, 1)]]", "def _gp_cov_matrix(Nt, snr2, clen2):\n f = lambda x: np.exp(-(x**2)/clen2)\n C = snr2 * f(np.arange(Nt))\n C[0] += 1 # noise\n return scipy.linalg.toeplitz(C)", "def cov(self):\n cov_ = np.dot(self.weights * self.demeaned.T, self.demeaned)\n cov_ /= self.sum_weights - self.ddof\n return cov_", "def muscovite():\n\n rho = 2834.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 181.; C[0,1] = 48.8; C[0,2] = 25.6; C[0,3] = 0.; C[0,4] = -14.2; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 178.4; C[1,2] = 21.2; C[1,3] = 0.; C[1,4] = 1.1; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 58.6; C[2,3] = 0.; C[2,4] = 1.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 16.5; C[3,4] = 0.; C[3,5] = -5.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 19.5; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 72.\n\n return C, rho", "def process_noise_cov(self, dt=0.0):\n if dt not in self._Q_cache:\n with torch.no_grad():\n d = self._dimension\n dt2 = dt * dt\n dt3 = dt2 * dt\n dt4 = dt2 * dt2\n Q = torch.zeros(d, d, dtype=self.sa2.dtype, device=self.sa2.device)\n Q[: d // 2, : d // 2] = 0.25 * dt4 * eye_like(self.sa2, d // 2)\n Q[: d // 2, d // 2 :] = 0.5 * dt3 * eye_like(self.sa2, d // 2)\n Q[d // 2 :, : d // 2] = 0.5 * dt3 * eye_like(self.sa2, d // 2)\n Q[d // 2 :, d // 2 :] = dt2 * eye_like(self.sa2, d // 2)\n Q = Q * self.sa2\n self._Q_cache[dt] = Q\n\n return self._Q_cache[dt]", "def get_cov_matrix_outputs(self):\n cov = numpy.diag(numpy.zeros(self.get_num_measured_outputs()))\n i = 0\n for o in self.outputs:\n if o.is_measured_output():\n cov[i,i] = o.get_covariance()\n i += 1\n return cov", "def cov(self):\n E_x = Sample.mean(self)\n Std_x = Sample.std(self)\n cov = Std_x/E_x\n return(cov)", "def process_noise_cov(self, dt=0.0):\n if dt not in self._Q_cache:\n with torch.no_grad():\n d = self._dimension\n dt2 = dt * dt\n dt3 = dt2 * dt\n Q = torch.zeros(d, d, dtype=self.sa2.dtype, device=self.sa2.device)\n eye = eye_like(self.sa2, d // 2)\n Q[: d // 2, : d // 2] = dt3 * eye / 3.0\n Q[: d // 2, d // 2 :] = dt2 * eye / 2.0\n Q[d // 2 :, : d // 2] = dt2 * eye / 2.0\n Q[d // 
2 :, d // 2 :] = dt * eye\n # sa2 * dt is an intensity factor that changes in velocity\n # over a sampling period ``dt``, ideally should be ~``sqrt(q*dt)``.\n Q = Q * (self.sa2 * dt)\n self._Q_cache[dt] = Q\n\n return self._Q_cache[dt]", "def _mn_cov_ ( self , size = -1 , root = False ) :\n #\n if size <= 0 : size = len ( self )\n size = min ( size , len ( self ) ) \n #\n from array import array\n matrix = array ( 'd' , [ 0 for i in range(0, size * size) ] )\n self.mnemat ( matrix , size )\n #\n import ostap.math.linalg\n from ostap.core.core import Ostap \n mtrx = Ostap.Math.SymMatrix ( size )() \n for i in range ( 0 , size ) :\n for j in range ( i , size ) : \n mtrx [ i , j ] = matrix [ i * size + j ]\n \n return mtrx", "def process_noise_cov(self, dt=0.0):\n if dt not in self._Q_cache:\n Q = self.sv2 * dt * dt * eye_like(self.sv2, self._dimension)\n self._Q_cache[dt] = Q\n\n return self._Q_cache[dt]", "def covariance(x, y):\n n = len(x)\n return dot(de_mean(x), de_mean(y)) / (n - 1)", "def covariance (x, y):\n n = len(x)\n return dot(de_mean(x), de_mean(y))/(n-1)", "def get_cov_matrix_states(self):\n cov = numpy.diag(numpy.zeros(self.get_num_variables()))\n i = 0\n for v in self.variables:\n cov[i,i] = v.get_covariance()\n i += 1\n return cov", "def get_thermal_covariance(self):\n cov = []\n for var in self.noise_power_variance:\n cov.append(np.diag(var * np.ones(len(self.eta))))\n\n return cov", "def get_covariance(self):\n x = self.particles[:, 0]\n y = self.particles[:, 1]\n X = np.stack((x, y), axis=0)\n return np.cov(X)", "def covariance(x, y):\n n = len(x)\n return dot(deviations_from_mean(x), deviations_from_mean(y))/ (n - 1)", "def _noise_matrix(self):\n\n return self._noise * (np.random.rand(self._rows, self._cols)\n - 0.5)", "def cov_matrix(X, mu):\n m, n = X.shape\n X_minus_mu = X - mu\n sigma = (1 / m) * (X_minus_mu.T).dot(X_minus_mu)\n\n return sigma", "def compute_cov(self):\n subject = copy.copy(self.signals)\n subject -= subject.mean(axis=0)\n if self.standardize:\n subject = subject / subject.std(axis=0) # copy on purpose\n\n n_samples = subject.shape[0]\n self.cov_ = np.dot(subject.T, subject) / n_samples\n return self", "def get_cov_matrix_parameters(self):\n cov = numpy.diag(numpy.zeros(self.get_num_parameters()))\n i = 0\n for p in self.parameters:\n cov[i,i] = p.get_covariance()\n i += 1\n return cov", "def getCovMatrix(self, caliStep, weights):\n\n Sigma = np.zeros([self.numObs, self.numObs])\n # scale observation data with normalized variance parameter to get covariance matrix\n for i in range(self.numObs):\n # use smaller weights for higher precision\n if self.scaleCovWithMax:\n Sigma[i, i] = self.sigma * weights[i] * max(self.obsData[:, i]) ** 2\n else:\n Sigma[i, i] = self.sigma * weights[i] * self.obsData[caliStep, i] ** 2\n return Sigma", "def print_latex_cov(P):\n out = ''\n # Print out header with state variables\n for var in STATE_VARS:\n out += \" & ${0:9s}$ \".format(var)\n\n out += '\\\\\\\\ \\n'\n\n\n # Print out correlation / covariance matrix \n for row in range(18):\n out += \"${0:3s}$ \".format(STATE_VARS[row])\n for col in range(18):\n # Print correlations on lower diagnal\n if col < row:\n out += \" & {0: 2.2f} \".format(float(P[row,col]/(sqrt(P[row,row]) * sqrt(P[col,col]) )))\n # Highlight variances in blue\n elif row == col:\n out += \" & {0: 2.2e} \".format(float(P[row,col]))\n else:\n out += \"& {0: 2.2e} \".format(float(P[row,col]))\n out += '\\\\\\\\ \\n'\n\n print out", "def covariance(G, variables = [], conditionants = []):\n return 
parameters(G, variables = variables, \n conditionants = conditionants )[\"cov\"]", "def get_cosmic_variance(self, signal_power):\n if self.ps_dim == 2:\n cov = []\n \n for ii, sig_eta in enumerate(signal_power):\n cov.append((1 / self.grid_weights[ii] * np.diag(sig_eta)**2))\n\n return cov\n else:\n return self.grid_weights * signal_power", "def _dgp_cov_matrix(Nt, snr2=100, clen2=1):\n f = lambda x: np.exp(-(x**2)/clen2)\n C = snr2 * (2*f(np.arange(Nt)) - f(1+np.arange(Nt))- f(-1+np.arange(Nt)))\n C[0] += 2 + 0.01 # noise, add a small number to regularize\n C[1] += -1\n return scipy.linalg.toeplitz(C)", "def MVN_Denoise(Y, mvn_model, noise_std):\n return calc_weiner_filter(Y, mvn_model.mean, mvn_model.cov, noise_std)", "def build_covariance(self):\n raise RuntimeError(\"Your Gaussian covariance code needs to \"\n \"over-ride the build_covariance method so it knows how to \"\n \"load the data covariance (or set constant_covariance=False and \"\n \"over-ride the extract_covariance method)\")\n\n #using info in self.options,\n #like filenames etc,\n #build covariance", "def ICA_Denoise(Y, ica_model, noise_std):\n\n # TODO: YOUR CODE HERE", "def get_cov_matrix_state_pars(self):\n cov = numpy.diag(numpy.zeros(self.get_num_variables() + self.get_num_parameters()))\n i = 0\n for v in self.variables:\n cov[i,i] = v.get_covariance()\n i += 1\n for p in self.parameters:\n cov[i,i] = p.get_covariance()\n i += 1\n return cov", "def stat_cov(stat_err,stat_der):\n # cast as numpy arrays\n stat_err = np.array(stat_err)\n stat_der = np.array(stat_der)\n # square the error for the variance\n stat_err2 = np.power(stat_err,2)\n # square the derivatives\n stat_der2 = np.power(stat_der,2)\n # sum of (derivative * error)^2\n cy = np.diag((stat_err2*stat_der2).sum(axis=0))\n \n # return the diagonal matrix\n return cy", "def set_cov(self):\n v_mpart = self.d_vars['MPart']\n n_mpart = len(v_mpart)\n for p in combinations_with_replacement(range(n_mpart), 2):\n self.add_parameter('Cov', p[0], p[1])\n\n m_cov = np.zeros((n_mpart, n_mpart))\n return m_cov", "def build_inverse_covariance(self):\n return np.linalg.inv(self.cov)", "def covariance(mtrx):\r\n\r\n # Average column of matrix\r\n T = np.transpose(mtrx)\r\n ave = np.zeros(len(mtrx))\r\n mtrx = np.asarray(mtrx)\r\n\r\n if isinstance(mtrx, np.ndarray):\r\n ave = average(T)\r\n\r\n for col in T:\r\n if type(mtrx) == list:\r\n # If data isn't standardized\r\n ave += np.asarray(col)\r\n\r\n\r\n if len(mtrx[0]) > len(mtrx):\r\n for moreRows in range(len(mtrx[0]), len(mtrx)):\r\n mtrx[moreRows] = np.asarray(mtrx[moreRows])\r\n\r\n ave /= len(mtrx[0])\r\n\r\n\r\n phi = T - ave\r\n # Covariance matrix\r\n return np.dot(np.transpose(phi), phi)", "def compute_covariance_matrix(Xs, sigma_2):\n m, d = Xs.shape\n t1 = np.reshape(np.tile(Xs, m), (m, m, d))\n t2 = np.reshape(np.tile(Xs, (m, 1)), (m, m, d))\n K1 = np.linalg.norm(t1 - t2, axis=2)\n coeff = 0.1\n Sigma = np.ones((m, m)) - coeff*K1\n return Sigma", "def cov(xdata, ydata, mx=None, my=None):\n n, s = _SP(xdata, mx, ydata, my)\n if n > 1:\n return s/(n-1)\n else:\n raise ValueError('sample covariance requires at least two points')", "def cov(self):\n return self._cov", "def empirical_covariance(system, excitation, m):\n observations = [system() @ excitation() for _ in range(m)]\n return np.cov(np.array(observations).T)", "def getCovarianceMatrix(self):\n #ypost = np.dot ( self.getA().T, self.priorX )\n\n theta = np.mat ( self.getA() )\n Xm = np.mat ( self.priorX )\n\n ypost = Xm * theta\n yprior = self.priorY\n 
error = ypost - yprior\n #error = error - np.mean ( error, axis = 0 )\n return np.dot ( error.T, error )", "def mpi_cov(data):\n m = mpi_mean(data)\n data_centered = data - m\n cov_local = dot(data_centered.T, data_centered)\n covmat = np.empty_like(cov_local)\n mpi.COMM.Allreduce(cov_local, covmat)\n num_data = mpi.COMM.allreduce(data.shape[0])\n covmat /= float(num_data)\n return covmat", "def sys_cov(sys_err,sys_der,cov_mat=None):\n\n if cov_mat is not None:\n lcm = len(cov_mat)\n N = lcm + len(sys_err)\n cx = np.zeros((N,N))\n # plug in matrix at the leading rows and cols\n cx[0:lcm,0:lcm] = cov_mat\n \n # cast as numpy arrays\n sys_err = np.array(sys_err)\n sys_der = np.array(sys_der)\n # square the error for the variance and diagonalize\n sys_err2 = np.diag(np.power(sys_err,2))\n # determine the final input covariance matrix\n if cov_mat is None:\n cx = sys_err2\n else:\n cx[lcm:,lcm:] = sys_err2\n # sandwich rule, matrix algebra to get outgoing covariance\n # (@ symbol is matrix dot product)\n cy = sys_der.T@cx@sys_der\n \n # return NxN matrix (N = len(derivatives))\n return cy", "def gen_mixture():\n npr.seed(0)\n num_exp = int(1e4)\n x_dim = 2\n z_dim = 2\n mu1 = [5, 5,]\n mu2 = [-5, -5]\n theta = np.array([[2,1],[-1,-2]])\n sigma = 0.1\n u = npr.uniform((num_exp,))\n z = np.zeros((num_exp, z_dim))\n cov = np.zeros((z_dim, z_dim))\n np.fill_diagonal(cov, 1)\n sz = int(num_exp/2)\n z[:sz, ]= npr.multivariate_normal(mu1, cov,sz)\n z[sz:, ] = npr.multivariate_normal(mu2,cov,sz)\n mu_x = [email protected]()\n\n x = np.zeros((num_exp, x_dim))\n for i in range(num_exp):\n x[i,:] = npr.multivariate_normal(mu_x[:,i], sigma*cov)\n print(x.shape)\n np.save('data/syn_mixture.npy', x)", "def test_cov_q(self, ndlys=13):\n for d in self.d:\n d.flag_array[:] = False #ensure that there are no flags!\n d.select(times=np.unique(d.time_array)[:10], frequencies=d.freq_array[:16])\n for d_std in self.d_std:\n d_std.flag_array[:] = False\n d_std.select(times=np.unique(d_std.time_array)[:10], frequencies=d_std.freq_array[:16])\n self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w, dsets_std=self.d_std)\n self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w, dsets_std=self.d_std)\n Ntime = self.ds.Ntimes\n self.ds.set_Ndlys(ndlys)\n # Here is the analytic covariance matrix...\n chan_x, chan_y = np.meshgrid(range(self.ds.Nfreqs), range(self.ds.Nfreqs))\n cov_analytic = np.zeros((self.ds.spw_Ndlys, self.ds.spw_Ndlys), dtype=np.complex128)\n for alpha in range(self.ds.spw_Ndlys):\n for beta in range(self.ds.spw_Ndlys):\n cov_analytic[alpha, beta] = np.exp(-2j*np.pi*(alpha-beta)*(chan_x-chan_y)/self.ds.spw_Ndlys).sum()\n key1 = (0, 24, 38)\n key2 = (1, 25, 38)\n #print(cov_analytic)\n\n for input_data_weight in ['identity','iC', 'dayenu']:\n self.ds.set_weighting(input_data_weight)\n #check error raised\n if input_data_weight == 'dayenu':\n pytest.raises(ValueError,self.ds.R, key1)\n rpk = {'filter_centers':[0.],'filter_half_widths':[0.],'filter_factors':[0.]}\n self.ds.set_r_param(key1,rpk)\n self.ds.set_r_param(key2,rpk)\n for taper in taper_selection:\n qc = self.ds.cov_q_hat(key1,key2,model='dsets')\n self.assertTrue(np.allclose(np.array(list(qc.shape)),\n np.array([self.ds.Ntimes, self.ds.spw_Ndlys, self.ds.spw_Ndlys]), atol=1e-6))\n qc = self.ds.cov_q_hat(key1,key2,model='empirical')\n self.assertTrue(np.allclose(np.array(list(qc.shape)),\n np.array([self.ds.Ntimes, self.ds.spw_Ndlys, self.ds.spw_Ndlys]), atol=1e-6))\n\n \"\"\"\n Now test that analytic Error calculation gives Nchan^2\n \"\"\"\n 
self.ds.set_weighting('identity')\n qc = self.ds.cov_q_hat(key1, key2, model='dsets')\n self.assertTrue(np.allclose(qc,\n np.repeat(cov_analytic[np.newaxis, :, :], self.ds.Ntimes, axis=0), atol=1e-6))\n \"\"\"\n Test lists of keys\n \"\"\"\n self.ds.set_weighting('identity')\n qc=self.ds.cov_q_hat([key1], [key2], time_indices=[0], model='dsets')\n self.assertTrue(np.allclose(qc,\n np.repeat(cov_analytic[np.newaxis, :, :], self.ds.Ntimes, axis=0), atol=1e-6))\n self.assertRaises(ValueError, self.ds.cov_q_hat, key1, key2, time_indices=200)\n self.assertRaises(ValueError, self.ds.cov_q_hat, key1, key2, time_indices=\"watch out!\")", "def generate_covariance(ts, sigma0, sigma, tau):\n\n ndim = ts.shape[0]\n\n tis = np.tile(np.reshape(ts, (-1, 1)), (1, ndim))\n tjs = np.tile(ts, (ndim, 1))\n\n return sigma0*sigma0*np.eye(ndim) + sigma*sigma/(2.0*tau)*np.exp(-np.abs(tis-tjs)/tau)", "def calc_cov(self, array_x, array_y):\n cov = np.empty([len(array_x),len(array_x)], dtype = float) # initialize an empty 16*16 matrix (16 pressure levels)\n for x in range(len(array_x)):\n for y in range(len(array_y)):\n entry = array_x[x] * array_y[y]\n cov[x,y] = entry\n return cov", "def calcCovarianceMatrix(data):\n # Create covariance matrix and array to store the mean values for x_mean, y_mean, z_mean\n C = np.zeros((data.shape[1], data.shape[1]))\n mean_xyz = []\n # Calculate all mean values\n for i in range(0, data.shape[1]):\n mean_xyz.append(data[:,i].mean())\n mean_xyz = np.array(mean_xyz)\n # Check whether dimensions agree \n if data[:,0].size != data[:,1].size or data[:,0].size != data[:,2].size:\n print \"X, Y and Z must be of same dimensions.\"\n else:\n # For each row in covariance matrix C\n for i in range(0, C.shape[0]):\n # For each column in covariance matrix C\n for j in range(0, C.shape[1]):\n C[i,j] = 0\n # For each point in the dataset, access x, y, z-values\n for point in data:\n # For each point, access x,y and z in all combinations (xx, xy, xz, yx, yy, yz etc)\n C[i][j] = C[i][j] + (point[i]-mean_xyz[i])*(point[j]-mean_xyz[j])\n # Divide by the total number of points \n C = (1.0/data.shape[0]) * C\n return C", "def cov(m, y=None, rowvar=1, bias=0):\n\n X = array(m, ndmin=2, dtype=float)\n if X.shape[0] == 1:\n rowvar = 1\n if rowvar:\n axis = 0\n tup = (slice(None),newaxis)\n else:\n axis = 1\n tup = (newaxis, slice(None))\n\n\n if y is not None:\n y = array(y, copy=False, ndmin=2, dtype=float)\n X = concatenate((X,y),axis)\n\n X -= X.mean(axis=1-axis)[tup]\n if rowvar:\n N = X.shape[1]\n else:\n N = X.shape[0]\n\n if bias:\n fact = N*1.0\n else:\n fact = N-1.0\n\n if not rowvar:\n return (dot(X.T, X.conj()) / fact).squeeze()\n else:\n return (dot(X, X.T.conj()) / fact).squeeze()", "def cov(self):\n return self.cond_proba.cov", "def get_cov(self, npar=None, **args):\n return get_par(self, dummy='cov_mat', npar=npar, **args)", "def information_matrix(self):\n return self._cov.inv()", "def process_noise_cov(self, dt=0.0):\n if dt not in self._Q_cache:\n # q: continuous-time process noise intensity with units\n # length^2/time (m^2/s). 
Choose ``q`` so that changes in position,\n # over a sampling period ``dt``, are roughly ``sqrt(q*dt)``.\n q = self.sv2 * dt\n Q = q * dt * eye_like(self.sv2, self._dimension)\n self._Q_cache[dt] = Q\n\n return self._Q_cache[dt]", "def pca_noise(V, eig, std_dev=0.1, normalize=True):\n lambdas = eig / np.linalg.norm(eig) if normalize else eig\n\n k = len(lambdas)\n noise = np.random.normal(scale=std_dev, size=k)\n\n ret = (lambdas * noise).reshape((k, 1, 1)) * V\n return ret.sum(axis=0)", "def print_covariance(P):\n def b(string):\n \"\"\"\n Turns a given string blue.\n\n \"\"\"\n return \"\\033[94m{0}\\033[0m\".format(string)\n\n out = \" \"\n # Print out header with state variables\n for var in STATE_VARS:\n out += \"{0:9s} \".format(var)\n\n # Print out correlation / covariance matrix \n for row in range(18):\n out += \"\\n{0:3s} \".format(STATE_VARS[row])\n for col in range(18):\n # Print correlations on lower diagnal\n if col < row:\n out += \"{0: 2.2f}, \".format(float(P[row,col]/(sqrt(P[row,row]) * sqrt(P[col,col]) )))\n # Highlight variances in blue\n elif row == col:\n out += \"{0: 2.2e}, \".format(float(P[row,col]))\n else:\n out += \"{0: 2.2e}, \".format(float(P[row,col]))\n \n out += \"\\n\"\n\n print out", "def covariance(x, mean_x, y, mean_y):\r\n \r\n covar = 0.0\r\n for i in range(len(x)):\r\n covar += (x[i] - mean_x) * (y[i] - mean_y)\r\n return covar", "def get_traj_cov(self):\n return np.dot(self._Phi.T, np.dot(self._sigma_W, self._Phi))", "def build_covariance(self):\n raise RuntimeError(\"Internal cosmosis error in SingleValueGaussianLikelihood\")", "def ut(Xi, W, noise_cov=0):\n n, kmax = Xi.shape\n\n xm = np.zeros((n, 1))\n for k in range(kmax):\n xm += W[k] * Xi[:, k, None]\n\n xcov = np.zeros((n, n))\n for k in range(kmax):\n xcov += W[k] * (Xi[:, k, None] - xm) * (Xi[:, k, None] - xm).T\n\n xcov += noise_cov\n return xm, xcov", "def filter_covariance(cov_matrix, uvd=None, return_diag_as_uvdata=True, **array_kwargs):\n raise NotImplementedError(\"This function has not yet been written.\")", "def get_cov(self):\n\n if self._cov is not None:\n return self._cov\n\n names = ['ra', 'dec', 'parallax', 'pmra', 'pmdec']\n\n C = np.zeros((6,6))\n\n # pre-load the diagonal\n for i,name in enumerate(names):\n full_name = \"{}_error\".format(name)\n C[i,i] = self._data[full_name]**2\n\n for i,name1 in enumerate(names):\n for j,name2 in enumerate(names):\n if j <= i:\n continue\n full_name = \"{}_{}_corr\".format(name1, name2)\n C[i,j] = self._data[full_name] * np.sqrt(C[i,i]*C[j,j])\n C[j,i] = self._data[full_name] * np.sqrt(C[i,i]*C[j,j])\n\n if self._rv_err is not None:\n C[5,5] = self._rv_err**2\n\n self._cov = C\n return self._cov", "def numerical_covariance(self, params={}, nrealisations=200, nthreads=1):\n\n if nrealisations < 2:\n raise ValueError(\"nrealisations must be more than one\")\n\n # We use a hack where we define an external function which *passed*\n # this object just so that we can do multiprocessing on it.\n fnc = partial(_produce_mock, self, params)\n\n pool = MyPool(nthreads)\n \n power = pool.map(fnc, np.arange(int(nrealisations/2)))\n power2 = pool.map(fnc, np.arange(int(nrealisations/2)))\n power.extend(power2)\n \n # Note, this covariance *already* has thermal noise built in.\n cov = []\n mean = []\n \n for ii in range(self.n_obs):\n mean.append(np.mean(np.array(power)[:,ii,:,:], axis=0))\n\n if self.ps_dim == 2:\n cov.append([np.cov(x) for x in np.array(power)[:,ii,:,:].transpose((1, 2, 0))])\n else:\n cov = np.var(np.array(power)[:,ii,:,:], 
axis=0)\n\n #Cleanup the memory\n for i in range(len(power)-1,-1,-1):\n del power[i] \n \n pool.close()\n pool.join()\n\n return mean, cov", "def GSM_Denoise(Y, gsm_model, noise_std):\n X = np.empty(Y.shape)\n k = gsm_model.mix.shape[0]\n I = np.identity(gsm_model.cov[0, :].shape[0])\n for i in range(k):\n mvn = multivariate_normal(cov=(gsm_model.cov[i, :] + ((noise_std**2) * I)))\n upper_arg = gsm_model.mix[i] * (mvn.logpdf(Y[:, i]))\n lower_arg = 0\n for j in range(k):\n inner_mvn = multivariate_normal(cov=(gsm_model.cov[j] + ((noise_std**2) * I)))\n lower_arg += gsm_model.mix[j] * (inner_mvn.logpdf(Y[:, i]))\n c_i = upper_arg / lower_arg\n weiner_i = calc_weiner_filter(Y, np.zeros(Y.shape[0]), gsm_model.cov[i, :], noise_std)\n X += c_i * weiner_i\n return X", "def cov(self, decomposed=False):\n if decomposed:\n return self._R, self._S\n else:\n return np.copy(self._C)", "def set_noise_cov(self, noise_cov):\n self.check_nside(noise_cov)\n self.noise_cov = sc.NoiseCov(noise_cov, self.mask.mask)\n try:\n self.set_delta()\n except:\n pass", "def generate_positive_semi_definite_matrix(n_dim):\n cov = np.random.randn(n_dim, n_dim)\n return np.dot(cov, cov.T)", "def compute_measurement_covariance(jacobian, oldCovariance, sigmaObservation): \n\n return None", "def covfunc(self, theta, d): \n sigmaf, l = theta[:2]\n xxl = np.sum((d/l)**2, axis=1)\n covariance = sigmaf**2 * np.exp(-xxl/2.)\n return covariance", "def covariance_matrix(self,x,y,names=None,cov=None):\n if not isinstance(x,np.ndarray):\n x = np.array(x)\n if not isinstance(y,np.ndarray):\n y = np.array(y)\n assert x.shape[0] == y.shape[0]\n\n if names is not None:\n assert x.shape[0] == len(names)\n c = np.zeros((len(names),len(names)))\n np.fill_diagonal(c,self.contribution)\n cov = Cov(x=c,names=names)\n elif cov is not None:\n assert cov.shape[0] == x.shape[0]\n names = cov.row_names\n c = np.zeros((len(names),1)) + self.contribution\n cont = Cov(x=c,names=names,isdiagonal=True)\n cov += cont\n\n else:\n raise Exception(\"Vario2d.covariance_matrix() requires either\" +\n \"names or cov arg\")\n rc = self.rotation_coefs\n for i1,(n1,x1,y1) in enumerate(zip(names,x,y)):\n dx = x1 - x[i1+1:]\n dy = y1 - y[i1+1:]\n dxx,dyy = self._apply_rotation(dx,dy)\n h = np.sqrt(dxx*dxx + dyy*dyy)\n\n h[h<0.0] = 0.0\n h = self._h_function(h)\n if np.any(np.isnan(h)):\n raise Exception(\"nans in h for i1 {0}\".format(i1))\n cov.x[i1,i1+1:] += h\n for i in range(len(names)):\n cov.x[i+1:,i] = cov.x[i,i+1:]\n return cov", "def cc_cov(r, **kwargs):\r\n sample_corr = r.corr()\r\n n_assets = len(r.columns)\r\n avg_distinct_rho = (sample_corr.values.sum() - n_assets) / (\r\n n_assets * (n_assets - 1)) # Taking avg of off diagonal corr matrix on one side\r\n const_corr = np.full_like(sample_corr, avg_distinct_rho)\r\n np.fill_diagonal(const_corr, 1.)\r\n sd = r.std()\r\n # Convert to cov using statsmodel\r\n const_cov_sm = mh.corr2cov(const_corr, sd)\r\n # Convert to cov using formula and outer product - alternate way is to use sd @ sd.T instead of np.outer(sd, sd) -> yields matrix(mxm)\r\n const_cov = const_corr * np.outer(sd, sd)\r\n return pd.DataFrame(const_cov, columns=r.columns, index=r.columns)", "def empirical_covariance_multiple(system, excitation, ms):\n observations = [system() @ excitation() for _ in range(max(ms))]\n\n Cs = [np.cov(np.array(observations[0:m]).T) for m in ms]\n return Cs", "def cone(df, mu, N, alphacov=2.0):\r\n return alphacov / ((N + 1.3)**2 + mu)", "def define_noise(self, ctx, model):\n # Only save the mean/cov if we have 
foregrounds, and they don't update every iteration (otherwise, get them\n # every iter).\n if self.foreground_cores and not any([fg._updating for fg in self.foreground_cores]):\n if not self.use_analytical_noise:\n mean, covariance = self.numerical_covariance(\n nrealisations=self.nrealisations, nthreads=self._nthreads\n )\n else:\n # Still getting mean numerically for now...\n mean = self.numerical_covariance(nrealisations=self.nrealisations, nthreads=self._nthreads)[0]\n\n covariance = self.analytical_covariance(self.u, self.eta,\n np.median(self.frequencies),\n self.frequencies.max() - self.frequencies.min())\n\n thermal_covariance = self.get_thermal_covariance()\n covariance = [x + y for x, y in zip(covariance, thermal_covariance)]\n\n else:\n # Only need thermal variance if we don't have foregrounds, otherwise it will be embedded in the\n # above foreground covariance... BUT NOT IF THE FOREGROUND COVARIANCE IS ANALYTIC!!\n # covariance = self.get_thermal_covariance()\n # mean = np.repeat(self.noise_power_expectation, len(self.eta)).reshape((len(self.u), len(self.eta)))\n mean = 0\n covariance = 0\n\n return [{\"mean\": mean, \"covariance\": covariance}]", "def get_covariances(songs=songs_df, method='corrcoef', save=False, out_as='png'):\n # Make a matrix out of the rankings.\n songs_mat = np.matrix(songs['ranks'].tolist())\n\n if method == 'corrcoef':\n covariance_matrix = np.corrcoef(songs_mat)\n elif method == 'cov':\n covariance_matrix = np.cov(songs_mat)\n else:\n return \"Please choose 'cov' or 'corrcoeff' for method.\"\n\n mask = np.zeros_like(covariance_matrix)\n mask[np.triu_indices_from(mask)] = True\n sns.heatmap(covariance_matrix, mask=mask, cmap='RdBu_r')\n\n prefix = 'Normalized' if method == 'corrcoef' else ''\n plt.title(prefix + ' Covariance Matrix for Song Rankings Through Time',\n weight='bold')\n plt.xticks(np.arange(0, 560, 25))\n plt.yticks(np.arange(0, 560, 25))\n plt.xlabel('Song Number', weight='bold')\n plt.ylabel('Song Number', weight='bold')\n if save:\n if out_as == 'pdf':\n plt.savefig(fig_path + method + '_matrix.pdf')\n elif out_as == 'png':\n plt.savefig(fig_path + method + '_matrix.png', dpi=300)\n else:\n print \"Not saving; please choose 'png' or 'pdf' for 'out_as'\"\n plt.show()\n return covariance_matrix", "def solutionCovariance(self):\n return self.standardError2()*self.AtAinv", "def sigma(self):\n with ops.name_scope(self.name):\n return self._cov.to_dense()", "def _inverse_covariance(spectral_array):\n hsi_img = spectral_array.array_data\n\n n_lines, n_samples, n_band = hsi_img.shape\n n_pixels = n_lines * n_samples\n hsi_data = np.reshape(hsi_img, (n_pixels, n_band), order='F').T\n inverse_covariance = np.linalg.pinv(np.cov(hsi_data.T, rowvar=False))\n\n return inverse_covariance", "def cov_matrix(gx, gy, winsize, alpha):\n\n gx = edge_mirror(gx, winsize)\n gy = edge_mirror(gy, winsize)\n radius_filter = gen_gaussian(winsize)\n radius_filter = numpy.rot90(radius_filter, 2)\n\n lenth = sum(sum(radius_filter))\n\n gx = signal.convolve2d(gx, radius_filter, mode='valid')\n gy = signal.convolve2d(gy, radius_filter, mode='valid')\n\n c11 = numpy.multiply(gx, gx)\n c22 = numpy.multiply(gy, gy)\n c12 = numpy.multiply(gx, gy)\n\n\n # SVD closed form\n lambda1 = (c11 + c22 + numpy.sqrt((c11 - c22)**2 + 4*c12**2)) / 2\n lambda2 = (c11 + c22 - numpy.sqrt((c11 - c22)**2 + 4*c12**2)) / 2\n numer = c11 + c12 - lambda1\n denom = c22 + c12 - lambda2\n\n ev1 = numpy.zeros_like(numer)\n ev2 = numpy.zeros_like(ev1)\n\n rows, cols = numer.shape\n for r in 
range(rows):\n for c in range(cols):\n if abs(denom[r, c]) < _opzero:\n if abs(numer[r, c]) < _opzero:\n if abs(denom[r, c]) > abs(numer[r, c]):\n ev1[r, c] = 0\n ev2[r, c] = 1\n else:\n ev1[r, c] = 1\n ev2[r, c] = 0\n else:\n ev1[r, c] = 1\n ev2[r, c] = 0\n else:\n theta = math.atan(-numer[r, c]/denom[r, c])\n ev1 = math.sin(theta)\n ev2 = math.cos(theta)\n\n sv1 = math.sqrt(abs(lambda1[r, c]))\n sv2 = math.sqrt(abs(lambda2[r, c]))\n p = ((sv1 * sv2 + _epsa) / lenth)**alpha\n s1 = (sv1 + 1) / (sv2 + 1)\n s2 = 1. / s1\n c11[r, c] = p * (s1 * ev2 ** 2 + s2 * ev1 ** 2)\n c22[r, c] = p * (s1 * ev1 ** 2 + s2 * ev2 ** 2)\n c12[r, c] = p * (s1 - s2) * ev1 * ev2\n\n c11 = edge_mirror(c11, winsize)\n c12 = edge_mirror(c12, winsize)\n c22 = edge_mirror(c22, winsize)\n\n return c11, c12, c22", "def se(self):\n return np.sqrt(self.scaled_vcov().diagonal().T)", "def acov (x,y, dimension=None,keepdims=0):\r\n if dimension == None:\r\n x = N.ravel(x)\r\n y = N.ravel(y)\r\n dimension = 0\r\n xmn = amean(x,dimension,1) # keepdims\r\n xdeviations = x - xmn\r\n ymn = amean(y,dimension,1) # keepdims\r\n ydeviations = y - ymn\r\n if type(dimension) == ListType:\r\n n = 1\r\n for d in dimension:\r\n n = n*x.shape[d]\r\n else:\r\n n = x.shape[dimension]\r\n covar = N.sum(xdeviations*ydeviations)/float(n-1)\r\n return covar", "def measure_noise(R, N=10):\n\n m = np.zeros(R.shape[0])\n\n e = np.random.multivariate_normal(m, R.real, size=N)\\\n + 1.0j * np.random.multivariate_normal(m, cov=R.real, size=N)\n\n return e.T", "def get_cov_mat(samples_dir):\n if not os.path.isdir(samples_dir):\n print(\"Samples directory not found: \", samples_dir)\n exit()\n\n player_inputs = np.array((load_matrix_from_raw_samples(samples_dir, scaling_factor=100),)).T\n n_players = player_inputs.shape[1]\n aggr_cov_mat = np.zeros((n_players, n_players), dtype=\"complex128\")\n for n_avg in range(player_inputs.shape[0]):\n aggr_cov_mat += player_inputs[n_avg] @ player_inputs[n_avg].conjugate().T\n\n cov_mat = aggr_cov_mat / player_inputs.shape[0]\n return cov_mat", "def generate_noise_vector(self, ):\n self.noise.resize_(\n self.batch_size, int(self.opt.nz), 1, 1).normal_(0, 1)\n self.noisev = Variable(self.noise) # TODO: Add volatile=True???", "def pcov(xdata, ydata, mx=None, my=None):\n n, s = _SP(xdata, mx, ydata, my)\n if n > 0:\n return s/n\n else:\n raise ValueError('population covariance requires at least one point')", "def covariance(self):\n return self._covariance", "def cpca_cov(sigma2, d, old=False):\n\n n_ = sigma2.shape[0]\n k_ = d.shape[0]\n i_n = np.eye(n_)\n lambda2_d = np.empty((n_, 1))\n e_d = np.empty((n_, n_))\n\n # Step 0. initialize constraints\n m_ = n_ - k_\n a_n = np.copy(d)\n\n for n in range(n_):\n # Step 1. orthogonal projection matrix\n p_n = [email protected](a_n@a_n.T)@a_n\n\n # Step 2. conditional dispersion matrix\n s2_n = p_n @ sigma2 @ p_n\n\n # Step 3. conditional principal directions/variances\n e_d[:, [n]], lambda2_d[n] = pca_cov(s2_n, 1)\n\n # Step 4. 
Update augmented constraints matrix\n if n+1 <= m_-1:\n a_n = np.concatenate((a_n.T, sigma2 @ e_d[:, [n]]), axis=1).T\n elif m_ <= n+1 <= n_-1:\n a_n = (sigma2 @ e_d[:, :n+1]).T\n\n return e_d, lambda2_d.squeeze()", "def covariance(self,pt0,pt1):\n #raise Exception()\n cov = self.nugget\n for vario in self.variograms:\n cov += vario.covariance(pt0,pt1)\n return cov", "def extract_inverse_covariance(self, block):\n return np.linalg.inv(self.cov)", "def plotCov(mu, C, axis):\n xx1 = [mu[0,0]-0.2,mu[0,0]+0.2]\n xx2 = [mu[1,0]-0.2,mu[1,0]+0.2]\n X1,X2 = meshgrid(linspace(xx1[0],xx1[1],50), linspace(xx2[0],xx2[1],50))\n f = zeros(shape(X1))\n Cinv = linalg.inv(C)\n CinvDet = linalg.det(Cinv)\n if CinvDet > 10**15:\n print \"The following covariance could not be plotted\"\n print \"Prec:\",Cinv\n print \"Det: \",CinvDet\n return\n\n for yi in range(shape(X1)[0]):\n for xi in range(shape(X1)[1]):\n x = mat([X1[yi,xi],X2[yi,xi]]).T - mu\n f[yi,xi] = x.T * Cinv * x\n\n axis.contour(X1,X2,f,[2.3,4.61,6.18,9.21,11.8])", "def _getCovMat(self, cov_expr):\n # store the expression\n self.expr = cov_expr\n # create a PETSC matrix for cov_mat\n cov_mat = PETSc.Mat().create()\n cov_mat.setType('aij')\n cov_mat.setSizes(self.domain.getNodes(), self.domain.getNodes())\n cov_mat.setUp()\n\n # scalar valued function is evaluated in this variable\n cov_ij = np.empty((1), dtype=float)\n # the points to evalute the expression\n xycor = np.empty((4), dtype=float)\n\n print '---------------------------'\n print '---------------------------'\n print ' Building Covariance Matrix'\n print '---------------------------'\n print '---------------------------'\n # Loop through global nodes and build the matrix for i < j because of\n # symmetric nature.\n for node_i in range(0, self.domain.getNodes()):\n # global node node_i\n for node_j in range(node_i, self.domain.getNodes()):\n # global node node_j\n temp_cov_ij = 0\n for elem_i in self.node_to_elem[node_i]:\n # elem_i : element attached to node_i\n # x1 : x co-ordinate of the centroid of element elem_i\n x1 = self.c_centroid_array[elem_i].x()\n # y1 : x co-ordinate of the centroid of element elem_i\n y1 = self.c_centroid_array[elem_i].y()\n for elem_j in self.node_to_elem[node_j]:\n # elem_j : element attached to node_j\n # x2 : x co-ordinate for the centroid of element elem_j\n x2 = self.c_centroid_array[elem_j].x()\n # y2 : y co-ordinate for the centroid of element elem_j\n y2 = self.c_centroid_array[elem_j].y()\n xycor[0] = x1\n xycor[1] = x2\n xycor[2] = y1\n xycor[3] = y2\n # evaluate the expression\n cov_expr.eval(cov_ij, xycor)\n if cov_ij[0] > 0:\n temp_cov_ij += (1.0 / 3) * (1.0 / 3) * \\\n cov_ij[0] * \\\n self.c_volume_array[elem_i] * \\\n self.c_volume_array[elem_j]\n\n cov_mat.setValue(node_i, node_j, temp_cov_ij)\n cov_mat.setValue(node_j, node_i, temp_cov_ij)\n cov_mat.assemblyBegin()\n cov_mat.assemblyEnd()\n print '---------------------------'\n print '---------------------------'\n print ' Finished Covariance Matrix'\n print '---------------------------'\n print '---------------------------'\n\n return cov_mat", "def covariance(x, y):\n x_mean = mean(x)\n y_mean = mean(y)\n diff_x_mean = (x_i - x_mean for x_i in x)\n diff_y_mean = (y_i - y_mean for y_i in y)\n sum_xy_diff = sum(a * b for a, b in zip(diff_x_mean, diff_y_mean))\n return sum_xy_diff / (len(x) - 1)", "def __init__(self, N0, N1):\n #self.w = np.zeros(N);\n self.p0 = N0/(N0+N1) \n self.p1 = N1/(N0+N1)\n self.mu0 = np.zeros(N0+N1)\n self.mu1 = np.zeros(N0+N1)\n self.covariance = 0", "def 
extract_covariance_log_determinant(self, block):\n sign, log_inv_det = np.linalg.slogdet(self.inv_cov)\n log_det = -log_inv_det\n return log_det", "def calculate_covariance_matrix(X, Y=None):\n if Y is None:\n Y = X\n n_samples = np.shape(X)[0]\n covariance_matrix = (1 / (n_samples - 1)) * (\n X - X.mean(axis=0)).T.dot(Y - Y.mean(axis=0))\n\n return np.array(covariance_matrix, dtype=float)", "def covariance(self, x1, x2, lengths):\n z = self.dist(x1, x2, lengths)\n return (self.a**2) * exp(-0.5*z)", "def covariance2d(self, P_x, P_y, P_z, P_x_dot, P_y_dot, P_z_dot, P_x_ddot, P_y_ddot, P_z_ddot):\n cov_matrix = numpy.array([[P_x, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, P_y, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, P_z, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, P_x_dot, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, P_y_dot, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, P_z_dot, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, P_x_ddot, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, P_y_ddot, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, P_z_ddot]])\n return cov_matrix", "def calculate_covariance_matrix(X, Y=None):\n\tif Y is None:\n\t\tY = X\n\tn_samples = np.shape(X)[0]\n\tcovariance_matrix = (1 / (n_samples-1)) * (X - X.mean(axis=0)).T.dot(Y - Y.mean(axis=0))\n\treturn np.array(covariance_matrix, dtype=float)" ]
[ "0.71300656", "0.66976005", "0.6629202", "0.6629202", "0.6629202", "0.6622315", "0.6622108", "0.6622073", "0.6570352", "0.6554128", "0.6520113", "0.64823085", "0.64064324", "0.6393191", "0.6317923", "0.62303", "0.6217225", "0.6212507", "0.6195888", "0.6166094", "0.612643", "0.60941535", "0.6054734", "0.6047353", "0.5995319", "0.59950817", "0.5979224", "0.59743917", "0.59668756", "0.59310156", "0.5928348", "0.5921733", "0.59132147", "0.59080064", "0.59000355", "0.5890488", "0.588879", "0.58739686", "0.5869594", "0.58659095", "0.58641165", "0.5862943", "0.5857791", "0.58446014", "0.5841578", "0.58315563", "0.5828035", "0.5822101", "0.58169794", "0.5814429", "0.57957065", "0.578992", "0.5777518", "0.5763232", "0.5755046", "0.57546955", "0.5748693", "0.5746825", "0.57458997", "0.57204676", "0.571575", "0.5707356", "0.56950843", "0.56884366", "0.5677969", "0.56529015", "0.56461203", "0.56390303", "0.5627033", "0.56269264", "0.5620035", "0.56062925", "0.5604235", "0.5591319", "0.55740446", "0.5568545", "0.5562989", "0.55596924", "0.55543715", "0.5551506", "0.5542651", "0.553364", "0.5530393", "0.55285114", "0.55121195", "0.5503519", "0.5502529", "0.5468719", "0.5467673", "0.5454924", "0.54517174", "0.543947", "0.5438371", "0.5429831", "0.54280853", "0.5426671", "0.54259443", "0.54175365", "0.5414176", "0.5411588" ]
0.7025741
1
This will check whether the entries are pairwise close enough (within tol)
def CompareMatrices(mat1, mat2, tol):
    # just going to assume they are the same size...
    for i in range(len(mat1)):
        for j in range(len(mat1)):
            if abs(mat1[i][j] - mat2[i][j]) > tol:
                return False

    return True
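A minimal usage sketch (illustrative only, not part of the original record) showing how CompareMatrices responds to a tight versus a loose tolerance; the matrices below are made-up examples:

# Two 2x2 matrices whose entries differ by at most 0.0005
a = [[1.00, 2.00],
     [3.00, 4.00]]
b = [[1.0005, 2.00],
     [3.00, 3.9995]]

print(CompareMatrices(a, b, tol=1e-2))  # True  - every entry differs by less than 0.01
print(CompareMatrices(a, b, tol=1e-4))  # False - 1.0005 vs 1.00 exceeds 1e-4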
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_symmetric(adj : np.ndarray, tol : float = 1e-8) -> bool:\n return np.all(np.abs(adj-adj.T) < tol)", "def check_restraint_pairs_for_doubles(list): # Also consider that a1 and a2 can be switches\r\n for i in range(len(list) - 1):\r\n for j in range(i + 1, len(list)):\r\n if (list[i].r1 == list[j].r1 and list[i].r2 == list[j].r2) or (\r\n list[i].r1 == list[j].r2 and list[i].r2 == list[j].r1) or list[i].distance == list[j].distance:\r\n return True\r\n return False", "def are_symmetrically_related(self, point_a, point_b, tol=0.001):\n if np.allclose(self.operate(point_a), point_b, atol=tol):\n return True\n if np.allclose(self.operate(point_b), point_a, atol=tol):\n return True\n return False", "def arecloseenough(x1, x2):\n\n if abs(x1 - x2) <= VERYSMALL:\n return True\n \n return False", "def comparison_test():\n for pose in SE2.interesting_points():\n se2 = se2_from_SE2(pose)\n SE2a = SE2_from_se2_slow(se2)\n SE2b = SE2_from_se2(se2)\n # printm('pose', pose, 'se2', se2)\n # printm('SE2a', SE2a, 'SE2b', SE2b)\n SE2.assert_close(SE2a, pose)\n # print('SE2a = pose Their distance is %f' % d)\n SE2.assert_close(SE2b, pose)\n # print('SE2b = pose Their distance is %f' % d)\n assert_allclose(SE2a, SE2b, atol=1e-8, err_msg=\"SE2a != SE2b\")\n assert_allclose(SE2a, pose, atol=1e-8, err_msg=\"SE2a != pose\")\n assert_allclose(SE2b, pose, atol=1e-8, err_msg=\"SE2b != pose\")", "def all_near(self, mtrx, tol=10e-8):\n if not isinstance(mtrx, Matrix):\n return False\n if not (self.m == mtrx.m and self.n == mtrx.n):\n return False\n for i in range(self.m):\n for j in range(self.n):\n if abs(self[i, j] - mtrx[i, j]) > tol:\n return False\n return True", "def test_epipolar(dxy_0, ep_vec, dxy, tol):\n delta=np.abs(np.dot((dxy-dxy_0), [ep_vec[1], -ep_vec[0]]))\n disp_mag=np.sqrt((dxy[:,0]-dxy_0[0])**2 +(dxy[:,1]-dxy_0[1])**2)\n good=(delta < tol) | (delta < 0.02 * disp_mag )\n return good, delta", "def test_distances(self):\n for p1, p2, distance in DISTANCES:\n calculated = p1.approximate_distance_meters(p2)\n self.assertAlmostEqual(distance, calculated, delta=5)", "def test__compute_tolerance_distance():\n classifier = classifier_module.Classifier(None)\n L1 = [11.2, 41.43, 1.33]\n L2 = [10.9, 41.45, 1.34]\n L3 = [12.0, 41.4412, 1.001]\n L4 = [11.3, 41.15, 1.12]\n L5 = [11.223, 41.0, 1.31]\n AL = [L1, L2, L3, L4, L5]\n symbol = \"a\"\n classifier._compute_tolerance_distance(AL, symbol)\n tolerance_distance_path = \\\n classifier_module.Classifier._get_file_path( \\\n classifier.files[classifier_module.DISTANCE_TOLERANCE_FILE], symbol)\n file_with_tolerance_distance = \\\n open(tolerance_distance_path, 'r')\n tolerance_distance = float(file_with_tolerance_distance.readline())\n file_with_tolerance_distance.close()\n assert fabs(tolerance_distance - 0.5506099238118276) < epsilon", "def within_tolerance(a_vec, b_vec, tol_vec):\n\tfor a, b, tol in zip(a_vec, b_vec, tol_vec):\n\t\tif abs(a - b) > tol:\n\t\t\treturn False\n\treturn True", "def Q1_test():\n A, p1, p2 = [0,0], [2,4], [6,5]\n return (distance(A,p1) > 4.472135) and (distance(p1,p2) < 4.472136)", "def _isproperdist(X):\n X = np.asarray(X)\n if not np.allclose(np.sum(X), 1) or not np.all(X>=0) or not np.all(X<=1):\n return False\n else:\n return True", "def test_perform_pairwise_tests_too_few_obs(self):\r\n exp = [['foo', 'bar', nan, nan, nan, nan, nan], ['foo', 'baz',\r\n -\r\n 7.794228634059948, 0.008032650971672552, 0.016065301943345104,\r\n nan, nan], ['bar', 'baz',\r\n -\r\n 2.598076211353316, 0.060844967173160069, 
0.12168993434632014,\r\n nan, nan]]\r\n obs = _perform_pairwise_tests(self.labels3, self.dists3, 'low', 0)\r\n self.compare_multiple_level_array(obs, exp)\r\n\r\n\r\n exp = [['foo', 'bar', nan, nan, nan, nan, nan]]\r\n obs = _perform_pairwise_tests(['foo', 'bar'], [[], [1, 2, 4]], 'high',\r\n 20)\r\n self.compare_multiple_level_array(obs, exp)", "def close(a,b):\n return abs(a-b) < epsilon", "def test_inter_score_distances(self):\n buff_score = self.pdb.get_interaction_energy(ff=self.ff)\n ff_co = self.ff.distance_cutoff\n for (at_a, at_b), _ in buff_score.inter_scores:\n self.assertTrue(isambard.geometry.distance(at_a, at_b) <= ff_co)", "def testSymmetry(self):\n for profile1 in self.profiles:\n for profile2 in self.profiles:\n self.assertEqual(profile1.edit_distance(profile2), profile2.edit_distance(profile1))", "def testTriangleInequality(self):\n for profile1 in self.profiles:\n for profile2 in self.profiles:\n for profile3 in self.profiles:\n self.assertTrue(profile1.edit_distance(profile3) <= profile1.edit_distance(profile2) + profile2.edit_distance(profile3))", "def test_distance(self):\n for emb_vals, point, dist_gt in self.DISTANCE_EXAMPLES:\n print(emb_vals, point, dist_gt)\n emb = to_emb(emb_vals)\n dist = emb.distance(point)\n assert np.allclose(dist, dist_gt), \\\n (\"Wrong distance for point {}: expected {} but was {};\"\n \"\\nembedding:\\n{}\").format(point, dist_gt, dist, str(emb))", "def equalWithinTolerance(a, b, tol):\n return abs(a - b) <= tol", "def testCheckPairsHaveCorrectOrder(self):\n add_geo = pd.DataFrame({\n 'date':\n pd.to_datetime(\n ['2019-01-01', '2019-10-01', '2019-01-01', '2019-10-01']),\n 'geo': [5, 5, 6, 6],\n 'response': [4.45, 20, 4.55, 20],\n 'spend': [10, 10, 10, 10]\n })\n new_data = pd.concat([self.test_data, add_geo], sort=False)\n test_class = TrimmedMatchGeoXDesign(\n GeoXType.HEAVY_UP,\n pretest_data=new_data,\n time_window_for_design=self.design_window,\n time_window_for_eval=self.evaluation_window,\n matching_metrics={'response': 1.0})\n test_class.create_geo_pairs(use_cross_validation=True)\n test_class.create_geo_level_eval_data()\n test_class.geo_level_eval_data[0].sort_values(by='geo', inplace=True)\n test_class.geo_level_eval_data[0].reset_index(drop=True, inplace=True)\n pairs = test_class.pairs[0].round({'distance': 5})\n self.assertTrue(\n test_class.geo_level_eval_data[0].sort_index(axis=1).equals(\n pd.DataFrame({\n 'geo': [1, 2, 3, 4, 5, 6],\n 'pair': [3, 1, 3, 1, 2, 2],\n 'response': [2.0, 5.0, 2.0, 4.0, 20.0, 20.0],\n 'spend': [1.5, 2.5, 1.5, 6, 10, 10]\n })))\n self.assertTrue(\n pairs.equals(\n pd.DataFrame({\n 'geo1': [4, 6, 1],\n 'geo2': [2, 5, 3],\n 'distance': [1/16, 0.1/16, 0.0],\n 'pair': [1, 2, 3]\n })))", "def equals(a, b, tol=1e-10):\n return np.abs(a-b) <= tol", "def test_distances(self):\n\n cent_1 = np.array([0.5, 0.5])\n verts_1 = np.array([[0., 1.], [0., 0.], [1., 0.], [1., 1.]])\n cent_2 = cent_1 - 0.5\n verts_2 = verts_1 - np.array([0.5, 0.5])\n\n # Compare the center-vertex distances between point sets with rigidly shifted coordinates\n self.assertTrue(all(po.cvdist(verts_1, cent_1) == po.cvdist(verts_2, cent_2)))\n # Compare the vertex-vertex distances between point sets with rigidly shifted coordinates\n self.assertTrue(all(po.vvdist(verts_1) == po.vvdist(verts_2)))", "def _allclose(x, y, rtol=1e-7, atol=1e-14):\n for a, b in zip(x, y):\n if np.abs(a - b) > (atol + rtol * np.abs(b)):\n return False\n return True", "def isparallel(p1, p2, tol=10*_eps):\n \n return np.linalg.norm(np.cross(p1.w, p2.w) ) < tol", 
"def point_isclose(a, b, *args, **kwargs):\n for x, y in zip(a, b):\n if not isclose(x, y, *args, **kwargs):\n return False\n return True", "def isclose ( a , b , rel_tol = 1.e-9 , abs_tol = 0.0 ) :\n return abs ( a - b ) <= max ( rel_tol * max ( abs ( a ) , abs ( b ) ) , abs_tol )", "def has_converged(x, y, epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True", "def has_converged(x, y, epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True", "def test_get_adjacent_distances(self):\r\n dm_str = [\"\\ts1\\ts2\\ts3\", \"s1\\t0\\t2\\t4\", \"s2\\t2\\t0\\t3.2\",\r\n \"s3\\t4\\t3.2\\t0\"]\r\n dm_header, dm = parse_distmat(dm_str)\r\n # error cases: fewer than 2 valid sample ids\r\n self.assertRaises(ValueError,\r\n get_adjacent_distances, dm_header, dm,\r\n [])\r\n self.assertRaises(ValueError,\r\n get_adjacent_distances, dm_header, dm,\r\n ['s1'])\r\n self.assertRaises(ValueError,\r\n get_adjacent_distances, dm_header, dm,\r\n ['s0', 's1'])\r\n self.assertRaises(ValueError,\r\n get_adjacent_distances, dm_header, dm,\r\n ['s1', 's4'])\r\n\r\n # one pair of valid distances\r\n self.assertEqual(get_adjacent_distances(dm_header, dm, ['s1', 's2']),\r\n ([2], [('s1', 's2')]))\r\n self.assertEqual(get_adjacent_distances(dm_header, dm, ['s1', 's1']),\r\n ([0], [('s1', 's1')]))\r\n self.assertEqual(get_adjacent_distances(dm_header, dm, ['s1', 's3']),\r\n ([4], [('s1', 's3')]))\r\n self.assertEqual(get_adjacent_distances(dm_header, dm, ['s2', 's3']),\r\n ([3.2], [('s2', 's3')]))\r\n\r\n # multiple valid distances\r\n self.assertEqual(get_adjacent_distances(dm_header,\r\n dm,\r\n ['s1', 's2', 's3']),\r\n ([2, 3.2], [('s1', 's2'), ('s2', 's3')]))\r\n self.assertEqual(get_adjacent_distances(dm_header,\r\n dm,\r\n ['s1', 's3', 's2', 's1']),\r\n ([4, 3.2, 2], [('s1', 's3'), ('s3', 's2'), ('s2', 's1')]))\r\n\r\n # mixed valid and invalid distances ignores invalid distances\r\n self.assertEqual(get_adjacent_distances(dm_header,\r\n dm,\r\n ['s1', 's3', 's4', 's5', 's6', 's2', 's1']),\r\n ([4, 3.2, 2], [('s1', 's3'), ('s3', 's2'), ('s2', 's1')]))\r\n # strict=True results in missing sample ids raising an error\r\n self.assertRaises(ValueError, get_adjacent_distances,\r\n dm_header,\r\n dm,\r\n ['s1',\r\n 's3',\r\n 's4',\r\n 's5',\r\n 's6',\r\n 's2',\r\n 's1'],\r\n strict=True)", "def __eq__(self, candidate):\n return np.linalg.norm(self.components()\n -\n candidate.components()) < 1.e-7", "def test_t_paired_specific_difference(self):\r\n x, y = self.x, self.y\r\n # difference is 0.2, so test should be non-significant if 0.2 passed\r\n self.failIf(t_paired(y, x, exp_diff=0.2)[0] > 1e-10)\r\n # same, except that reversing list order reverses sign of difference\r\n self.failIf(t_paired(x, y, exp_diff=-0.2)[0] > 1e-10)\r\n # check that there's no significant difference from the true mean\r\n self.assertFloatEqual(\r\n t_paired(y, x, exp_diff=0.2)[1], 1, 1e-4)", "def test_EstimateDistances(self):\n d = EstimateDistances(self.al, JC69())\n d.run()\n canned_result = {('b', 'e'): 0.440840,\n ('c', 'e'): 0.440840,\n ('a', 'c'): 0.088337,\n ('a', 'b'): 0.188486,\n ('a', 'e'): 0.440840,\n ('b', 'c'): 0.0883373}\n result = d.getPairwiseDistances()\n self.assertDistsAlmostEqual(canned_result, result)\n \n # excercise writing to file\n d.writeToFile('junk.txt')\n try:\n os.remove('junk.txt')\n except OSError:\n pass # probably parallel", "def test_tanimoto_distance(get_distributions):\n for i, dist_a in 
enumerate(get_distributions):\n for j, dist_b in enumerate(get_distributions):\n tanimototo = tanimoto_distance(dist_a, dist_b)\n if i == j:\n assert pytest.approx(tanimototo, 0.0001) == 1\n else:\n assert tanimototo < 1", "def all_close(goal, actual, tolerance):\n #all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True", "def test_dist_itslef(self):\n X = [[0, 10], [4, 2]] # Just some points. I've no idea where on globe.\n c = cdist(X[0], X[1])\n string_geopy = '{}'.format(great_circle(X[0], X[1]))\n float_geopy = float(string_geopy[:-3])\n self.assertTrue(np.round(c) == np.round(float_geopy))\n\n X = [[34.0522, 118.2437], # Lon Angeles\n [37.7749, 122.4194]] # San Francisco\n c = cdist(X[0], X[1])\n string_geopy = '{}'.format(great_circle(X[0], X[1]))\n float_geopy = float(string_geopy[:-3])\n self.assertTrue(np.round(c) == np.round(float_geopy))", "def close_enough(poly, target_poly, eps=0.01):\n if type(target_poly) != type(poly):\n target_poly = list(target_poly)\n if len(poly) != len(target_poly):\n raise Exception(\"Incorrect number of terms\")\n for term, target_term in zip(poly, target_poly):\n if abs(term - target_term) > eps:\n print(\"At least one of the terms is not close enough\")\n return False\n return True", "def are_similar(first_coords: List[Tuple[int, int]], second_coords: List[Tuple[int, int]]) -> bool:\n # Step 1: Get angles of each triangle\n # Step 2: Compare grades of two triangles\n # Step 3: If two angles are equal then first triangle is similar to second triangle\n pass", "def all_close(goal, actual, tolerance):\n all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True", "def all_close(goal, actual, tolerance):\n all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True", "def all_close(goal, actual, tolerance):\n all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True", "def _allequal(x, rtol=1e-7, atol=1e-14):\n if len(x) == 1:\n return True\n\n for a in x[1:]:\n if np.abs(a - x[0]) > (atol + rtol * np.abs(a)):\n return False\n return True", "def hasPseudoknots(self):\n pairs = self.directed()\n seen = [] # list of pairs against which you compare each time\n pairs.sort()\n for pair in pairs:\n if not seen:\n seen.append(pair)\n else:\n lastseen_up, lastseen_down = seen[-1]\n 
while pair[0] > lastseen_down:\n seen.pop()\n if not seen:\n break\n else:\n lastseen_up,lastseen_down = seen[-1]\n if not seen:\n seen.append(pair)\n continue\n if pair[1]>lastseen_down:\n #pseudoknot found\n return True\n else:\n #good pair\n seen.append(pair)\n return False", "def get_possible_tw(self):\n ev = self.ev\n f = np.array([np.abs(a - b) for a in ev for b in ev if not np.isclose(a, b)])\n return f[~(np.triu(np.abs(f[:, None] - f) <= settings.EQ_COMPARE_TOL, 1)).any(0)]", "def approx_eq(x, y, tolerance = 0.000001):\n\treturn abs(x - y) < tolerance", "def all_close(goal, actual, tolerance):\n all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n return True", "def are_close(coord1, coord2, tolerance=10):\n return vincenty(coord1, coord2).meters < tolerance", "def checarPs(self,p1,p2):\n return abs(p1-p2) < 0.00001", "def is_close(coord_a: tuple, coord_b: tuple, mindist: float):\n return distance(coord_a, coord_b) <= mindist", "def _prediction_match(self, thermo, ref_values, eps=0.05):\n singlet_array = self._get_singlet_array(thermo)\n for cur_array, ref_array in zip(singlet_array, ref_values):\n for cur_val, ref_val in zip(cur_array, ref_array):\n if abs(cur_val - ref_val) > eps:\n return False\n return True", "def checkTupleAlmostEqualIn(tup, tupList, place):\n for T in tupList:\n length = len(tup)\n if length != len(T):\n continue\n for i in range(length):\n if type(tup[i]) is float:\n if round(tup[i], place) != round(T[i], place):\n break\n else:\n if tup[i] != T[i]:\n break\n if i == length - 1:\n return True\n return False", "def check_norms(test_norms, gold_norms, atol, rtol):\n if len(test_norms) != len(gold_norms):\n print(\"Number of timesteps do not match\", flush=True)\n return (False, 1.0e16, 1.0e16)\n\n test_pass = True\n abs_diff = 0.0\n rel_diff = 0.0\n\n for t1, t2 in zip(test_norms, gold_norms):\n adiff = abs(t1 - t2)\n rdiff = abs(t1 / t2 - 1.0)\n\n abs_diff = max(abs_diff, adiff)\n rel_diff = max(rel_diff, rdiff)\n\n if (adiff > atol) and (rdiff > rtol):\n test_pass = False\n\n return (test_pass, abs_diff, rel_diff)", "def links_with(self, other, tollerance = 0.05):\n return (\n self.start.distance_to(other.start) < tollerance or\n self.start.distance_to(other.end) < tollerance or\n self.end.distance_to(other.end) < tollerance or\n self.end.distance_to(other.start) < tollerance\n )", "def test_for_convergence(self, error_tol):\n list_of_best_indvs = []\n for island in self._islands:\n best_indv = island.best_individual()\n list_of_best_indvs.append(best_indv)\n list_of_best_indvs.sort(key=lambda x: x.fitness)\n\n best_indv = list_of_best_indvs[0]\n converged = best_indv.fitness <= error_tol\n\n self._best_indv = best_indv\n self._converged = converged\n return converged", "def dist(self, one, two):\n return sum((one[0] != two[0], one[1] != two[1]))", "def is_almost_equal(self, x ,y ,epsilon=1*10**(-8)):\n \treturn abs(x-y) <= epsilon", "def approx_eq(x, y, tolerance=1e-15):\n return abs(x - y) < tolerance", "def Q2_test():\n A, p1, p2, p3 = [0,0], [2,4], [6,5], [5,1]\n pts = [p1,p2,p3]\n dist = distances(pts, A)\n tab_dist = [[0, 4.123105625617661, 4.242640687119285, 4.47213595499958], [4.123105625617661, 0, 4.123105625617661, 7.810249675906654], 
[4.242640687119285, 4.123105625617661, 0, 5.0990195135927845], [4.47213595499958, 7.810249675906654, 5.0990195135927845, 0]]\n return np.allclose(np.array(tab_dist),np.array(dist))", "def validate_points(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\treturn (diff_y == 0 and diff_x != 0) or (diff_x == 0 and diff_y != 0) or abs(diff_x) == abs(diff_y)", "def checkValidOneMove(ndSoln):\n for i in range(len(ndSoln)-1):\n x1 = ndSoln[i][0]\n y1 = ndSoln[i][1]\n x2 = ndSoln[i+1][0]\n y2 = ndSoln[i+1][1]\n #take Euclidean distance between two consecutive moves\n #which should be approx. 1.0 if the move is valid\n if not(math.isclose(hypot(x2-x1,y2-y1),1.0)):\n return False\n return True", "def ase_cells_are_similar(ase_a, ase_b, thr=2):\n comp_similar = []\n for cell_a, cell_b in zip(ase_a.cell.flat, ase_b.cell.flat):\n comp_similar.append(round(cell_a, thr) == round(cell_b, thr))\n return all(comp_similar)", "def identical_to(self, elem):\n \n return (self.n == elem.n) and (math.fabs(self.dx - elem.dx) < 0.001) and (math.fabs(self.dy - elem.dy) < 0.001) and (math.fabs(self.dz - elem.dz) < 0.001)", "def assert_compare(x, y, atol=1e-5, method='ALL'):\n mae = 0\n mse = 0\n rmse = 0\n result = 0\n if method == 'MAE':\n mae = np.abs(x-y).mean()\n result = mae\n elif method == 'RMSE':\n rmse = np.sqrt(np.square(x - y).mean())\n result = rmse\n #result=np.sqrt(((x - y) ** 2).mean())\n elif method == 'MSE':\n mse = np.square(x - y).mean()\n result = mse\n #result=((x - y) ** 2).mean()\n else:\n mae = np.abs(x-y).mean()\n rmse = np.sqrt(np.square(x - y).mean())\n mse = np.square(x - y).mean()\n\n if result > atol or (method == 'ALL' and (mae > atol or rmse > atol or mse > atol)):\n f = six.StringIO()\n f.write(\n 'assert_compare failed: \\n' +\n ' atol: {} \\n'.format(atol) +\n ' method: {}\\n'.format(method) +\n ' MAE: {}\\n'.format(mae) +\n ' MSE: {}\\n'.format(mse) +\n ' RMSE: {}\\n'.format(rmse) +\n ' shape: {} {}\\n'.format(x.shape, y.shape) +\n ' dtype: {} {}\\n'.format(x.dtype, y.dtype))\n if x.shape == y.shape:\n xx = x if x.ndim != 0 else x.reshape((1,))\n yy = y if y.ndim != 0 else y.reshape((1,))\n err = np.abs(xx - yy)\n i = np.unravel_index(np.argmax(err), err.shape)\n f.write(\n ' i: {}\\n'.format(i) +\n ' x[i]: {}\\n'.format(xx[i]) +\n ' y[i]: {}\\n'.format(yy[i]) +\n ' err[i]: {}\\n'.format(err[i]))\n opts = np.get_printoptions()\n try:\n np.set_printoptions(threshold=10000)\n f.write('x: ' + np.array2string(x, prefix='x: ') + '\\n')\n f.write('y: ' + np.array2string(y, prefix='y: ') + '\\n')\n finally:\n np.set_printoptions(**opts)\n logging.warning(f.getvalue())\n return False\n else:\n return True", "def permissible(e1, e2):\n return e1[1] == e2[0] and \\\n total_edge_length(e1, e2) < maximum_distance and \\\n total_edge_angle(e1, e2) < maximum_angle_delta", "def brute_force(savedPnts, unitRadius, point):\n for pnt in savedPnts:\n d = distance(pnt, point)\n if d < unitRadius: return False\n return True", "def almost_eq(e1,e2) :\n\treturn round(e1-e2,4) == 0.0", "def test_ilsr_pairwise():\n for case in iter_testcases('pairwise'):\n n_items = case[\"n_items\"]\n data = case[\"data\"]\n assert np.allclose(\n case[\"ml_est\"], ilsr_pairwise(n_items, data),\n atol=ATOL, rtol=RTOL)", "def test_EstimateDistances_fromUnaligned(self):\n d = EstimateDistances(self.collection, JC69(), do_pair_align=True,\n rigorous_align=True)\n d.run()\n canned_result = {('b', 'e'): 0.440840,\n ('c', 'e'): 0.440840,\n ('a', 'c'): 0.088337,\n ('a', 'b'): 0.188486,\n ('a', 'e'): 0.440840,\n 
('b', 'c'): 0.0883373}\n result = d.getPairwiseDistances()\n self.assertDistsAlmostEqual(canned_result, result)\n \n d = EstimateDistances(self.collection, JC69(), do_pair_align=True,\n rigorous_align=False)\n d.run()\n canned_result = {('b', 'e'): 0.440840,\n ('c', 'e'): 0.440840,\n ('a', 'c'): 0.088337,\n ('a', 'b'): 0.188486,\n ('a', 'e'): 0.440840,\n ('b', 'c'): 0.0883373}\n result = d.getPairwiseDistances()\n self.assertDistsAlmostEqual(canned_result, result)", "def is_stable(p1, p2, p3, tol=0.001):\n p = Point(0, 0, 0)\n u = vector_from_to(p1, p2)\n v = vector_from_to(p1, p3)\n n = cross(u, v)\n w = vector_from_to(p1, p)\n n2 = dot(n, n)\n beta = dot(cross(u, w), n) / n2\n gamma = dot(cross(w, v), n) / n2\n alpha = 1 - gamma - beta\n # then coordinate of the projected point (p_) of point p\n # p_ = alpha * p1 + beta * p2 + gamma * p3\n min_val = -tol\n max_val = 1 + tol\n cond1 = min_val <= alpha <= max_val\n cond2 = min_val <= beta <= max_val\n cond3 = min_val <= gamma <= max_val\n return cond1 and cond2 and cond3", "def Q4_test():\n chemin = [3,2,1,0]\n tab_dist = [[0, 4.123105625617661, 4.242640687119285, 4.47213595499958], [4.123105625617661, 0, 4.123105625617661, 7.810249675906654], [4.242640687119285, 4.123105625617661, 0, 5.0990195135927845], [4.47213595499958, 7.810249675906654, 5.0990195135927845, 0]]\n d = longueur(chemin, tab_dist)\n return (d > 13.34523076) and (d < 13.34523077)", "def is_perfect_square():", "def is_close(x, y, thresh=1e-8):\n\n diff = x - y\n return diff > (-thresh) and diff < thresh", "def test_perform_pairwise_tests_single_comp(self):\r\n # Verified with R's t.test function.\r\n exp = [['foo', 'bar', -6.5999999999999996, 0.0070804795641244006,\r\n 0.0070804795641244006, 0.100000000001, 0.10000000000001]]\r\n np.random.seed(self.value_for_seed)\r\n obs = _perform_pairwise_tests(self.labels1, self.dists1, 'two-sided',\r\n 999)\r\n self.compare_multiple_level_array(obs, exp)", "def test_nearest_location_adjacent():\n locations = [(1, 3), (3, 5)]\n\n assert nearest_location(locations, 2) == 0\n assert nearest_location(locations, 3) == 1", "def _check_approx_fixed_point(V_current, V_previous, tol):\n\n # Compute the sup norm between `V_current` and `V_previous`\n sup_norm = np.max(np.abs(V_current - V_previous))\n\n # Algorithm termination condition\n fp = sup_norm <= tol\n\n return fp, sup_norm", "def test_stations_by_distance():\n station_list = build_station_list()\n #test for stations closest to cambridge city coordinates\n station_list_sort = stations_by_distance(station_list, (52.2053, 0.1218))\n output = [(station.name, distance) for (station, distance) in station_list_sort]\n for n in range(1, len(station_list)):\n #make sure that the distance of the previous station to the point is less than the next one in the list\n assert output[n-1][1] <= output[n][1]", "def _raise_assert_on_np_is_close_all(self, np0, np1):\r\n\r\n return self.assertTrue(np.isclose(np0, np1).all())", "def _check_normalization(self):\n lastDistance = None\n distance = None\n for idx in xrange(len(self) - 1):\n distance = self[idx+1][0] - self[idx][0]\n\n # first run\n if lastDistance is None:\n lastDistance = distance\n continue\n\n if lastDistance != distance:\n return False\n\n lastDistance = distance\n\n return True", "def curvesSimilar(t1, y1, t2, y2, tol):\n # Make synchornized version of t2,y2 called t2sync,y2sync.\n t2sync=[]\n y2sync=[]\n for timepoint1 in t1:\n (index, timepoint2)=getNearestTime(timepoint1, t2sync)\n t2sync.append(timepoint2)\n 
y2sync.append(y2[index])\n\n # Get R^2 value equivalent:\n normalizedError=[(y1[x]-y2sync[x])**2/y1[x]**2 for x in range(len(y1))]/len(y1)\n\n if normalizedError > tol:\n return False\n else: \n return True", "def findUncertainPairs(field_distances, data_model, bias=0.5):\n\n probability = core.scorePairs(field_distances, data_model)\n\n p_max = (1.0 - bias)\n logger.info(p_max)\n\n informativity = numpy.copy(probability)\n informativity[probability < p_max] /= p_max\n informativity[probability >= p_max] = (1 - probability[probability >= p_max])/(1-p_max)\n\n\n return numpy.argsort(-informativity)", "def test_ppt_distinguishability_werner_hiding_pairs():\n dim = 2\n sigma_0 = (np.kron(np.identity(dim), np.identity(dim)) + swap_operator(dim)) / (dim * (dim + 1))\n sigma_1 = (np.kron(np.identity(dim), np.identity(dim)) - swap_operator(dim)) / (dim * (dim - 1))\n\n states = [sigma_0, sigma_1]\n\n expected_val = 1 / 2 + 1 / (dim + 1)\n\n primal_res = ppt_distinguishability(states, probs=None, dist_method=\"min-error\", strategy=True)\n dual_res = ppt_distinguishability(states, probs=None, dist_method=\"min-error\", strategy=False)\n\n np.testing.assert_equal(np.isclose(primal_res, expected_val, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, expected_val, atol=0.001), True)\n\n primal_res = ppt_distinguishability(\n states, probs=None, dist_method=\"unambiguous\", strategy=True\n )\n dual_res = ppt_distinguishability(states, probs=None, dist_method=\"unambiguous\", strategy=False)\n\n np.testing.assert_equal(np.isclose(primal_res, 1 / 3, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, 1 / 3, atol=0.001), True)", "def _similar_dist(geo, geoi, arg=3e-1):\n return almost_equal_dist_matrix(geo, geoi, thresh=arg)", "def topology_error(self, test_vectors):\n if not isinstance(test_vectors, collections.Iterable):\n test_vectors = [test_vectors]\n\n def are_bmus_neighbours(test_vector):\n bmu = self.bmu(test_vector)\n nodes_wo_bmu = (node for node in self.codebook if node is not bmu)\n bmu2 = min(nodes_wo_bmu, key=lambda x: x.distance_sq(test_vector))\n return self.codebook.are_neighbours(bmu, bmu2)\n\n return (sum(not(are_bmus_neighbours(vec)) for vec in test_vectors) /\n len(test_vectors))", "def test_EstimateDistances_fromThreeway(self):\n d = EstimateDistances(self.al, JC69(), threeway=True)\n d.run()\n canned_result = {('b', 'e'): 0.495312,\n ('c', 'e'): 0.479380,\n ('a', 'c'): 0.089934,\n ('a', 'b'): 0.190021,\n ('a', 'e'): 0.495305,\n ('b', 'c'): 0.0899339}\n result = d.getPairwiseDistances(summary_function=\"mean\")\n self.assertDistsAlmostEqual(canned_result, result)", "def almost_equals(self, other):\n import math\n ox, oy = other\n dx = self[0] - ox\n dy = self[1] - oy\n return (dx*dx + dy*dy) < pygonal.EPSILON2", "def test_nearest_location_even():\n assert nearest_location([(3, 6), (8, 13)], 6, 0) == 0\n assert nearest_location([(3, 6), (8, 13)], 6, 1) == 0\n assert nearest_location([(3, 6), (8, 13)], 7, 0) == 1\n assert nearest_location([(3, 6), (8, 13)], 7, 1) == 1", "def testOddNumberOfGeos(self):\n add_geo = pd.DataFrame({\n 'date':\n pd.to_datetime(\n ['2019-01-01', '2019-10-01']),\n 'geo': [5, 5],\n 'response': [10, 20],\n 'spend': [1, 1.5]\n })\n\n pretest_data = pd.concat([self.test_data, add_geo], sort=False)\n test_class = TrimmedMatchGeoXDesign(\n GeoXType.HEAVY_UP,\n pretest_data=pretest_data,\n time_window_for_design=self.design_window,\n time_window_for_eval=self.evaluation_window,\n matching_metrics={'response': 1.0})\n 
test_class.create_geo_pairs(use_cross_validation=False)\n test_class.create_geo_level_eval_data()\n expected_geo_data = [\n pd.DataFrame({\n 'geo': [1, 3, 2, 4],\n 'pair': [1, 1, 2, 2],\n 'response': [2, 2, 5, 4],\n 'spend': [1.5, 1.5, 2.5, 6]\n }),\n pd.DataFrame({\n 'geo': [2, 4],\n 'pair': [2, 2],\n 'response': [5, 4],\n 'spend': [2.5, 6]\n })\n ]\n for ind in range(len(expected_geo_data)):\n self.assertTrue(test_class.geo_level_eval_data[ind].sort_index(\n axis=1).equals(expected_geo_data[ind]))\n expected_pairs = [\n pd.DataFrame({\n 'geo1': [1, 2],\n 'geo2': [3, 4],\n 'distance': [0.0, 0.0],\n 'pair': [1, 2]\n }),\n pd.DataFrame({\n 'geo1': [2],\n 'geo2': [4],\n 'distance': [0.0],\n 'pair': [2]\n })\n ]\n for ind in range(len(expected_pairs)):\n self.assertTrue(test_class.pairs[ind].equals(expected_pairs[ind]))", "def test_euclidean_distance(self):\n knn = Knn(n_neighbors=3)\n knn.fit(np.array(little_X), little_Y)\n d = knn._euclidean_distance(np.array([5, 6]))\n assert (d == [5,5]).all(), \"Euclidean Distance is not correct\"", "def all_equal(x, y, eps=None):\n if eps:\n return all([abs(i - j) <= eps\n for i, j in zip(x, y)\n if i is not None and j is not None])\n return all([i == j for i, j in zip(x, y)])", "def test_positivity(alpha, dists, divergence):\n for dist1, dist2 in combinations(dists, 2):\n assert divergence(dist1, dist2, alpha) > 0", "def almost_equal_values(x, y, precision):\n return round(x - y, precision) == 0", "def check_location_confidence(self):\n\t\t## not the best way of doing things, but since the number of targets is fairly small its not a big deal\n\t\tepsilon_pixels = .05 * self.horizontal_resolution #arbitrary confidence factor\n\t\tepsilon_meters = .08\n\t\tpixel_distances = []\n\t\tactual_distances = []\n\t\tnum_observed = 0\n\t\tfor ti in self.targs:\n\t\t\tif ti.props_are_set:\n\t\t\t\tfor tj in self.targs:\n\t\t\t\t\tif tj.props_are_set: \n\t\t\t\t\t\tpixel_dist = np.linalg.norm(tj.position_camera - ti.position_camera)\n\t\t\t\t\t\tactual_dist = np.abs(tj.d_cam_image - ti.d_cam_image)\n\t\t\t\t\t\tif pixel_dist == 0:\n\t\t\t\t\t\t\tpixel_dist = 10000 #ignore two of the same points\n\t\t\t\t\t\t\tactual_dist = 10000\n\t\t\t\t\t\tpixel_distances.append(pixel_dist)\t\n\t\t\t\t\t\tactual_distances.append(actual_dist)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpixel_distances.append(10000)\n\t\t\t\t\t\tactual_distances.append(10000)\n\t\t\telse:\n\t\t\t\tfor _ in self.targs:\n\t\t\t\t\tpixel_distances.append(10000)\n\t\t\t\t\tactual_distances.append(10000)\n\t\tmin_ind_pixel = np.argmin(pixel_distances)\n\t\tmin_ind_actual = np.argmin(actual_distances)\n\t\t#min_ind is encoded in base (num_targets); decode it to find the closest two points\n\t\tbest_guys = [self.targs[min_ind_pixel/len(self.targs)],self.targs[min_ind_pixel%len(self.targs)]]\n\t\tif pixel_distances[min_ind_pixel] > epsilon_pixels or actual_distances[min_ind_actual] > epsilon_meters:\n\t\t\t#measurements are not trustworthy, return nothing\n\t\t\treturn None\n\n\t\treturn best_guys", "def torch_the_same(X, Y, eps=1e-8):\n return (X - Y).abs().min() < eps", "def test_attometers_validate_list(self):\n attometers = inches_to.attometers([1.0, 2.0, 3.0, 4.0])\n comparison = np.array([2.54e16, 2*2.54e16, 3*2.54e16, 4*2.54e16])\n\n try:\n for i in range(len(comparison)):\n self.assertTrue(math.isclose(attometers[i], comparison[i], rel_tol=self.accepted_error))\n print('{:.40s}{}'.format(sys._getframe().f_code.co_name + self.padding, self.passed))\n except AssertionError:\n 
print('{:.40s}{}'.format(sys._getframe().f_code.co_name + self.padding, self.failed))", "def check_pointing_pair(self):\n\n for index in range(self.board_size):\n squ = self.squares[index]\n nos = self.get_numbers([self.possibles[cell[0]][cell[1]] for cell in squ])\n\n for num in nos:\n s_row, s_col, found = self.same_row_col(num, squ)\n if s_row:\n row = found[0][0]\n for c in range(self.board_size):\n if (row, c) not in squ:\n if num in self.possibles[row][c]:\n self.possibles[row][c].remove(num)\n if s_col:\n col = found[0][1]\n for r in range(self.board_size):\n if (r, col) not in squ:\n if num in self.possibles[r][col]:\n self.possibles[r][col].remove(num)", "def _is_converged(self):\n if self._last_operating_point is None:\n return False\n\n # Tolerance for comparing operating points. If all states changes\n # within this tolerance in the Euclidean norm then we've converged.\n TOLERANCE = 1e-4\n for ii in range(self._horizon):\n last_x = self._last_operating_point[0][ii]\n current_x = self._current_operating_point[0][ii]\n\n if np.linalg.norm(last_x - current_x) > TOLERANCE:\n return False\n\n return True", "def contains(self, points, abs_tol=ABS_TOL):\n test = self.A.dot(points) - self.b[:, np.newaxis] < abs_tol\n return np.all(test, axis=0)", "def __eq__(self, line):\n \n return abs( 1 - np.dot(sm.unitvec(self.vec), sm.unitvec(line.vec))) < 10*_eps", "def all_equals(solution1, solution2, tol=0.001):\n tokens1 = solution1.split()\n tokens2 = solution2.split()\n\n for token1, token2 in zip(tokens1, tokens2):\n if not equals(token1, token2, tol=tol):\n print(token1, token2)\n return False\n\n return True", "def problem1(self, s):\n\n # Test with good inputs (4 points)\n x = np.array([1, 2])\n y = np.array([2, 2])\n points = self.numTest(euclidean_metric(x,y), s.euclidean_metric(x,y),\n \"\\n\\teuclidean_metric() failed.\")\n \n x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n y = np.array([2, 6, 4, 8, 0, 2, 4, 7, 5, 11])\n points += self.numTest(euclidean_metric(x,y), s.euclidean_metric(x,y),\n \"\\n\\teuclidean_metric() failed.\")\n \n x = (np.random.random(100)-.5)*200\n y = (np.random.random(100)-.5)*200\n points += self.numTest(euclidean_metric(x,y), s.euclidean_metric(x,y),\n \"\\n\\teuclidean_metric() failed.\")*2\n \n # Test with bad inputs (1 point)\n x = np.array([1, 2])\n y = np.array([1, 2, 3])\n try:\n s.euclidean_metric(x, y)\n self.feedback += \"\\n\\teuclidean_metric() failed to raise a \"\n self.feedback += \"ValueError for vectors of different lengths\"\n except:\n points += 1\n\n return points", "def almost_equal(x, y):\n return abs(x-y) < FP_PREC", "def Q8_test():\n dispo = [False, True, True, False]\n tab_dist = [[0, 4.123105625617661, 4.242640687119285, 4.47213595499958], [4.123105625617661, 0, 4.123105625617661, 7.810249675906654], [4.242640687119285, 4.123105625617661, 0, 5.0990195135927845], [4.47213595499958, 7.810249675906654, 5.0990195135927845, 0]]\n return 1 == indice(0, tab_dist, dispo)" ]
[ "0.6715366", "0.65444267", "0.65051025", "0.64619166", "0.63805026", "0.6331604", "0.6313639", "0.6308023", "0.62470406", "0.61799294", "0.61504745", "0.6149808", "0.6135852", "0.6134906", "0.61283964", "0.6122861", "0.6102767", "0.6097179", "0.60936743", "0.60933644", "0.6086768", "0.6084359", "0.6080711", "0.6070554", "0.606459", "0.6062066", "0.60617214", "0.60617214", "0.60432976", "0.59977156", "0.599466", "0.59828186", "0.5969638", "0.5964046", "0.59568256", "0.59560895", "0.5939316", "0.59375906", "0.59375906", "0.59375906", "0.59352183", "0.59302425", "0.5927932", "0.59070647", "0.59033775", "0.58986", "0.5885489", "0.58745354", "0.58692914", "0.58692235", "0.5867615", "0.5857003", "0.58499503", "0.5848189", "0.5834997", "0.5831397", "0.5828356", "0.58243376", "0.58093715", "0.5803429", "0.5802601", "0.5797517", "0.5790618", "0.57806265", "0.5779529", "0.57772374", "0.57660466", "0.5757813", "0.57472646", "0.57448435", "0.57441545", "0.57399166", "0.57328755", "0.5731807", "0.5727554", "0.5726247", "0.57176185", "0.5717157", "0.57167166", "0.57097393", "0.5706283", "0.5705834", "0.5691668", "0.56886244", "0.5687911", "0.5687882", "0.5686446", "0.56794775", "0.5677767", "0.5670185", "0.56688994", "0.56654704", "0.56627375", "0.5661558", "0.56613433", "0.56506383", "0.56502634", "0.5646791", "0.5645936", "0.5639304", "0.5636823" ]
0.0
-1
Takes a list of tuples, and each element is compared to the next one. Any tuple element that changes has its index returned.
def ContinuousCompare(lst, tol):
    changing_indices = set()
    last_tup = None
    # iterate over all the tuples
    for i in range(len(lst)):
        # if it's the first entry, we just want to assign it and move onto the
        # next iteration
        if i == 0:
            last_tup = lst[i]
            continue
        else:
            tup = lst[i]
            # remove the indices already found to change so we don't keep testing
            # them
            indices_left_to_check = set(range(len(tup))) - changing_indices
            for j in indices_left_to_check:
                if (tup[j] - last_tup[j]).magnitude > tol:
                    # if it changes, add it to the list
                    changing_indices.add(j)
            last_tup = tup

    return changing_indices
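A minimal usage sketch (illustrative only, not part of the original record). Because the function calls .magnitude on each difference, the tuple entries are assumed here to be pint-style Quantity objects; the sample values are invented:

import pint

ureg = pint.UnitRegistry()
m = ureg.meter

samples = [
    (1.0 * m, 5.0 * m),
    (1.0 * m, 5.3 * m),   # index 1 moves by 0.3 m
    (1.2 * m, 5.3 * m),   # index 0 moves by 0.2 m
]

print(ContinuousCompare(samples, tol=0.1))  # {0, 1}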
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task9_find_before_tuple(lst):\n result = 0\n for elem in lst:\n if isinstance(elem, tuple):\n result = lst[lst.index(elem) - 1]\n break\n return result", "def innerloop(the_list):\n for index in range(len(the_list)-1):\n if the_list[index] > the_list[index+1]:\n smaller = the_list[index+1]\n bigger = the_list[index]\n the_list[index] = smaller\n the_list[index+1] = bigger\n\n return tuple(the_list)", "def onlyonechanged(pair):\r\n\r\n l1 = pair[0]\r\n l2 = pair[1]\r\n res = [ x != y for (x,y) in zip(l1, l2)]\r\n\r\n if sum(res)==1:\r\n ret_res=(sum(res)==1)\r\n ret_pos=[i for i, e in enumerate(res) if e != 0]\r\n else :\r\n ret_res=False\r\n ret_pos=[0]\r\n \r\n return ret_res, ret_pos[0]", "def check_for_tuple_matches(tuples, tuple):\r\n category = tuple[1]\r\n score = tuple[2]\r\n tup_ind = -1\r\n for t in xrange(0, len(tuples)):\r\n if tuples[t][1] == category and tuples[t][2] == score:\r\n tup_ind = t\r\n break\r\n\r\n if tup_ind == -1:\r\n tuples.append([0, category, score, [], []])\r\n tup_ind = len(tuples) - 1\r\n return tuples, tup_ind", "def add_unique_tuple_to_list(a_list, a_tuple):\n for i, test_tuple in enumerate(a_list):\n if test_tuple[:-1] == a_tuple[:-1]:\n a_list[i] = a_tuple\n break\n else:\n a_list.append(a_tuple)", "def index(l_: List[int], i: Tuple[int, ...]) -> Tuple[int, ...]:\n return tuple([l_[x] for x in i])", "def get_pairs(my_list):\n return [(current, my_list[idx + 1] if - 1 else None) for idx, current in enumerate(my_list) if idx < len(my_list) - 1]", "def apply(self):\n next_one = super().apply()\n next_both = set()\n\n for tup in next_one:\n if (tup[1], tup[0]) in next_one:\n next_both.add(tup)\n\n return list(next_both)", "def handle_pairs(truth, pred, first_ix):\n next_ix = first_ix\n while next_ix < len(truth) and truth[next_ix] == truth[first_ix]:\n next_ix += 1\n pairs = len(times_to_compare) * (next_ix - first_ix)\n correct = 0\n tied = 0\n for i in range(first_ix, next_ix):\n rank, count = times_to_compare.rank(pred[i])\n correct += rank\n tied += count\n\n return (pairs, correct, tied, next_ix)", "def is_next_to(index, alist):\n if not index == 0 and alist[index - 1] == alist[index]:\n return True\n elif not index >= len(alist) - 1 and alist[index + 1] == alist[index]:\n return True\n else:\n return False", "def pairwise(lst):\r\n if not lst: return\r\n\r\n for i in range(len(lst)-1):\r\n yield lst[i], lst[i+1]\r\n yield lst[-1], None", "def unpack_all_equal_tuple(t):\n if not isinstance(t, tuple):\n return t\n\n assert all(x == t[0] for x in t)\n return t[0]", "def palindromePairs(lst):\n results = []\n for i, e1 in enumerate(lst):\n for j, e2 in enumerate(lst):\n if i != j:\n if isPalindrome(e1+e2):\n results.append((i, j))\n return results", "def _handle_pairs(truth, pred, first_ix, times_to_compare):\n next_ix = first_ix\n while next_ix < len(truth) and truth[next_ix] == truth[first_ix]:\n next_ix += 1\n pairs = len(times_to_compare) * (next_ix - first_ix)\n correct = np.int64(0)\n tied = np.int64(0)\n for i in range(first_ix, next_ix):\n rank, count = times_to_compare.rank(pred[i])\n correct += rank\n tied += count\n\n return (pairs, correct, tied, next_ix)", "def mysort(lst: List[T], compare: Callable[[T, T], int]) -> List[T]:\n temp = lst\n switched = True\n while switched:\n switched = False\n for i in range(len(temp) - 1):\n if compare(temp[i], temp[i + 1]) == 1:\n temp[i], temp[i + 1] = temp[i + 1], temp[i]\n switched = True\n\n return temp", "def secondSmallest(lstItems):\n \n if len(lstItems) == 0:\n return -1;\n \n index = 
0#current index in the list\n returnNext = False;#return the next occurence of a value equal to valSmallest\n valSmallest = lstItems[0]#just declaring and initializing valSmallest\n\n for val in lstItems:\n if returnNext == True and val == valSmallest:\n return index;\n\n if numSmaller(lstItems, val) == 0 and len(occurrences(lstItems, val)) > 1:\n returnNext = True;\n valSmallest = val\n elif numSmaller(lstItems, val) == 1:\n iSecondSmallest = index;\n index+= 1\n\n return iSecondSmallest;", "def listscan(list):\n n = len(list) #Assign length of list to variable n\n x = 0 #Position variable. Start at beginning of list\n for rep in range(n - 1): #Perform scanning operation for length of list. Subtract one due to pairing\n if list[x] > list[x + 1]: #Compare values of selected index, and that ahead of it, this happens if list out of order\n return True\n elif list[x] <= list[x + 1] and x + 1 < n - 1: #If values not out of order, move forward, and continue scan\n x = x + 1\n else: #Base case. Once scan is through, exit\n print(list)\n return False", "def runs(lst):\n for j, two in enumerate(lst):\n if j == 0:\n one, i = two, 0\n if one != two:\n yield j - i, one\n i = j\n one = two\n yield j - i + 1, two", "def changing_seq(lst):\n\tseq_idx = -1\n\tif lst[0] > lst[1]:\n\t\tidx = 0 \n\t\twhile idx < len(lst) - 1:\n\t\t\tif lst[idx] < lst[idx + 1]:\n\t\t\t\tseq_idx = idx\n\t\t\t\tbreak\n\t\t\tidx += 1\n\n\telif lst[0] < lst[1]:\n\t\tidx = 0 \n\t\twhile idx < len(lst) - 1:\n\t\t\tif lst[idx] > lst[idx + 1]:\n\t\t\t\tseq_idx = idx\n\t\t\t\tbreak\n\t\t\tidx += 1\n\treturn seq_idx", "def isort(my_list):\n comparision_count = 0\n for index in range(len(my_list)-1):\n # Picking for each Number\n comparision_count = comparision_count + 1\n if my_list[index+1] < my_list[index]:\n # moving the element\n my_list[index + 1], my_list[index] = my_list[index], my_list[index + 1]\n newposition = index\n\n # Comparing the changed element with the already sorted list\n for i in range(index,0,-1):\n comparision_count = comparision_count + 1\n if my_list[newposition-1] > my_list[i]:\n # moving the element\n my_list[i] , my_list[newposition-1] = my_list[newposition-1] , my_list[i]\n newposition = i-1 # remembering the new position\n else:\n # Since it is a sorted list, breaking loop if condition fails atleast once\n break\n return (my_list , comparision_count)", "def SortTupleList(TupleList):\r\n SortedList = []\r\n for i in range(0, len(TupleList)):\r\n Pivot = len(SortedList) / 2\r\n while TupleList[i] < SortedList(Pivot):\r\n pass", "def replace_in(tuple1, item, rep):\n if tuple1 is None:\n return None\n f = []\n for t in tuple1:\n e = list(t)\n for i in xrange(len(e)):\n if e[i] == item:\n e[i] = rep\n f.append(tuple(e))\n\n return f", "def find_indices(li, first_elt, second_elt):\r\n index1, index2 = li.index(first_elt), li.index(second_elt)\r\n if index1 == index2:\r\n index2 = index1 + 1 + li[index1+1:].index(second_elt)\r\n if index1 > index2:\r\n index1, index2 = index2, index1\r\n return (index1+1, index2+1)", "def tuple_has_duplicates(my_tuple):\n\n duplicates = []\n for i in my_tuple:\n if my_tuple.count(i) > 1 and i not in duplicates:\n duplicates.append(i)\n counter = 0\n # for k in my_tuple:\n # if i == k:\n # counter += 1\n # if counter > 1 and i not in duplicates:\n # duplicates.append(i)\n if duplicates:\n return duplicates\n else:\n return False", "def around(elements):\n previous, current = None, None\n for next_item in elements:\n if current:\n yield previous, current, next_item\n previous, 
current = current, next_item\n if current:\n yield previous, current, None", "def nv_tuple_list_replace(l, v):\n _found = False\n for i, x in enumerate(l):\n if x[0] == v[0]:\n l[i] = v\n _found = True\n\n if not _found:\n l.append(v)", "def pairs(lst):\n i = iter(lst)\n prev = next(i)\n for item in i:\n yield prev, item\n prev = item", "def _next_hit(self, elements):\n hits = ((item, item.next_hit(self._ray)) for item in elements)\n try:\n return sorted(\n [(hit[0], hit[1], item) for item, hit in hits if hit is not None])[0]\n except IndexError:\n return None", "def sort_by_return(list_of_tuples):\n list_of_tuples = sorted(list_of_tuples, key=lambda item: item[0])\n left_side = list_of_tuples[0:2]\n right_side = list_of_tuples[2:4]\n left_side = sorted(left_side, key=lambda item: item[1])\n right_side = sorted(right_side, key=lambda item: item[1])\n result = left_side + right_side\n return result", "def slow_closest_pair(cluster_list):\n cl_copy = cluster_list[:]\n output_list = [99999, -1, -1]\n\n for index1 in range(len(cl_copy)):\n \tfor index2 in range(1,len(cl_copy)):\n \t\ttemp_dist = pair_distance(cl_copy, index1, index2)\n \t\tif output_list[0] > temp_dist[0] and index1 != index2:\n \t\t\toutput_list = [temp_dist[0], temp_dist[1], temp_dist[2]] \t\n return tuple(output_list)", "def pairwise(iterable):\n previous, current = None, None\n \n for current in iterable:\n if previous:\n yield previous, current\n previous = current\n if current:\n yield current, None", "def sortTuple(lstTuples, element):\n\n lstTuples.sort(key=lambda x: x[element-1])\n return lstTuples", "def compare_list(first, second):\n temp1 = first\n temp2 = second\n while temp1 and temp2:\n if temp1.data == temp2.data:\n temp1 = temp1.next\n temp2 = temp2.next\n else:\n return False\n if not temp1 and not temp2:\n return True\n return False", "def remove_adjacent(some_list):\n # This function will reduce element that have the same value next to it to single element.\n bucket = []\n for i in range(len(some_list)):\n try:\n #print(\"{0:>3}-{1:<3}\".format(f\"{some_list[i]}\",f\"{some_list[i+1]}\"),end=\"\")\n if some_list[i] == some_list[i+1]:\n bucket.append(some_list[i])\n #print(\"same!!\",end=\"\")\n except:\n pass\n #print(\"\")\n for j in bucket:\n some_list.remove(j)\n return some_list", "def stagger_tuple(elements_list, initial=None):\n res = []\n previous_element = initial\n for element in elements_list:\n if previous_element is not None:\n res.append((previous_element, element))\n previous_element = element\n return res", "def closest_coord(self, list, coord):\n\n closest = (0,0)\n second_closest = (0,0)\n for c in list:\n if self.distance(c, coord) < self.distance(closest, coord) and (c != coord):\n second_closest = closest\n closest = c\n #print(closest, coord)\n return (closest, second_closest)", "def condense_matches(matches: List[Tuple[int, ...]]) -> List[Tuple[int, ...]]:\n new_matches = []\n for match in matches:\n if match not in new_matches and tuple(reversed(match)) not in new_matches:\n new_matches.append(match)\n return new_matches", "def bubble_sort(data_list_or_tuple):\n data_list = list(data_list_or_tuple)\n for count, _ in enumerate(data_list, 1):\n for x in range(len(data_list)-count):\n if data_list[x] > data_list[x+1]:\n data_list[x], data_list[x+1] = data_list[x+1], data_list[x]\n return data_list", "def closest_value_index(val, lst):\n index = 0\n for item in lst:\n if item > val:\n return index\n index += 1\n return index-1", "def mini(lst, key=lambda x: x):\n best, besti = lst[0],0\n 
for i in xrange(1,len(lst)): \n if key(lst[i]) < key(best):\n best, besti = lst[i], i\n return best,besti", "def find_two_smallest(L):\n\n # Get a sorted copy of the list so that the two smallest items are at the\n # front\n temp_list = sorted(L)\n smallest = temp_list[0]\n next_smallest = temp_list[1]\n\n # Find the indices in the original list L\n min1 = L.index(smallest)\n min2 = L.index(next_smallest)\n\n return (min1, min2)", "def index(x, t):\n if x < t[0]:\n return 0\n\n for i in range(len(t) - 1):\n if t[i] <= x < t[i + 1]:\n return i\n\n return len(t) - 2", "def matchloc(alist,val): \n return [ilc for ilc,jlc in enumerate(alist) if jlc==val]", "def pairwise(s: List[Any]) -> Iterator[Tuple[Any, Any]]:\n\n a, b = itertools.tee(s)\n next(b, None)\n return zip(a, b)", "def changeTupleItem(tData,idx,newItem):\r\n list_data=list(tData)\r\n list_data[idx]=newItem\r\n new_Tuble=tuple(list_data)\r\n \r\n return new_Tuble", "def pairs(iterable):\n previous = None\n for item in iterable:\n current = item\n if previous is not None:\n yield previous, current\n previous = current", "def previous_and_next(all_items: Iterable) -> Iterable:\n previous_items, items, next_items = tee(all_items, 3)\n previous_items = chain([None], previous_items)\n next_items = chain(islice(next_items, 1, None), [None])\n return zip(previous_items, items, next_items)", "def getdifference(triplet_old,triplet_new):\r\n for i in range(0,3):\r\n if (triplet_new[i]!=triplet_old[i]):\r\n \r\n return (triplet_new[i],triplet_old[i],i)", "def all_pairs(items, sort=False):\n if sort:\n items = sorted(items)\n for i, ni in enumerate(items):\n for j, nj in enumerate(items):\n if j > i: yield ni, nj", "def neighbours(assignment): \n for index_1, index_2 in itertools.combinations(range(len(assignment)), 2):\n new_assign = list(assignment)\n new_assign[index_1], new_assign[index_2] = new_assign[index_2], new_assign[index_1]\n yield tuple(new_assign)", "def enumerate(x) -> List[Tuple[int, any]]:\n pass", "def check_object_repeated(lists, obj):\n for any_obj in lists:\n if check_tuples(any_obj['indexes'], obj['indexes']):\n return None\n return obj", "def mysort(lst: List[T], compare: Callable[[T, T], int]) -> List[T]:\n for i in range(1, len(lst)): #loops through each element starting at the second one\n for j in range(i, 0, -1): #loops through each element coming before i starting at i and going backwards\n if compare(lst[j], lst[j-1]) < 0: #checks to see if the previous element is smaller than the current (by saying <0 we keep the sort stable as well)\n lst[j], lst[j-1] = lst[j-1], lst[j] #if they are, we switch them\n else:\n break #if they are not, we know that the element is in its proper place\n return lst", "def remove_adjacent(list):\n a = []\n for item in list:\n if len(a):\n if a[-1] != item:\n a.append(item)\n else: a.append(item) \n return a", "def pairs(lst):\r\n\tfor i in range(1, len(lst), 2):\r\n\t\tyield lst[i-1], lst[i]", "def ifidentity(x):\n for idx,val in enumerate(x):\n if idx+ 1 != val:\n return False\n return True", "def yield2(l):\n\n l = list(l)\n\n for x in range(0,len(l),2):\n try:\n yield [l[x],l[x+1]]\n except IndexError:\n yield [l[x],None]", "def iter_tuple_quick_sort(list, max, min):\n #push min and max indices onto a stack\n stack = LinkedList()\n stack.push(min)\n stack.push(max)\n\n #keep pushing min and max indices onto stack to\n #iteratively sort those lists\n while not stack.is_empty():\n #pop the next min and max indices to partition\n max = stack.pop()\n min = stack.pop()\n\n #partition the 
list and find the next pivot\n pivot_index = tuple_partition(list, max, min)\n\n #if there's more than one element in the list\n #check if LHS needs to be partitioned\n if pivot_index - 1 >= min:\n stack.push(min)\n stack.push(pivot_index - 1)\n #check if RHS needs to be partitioned\n if pivot_index + 1 <= max:\n stack.push(pivot_index + 1)\n stack.push(max)", "def find_pairs_in_list(arr, k, l):\n i = 0\n j = 0\n n = len(arr)\n container = Container()\n container.inc(arr[i])\n\n while j < n and i <= j:\n\n values = set()\n for m in range(l+1):\n values.add(arr[i] + m)\n values.add(arr[i] - m)\n\n for value in values:\n if (container.count(value) > 0 and value != arr[i]) or \\\n (container.count(arr[i]) > 1):\n return True\n\n if j < n:\n j += 1\n container.inc(arr[j])\n if j - i > k or j == n-1:\n container.dec(arr[i])\n i += 1\n\n return False", "def neighbour(seq):\n it = iter(seq)\n it_next = itertools.islice(itertools.chain(iter(seq), [None]), 1, None)\n\n prev = None\n for curr, next in zip(it, it_next):\n yield(prev, curr, next)\n prev = curr", "def find_match(\n numbers: List[int],\n desired: int,\n count: int = 2\n) -> Tuple[int, ...]:\n for combo in combinations(numbers, count):\n if reduce(lambda x, y: x+y, combo) == desired:\n return combo\n return ()", "def _remove_duplicates(point_tuple_list: List[Tuple[pya.DPoint, ...]]) -> List[Tuple[pya.DPoint, ...]]:\n\n if len(point_tuple_list) < 2:\n return point_tuple_list\n\n unique_points = [point_tuple_list[0]]\n previous_point = point_tuple_list[0]\n for p_tuple in point_tuple_list[1:]:\n if (p_tuple[0] - previous_point[0]).norm() > 0:\n unique_points.append(p_tuple)\n previous_point = p_tuple\n\n return unique_points", "def uniform_list_check(value_list):\n return reduce((lambda acc, value: acc and value == value_list[0]), value_list, True)", "def single_move(triple):\r\n c = 0 # mark of a 1\r\n h = 0 # mark of a 2\r\n for i in triple:\r\n if i==1:\r\n c+=1\r\n elif i==2:\r\n h+=1\r\n if (c,h) == (2,0):\r\n return True\r\n elif (c,h) == (0,2):\r\n return True\r\n return False", "def eq_distance(list_of_numbers):\n\n\t#return the list of three numbers if they exist, return False otherwise\n\tfor i in list_of_numbers:\n\t\tfor j in list_of_numbers:\n\t\t\tif j > i:\n\t\t\t\tdiff = j - i\n\t\t\t\tif (j + diff) in list_of_numbers:\n\t\t\t\t\treturn (i,j,j+diff)\n\t\t\t\telif (i - diff) in list_of_numbers:\n\t\t\t\t\treturn (i-diff, i , j)\n\treturn False", "def list_duplicates_of(seq,item):\n\n start_at = -1\n locs = []\n while True:\n try:\n loc = seq.index(item,start_at+1)\n except ValueError:\n break\n else:\n locs.append(loc)\n start_at = loc\n return locs", "def next_click(self, x, y, board):\n if self.SOLVED:\n yield (None, None)\n for i in xrange(x, len(board)):\n for j in xrange(y, len(board[i])):\n if self.is_clickable(i, j, board):\n yield (i, j)", "def _get_index_of_nth_occurrence(input_list: list[Any],\n *,\n element: Any,\n count: int,\n ) -> int:\n return tuple(index for index, item in enumerate(input_list)\n if item == element)[count]", "def successor_pair(basepair, start, stop):\n\tx , y = basepair\n\tif (x + 1 > stop) or (y - 1 < start):\n\t\treturn (-1,-1)\n\telse:\n\t\treturn ( x + 1 , y - 1 )", "def every_other_new (list):\n return list[::2]", "def retrieve(self, index):\n if not 1 <= index <= self.count:\n return (False,None)\n if self.isEmpty() is True:\n return (False, None)\n\n current = self.head.next\n for teller in range(1, index):\n current = current.next\n return (True, current.item)", "def 
next_tuple(self):\n raise NotImplementedError(\"Spout not implementing next_tuple() method\")", "def lookahead(iterable):\n # Get an iterator and pull the first value.\n it = iter(iterable)\n last = next(it)\n # Run the iterator to exhaustion (starting from the second value).\n for val in it:\n # Report the *previous* value (more to come).\n yield last, True\n last = val\n # Report the last value.\n yield last, False", "def lookahead(iterable):\n # Get an iterator and pull the first value.\n it = iter(iterable)\n last = next(it)\n # Run the iterator to exhaustion (starting from the second value).\n for val in it:\n # Report the *previous* value (more to come).\n yield last, True\n last = val\n # Report the last value.\n yield last, False", "def lookahead(iterable):\n # Get an iterator and pull the first value.\n it = iter(iterable)\n last = next(it)\n # Run the iterator to exhaustion (starting from the second value).\n for val in it:\n # Report the *previous* value (more to come).\n yield last, True\n last = val\n # Report the last value.\n yield last, False", "def get_idxs_in_correct_order(idx1, idx2):\n if idx1 < idx2: return idx1, idx2\n else: return idx2, idx1", "def pertOfMin(index , listmin):\n for t in listmin:\n if index==t:\n return True\n\n return False", "def tuple_partition(list, max, min):\n #make the pivot the rightmost element in the list\n pivot = list[max]\n\n #start the index at the leftmost element\n pivot_index = min\n\n #make the current element the leftmost element\n current = min\n\n #iterate through list\n while current < max:\n #if current element is less than or equal to pivot\n #swap current element with index\n #then increment index\n if list[current][1] >= pivot[1]:\n temp = list[current]\n list[current] = list[pivot_index]\n list[pivot_index] = temp\n pivot_index += 1\n current += 1\n\n #put pivot into correct index\n temp = list[pivot_index]\n list[pivot_index] = list[max]\n list[max] = temp\n\n return pivot_index", "def ssort(mylist):\n comparisionCount = 0\n for position in range(len(mylist)):\n # Assume the current position is the minimum element\n minPosition = position\n for nextPos in range(position+1,len(mylist)):\n comparisionCount = comparisionCount + 1 # Comparing the values in the positions\n if (mylist[minPosition]>mylist[nextPos]):\n # Remembering the position of the minimum value\n minPosition = nextPos\n\n comparisionCount = comparisionCount + 1 #Comparing the positions\n if position != minPosition:\n # Swap the numbers\n mylist[minPosition],mylist[position] = mylist[position],mylist[minPosition]\n return (mylist,comparisionCount)", "def partition(first):\n lt = None\n eq = None\n gt = None\n\n p = first.next\n\n # put first element into equal list\n first.next = eq\n eq = first\n\n while p is not None:\n q = p\n p = p.next\n\n if q.value < eq.value:\n q.next = lt\n lt = q\n elif q.value > eq.value:\n q.next = gt\n gt = q\n else:\n q.next = eq\n eq = q\n\n # \"first\" is the last equal element\n return lt, eq, first, gt", "def test_nearest_location_adjacent():\n locations = [(1, 3), (3, 5)]\n\n assert nearest_location(locations, 2) == 0\n assert nearest_location(locations, 3) == 1", "def tuples_2_bool(tuples, x):\n if np.ndim(tuples) == 1:\n tuples = [tuples]\n\n out = np.zeros(x.size, dtype=bool)\n for l, u in tuples:\n out[(x > l) & (x < u)] = True\n return out", "def getProximity(tuples):\n\t\t\tsortedIndices = [indices for indices in tuples]\n\t\t\t#return abs(sortedIndices[0][1] - sortedIndices[-1][0])\n\t\t\treturn sortedIndices[-1][0] - 
sortedIndices[0][1]", "def indices(lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset + 1)\n except ValueError:\n return result\n result.append(offset)", "def slow_closest_pair(cluster_list):\n dist, idx1, idx2 = float(\"inf\"), -1, -1\n for idx_u in range(len(cluster_list)):\n for idx_v in range(len(cluster_list)):\n if idx_u != idx_v:\n dist_uv = pair_distance(cluster_list, idx_u, idx_v)\n dist, idx1, idx2 = min((dist, idx1, idx2), dist_uv)\n return (dist, idx1, idx2)", "def check_sum_before_and_after_index(int_list):\n # a one-item list always meets this requirement\n if len(int_list) == 1:\n print \"YES\"\n return\n\n # for each integer, find the sum of all the integers\n # before it, storing the total product so far each time\n sum_of_all_ints_before_index = [None] * len(int_list)\n\n sum_so_far = 0\n for i in xrange(len(int_list)):\n sum_of_all_ints_before_index[i] = sum_so_far\n sum_so_far += int_list[i]\n\n # for each integer, find the sum of all the integers\n # after it, storing the total product so far each time\n sum_of_all_ints_after_index = [None] * len(int_list)\n\n sum_so_far = 0\n i = len(int_list) - 1\n while i >= 0:\n sum_of_all_ints_after_index[i] = sum_so_far\n sum_so_far += int_list[i]\n i -= 1\n\n # for each index, compare before / after entries\n for i in xrange(len(int_list)):\n if sum_of_all_ints_before_index[i] == sum_of_all_ints_after_index[i]:\n print \"YES\"\n return\n\n # if no number exists meeting the sum criteria\n print \"NO\"\n return", "def element_to_tuple(list_of_elements):\n return list(map(lambda x: tuple(x), list_of_elements))", "def find_valid_posse(board: 'List') -> 'List':\n for i, a in enumerate(board):\n for j, b in enumerate(board):\n if j != i:\n for k, c in enumerate(board):\n if k not in (i, j) and \\\n is_valid_posse((a, b, c)):\n # print((i, j, k))\n return [a, b, c]", "def exhuastiveMatch(idx_seq, adapters, calcDistance, max_distance, warning_fails=True, verbose=0):\n match = None\n tie = False\n for i,a in enumerate(adapters):\n d = calcDistance(idx_seq, a[1])\n if( d <= max_distance ):\n if( match is None ):\n match = [i,d]\n else:\n print >>sys.stderr, \"WARNING: Multiple matches for\",idx_seq\n if( warning_fails ):\n sys.exit(2)\n if( d == match[1] ):\n tie = True\n if( d < match[1] ):\n tie = False\n match = [i,d]\n if( tie ):\n match = None\n if( verbose >= 3 ):\n print >>sys.stderr, i, idx_seq, a[1], d\n return match", "def findMin(list, t_value):\n currMin = sys.maxsize\n result = 0\n for index in list:\n if t_value[(index[0], index[1], tuple(index[2].items()))] < currMin:\n currMin = t_value[(index[0], index[1], tuple(index[2].items()))]\n result = index\n return result", "def exists_at_time(e, t):\n t0 = 0\n for l, x in e:\n t1 = t0 + l\n if t > t0 and t < t1:\n return x\n t0 = t1\n return x", "def find_duplicate(student_list):\r\n place_holder = student_info('null', 'null', '0', '0')\r\n current = place_holder\r\n dupe = []\r\n final = []\r\n for student in student_list:\r\n previous = current\r\n current = student\r\n if current.first == previous.first:\r\n if previous in final:\r\n dupe.append(final.pop())\r\n dupe.append(student)\r\n elif current.first != previous.first:\r\n if len(dupe) > 1:\r\n dupe.sort(key=lambda x: x[1])\r\n for student_dupe in dupe:\r\n final.append(student_dupe)\r\n final.append(student)\r\n dupe = []\r\n else:\r\n final.append(student)\r\n if len(dupe) > 1:\r\n dupe.sort(key=lambda x: x[1])\r\n for student_dupe in dupe:\r\n 
final.append(student_dupe)\r\n for student_final in final:\r\n print(student_format(student_final))", "def scan_flip_larger(x, a):\n for i, (y, d) in enumerate(a):\n if y > x:\n a[i] = y, flip(d)", "def _previous(self, coord):\n candidates = [(coord[0] - 1, coord[1]), (coord[0] + 1, coord[1]), (coord[0], coord[1] - 1), (coord[0], coord[1] + 1)]\n for candidate in (x for x in candidates if 0 <= x[0] < self.dimension and 0 <= x[1] < self.dimension):\n if self.board[candidate[0]][candidate[1]].next == self.board[coord[0]][coord[1]]:\n return candidate", "def _assert_same(values):\n assert len(values) > 0\n first, rest = values[0], values[1:]\n for v in rest:\n assert v == first\n return first", "def sort_list_of_tuples(list):\n list.sort(key=lambda x: x[0])\n return list", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)" ]
[ "0.69720376", "0.67319834", "0.6196822", "0.6108386", "0.60973054", "0.5961634", "0.58837163", "0.58421284", "0.57686746", "0.5747028", "0.5688401", "0.56793284", "0.5658989", "0.56409854", "0.56406134", "0.56301296", "0.56054074", "0.5603956", "0.5601578", "0.55650455", "0.5554385", "0.5551445", "0.55247855", "0.5508838", "0.5502948", "0.5473888", "0.5456793", "0.54513943", "0.54451936", "0.54451746", "0.5433128", "0.54294777", "0.5413252", "0.53920555", "0.53864753", "0.5386117", "0.5377051", "0.5368838", "0.5367877", "0.5352884", "0.5337227", "0.5319947", "0.5319832", "0.53159606", "0.5298283", "0.5293451", "0.52874845", "0.5286118", "0.5277623", "0.52488494", "0.5248357", "0.5247742", "0.5212334", "0.52106357", "0.51942545", "0.51937443", "0.5164945", "0.51621157", "0.5160685", "0.51604366", "0.51574653", "0.51464206", "0.5122374", "0.51075524", "0.5099391", "0.50943804", "0.5093942", "0.5089024", "0.5084316", "0.507891", "0.5073714", "0.5073282", "0.5060446", "0.5060446", "0.5060446", "0.50603867", "0.5049002", "0.5044894", "0.5044845", "0.50432634", "0.503897", "0.5034878", "0.5032842", "0.50315654", "0.5028713", "0.502855", "0.50185823", "0.5015499", "0.50149745", "0.50138605", "0.501325", "0.5012434", "0.50092113", "0.50067234", "0.5004562", "0.49991882", "0.49885786", "0.49885786", "0.49885786", "0.49885786" ]
0.629942
2
Determine if the object has a parent with the supplied name.
def has_parent(obj, parent_name):
    if obj.parent is None:
        return False
    if obj.parent.name is None:
        return False
    elif obj.parent.name == parent_name:
        return True
    else:
        return has_parent(obj.parent, parent_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_parent(self):\n return self.parent != None", "def has_parent(self):\n return self._parent_ is not None", "def _contains_in_self_or_parent(self, name: str) -> bool:\n return name in self", "def is_parent(self):\n if self.parent is not None:\n return False\n return True", "def is_parent_of(self):\n return self.hasLabel('parent_of')", "def has_parent(self):\n return False", "def isChildExists(self, name):\n return self.getChild(name) != None", "def is_parent(self) -> bool:\n return AccountEntry.objects.filter(parent=self).exists()", "def contains_parent(self, pid):\n return pid in self._parent_ids", "def has_parent(self, index):\n return self.get_parent_index(index) < len(self.heap)", "def is_parent(self):\n return not self.children", "def is_parent_of(cls, *args):\n return cls.graph_traversal(None, None, Bytecode()).is_parent_of(*args)", "def has_parent_key(self):\n if self.is_root():\n return False\n try:\n self.parent_key()\n return True\n except ParseException:\n return False", "def has_parent(self, term):\n for parent in self.parents:\n if parent.id == term or parent.has_parent(term):\n return True\n return False", "def has_name(self, name: str) -> bool:\n return name in self.child_tags", "def is_parent(self, item):\n if len(self.df.loc[self.df['parent_code']==item, :]): \n return True\n return False", "def has_parents(self):\n return len(self._parents) > 0", "def is_parent(review_request, commit_data=None):\n commit_data = fetch_commit_data(review_request, commit_data=commit_data)\n\n return str(commit_data.extra_data.get(\n SQUASHED_KEY, False)).lower() == 'true'", "def is_parent(self, mother, kid):\n mom_node = self.names_to_nodes[mother]\n child_node = self.names_to_nodes[kid]\n return child_node.is_parent(mom_node)", "def __contains__(self, name):\n return (name in self._defs) or \\\n ((self._parent is not None) and (name in self._parent))", "def is_parent_ref(\n self,\n schema: s_schema.Schema,\n reference: Object,\n ) -> bool:\n return False", "def is_parent(child, parent):\n # Get the list of processes\n assert child is not None\n assert parent is not None\n #child_ranks = [i for i in xrange(child.Get_size())]\n child_group = child.Get_group()\n parent_group = parent.Get_group()\n inter_group = MPI.Group.Intersect(child_group, parent_group)\n return child_group.Get_size() == inter_group.Get_size()", "def islchild(self):\n\t\tif (self.parent() and self.parent().lchild() is self): #TODO is or == here\n\t\t\treturn True\n\t\treturn False", "def parent_of(self, eid):\n return self.E(eid).is_parent_of()", "def is_in(self, other):\n if self.name == other.name:\n return True\n else:\n if self.parent:\n return self.parent.is_in(other)\n else:\n return False", "def isSetParentSBMLObject(self):\n return _libsbml.ASTNode_isSetParentSBMLObject(self)", "def _has_parents(self, node: CFNode) -> bool:\n return bool(self._graph._backedges[node])", "def __contains__(self, item):\n if item in self._parents:\n return True\n else:\n return False", "def is_parent_of_catalog(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchySession.is_parent_of_bin\n return self._get_provider_session('catalog_hierarchy_session').is_parent_of_catalog(*args, **kwargs)", "def find_parent(self,\r\n index):\r\n\r\n if not index.level() > 1:\r\n return False\r\n go_on = True\r\n while not index.is_top() or index.level() < 1:\r\n\r\n\r\n if str(index.parent()) in self.indexes():\r\n return index.parent()\r\n index = index.parent()\r\n\r\n return False", "def 
findParent(self, name=None, attrs={}, **kwargs):\r\n # NOTE: We can't use _findOne because findParents takes a different\r\n # set of arguments.\r\n r = None\r\n l = self.findParents(name, attrs, 1)\r\n if l:\r\n r = l[0]\r\n return r", "def exists(self, name):\n try:\n self.container.get_object(name)\n return True\n except NoSuchObject:\n return False", "def inherits_from(child, parent_name):\n if inspect.isclass(child):\n if parent_name in [c.__name__ for c in inspect.getmro(child)[1:]]:\n return True\n return False", "def isContainedIn(self, t):\n if self.parent is None:\n return False\n if self.parent.getClassName() == t:\n return True\n return self.parent.isContainedIn(t)", "def _is_child(self, parent, child): # type: (str, str) -> bool\n return child != parent and child.startswith(parent + \".\")", "def ignore_parent(self) -> bool:\n return self._strategy.ignore_parent", "def orphaned(self):\n return (self.parent is None)", "def is_found_in_parents(mcs, name, parents):\n for parent in parents:\n\n for cls in reversed(parent.__mro__):\n\n if hasattr(cls, name):\n return True\n\n if cls.__class__ is mcs:\n break\n\n return False", "def is_known(self, child):\r\n return child in self._parents", "def _is_left(self):\n if self.parent is None:\n return None\n else:\n return self is self.parent.left", "def has_name(self):\n return self.name is not None", "def is_left_child(self):\n if self.parent == None:\n return False\n\n return self.parent.left == self", "def parents(self, asset_vid):\n return self \\\n .asset(asset_vid) \\\n .inE() \\\n .is_parent_of()", "def hasname(self):\n\t\treturn self.name is not None", "def is_root(self):\n return not self.parents.exists()", "def is_recorded_name(self, name: str) -> bool:\n return self.is_top_level() and name in self._toplevel_names", "def is_object(self, name: str) -> bool:\r\n return os.path.exists(self._path_for_pickle(name))", "def isSetIdentifyingParent(self):\n return _libsbml.SpeciesTypeComponentIndex_isSetIdentifyingParent(self)", "def can_be_parent(self, give_reason=False):\n reason = None\n if self.is_child:\n reason = _(\"The specified parent product is a child product.\")\n if self.has_stockrecords:\n reason = _(\"One can't add a child product to a product with stock records.\")\n is_valid = reason is None\n if give_reason:\n return is_valid, reason\n else:\n return is_valid", "def parent(self) -> Optional[Heirarchical]:\n return None", "def is_wrapped_by(self, name):\n\n\t\ttry:\n\t\t\tself._find_wrapper_by_name(name)\n\t\texcept ValueError:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def is_root(self):\n return self.parent == None", "def remove_parent(self, parent: object, remove_child=True) -> bool:\n if parent in self._parents:\n self._parents.remove(parent)\n\n if remove_child:\n parent._children.remove(self)\n\n return True\n\n return False", "def is_ancestor_of(self, node):\r\n if self.node_type == 'Stage':\r\n return True\r\n\r\n parent = node.get_parent()\r\n while parent:\r\n if id(parent) == id(self):\r\n return True\r\n parent = parent.get_parent()\r\n\r\n return False", "def has_name(self, name):\n\t\t\treturn name in self.classes", "def has_child(self):\n return False", "def find_parent(self):\n pass", "def any_parent_has_power(self, member_name):\n for parent in self.parents_of(member_name):\n if parent.has_power:\n return True\n \n return False", "def has_name(self, name):\n\t\treturn name in self.classes", "def is_labelled_parent(elm):\n klass = elm.attrib.get('class')\n if klass in widgets_toplevel:\n return 
True\n if klass == 'GtkShortcutsGroup':\n children = elm.findall(\"property[@name='title']\")\n if len(children) >= 1:\n return True\n if klass == 'GtkFrame' or klass == 'GtkNotebook':\n children = elm.findall(\"child[@type='tab']\") + elm.findall(\"child[@type='label']\")\n if len(children) >= 1:\n return True\n return False", "def has_name(self, name):\n return name in self.classes", "def hasparents(self):\n return bool(self.parents)", "def __contains__(self, pid):\n return self.contains_child(pid) or self.contains_parent(pid)", "def find_parent(self):\n parent = self._parent\n if parent:\n return parent\n elif not self.is_root:\n psobj = self.get_sobj().GetFather()\n parent = self.__class__(self._std, self._bld, psobj.GetID())\n self._parent = parent\n return parent", "def has_parent_catalogs(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchySession.has_parent_bins\n return self._get_provider_session('catalog_hierarchy_session').has_parent_catalogs(*args, **kwargs)", "def is_root(self):\n return self._parent == None", "def is_left_child(self):\n is_left_child = False\n parent = self.get_parent()\n if parent is not None:\n is_left_child = parent.get_left() == self\n\n return is_left_child", "def find_entity_by_name(parent_entity, name):\n children = parent_entity.GetChildEntity()\n for item in children:\n if item.GetName() == name:\n return item\n return None", "def has_chain(praw_r, praw_comment, username):\n if not hasattr(praw_comment, 'parent_id'):\n return False\n parent = praw_r.get_info(thing_id=praw_comment.parent_id)\n if not parent or type(parent) != praw.objects.Comment:\n return False\n return is_comment_owner(parent, username)", "def is_root(self) -> bool:\n return self.parent_id is None", "def _has(self, name):\n return hasattr(self._, name)", "def isAncestorOf(self, node):\n if (self in node.parents()):\n return True\n elif (not node.isSource()):\n return reduce(lambda x,y: x or y, [self.isAncestorOf(x) for x in node.parents()])\n else:\n return False", "def get_parent_object_from_request(self, request):\n resolved = resolve(request.path_info)\n if 'object_id' in resolved.kwargs:\n return self.parent_model.objects.get(pk=resolved.kwargs['object_id'])\n return None", "def get_parent_object_from_request(self, request):\n resolved = resolve(request.path_info)\n if 'object_id' in resolved.kwargs:\n return self.parent_model.objects.get(pk=resolved.kwargs['object_id'])\n return None", "def get_parent_object_from_request(self, request):\n resolved = resolve(request.path_info)\n if 'object_id' in resolved.kwargs:\n return self.parent_model.objects.get(pk=resolved.kwargs['object_id'])\n return None", "def parent(self):\n address = self.parent_address\n try:\n parent = Page.objects.get(address=address)\n except Page.DoesNotExist:\n parent = None\n\n return parent", "def HasAncestor(self, other):\n return (self == other) or (self.parent and self.parent.HasAncestor(other))", "def get_parent(self):\n if self.parent:\n return self.parent()\n else:\n return None", "def is_parent_of_book(self, id_, book_id):\n # Implemented from template for\n # osid.resource.BinHierarchySession.is_parent_of_bin\n if self._catalog_session is not None:\n return self._catalog_session.is_parent_of_catalog(id_=id_, catalog_id=book_id)\n return self._hierarchy_session.is_parent(id_=book_id, parent_id=id_)", "def pl_exists(self, name):\n for i, d in enumerate(self.ui.leftwin.data):\n if d.data.name == name:\n return i\n\n return -1", "def 
maybe_focus_parent(self):\n parent_window_data = self.get_windows_with_parent_data()\n focused_node = filter(lambda w: w['window']['focused'], parent_window_data)\n if len(focused_node) <= 0:\n return\n focused_node = focused_node[0]\n\n focused_parent = focused_node['parent']\n focused_window = focused_node['window']\n if focused_parent['layout'] == 'tabbed':\n call('i3-msg focus parent')", "def is_root(self):\n return self.parent_id is None", "def is_root(self):\n return not self.parent", "def parent( objects, parentObj, objectNames = [], relative = False, world = False ):\n \n if not objectNames:\n \n for i in range( len( objects ) ):\n \n objectNames.append( None )\n \n for o, n in zip( objects, objectNames ):\n \n parentsList = getParentList( o, ascending = True )\n \n if parentsList and world:\n \n mc.parent( o, world=True, r = relative )\n continue\n \n if parentsList and not world:\n \n if parentObj == parentsList[0]:\n \n pass\n \n else:\n \n mc.parent( o, parentObj, r = relative )\n \n else:\n \n mc.parent( o, parentObj, r = relative )\n \n if n:\n \n mc.rename( o, n )", "def has_parent(parent, parent_id, children):\n args = get_args(request.args)\n if request.method == 'GET':\n #Something like /api/domains/<id>/virtualmachines will be equivalent to listVirtualMachines?domainid=<id>\n verb = \"list\"\n subject = children\n #If parent is 'domains' it is added into args as domainid, i.e singular[domains] + 'id'\n args[singular[parent] + 'id'] = parent_id\n return apicall(verb, subject, args)", "def _is_nested(pkg: str, pkg_path: str, parent: str, parent_path: str) -> bool:\n norm_pkg_path = _path.normpath(pkg_path)\n rest = pkg.replace(parent, \"\", 1).strip(\".\").split(\".\")\n return pkg.startswith(parent) and norm_pkg_path == _path.normpath(\n Path(parent_path, *rest)\n )", "def _is_child_path(path, parent_path, link_name=None):\n b_path = to_bytes(path, errors='surrogate_or_strict')\n\n if link_name and not os.path.isabs(b_path):\n # If link_name is specified, path is the source of the link and we need to resolve the absolute path.\n b_link_dir = os.path.dirname(to_bytes(link_name, errors='surrogate_or_strict'))\n b_path = os.path.abspath(os.path.join(b_link_dir, b_path))\n\n b_parent_path = to_bytes(parent_path, errors='surrogate_or_strict')\n return b_path == b_parent_path or b_path.startswith(b_parent_path + to_bytes(os.path.sep))", "def _determine_parent(self, caller):\n self.msgin(4, \"determine_parent\", caller)\n\n parent = None\n if caller:\n pname = caller.identifier\n\n if isinstance(caller, Package):\n parent = caller\n\n elif '.' 
in pname:\n pname = pname[:pname.rfind('.')]\n parent = self.findNode(pname)\n\n elif caller.packagepath:\n # XXX: I have no idea why this line\n # is necessary.\n parent = self.findNode(pname)\n\n self.msgout(4, \"determine_parent ->\", parent)\n return parent", "def in_ancestry(self, formula):\n if formula in self.formulas:\n return True\n if self.parent:\n return self.parent.in_ancestry(formula)\n return False", "def hasChildren():", "def find_parent_of(self, *args):\n return _ida_hexrays.citem_t_find_parent_of(self, *args)", "def _refers_to_parent_table(self) -> bool:\n pt = self.parent_persist_selectable\n mt = self.child_persist_selectable\n result = False\n\n def visit_binary(binary: BinaryExpression[Any]) -> None:\n nonlocal result\n c, f = binary.left, binary.right\n if (\n isinstance(c, expression.ColumnClause)\n and isinstance(f, expression.ColumnClause)\n and pt.is_derived_from(c.table)\n and pt.is_derived_from(f.table)\n and mt.is_derived_from(c.table)\n and mt.is_derived_from(f.table)\n ):\n result = True\n\n visitors.traverse(self.primaryjoin, {}, {\"binary\": visit_binary})\n return result", "def object_exists(self, name: str):\n file_path = self.__get_file_path(name)\n return os.path.exists(file_path)", "def _object_exists(name):\n conn = sqlite3.connect('/dev/input')\n try:\n cur = conn.cursor()\n sql = 'SELECT ROWID FROM object WHERE name=? AND deleted=0'\n cur.execute(sql, (name, ))\n result = cur.fetchall()\n return len(result) > 0\n finally:\n conn.close()", "def is_parent_of(a, b):\n a = a.rstrip(\"/\") + \"/\"\n b = b.rstrip(\"/\") + \"/\"\n return b.startswith(a)", "def has_nested_attr(__obj: object, __name: str) -> bool:\n pre, _, post = __name.rpartition('.')\n if pre:\n if has_nested_attr(__obj, pre):\n return has_nested_attr(get_nested_attr(__obj, pre), post)\n else:\n return False\n else:\n return hasattr(__obj, post)", "def is_defined_in_xxx(xxx, cls):\n if not cls.parent:\n return False\n\n if not isinstance(cls.parent, namespace.namespace_t):\n return False\n\n if xxx != cls.parent.name:\n return False\n\n xxx_ns = cls.parent\n if not xxx_ns.parent:\n return False\n\n if not isinstance(xxx_ns.parent, namespace.namespace_t):\n return False\n\n if '::' != xxx_ns.parent.name:\n return False\n\n global_ns = xxx_ns.parent\n return None is global_ns.parent", "def ParentOrMatch(self, other):\n return self._dir == other or self._dir.startswith(other + \"/\")", "def test_lacking_parent(self):\n pass", "def IsDescendantOf(self, parent, item):\r\n\r\n while item:\r\n \r\n if item == parent:\r\n \r\n # item is a descendant of parent\r\n return True\r\n \r\n item = item.GetParent()\r\n \r\n return False" ]
[ "0.7545343", "0.7435591", "0.73406065", "0.7337848", "0.7336612", "0.7255639", "0.7117234", "0.7116028", "0.70370907", "0.6815638", "0.6784905", "0.67783093", "0.6720416", "0.6667514", "0.6614133", "0.6522801", "0.6410729", "0.640435", "0.63520426", "0.6320416", "0.6236038", "0.61792105", "0.6169194", "0.6147922", "0.6118637", "0.61061174", "0.6092991", "0.60689795", "0.60651433", "0.6010433", "0.6002905", "0.596375", "0.59582835", "0.5956177", "0.59557253", "0.5941649", "0.59297377", "0.59267354", "0.5913112", "0.58900404", "0.58647126", "0.58464295", "0.5806993", "0.5802811", "0.57965195", "0.5773553", "0.57469255", "0.5723074", "0.5707321", "0.56989783", "0.5698079", "0.56831944", "0.567159", "0.56606054", "0.5627313", "0.56121755", "0.5610785", "0.5606452", "0.56062263", "0.5605853", "0.56045103", "0.55899954", "0.55805004", "0.55550176", "0.55425674", "0.55326617", "0.55299747", "0.55243987", "0.5502307", "0.54890895", "0.548146", "0.54783905", "0.5463791", "0.5463791", "0.5463791", "0.5463586", "0.5463127", "0.5434658", "0.5434344", "0.54320174", "0.54261506", "0.5421926", "0.54177153", "0.540853", "0.5391215", "0.5373439", "0.5369025", "0.53629166", "0.53524625", "0.53488487", "0.53366196", "0.53360695", "0.53253275", "0.5318478", "0.5317142", "0.5316857", "0.53067404", "0.5296223", "0.52923346", "0.5279773" ]
0.8476002
0
The event triggered when an error is raised while invoking a command.
async def on_command_error(self, ctx, error):
    if hasattr(ctx.command, 'on_error'):
        return

    ignored = (commands.CommandNotFound, commands.UserInputError)
    error = getattr(error, 'original', error)

    try:
        if isinstance(error, ignored):
            return
        elif isinstance(error, commands.DisabledCommand):
            return await send(ctx, self.messages.disabled.format(ctx.command), tag=True, expire=True)
        elif isinstance(error, commands.CheckFailure):
            return await send(ctx, self.messages.no_pm.format(ctx.command), tag=False, expire=True)
        elif isinstance(error, ArgIsNaN):
            return await send(ctx, self.messages.arg_is_nan.format(error.args[0]), tag=True, expire=True)
        elif isinstance(error, PageOOB):
            return await send(ctx, self.messages.page_oob, tag=True, expire=True)
        elif isinstance(error, WrongArgLength):
            return await send(ctx, self.messages.wrong_arg_length.format(ctx.command, error.args[0]), tag=True, expire=True)
        elif isinstance(error, UserNotFound):
            return await send(ctx, self.messages.user_not_found.format(error.args[0]), tag=True, expire=True)
        elif isinstance(error, CommandError):
            return await send(ctx, self.messages[error.command][error.args[0]], tag=True, expire=True)
    except Exception:
        pass

    exception_string = ctx.author.mention + "\n```" + \
        "".join(traceback.format_exception(type(error), error, error.__traceback__)) + "```"

    if ctx.guild == None:
        await ctx.send(content=exception_string)
        return

    channel = None
    for possible_channel in ctx.guild.channels:
        if possible_channel.name == self.messages.error_channel:
            channel = possible_channel
            break

    if channel:
        await channel.send(exception_string)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def on_command_error(self, ctx, error):\n\n if hasattr(ctx.command, 'on_error'):\n return\n\n error = getattr(error, 'original', error)\n\n if isinstance(error, commands.MissingRequiredArgument):\n LOG.error(f\"Missing argument in command {ctx.command}\")\n message = \"An argument is missing\\n\\n\"\n message += f\"{self.command_prefix}{ctx.command.signature}\"\n await self.send(ctx.channel, message, code_block=True)\n elif type(error) not in self.handled_exceptions:\n LOG.error(f\"Exception '{type(error).__name__}' raised in command '{ctx.command}':\")\n traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)", "async def on_command_error(self, ctx, error):\n\n # This prevents any commands with local handlers being handled here in on_command_error.\n if hasattr(ctx.command, 'on_error'):\n return\n\n\n if hasattr(ctx.command, 'on_command_error'):\n return\n\n # This prevents any cogs with an overwritten cog_command_error being handled here.\n cog = ctx.cog\n if cog:\n if cog._get_overridden_method(cog.cog_command_error) is not None:\n return\n\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. We keep the exception passed to on_command_error.\n error = getattr(error, 'original', error)\n\n # Anything in ignored will return and prevent anything happening.\n if isinstance(error, commands.CommandNotFound):\n await ctx.send(f'Command pas trouvé')\n return\n if isinstance(error, commands.DisabledCommand):\n await ctx.send(f'{ctx.command} has been disabled.')\n return\n\n if isinstance(error,commands.errors.PrivateMessageOnly):\n await ctx.message.delete()\n channel = await ctx.message.author.create_dm()\n await channel.send(f'{ctx.command} ne peut être exécuté que en message privé !!')\n return\n # For this error example we check to see where it came from...\n if isinstance(error, commands.BadArgument):\n await ctx.send('Mauvais arguments passés')\n return\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('Il manque des arguments à la commande')\n return\n # All other Errors not returned come here. 
And we can just print the default TraceBack.\n logger.error(f'Ignoring exception in command {ctx.command} : {type(error)} {error} {error.__traceback__}')", "async def on_command_error(self, ctx: commands.Context, error: Any):\n \n # Notify user for MissingRequiredArgument errors\n if isinstance(error, commands.MissingRequiredArgument):\n command_name = ctx.message.content.split(\" \")[0]\n msg = translate(\"err_missing_parameter\", await culture(ctx)).format(command_name, error.param.name)\n return await ctx.send(msg)\n else:\n # Log the warning\n log_warn(error)\n\n # Notify user with general error\n msg = translate(\"err_unrecognized_command\", await culture(ctx))\n await ctx.send(msg)", "async def on_command_error(self, ctx, err):\n if type(err) is CommandNotFound:\n await self.send_message(ctx, 'I don\\'t know how to do that.')", "async def on_command_error(\n self, ctx: commands.Context, error: commands.CommandError\n ) -> None:\n if getattr(error, \"handled\", False):\n logger.debug(\n f\"Command {ctx.command} had its error already handled locally; ignoring.\"\n )\n return\n\n error = getattr(error, \"original\", error)\n\n if isinstance(error, commands.CommandNotFound):\n return # Skip logging CommandNotFound Error\n\n elif isinstance(error, commands.UserInputError):\n if isinstance(error, commands.MissingRequiredArgument):\n description = (\n f\"`{error.param.name}` is a required argument that is missing.\"\n \"\\n\\nUsage:\\n\"\n f\"```{ctx.prefix}{ctx.command} {ctx.command.signature}```\"\n )\n else:\n description = (\n f\"Your input was invalid: {error}\\n\\nUsage:\\n\"\n f\"```{ctx.prefix}{ctx.command} {ctx.command.signature}```\"\n )\n\n embed = self.error_embed(description)\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.CommandOnCooldown):\n mins, secs = divmod(math.ceil(error.retry_after), 60)\n embed = self.error_embed(\n f\"This command is on cooldown:\\nPlease retry in **{mins} minutes {secs} seconds**.\"\n )\n await ctx.send(embed=embed, delete_after=10)\n\n elif isinstance(error, commands.DisabledCommand):\n await ctx.send(embed=self.error_embed(\"This command has been disabled.\"))\n\n elif isinstance(error, commands.NoPrivateMessage):\n await ctx.send(\n embed=self.error_embed(\"This command can only be used in the server.\")\n )\n\n elif isinstance(error, commands.CheckFailure):\n await ctx.send(\n embed=self.error_embed(\"You aren't allowed to use this command.\")\n )\n\n elif isinstance(error, commands.BadArgument):\n self.revert_cooldown_counter(ctx.command, ctx.message)\n embed = self.error_embed(\n \"The argument you provided was invalid: \"\n f\"{error}\\n\\nUsage:\\n```{ctx.prefix}{ctx.command} {ctx.command.signature}```\"\n )\n await ctx.send(embed=embed)\n else:\n await self.handle_unexpected_error(ctx, error)\n return # Exit early to avoid logging.\n\n logger.debug(\n f\"Error Encountered: {type(error).__name__} - {str(error)}, \"\n f\"Command: {ctx.command}, \"\n f\"Author: {ctx.author}, \"\n f\"Channel: {ctx.channel}\"\n )", "async def on_command_error(self, ctx, error):\n if self._error_cd.get_bucket(ctx.message).update_rate_limit():\n return self.dispatch('global_cooldown', ctx, '_warn_cd', '⚠️') \n \n error = getattr(error, 'original', error)\n \n is_owner = await ctx.is_owner()\n e_args = (type(error), error, error.__traceback__, 4) \n \n if not isinstance(error, (HTTPException, ClientException, CommandOnCooldown)):\n print_exception(*e_args)\n \n # Cooldown bypass \n if (isinstance(error, CommandOnCooldown) # there must be a better 
way\n and (is_owner or ctx.permissions_for(ctx.author).manage_messages)):\n return await ctx.reinvoke()\n \n if is_owner:\n lines = ''.join(format_exception(*e_args)) \n else:\n lines = str(error)\n \n await ctx.display(embed=ColoredEmbed(title='Error',\n description='```py\\n' + lines + '```'))", "async def on_command_error(self, ctx: Context, e: commands.CommandError) -> None:\n if hasattr(ctx.command, \"on_error\"):\n return\n\n e = getattr(e, \"original\", e)\n\n await ctx.message.add_reaction(\"\\U0000274c\")\n\n embed = DefaultEmbed(ctx, title=\"**An error has occurred:**\")\n\n if isinstance(e, commands.DisabledCommand):\n embed.description = \"Command not currently enabled.\"\n\n elif isinstance(e, commands.UserInputError):\n embed.description = f\"Command received bad argument: {e}.\"\n\n elif isinstance(e, commands.NotOwner):\n embed.description = \"You do not have enough permissions for this command.\"\n\n elif isinstance(e, commands.CommandOnCooldown):\n embed.description = f\"{e}.\"\n\n elif isinstance(e, commands.CheckFailure):\n embed.description = \"You do not have enough permissions to run this command.\"\n\n elif isinstance(e, commands.MissingPermissions):\n embed.description = \"Bot does not have enough permissions for this command.\"\n\n elif isinstance(e, commands.CommandNotFound):\n embed.description = \"Unknown command.\"\n\n else:\n embed.description = f\"{type(e).__name__}: {e}\"\n\n log.error(\"An error has occurred.\", exc_info=(type(e), e, e.__traceback__))\n\n embed.description = f\"`{embed.description}`\"\n\n await ctx.send(embed=embed)", "async def on_command_error(ctx, error):\n if isinstance(error, commands.CheckFailure) or isinstance(error, commands.MissingPermissions):\n print(\"!ERROR! \" + str(ctx.author.id) + \" did not have permissions for \" + ctx.command.name + \" command\")\n elif isinstance(error, commands.BadArgument):\n argument = list(ctx.command.clean_params)[len(ctx.args[2:] if ctx.command.cog else ctx.args[1:])]\n await ctx.send(\"Could not find the \" + argument)\n elif isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(ctx.command.name + \" is missing arguments\")\n elif isinstance(error, commands.BotMissingPermissions):\n await ctx.send(\"Bot is missing permissions.\")\n else:\n print('Ignoring exception in command {}:'.format(ctx.command), file=sys.stderr)\n traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)", "async def on_command_error(self, ctx: IceTeaContext, error: Exception):\n # Reports that a command is on cool down\n if isinstance(error, commands.CommandOnCooldown):\n await ctx.send(\n f\"This command is on cooldown! Hold your horses! 
>:c\\nTry again in \"\n f\"**{int(error.retry_after)}** seconds\")\n # Reports that the command is disabled\n elif isinstance(error, commands.errors.DisabledCommand):\n await ctx.send(\"That functionality is currently disabled\")\n # Reports that the command cannot be handled inside a PM\n elif isinstance(error, commands.errors.NoPrivateMessage):\n await ctx.send(\"I am unable to processes this command inside a PM\")\n elif isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(f\"Sorry, you forgot to include ``{error.param}`` with that call, try again\")\n elif isinstance(error, commands.BadArgument):\n await ctx.send(\n f\"Sorry, I could not do anything with what you provided me.\\n\"\n f\"You can use ``{ctx.prefix}help {ctx.invoked_with}`` for more info\")\n # Reports on non generic errors\n elif isinstance(error, commands.errors.CommandInvokeError):\n try:\n await ctx.message.add_reaction(\"\\U000026a0\")\n\n def check(reaction, reactor):\n return ctx.message.id == reaction.message.id and reaction.emoji == \"\\U000026a0\" and reaction.count > 1 \\\n and reactor == ctx.bot.owner\n\n try:\n await ctx.bot.wait_for(\"reaction_add\", check=check, timeout=30)\n embed = discord.Embed(color=0xff0000, description='displays detailed error information',\n title='Iceteabot error log')\n embed.add_field(name=\"Command used\", value=f\"{ctx.invoked_with}\")\n embed.add_field(name=\"Command author\", value=f\"{ctx.message.author.display_name}\")\n embed.add_field(name=\"args\", value=ctx.kwargs or ctx.args)\n embed.add_field(name=\"Error\", value=error.original, inline=False)\n embed.add_field(name=\"Log\",\n value=f\"```py\\n{traceback.format_tb(error.original.__traceback__)[-1]}```\")\n embed.timestamp = datetime.datetime.utcnow()\n debug_channel = ctx.bot.get_channel(360895354033537029)\n if debug_channel is not None:\n await debug_channel.send(embed=embed)\n else:\n await ctx.send(embed=embed, delete_after=10)\n try:\n await ctx.message.clear_reactions()\n await ctx.message.delete()\n except discord.Forbidden:\n pass\n except discord.HTTPException:\n pass\n except asyncio.TimeoutError:\n try:\n await ctx.message.clear_reactions()\n await ctx.message.delete()\n except discord.Forbidden:\n pass\n except discord.HTTPException:\n pass\n except discord.Forbidden:\n pass\n finally:\n try:\n from sentry_sdk import capture_exception\n capture_exception(error)\n except ImportError:\n pass", "async def on_command_error(\n self, ctx: commands.Context, error: commands.CommandError\n ):\n log.debug(\"The Error Handler was invoked to handle an error\")\n\n trace = \"\".join(\n traceback.format_exception(type(error), error, error.__traceback__)\n )\n trace = trace.strip()\n\n if hasattr(ctx.command, \"on_error\"):\n log.debug(\"Invoked, but will not override command's own error handler\")\n return\n\n cog = ctx.cog\n if cog:\n if cog._get_overridden_method(cog.cog_command_error) is not None:\n log.debug(\"Invoked, but will not override cog's own error handler\")\n return\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. 
We keep the exception passed to on_command_error.\n error = getattr(error, \"original\", error)\n ignored = (commands.CommandNotFound,)\n\n if isinstance(error, ignored):\n log.debug(f\"Ignored exception {type(error)} - {error}\")\n return\n\n # Check for specific exceptions to be handled\n if isinstance(error, commands.DisabledCommand):\n await ctx.send(f\"{ctx.command} has been disabled.\")\n\n elif isinstance(error, commands.CommandOnCooldown):\n try:\n await ctx.send(str(error))\n except discord.HTTPException:\n pass\n\n elif isinstance(error, commands.NoPrivateMessage):\n try:\n await ctx.author.send(\n f\"{ctx.command} can not be used in Private Messages.\"\n )\n except discord.HTTPException:\n pass\n\n elif isinstance(error, commands.errors.CheckFailure):\n log.debug(f\"A command was called, but a check failed. Trace: \\n{trace}\")\n\n elif isinstance(error, commands.MissingRequiredArgument):\n log.debug(f\"A command was missing a required argument. Trace: \\n{trace}\")\n try:\n await ctx.send(\"```\\nUsage:\\n\" + ctx.command.help + \"```\")\n except discord.HTTPException:\n pass\n\n elif isinstance(error, merrors.MiltonInputError):\n # Send feedback to user\n try:\n await ctx.send(error.msg)\n except discord.HTTPException:\n pass\n\n else:\n # All other Errors not returned come here.\n # Skip the prompt line\n if \"CommandInterface\" in self.bot.cogs:\n print(\"\")\n\n log.error(f\"Ignoring exception in command {ctx.command}:\\n\" f\"{trace}\")\n\n # Re-print the handle for the CLI cog\n if \"CommandInterface\" in self.bot.cogs:\n print(\">> \", end=\"\")", "async def on_command_error(self, ctx, error):\n\n if hasattr(ctx.command, 'on_error'):\n return\n\n ignored = (commands.CommandNotFound, commands.UserInputError)\n error = getattr(error, 'original', error)\n\n if isinstance(error, ignored):\n return\n \n elif isinstance(error, commands.DisabledCommand):\n return await ctx.send(embed=bot_tools.create_simple_embed(_title='Error', _description=f'{ctx.command} has been disabled.'))\n\n elif isinstance(error, commands.NoPrivateMessage):\n try:\n return await ctx.author.send(embed=bot_tools.create_simple_embed(_title='Error', _description=f'{ctx.command} can not be used in DMs.'))\n except:\n pass\n\n\n elif isinstance(error, bot_tools.AdminCheckFailure):\n return await ctx.send(embed=bot_tools.create_simple_embed(_title='Error', _description=f'{ctx.command} can only be used by admins.'))\n\n elif isinstance(error, bot_tools.OwnerCheckFailure):\n return await ctx.send(embed=bot_tools.create_simple_embed(_title='Error', _description=f'{ctx.command} can only be used by the server owner.'))\n\n\n print('Ignoring exception in command {}:'.format(ctx.command), file=sys.stderr)\n traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)", "async def on_command_error(ctx, error):\n await send_block(\n ctx,\n \"\".join(\n traceback.format_exception(\n etype=type(error), value=error, tb=error.__traceback__\n )\n ),\n )", "async def on_command_error(self, ctx, error):\n\n # This prevents any commands with local handlers being handled here in on_command_error.\n if hasattr(ctx.command, 'on_error'):\n return\n\n ignored = (commands.CommandNotFound, commands.UserInputError)\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. 
We keep the exception passed to on_command_error.\n error = getattr(error, 'original', error)\n\n # Anything in ignored will return and prevent anything happening.\n if isinstance(error, ignored):\n return\n\n elif isinstance(error, commands.DisabledCommand):\n await ctx.send(f'{self.bot.settings.prefix}{ctx.command} has been disabled.')\n return\n\n elif isinstance(error, commands.NoPrivateMessage):\n try:\n await ctx.channel.send(f'{self.bot.settings.prefix}{ctx.command} can not be used in Private Messages.')\n except:\n pass\n return\n\n elif isinstance(error, commands.BadArgument):\n await ctx.send(f'Refer to.{self.bot.settings.prefix}help {ctx.command}')\n return\n\n elif isinstance(error, commands.BotMissingPermissions):\n missing = [perm.replace('_', ' ').replace('guild', 'server').title() for perm in error.missing_perms]\n if len(missing) > 2:\n fmt = '{}, and {}'.format(\"**, **\".join(missing[:-1]), missing[-1])\n else:\n fmt = ' and '.join(missing)\n await ctx.send(f'I need the **{fmt}** permission(s) to run this command.')\n return\n\n if isinstance(error, commands.MissingPermissions):\n missing = [perm.replace('_', ' ').replace('guild', 'server').title() for perm in error.missing_perms]\n if len(missing) > 2:\n fmt = '{}, and {}'.format(\"**, **\".join(missing[:-1]), missing[-1])\n else:\n fmt = ' and '.join(missing)\n await ctx.send(f'You need the **{fmt}** permission(s) to use this command.')\n return\n\n # All other Errors not returned come here... And we can just print the default TraceBack.\n print('Ignoring exception in command {}:'.format(ctx.command), file=sys.stderr)\n traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)", "async def on_command_error(self, ctx: Context, e: errors.CommandError) -> None:\n command = ctx.command\n\n if hasattr(e, \"handled\"):\n log.trace(f\"Command {command} had its error already handled locally; ignoring.\")\n return\n\n debug_message = (\n f\"Command {command} invoked by {ctx.message.author} with error \"\n f\"{e.__class__.__name__}: {e}\"\n )\n\n if isinstance(e, errors.CommandNotFound) and not getattr(ctx, \"invoked_from_error_handler\", False):\n if await self.try_silence(ctx):\n return\n if await self.try_run_fixed_codeblock(ctx):\n return\n await self.try_get_tag(ctx) # Try to look for a tag with the command's name\n elif isinstance(e, errors.UserInputError):\n log.debug(debug_message)\n await self.handle_user_input_error(ctx, e)\n elif isinstance(e, errors.CheckFailure):\n log.debug(debug_message)\n await self.handle_check_failure(ctx, e)\n elif isinstance(e, errors.CommandOnCooldown | errors.MaxConcurrencyReached):\n log.debug(debug_message)\n await ctx.send(e)\n elif isinstance(e, errors.CommandInvokeError):\n if isinstance(e.original, ResponseCodeError):\n await self.handle_api_error(ctx, e.original)\n elif isinstance(e.original, LockedResourceError):\n await ctx.send(f\"{e.original} Please wait for it to finish and try again later.\")\n elif isinstance(e.original, InvalidInfractedUserError):\n await ctx.send(f\"Cannot infract that user. 
{e.original.reason}\")\n else:\n await self.handle_unexpected_error(ctx, e.original)\n elif isinstance(e, errors.ConversionError):\n if isinstance(e.original, ResponseCodeError):\n await self.handle_api_error(ctx, e.original)\n else:\n await self.handle_unexpected_error(ctx, e.original)\n elif isinstance(e, errors.DisabledCommand):\n log.debug(debug_message)\n else:\n # ExtensionError\n await self.handle_unexpected_error(ctx, e)", "async def on_command_error(\n self,\n ctx: commands.Context,\n error: commands.CommandError\n ):\n # Skips errors that were already handled locally.\n if getattr(ctx, 'handled', False):\n return\n\n if isinstance(error, commands.NoPrivateMessage):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`Command Can Not Be Used In Direct Messages`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.TooManyArguments):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`Passed In Too Many Arguments`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.NSFWChannelRequired):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`This Channel Is Not NSFW`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.CommandNotFound):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`Not Found`', #Todo - Possibly remove this\n color=0xFF0000 #Because its kinda annoying ngl\n )\n await ctx.send(embed=embed)\n \n elif isinstance(error, discord.Forbidden):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`Discord Is Restricting Command Execution`',\n color=0xFF0000\n )\n embed.add_field(\n name='Possiblities',\n value='`You Are Trying To Use This Command On Someone Who Is Higher Than Either The Bot Or You`',\n inline=True\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.MissingRequiredArgument):\n embed = discord.Embed(\n title='Oops!',\n description=f'Command Failed To Execute. Reason:\\n`Missing Required Argument:`\\n`{error.param.name}`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif (\n isinstance(error, commands.NotOwner)\n or isinstance(error, commands.MissingPermissions)\n ):\n embed = discord.Embed(\n title='Oops',\n description='Command Failed To Execute. Reason:\\n`Missing Permissions`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif (\n isinstance(error, commands.CommandOnCooldown)\n or isinstance(error, commands.CheckFailure)\n ):\n embed = discord.Embed(\n title='Oops',\n description='Command Failed To Execute. Reason\\n```{error}```',\n color=0xFF0000\n ) \n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.DisabledCommand): #SoonTM\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`Command Is Disabled`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.BadArgument):\n embed = discord.Embed(\n title='Oops!',\n description=f'Command Failed To Execute. Reason:\\n`Bad Argument`\\n```{error}```',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.BotMissingPermissions):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. 
Reason:\\n`Bot Is Missing Permissions`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n log.error(\n f'{ctx.command.qualified_name} cannot be executed because the '\n f'bot is missing the following permissions: '\n f'{\", \".join(error.list)}'\n )\n\n elif isinstance(error, commands.CommandInvokeError):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`INTERNAL ERROR`',\n color=0xFF0000 \n )\n embed.set_footer(text='Please Contact Tylerr#6979 For Help')\n await ctx.send(embed=embed)\n log.error(\n f'{ctx.command.qualified_name} failed to execute. ',\n exc_info=error.original\n )", "async def cog_command_error(self, ctx: Context, error: CommandInvokeError):\n if isinstance(error.original, NoRolesError):\n await error.original.handle_error(ctx)\n else:\n await super().cog_command_error(ctx, error)", "def bcp_error(self, **kwargs):\n self.log.warning('Received error command from client')", "def handle_error(self, api, command):\n return self.handle_log(api, command, level=logging.ERROR)", "def command_error(fmt, *args, **kwargs):\n raise CommandError(fmt.format(*args, **kwargs))", "def error(self, msg):\n self.send_command('error', {\n 'msg': msg,\n })", "def error_received(self, exc):\n print('Error received:', exc)", "async def on_command_error(ctx: commands.Context, error: commands.CommandError):\n if isinstance(error, commands.CommandNotFound):\n message = f\"This command is not listed in {bot.user} dictionary. Please try again.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n # return # Return because we don't want to show an error for every command not found\n elif isinstance(error, commands.CommandOnCooldown):\n message = f\"This command is on cooldown. Please try again after {round(error.retry_after, 1)} seconds.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n elif isinstance(error, commands.MissingPermissions):\n message = \"You are missing the required permissions to run this command!\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n elif isinstance(error, commands.NoPrivateMessage):\n message = \"Private messages only. How cute.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n elif isinstance(error, commands.MissingRequiredArgument):\n message = \"Command is missing an argument. Try again.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n elif isinstance(error, commands.CheckFailure):\n message = \"You do not have the permissions to do this.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n elif isinstance(error, (commands.MissingRole, commands.MissingAnyRole)):\n message = \"You don't have any role to run this command.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n else:\n message = \"Oh no! 
Something went wrong while running the command!\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)", "def error_check(command):\r\n\r\n # TODO\r", "def handle_invalid_command(self, msg):\n return self.create_response(Command.INVALID_COMMAND.value)", "async def cog_command_error(self, ctx: Context, error: Exception) -> None:\n if isinstance(error, InWhitelistCheckFailure):\n error.handled = True", "async def cog_command_error(self, ctx:utils.Context, error:commands.CheckFailure):\n\n # Throw errors properly for me\n if ctx.author.id in self.bot.config['owners']:\n text = f'```py\\n{error}```'\n await ctx.send(text)\n raise error\n\n elif isinstance(error, commands.NotOwner):\n await ctx.send(\"You need to be registered as an owner to run this command.\")\n return", "def error(self, msg):\n fullmsg = \"Subcommand '%s': %s\\n%s\" % (self.tool.get_command(), msg,\n self.get_usage_command())\n raise SBToolError(fullmsg, True)", "async def on_error(self, event: str, error: Exception, *args, **kwargs):\n print(f\"Ignoring exception in {event}\", file=sys.stderr)\n traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)", "def error(self, error):\n pass", "def error_callback(error_message, command):\n print(\"ERROR: \" + error_message)\n print(\"Sending the command \\\"retry in 10 seconds\\\"...\")\n command.try_again(10)\n print(\"Done!\")", "def error(self, *args, **kwargs):", "def OnError(self, error):\r\n\t\tLogErr(error)\r\n#\t\tself.Shutdown()\r\n\t\tself.onError()", "def bcp_receive_error(self, client, **kwargs):\n self.log.warning('Received Error command from host with parameters: %s',\n kwargs)", "def on_error(self, event: ThreadResult):\n if self._on_error is not None:\n self._on_error(event.data)", "def error(self, msg):\n from django.core.management import CommandError\n\n raise CommandError(msg)", "def error(self, msg, *args, **kwargs):\n pass", "def _on_error(self, error):\n print(error + \" for \" + self.session_name)", "def _call_error_handler(self, event, err, **kwargs):\n if self._on_error_handler:\n event.error = str(err)\n event.origin_state = self.fullname\n return self._on_error_handler(event)", "def errReceived(self, data):\n log.msg(\"Error output from process: \" + data,\n isError=True)", "def onentererror(self, event):\n print('onentererror; event: %s, %s->%s' % (event.event, event.src, event.dst))", "def add_error_event(self, obj, event, *args):\n\t\thid = obj.connect(event, self._err_emited, *args)\t\n\t\tself.handlers_id.append(hid)", "async def handle_unexpected_error(\n self, ctx: commands.Context, error: commands.CommandError\n ) -> None:\n await ctx.send(\n f\"Sorry, an unexpected error occurred. 
Please let us know!\\n\\n\"\n f\"```{error.__class__.__name__}: {error}```\"\n )\n\n push_alert = Embed(\n title=\"An unexpected error occurred\",\n color=Colours.soft_red,\n )\n push_alert.add_field(\n name=\"User\",\n value=f\"id: {ctx.author.id} | username: {ctx.author.mention}\",\n inline=False,\n )\n push_alert.add_field(\n name=\"Command\", value=ctx.command.qualified_name, inline=False\n )\n push_alert.add_field(\n name=\"Message & Channel\",\n value=f\"Message: [{ctx.message.id}]({ctx.message.jump_url}) | Channel: <#{ctx.channel.id}>\",\n inline=False,\n )\n push_alert.add_field(\n name=\"Full Message\", value=ctx.message.content, inline=False\n )\n\n dev_alerts = self.bot.get_channel(Channels.devalerts)\n if dev_alerts is None:\n logger.info(\n f\"Fetching dev-alerts channel as it wasn't found in the cache (ID: {Channels.devalerts})\"\n )\n try:\n dev_alerts = await self.bot.fetch_channel(Channels.devalerts)\n except discord.HTTPException as discord_exc:\n logger.exception(\"Fetch failed\", exc_info=discord_exc)\n return\n\n # Trigger the logger before trying to use Discord in case that's the issue\n logger.error(\n f\"Error executing command invoked by {ctx.message.author}: {ctx.message.content}\",\n exc_info=error,\n )\n await dev_alerts.send(embed=push_alert)", "async def handle_unexpected_error(ctx: Context, e: errors.CommandError) -> None:\n await ctx.send(\n f\"Sorry, an unexpected error occurred. Please let us know!\\n\\n\"\n f\"```{e.__class__.__name__}: {e}```\"\n )\n\n ctx.bot.stats.incr(\"errors.unexpected\")\n\n with push_scope() as scope:\n scope.user = {\n \"id\": ctx.author.id,\n \"username\": str(ctx.author)\n }\n\n scope.set_tag(\"command\", ctx.command.qualified_name)\n scope.set_tag(\"message_id\", ctx.message.id)\n scope.set_tag(\"channel_id\", ctx.channel.id)\n\n scope.set_extra(\"full_message\", ctx.message.content)\n\n if ctx.guild is not None:\n scope.set_extra(\n \"jump_to\",\n f\"https://discordapp.com/channels/{ctx.guild.id}/{ctx.channel.id}/{ctx.message.id}\"\n )\n\n log.error(f\"Error executing command invoked by {ctx.message.author}: {ctx.message.content}\", exc_info=e)", "def _handle_error(self, errno, msg):\n if self.error_callback != None:\n #Call the error callback but expect failure.\n try:\n self.error_callback(errno, msg, self.rpcclient)\n except Exception as ex:\n self.log.failure(\"Error in error handler for '{cmd!r}'.\",cmd=self.command)\n else:\n #If no handler is set, all we do is log.\n self.log.error(\"Notice: no on_error defined for '{cmd!r}, command result: {msg!r}\",cmd=self.command,msg=msg)", "def _err_emited(self, *args):\n\t\tdebug(\"OnEventDeferred : err event catched\")\n\t\tself.errback(*args)\n\t\tself._clean()", "def shell_fail_server(self, cmd):\n self.shell_cmd = cmd\n raise ConnectionResetError", "def repo_error(self, repo_id, error):\n self.send(repo_id, 'repo_error', error)", "def error(self, *args, **kwargs):\n if len(args) == 3:\n print(f\"ERROR: {args[1]}\")\n else:\n print(f\"ERROR: {args[0]}\")", "def error(self):\n ...", "def error(self, message):\n ErrorExit('error: {}\\n'.format(message), 2)", "def error(self, msg):\n vim.command('call pymode#error(\"%s\")' % str(msg))", "def failure_callback(self):\n error_filename = self.run_dir / \"eplusout.err\"\n if error_filename.exists():\n with open(error_filename, \"r\") as stderr:\n stderr_r = stderr.read()\n self.exception = EnergyPlusProcessError(\n cmd=self.cmd, stderr=stderr_r, idf=self.idf\n )\n self.cleanup_callback()", "def onError(self, stanza):\n errorNode = 
stanza.get_error()\n if self.verbose:\n print( \"error type = %s\"%errorNode.get_type() )\n print( \"error message = %s\"%errorNode.get_message() )\n self.disconnect()\n raise RuntimeError", "def error(self, handler):\n pass", "def hook_notifyerror(self,msg,subsystem=None):\n ui.notifyerror(msg,subsystem)", "async def ticker_error(ctx, error):\n print(error)\n if isinstance(error, commands.UserInputError):\n await ctx.send(\"Invalid input.\")\n else:\n await ctx.send(\"Oops, something bad happened..\")", "def errReceived(self, data):\n log.msg('err: %s' % data)", "def err(self, text):\n if not self.is_quiet_err:\n self.__emit(\"ERROR: \" + text, sys.stderr)", "def on_processing_error(self, event, context, exc):\n pass", "def fail(msg):\n\n # Not sure if simply raising the exception is clearer.\n raise CommandFailed(msg)", "def handle_err(self):\n pass", "def precmd_hook_exception(self, data: plugin.PrecommandData) -> plugin.PrecommandData:\n self.called_precmd += 1\n raise ValueError", "def error(self):\n pass", "def error(self, message, **args):\n\t\terror_message = Utils.boldCode() + \"Error: \" + Utils.normalCode() + message\n\t\t\n\t\tif args.has_key(\"target\"):\n\t\t\tself.sendMessage(args[\"target\"], error_message)\n\t\t\t\n\t\tif args.has_key(\"console\"):\n\t\t\tif args[\"console\"]:\n\t\t\t\tprint self.errorTime(), \"<ERROR>\", Utils.stripCodes(message)\n\t\telse:\n\t\t\tprint self.errorTime(), \"<ERROR>\", Utils.stripCodes(message)", "def errFunc(runType):\n logger.error('Execution type not recognized! {}'.format(runType))\n raise InvalidExecutionType('{} is not a valid command'.format(runType))", "def error(self, *args):\n\n if self.is_on(_Log.ERROR):\n self._write(self._err, *args)", "async def eval_error(self, ctx, error):\n if isinstance(error, commands.CheckFailure):\n embed = discord.Embed(title=\"You do not have permission to use eval\", colour=RED)\n else:\n cmd = ctx.message.content.split(\" \", maxsplit=1)[1].strip(\"` \")\n trace = format_traceback(traceback.format_exception(type(error), error, error.__traceback__))\n embed = discord.Embed(title=\"Evaluation\", description=f\"**Error**\\n```python\\n{trace}```\", colour=RED)\n embed.add_field(name=\"Input\", value=f\"```python\\n{cmd}\\n```\", inline=False)\n embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.avatar_url)\n await ctx.message.delete()\n await ctx.send(embed=embed)", "def _unhandled(self, context, message, reason):\r\n # TODO: call host's method instead\r\n self._host.unhandled.append((context.str, message.serialize(), reason))\r\n self._host.expected[context.str] = None\r\n eprint(\"{}: Command {} can't be handled due to {}\".format(self._host.name, message.serialize(), reason))", "def handle_build_error(error):\n sys.stderr.write('Error running command `%s`. 
Returned %s.\\n' % (\n ' '.join(error.argv), str(error.error_code)))", "def error(self):\n return self._decorator_wrapper(EventName.error)", "def error(self, msg):\n\n self(msg, ERROR)", "def command_failed_error(cmd):\n\n output_1 = colored(' - Error: Failed to run command ', 'red')\n output_2 = command(cmd)\n return output_1 + output_2 + '\\n'", "def on_error(self, callback):\n self.error_callback = callback", "def shell_fail_python(self, cmd):\n self.shell_cmd = cmd\n raise AttributeError", "def error(ctx, flow):\n ctx.log(\"error\")", "def test_unknown_command(self):\n\n self.assertRaises(commands.CommandNotFoundError,\n self.commands.run_command,\n '<unknown_command>', '')", "def standard_error_handler(error_function):\n\n async def wrapper(cls, ctx, error):\n\n extra = f\"\\n\\nSee the help message for more information.\"\n\n # This prevents any commands with local handlers being handled here\n if hasattr(ctx.command, \"on_error\"):\n return\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. We keep the exception passed to on_command_error.\n error = getattr(error, \"original\", error)\n\n ignored = (commands.CommandNotFound,)\n\n # Anything in ignored will return and prevent anything happening.\n if any([isinstance(error, i) for i in ignored]):\n return\n\n if isinstance(error, DisabledCommand):\n await pretty_print(\n ctx, \"This command is disabled!\", title=\"Error\", color=ERROR_COLOR\n )\n\n elif isinstance(error, MemberNotFound):\n await pretty_print(\n ctx,\n str(error) + \"\\nNote: this command is case-sensitive.\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, RoleNotFound):\n await pretty_print(\n ctx,\n str(error) + \"\\nNote: this command is case-sensitive.\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, NoPrivateMessage):\n await pretty_print(\n ctx,\n \"This command cannot be run in a private message.\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, PrivateMessageOnly):\n try:\n await ctx.message.delete()\n extra += \"\\nYour message has been deleted\"\n except:\n print(\"Could not delete message\")\n await pretty_print(\n ctx,\n \"This command should be run in a Private Message only!\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, MissingRole):\n await pretty_print(\n ctx, str(error) + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, IllegalRole):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, CheckFailure):\n await pretty_print(\n ctx,\n \"Could not run command, do you have sufficient permissions in this channel?\"\n + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, BadArgument):\n await ctx.send_help(ctx.command)\n await pretty_print(\n ctx,\n \"Could not run command, is it formatted properly?\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, MissingRequiredArgument):\n await ctx.send_help(ctx.command)\n await pretty_print(\n ctx, \"Missing required arguments\", title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, BadUnionArgument):\n await ctx.send_help(ctx.command)\n await pretty_print(\n ctx,\n \"Invalid argument\",\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, WalletNotVerified):\n await 
pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, InvalidCoin):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, RequestError):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n elif isinstance(error, FatalError):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n await error_function(cls, ctx, error)\n\n return wrapper", "def call_error():\r\n print(\"Error in input format.\")\r\n sys.exit()", "def _error_check(self, command_response):\n error_list = command_response.find(\"./clierror\")\n command_obj = command_response.find(\"./input\")\n if error_list is not None:\n command = command_obj.text if command_obj is not None else \"Unknown command\"\n msg = etree.tostring(error_list).decode()\n raise NXAPICommandError(command, msg)", "def get_unknown_argument_error(self, arg):\n return \"%s\\n%s\" % (textwrap.fill(\"Unknown command: '%s'.\" % (arg)),\n self.get_usage_command())", "def on_error(data):\n print('Market Data Error', data)", "def whenException(self, channel, call):", "def test_cmd_error(self):\n task = Task(\"uid\", False, False, \"does_not_exist\", None, \".\")\n task._checkpoint_dir = tmp_checkpoint_dir()\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's command:.*\"):\n task.run()\n task.shell = True\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's command:.*\"):\n task.run()\n task._dry_run = True\n task.run() # No longer raises RuntimeError", "def process_dead_command(self):\n command_terminal = adapter_serializers.CommandTerminal(data=self.command)\n if not command_terminal.is_valid():\n logger.error('Receive an invaid data : {}'.format(command_terminal.format_errors()))\n raise natrix_exceptions.TriggerBugException(\n message=u'command is invalid: {}'.format(command_terminal.format_errors())\n )\n\n if not command_terminal.process():\n # TODO:\n logger.error('failed')\n else:\n logger.info('success')", "def _error(self, *args, **kwargs):\n print(\"[{}]\".format(self.type), *args, file=sys.stderr, **kwargs)\n sys.exit(1)", "def command_error(self, message):\n if message == 'error-cannot-find-dest':\n reply = QMessageBox.warning(\n self, 'Feed Checker',\n self.translate('MainWindow', '''An error occurred.'''),\n QMessageBox.Ok)\n if reply == QMessageBox.Ok:\n self.text_info.setText('ready...')", "def error(self, message):\n sys.stderr.write('error: %s\\n' % message)\n self.print_help()\n sys.exit(2)", "def execute(arg):\n print('Invalid command!!!')\n return", "def error(self, message):\n self.exit(2, f\"Input error: {message}\\n\")", "def indicate_error(self):\n pass", "def cmd_error_private(self, argument):\n if self.is_admin:\n if argument.isdigit():\n n = min(int(argument), 50)\n else:\n n = 5\n with open(\"log/errors.log\") as f:\n lines = f.readlines()\n err = \"\".join(lines[-n:]).rstrip()\n if err:\n self.send(self.target, \"%s\", err)\n else:\n self.logger.warning(\"User %s tried to use '%s' without being admin\" % (self.nick, \"error\"))", "def onError(self, error):\n log.err(\"Encountered an error: {0}\".format(\n error.getErrorMessage()))\n return error", "def on_error(self, exception):\n traceback.print_exc()", "def error_callback(bot, update, error):\n if isinstance(error, TelegramError):\n raise error # raise it for more sentry verbosity", "def __empty_error_event_handler(self, 
govee, device, message, exception):\n\n pass", "def invalid_command(response): # string -> interaction\r\n print(\"Sorry; '\" + response + \"' isn't a valid command. Please try again.\")", "def error(self, *args, **kwargs):\n self.msg(logging.ERROR, *args, **kwargs)", "def error(self, *args):\n self.mylog.error(*args)", "def error(self, *args, **kwargs):\n\n message = self.get_message(*args, **kwargs)\n self.logger.error(message)", "def on_job_error(\n self,\n scheduler: plugin_jobs.Scheduler,\n job: tools_jobs.Job,\n exc: BaseException,\n ):\n self.error(exception=exc)" ]
[ "0.7980478", "0.7709615", "0.76937354", "0.7669524", "0.7622439", "0.7463245", "0.7443354", "0.7433384", "0.7432164", "0.7427173", "0.74214035", "0.74195987", "0.72977245", "0.72538507", "0.7153273", "0.714725", "0.6872951", "0.681055", "0.68062097", "0.6620473", "0.66178834", "0.65958995", "0.6538409", "0.65300375", "0.65274066", "0.6514418", "0.6512693", "0.64909697", "0.6490168", "0.6481927", "0.6443379", "0.6414948", "0.6412767", "0.63499326", "0.633019", "0.63300395", "0.6329566", "0.631908", "0.63114405", "0.63006836", "0.6261883", "0.6261301", "0.6259427", "0.6257654", "0.62535655", "0.62534106", "0.6206308", "0.62006664", "0.6190097", "0.6157569", "0.6146077", "0.6142537", "0.61114705", "0.6106709", "0.6096281", "0.60628927", "0.60528034", "0.6048866", "0.6044363", "0.6036689", "0.60247666", "0.60156476", "0.6014129", "0.60070926", "0.5984747", "0.5967603", "0.5959421", "0.59440476", "0.5943071", "0.5942597", "0.59379023", "0.59301496", "0.59262645", "0.5919522", "0.59121263", "0.59102833", "0.5901898", "0.58945394", "0.58860016", "0.5867803", "0.5866207", "0.5864844", "0.5863488", "0.5855576", "0.5849066", "0.58480555", "0.5838361", "0.58350897", "0.5828545", "0.582679", "0.58247286", "0.58116704", "0.57997227", "0.5797798", "0.5797014", "0.5794179", "0.5786506", "0.5780798", "0.5773253", "0.57513726" ]
0.72716826
13
Simulates PARALLEL_UNIVERSES_COUNT universes, then returns the overall multiverse survival of the player
def compute_player_score():
    progress_bar = ProgressBar(label="Computing universes")
    survivals_count = 0
    for i in range(PARALLEL_UNIVERSES_COUNT):
        if simulate_universe():
            survivals_count += 1
        progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)
    progress_bar.end("\n\n")
    return survivals_count / PARALLEL_UNIVERSES_COUNT
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_god_score():\n\n survivals_count = 0\n for _ in range(PARALLEL_UNIVERSES_COUNT):\n best_survival = random.uniform(MIN_DISEASE_SURVIVAL, MAX_DISEASE_SURVIVAL)\n for _ in range(random.randint(MIN_TREATMENTS_COUNT, MAX_TREATMENTS_COUNT)):\n treated_survival = random.uniform(MIN_TREATED_SURVIVAL, MAX_TREATED_SURVIVAL)\n if treated_survival > best_survival:\n best_survival = treated_survival\n if random.uniform(0, 1) <= best_survival:\n survivals_count += 1\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT", "def simulate_universe():\n\n # untreated_survival is the probability to survive if not treated\n # this is an exact law of the universe, the player will not have this information\n untreated_survival = random.uniform(MIN_DISEASE_SURVIVAL, MAX_DISEASE_SURVIVAL)\n\n trials: list[Trial] = []\n\n treated_survivals: dict[Trial, float] = {}\n\n for _ in range(random.randint(MIN_TREATMENTS_COUNT, MAX_TREATMENTS_COUNT)):\n group_size = random.randint(MIN_GROUP_SIZE, MAX_GROUP_SIZE)\n\n # treated_survival is the probability to survive if treated\n # this is an exact law of the universe, the player will not have this information\n # therefore it is stored in a separate dict and not in the given-to-player Trial object\n treated_survival = random.uniform(MIN_TREATED_SURVIVAL, MAX_TREATED_SURVIVAL)\n\n trial = Trial(group_size, untreated_survival, treated_survival)\n\n trials.append(trial)\n treated_survivals[trial] = treated_survival\n\n chosen_trial = playground.choose_trial(trials)\n\n if chosen_trial is None: # None means no treatment\n chosen_survival = untreated_survival\n else:\n chosen_survival = treated_survivals[chosen_trial]\n\n return random.uniform(0, 1) <= chosen_survival", "def calculate_survivors(self, planet=None):\n mage_life = 0\n if self.magos:\n\n # Calculate survivors\n mage_life = sum(self.race.rango_vid_mago) // 2\n survivors = min(self.vida / mage_life, len(self.magos))\n if planet:\n planet.magos = survivors\n\n # Kill off the dead and improve the survivors\n shuffle(self.magos)\n [self.magos.pop() for i in range(len(self.magos) - survivors)]\n for m in self.magos:\n m = mage(m.ataque + 5, m.vida + 10)\n\n if self.soldados:\n\n # Calculate survivors\n soldier_life = sum(self.race.rango_vid_soldado) // 2\n survivors = self.vida - len(self.magos)*mage_life\n survivors //= soldier_life\n if planet:\n planet.soldados = survivors\n\n # Kill off the dead and improve the survivors\n shuffle(self.soldados)\n [self.soldados.pop()\n for i in range(len(self.soldados) - survivors)]\n for s in self.soldados:\n s = soldier(s.ataque + 5, s.vida + 10)", "def tournament_selection(self, population: List[IndividualType]) -> List[IndividualType]:\n survivors: List[IndividualType] = []\n for _ in range(self.configuration.n_survivors):\n # Choose participants\n rooster: List[IndividualType] = random.sample(population, self.configuration.rooster_size)\n # Select fittest of participants as survivor\n fittest_individual_of_rooster = self.get_best_individual(rooster)\n population.remove(fittest_individual_of_rooster)\n survivors.append(fittest_individual_of_rooster)\n return survivors", "def college_selectivity():", "def child_U(self):\n return math.sqrt(self.number_visits) * self.child_priors / (\n 1 + self.child_number_visits)", "def playersVehicleScoring(self):\n self.__playersDriverNum()\n return self.Rf2Scor.mVehicles[self.__playersDriverNum()]", "def __population_quality(self) -> float:\n population_identifier = np.zeros(shape=self.Dataset.size)\n subgroup_identifier = 
np.ones(shape=len(self.get_cover()))\n group = np.concatenate((population_identifier,\n subgroup_identifier))\n\n subgroup_times = self.Dataset.survival[self.get_cover()]\n subgroup_status = self.Dataset.status[self.get_cover()]\n\n time = np.concatenate((self.Dataset.survival, subgroup_times))\n status = np.concatenate((self.Dataset.status, subgroup_status))\n\n _, pvalue = sm.duration.survdiff(time, status, group)\n return 1 - pvalue", "def survived(self,rates):\n # Juvenile or adult survival\n if self.age > 3:\n prob = rates[1]\n else:\n prob = rates[0]\n if np.random.binomial(1,prob):\n return True\n else:\n return False", "def simulation(self, n = 42):\n\n self.initialisation()\n i = 0\n while i < n and self.agent.vivant :\n self.step()\n i+= 1\n return self.perfGlobale", "def vote_of_citizens():\n\tglobal vote_first_candidate\n\tglobal vote_second_candidate\n\tglobal blank_vote\n\t\n\tfor i in range(NUMBER_OF_CITIZENS):\n\t\tvote = random.randint(1,10)\n\n\t\tif(vote <= 3):\n\t\t\tvote_first_candidate+=1\n\t\telif(vote > 3 and vote <= 6):\n\t\t\tvote_second_candidate+=1\n\t\telse:\n\t\t\tblank_vote+=1", "def p(party, vote_count, s):\n return t(party, vote_count) / d(s)", "def vi_get_county_with_max_voting(self):\r\n\r\n # fetch voting information from SCB database\r\n self.vi_fetch_voting_info_from_scb()\r\n for voting_year, voting_data in self.voting_information.items():\r\n max_percent = 0.0\r\n county_with_max_percent = ''\r\n for county, percentage in voting_data.items():\r\n if percentage > max_percent:\r\n max_percent = percentage\r\n county_with_max_percent = county\r\n self.vi_log_msg(voting_year, county_with_max_percent, max_percent)", "def vis_survival_stats(data, outcomes, feature):\n pass", "def test_repetition_vector():\n cosimulations = [\n example.control.gauss_seidel(1., 5., 1.),\n example.control.gauss_seidel(1., 5., 1., True),\n example.control.gauss_seidel(1., 5., 1., True, True),\n example.control.gauss_seidel(1., 5., 1., False, True),\n example.control.gauss_jacobi(1., 5., 1.),\n example.control.multi_rate(1., 5., 1.),\n ]\n for cosimulation in cosimulations:\n sdfg = cs.convert_to_sdf(cosimulation)\n schedule = sdf.calculate_schedule(sdfg)\n network, hs, _, _ = cosimulation\n _, connections = network\n repetitions = cs.repetition_vector(connections, hs)\n for agent in sdfg[0]:\n assert sum(agent == executed for executed in schedule) == repetitions[agent]", "def survivors_selection(self):\n q = 5\n new_population = []\n for i in range(self._population_size):\n batch = []\n for j in range(q):\n r = random.randint(0, (self._child2population_ratio + 1) * self._population_size - 1)\n if r < self._population_size:\n batch.append(self._population[r])\n else:\n batch.append(self._children[r - self._population_size])\n new_population.append(self.select_best(batch))\n\n self._population = new_population", "def getRandomMinorCiv(self):\n\t\t\n\t\treturn con.iIndependent + gc.getGame().getSorenRandNum(iNumMinorPlayers, 'Random minor civilization')", "def mi_pressure_vessel(self, u):\n assert len(u) == 4, 'MI Pressure vessel design needs to specify 4 parameters.'\n R = u[0]\n L = u[1]\n ts = u[2]\n th = u[3]\n fitness = 0.6224 * R * ts * L + 1.7781 * R ** 2 * th + 3.1611 * ts ** 2 * L + 19.8621 * R * ts ** 2\n return fitness", "def survival_score(timeSurvived, duration, winPlace):\n\n\tsurvival = (timeSurvived / duration) * 100\n\tif winPlace == 1:\n\t\twin_place = 100\n\telse:\n\t\twin_place = 100 - winPlace\n\tsurvival_score = int(survival * 0.8 + win_place * 0.2)\n\tif 
survival_score < 50:\n\t\tsurvival_score = 50\n\n\treturn survival_score", "def suicide_query(game_mode=0, observability=-1, game_seed=-1, agent=-1):\n\n event_id = \"death\"\n\n # Keep only those games within given configuration\n if game_seed != -1:\n selection = data.loc[(data['game_mode'] == game_mode) & (data['observability'] == observability) &\n (data['game_seed'] == game_seed)]\n else:\n selection = data.loc[(data['game_mode'] == game_mode) & (data['observability'] == observability)]\n if agent != -1:\n for index, row in selection.iterrows():\n if agent not in row[\"agents\"]:\n selection.drop(index, inplace=True)\n\n # print(selection.size)\n\n team_kill_count = []\n ngames = 0 # Number of games in which this agent dies\n suicides = 0 # Number of games in which this agent commits suicide\n events_per_sample = []\n team_kills = 0\n\n # Iterate through selected game data\n for index, row in selection.iterrows():\n if agent in row[\"agents\"] and row['event_id'] == event_id: # This agent played in the game\n\n # Find its agent ID depending on its position in the agent list. There may be more than 1 agent of this\n # type in the game, so iterate over all and check individually.\n ll = row[\"agents\"]\n indices = [i for i, el in enumerate(ll) if el == agent]\n\n for agent_id in indices:\n # teammate = (agent_id + 2) % 4\n sample_event_counter = 0\n for event in row[\"event_data\"]:\n if event[\"agent_id\"] == agent_id: # This agent dies\n if event[\"killer\"] == agent_id: # Suicide\n sample_event_counter += 1\n # if event[\"killer\"] == teammate: # Killed by teammate\n # team_kills += 1\n # if event[\"agent_id\"] == teammate: # Teammate dies\n # if event[\"killer\"] == agent_id: # Killed by this agent\n # team_kill_count += 1\n ngames += 1\n events_per_sample.append(sample_event_counter)\n suicides += sample_event_counter\n\n # suicide_count.append(100*suicides/ngames) # Showing percentage of game suicides\n # team_kill_count.append(100*team_kills/games)\n\n # percentage = 100 * suicides / ngames\n # mean = ngames * (percentage / 100)\n # variance = mean * (1 - (percentage / 100))\n # std_dev = math.sqrt(variance)\n # std_err = std_dev / math.sqrt(ngames)\n # h = std_err * scipy.stats.t.ppf(1.95 / 2., ngames - 1) # 95 confidence interval\n # return percentage, h\n\n # print(events_per_sample)\n mean = suicides/ngames\n variance = sum([pow(x - mean, 2) for x in events_per_sample])/len(events_per_sample)\n std_dev = math.sqrt(variance)\n std_err = std_dev/math.sqrt(len(events_per_sample))\n h = std_err * scipy.stats.t.ppf(1.95 / 2., ngames - 1) # 95% confidence interval\n return mean * 100, h * 100 # , team_kill_count", "def test_josephus_survivor(self):\n\n allure.dynamic.title(\"Testing josephus_survivor function\")\n allure.dynamic.severity(allure.severity_level.NORMAL)\n allure.dynamic.description_html('<h3>Codewars badge:</h3>'\n '<img src=\"https://www.codewars.com/users/myFirstCode'\n '/badges/large\">'\n '<h3>Test Description:</h3>'\n \"<p>In this kata you have to verify that the function \"\n \"correctly returns who is the \\\"survivor\\\", ie: the \"\n \"last element of a Josephus permutation.</p>\")\n\n test_data = [\n ((7, 3), 4),\n ((11, 19), 10),\n ((1, 300), 1),\n ((14, 2), 13),\n ((100, 1), 100)\n ]\n\n for test_data, expected in test_data:\n n = test_data[0]\n k = test_data[1]\n result = josephus_survivor(n, k)\n\n with allure.step(\"Enter test data (n: {}, k: {}) and verify \"\n \"the output ({}) vs expected ({})\".format(n,\n k,\n result,\n expected)):\n 
print_log(n=n,\n k=k,\n result=result,\n expected=expected)\n\n self.assertEqual(expected,\n result)", "def survivalIntegral(self):\n if self.isReplicateGroup():\n # here we average over the underlying replicates\n si=numpy.zeros([len(self.activeChildWellIndices())])\n\n allstatuses=StatusMessage()\n statuses=StatusMessage()\n i=0\n for clstc in self.activeChildWells():\n si[i], sivar, status = clstc.survivalIntegral()\n if status is not None:\n allstatuses.addStatus(status)\n if status is not None and days is not None:\n statuses.addStatus(status)\n i+=1\n\n idcs=numpy.isnan(si)\n if numpy.all(idcs):\n allstatuses.addStatus(self.initDiffStatus)\n return None, None, allstatuses\n\n simean, sivar = maskedArrayToMeanVar(si, ddof=1)\n\n statuses.addStatus(self.initDiffStatus)\n return simean, sivar, statuses\n\n days, viability, viabilityvar, initDiffStatus=self.viability()\n si=None\n if viability is not None and days is not None:\n si=numpy.trapz(viability,x=days)\n return si,None,None", "def count_vario(dist_param, picker_param):\n orig = '/home/zby/MAGISTERKA/MGR/results/oryginal.clustered.t'\n cl_orig = read_clustered(orig)\n name_tag = ''\n ndist = dist_param[1:]\n npick = picker_param[1:]\n for index in drange(5, 20, 0.5):\n name_tag = \"{}_{}_{}\".format(index, npick, ndist)\n try:\n clust2 = read_clustered(tfidf_name('merged.stem{}.stop.clustered.t', name_tag))\n except:\n print(\"no data for {}\".format(name_tag))\n continue\n var, norm = variation_of_information(cl_orig, clust2)\n print(\" {} VOI is {}\".format(name_tag, norm))", "def get_team_results(usrs, sched):\t\n\t\n\ttotal_consistency = 0\n\ttotal_completion = 0\n\tfor user in usrs:\n\t\tresult = get_consistency(user, sched)\n\t\t\n\t\ttotal_consistency += result[\"consistency\"]\n\t\ttotal_completion += result[\"completion\"]\n\t\n\tteam_consistency = 0\n\tteam_completion = 0\n\t\t\n\tif(len(usrs) != 0):\n\t\tteam_consistency = total_consistency / float(len(usrs))\n\t\tteam_completion = total_completion / float(len(usrs))\n\t\t\n\treturn { \"consistency\" : team_consistency, \"completion\" : team_completion }", "def CRIT_U(p,tv): \n rho=RHO(p,tv)\n # Note: 144 = 2x 72 N; 0.3 = 0.6 drag coef * surface area 0.5 m**2\n uc=np.power(np.divide(144,np.multiply(rho,0.3)),0.5)\n \n return uc", "def user_interaction_score(uv, recommended_News, ranked=True):\n\n iv = recommended_News[\"topical_vector\"]\n\n product = simple_doct_product(uv, iv)\n\n epsilon = 10e-5\n\n if (product + epsilon) > 1.0:\n vui = 0.99\n else:\n vui = beta_distribution(product)\n\n # Awared preference\n ita = beta_distribution(0.98)\n pui = vui * ita\n\n return pui", "def compute_objective(Instance: dict):\r\n\r\n print(\"Computing objectives values...\")\r\n # Retrieve usefull infos\r\n T_max = Instance[T_STR]\r\n scenario_numbers = Instance[SCENARIO_NUMBER]\r\n Interventions = Instance[INTERVENTIONS_STR]\r\n quantile = Instance[QUANTILE_STR]\r\n # Retrieve risk final distribution\r\n risk = compute_risk_distribution(Interventions, T_max, scenario_numbers)\r\n # Compute mean risk\r\n mean_risk = compute_mean_risk(risk, T_max, scenario_numbers)\r\n # Compute quantile\r\n q = compute_quantile(risk, T_max, scenario_numbers, quantile)\r\n print(\"Done\")\r\n\r\n return mean_risk, q", "def _extract_life_sci_relevance(self, bib):\n life_sci_relevant = 0\n for system_key in ('ipc', 'ecla', 'ipcr', 'cpc'):\n try:\n for classif in bib[system_key]:\n if life_sci_relevant == 0 and self.relevant_regex.match(classif):\n life_sci_relevant = 1\n except KeyError:\n # Skip 
the warning - classifications are processed again below\n pass\n\n return life_sci_relevant", "def tournament(population, context, n, num_competitors=2):\n result = []\n for i in range(n):\n competitors = np.random.choice(population, num_competitors)\n result.append(max(competitors))\n return result, context", "def langmuir_occ(p, k):\n\n intermediate = k * p\n\n occupancy = intermediate / (intermediate + 1)\n\n return occupancy", "def sum_by_university(self,df):\n with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also\n df_univ = df.groupby(['Coll/Univ']).sum()\n \n df_univ = df_univ.sort_values('PB')\n df_top_univ = df_univ[-30:]\n \n #Visual bargraph for top 30 Colleges and number of pro-bowl appearances they produce\n df_univ_PB = df_top_univ['PB']\n univ_plot = df_univ_PB.plot(kind=\"barh\", fontsize=4)\n univ_plot.set_xlabel(\"Pro bowl appearances\")\n univ_plot.set_title(\"PRO BOWL APPEARANCES, BY COLLEGE/UNIVERSITY, 2010-2020\")\n plt.show()\n \n return", "def computeAvaliableTutors(self):\r\n subject = self.requestedSubject\r\n for tutor in AppUser.objects.all():\r\n if subject in tutor.subjectsOffered.all():\r\n self.avaliableTutors.add(tutor)", "def get_victors(self):\n if self.is_game_over():\n scores = [p.get_score() for p in self.state.get_players()]\n if len(scores) == 0:\n return []\n max_score = max(scores)\n victors = []\n for p in self.state.get_players():\n if p.get_color() not in self.violators and p.get_score() == max_score:\n victors.append(self.players[p.get_color()])\n return victors\n else:\n return None", "def get_precinct_votes():\n c.execute(\n \"SELECT Contest_Name, County, Precinct, Sum(Total_Votes) as Votes from v group by County, Precinct, Contest_Name order by Votes ASC\")\n return c.fetchall()", "def get_verse_count(surah):\n return len(surah)", "def mutual_info_score(self):\n _, _, I_CK = self._entropies()\n return I_CK / self.grand_total", "def find_survivor(num_people, kill_every):\n\n # 1 represents alive, 0 represents dead\n people = [1 for person in xrange(num_people)]\n\n # track kills so we know when only one is left\n dead_count = 0\n # for a 0-indexed list, start at position -1\n position = -1\n # continue kills until only one left\n while dead_count < num_people - 1:\n # increment position 'kill_every' times, skipping already dead people\n for _ in xrange(kill_every):\n position += 1\n while people[position % num_people] == 0:\n position += 1\n # make the kill\n people[position % num_people] = 0\n dead_count += 1\n\n # add one because of 0-indexed list\n return people.index(1) + 1", "def sngl_obj_evo(self, lacking):\n prob, algo = self.probinit('jde', 0)\n l = list()\n u = 6+(self.N-3)*4\n for i in range(lacking):\n archi = archipelago(algo,prob,8,16, topology=fully_connected())\n for j in range(u):\n archi.evolve(5)\n stdout.write(\"\\r{0} / {1}\".format(i*u+j+1, lacking*u))\n stdout.flush()\n tmp = [isl for isl in archi]\n tmp.sort(key = lambda x: x.population.champion.f[0]);\n l.append(tmp[0].population.champion)\n stdout.write(\" Done. 
\")\n return l, prob", "def get_nucliators_num_and_proba(self):\n XY = self.XY\n TIMES = self.die_times\n # CHEN'S IMPLEMENTATION\n # nucliators = np.array([True for i in range(len(TIMES))])\n # leaders = np.array([-1 for i in range(len(TIMES))])\n # cells_idx_sorted_by_times = np.arange(0, len(TIMES), 1)\n # for cell_idx in cells_idx_sorted_by_times:\n # # nucliators[cell_idx] = True\n # cell_death = TIMES[cell_idx]\n # neighbors_prior_death = [True for i in range(len(self.neighbors_list[cell_idx]))]\n # for neighbor_idx in self.neighbors_list[cell_idx]:\n # # if nucliators[cell_idx] == True:\n # # break\n # neighbor_death = TIMES[neighbor_idx]\n # if cell_death > neighbor_death:# and leaders[cell_idx] == -1:\n # nucliators[cell_idx] = False\n # # leaders[cell_idx] = cell_idx\n # elif cell_death == neighbor_death and not nucliators[neighbor_idx]:\n # nucliators[cell_idx] = False\n # leaders[cell_idx] = cell_idx\n # else:\n # nucliators[cell_idx] = True\n # # if leaders[neighbor_idx] != -1:\n # # leaders[cell_idx] = leaders[neighbor_idx]\n #\n # self.nucliators = nucliators\n # self.nucliators_num = nucliators.sum()\n # self.nucliation_proba = self.nucliators_num / len(XY)\n\n # MY IMPLEMENTATION\n self.nucliators = self.nucliators_counter.calc_nucliators()\n self.nucliators_num = self.nucliators.sum()\n self.nucliation_proba = self.nucliators_num / len(self.XY)", "def get_collisions(self) -> int:\n return 0 # no obstacles are spawned for Circle tasks", "def compute_multiuser_local_sensitivity(sc, dataset, num_iters_ls,\n num_users_ls):\n res = []\n users_already_processed = set()\n all_users = list(get_user_list(dataset))\n for x in xrange(num_users_ls):\n while True:\n cur_user = random.choice(all_users)\n print \"Trying user\", cur_user\n if cur_user in users_already_processed:\n print \"Oops, we've already processed this one\"\n continue\n if max_movies_per_user == 0:\n break\n print \"Looking at their ratings\"\n u_ratings = get_ratings_from_uid(dataset, cur_user)\n u_ratings_list = u_ratings.collect()\n l = len(u_ratings_list)\n if l > max_movies_per_user:\n print \"This user has too many movies: \",\\\n l, \">\", max_movies_per_user\n users_already_processed.add(cur_user)\n continue\n else:\n print \"This user with\", l, \"movies \" +\\\n \"rated is fine!\"\n break\n print \"Probing user\", cur_user\n report = compute_user_local_sensitivity(sc, dataset, cur_user,\n num_iters_ls)\n users_already_processed.add(cur_user)\n res.append(report)\n return res", "def __init__(self):\n self.persons = []\n self.partnerships = []\n self.straight_males = set()\n self.females = set()\n self.high_sexual_activity = set()\n self.singles = set()\n self.infected = set()\n self.resistant = set()\n self.age_group = [set() for x in range(5)]\n self.age_group_mixing_prob = age_group_mixing()\n self.age_groups = [(i,j) for i in range(5) for j in range(5)]\n self.time = 0\n self.number_of_symptomatic = 0\n self.number_of_asymptomatic = 0\n self.number_of_res_symp = 0\n self.number_of_res_asymp = 0\n self.number_of_steady = 0\n self.r0_infected = []", "def _get_willingness_vector(self, user_id):\n will_vec = np.zeros(len(self.genres))\n for i, g in enumerate(self.genres):\n will_vec[i] = self.genre_willigness.get_affinity(user_id, g)\n return will_vec", "def get_rank() -> int:\n return collective.get_rank()", "def naive_majority(voters):\n half = len(voters)//2\n for index, voter in enumerate(voters):\n count = 0\n for other_voter in voters:\n if voter == other_voter:\n count += 1\n if count > half:\n return 
Outcome.has_majority\n return Outcome.no_majority", "def University_calculation(jobid):\r\n min_cgpa=90\r\n \"\"\"~~~~~~~~~\"\"\"\r\n dbconnect= connect_to_db()\r\n Candidate_qualifications=pd.read_sql(\"select candidate_id,university_name,institute_name,aggregate from candidate_qualification where candidate_id in(select candidate_id from master_id where job_id=\"+str(jobid)+\")\",con=dbconnect)\r\n College_data=pd.read_sql(\"select * from college_tiers\",con=dbconnect)\r\n Candidate_qualifications[\"aggregate\"]=Candidate_qualifications[\"aggregate\"].apply(lambda x: 0 if x<0 or x>100 else x)\r\n Candidate_qualifications[\"aggregate\"]=Candidate_qualifications[\"aggregate\"].apply(lambda x: x*10 if 5<x<10 else x)\r\n Candidate_qualifications[\"aggregate\"]=Candidate_qualifications[\"aggregate\"].apply(lambda x: 25*x if 0<x<4 else x)\r\n\r\n def Aggregate():\r\n Unique_candids=Candidate_qualifications[[\"candidate_id\",\"aggregate\"]].groupby(\"candidate_id\").mean()\r\n Unique_candids[\"aggregate\"]=Unique_candids[\"aggregate\"].apply(lambda x:x-min_cgpa)\r\n minval=min(Unique_candids[\"aggregate\"])\r\n maxval=max(Unique_candids[\"aggregate\"])\r\n Unique_candids[\"aggregate\"]=Unique_candids[\"aggregate\"].apply(lambda x:(x-minval)*100/(maxval-minval))\r\n Unique_candids=Unique_candids.reset_index()\r\n return Unique_candids\r\n \r\n def University_name():\r\n stop_words=[\"of\",\"on\",\"for\",\"the\",\"&\",\"and\"]\r\n unique_candids=list(np.unique(Candidate_qualifications[\"candidate_id\"]))\r\n candidate_univdict={}\r\n for i in unique_candids:\r\n candidate_univdict[i]=Candidate_qualifications[[\"university_name\",\"institute_name\"]][Candidate_qualifications[\"candidate_id\"]==i].values.tolist()\r\n candidate_univdict={k:list(map(lambda x:list(filter(lambda y:str(y).strip() not in[\"\",\"None\"],x)),v)) for k,v in candidate_univdict.items()}\r\n candidate_univdict={k: np.unique(list(itertools.chain.from_iterable(v))).tolist() for k,v in candidate_univdict.items()}\r\n for i in candidate_univdict.keys():\r\n for j in candidate_univdict[i]:\r\n if j in list(map(lambda x: str(x).lower(),College_data[\"College\"].tolist())):\r\n candidate_univdict[i][candidate_univdict[i].index(j)]=list(College_data[\"Tier\"][College_data[\"College\"]==j])[0]\r\n continue;\r\n if j in list(map(lambda x: str(x).lower(),College_data[\"College\"].tolist())):\r\n candidate_univdict[i][candidate_univdict[i].index(j)]=list(College_data[\"Tier\"][College_data[\"Ancronym\"]==j])[0]\r\n continue;\r\n else:\r\n Processed_collegedata=pd.DataFrame(College_data[\"College\"].apply(lambda x: [i for i in re.sub(\"[\\.-]\",\" \",x.lower()).split() if i not in stop_words]))\r\n Processed_collegedata[\"Ancronym\"]=College_data[\"Ancronym\"].apply(lambda x: [i for i in re.sub(\"[\\.-]\",\" \",x.lower()).split() if i not in stop_words])\r\n val=[w for w in re.sub(\"[\\.-]\",\" \",j.lower()).split() if w not in stop_words]\r\n Processed_collegedata[\"College\"]=Processed_collegedata[\"College\"].apply(lambda x:(len(set(val).intersection(set(x))))/len(set(val).union(set(x))))\r\n Processed_collegedata[\"Ancronym\"]=Processed_collegedata[\"Ancronym\"].apply(lambda x:(len(set(val).intersection(set(x))))/len(set(val).union(set(x))))\r\n maxval=Processed_collegedata.max().idxmax()\r\n if Processed_collegedata[maxval].idxmax()>0.5:\r\n candidate_univdict[i][candidate_univdict[i].index(j)]=College_data.ix[Processed_collegedata[maxval].idxmax(),\"Tier\"]\r\n else:\r\n candidate_univdict[i][candidate_univdict[i].index(j)]=3\r\n \r\n 
candidate_univdict={k:100/min(v) for k,v in candidate_univdict.items() if len(v)>0}\r\n College_score=pd.DataFrame.from_dict(candidate_univdict,orient=\"index\")\r\n College_score=College_score.reset_index()\r\n College_score.columns=[\"candidate_id\",\"Tier_score\"]\r\n return College_score\r\n result=pd.merge(Aggregate(),University_name(),how=\"outer\",on=\"candidate_id\")\r\n result=pd.merge(pd.DataFrame(np.unique(Candidate_qualifications[\"candidate_id\"]),columns=[\"candidate_id\"]),result,how=\"left\",on=\"candidate_id\")\r\n result=result.fillna(0)\r\n return result", "def find_stationary_population(survival_rates):\n max_iter = 10000\n\n mass_in = np.zeros(age_max, dtype=np.float64)\n mass_in[0] = 1.0\n mass_out = np.zeros(age_max, dtype=np.float64)\n fertility = np.float64(1.0)\n\n for _ in range(max_iter):\n mass_new = mass_in * survival_rates\n mass_out[1:] = mass_new[:-1]\n mass_out[0] = 1 - sum(mass_new)\n fertility = mass_out[0] / mass_in[0]\n mass_in = copy.deepcopy(mass_out)\n\n fertility_stationary = fertility\n mass_stationary = mass_out\n\n return mass_stationary, fertility_stationary", "def virus_monte_carlo(initial_infected, population, k):\n people_array = np.arange(1, population+1, dtype=int)\n current_infected = initial_infected\n people_infected = np.array([current_infected])\n time_array = np.array([0])\n \n # Array math.\n counter = 0\n for _ in people_array:\n probability = (k)*current_infected/population\n random_array = np.random.uniform(0, 1, size=people_array.size)\n random_bool = np.where(random_array <= probability, True, False)\n people_array = people_array[random_bool != True]\n if people_array.size != population:\n current_infected = (population-people_array.size)\n people_infected = np.append(people_infected, current_infected)\n counter+=1\n time_array = np.append(time_array, counter)\n if people_infected.size == population:\n break\n \n return (time_array, people_infected)", "def GetUniverse(u):\n raise NotImplementedError", "def recover_all_cristal_members():\n members={}\n members['permanent']=[]\n members['non-permanent']=[]\n\n teams=scrape_webpage_cristal_all_teams()\n\n for team in teams:\n team_members=scrape_webpage_cristal_members_by_teams(team)\n\n members['permanent']+=team_members['permanent']\n members['non-permanent']+=team_members['non-permanent']\n\n return members", "def how_many(self):\n\tprint \"We have {0} robots !!!\".format(self.population)", "def i_e_c():\r\n parties = {}\r\n \r\n print(\"Independent Electoral Commission\")\r\n print(\"--------------------------------\")\r\n party = input(\"Enter the names of parties (terminated by DONE):\\n\")\r\n \r\n while party != 'DONE':\r\n if party:\r\n if not(party in parties):\r\n parties[party] = 1\r\n else:\r\n parties[party] += 1\r\n \r\n party = input('')\r\n \r\n parties2 = sorted(list(parties.keys())) \r\n \r\n if len(parties) > 0:\r\n print(\"\\nVote counts:\")\r\n \r\n for i in parties2:\r\n print(i.ljust(10) + ' -', parties[i])", "def simulationTwoDrugsVirusPopulations():\n #TODO", "def tournament():\n return min(sample(population, sample_size)).chromosome[:]", "def ncusps(self):\n n = self.level()\n return sum([arith.euler_phi(arith.gcd(d,n//d)) for d in n.divisors()])", "def vscr_ratchet_group_session_get_participants_count(self, ctx):\n vscr_ratchet_group_session_get_participants_count = self._lib.vscr_ratchet_group_session_get_participants_count\n vscr_ratchet_group_session_get_participants_count.argtypes = [POINTER(vscr_ratchet_group_session_t)]\n 
vscr_ratchet_group_session_get_participants_count.restype = c_uint\n return vscr_ratchet_group_session_get_participants_count(ctx)", "def ensembleVote(x, classes, ensemble):\n votes = np.array([0 for kk in range(len(classes))])\n for i in ensemble:\n votes = votes + classProbs(x, ensemble[i][\"tree\"], classes)\n maxVote = 0\n loc = None\n for ind, vote in enumerate(votes):\n if vote > maxVote:\n maxVote = vote\n loc = ind\n prediction = classes[loc]\n return prediction", "def get_participants_data(self):\n participants = []\n for (email, uid) in self.tokens.items():\n participant = {} \n participant['uid'] = uid\n participant['email'] = email\n response = 0\n questions = 0\n sections = [x for x in self.values() if ISurveySection.providedBy(x)]\n for section in sections:\n response += len(section.responses.get(uid, {}))\n questions += len(section.question_ids)\n if response != 0:\n participant['finished'] = Decimal(response) / Decimal(questions) * 100\n else:\n participant['finished'] = 0 \n participants.append(participant)\n return participants", "def execMaxpTabu(y, w, threshold=100.0, maxit=2, tabuLength=5, typeTabu=\"exact\"):\n print(\"Running max-p-regions model (Duque, Anselin and Rey, 2010)\")\n print(\"Local search method: Tabu Search\")\n print(\"Number of areas: \", len(y))\n print(\"threshold value: \", threshold)\n distanceType = \"EuclideanSquared\"\n distanceStat = \"Centroid\";\n objectiveFunctionType = \"SS\";\n selectionType = \"Minimum\";\n numRegionsType = \"EndogenousThreshold\";\n\n # CONSTRUCTION PHASE 1: GROWING FEASIBLE REGIONS\n\n start = tm.time()\n\n # print w\n # print y\n\n am = AreaManager(w, y, distanceType)\n maxP = 0\n bestCandidates = {}\n for i in range(maxit):\n\n # print \"**** Iteration %d of %d ...\"%(i+1,maxit)\n\n rm = RegionMaker(am,\n distanceType = distanceType,\n distanceStat = distanceStat,\n selectionType = selectionType,\n objectiveFunctionType = objectiveFunctionType,\n numRegionsType = numRegionsType,\n threshold = threshold)\n numRegions = len(rm.feasibleRegions)\n rm.getObj()\n\n # print \"rm.feasibleRegions\",rm.feasibleRegions\n # print \"obj\",rm.getObj()\n\n if numRegions > maxP:\n bestCandidates = {}\n maxP = numRegions\n obj = rm.objInfo\n bestCandidates[obj] = rm.feasibleRegions\n if numRegions == maxP:\n obj = rm.objInfo\n if obj in bestCandidates:\n pass\n else:\n bestCandidates[obj] = rm.feasibleRegions\n else:\n pass\n\n # print \"bestCandidates\", bestCandidates\n\n ofValues = list(bestCandidates.keys())\n basicMemory = BasicMemory()\n while len(ofValues) >= 1:\n\n # RECREATE SOLUTION\n\n rm.resetNow()\n minOfValue = min(ofValues)\n ofValues.remove(minOfValue)\n partialSolution = bestCandidates[minOfValue]\n\n # print \"ASSIGNING ENCLAVES\"\n # print partialSolution\n\n regionId = 0\n for growReg in partialSolution:\n seedGrowReg = partialSolution[growReg][0]\n rm.assignSeeds(seedGrowReg, regionId)\n partialSolution[growReg].remove(seedGrowReg)\n if len(partialSolution[growReg]) >= 1:\n for areaInGrow in partialSolution[growReg]:\n rm.assignArea(areaInGrow, regionId)\n regionId += 1\n\n # CONSTRUCTION PHASE 2: ENCLAVES ASSIGNATION\n\n rm.feasibleRegions = copy.deepcopy(rm.region2Area)\n rm.getIntraBorderingAreas()\n rm.newExternal = set(rm.unassignedAreas)\n if len(rm.unassignedAreas) != 0:\n rm.constructionStage = \"enclaves\"\n while len(rm.unassignedAreas) != 0:\n rm.constructRegions()\n rm.objInfo = rm.getObjective(rm.region2Area)\n rm.feasibleRegions = copy.deepcopy(rm.region2Area)\n rm.getIntraBorderingAreas()\n\n # 
print \"ASSIGNED SOLUTION\"\n # print \"OBJ: \", rm.getObjective(rm.region2Area), rm.returnRegions()\n\n rm.calculateRegionValueThreshold()\n\n # LOCAL SEARCH\n\n rm.calcObj()\n convTabu = min(10,old_div(len(y),maxP)) # convTabu=230*numpy.sqrt(maxP)\n\n # print \"###ENTERING TABU\",rm.objInfo,rm.returnRegions()\n\n rm.tabuMove(tabuLength, convTabu = convTabu, typeTabu=typeTabu)\n rm.calcObj()\n\n # print \"***** AFTER TABU\",rm.objInfo,rm.returnRegions()\n # EVALUATE SOLUTION\n\n if rm.objInfo < basicMemory.objInfo:\n basicMemory.updateBasicMemory(rm)\n time = tm.time() - start\n Sol = basicMemory.regions\n Of = basicMemory.objInfo\n print(\"FINAL SOLUTION: \", Sol)\n print(\"FINAL OF: \", Of)\n output = { \"objectiveFunction\": Of,\n \"runningTime\": time,\n \"algorithm\": \"maxpTabu\",\n \"regions\": len(Sol),\n \"r2a\": Sol,\n \"distanceType\": distanceType,\n \"distanceStat\": distanceStat,\n \"selectionType\": selectionType,\n \"ObjectiveFuncionType\": objectiveFunctionType}\n print(\"Done\")\n return output", "def T(v,securite):\n to_return = {} #renvoie le dictionnaire {indice du contact (0 -> direct / sinon -> plus ou moins direct) : set({disque})} \n Cv = set(C(v,securite))\n Tv = set(Cv)\n i=0\n xv,yv=l[v][0],l[v][1]\n while Cv != set() and i<5:\n to_return[str(i)]=Cv\n new_Cv = set()\n for j in Cv:\n xj,yj=l[j][0],l[j][1]\n #si j est devant v, on ne le copte pas\n if sqrt((xj-xt)**2+(yj-yt)**2)<sqrt((xv-xt)**2+(yv-yt)**2):\n continue\n new_Cv= new_Cv.__or__(C(j,securite).__sub__(Tv.__or__(set(j).__or__({v}))))\n Tv = Tv.__or__(new_Cv)\n Cv = new_Cv\n i+=1\n return to_return", "def test_uv(u, v, Id3, K=2**20, num=100, p=0.01):\n Pu = IntegerMatrixProduct(u)\n Pv = IntegerMatrixProduct(v)\n \n filename = '../candidates_full/killed_{}_{}'.format(t2s(u), t2s(v))\n killed = set(read_identities(filename))\n\n Id3_to_test = set(Id3).difference(killed)\n if not Id3_to_test:\n return u,v,-1\n\n eltsUV = []\n while len(eltsUV) < num:\n a = random_integer_max_plus_matrix(3,-K,K,p)**6\n b = random_integer_max_plus_matrix(3,-K,K,p)**6\n U = Pu(a,b)\n V = Pv(a,b)\n if U != V:\n eltsUV.append((U,V))\n\n nkilled = 0\n for s,t in Id3_to_test:\n if not is_relation(s, t, eltsUV, False):\n nkilled += 1\n killed.add((s,t))\n\n killed = sorted(killed)\n f = open(filename, 'w')\n for s,t in killed:\n f.write(t2s(s) + ' ' + t2s(t) + '\\n')\n f.close()\n return u,v,nkilled", "def popIII_lifetime(m):\n if np.size(m) > 1:\n lifetimes = np.zeros(np.size(m))\n for i,mass in enumerate(m):\n lifetimes[i] = _popIII_lifetimes[np.argmin(np.abs(_popIII_lifetime_masses-mass))]\n return lifetimes\n else:\n index = np.argmin(np.abs(_popIII_lifetime_masses-m))\n return _popIII_lifetimes[index]", "def _compute_global_stats():\n global_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Check how many HITs have been completed. 
We now consider a HIT to be\n # completed once it has been annotated by one or more annotators.\n #\n # Before we required `hit.users.count() >= 3` for greater overlap.\n hits_completed = HIT.objects.filter(mturk_only=False, completed=True).count()\n \n # Check any remaining active HITs which are not yet marked complete.\n for hit in HIT.objects.filter(active=True, mturk_only=False, completed=False):\n if hit.users.count() >= 1:\n hits_completed = hits_completed + 1\n hit.completed = True\n hit.save()\n \n # Compute remaining HITs for all language pairs.\n hits_remaining = HIT.compute_remaining_hits()\n \n # Compute number of results contributed so far.\n ranking_results = RankingResult.objects.filter(\n item__hit__completed=True, item__hit__mturk_only=False)\n \n from math import factorial\n system_comparisons = 0\n for result in ranking_results:\n result.reload_dynamic_fields()\n # TODO: this implicitly counts A=B comparisons for multi systems.\n # Basically, inflating the number of pairwise comparisons... Fix!\n combinations = factorial(result.systems)/(factorial(result.systems-2) * 2) if result.systems > 2 else 0\n system_comparisons = system_comparisons + combinations\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # Compute average/total duration over all results.\n durations = RankingResult.objects.all().values_list('duration', flat=True)\n total_time = sum([datetime_to_seconds(x) for x in durations])\n avg_time = total_time / float(hits_completed or 1)\n avg_user_time = total_time / float(3 * hits_completed or 1)\n \n global_stats.append(('Users', len(wmt16_users)))\n global_stats.append(('Groups', len(groups)))\n global_stats.append(('HITs completed', '{0:,}'.format(hits_completed)))\n global_stats.append(('HITs remaining', '{0:,}'.format(hits_remaining)))\n global_stats.append(('Ranking results', '{0:,}'.format(ranking_results.count())))\n global_stats.append(('System comparisons', '{0:,}'.format(system_comparisons)))\n global_stats.append(('Average duration (per HIT)', seconds_to_timedelta(avg_time)))\n global_stats.append(('Average duration (per task)', seconds_to_timedelta(avg_user_time)))\n global_stats.append(('Total duration', seconds_to_timedelta(total_time)))\n \n # Create new status data snapshot\n TimedKeyValueData.update_status_if_changed('users', str(len(wmt16_users)))\n TimedKeyValueData.update_status_if_changed('groups', str(len(groups)))\n TimedKeyValueData.update_status_if_changed('hits_completed', str(hits_completed))\n TimedKeyValueData.update_status_if_changed('hits_remaining', str(hits_remaining))\n TimedKeyValueData.update_status_if_changed('ranking_results', str(ranking_results.count()))\n TimedKeyValueData.update_status_if_changed('system_comparisons', str(system_comparisons))\n TimedKeyValueData.update_status_if_changed('duration_per_hit', str(seconds_to_timedelta(avg_time)))\n TimedKeyValueData.update_status_if_changed('duration_per_task', str(seconds_to_timedelta(avg_user_time)))\n TimedKeyValueData.update_status_if_changed('duration_total', str(seconds_to_timedelta(total_time)))\n \n return global_stats", "def run_v1(self):\n start_time = dt.datetime.now()\n first_key_set = set(self.first)\n second_key_set = set(self.second)\n dict = {}\n\n for idx, key in enumerate(first_key_set):\n if(key in dict):\n dict[key] += (len(filter(lambda x:x==key, self.first)))\n else :\n dict[key] = int((len(filter(lambda x: x == key, 
self.first))))\n\n for idx, key in enumerate(second_key_set):\n if(key in dict):\n dict[key] += (len(filter(lambda x:x==key, self.second)))\n else :\n dict[key] = int((len(filter(lambda x: x == key, self.second))))\n\n result = max(dict.iterkeys(), key=(lambda key : dict[key]))\n print(\"inviting {0} people interest in {1}\".format(dict[result] , result))\n\n end_time = dt.datetime.now()\n print(\"time cost : {0}\".format(end_time - start_time))", "def vorticity(self, u, v):\n return self.to_grid(self.vorticity_spectral(u, v))", "def final_penguins_num_for_neutral(game, ice, my_arrival_turn=-1, until_turn=300, groups=[]):\r\n status = \"neutral\"\r\n last_status = status\r\n my_penguin_amount = ice.penguin_amount\r\n last_group_turns_till_arrival = 0\r\n groups_toward_ice = [g for g in game.get_all_penguin_groups() if g.destination.equals(ice)]\r\n groups_toward_ice.sort(key=lambda g: some(g, groups))\r\n for g in groups_toward_ice:\r\n \r\n if g not in groups:\r\n g_turn_till_arrival = real_turn_teal_arrival(g)\r\n else:\r\n g_turn_till_arrival = illusion_turn_teal_arrival(g)\r\n \r\n if g_turn_till_arrival >= until_turn:\r\n return status == \"neutral\"\r\n\r\n if status == \"mine\":\r\n my_penguin_amount += (g_turn_till_arrival - last_group_turns_till_arrival) * ice.penguins_per_turn\r\n elif status == \"enemy\":\r\n my_penguin_amount -= (g_turn_till_arrival - last_group_turns_till_arrival) * ice.penguins_per_turn\r\n\r\n if g_turn_till_arrival == last_group_turns_till_arrival and last_status == \"neutral\":\r\n aaa = g.penguin_amount\r\n if g in game.get_enemy_penguin_groups():\r\n aaa *= -1\r\n my_penguin_amount = last_group_amount+aaa\r\n\r\n if status == \"neutral\" and g_turn_till_arrival != last_group_turns_till_arrival:\r\n my_penguin_amount -= g.penguin_amount\r\n if my_penguin_amount < 0:\r\n if g in game.get_my_penguin_groups():\r\n my_penguin_amount *= -1\r\n else:\r\n last_group_turns_till_arrival = g_turn_till_arrival\r\n last_group_amount = g.penguin_amount\r\n last_status = status\r\n if g in game.get_enemy_penguin_groups():\r\n last_group_amount *= -1\r\n continue\r\n else:\r\n if g in game.get_enemy_penguin_groups():\r\n my_penguin_amount -= g.penguin_amount\r\n else:\r\n my_penguin_amount += g.penguin_amount\r\n \r\n last_group_turns_till_arrival = g_turn_till_arrival\r\n last_group_amount = g.penguin_amount\r\n last_status = status\r\n if g in game.get_enemy_penguin_groups():\r\n last_group_amount *= -1\r\n \r\n if my_penguin_amount > 0:\r\n status = \"mine\"\r\n elif my_penguin_amount == 0:\r\n status = \"neutral\"\r\n else:\r\n status = \"enemy\"\r\n\r\n if until_turn != 300:\r\n return status == \"neutral\"\r\n if status == \"neutral\":\r\n return my_penguin_amount, True\r\n elif my_arrival_turn == -1 or my_arrival_turn < last_group_turns_till_arrival:\r\n return my_penguin_amount, False\r\n else:\r\n if status == \"mine\":\r\n return my_penguin_amount + (my_arrival_turn - last_group_turns_till_arrival) * ice.penguins_per_turn, False\r\n else:\r\n return my_penguin_amount - (my_arrival_turn - last_group_turns_till_arrival) * ice.penguins_per_turn, False", "def surprisal(self, token: str, follower: str) -> float:\n try: \n dist = self._cooccurrence_matrix.distribution(token)\n except KeyError:\n dist = self._cooccurrence_matrix.distribution('UNKNOWN_TOKEN')\n return dist.surprisal(follower)", "def survival(value=t, lam=lam, f=failure):\n return sum(f * log(lam) - lam * value)", "def participation(self):\n if self.params.treaty:\n p = [self.params.p2050, 
self.params.p2050, self.params.p2100,\n self.params.p2150, self.params.pmax]\n return np.concatenate((\n (p[1] + (p[0] - p[1]) * np.exp(np.arange(5) * -.25)),\n (p[2] + (p[1] - p[2]) * np.exp(np.arange(5) * -.25)),\n (p[3] + (p[2] - p[3]) * np.exp(np.arange(5) * -.25)),\n (p[4] + (p[3] - p[4]) * np.exp(np.arange(45) * -.25)),\n ))\n return np.ones(self.params.tmax)", "def get_pub_scores(self, subset='auth_all'):\n from nltk.corpus import stopwords\n from nltk.tokenize import word_tokenize\n import csv\n from difflib import SequenceMatcher\n import jellyfish\n# self.sanity_check()\n\n if subset == 'auth_top':\n pubs = self.pub_auth_top['pub']\n elif subset == 'auth_all':\n pubs = self.pub_auth_all['pub']\n elif subset == 'inst_top':\n pubs = self.pub_inst_top['pub']\n elif subset == 'inst_all':\n pubs = self.pub_inst_all['pub']\n\n # load publication metrics\n\n # download stowords the first time\n def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\n def get_q(s):\n q = 0\n if \"Q4\" in s:\n q = 4\n if \"Q3\" in s:\n q = 3\n if \"Q2\" in s:\n q = 2\n if \"Q1\" in s:\n q = 1\n return q\n\n stop_words = set(stopwords.words('english'))\n\n journals = []\n with open('scimagojr.csv', newline='') as csvfile:\n s = csv.reader(csvfile, delimiter=';')\n for row in s:\n jname = row[2].lower()\n word_tokens = word_tokenize(jname)\n fname = [w for w in word_tokens if w not in stop_words]\n sent1 = ' '.join(fname)\n sent1 = sent1.replace('/', '')\n row[2] = sent1\n journals.append(row)\n\n Q = []\n for p in pubs:\n jname = p.lower()\n word_tokens = word_tokenize(jname)\n fname = [w for w in word_tokens if w not in stop_words]\n sent1 = ' '.join(fname)\n sent1 = sent1.replace('/', '')\n\n match = 0\n J = \"\"\n for Journal in journals:\n journal = Journal[2]\n s1 = similar(sent1, journal)\n s2 = jellyfish.jaro_winkler(sent1, journal)\n if s1 > 0.9 and s2 > 0.9:\n match += 1\n J = Journal[-1]\n Q.append(get_q(J))\n\n if subset == 'auth_top':\n self.pub_auth_top['Q'] = Q\n elif subset == 'auth_all':\n self.pub_auth_all['Q'] = Q\n elif subset == 'inst_top':\n self.pub_inst_top['Q'] = Q\n elif subset == 'inst_all':\n self.pub_inst_all['Q'] = Q", "def _tournament(self,probs,n,size):\n participants = np.random.choice(\n self.n_agents,\n size=size,\n replace=False)\n winners = np.argpartition(probs[participants], -n)[-n:]\n return participants[winners]", "def N(self):\n return len(self.cavity_grid.cavities) + 1", "def fitness(self):\n return (len(self.body)**2) * self.age", "def most_similar_actors_pca(self, moviename):\n movie_tag_frame = self.sim_act_diff_mov_tf.get_movie_tag_matrix()\n movie_tag_matrix = movie_tag_frame.values\n movies = list(movie_tag_frame.index.values)\n tags = list(movie_tag_frame)\n\n (U,s,Vh) = util.PCA(movie_tag_matrix)\n\n tag_latent_matrix = U[:, :5]\n movie_latent_matrix = numpy.dot(movie_tag_matrix, tag_latent_matrix)\n\n latent_movie_matrix = movie_latent_matrix.transpose()\n movie_movie_matrix = numpy.dot(movie_latent_matrix, latent_movie_matrix)\n\n index_movie = None\n for i, j in enumerate(movies):\n if j == moviename:\n index_movie = i\n break\n\n if index_movie == None:\n print(\"Movie Id not found.\")\n return None\n\n movie_row = movie_movie_matrix[index_movie].tolist()\n movie_movie_dict = dict(zip(movies, movie_row))\n del movie_movie_dict[moviename]\n\n for key in movie_movie_dict.keys():\n movie_movie_dict[key] = abs(movie_movie_dict[key])\n\n movie_movie_dict = sorted(movie_movie_dict.items(), key=operator.itemgetter(1), reverse=True)\n\n if 
movie_movie_dict == None:\n return None\n actors = []\n for (movie,val) in movie_movie_dict:\n if val <= 0:\n break\n movieid = util.get_movie_id(movie)\n actors = actors + self.sim_act_diff_mov_tf.get_actors_of_movie(movie)\n if len(actors) >= 10:\n break\n\n actors_of_given_movie = self.sim_act_diff_mov_tf.get_actors_of_movie(moviename)\n\n actorsFinal = [x for x in actors if x not in actors_of_given_movie]\n\n actornames = []\n for actorid in actorsFinal:\n actor = util.get_actor_name_for_id(actorid)\n actornames.append(actor)\n\n return actornames", "def get_target_proportions_of_current_trial(individuals, target):\n ambulance_waits, ambulance_target_waits = 0, 0\n other_waits, other_target_waits = 0, 0\n for individual in individuals:\n ind_class = len(individual.data_records) - 1\n rec = individual.data_records[-1]\n if rec.node == 2 and ind_class == 0:\n other_waits += 1\n if rec.waiting_time < target:\n other_target_waits += 1\n elif rec.node == 2 and ind_class == 1:\n ambulance_waits += 1\n if rec.waiting_time < target:\n ambulance_target_waits += 1\n\n return ambulance_waits, ambulance_target_waits, other_waits, other_target_waits", "def tally_votes(precinct_votes):\n county_cumulative_votes = {}\n county_cumulative_percentages = {}\n for pv in precinct_votes:\n c.execute(\n \"SELECT Choice, Total_Votes from v where Contest_Name=? and County=? and Precinct=? order by Choice ASC\",\n pv[0:3])\n precinct_choices = {}\n for pc in c.fetchall():\n precinct_choices[pc[0]] = pc[1]\n if pv[0:2] not in county_cumulative_votes:\n county_cumulative_votes[pv[0:2]] = {}\n county_cumulative_percentages[pv[0:2]] = {}\n cumulative_choices = precinct_choices\n else:\n cumulative_choices = {}\n for choice, vote in precinct_choices.items():\n cumulative_choices[choice] = vote + county_cumulative_votes[pv[0:2]][choice][-1]\n append_map_array(county_cumulative_votes[pv[0:2]], cumulative_choices)\n append_map_array(county_cumulative_percentages[pv[0:2]], normalize(cumulative_choices))\n\n return (county_cumulative_votes, county_cumulative_percentages)", "def unique_interactors(self):\n if self.results is not None:\n return tuple(self.results['interactor_name'].unique())", "def have_sequels(self):\n # Get any movies that may be a sequel to another movie in the db.\n regex = ' (VIII|VII|VI|V|IV|III|II)?$'\n sequels = Movie.objects.filter(name__regex=regex)\n\n # For each potential sequel, find the original version of the movie and\n # add it to the result set -- with its corresponding sequel count.\n results = {}\n for sequel in sequels:\n # Get the original name.\n prefix = re.sub(regex, '', sequel.name)\n\n # Get the original movie from the db -- if it exists.\n movies = Movie.objects.filter(\n name=prefix,\n genres=sequel.genres.all()\n )\n\n # Jump to the next loop if the sequel has no original in the db.\n if len(movies) < 1:\n continue\n\n # Add the movie and count to the results.\n if movies[0].name not in results:\n results[movies[0].name] = 1\n else:\n results[movies[0].name] += 1\n\n return results", "def getlife(self):\n return self.vida", "def get_diversity(population):\n\t# average variance of each component\n\treturn np.average(np.var(population, axis = 0))\n\treturn np.average(np.std(population, axis = 0))\n\treturn np.average(np.std(population, axis = 0) / ((self.benchmarks.bound[1] - self.benchmarks.bound[0]) / 2))", "def kepler_U(mu, dt, ro, vro, inv_a, nMax=500):\n\n \"\"\"\n ratios = []\n # For some parabolic comets, using some initial values improves the convergence\n for x in 
[sqrt(mu)*abs(inv_a)*dt]: #+ LINEAR_GRID :\n converged, result, ratio = kepler_U_prv(mu, x , dt, ro, vro, inv_a, nMax=1000)\n if converged:\n return result \n else :\n ratios.append(str(ratio))\n logger.error(f\"Number max iteration reached but not converged, ratios: {','.join(ratios)}\")\n return result \n \"\"\"\n x = sqrt(mu)*abs(inv_a)*dt\n return myastro.kepler_u.kepler_U(mu, x, dt, ro, vro, inv_a, nMax)", "def select(self, test):\n survivors = []\n for particle in self.particles:\n # Find the originating particle\n parent = particle\n while parent.origin is not None:\n parent = parent.origin.initial_state[0]\n if test(parent, particle) is True:\n survivors.append(particle)\n return ParticleCollection(survivors)", "def test_fav_6(self):\n\t\tplayer_list = [Player(\"Blake Base\", 1, 300000, 10), Player(\"Corey Catcher\", 2, 500000, 20), Player(\"Dexter Dugout\", 3, 200000, 50)]\n\t\tself.assertEqual( free_agent_vorp(player_list, 100000, 4), (0, 0, []) )", "def test_reviewer_capacity(resident_names, hospital_names, capacities, seed):\n\n _, _, match = _make_match(resident_names, hospital_names, capacities, seed)\n\n match.solve()\n match.reviewers[0].matching = range(match.reviewers[0].capacity + 1)\n\n with pytest.raises(Exception):\n match._check_reviewer_capacity()", "def _majority_vote(self, claims):\n c_df = claims[['source_id', 'object_id', 'value']].copy()\n discovered_truths = c_df.groupby(['object_id'\n ]).apply(lambda x: self.elect(x))\n discovered_truths = pd.DataFrame(discovered_truths)\n discovered_truths = discovered_truths.rename(columns={\n 0: 'value'\n }).reset_index()\n return discovered_truths", "def calculate_team_velocities(team_members, issues):\n team_velocities = {team_member: [issue['estimates'][team_member]/float(issue['time_taken'])\n for issue in issues.itervalues() if issue['completed']]\n for team_member in team_members}\n return team_velocities", "def mi_chemical_process(self, u):\n assert len(u) == 7, 'Chemical process design needs to specify 7 parameters.'\n fitness = (u[3] - 1) ** 2 + (u[4] - 2) ** 2 + (u[5] - 1) ** 2 - log(u[6] + 1) + (u[0] - 1) ** 2 + (u[1] - 2) ** 2 + (u[2] - 3) ** 2\n return fitness", "def _count_parties(data_set): #DEMOCRATS, THEN REPUBLICANS\r\n reps = 0\r\n dems = 0\r\n for data_point in data_set:\r\n if data_point.dat_party == \"R\": reps+=1\r\n if data_point.dat_party == \"D\": dems+=1\r\n\r\n return (dems, reps)", "def testMultiagent(self):\n \n p = Parser(dora_domain.split(\"\\n\"))\n dom = domain.Domain.parse(p.root)\n p = Parser(dora_multiagent.split(\"\\n\"))\n scen = scenario.MapsimScenario.parse(p.root, dom)\n\n self.assert_(\"r2d2\" in scen.agents)\n self.assert_(\"c3po\" in scen.agents)\n self.assert_(\"michael\" in scen.agents)\n worldprob = scen.world\n r2prob = scen.agents[\"r2d2\"]\n c3prob = scen.agents[\"c3po\"]\n mprob = scen.agents[\"michael\"]\n\n self.assertEqual(len(worldprob.init), 5)\n self.assertEqual(len(r2prob.init), 5)\n self.assertEqual(len(c3prob.init), 5)\n self.assertEqual(len(mprob.init), 5)\n\n self.assertEqual(len(worldprob.objects), 7)\n self.assertEqual(len(r2prob.objects), 7)\n self.assertEqual(len(c3prob.objects), 7)\n self.assertEqual(len(mprob.objects), 7)", "def survive(self):\n for ind, agent in enumerate(self.agents):\n if not agent.survived(self.survival_rates):\n # agent.die sets nest to unoccupied\n agent.die()\n # remove dead individual, this pop may be a bottleneck\n self.agents.pop(ind)\n self.pop_size -= 1\n # delete agent\n # this is inefficient, also needs gc call\n del agent\n 
else:\n agent.inc_age()", "def run_monte_carlo(runs, pool, goals=False):\n total_ranking = {\n 1: [],\n 2: [],\n 3: [],\n 4: [],\n 5: []\n }\n for run in range(runs):\n if goals:\n curr_score = run_one_pool(pool, True)\n else:\n curr_score = run_one_pool(pool)\n total_ranking = rank_teams_of_curr_run(curr_score, total_ranking)\n return total_ranking", "def session():\n \n # ind is a list of dictionaries for the actions. \n ind=[]\n for i in range(IND_INIT_SIZE):\n ind.append(action())\n ind.sort(key=lambda r: r[\"date\"]) # sorts the sequences by date of action\n \n beginning=ind[0]['date']\n feature_vect=creator.Individual()\n feature_vect.append(beginning.hour)\n for i in range(5):\n feature_vect.append(0)\n\n for act in ind:\n duration=act['date']-beginning\n if act['type']=='logon':\n feature_vect[2]+=1\n elif act['type']=='email' and act['activity']=='Send':\n feature_vect[3]+=1\n elif act['type']=='file' and (act[\"to_removable_media\"]==True or act[\"from_removable_media\"]==True):\n feature_vect[4]=+1\n elif act[\"type\"]==\"http\":\n feature_vect[5]+=1\n\n feature_vect[1]=duration.total_seconds()/60 # the duration is in minutes\n \n # Normalize the vector\n maxFV=max(feature_vect)\n for i in range(len(feature_vect)):\n feature_vect[i]/=maxFV\n \n return feature_vect", "def get_num_reoccurring_actors():\n movie_titles = get_titles(constants.movie_api)\n movie_actors = get_actors(movie_titles, 'movie')\n\n tv_titles = get_titles(constants.tv_api)\n tv_actors = get_actors(tv_titles, 'tv')\n\n return len(movie_actors & tv_actors)", "def coverage(self, user_list):\n all_recom_set = set()\n all_item = set(self.train['movieId'].values)\n print('\\nCalculated coverage: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n recom_item = set([data[0] for data in recom_data])\n all_recom_set.update(recom_item)\n print('\\nCoverage is: ', len(all_recom_set) / (len(all_item) * 1.0))\n return len(all_recom_set) / (len(all_item) * 1.0)", "def get_voters():", "def get_voters():", "def test_n_volunteers(self):\r\n\r\n app = self.create_app_with_contributors(anonymous=2, registered=3, two_tasks=True)\r\n total_volunteers = cached_apps.n_volunteers(app.id)\r\n\r\n err_msg = \"Volunteers is %s, it should be 5\" % total_volunteers\r\n assert total_volunteers == 5, err_msg", "def _compute_sensitivities(self, context):\n _logger.info(\"calling _compute_sensitivities.\")\n cached_id = np.random.randint(1000)\n if self.start_epoch == context.epoch_id:\n sensitivities_file = self.sensitivities_file\n else:\n sensitivities_file = self.sensitivities_file + \".epoch\" + str(\n context.epoch_id)\n sensitivities = self._load_sensitivities(sensitivities_file)\n\n for param in context.eval_graph.all_parameters():\n if not re.match(self.pruned_params, param.name()):\n continue\n if param.name() not in sensitivities:\n sensitivities[param.name()] = {\n 'pruned_percent': [],\n 'loss': [],\n 'size': param.shape()[0]\n }\n\n metric = None\n\n for param in sensitivities.keys():\n ratio = self.delta_rate\n while ratio < 1:\n ratio = round(ratio, 2)\n if ratio in sensitivities[param]['pruned_percent']:\n _logger.debug('{}, {} has computed.'.format(param, ratio))\n ratio += self.delta_rate\n continue\n if metric is None:\n metric = self._eval_graph(context, self.eval_rate,\n cached_id)\n\n param_backup = {}\n # prune parameter by ratio\n self._prune_parameters(\n context.eval_graph,\n context.scope, [param], [ratio],\n context.place,\n lazy=True,\n param_backup=param_backup)\n self.pruned_list[0]\n # 
get accuracy after pruning and update self.sensitivities\n pruned_metric = self._eval_graph(context, self.eval_rate,\n cached_id)\n loss = metric - pruned_metric\n _logger.info(\"pruned param: {}; {}; loss={}\".format(\n param, ratio, loss))\n for brother in self.pruned_list[0]:\n if re.match(self.pruned_params, brother):\n if brother not in sensitivities:\n sensitivities[brother] = {\n 'pruned_percent': [],\n 'loss': []\n }\n sensitivities[brother]['pruned_percent'].append(ratio)\n sensitivities[brother]['loss'].append(loss)\n\n self._save_sensitivities(sensitivities, sensitivities_file)\n\n # restore pruned parameters\n for param_name in param_backup.keys():\n param_t = context.scope.find_var(param_name).get_tensor()\n param_t.set(self.param_backup[param_name], context.place)\n\n# pruned_metric = self._eval_graph(context)\n\n ratio += self.delta_rate\n return sensitivities", "def scammerScore(userID):\n\tres = getMoreStuff(userID)\n\treturn res", "async def _release_heist(self, ctx):\r\n author = ctx.message.author\r\n guild = ctx.guild\r\n player_time = await self.thief.get_member_timeserved(author)\r\n base_time = await self.thief.get_member_sentence(author)\r\n oob = await self.thief.get_member_oob(author)\r\n\r\n # Theme variables\r\n theme = await self.thief.get_guild_theme(guild)\r\n t_jail = theme[\"Jail\"]\r\n t_sentence = theme[\"Sentence\"]\r\n\r\n if await self.thief.get_member_status(author) != \"Apprehended\" or oob:\r\n await ctx.send(\"I can't remove you from {0} if you're not \"\r\n \"*in* {0}.\".format(t_jail))\r\n return\r\n\r\n remaining = self.thief.cooldown_calculator(player_time, base_time)\r\n if remaining != \"No Cooldown\":\r\n await ctx.send(\"You still have time on your {}. You still need to wait:\\n\"\r\n \"```{}```\".format(t_sentence, remaining))\r\n return\r\n\r\n msg = \"You served your time. Enjoy the fresh air of freedom while you can.\"\r\n\r\n if oob:\r\n msg = \"You are no longer on probation! 3x penalty removed.\"\r\n await self.thief.set_member_oob(author, False)\r\n\r\n await self.thief.set_member_sentence(author, 0)\r\n await self.thief.set_member_timeserved(author, 0)\r\n await self.thief.set_member_free(author)\r\n\r\n await ctx.send(msg)" ]
[ "0.66658336", "0.5772919", "0.57570714", "0.5285743", "0.52517307", "0.51297843", "0.5128983", "0.51264876", "0.50322986", "0.50084907", "0.4935301", "0.49063638", "0.48907402", "0.48814934", "0.48217684", "0.48150674", "0.48144037", "0.48021117", "0.47568566", "0.4754652", "0.46895778", "0.4689276", "0.4685097", "0.46762037", "0.46718", "0.46686104", "0.46530622", "0.46495634", "0.46324232", "0.4615847", "0.45991042", "0.4597289", "0.45965865", "0.45962894", "0.45936087", "0.45782712", "0.45688918", "0.45595986", "0.45544007", "0.45377323", "0.45302904", "0.45286408", "0.45273235", "0.45250452", "0.45245215", "0.45207447", "0.45187557", "0.45021486", "0.45009965", "0.44961107", "0.44855276", "0.4482233", "0.44736463", "0.44549054", "0.44510278", "0.44453606", "0.44422972", "0.4441908", "0.4435252", "0.44349927", "0.44347468", "0.44284564", "0.44221294", "0.44115368", "0.4408157", "0.44017223", "0.44007453", "0.4396617", "0.4391421", "0.43909574", "0.43861854", "0.4385226", "0.4376273", "0.43756226", "0.43745056", "0.4362673", "0.4362137", "0.43567523", "0.43535942", "0.43502176", "0.43496388", "0.434352", "0.43332788", "0.43324575", "0.43320328", "0.43272156", "0.4326012", "0.43247226", "0.43241027", "0.43233582", "0.43206766", "0.4320136", "0.4317268", "0.4314323", "0.4314041", "0.4314041", "0.43117195", "0.43096182", "0.43083733", "0.43050742" ]
0.73624575
0
simulates a universe and uses playground.choose_trial to take a decision; returns true in case of survival in the simulated universe
def simulate_universe(): # untreated_survival is the probability to survive if not treated # this is an exact law of the universe, the player will not have this information untreated_survival = random.uniform(MIN_DISEASE_SURVIVAL, MAX_DISEASE_SURVIVAL) trials: list[Trial] = [] treated_survivals: dict[Trial, float] = {} for _ in range(random.randint(MIN_TREATMENTS_COUNT, MAX_TREATMENTS_COUNT)): group_size = random.randint(MIN_GROUP_SIZE, MAX_GROUP_SIZE) # treated_survival is the probability to survive if treated # this is an exact law of the universe, the player will not have this information # therefore it is stored in a separate dict and not in the given-to-player Trial object treated_survival = random.uniform(MIN_TREATED_SURVIVAL, MAX_TREATED_SURVIVAL) trial = Trial(group_size, untreated_survival, treated_survival) trials.append(trial) treated_survivals[trial] = treated_survival chosen_trial = playground.choose_trial(trials) if chosen_trial is None: # None means no treatment chosen_survival = untreated_survival else: chosen_survival = treated_survivals[chosen_trial] return random.uniform(0, 1) <= chosen_survival
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def run_trial():\n env = gym.make('CartPole-v0')\n obs_dim = env.observation_space.shape[0]\n n_actions = env.action_space.n\n\n qnet = QNet(obs_dim, n_actions)\n agent = Sarsa(qnet, n_actions, 0.99, 1.0, 0.05, 1e4)\n optim = torch.optim.RMSprop(qnet.parameters(), lr=0.01)\n memory = Memory()\n\n return_hist = []\n timestep = 1\n\n while timestep < 1e5:\n state = env.reset()\n done = False\n while not done:\n # Pick action and run a single environment step\n action = agent.act(state, timestep).item()\n next_state, reward, done, _ = env.step(action)\n # Add experience to memory for training\n memory.add_experience(state, action, reward, next_state, done)\n\n state = next_state\n\n # Run a single training step every 32 timesteps\n if timestep % 32 == 0:\n batch = memory.sample()\n agent.train(batch, optim)\n\n # Evaluate the current agent every 1000 agents\n if timestep % 1000 == 0:\n eval_return = evaluate(agent)\n return_hist.append(eval_return)\n\n timestep += 1\n\n return np.array(return_hist)", "def run(): \n learning_rate = 0.42\n discount_rate = 0.15\n initial_q_hat = 4\n \n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent, learning_rate, discount_rate, initial_q_hat) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n print \"Failed trials: \"\n print a.get_failed_trials()\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def _do_trial(self, i, j, permutation_vector, energies, adaptor):\n delta = energies[i, i] - energies[j, i] + energies[j, j] - energies[i, j]\n accepted = False\n\n if delta >= 0:\n accepted = True\n else:\n metrop = math.exp(delta)\n rand = random.random()\n if rand < metrop:\n accepted = True\n\n if accepted:\n self._swap_permutation(i, j, permutation_vector)\n self._swap_energies(i, j, energies)\n adaptor.update(i, True)\n else:\n adaptor.update(i, False)", "def run(num_trials):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.1, display=True) \n # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=num_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n a.performace_report(num_trials)", "def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)", "def test_conjecture():\n print(\"Executing test_conjecture:\")\n\n theory=[]\n\n print(language.program_string(theory))\n for i in range(10):\n theory=conjecture.vary([theory], 0, [], steps=1)\n print(f\"Theory after {i+1} stages of variation:\")\n 
print(language.program_string(theory))", "def main():\n\t# GET THE DIALOG CONTENT\n\tpolicyFlag = int(sys.argv[3])\n\tif policyFlag == 0:\n\t\texistedIterNum = 15000\n\t\texistedThetaFileName = 'algorithms/theta/cycle_tree/of0w1.0g5v0.0625l0.05'\n\telif policyFlag == 1:\n\t\texistedIterNum = 10000\n\t\texistedThetaFileName = 'algorithms/theta/cycle_tree/of0w1.0g5v0.0625l0.05Retroflex'\n\n\tqLearnOpts = {'gamma': 1.0, \n\t\t\t\t 'alpha': 0.0, \n\t\t\t\t 'epsilon': 0.0}\n\tnumOfTurn = util.returnConvertedIndexListCount('b','cycle_tree')\n\tnumofgauss = 5\n\tvar = 0.0625\n\tlamda = 0.05\n\tunitNum = 101\n\ta = qlearningAgents.FittedQLearningAgent(numOfTurn,numofgauss,var,lamda,unitNum, **qLearnOpts)\t\t\n\ta.openThetaFile(existedThetaFileName,existedIterNum)\n\n\tturnNum = int(sys.argv[1])\n\tuserUnitScore = []\n\tuserUnitScoreVector = sys.argv[2].split(',')\n\tfor i in userUnitScoreVector:\n\t\t\tuserUnitScore.append(float(i)/100.0)\n\n\tstate = State.State(turnNum, userUnitScore)\n\tprint a.getAction(state)", "def run(n_trials, params):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n a.set_Qtable(params['epsilon'], params['gamma'], params['learn_rate'], params['lrconst'],\n params['learn_rate_decay'], params['epsconst'], params['epsilon_decay'])\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=n_trials) # run for a specified number of trials\n\n return a.reached_destination", "def scenario3(sim):\n\n init_logging(logfile=None, debug=True)\n second = 1000.0\n duration = 10\n tau_m = 20 # ms\n cm = 1.0 # nF\n v_reset = -60\n cell_parameters = dict(\n tau_m=tau_m,\n cm=cm,\n v_rest=-70,\n e_rev_E=0,\n e_rev_I=-70,\n v_thresh=-54,\n v_reset=v_reset,\n tau_syn_E=5,\n tau_syn_I=5,\n )\n g_leak = cm / tau_m # µS\n\n w_min = 0.0 * g_leak\n w_max = 0.05 * g_leak\n\n r1 = 5.0\n r2 = 40.0\n\n sim.setup()\n pre = sim.Population(100, sim.SpikeSourcePoisson())\n post = sim.Population(10, sim.IF_cond_exp())\n\n pre.set(duration=duration * second)\n pre.set(start=0.0)\n pre[:50].set(rate=r1)\n pre[50:].set(rate=r2)\n assert_equal(pre[49].rate, r1)\n assert_equal(pre[50].rate, r2)\n post.set(**cell_parameters)\n post.initialize(v=RandomDistribution('normal', mu=v_reset, sigma=5.0))\n\n stdp = sim.STDPMechanism(\n sim.SpikePairRule(tau_plus=20.0, tau_minus=20.0,\n A_plus=0.01, A_minus=0.01),\n sim.AdditiveWeightDependence(w_min=w_min, w_max=w_max),\n #dendritic_delay_fraction=0.5))\n dendritic_delay_fraction=1)\n\n connections = sim.Projection(pre, post, sim.AllToAllConnector(),\n synapse_type=stdp,\n receptor_type='excitatory')\n\n initial_weight_distr = RandomDistribution('uniform', low=w_min, high=w_max)\n connections.randomizeWeights(initial_weight_distr)\n initial_weights = connections.get('weight', format='array', gather=False)\n # assert initial_weights.min() >= w_min\n # assert initial_weights.max() < w_max\n # assert initial_weights[0, 0] != initial_weights[1, 0]\n\n pre.record('spikes')\n post.record('spikes')\n post[0:1].record('v')\n\n sim.run(duration * second)\n\n actual_rate = pre.mean_spike_count() / duration\n expected_rate = (r1 + 
r2) / 2\n errmsg = \"actual rate: %g expected rate: %g\" % (actual_rate, expected_rate)\n assert abs(actual_rate - expected_rate) < 1, errmsg\n #assert abs(pre[:50].mean_spike_count()/duration - r1) < 1\n #assert abs(pre[50:].mean_spike_count()/duration- r2) < 1\n final_weights = connections.get('weight', format='array', gather=False)\n assert initial_weights[0, 0] != final_weights[0, 0]\n\n try:\n import scipy.stats\n except ImportError:\n raise SkipTest\n t, p = scipy.stats.ttest_ind(initial_weights[:50, :].flat, initial_weights[50:, :].flat)\n assert p > 0.05, p\n t, p = scipy.stats.ttest_ind(final_weights[:50, :].flat, final_weights[50:, :].flat)\n assert p < 0.01, p\n assert final_weights[:50, :].mean() < final_weights[50:, :].mean()\n sim.end()\n return initial_weights, final_weights, pre, post, connections", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')", "def solve(num_wizards, num_constraints, wizards, constraints):\n print(\"starting time\")\n start = time.time()\n print(\"hello\")\n end = time.time()\n print(\"time taken\",end - start)\n random.shuffle(wizards)\n assignment = wizards\n loop_counter = 0\n temperature = 10.0\n while not numSat(constraints, assignment) == len(constraints):\n loop_counter += 1\n if loop_counter % 500 == 0:\n print(\"numSat\", numSat(constraints, assignment))\n print(\"loops\",loop_counter)\n print(\"time taken\", time.time()-start)\n a = random.sample([i for i in range(len(wizards))], 1)\n b = random.sample([i for i in range(len(wizards))], 1)\n new_assignment = swap(assignment, a[0], b[0])\n delta = numSat(constraints, assignment) - numSat(constraints, new_assignment)\n if delta < 0:\n print(\"delta negative\")\n assignment = new_assignment\n else: \n # print(\"sample greater than expo\")\n sample = random.uniform(0.0,1.0)\n expo = np.exp(-1*float(delta) / temperature)\n if sample < expo:\n # print(\"sample less than expo\")\n assignment = new_assignment\n temperature -= 0.2\n\n print(\"Solved BITCH! 
numSat:\", numSat(constraints, assignment))\n return assignment", "def One_Decision(self):\n\n # EP.Population.one_year(self) # performs statistics\n\n agent = self.selectIndividual() # agent who will play the game \n # print agent.ID, 'about to move'\n self.CallsSinceLastMove += 1\n if agent.decisionToMove() and agent.moves():\n self.Moves += 1\n self.CallsSinceLastMove = 0\n # if self.popSize: self.Observer.season(self.Moves // self.popSize) # sets StepId\n self.Observer.season() # sets StepId\n # print(self.Observer.StepId)\n if self.Observer.Visible(): # time for display\n Satisfactions = self.satisfaction()\n for (Colour, Satisfaction) in Satisfactions:\n self.Observer.curve(Name='%s Satisfaction' % str(Colour), Value=Satisfaction)\n # if Satisfactions:\n # self.Observer.curve(Name='Global Satisfaction', Value=sum([S for (C,S) in Satisfactions])/len(Satisfactions))\n \n if self.CallsSinceLastMove > 10 * self.popSize:\n return False # situation is probably stable\n return True # simulation goes on", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n print 'alpha, gamma:', a.alpha, a.gamma\n print 'penalties:', a.total_penalties\n print 'total rewards:', a.total_rewards", "def simulation(self, n = 42):\n\n self.initialisation()\n i = 0\n while i < n and self.agent.vivant :\n self.step()\n i+= 1\n return self.perfGlobale", "def RandomVacuumAgent():\n return Agent(RandomAgentProgram(['Right',\n 'Left',\n 'Suck',\n 'NoOp']))", "def simulation():\n # initialize action set\n action_set = np.zeros(int((s.MAX_INSPECT - s.MIN_INSPECT) / s.DELTA) + 3)\n x, i = s.MIN_INSPECT, 1\n while x <= s.MAX_INSPECT:\n action_set[i] = x\n x += s.DELTA\n i += 1\n action_set[-1] = np.inf\n action_number = len(action_set)\n\n # initialize current state\n current_state = math.floor(np.random.rand(1) * s.NUM_STATES)\n\n # initialize action index\n if current_state == 0:\n action_index = 0\n elif current_state == s.NUM_STATES - 1:\n action_index = action_number - 1\n\n if current_state != 0 and current_state != s.NUM_STATES - 1:\n action_index = action_number - 2\n\n # initialize policy set\n greedy_policy = np.zeros(s.NUM_STATES)\n greedy_policy[-1] = np.inf\n for i in range(1, s.NUM_STATES - 1):\n greedy_policy[i] = s.MAX_INSPECT\n\n visit_times = np.zeros([s.NUM_STATES, action_number])\n\n # initialization for simulation\n falpha, Aalpha, delay_T, uni_parameter = equivalent_markov(greedy_policy)\n stable_prob, potential = stable_potential(falpha, Aalpha, uni_parameter)\n last_value = falpha + np.matmul(Aalpha, potential)\n dis_value = last_value\n # ave_vector = np.matmul(stable_prob, falpha)\n # ave_estimate = ave_vector.tolist()\n each_transit_cost, each_transit_time, total_reward = (0 for i in range(3))\n\n # initialize DQN model if selected\n dqn = DQN() if MODEL == 1 else None\n # initialize Q-table if Q-learning selected\n q_factor = 
ql.init_q_factor(action_number) if MODEL == 2 else None\n\n for out_step in range(s.EPOCH):\n epsilon = s.EPSILON_1 if MODEL == 1 else s.EPSILON_2\n\n for inner_step in range(s.EPOCH_LEARN):\n\n visit_times[current_state, action_index] += 1\n current_action = greedy_policy[current_state]\n\n inspect_cost = 0 if current_state == s.NUM_STATES - 1 else s.K5 * current_action\n\n flag, sojourn_T, service_T, next_state = state_transition(current_state, current_action)\n each_transit_time = s.DISCOUNT * each_transit_time + (sojourn_T - each_transit_time) / pow(\n out_step * s.EPOCH_LEARN + (inner_step + 1), s.Q_AVE_STEP)\n end_sojourn_T = math.exp(- s.ALPHA * sojourn_T)\n end_serve_T = math.exp(- s.ALPHA * service_T)\n\n if s.ALPHA == 0:\n dis_T, dis_serve_T, dis_wait_T = sojourn_T, service_T, sojourn_T - service_T\n else:\n dis_T, dis_serve_T = (1 - end_sojourn_T) / s.ALPHA, (1 - end_serve_T) / s.ALPHA\n dis_wait_T = (end_serve_T - end_sojourn_T) / s.ALPHA\n\n if flag == 0: # no processing, waiting\n cost_real = (s.K1 * (s.NUM_STATES - current_state) + s.K3) * sojourn_T + inspect_cost\n cost_purt = (s.K1 * (s.NUM_STATES - current_state) + s.K3) * dis_T + inspect_cost\n else: # no waiting, processing\n cost_real = s.K1 * (s.NUM_STATES - current_state - 1) * sojourn_T + s.K2 * service_T + s.K3 * (\n sojourn_T - service_T) + s.K4 + inspect_cost\n cost_purt = s.K1 * (s.NUM_STATES - current_state - 1) * dis_T + s.K2 * dis_serve_T + s.K3 * dis_wait_T \\\n + s.K4 * end_serve_T + inspect_cost\n\n each_transit_cost = s.DISCOUNT * each_transit_cost + (cost_real - each_transit_cost) / (\n pow(out_step * s.EPOCH_LEARN + (inner_step + 1), s.Q_AVE_STEP))\n\n ave_q_cost = each_transit_cost / each_transit_time\n # ave_estimate.append(ave_q_cost)\n cost_dis = cost_purt - ave_q_cost * dis_T\n\n if MODEL == 1:\n reward = - cost_dis\n dqn.store_transition(current_state, action_index, reward, next_state)\n if dqn.memory_counter >= s.MEMORY_CAPACITY:\n dqn.learn(s.EPOCH_LEARN, inner_step, PS)\n else:\n difference = cost_dis + end_sojourn_T * min(q_factor[next_state, :]) \\\n - q_factor[current_state, action_index]\n q_factor = ql.update_q_factor(q_factor, current_state, action_index, difference,\n visit_times, inner_step, PS)\n current_state = next_state # transit to next state\n\n if current_state == 0:\n action_index = 0\n elif current_state == s.NUM_STATES - 1:\n action_index = action_number - 1\n else:\n if MODEL == 1:\n action_index = int(dqn.choose_action(current_state, epsilon))\n if action_set[action_index] <= 1:\n greedy_policy[current_state] = action_set[action_index]\n else:\n greedy_policy[current_state] = 1\n else:\n if np.random.rand(1) < epsilon:\n action_index = int(np.floor(np.random.rand(1) * (action_number - 2)) + 1)\n else:\n # minimal_q_value = np.min(q_factor[current_state, :])\n action_index = np.argmin(q_factor[current_state, :])\n greedy_policy[current_state] = action_set[action_index]\n\n # store the policy learned from the iterations\n optimal_policy = greedy_policy\n\n if MODEL != 1:\n for i in range(1, s.NUM_STATES - 1):\n # minimal_q_value_temp = np.min(q_factor[i, :])\n action_index_temp = np.argmin(q_factor[i, :])\n optimal_policy[i] = action_set[action_index_temp]\n\n falpha, Aalpha, delay_T, uni_parameter = equivalent_markov(optimal_policy)\n stable_prob, potential = stable_potential(falpha, Aalpha, uni_parameter)\n\n last_value = falpha + np.matmul(Aalpha, potential)\n dis_value = np.concatenate((dis_value, last_value), axis=1)\n total_reward += - np.ndarray.item(last_value[0])\n # 
new_ave_cost = np.matmul(stable_prob, falpha)\n # ave_vector = np.concatenate((ave_vector, new_ave_cost))\n print(\"epoch: {} , the epoch reward is {}\".format(out_step, round(- np.ndarray.item(last_value[0]), 2)))\n\n # result = np.asarray(dis_value)\n print(\"total reward:\", total_reward)\n\n return dis_value, total_reward", "def Main():\n numberOfPopulation = 350\n numberOfDays = 60\n \n simulation = Simulation(Covid19(), numberOfPopulation, numberOfDays, \"Covid 19 Simulation\")\n simulation.run() \n simulation = Simulation(Ebola(), numberOfPopulation, numberOfDays, \"Ebola Simulation\")\n simulation.run()", "def RandomVacuumAgent():\n return Agent(RandomAgentProgram(['Right', 'Left', 'Suck', 'NoOp']))", "def simulationDelayedTreatment(numTrials):\n\n delays = [300,150,75,0]\n results = [[],[],[],[]]\n for place in range(0, 4):\n for trial in range(numTrials):\n viruses = []\n for num in range(100):\n viruses.append(ResistantVirus(0.1,0.05, {'guttagonol': False}, 0.005))\n patient = TreatedPatient(viruses, 1000)\n for delay in range(delays[place]):\n patient.update()\n patient.addPrescription(\"guttagonol\") \n for l in range(150):\n patient.update()\n results[place].append(patient.getTotalPop())\n pylab.hist(results[0])\n pylab.hist(results[1])\n pylab.hist(results[2])\n pylab.hist(results[3])\n pylab.show()\n for x in range(0, 10):", "def test_result_reproducibility(monkeypatch):\n script = os.path.abspath(\"examples/scikitlearn-iris/main.py\")\n monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))\n config = \"orion_config.yaml\"\n\n orion.core.cli.main(\n [\"hunt\", \"--config\", config, \"python\", script, \"orion~choices([0.1])\"]\n )\n\n experiment = create_experiment(name=\"scikit-iris-tutorial\")\n assert experiment.stats is not None\n assert experiment.stats.best_evaluation == 0.6666666666666667", "def main(_):\n description = xm.ExperimentDescription(\n 'HIS - trial=%d' % FLAGS.trial, tags=['his'])\n experiment = build_experiment()\n xm.launch_experiment(description, experiment)", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.00000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.001, display=True) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def three_experiments_with_trials(family_with_trials, single_with_trials):", "def 
simulationTwoDrugsDelayedTreatment(numTrials):\n numViruses = 100\n maxPop = 1000\n maxBirthProb = 0.1\n clearProb = 0.05\n resistances = {'guttagonol': False, 'grimpex': False}\n mutProb = 0.005\n\n first_drug = 150\n second_drug = 300\n steps = first_drug + second_drug\n total_vs = [0 for i in range(steps)]\n resis_vs = list(total_vs)\n results = list(total_vs)\n\n for trial in range(numTrials):\n viruses = []\n for i in range(numViruses):\n viruses.append(ResistantVirus(maxBirthProb, clearProb, resistances, mutProb))\n patient = TreatedPatient(viruses, maxPop)\n\n for step in range(steps):\n if step == first_drug:\n patient.addPrescription('guttagonol')\n elif step == second_drug:\n patient.addPrescription('grimpex')\n patient.update()\n total_vs[step] += patient.getTotalPop()\n resis_vs[step] += patient.getResistPop(['guttagonol'])\n resis_vs[step] += patient.getResistPop(['grimpex'])\n\n results.append(patient.getTotalPop())\n\n pylab.hist(results, 9)\n pylab.show()", "def decision():\n return random.choice(['GoToNormal','GoToSleep'])", "def trial(self):\n pass", "def act(self, s, exploration, game, return_pred_opp=False):\n opponent_p = self.compute_opponent_model(s)\n # print(opponent_p)\n opponent_action = np.random.choice(\n opponent_p.size, size=1, p=opponent_p)[0]\n # agent_p = np.exp(self.Q[s][:, opponent_action])\n agent_p = self.compute_marginal_pi(s)\n if exploration and random.random() < self.episilon:\n agent_action = random.randint(0, self.action_num - 1)\n else:\n if self.verbose:\n for s in self.Q.keys():\n print('{}--------------'.format(self.id_))\n print('Q of agent {}: state {}: {}'.format(self.id_, s, str(self.Q[s])))\n # print('QAof agent {}: state {}: {}'.format(self.id_, s, str(self.Q_A[s])))\n # self.Q_A\n print('pi of agent {}: state {}: {}'.format(self.id_, s, self.pi[s]))\n # print('pi of opponent agent {}: state{}: {}'.format(self.id_, s, self.opponent_best_pi[s]))\n print('{}--------------'.format(self.id_))\n agent_action = StationaryAgent.sample(agent_p)\n if return_pred_opp:\n return agent_action, opponent_action\n else:\n return agent_action", "def decideOnGoal(self):\r\n\r\n\t\tself.goalNode = self.simulationHandle.getMap().getRandomNode()", "def test_variational():\n # iris\n #pres = \"Test pour le data set Iris (facile, classique)\"\n #test_from_func_variational(pres, 15, 10, 3, True, Iris)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func_variational(pres, 15, 10, 3, True, Breast_cancer)\n\n # digits\n # pres = \"Test pour le data set Digits (difficile, classique)\"\n # test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n # pres = \"Test pour le data set Wine (moyen, classique)\"\n # test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func_variational(pres, 25, 10, 2, True, Gaussian)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n test_from_func_variational(pres, 10, 15, 14, True, Sequence)\n\n #Quantum data\n pres = \"Test pour des données générées par ordinateur quantique (facile, quantique)\"\n print(pres)\n _, samp_train, samp_test, labels = ad_hoc_data(15, 10, 2, 0.3, True)\n sample_m, sample_p = stock_get(20, 0.3)\n\n labels_me = [-1, 1]\n samp_train_me = {-1: np.array(sample_m[:15]), 1: np.array(sample_p[:15])}\n samp_test_me = 
{-1: np.array(sample_m[15:]), 1: np.array(sample_p[15:])}\n print(samp_train)\n print(samp_train_me)\n print(samp_test)\n print(samp_test_me)\n\n my_impl_variational(samp_train, samp_test, labels)\n print(\"Pour autres données quantiques\")\n my_impl_variational(samp_train_me, samp_test_me, labels_me)", "def run_sim(self):\n self.operator, var_form, opt = self.generate_VQE_args()\n\n backend = Aer.get_backend('statevector_simulator')\n quantum_instance = QuantumInstance(backend=backend)\n vqe = VQE(self.operator, var_form, opt) \n\n self.result = vqe.run(quantum_instance)\n solution = self.extract_solution(self.result, False)\n return solution", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.01) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit\n return [a.state_action_table, a.reward_hist]", "def test_run_simplega():\n WRFga_winner = run_simplega(pop_size=100, n_generations=1, testing=True)\n assert WRFga_winner.Fitness >= 0", "def main(num_trials, num_actions):\n\tfor i in xrange(int(num_trials)):\n\t\ttrial(i+1, int(num_actions))", "def solve(num_wizards, num_constraints, wizards, constraints):\n\n # print(num_wizards)\n # print(num_constraints)\n # print(wizards)\n # print(constraints)\n # node_set = set(wizards)\n \n\n\n def cost(sol,num_constraints,constraints):\n constraints_satisfied = 0\n constraints_failed = []\n output_ordering_map = {k: v for v, k in enumerate(sol)}\n for c in constraints:\n\n m = output_ordering_map # Creating an alias for easy reference\n\n wiz_a = m[c[0]]\n wiz_b = m[c[1]]\n wiz_mid = m[c[2]]\n\n if (wiz_a < wiz_mid < wiz_b) or (wiz_b < wiz_mid < wiz_a):\n constraints_failed.append(c)\n else:\n constraints_satisfied += 1\n return num_constraints - constraints_satisfied\n\n def neighbors(sol):\n wiz1 = random.randint(0,num_wizards-1)\n wiz2 = random.randint(0,num_wizards-1)\n\n new_sol = copy.copy(sol)\n temp = new_sol[wiz1]\n new_sol[wiz1] = new_sol[wiz2]\n new_sol[wiz2] = temp\n \n return new_sol\n\n def acceptance_probability(old_cost,new_cost,T):\n exponent = (old_cost - new_cost) / T\n \n try:\n ans = math.exp(exponent)\n except OverflowError:\n ans = float('inf')\n return ans\n\n\n def anneal(solution, num_constraints, constraints):\n old_cost = 0\n new_cost = 0\n old_cost = cost(solution,num_constraints,constraints)\n T = 1.0\n T_min = 0.000001\n alpha = 0.98\n while T > T_min:\n i = 1\n while i <= 1000:\n new_solution = neighbors(solution)\n new_cost = cost(new_solution,num_constraints,constraints)\n if new_cost == 0:\n return new_solution,new_cost\n ap = acceptance_probability(old_cost, new_cost, T)\n if ap > random.random():\n solution = new_solution\n old_cost = new_cost\n i += 1\n T = T*alpha\n return solution, old_cost\n\n s = copy.copy(wizards)\n random.shuffle(s)\n ret = anneal(s,num_constraints,constraints)\n \n for i in range(10):\n if ret[1] == 0:\n break\n random.shuffle(s)\n new_ret = anneal(s,num_constraints,constraints)\n print(i)\n if new_ret[1] < ret[1]:\n ret = new_ret\n print(\"constraints failed: {0}\".format(ret[1]))\n return ret[0]", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, 
enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def simulate(player,environment,n_trials=1000,verbose=False):\n environment.player = player\n rewards = []\n \n for i in range(1,n_trials+1):\n \n if i % (n_trials/5) == 0:\n if verbose:\n print (\"Loading game {}\".format(i))\n try:\n result = environment.play_game()\n rewards.append(result)\n except Exception:\n tb.print_exc(file=sys.stdout)\n \n return rewards", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline= True ) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def step(self, timestep):\n add_rate = self.sc.add_rate\n\n (agent_id, agent_traits) = self.model.get_random_agent()\n (neighbor_id, neighbor_traits) = self.model.get_random_neighbor_for_agent(agent_id)\n\n\n\n if agent_traits == neighbor_traits:\n return\n elif agent_traits.isdisjoint(neighbor_traits):\n return\n elif neighbor_traits.issubset(agent_traits):\n return\n else:\n prob = analysis.calc_probability_interaction_extensible(agent_traits, neighbor_traits)\n draw = npr.random()\n if draw < prob:\n neighbor_diff_traits = analysis.get_traits_differing_from_focal_extensible(agent_traits, neighbor_traits)\n #log.debug(\"neighbor_diff_traits: %s\", neighbor_diff_traits)\n neighbor_random_diff_trait = random.sample(neighbor_diff_traits, 1)\n add_draw = npr.random()\n if add_draw < add_rate:\n # we add the neighbor's trait, without replacing an existing trait\n agent_traits.add(neighbor_random_diff_trait[0])\n #log.debug(\"adding trait w/o replacement: %s\", neighbor_random_diff_trait[0])\n self.model.set_agent_traits(agent_id, agent_traits)\n else:\n # we replace an existing trait with the neighbor's trait\n focal_trait_to_replace = random.sample(agent_traits, 1)\n #log.debug(\"replacing trait %s with %s\", focal_trait_to_replace[0], neighbor_random_diff_trait[0])\n agent_traits.remove(focal_trait_to_replace[0])\n agent_traits.add(neighbor_random_diff_trait[0])\n self.model.set_agent_traits(agent_id, agent_traits)\n\n # track the interaction and time\n self.model.update_interactions(timestep)\n else:\n # no interaction given the random draw and probability, so just return\n #log.debug(\"no interaction\")\n return", "def RandomAgentProgram(actions):\n return lambda percept: random.choice(actions)", "def RandomAgentProgram(actions):\n return lambda percept: random.choice(actions)", "def run_experiment():\n return [random.random() < 0.5 for _ in range(1000)]", "def run_experiment():\n return [random.random() < 0.5 for _ in range(1000)]", "def run_experiment():\n return [random.random() < 0.5 for _ in range(1000)]", "def run_experiment():\n return [random.random() < 0.5 for _ in range(1000)]", "def simulationDelayedTreatment(numTrials):\n \n #Initialization\n #delayList = [300, 150, 75, 0]\n delayList = [150]\n #Patient init variables\n numViruses = 100\n maxPop = 1000\n #Virus init variables\n maxBirthProb = 0.1\n clearProb = 0.05\n #clearProb = 0.10\n resistances = { 'guttagonol': True }\n mutProb = 0.005\n \n results = {}\n \n for n in delayList:\n cured = 0\n popList = []\n for i in range(numTrials):\n 
pop = runTrial(n, numViruses, maxPop, maxBirthProb, clearProb, resistances, mutProb)\n popList.append(pop)\n if pop == 0:\n cured +=1\n results[n] = popList\n #print popList\n print \"Delay : %(delay)d Percentage cured %(percent)2f\" % {\"delay\" : n, \"percent\" : cured/float(numTrials) }\n \n\n drawHist(results, numTrials)", "def step_solution(self):\n import time, random\n time.sleep(1.0)\n print '(step_solution) Implement me!'\n return True if random.random() < 0.25 else False", "def decision():\n return random.random() > 0.5", "def decide(self, state: OthelloState, actions: list):\n # -------- TASK 2 ------------------------------------------------------\n # Your task is to implement an algorithm to choose an action form the\n # given `actions` list. You can implement any algorithm you want.\n # However, you should keep in mind that the execution time of this\n # function is limited. So, instead of choosing just one action, you can\n # generate a sequence of increasing good action.\n # This function is a generator. So, you should use `yield` statement\n # rather than `return` statement. To find more information about\n # generator functions, you can take a look at:\n # https://www.geeksforgeeks.org/generators-in-python/\n #\n # If you generate multiple actions, the last action will be used in the\n # game.\n #\n # Tips\n # ====\n # 1. During development of your algorithm, you may want to find the next\n # state after applying an action to the current state; in this case,\n # you can use the following patterns:\n # `next_state = current_state.successor(action)`\n #\n # 2. If you need to simulate a game from a specific state to find the\n # the winner, you can use the following pattern:\n # ```\n # simulator = Game(FirstAgent(), SecondAgent())\n # winner = simulator.play(starting_state=specified_state)\n # ```\n # The `MarkovAgent` has illustrated a concrete example of this\n # pattern.\n #\n # 3. You are free to choose what kind of game-playing agent you\n # implement. Some of the obvious approaches are the following:\n # 3.1 Implement alpha-beta (and investigate its potential for searching deeper\n # than what is possible with Minimax). Also, the order in which the actions\n # are tried in a given node impacts the effectiveness of alpha-beta: you could\n # investigate different ways of ordering the actions/successor states.\n # 3.2 Try out better heuristics, e.g. ones that take into account the higher\n # importance of edge and corner cells. 
Find material on this in the Internet.\n # 3.3 You could try out more advanced Monte Carlo search methods (however, we do\n # not know whether MCTS is competitive because of the high cost of the full\n # gameplays.)\n # 3.4 You could of course try something completely different if you are willing to\n # invest more time.\n #\n # GL HF :)\n # ----------------------------------------------------------------------\n\n # Replace the following lines with your algorithm\n best_action = actions[0]\n yield best_action", "def test_purity():\n psi = qt.fock(3)\n rho_test = qt.ket2dm(psi)\n test_pure = purity(rho_test)\n assert_equal(test_pure,1)", "def test_random_unitary_gate(self):\n shots = 2000\n circuits = ref_unitary_gate.unitary_random_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_unitary_gate.unitary_random_gate_counts_nondeterministic()\n result = execute(circuits, self.SIMULATOR, shots=shots).result()\n self.assertTrue(getattr(result, 'success', False))\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def simulationWithDrug(numTrials = 20, numTimeSteps = 500):\r\n random.seed()\r\n\r\n # Virus Characteristics.\r\n maxPop = 1000\r\n numViruses = 100\r\n maxBirthProb = 0.1\r\n clearProb = 0.05\r\n resistances={'guttagonol':False}\r\n mutProb= 0.005\r\n dataMatrix = numpy.zeros(shape = (numTrials, numTimeSteps)) \r\n for trial in range(numTrials): \r\n\r\n # Model a random patient with the given virus charateristics. \r\n viruses = resistantVirusCollection(numViruses, maxBirthProb, clearProb,resistances,mutProb)\r\n randPatientX = Patient(viruses, maxPop)\r\n\r\n #Use drug on patient\r\n randPatientX.addPrescription('guttagonol')\r\n\r\n # Simulate the time-steps.\r\n dataMatrix[trial][0] = numViruses\r\n for time in range(1, numTimeSteps):\r\n dataMatrix[trial][time] = randPatientX.update() \r\n \r\n # Statistical Analysis.\r\n meanData = dataMatrix.mean(0)\r\n time = numpy.arange(numTimeSteps) \r\n stdData95_CI = dataMatrix.std(0) * 2\r\n selectedTime = numpy.arange(0, numTimeSteps, 10)\r\n\r\n # Ploting.\r\n pylab.plot(time, meanData)\r\n pylab.errorbar(time[selectedTime], meanData[selectedTime], stdData95_CI[selectedTime], fmt = 'o') \r\n pylab.show()", "def runSurvey():\n fieldFile = globals()['settings']['fieldFile']\n # Number of particles to launch\n numParticles = globals()['settings']['numParticles']\n # Radius of spherical simulation boundary used for launching and exiting\n rLim = globals()['settings']['rLim']\n # Particle stepping method\n steppingMethod = globals()['settings']['steppingMethod']\n # Coarseness of output grid that counts particle fluxes in simulation volume\n fluxGridCoarseness = globals()['settings']['fluxGridCoarseness']\n \n # B field in R and Z\n r, z, BR = fieldGrid('fields/Brs_' + fieldFile)\n _, _, BZ = fieldGrid('fields/Bzs_' + fieldFile)\n _, _, habitatBR = fieldGrid('fields/Brs_habitat_' + fieldFile)\n _, _, habitatBZ = fieldGrid('fields/Bzs_habitat_' + fieldFile)\n r = r[:-1]\n z = z[:-1]\n BR = BR[:-1,:-1] # I MAY CAUSE A BUG IN THE FUTURE\n BZ = BZ[:-1,:-1]\n habitatMax = np.max((habitatBR**2+habitatBZ**2)**.5)\n habitatPrescription = 30\n BR += habitatBR*habitatPrescription/habitatMax\n BZ += habitatBZ*habitatPrescription/habitatMax\n print('Habitat prescription (T):', habitatPrescription)\n Bmagnitude = (BR**2+BZ**2)**.5\n\n qms, vs = qmAndVelocitySpectrum(numParticles)\n if globals()['settings']['qmPrescribed']:\n qms = np.ones(numParticles)*globals()['settings']['qmPrescribed']\n if 
globals()['settings']['v0Prescribed']:\n vs = np.ones(numParticles)*globals()['settings']['v0Prescribed']\n\n startingPoints = [randomPointOnSphere(rLim) for _ in range(numParticles)]\n directions = [randomDirectionCos(-sp) for sp in startingPoints]\n\n # Simulate without magnetic field\n start = time.time()\n rReduced, zReduced, gridOff, _, habitatCrossingsOff, GDTcrossingsOff, gridOffUnscaled, _ = monteCarloRun(startingPoints, qms, vs, directions, BR, BZ, r, z, rLim, fluxGridCoarseness, 0)\n print('Time elapsed (s):', int(time.time()-start))\n \n # Simulate with magnetic field\n start = time.time()\n _, _, gridOn, trappedOn, habitatCrossingsOn, GDTcrossingsOn, gridOnUnscaled, trappedOnUnscaled = monteCarloRun(startingPoints, qms, vs, directions, BR, BZ, r, z, rLim, fluxGridCoarseness, steppingMethod)\n print('Time elapsed (s):', int(time.time()-start))\n # np.save('cache/{}particles_accel.npy'.format(numParticles), [rReduced, zReduced, gridOn])\n try:\n print('---\\nGDT crossing change: {}%'.format(round(100*(GDTcrossingsOn-GDTcrossingsOff)/GDTcrossingsOff, 3)))\n print('Habitat crossing change: {}%\\n---'.format(round(100*(habitatCrossingsOn-habitatCrossingsOff)/habitatCrossingsOff, 3)))\n except Exception as e:\n print(e)\n \n # plotDiff(r, z, Bmagnitude, gridOn, gridOff)\n plot6panel(r, z, rReduced, zReduced, Bmagnitude, gridOn, gridOff, trappedOn)", "def test5():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(A = 'h1', C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('ataque')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n exp1.pingAllTest() # **************** Parece que es necesario que se de un arranque al controlador\n # **************** para que aprenda las reglas antes del ataque.\n\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.pingMeasure()\n #exp1.trafico.pingMeasure(filename='ping_ataque_test.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def simulationWithDrug(numTrials = 100, numTimeSteps = 300):\n random.seed()\n\n # Virus Characteristics.\n maxPop = 1000\n numViruses = 100\n maxBirthProb = 0.1\n clearProb = 0.05\n \n gutResistVirusMatrix = numpy.zeros(shape = (numTrials, numTimeSteps))\n dataMatrix = numpy.zeros(shape = (numTrials, numTimeSteps)) \n for trial in range(numTrials): \n\n # Model a random patient with the given virus charateristics. 
\n viruses = virusCollection(numViruses, maxBirthProb, clearProb, ['guttagonol'])\n randPatientX = Patient(viruses, maxPop)\n\n # Simulate the time-steps.\n dataMatrix[trial][0] = numViruses\n for time in range(1, numTimeSteps):\n if time == 150:\n randPatientX.addPrescription('guttagonol')\n dataMatrix[trial][time] = randPatientX.update()\n gutResistVirusMatrix[trial][time] = randPatientX.getResistPop(['guttagonol']) \n \n # Statistical Analysis.\n meanData = dataMatrix.mean(0)\n time = numpy.arange(numTimeSteps) \n stdData95_CI = dataMatrix.std(0) * 2\n selectedTime = numpy.arange(0, numTimeSteps, 10)\n\n meanResistVirus = gutResistVirusMatrix.mean(0)\n\n #f = pylab.figure(figsize=(15, 7))\n\n # Plotting.\n #pylab.subplot(121)\n pylab.plot(time, meanData, label='Mean Virus Population')\n pylab.errorbar(time[selectedTime], meanData[selectedTime], stdData95_CI[selectedTime], fmt = 'o', color = 'blue')\n pylab.grid() \n pylab.xlabel('Time Steps')\n pylab.ylabel('Total Virus Population')\n pylab.title('Effect of Guttagonol on Virus Population being administered\\nafter {} Timesteps over a total period of {} Timesteps'.format('150', '300'), fontsize='medium')\n\n stdDevGutVirusPop = gutResistVirusMatrix.std(0) * 2\n\n # Plotting 2nd graph\n #pylab.subplot(122)\n pylab.plot(time, meanResistVirus, label='Mean Guttagonol-resistant Virus Population', color = 'red')\n pylab.errorbar(time[selectedTime], meanResistVirus[selectedTime], stdDevGutVirusPop[selectedTime], fmt = 'o', color = 'red')\n pylab.legend(fontsize='x-small', loc='best')\n #pylab.grid()\n #pylab.xlabel('Time Steps')\n #pylab.ylabel('Total Guttagonol-Resistant Virus Population')\n #pylab.title('Total Number of Guttagonol-Resistant Virus Population after {} Timesteps\\nDrug administered after {} Timesteps'.format('300', '150'), fontsize='medium')\n pylab.show()", "def choose_trial_to_run(self, trial_runner):\n\n raise NotImplementedError", "def simulationDelayedTreatment(numTrials):\n numViruses = 100\n maxPop = 1000\n maxBirthProb = 0.1\n clearProb = 0.05\n resistances = {'guttagonol': False}\n mutProb = 0.005\n\n delays = [300, 150, 75, 0]\n results = []\n\n for delay in delays:\n for i in range(numTrials):\n virusList = []\n virusPop = 0\n for n in range(numViruses):\n virusList.append(ResistantVirus(maxBirthProb, clearProb, resistances, mutProb))\n my_patient = TreatedPatient(virusList, maxPop)\n\n for step in range(delay + 150):\n if step == delay:\n my_patient.addPrescription('guttagonol')\n virusPop = my_patient.update()\n results.append(virusPop)\n\n toPlot = []\n for i in range(0, len(results), numTrials):\n toPlot.append(results[i:i + numTrials])\n # print toPlot\n\n for i, _ in enumerate(delays):\n pylab.subplot(2, 2, i + 1)\n pylab.hist(toPlot[i])\n pylab.show()", "def test_win(self):\n self.choice.return_value = \"ant\"\n self.input.side_effect = list(\"ant\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('Yes! The secret word is \"ant\"! 
'\n 'You have won!')", "def choose_action( self):\n \"\"\"greedy, random, e-greedy, boltzmann, bayesian\"\"\"\n\tif self.exploration == \"greedy\":\n #Choose an action with the maximum expected value.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"random\":\n #Choose an action randomly.\n a = env.action_space.sample()\n if self.exploration == \"e-greedy\":\n #Choose an action by greedily (with e chance of random action) from the Q-network\n if np.random.rand(1) < e or total_steps < pre_train_steps:\n a = env.action_space.sample()\n else:\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"boltzmann\":\n #Choose an action probabilistically, with weights relative to the Q-values.\n Q_d,allQ = sess.run([q_net.Q_dist,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.Temp:e,q_net.keep_per:1.0})\n a = np.random.choice(Q_d[0],p=Q_d[0])\n a = np.argmax(Q_d[0] == a)\n return a\n if self.exploration == \"bayesian\":\n #Choose an action using a sample from a dropout approximation of a bayesian q-network.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:(1-e)+0.1})\n a = a[0]\n return a", "def objective(trial):\n # The parameters that we will calibrate the model for are shown here.\n # Optuna trial i\n BOD = trial.suggest_uniform(\"BOD\", 0, 1) #Review ranges here\n k_r = trial.suggest_uniform(\"k_r\", 0, 1) #Review Ranges here \n \n def ChLa(t):\n return 1 # Need to link to data\n\n def I(x):\n return 1 # Need to link to data\n\n K_z = 2 * 10**(-5) # p.51\n a = K_z\n k_b = 0.1 # Table 5\n th_b = 1.047 # Table 5\n k_r = 0.1 # Table 5\n YCHO2 = 0.0083 # Table 5\n th_p = 1.036 # Table 5\n th_s = 1.065 # Table 5\n th_r = 1.047 # Table 5\n\n def Temp(t):\n \"\"\"\n Function that maps time to temperature\n \"\"\"\n return 20 # Need to link to data\n\n def P_max(t):\n return 9.6 * 1.036 **(Temp(t) - 20) # Eq. 4\n\n def L_min(t):\n I = 1 # Need to link to PAR data\n K_1 = 0.687 * 1.086**(Temp(t) - 20)\n K_2 = 15\n return I * (1 + 2 * np.sqrt(K_1 / K_2)) / (I + K_1 + I**2 / K_2) # Eq. 
5\n \n # f deals with sink and source terms \n def f(x, t):\n return -1 / YCHO2 * k_r * th_r**(Temp(t) - 20) * ChLa(t) + P_max(t) * L_min(t) * ChLa(t) - k_b * th_b**(Temp(t)-20) * BOD \n\n L = 200 # Length of domain\n dt = 1 / 48 # Mesh spacing in t\n F = a * dt # a * dt / dx**2\n T = 100 # Simulation time stop\n\n # Solving the PDE\n DO, x, t, _ = solver_FE_simple(I, a, f, L, dt, F, T)\n \n # Creating some bogus targets while database errors are happening\n DO_data = DO + np.random.random(len(DO))\n\n # Using mean squared error as the measure of fit, where we want\n # to minimize this number\n return ((DO - DO_data)**2).mean()", "def test_optimalagentfinder () :\n def valNetwork (s) : \n s = s.float()\n v = reduce(model.withReluDropout, model.v[:-1], s)\n v = model.v[-1](v)\n return v\n acrobotBases = acrobotRewardBases(np.pi / 8, np.pi / 8)\n fn = random.sample(acrobotBases, k=1).pop()\n agent = findOptimalAgent(fn)\n model = agent.model\n toExternal = lambda x, y : toExternalStateRep([x, y, 0, 0])\n valFn = reduce(compose, [float, valNetwork, torch.tensor, toExternal])\n RFn = compose(fn, toExternal)\n xRange = np.arange(-np.pi, np.pi, 0.1)\n yRange = np.arange(-np.pi, np.pi, 0.1)\n plotFunction(RFn, xRange, yRange, 'theta1', 'theta2', 'R')\n plotFunction(valFn, xRange, yRange, 'theta1', 'theta2', 'V')", "def test_gan():\n nbr_qubits = 5\n\n # Normal law\n # N = 5*10 ** 3\n #\n # Database = np.random.normal(0, 1, N)\n # test_gan_qiskit(nbr_qubits, Database)\n\n # beta\n arr_beta = beta_proba(nbr_qubits, 2, 5)\n\n general_gantest(arr_beta, nbr_qubits)\n\n # uniform not on [0, 32]\n if nbr_qubits == 5:\n arr_unif = [1 / 24] * 24 + 8 * [0]\n general_gantest(arr_unif, nbr_qubits)", "def run_simulation(self, state):\n \"*** YOUR CODE HERE ***\"\n player = 0\n visited_states = [(player, state)]\n depth_limited = self.depth != -1\n depth = self.depth\n expand = True\n while not visited_states[-1][1].isWin() and not visited_states[-1][1].isLose():\n if depth_limited and depth == 0: break\n state = self.UCB1(state, player) # Selection & Simulation\n if expand and state not in self.plays: # Expansion\n expand = False\n self.plays[state] = 0\n self.wins[state] = 0\n visited_states.append((player, state))\n player = (player + 1) % state.getNumAgents()\n if not expand and depth_limited and player == 0: depth -= 1\n \n for player, state in visited_states:\n if state in self.plays: # Not simulated nodes\n self.plays[state] += 1\n eval = self.evaluationFunction(visited_states[-1][1])\n if depth_limited:\n if player == 0: self.wins[state] += eval\n if player != 0: self.wins[state] -= eval\n else:\n if player == 0: self.wins[state] += eval\n if player != 0: self.wins[state] += (1 - eval)", "def test_smoke(self):\n\t\tinit_state = torch.tensor(0.0)\n\t\ttotal_time = torch.tensor(4.0)\n\t\tprint('Agent state trajectory and actions:')\n\t\tAgent().play(init_state, total_time)\n\t\tpyro.clear_param_store()", "def test6():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(A = 'h1', C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('ataque')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. 
Aplicacion de pruebas \"\"\"\n exp1.trafico.iperfMeasure()\n exp1.trafico.iperfMeasure(filename='iperf_ataque_test.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def prepare_experiment(assumptions):\n print(\"\\nGenerate species parameters\")\n np.random.seed(assumptions['seed']) \n params = MakeParams(assumptions) \n if assumptions[\"selected_function\"] == \"f5_invader_suppression\":\n print(\"\\nDraw invader feature\")\n params = create_invader(params, assumptions)\n print(params[\"c\"])\n \n print(\"\\nDraw per-capita function and cost\")\n f1_species_smooth, f1_species_rugged, f2_species_smooth, f2_species_rugged = draw_species_function(assumptions)\n params.update({\"f1_species_smooth\": f1_species_smooth, \"f1_species_rugged\": f1_species_rugged, \"f2_species_smooth\": f2_species_smooth, \"f2_species_rugged\": f2_species_rugged})\n gi = draw_species_cost(f1_species_smooth, assumptions)\n params.update({\"g\": gi})\n \n print(\"\\nConstruct plate\")\n np.random.seed(assumptions['seed']) \n plate = make_plate(assumptions,params)\n \n print(\"\\nAdd community function to plate\")\n plate = add_community_function(plate, assumptions, params)\n \n if not pd.isnull(assumptions[\"overwrite_plate\"]) :\n print(\"\\nUpdating the initial plate composition by overwrite_plate\")\n plate = overwrite_plate(plate, assumptions)\n \n print(\"\\nPrepare Protocol\")\n #Extract Protocol from protocol database\n algorithms = make_algorithms(assumptions)\n params_algorithm = algorithms[algorithms['algorithm_name'] == assumptions['protocol']]\n \n #Params_simulation by default contains all assumptions not stored in params.\n params_simulation = dict((k, assumptions[k]) for k in assumptions.keys() if k not in params.keys())\n \n return params, params_simulation , params_algorithm, plate", "def run_sim(self):\n \n OS = self.OpticalSystem\n TL = self.TargetList\n SU = self.SimulatedUniverse\n Obs = self.Observatory\n TK = self.TimeKeeping\n \n # TODO: start using this self.currentSep\n # set occulter separation if haveOcculter\n if OS.haveOcculter == True:\n self.currentSep = Obs.occulterSep\n \n # choose observing modes selected for detection (default marked with a flag)\n allModes = OS.observingModes\n det_modes = list(filter(lambda mode: 'imag' in mode['inst']['name'], allModes))\n # and for characterization (default is first spectro/IFS mode)\n spectroModes = list(filter(lambda mode: 'spec' in mode['inst']['name'], allModes))\n if np.any(spectroModes):\n char_modes = spectroModes\n # if no spectro mode, default char mode is first observing mode\n else:\n char_modes = [allModes[0]]\n \n # begin Survey, and loop until mission is finished\n log_begin = 'OB%s: survey beginning.'%(TK.OBnumber + 1)\n self.logger.info(log_begin)\n self.vprint(log_begin)\n t0 = time.time()\n sInd = None\n ObsNum = 0\n while not TK.mission_is_over(OS, Obs, det_modes[0]):\n \n # acquire the NEXT TARGET star index and create DRM\n old_sInd = sInd #used to save sInd if returned sInd is None\n DRM, sInd, det_intTime, waitTime, det_mode = self.next_target(sInd, det_modes)\n \n if sInd is not None:\n ObsNum += 1\n\n if OS.haveOcculter == True:\n # advance to start of observation (add slew time for selected target)\n success = TK.advanceToAbsTime(TK.currentTimeAbs.copy() + waitTime)\n \n # beginning of observation, start to populate DRM\n DRM['star_ind'] 
= sInd\n DRM['star_name'] = TL.Name[sInd]\n DRM['arrival_time'] = TK.currentTimeNorm.copy().to('day')\n DRM['OB_nb'] = TK.OBnumber\n DRM['ObsNum'] = ObsNum\n pInds = np.where(SU.plan2star == sInd)[0]\n DRM['plan_inds'] = pInds.astype(int)\n log_obs = (' Observation #%s, star ind %s (of %s) with %s planet(s), ' \\\n + 'mission time at Obs start: %s')%(ObsNum, sInd, TL.nStars, len(pInds), \n TK.currentTimeNorm.to('day').copy().round(2))\n self.logger.info(log_obs)\n self.vprint(log_obs)\n\n # PERFORM DETECTION and populate revisit list attribute\n DRM['det_info'] = []\n detected, det_fZ, det_systemParams, det_SNR, FA = \\\n self.observation_detection(sInd, det_intTime, det_mode)\n # update the occulter wet mass\n if OS.haveOcculter == True:\n DRM = self.update_occulter_mass(DRM, sInd, det_intTime, 'det')\n det_data = {}\n det_data['det_status'] = detected\n det_data['det_SNR'] = det_SNR\n det_data['det_fZ'] = det_fZ.to('1/arcsec2')\n det_data['det_params'] = det_systemParams\n det_data['det_mode'] = dict(det_mode)\n det_data['det_time'] = det_intTime.to('day')\n del det_data['det_mode']['inst'], det_data['det_mode']['syst']\n DRM['det_info'].append(det_data)\n\n # PERFORM CHARACTERIZATION and populate spectra list attribute\n DRM['char_info'] = []\n if char_modes[0]['SNR'] not in [0, np.inf]:\n characterized, char_fZ, char_systemParams, char_SNR, char_intTime = \\\n self.observation_characterization(sInd, char_modes)\n else:\n char_intTime = None\n lenChar = len(pInds) + 1 if True in FA else len(pInds)\n characterized = np.zeros((lenChar,len(char_modes)), dtype=float)\n char_SNR = np.zeros((lenChar,len(char_modes)), dtype=float)\n char_fZ = np.array([0./u.arcsec**2, 0./u.arcsec**2])\n char_systemParams = SU.dump_system_params(sInd)\n\n for mode_index, char_mode in enumerate(char_modes):\n char_data = {}\n assert char_intTime != 0, \"Integration time can't be 0.\"\n # update the occulter wet mass\n if OS.haveOcculter == True and char_intTime is not None:\n char_data = self.update_occulter_mass(char_data, sInd, char_intTime, 'char')\n if np.any(characterized):\n vprint(' Char. 
results are: {}'.format(characterized[:-1, mode_index]))\n # populate the DRM with characterization results\n char_data['char_time'] = char_intTime.to('day') if char_intTime else 0.*u.day\n char_data['char_status'] = characterized[:-1, mode_index] if FA else characterized[:,mode_index]\n char_data['char_SNR'] = char_SNR[:-1, mode_index] if FA else char_SNR[:, mode_index]\n char_data['char_fZ'] = char_fZ[mode_index].to('1/arcsec2')\n char_data['char_params'] = char_systemParams\n # populate the DRM with FA results\n char_data['FA_det_status'] = int(FA)\n char_data['FA_char_status'] = characterized[-1, mode_index] if FA else 0\n char_data['FA_char_SNR'] = char_SNR[-1] if FA else 0.\n char_data['FA_char_fEZ'] = self.lastDetected[sInd,1][-1]/u.arcsec**2 \\\n if FA else 0./u.arcsec**2\n char_data['FA_char_dMag'] = self.lastDetected[sInd,2][-1] if FA else 0.\n char_data['FA_char_WA'] = self.lastDetected[sInd,3][-1]*u.arcsec \\\n if FA else 0.*u.arcsec\n \n # populate the DRM with observation modes\n char_data['char_mode'] = dict(char_mode)\n del char_data['char_mode']['inst'], char_data['char_mode']['syst']\n DRM['char_info'].append(char_data)\n \n DRM['exoplanetObsTime'] = TK.exoplanetObsTime.copy()\n\n # append result values to self.DRM\n self.DRM.append(DRM)\n \n else:#sInd == None\n sInd = old_sInd#Retain the last observed star\n if(TK.currentTimeNorm.copy() >= TK.OBendTimes[TK.OBnumber]): # currentTime is at end of OB\n #Conditional Advance To Start of Next OB\n if not TK.mission_is_over(OS, Obs, det_mode):#as long as the mission is not over\n TK.advancetToStartOfNextOB()#Advance To Start of Next OB\n elif(waitTime is not None):\n #CASE 1: Advance specific wait time\n success = TK.advanceToAbsTime(TK.currentTimeAbs.copy() + waitTime)\n self.vprint('waitTime is not None')\n else:\n startTimes = TK.currentTimeAbs.copy() + np.zeros(TL.nStars)*u.d # Start Times of Observations\n observableTimes = Obs.calculate_observableTimes(TL,np.arange(TL.nStars),startTimes,self.koMap,self.koTimes,self.mode)[0]\n #CASE 2 If There are no observable targets for the rest of the mission\n if((observableTimes[(TK.missionFinishAbs.copy().value*u.d > observableTimes.value*u.d)*(observableTimes.value*u.d >= TK.currentTimeAbs.copy().value*u.d)].shape[0]) == 0):#Are there any stars coming out of keepout before end of mission\n self.vprint('No Observable Targets for Remainder of mission at currentTimeNorm= ' + str(TK.currentTimeNorm.copy()))\n #Manually advancing time to mission end\n TK.currentTimeNorm = TK.missionLife\n TK.currentTimeAbs = TK.missionFinishAbs\n else:#CASE 3 nominal wait time if at least 1 target is still in list and observable\n #TODO: ADD ADVANCE TO WHEN FZMIN OCURS\n inds1 = np.arange(TL.nStars)[observableTimes.value*u.d > TK.currentTimeAbs.copy().value*u.d]\n inds2 = np.intersect1d(self.intTimeFilterInds, inds1) #apply intTime filter\n inds3 = self.revisitFilter(inds2, TK.currentTimeNorm.copy() + self.dt_max.to(u.d)) #apply revisit Filter #NOTE this means stars you added to the revisit list \n self.vprint(\"Filtering %d stars from advanceToAbsTime\"%(TL.nStars - len(inds3)))\n oTnowToEnd = observableTimes[inds3]\n if not oTnowToEnd.value.shape[0] == 0: #there is at least one observableTime between now and the end of the mission\n tAbs = np.min(oTnowToEnd)#advance to that observable time\n else:\n tAbs = TK.missionStart + TK.missionLife#advance to end of mission\n tmpcurrentTimeNorm = TK.currentTimeNorm.copy()\n success = TK.advanceToAbsTime(tAbs)#Advance Time to this time OR start of next OB 
following this time\n self.vprint('No Observable Targets a currentTimeNorm= %.2f Advanced To currentTimeNorm= %.2f'%(tmpcurrentTimeNorm.to('day').value, TK.currentTimeNorm.to('day').value))\n else:#TK.mission_is_over()\n dtsim = (time.time() - t0)*u.s\n log_end = \"Mission complete: no more time available.\\n\" \\\n + \"Simulation duration: %s.\\n\"%dtsim.astype('int') \\\n + \"Results stored in SurveySimulation.DRM (Design Reference Mission).\"\n self.logger.info(log_end)\n print(log_end)", "def run_all_signal_trials(args):\n delta_t = args.deltaT\n delta_t_days = delta_t / 86400.\n\n ana_dir = cy.utils.ensure_dir('/data/user/apizzuto/csky_cache/greco_ana')\n greco_ana = cy.get_analysis(\n cy.selections.repo,\n 'version-002-p10',\n cy.selections.GRECOOnlineDataSpecs.GRECO_IC86_2012_2019,\n dir=ana_dir)\n conf = {\n 'ana': greco_ana,\n 'extended': True,\n 'space': \"ps\",\n 'time': \"transient\",\n 'sig': 'transient'}\n\n only_gamma = not args.all_nova\n print('only', only_gamma)\n print('all', args.all_nova)\n print('~all', ~args.all_nova)\n weighting_scheme = args.weighting\n src, sample_str = get_sources(only_gamma, weighting_scheme, delta_t_days)\n\n cy.CONF['src'] = src\n cy.CONF['mp_cpus'] = 5\n\n def ndarray_to_Chi2TSD(trials):\n return cy.dists.Chi2TSD(cy.utils.Arrays(trials))\n\n bg = cy.bk.get_all(\n '/data/user/apizzuto/Nova/csky_trials/stacking_sens_res/bg/',\n '{}_delta_t_{:.2e}_seed_*.npy'.format(\n sample_str, delta_t),\n merge=np.concatenate,\n post_convert=ndarray_to_Chi2TSD\n )\n\n tr = cy.get_trial_runner(\n conf, ana=greco_ana, src=src,\n inj_conf={'flux': cy.hyp.PowerLawFlux(args.index)}\n )\n\n result = {}\n\n if delta_t < 1e3:\n n_sig_step = 5\n elif delta_t <= 86400. and args.index <= 2.5:\n n_sig_step = 7\n elif delta_t <= 86400:\n n_sig_step = 15\n elif args.index <= 2.5:\n n_sig_step = 15\n else:\n n_sig_step = 25\n\n ########################################################################\n # SENSITIVITY CALCULATION\n ########################################################################\n beta = 0.9\n sensitivity = tr.find_n_sig(\n bg.median(), beta, batch_size=args.ntrials_sig,\n n_sig_step=n_sig_step, max_batch_size=0,\n logging=True, n_bootstrap=1)\n\n sensitivity['E2dNdE'] = tr.to_E2dNdE(sensitivity, E0=1., unit=1e3)\n\n ########################################################################\n # DISCOVERY POTENTIAL CALC\n ########################################################################\n thresh_ts = bg.isf_nsigma(5.)\n beta = 0.5 # beta = 0.5\n discovery = tr.find_n_sig(\n thresh_ts, beta, batch_size=args.ntrials_sig,\n n_sig_step=n_sig_step, max_batch_size=0,\n logging=True, n_bootstrap=1)\n discovery['E2dNdE'] = tr.to_E2dNdE(discovery, E0=1., unit=1e3)\n discovery['nsigma'] = 5.\n discovery['CL'] = beta\n\n ########################################################################\n # FIT BIAS TRIALS\n ########################################################################\n n_sigs = np.r_[:201:10]\n trials = [tr.get_many_fits(\n int(args.ntrials_sig/2), n_sig=n_sig,\n logging=False, seed=n_sig) for n_sig in n_sigs]\n for (n_sig, t) in zip(n_sigs, trials):\n t['ntrue'] = np.repeat(n_sig, len(t))\n allt = cy.utils.Arrays.concatenate(trials)\n\n result['bg'] = bg\n result['sensitivity'] = sensitivity\n result['discovery'] = discovery\n result['fit'] = allt\n result['settings'] = args\n result['source_info'] = {'ra': src.ra, 'dec': src.dec, 'mjd': src.mjd}\n\n with open(\n '/data/user/apizzuto/Nova/csky_trials/stacking_sens_res/'\n + 
'signal_results/' +\n '{}_delta_t_{:.2e}_gamma_{}_.pkl'.format(\n sample_str, delta_t, args.index), 'wb') as f:\n pickle.dump(result, f)", "def simulationDelayedTreatment(numTrials):\n \n \n results = []\n gutresults = []\n for a in range(300):\n results.append([])\n gutresults.append([])\n for b in range(numTrials):\n viruses = []\n for c in range(10000):\n resistances = {'guttagonol': False}\n vir = ResistantVirus(.1, .05, resistances, .005)\n viruses.append(vir)\n \n Mark = TreatedPatient(viruses, 1000)\n \n for d in range(150):\n pop = Mark.update()\n results[d].append(pop)\n gutpop = Mark.getResistPop(['guttagonol'])\n gutresults[d].append(gutpop)\n \n Mark.addPrescription('guttagonol')\n \n for e in range(150, 300):\n newpop = Mark.update()\n results[e].append(newpop)\n newgutpop = Mark.getResistPop(['guttagonol'])\n gutresults[e].append(newgutpop)\n \n FinalResults = results[299]\n print len(FinalResults)\n \n \n \n pylab.figure(5)\n pylab.hist(FinalResults, bins = 10)\n pylab.title('Simulation with Drugs - Frequency')\n pylab.xlabel('Virus Population')\n pylab.ylabel('Number of Trials with Population') \n pylab.legend()\n pylab.show()", "def main():\n\n # Hypothesis:\n # The `impact` encapsulates the volatility, stability and overall\n # fluctuation of the market; in particular, movements that would\n # affect one's portfolio, e.g. unexpected (i.e. not predicted)\n # increases or drops in prices.\n # For the StrategyLearner should directly affect the learned\n # policy, particularly, in terms of willingness to take risks by\n # betting on the behavior of the market.\n # This can be translated into three metrics:\n # - Number of entries:\n # These should be reduced as market impact increases which\n # shows the learning agent being more cautious about its bets\n # - Cumulative return:\n # Directly related to the point mentioned above, as market\n # impact increases and the agent's willingness to take risks\n # decreaes, so is the overall performance of the strategy\n # - Training episodes:\n # This applies specifically to the Q-Learning agent, but it\n # is interesting to see how as the market impact increases,\n # the number of complete training episodes (i.e. a complete\n # pass on the trading data) is not affected. One would think\n # that the agent would converge faster when the impact is\n # large as it would quickly realize that the most optimal\n # strategy is to not do anything. 
However, impact does not\n # affect the rate of convergence, but rather the strategy\n # that the agent converges to\n\n # Set the seed for reproducibility\n random.seed(1481090000)\n\n # Experiment parameters\n symbol = 'JPM'\n # In-sample: January 1, 2008 to December 31 2009\n start_date = dt.datetime(2008, 1, 1)\n end_date = dt.datetime(2009, 12, 31)\n starting_value = 100000\n commission = 0.0\n # Values to use to evaluate the effect of the impact\n impact_values = [0.0, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5, 1.0]\n\n all_entries = []\n all_returns = []\n all_episodes = []\n\n for impact in impact_values:\n log.info(\"Evaluating the effect of impact=%s\", impact)\n strategy_learner = StrategyLearner(verbose=False, impact=impact)\n\n log.info(\"Training StrategyLearner\")\n strategy_learner.addEvidence(\n symbol=symbol,\n sd=start_date,\n ed=end_date,\n sv=starting_value\n )\n\n log.info(\"Querying StrategyLearner to generate trades\")\n trades = strategy_learner.testPolicy(\n symbol=symbol,\n sd=start_date,\n ed=end_date,\n sv=starting_value\n )\n\n log.info(\"Transforming StrategyLearner trades into marketsim orders\")\n orders = _convert_trades_to_marketisim_orders(symbol, trades)\n\n log.info(\"Computing portfolio values for %d orders\", orders.shape[0])\n port_vals = compute_portvals(\n orders,\n start_val=starting_value,\n commission=commission,\n impact=impact\n )\n\n cumulative_return = _compute_cumulative_return(port_vals)\n\n all_entries.append(strategy_learner.metadata['entries'])\n all_returns.append(cumulative_return)\n all_episodes.append(strategy_learner.metadata['training_episodes'])\n\n _plot_and_save_number_of_entries_per_impact_value(impact_values, all_entries)\n _plot_and_save_number_of_episodes_per_impact_value(impact_values, all_episodes)\n _plot_and_save_cumulative_return_per_impact_value(impact_values, all_returns)", "def run_trials(self, num=0):\n if num == 'all':\n self.trials_to_run = len(self.trials)\n else:\n self.trials_to_run = num\n self.vision_egg.go()", "def runAnalyticalSim(self, sim_rounds = 10**7, factor=\"mu\"):\n # create simulation agents\n M = self.getPopulationSize()\n N = self.getSampleSize()\n\n available_strategies = self.getAvailableStrategies()\n sim_agents = [Agent(available_strategies) for i in range(M)]\n tot_count = [0 for strategy in available_strategies]\n\n # count strategies in current population\n strat_count = [0 for strategy in available_strategies]\n for sim_agent in sim_agents:\n strat_count[available_strategies.index(sim_agent.getStrategy())] += 1\n\n # repeat 10 million times\n for i in range(sim_rounds):\n\n # handle each agent\n for focal_player in sim_agents:\n\n # update frequencies for avg payoffs\n self.clearFrequencies()\n for i, strategy in enumerate(available_strategies):\n self.setFrequency(strategy, strat_count[i])\n\n # option 1: random switch strategy\n mu_proba = np.random.random()\n if mu_proba <= self.getExplorationRate():\n strat_count[available_strategies.index(focal_player.getStrategy())] -= 1\n focal_player.switchToOtherAvailableStrategy()\n strat_count[available_strategies.index(focal_player.getStrategy())] += 1\n\n # option 2: choose model to (maybe) imitate\n else:\n # select model player\n model_player_index = np.random.randint(0, M-1)\n while model_player_index == sim_agents.index(focal_player):\n model_player_index = np.random.randint(0, M-1)\n model_player = sim_agents[model_player_index]\n\n # define imitation outcome\n proba_copy = self.Fermi(self.getPayoff(model_player.getStrategy()), 
self.getPayoff(focal_player.getStrategy()))\n proba_event = np.random.random()\n if proba_event <= proba_copy:\n strat_count[available_strategies.index(focal_player.getStrategy())] -= 1\n focal_player.setStrategy(model_player.getStrategy())\n strat_count[available_strategies.index(focal_player.getStrategy())] += 1\n\n # remember population strategies\n for i in range(len(tot_count)):\n tot_count[i] += strat_count[i]\n\n # obtain final frequency\n for i in range(len(strat_count)):\n strat_count[i] = strat_count[i] / M\n\n # obtain total frequency\n for i, strategy in enumerate(available_strategies):\n tot_count[i] = tot_count[i] / (sim_rounds * M)\n\n # export to file: strat_count (enables comparison of both results)\n self.saveResults(tot_count, \"{}\".format(self.getCase()), factor)", "def run():\n trials = 100\n\n multipliers = [0.25, 0.3, 0.35, 0.5, 0.75, 1, 1.25, 1.45, 1.5, 1.55, 1.6] # Coefficients for learning rate\n\n mean_penalty = []\n median_penalty = []\n std_penalty = []\n\n mean_trial_time = []\n median_trial_time = []\n std_trial_time = []\n\n mean_success_rate = []\n median_success_rate = []\n std_success_rate = []\n\n for m in multipliers:\n all_penalties = [] # All penalties from trail sets\n all_average_trial_time = []\n all_success_rates = []\n\n for i in range(0, 20):\n # print \"Trial set:\", i\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n agent = e.create_agent(LearnerAgent) # create agent\n agent.mult = m\n e.set_primary_agent(agent, enforce_deadline=True) # specify agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0, display=False) # create simulator (uses pygame when display=True, if available)\n\n sim.run(n_trials=trials) # run for a specified number of trials\n\n all_penalties.append(agent.all_trails_penalties)\n all_average_trial_time.append(agent.time/float(trials))\n all_success_rates.append(float(trials-agent.aborted_trials)/trials)\n\n mean_penalty.append(np.mean(all_penalties))\n median_penalty.append(np.median(all_penalties))\n std_penalty.append(np.std(all_penalties))\n\n mean_trial_time.append(np.mean(all_average_trial_time))\n median_trial_time.append(np.median(all_average_trial_time))\n std_trial_time.append(np.std(all_average_trial_time))\n\n mean_success_rate.append(np.mean(all_success_rates))\n median_success_rate.append(np.median(all_success_rates))\n std_success_rate.append(np.std(all_success_rates))\n\n for i in range(0, len(multipliers)):\n print \"\"\n print \"Multiplier:\", multipliers[i]\n print \"\"\n print \"Mean penalty per {} trials:\".format(trials), mean_penalty[i]\n print \"Median penalty per {} trials:\".format(trials), median_penalty[i]\n print \"Std.Dev. penalty per {} trials:\".format(trials), std_penalty[i]\n\n print \"\"\n print \"Mean trial time:\", mean_trial_time[i]\n print \"Median trial time:\", median_trial_time[i]\n print \"Std.Dev. trial time:\", std_trial_time[i]\n\n print \"\"\n print \"Mean success rate per {} trials:\".format(trials), mean_success_rate[i]\n print \"Median success rate per {} trials:\".format(trials), median_success_rate[i]\n print \"Std.Dev. success rate per {} trials:\".format(trials), std_success_rate[i]", "def run_experiment() -> List[bool]:\n return [random.random() < 0.5 for _ in range(1000)]", "def select_action(state, policy, model, num_actions,\n EPS_START, EPS_END, EPS_DECAY, steps_done, alpha, beta):\n # sample = random.random()\n # eps_threshold = EPS_END + (EPS_START - EPS_END) * \\\n # math.exp(-1. 
* steps_done / EPS_DECAY)\n # .data.max(1)[1].view(1, 1)\n # if sample <= eps_threshold:\n # return LongTensor([[random.randrange(num_actions)]])\n\n\n \n Q = model(Variable(state, volatile=True).type(FloatTensor))\n pi0 = policy(Variable(state, volatile=True).type(FloatTensor))\n # print(pi0.data.numpy())\n V = torch.log((torch.pow(pi0, alpha) * torch.exp(beta * Q)).sum(1) ) / beta\n \n #### FOUND ERROR: ( Q ) returns a tensor of nan at some point\n if np.isnan( Q.sum(1).data[0]) :\n print(\"Q = \", Q)\n print(\"state = \", state)\n\n pi_i = torch.pow(pi0, alpha) * torch.exp(beta * (Q - V))\n m = Categorical(pi_i)\n action = m.sample().data.view(1, 1)\n return action\n # numpy.random.choice(numpy.arange(0, num_actions), p=probabilities)", "def target_portfolio_simulation(num_of_years=30, trials=100, method='normal'):\n print(\"Running method target_portfolio_simulation()\")\n\n sim_fia_cv = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_base_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_port_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_income = pd.DataFrame(index=range(num_of_years + 1))\n\n read_income_inputs = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_model_inputs',\n index_col=[0])\n\n read_returns_est = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_assets_returns_estimates',\n index_col=[0])\n\n read_asset_weights = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='asset_weights',\n index_col=[0])\n\n # read_asset_weights.drop(read_asset_weights.index[-1], axis=0, inplace=True)\n\n # read random returns for simulation\n read_normal = pd.read_csv(src + 'median_returns_unsorted.csv', index_col=[0], parse_dates=True)\n cols = [read_normal.columns[c].split('_')[1] for c in np.arange(len(read_normal.columns))]\n read_normal.rename(columns=dict(zip(list(read_normal.columns), cols)), inplace=True)\n\n read_small = pd.read_csv(src + 'median_returns_smallest.csv', index_col=[0], parse_dates=True)\n read_small.rename(columns=dict(zip(list(read_small.columns), cols)), inplace=True)\n\n read_large = pd.read_csv(src + 'median_returns_largest.csv', index_col=[0], parse_dates=True)\n read_large.rename(columns=dict(zip(list(read_large.columns), cols)), inplace=True)\n\n assets_col_names = list(read_normal.columns)\n\n tickers = list(read_asset_weights.index)\n wts = np.array(read_asset_weights.loc[:, 'base'])\n\n def asset_median_returns(data, ticker):\n return data.filter(regex=ticker).median(axis=1)\n\n # dataframe for unsorted returns (normal)\n median_returns_normal = read_normal.copy()\n median_returns_normal.loc[:, 'portfolio_return'] = median_returns_normal.dot(wts)\n median_normal_fia = pd.DataFrame({'FIA': asset_median_returns(read_normal, 'FIA')})\n\n # dataframe for smallest to largest returns\n median_returns_smallest = read_small.copy()\n median_returns_smallest.loc[:, 'portfolio_return'] = median_returns_smallest.dot(wts)\n median_smallest_fia = pd.DataFrame({'FIA': asset_median_returns(read_small, 'FIA')})\n\n # dataframe for largest to smallest returns\n median_returns_largest = read_large.copy()\n median_returns_largest.loc[:, 'portfolio_return'] = median_returns_largest.dot(wts)\n median_largest_fia = pd.DataFrame({'FIA': asset_median_returns(read_large, 'FIA')})\n\n years = list(range(0, num_of_years + 1))\n income_cols = ['year', 'strategy_term', 'index_returns', 'term_ret', 'term_ret_with_par', 'term_annualize',\n 
'ann_net_spread', 'term_ret_netspr', 'high_inc_benefit_base', 'rider_fee', 'eoy_income',\n 'contract_value']\n\n term = int(read_income_inputs.loc['term', 'inputs'])\n fia_ret = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Returns']\n fia_risk = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Risk']\n par_rate = float(read_income_inputs.loc['par_rate', 'inputs'])\n spread = float(read_income_inputs.loc['spread', 'inputs'])\n bonus_term = int(read_income_inputs.loc['bonus_term', 'inputs'])\n premium = float(read_income_inputs.loc['premium', 'inputs'])\n income_bonus = float(read_income_inputs.loc['income_bonus', 'inputs'])\n\n income_starts = int(read_income_inputs.loc['start_income_years', 'inputs'])\n income_growth = float(read_income_inputs.loc['income_growth', 'inputs'])\n rider_fee = float(read_income_inputs.loc['rider_fee', 'inputs'])\n inc_payout_factor = float(read_income_inputs.loc['income_payout_factor', 'inputs'])\n contract_bonus = float(read_income_inputs.loc['contract_bonus', 'inputs'])\n social = float(read_income_inputs.loc['social', 'inputs'])\n inflation = float(read_income_inputs.loc['inflation', 'inputs'])\n wtd_cpn_yield = float(read_income_inputs.loc['wtd_coupon_yld', 'inputs'])\n life_expectancy = int(read_income_inputs.loc['life_expectancy_age', 'inputs'])\n clients_age = int(read_income_inputs.loc['clients_age', 'inputs'])\n\n # ---------------INCOME MODEL--------------------------------------------\n runs = 0\n returns_dict = {}\n asset_dict = {}\n fia_dict = {}\n\n income_df = pd.DataFrame(index=years, columns=income_cols)\n income_df.loc[:, 'year'] = years\n income_df.loc[:, 'strategy_term'] = income_df.loc[:, 'year'] % term\n income_df.loc[:, 'strategy_term'] = income_df['strategy_term'].apply(lambda x: 1 if x == 0 else 0)\n\n if method == 'normal':\n income_df.loc[:, 'index_returns'] = read_normal.loc[:, 'FIA']\n\n elif method == 'smallest':\n income_df.loc[:, 'index_returns'] = read_small.loc[:, 'FIA']\n\n else:\n income_df.loc[:, 'index_returns'] = read_large.loc[:, 'FIA']\n\n # income_df.loc[:, 'index_returns'] = np.random.normal(fia_ret, fia_risk, size=(len(years), 1))\n\n cumprod = (1. 
+ income_df['index_returns']).rolling(window=term).agg(lambda x: x.prod()) - 1\n income_df.loc[:, 'term_ret'] = np.where(income_df.loc[:, 'strategy_term'] == 1, cumprod, 0)\n income_df.loc[:, 'term_ret_with_par'] = income_df.loc[:, 'term_ret'] * par_rate\n income_df.loc[:, 'term_annualize'] = income_df.loc[:, 'term_ret_with_par'].apply(\n lambda x: (1 + x) ** (1 / term) - 1)\n income_df.loc[:, 'ann_net_spread'] = income_df.loc[:, 'term_annualize'] - spread\n income_df.loc[:, 'ann_net_spread'] = np.where(income_df.loc[:, 'strategy_term'] == 1,\n income_df.loc[:, 'ann_net_spread'], 0)\n income_df.loc[:, 'term_ret_netspr'] = income_df.loc[:, 'ann_net_spread'].apply(lambda x: (1 + x) ** term - 1)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'high_inc_benefit_base'] = premium * (1 + income_bonus)\n\n elif counter <= min(bonus_term, income_starts):\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base'] * \\\n (1 + income_growth)\n else:\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base']\n\n income_df.loc[:, 'rider_fee'] = income_df.loc[:, 'high_inc_benefit_base'] * rider_fee\n income_df.loc[:, 'eoy_income'] = np.where(income_df.loc[:, 'year'] > income_starts,\n income_df.loc[:, 'high_inc_benefit_base'] * inc_payout_factor, 0)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'contract_value'] = premium * (1 + contract_bonus)\n\n elif income_df.loc[counter, 'strategy_term'] == 1:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee']\n x2 = (x1 * (1 + income_df.loc[counter, 'term_ret_netspr'])) - income_df.loc[counter, 'eoy_income']\n income_df.loc[counter, 'contract_value'] = x2\n\n else:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee'] - \\\n income_df.loc[counter, 'eoy_income']\n\n income_df.loc[counter, 'contract_value'] = x1\n\n # variable stores the income number that is used in the base and fia portfolio calcs.\n\n income_from_fia = income_df.loc[income_df.index[-1], 'eoy_income']\n\n income_df.loc[:, 'contract_value'] = income_df.loc[:, 'contract_value'].apply(lambda x: 0 if x <= 0 else x)\n\n sim_fia_cv.loc[:, str(runs)] = income_df.loc[:, 'contract_value']\n\n # --------------------BASE MODEL---------------------------------------------\n base_wts = read_asset_weights.loc[:, 'base']\n base_assets = list(base_wts.index)\n base_weights = list(base_wts.values)\n base_returns = list(read_returns_est.loc[:, 'Annualized Returns'].values)\n base_std = list(read_returns_est.loc[:, 'Annualized Risk'].values)\n\n base_investment = float(read_income_inputs.loc['risky_assets', 'Base'])\n adv_fees = float(read_income_inputs.loc['advisor_fees', 'Base'])\n\n # -------------------required income----------------------------------\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n cpn_income_base = base_investment * wtd_cpn_yield\n\n # ----------------------RANDOM RETURNS--------------------------\n r_cols = base_assets\n boy_value = ['bv_{}'.format(name) for name in base_assets]\n eoy_value = ['ev_{}'.format(name) for name in base_assets]\n\n random_returns = pd.DataFrame(index=income_df.index, columns=r_cols)\n\n for c in range(len(r_cols)):\n ret = np.random.normal(base_returns[c], base_std[c], size=(len(random_returns.index), 1))\n\n if method 
== 'smallest':\n random_returns = read_small.copy()\n\n elif method == 'largest':\n random_returns = read_large.copy()\n\n else:\n random_returns = read_normal.copy()\n\n base_df = random_returns.copy()\n fia_portfolio_df = random_returns.copy()\n port_investment = float(read_income_inputs.loc['risky_assets', 'FIA'])\n cpn_income_port = port_investment * wtd_cpn_yield\n\n # -------------BASE PORTFOLIO----------------------------\n for name in boy_value:\n base_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment for c in range(len(boy_value))]\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total_net_fees'] = 0.0\n base_df.loc[counter, 'income'] = 0.0\n base_investment = base_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - base_df.loc[\n counter, 'adv_fees']\n\n # --coupon payment is invested back into the risky portfolio until the income is withdrawn----\n base_investment = base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n else:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = income_needed + social\n\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees'] - \\\n base_df.loc[counter, 'income']\n\n base_investment = base_df.loc[counter, 'total_net_fees']\n\n base_df.loc[:, 'adj_total'] = base_df.loc[:, 'total_net_fees'].apply(lambda x: x if x > 0 else 0)\n sim_base_total.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_net_fees']\n sim_base_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'income']\n\n # ----------------------------FIA PORTFOLIO----------------------------------------------\n for name in boy_value:\n fia_portfolio_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_net_fees'] = 0.0\n fia_portfolio_df.loc[counter, 'income'] = 0.0\n port_investment = fia_portfolio_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees'] + cpn_income_port\n\n else:\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = income_needed + social\n\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] - \\\n fia_portfolio_df.loc[counter, 'income']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n sim_port_total.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'income']\n\n fia_portfolio_df.loc[:, 'adj_total'] = fia_portfolio_df.loc[:, 'total_net_fees'].apply(\n lambda x: x if x > 0 else 0)\n\n # ---------income breakdown for Base portfolio----------------------------------\n base_df.to_csv(src + 'base_port_detail.csv')\n sim_base_total.to_csv(src + 'base_ending_values.csv')\n income_breakdown_base = pd.DataFrame(sim_base_total.quantile(0.5, axis=1))\n income_breakdown_base.loc[:, 'income_from_portfolio'] = sim_base_income.quantile(0.5, axis=1)\n income_breakdown_base.loc[:, 'fia_income'] = 0.0\n income_breakdown_base.loc[:, 'social_security_income'] = social\n income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n\n income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_base.loc[:, 'income_from_portfolio'][\n income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n axis=1)\n\n # ------------Block Ends-------------------------------------------------------------\n\n # ---------income breakdown for FIA portfolio----------------------------------\n fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n\n income_breakdown_port = pd.DataFrame(sim_port_total.quantile(0.5, axis=1))\n income_breakdown_port.loc[:, 'income_from_portfolio'] = sim_port_income.quantile(0.5, axis=1)\n income_breakdown_port.loc[:, 'fia_income'] = income_from_fia\n income_breakdown_port.loc[:, 'social_security_income'] = social\n income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n\n income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_port.loc[:, 'income_from_portfolio'][\n income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n axis=1)\n\n # ------------Block Ends-------------------------------------------------------------\n q_cut = [0.0, 0.05, 0.25, 0.5, 0.75, 0.95, 1.0]\n sim_base_income[sim_base_total < income_needed] = 0.0\n\n sim_port_income[sim_port_total < income_net_fia_income] = 0\n\n sim_port_income = sim_port_income + income_from_fia\n\n # base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 
0.90])\n\n # port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n\n base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile(q_cut)\n\n port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile(q_cut)\n\n # q_cut = [0.0, .05, 0.25, 0.5, 0.75, 0.95, 1.0]\n cols = ['Min', '5th', '25th', '50th', '75th', '90th', 'Max']\n\n # ----drop year 0--------\n sim_base_total = sim_base_total[1:]\n sim_port_total = sim_port_total[1:]\n\n # ----------------quantile analysis for base terminal value--------------------------\n base_qcut = pd.DataFrame(index=sim_base_total.index, columns=cols)\n for c in range(len(cols)):\n base_qcut.loc[:, cols[c]] = sim_base_total.quantile(q_cut[c], axis=1)\n\n base_qcut.clip(lower=0, inplace=True)\n\n # ----------------------quantile analysis for base income----------------------------\n base_income_qcut = pd.DataFrame(index=sim_base_income.index, columns=cols)\n for c in range(len(cols)):\n base_income_qcut.loc[:, cols[c]] = sim_base_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # base_income_qcut = base_income_qcut.loc[income_starts:]\n\n # -------------quantile analysis for portfolio terminal value ----------------\n port_qcut = pd.DataFrame(index=sim_port_total.index, columns=cols)\n for c in range(len(cols)):\n port_qcut.loc[:, cols[c]] = sim_port_total.quantile(q_cut[c], axis=1)\n\n port_qcut.clip(lower=0, inplace=True)\n\n # ---------------quantile analysis for portfolio income----------------------------\n port_income_qcut = pd.DataFrame(index=sim_port_income.index, columns=cols)\n for c in range(len(cols)):\n port_income_qcut.loc[:, cols[c]] = sim_port_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # port_income_qcut = port_income_qcut.loc[income_starts:]\n\n # ----------probability ending value will be less than 0 at the end of the horizon -----------------------\n base_legacy_risk = (sim_base_total.loc[sim_base_total.index[life_expectancy - clients_age]] < 0).sum() / (\n trials + 1)\n port_legacy_risk = (sim_port_total.loc[sim_port_total.index[life_expectancy - clients_age]] < 0).sum() / (\n trials + 1)\n\n legacy_risk = pd.DataFrame([base_legacy_risk, port_legacy_risk,\n 'Prob. 
of portfolio value less than 0 at the end of the expected life'],\n index=['base', 'fia_portfolio', 'Notes'],\n columns=['Ruin Probability'])\n\n # -----------Year-wise probability of ending value greater than 0 -----------------\n base_psuccess = sim_base_total.apply(lambda x: x > 0).sum(axis=1) / (trials + 1)\n port_psuccess = sim_port_total.apply(lambda x: x > 0).sum(axis=1) / (trials + 1)\n\n # -----------------------WRITING FILES TO EXCEL ---------------------------\n col_names = ['50th', 'age', 'comment']\n writer = pd.ExcelWriter(src + method + '_simulated_income_summary.xlsx', engine='xlsxwriter')\n read_income_inputs.to_excel(writer, sheet_name='inputs_for_income')\n\n read_returns_est.to_excel(writer, sheet_name='asset_returns_estimates')\n\n age_index = list(range(clients_age + 1, clients_age + len(base_qcut) + 1))\n base_qcut.loc[:, 'age'] = age_index\n base_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='base_ending_value_quantiles')\n\n base_income_qcut = base_income_qcut.loc[1:, :]\n base_income_qcut.loc[:, 'age'] = age_index\n base_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_income_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='base_income_quantiles')\n\n port_qcut.loc[:, 'age'] = age_index\n port_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n\n port_income_qcut = port_income_qcut.loc[1:, :]\n port_income_qcut.loc[:, 'age'] = age_index\n port_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n # port_income_qcut.loc[:, 'ending_contract_value'] = sim_fia_cv\n port_income_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='fia_port_income_quantiles')\n\n # prob_success_df = pd.concat([base_psuccess, port_psuccess], axis=1)\n # prob_success_df.rename(columns={prob_success_df.columns[0]: 'prob(ending_value>0)_base',\n # prob_success_df.columns[1]: 'prob(ending_value>0)_port'}, inplace=True)\n\n # prob_success_df.loc[:, 'age'] = age_index\n # prob_success_df.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n # prob_success_df.to_excel(writer, sheet_name='success_probability')\n\n income_breakdown_base = income_breakdown_base.loc[1:, :]\n income_breakdown_base.loc[:, 'age'] = age_index\n income_breakdown_base.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_base.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_breakdown_median')\n\n income_breakdown_port = income_breakdown_port.loc[1:, :]\n income_breakdown_port.loc[:, 'age'] = age_index\n income_breakdown_port.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_port.loc[:, 'ending_contract_value'] = income_df.loc[:, 'contract_value']\n income_breakdown_port.loc[income_starts:, :].to_excel(writer, sheet_name='fia_income_breakdown_median')\n\n if method == 'normal':\n # median_returns_normal.loc[:, 'fia_median_returns'] = median_normal_fia\n median_returns_normal.to_excel(writer, sheet_name='gr_port_median_normal')\n\n elif method == 'smallest':\n # median_returns_smallest.loc[:, 'fia_median_returns'] = median_smallest_fia\n median_returns_smallest.to_excel(writer, sheet_name='gr_port_median_asc')\n\n else:\n # median_returns_largest.loc[:, 'fia_median_returns'] = median_largest_fia\n median_returns_largest.to_excel(writer, 
sheet_name='gr_port_median_desc')\n\n terminal_val = pd.read_csv(src + 'terminal_values.csv', index_col=[0])\n ending_val = pd.read_csv(src + 'ending_values.csv', index_col=[0])\n ending_val_ror = pd.read_csv(src + 'ending_values_ror.csv', index_col=[0])\n\n terminal_val.to_excel(writer, sheet_name='terminal_values')\n ending_val.to_excel(writer, sheet_name='port_ending_values')\n ending_val_ror.to_excel(writer, sheet_name='port_annual_growth')\n\n writer.save()\n\n # -----------------Plotting charts--------------------------------------------\n base_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - Base Portfolio')\n plt.savefig(src + \"quantile_terminal_base.png\")\n plt.close('all')\n\n base_income_qcut.plot(grid=True, title='Quantile Income - Base Portfolio')\n plt.savefig(src + \"quantile_income_base.png\")\n plt.close('all')\n\n base_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - Base Portfolio')\n plt.savefig(src + \"success_probabilty_base.png\")\n plt.close('all')\n\n (1 - base_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - Base Portfolio')\n plt.savefig(src + \"ruin_probability_base.png\")\n plt.close('all')\n\n port_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - FIA Portfolio')\n plt.savefig(src + \"quantile_terminal_fia.png\")\n plt.close('all')\n\n port_income_qcut.plot(grid=True, title='Quantile Income - FIA Portfolio')\n plt.savefig(src + \"quantile_income_fia.png\")\n plt.close('all')\n\n port_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - FIA Portfolio')\n plt.savefig(src + \"success_probabilty_fia.png\")\n plt.close('all')\n\n (1 - port_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - FIA Portfolio')\n plt.savefig(src + \"ruin_probability_fia.png\")\n plt.close('all')\n\n print(\"simulation completed for {}\".format(method))", "def test(simulation=False):\n\tsimulation = False\n\tif simulation:\n\t\tdyn.enable_vrep()\n\t\n\tctrl = init_ctrl()\n\n\tif simulation:\n\t\tpeter = SymbiotSpidey(ctrl)\n\telse:\n\t\tpeter = Spidey(ctrl)\n\n\tif simulation:\n\t\tctrl.start_sim()\n\n\tpeter.compliant = False\n\tprint peter.legs_references\n\n\tleg = peter.legs[0]\n\tpos = leg.position()\n\tpos = Vector3D(pos.x+6, pos.y, pos.z)\n\tleg.move(pos)\n\tctrl.wait(200)\n\tprint pos.x, leg.position().x, pos.x == leg.position().x\n\n\tpeter.compliant = True\n\n\tif simulation:\n\t\tctrl.stop_sim()", "def test_unitary_gate_real(self):\n shots = 100\n qobj = ref_unitary_gate.unitary_gate_circuits_real_deterministic(final_measure=True)\n qobj.config.shots = shots\n circuits = [experiment.header.name for experiment in qobj.experiments]\n targets = ref_unitary_gate.unitary_gate_counts_real_deterministic(shots)\n job = QasmSimulator().run(qobj)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def simulate(params,n_states,n_trials,env = \"rich\", policy=\"softmax\",\\\n D=0.5, mod = \"constant\",thresh = 0, k=1,rnd_seeds = None, V0=0.0, full=False,\n rmag = 1, lmag = 0):\n\n\tdef calc_D(state):\n\t\t\"\"\"\n\t\tcalculates D for the current trial and returns\n\t\tthe updated state tracker for D and respective betas\n\n\t\tD represents dopamine levels (equivalent of rho in OpAL)\n\t\tScales between 0 and 1, with 1 high level of DA\n\t\t\"\"\"\n\t\tif t < thresh:\n\t\t\tstate.D_g[t] = 0.5\n\t\t\tstate.D_n[t] = 0.5\n\t\telse:\n\t\t\tif mod == 
\"constant\":\n\t\t\t\tstate.D_g[t] = D\n\t\t\t\tstate.D_n[t] = 1-D\n\t\t\tif mod == \"value\":\n\t\t\t\t# NOTE: if rmag and lmag is 1/0, can just use V\n\t\t\t\t# average of two actions\n\t\t\t\tV = np.mean(1/2*(state.QG[t,:] - state.QN[t,:])) # state average(?) \n\t\t\t\tV = 1/(1 + np.exp(-V*k)) # translate between 0 and 1\n\t\t\t\tstate.D_g[t] = V \n\t\t\t\tstate.D_n[t] = 1 - V\n\t\treturn state\n\n\n\tdef generate_state():\n\t\t\"\"\"\n\t\tGet appropriate reward probabilities and magnitudes\n\t\tfor the specified environment type\n\t\t\"\"\"\n\n\t\tprobs = calc_probs(env)\n\t\tn_options = len(probs)\n\n\t\t# feedback for agent\n\t\tr_mag = np.zeros(n_options) + rmag\n\t\tl_mag = np.zeros(n_options) + lmag\n\n\t\tnew_state = Bogacz(n_trials, n_options, probs, r_mag, l_mag, V0=V0)\n\t\treturn new_state\n\n\n\t# learning rate, damping, decay, softmax temp\n\talpha_a, epsilon, lbda, beta = params\n\tstates = []\n\n\t# do the thing\n\tfor s in np.arange(n_states):\n\n\t\t# check if random seed provided\n\t\tif rnd_seeds is not None:\n\t\t\trandom.seed(rnd_seeds[s])\n\t\t\tnp.random.seed(rnd_seeds[s])\n\n\t\tstate = generate_state()\n\t\tfor t in range(n_trials):\n\n\t\t\tstate.idx = t\n\t\t\tstate=calc_D(state)\t\t\t\t\t# get D\n\t\t\tstate.policy_softmax(beta)\n\t\t\tstate.act(alpha_a, epsilon, lbda)\t# update \n\n\t\t\tif full:\n\t\t\t\tstate.update_other_actions(alpha_a, epsilon, lbda)\n\n\t\tstates.append(state)\t\t\t\t\t# save sim\n\n\treturn states", "def main():\n\tresults = []\n\n\tconfig = configparser.ConfigParser()\n\tconfig.read(\"simulation.ini\")\n\tsettings = config['sim']\n\n\tcompleted_obj_hw = int(settings[\"ClientsPerCampaign\"]) * float(settings[\"CompletedPctgHW\"])\n\texceeded_obj_hw = float(settings[\"ExceededPctgHW\"])\n\tsignificance_level = float(settings[\"SignificanceLevel\"])\n\tz_val_two_tails = scipy.stats.norm.ppf(1 - (significance_level / 2))\n\n\tprint(\"Completed Target HW: \" + str(completed_obj_hw))\n\tprint(\"Exceeded Target HW: \" + str(exceeded_obj_hw))\n\n\tcompleted_vals = []\n\texceeded_vals = []\n\tdone = False\n\n\tcompleted_avg = 0\n\texceeded_avg = 0\n\tcompleted_hw = 0\n\texceeded_hw = 0\n\n\ti = 0\n\twhile not done:\n\t\tprint(\"RUN: \" + str(i + 1))\n\t\tenv = simpy.Environment()\n\t\tsim = Simulation(env, settings, i == 0)\n\t\tsim.run()\n\t\tresults.append(sim.results)\n\t\ti += 1\n\n\t\tif settings['RunOnce'] == 'yes':\n\t\t\tprint(\"RUN ONCE\")\n\t\t\tsys.exit()\n\n\t\tcompleted_vals.append(sim.results['completed_count'])\n\t\texceeded_vals.append(sim.results['exceeded_proportion'])\n\n\t\tif i < 2:\n\t\t\tprint(\"---------------\")\n\t\t\tcontinue\n\n\t\tcompleted_avg = sum(completed_vals) / len(completed_vals)\n\t\tcompleted_S = sum([(v - completed_avg) ** 2 for v in completed_vals]) / (i - 1)\n\t\tcompleted_S = math.sqrt(completed_S)\n\t\tcompleted_hw = (z_val_two_tails * completed_S) / math.sqrt(i)\n\t\tprint(\"runs: \" + str(i) + \" completed HW: \" + str(completed_hw))\n\n\t\texceeded_avg = sum(exceeded_vals) / len(exceeded_vals)\n\t\texceeded_S = math.sqrt(exceeded_avg * (1 - exceeded_avg))\n\t\texceeded_hw = (z_val_two_tails * exceeded_S) / math.sqrt(i)\n\t\tprint(\"runs: \" + str(i) + \" exceeded HW: \" + str(exceeded_hw))\n\n\t\tif completed_hw < completed_obj_hw and exceeded_hw < exceeded_obj_hw:\n\t\t\tprint(\"END ITERATIONS\")\n\t\t\tdone = True\n\n\t\tprint(\"---------------\")\n\n\n\tfilename = 'results/Results_' + settings['FileSizeGB'] + '_' + settings['TorrentThreshold'] + '_' + settings['HTTPDownThreshold'] \\\n\t\t+ 
'_' + settings['HTTPUp'] + '_' + str(random.randint(0,10000)) + '.xlsx'\n\n\tprint(\"Saving XLSX to: \" + filename)\n\twb = xs.Workbook(filename)\n\n\tws = wb.add_worksheet()\n\n\tws.write(0, 1, 'Exceded')\n\tws.write(0, 2, 'Completed')\n\n\ti = 1\n\tfor result in results:\n\t\tws.write(i, 0, i)\n\t\tws.write(i, 1, result['exceeded_proportion'])\n\t\tws.write(i, 2, result['completed_count'])\n\t\ti += 1\n\n\tws.write(i, 0, 'average')\n\tws.write(i, 1, exceeded_avg)\n\tws.write(i, 2, completed_avg)\n\ti += 1\n\tws.write(i, 0, 'half width')\n\tws.write(i, 1, exceeded_hw)\n\tws.write(i, 2, completed_hw)\n\n\twb.close()", "def test4():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('normal')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.iperfMeasure()\n exp1.trafico.iperfMeasure(filename='iperf_normal_test.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n actions.remove('Stop') #DON'T STOP THE DISCO\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n\n if gameState.getAgentState(self.index).isPacman:\n self.turnsAsPacman+=1\n elif self.turnsAsPacman < 4 and self.turnsAsPacman > -1:\n self.loopProtection+=1\n if self.loopProtection > 2:\n self.loopProtection = -1\n self.turnsAsPacman = -1\n\n if self.index == self.debug_index:\n print(actions)\n print(values)\n # print(self.getPreviousObservation(), file=sys.stderr)\n\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n # if self.index == 1:\n # print(bestActions, file=sys.stderr)\n\n #run for start if enough food is held\n foodLeft = len(self.getFood(gameState).asList())\n\n \"\"\"\n #maybe keep this, but it's not always efficient for hauler\n if foodLeft <= 2 or gameState.getAgentState(self.index).numCarrying > 5:\n bestDist = 9999\n for action in actions:\n successor = self.getSuccessor(gameState, action)\n pos2 = successor.getAgentPosition(self.index)\n dist = self.getMazeDistance(self.start,pos2)\n if dist < bestDist:\n bestAction = action\n bestDist = dist\n return bestAction\n \"\"\"\n\n choice = random.choice(bestActions)\n\n if self.index == self.debug_index:\n print(\"Choice: \" + choice)\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n return choice", "def test_1():\n constr = dict()\n constr['maxfun'] = np.random.randint(1, 5 + 1)\n\n get_random_init(constr)\n simulate('test.trempy.ini')\n estimate('test.trempy.ini')", "def select_random_trial(completed_trials, possible_trials):\n if completed_trials is None:\n completed_trials = []\n if len(completed_trials) == 
len(possible_trials):\n return None, None\n\n incomplete_trials = np.setdiff1d(possible_trials, completed_trials)\n rand_trial_idx = np.random.randint(0, len(incomplete_trials))\n trial = incomplete_trials[rand_trial_idx]\n\n return select_trial(trial)", "def run_scenario(self):\n self.initialize_random_map()\n self.visualize_environment('initial')\n self.get_tower_target_coverages()\n self.solve_environment()\n self.visualize_environment('solved')", "def main():\n global repeat\n regime = collect()\n start = int(raw_input(\"Which line of the exercise script would you like to begin with? \")) - 1\n regime = regime[start:]\n say(\"Ready?\")\n time.sleep(1)\n for exercise in regime:\n coach(exercise[:-1])\n while repeat:\n repeat = False\n coach(exercise[:-1])\n say(\"Session complete.\")", "def test3():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('normal')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.pingMeasure()\n exp1.trafico.pingMeasure(filename='ensayo_ping.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def run_suite(strategy):\n \n # create a TestSuite object\n suite = poc_simpletest.TestSuite()\n \n # test gen_all_holds on various inputs\n hand = (1,)\n num_sides = 6\n suite.run_test(strategy(hand, num_sides), (3.5, ()), \"Test #1:\")\n\n suite.report_results()", "def run_experiment():\n pass", "def run_trial(self, occ):\n from scitbx.array_family import flex\n seed = int(time.time() / os.getpid())\n random.seed(seed)\n flex.set_random_seed(seed)\n fmodel = self.fmodel\n selection = self.selection.deep_copy()\n iselection = self.iselection.deep_copy()\n d_min = fmodel.f_obs().d_min()\n xrs = fmodel.xray_structure.deep_copy_scatterers()\n hd_sel = xrs.hd_selection()\n sites_start = xrs.sites_cart().deep_copy()\n assert (len(selection) == len(sites_start))\n fmodel.update_xray_structure(xrs, update_f_calc=True)\n sites_start_selected = sites_start.select(iselection)\n occ_start = xrs.scatterers().extract_occupancies().deep_copy()\n u_start = xrs.extract_u_cart_plus_u_iso()\n if (self.params.adjust_b_factors_if_poor_density):\n fofc_map = fmodel.map_coefficients(\n map_type=\"mFo-DFc\",\n exclude_free_r_reflections=True).fft_map(\n resolution_factor=1/4.).apply_sigma_scaling().real_map_unpadded()\n sites_frac = xrs.sites_frac()\n b_scale_isel = flex.size_t()\n for i_seq in iselection :\n map_value = fofc_map.tricubic_interpolation(sites_frac[i_seq])\n if (map_value < -2.5):\n b_scale_isel.append(i_seq)\n if (len(b_scale_isel) > 0):\n b_scale_sel = flex.bool(sites_frac.size(), False).set_selected(\n b_scale_isel, True)\n xrs.scale_adp(factor=0.75, selection=b_scale_sel)\n nearby_water_selection = mmtbx.building.get_nearby_water_selection(\n pdb_hierarchy=self.pdb_hierarchy,\n xray_structure=xrs,\n selection=selection)\n two_fofc_map, fofc_map = alt_confs.get_partial_omit_map(\n fmodel=fmodel,\n 
selection=iselection,\n selection_delete=None,#nearby_water_selection,\n negate_surrounding=self.params.negate_surrounding,\n partial_occupancy=occ)\n #xrs.set_occupancies(occ_start)\n make_sub_header(\"Simulated annealing into mFo-DFc map\", out=self.out)\n if (self.params.shake_sites is not None):\n xrs.shake_sites_in_place(self.params.shake_sites, selection=selection)\n sites_new = mmtbx.building.run_real_space_annealing(\n xray_structure=xrs,\n pdb_hierarchy=self.pdb_hierarchy,\n processed_pdb_file=self.processed_pdb_file,\n selection=selection,\n target_map=fofc_map,\n d_min=d_min,\n params=self.params.simulated_annealing,\n #wc=5, # FIXME why does this need to be scaled?\n target_map_rsr=two_fofc_map,\n rsr_after_anneal=self.params.rsr_after_anneal,\n out=self.out,\n debug=True)\n # now compute CC of refined sites to difference map\n fmodel.update_xray_structure(xrs, update_f_calc=True)\n fc_coeffs = fmodel.f_model()\n fc_fft_map = fc_coeffs.fft_map(resolution_factor=1/4.)\n fc_map = fc_fft_map.apply_sigma_scaling().real_map_unpadded()\n pdb_atoms = self.pdb_hierarchy.atoms()\n # XXX should this only calculate statistics for the central atoms?\n map_stats = mmtbx.building.get_model_map_stats(\n selection=self.selection_score,\n target_map=fofc_map,\n model_map=fc_map,\n unit_cell=xrs.unit_cell(),\n sites_cart=sites_new,\n pdb_atoms=pdb_atoms,\n local_sampling=False)\n # reset xray structure\n xrs.set_sites_cart(sites_start)\n xrs.set_u_cart(u_start)\n fmodel.update_xray_structure(xrs, update_f_calc=True)\n # we may only want the rmsd and max. dev. from a subset of sites, e.g.\n # the central residue of a sliding window (minus hydrogens)\n selection_score = self.selection_score.deep_copy()\n if (type(selection_score).__name__ != 'bool'):\n selection_score = flex.bool(hd_sel.size(), False).set_selected(\n self.selection_score, True)\n selection_score &= ~hd_sel\n site_stats = alt_confs.coord_stats_with_flips(\n sites1=sites_start.select(selection_score),\n sites2=sites_new.select(selection_score),\n atoms=self.pdb_hierarchy.atoms().select(selection_score))\n return alt_confs.trial_result(\n sites_cart=sites_new.select(self.iselection),\n min_fofc=map_stats.min,\n mean_fofc=map_stats.mean,\n rmsd=site_stats.rmsd,\n max_dev=site_stats.max_dev,\n cc=map_stats.cc)", "def act(self, s, exploration, game):\n agent_p = self.compute_marginal_pi(s, one_hot=False)\n if self.exploration and random.random() < self.episilon:\n agent_action = random.randint(0, self.action_num - 1)\n else:\n if self.verbose:\n for s in self.Q.keys():\n print('{}--------------'.format(self.id_))\n print('Q of agent {}: state {}: {}'.format(self.id_, s, str(self.Q[s])))\n # print('QAof agent {}: state {}: {}'.format(self.id_, s, str(self.Q_A[s])))\n # self.Q_A\n print('pi of agent {}: state {}: {}'.format(self.id_, s, self.pi[s]))\n # print('pi of opponent agent {}: state{}: {}'.format(self.id_, s, self.opponent_best_pi[s]))\n print('{}--------------'.format(self.id_))\n agent_action = np.argmax(agent_p)\n return agent_action", "def three_experiments_same_name_with_trials(\n two_experiments_same_name, orionstate, storage\n):\n\n orion.core.cli.main(\n [\n \"hunt\",\n \"--init-only\",\n \"--enable-evc\",\n \"-n\",\n \"test_single_exp\",\n \"./black_box.py\",\n \"--x~uniform(0,1)\",\n \"--y~normal(0,1)\",\n \"--z~+normal(0,1)\",\n ]\n )\n ensure_deterministic_id(\"test_single_exp\", storage, version=3)\n\n exp = experiment_builder.build(name=\"test_single_exp\", version=1, storage=storage)\n exp2 = 
experiment_builder.build(name=\"test_single_exp\", version=2, storage=storage)\n exp3 = experiment_builder.build(name=\"test_single_exp\", version=3, storage=storage)\n\n x = {\"name\": \"/x\", \"type\": \"real\"}\n y = {\"name\": \"/y\", \"type\": \"real\"}\n z = {\"name\": \"/z\", \"type\": \"real\"}\n x_value = 0\n for status in Trial.allowed_stati:\n x[\"value\"] = x_value + 0.1 # To avoid duplicates\n y[\"value\"] = x_value * 10\n z[\"value\"] = x_value * 100\n trial = Trial(experiment=exp.id, params=[x], status=status)\n trial2 = Trial(experiment=exp2.id, params=[x, y], status=status)\n trial3 = Trial(experiment=exp3.id, params=[x, y, z], status=status)\n orionstate.database.write(\"trials\", trial.to_dict())\n orionstate.database.write(\"trials\", trial2.to_dict())\n orionstate.database.write(\"trials\", trial3.to_dict())\n x_value += 1", "def _run_test_case(radio, lines):\n calc_reachable_surface_and_people(radio, lines)", "def TestTrial(ss, returnOnChg):\n ss.TestEnv.Step()\n\n chg = env.CounterChg(ss.TestEnv, env.Epoch)\n if chg:\n if ss.ViewOn and ss.TestUpdt.value > leabra.AlphaCycle:\n ss.UpdateView(False)\n ss.LogTstEpc(ss.TstEpcLog)\n if returnOnChg:\n return\n\n ss.Net.LayerByName(\"Output\").SetType(emer.Compare)\n ss.ApplyInputs(ss.TestEnv)\n ss.AlphaCyc(False)\n ss.TrialStats(False)\n ss.LogTstTrl(ss.TstTrlLog)", "def simulationWithDrug():\n totalPopulation = []\n resistantPopulation = []\n \n patient = Patient(getViruses(100, 0.1, 0.05, {\"guttagonol\":False}, 0.05), 1000)\n \n for i in range (0, 150):\n totalPopulation.append(patient.update())\n resistantPopulation.append(patient.getResistPop(\"guttagonol\"))\n \n patient.addPrescription(\"guttagonol\")\n \n for i in range (0, 150):\n totalPopulation.append(patient.update())\n resistantPopulation.append(patient.getResistPop(\"guttagonol\"))\n \n plotPopulation(totalPopulation)\n plotPopulation(resistantPopulation)\n pylab.show()", "def select_action(engine, observation):\n with torch.no_grad():\n dqn.eval()\n if torch.rand(1).item() < epsilon:\n return random_action(observation)\n else:\n return dqn(observation).greedy()", "def step(self, timestep):\n (agent_id, agent_traits) = self.model.get_random_agent()\n (neighbor_id, neighbor_traits) = self.model.get_random_neighbor_for_agent(agent_id)\n\n prob = analysis.calc_probability_interaction_axelrod(agent_traits, neighbor_traits)\n\n if prob == 0.0:\n return\n elif prob == 1.0:\n return\n else:\n draw = npr.random()\n if draw < prob:\n differing_features = analysis.get_different_feature_positions_axelrod(agent_traits, neighbor_traits)\n old_agent_traits = list(agent_traits)\n if len(differing_features) == 1:\n random_feature = differing_features[0]\n else:\n rand_feature_num = npr.randint(0, len(differing_features))\n random_feature = differing_features[rand_feature_num]\n neighbor_trait = neighbor_traits[random_feature]\n agent_traits[random_feature] = neighbor_trait\n #log.debug(\"agent %s: old: %s neighbor: %s post: %s differing: %s feature: %s val: %s \", agent_id, old_agent_traits, neighbor_traits, agent_traits,differing_features, random_feature, neighbor_trait )\n self.model.set_agent_traits(agent_id, agent_traits)\n\n # track the interaction and time\n self.model.update_interactions(timestep)\n else:\n # no interaction given the random draw and probability, so just return\n #log.debug(\"no interaction\")\n return", "def simulationTwoDrugsDelayedTreatment():\n\n # TODO", "def observe(self, trials):\n with self.get_client() as ax_client:\n for trial in trials:\n if 
not self.has_suggested(trial):\n _, trial_index = ax_client.attach_trial(\n AxOptimizer.transform_params(flatten(trial.params), self.space)\n )\n self._trials_map[self.get_id(trial)] = trial_index\n\n if not self.has_observed(trial):\n # Check the trial status\n trial_status = trial.status\n\n # If the trial status is `completed`\n if trial_status == \"completed\":\n # Complete it in Ax\n ax_trial_index = self._trials_map[self.get_id(trial)]\n raw_data = {\n \"objective\": trial.objective.value,\n **{\n s.name: s.value\n for s in trial.statistics\n if s.name in self.extra_objectives\n },\n **{r.name: r.value for r in trial.constraints},\n }\n ax_client.complete_trial(\n trial_index=ax_trial_index, raw_data=raw_data\n )\n\n # If the trial status is `broken`\n elif trial_status == \"broken\":\n # Set is as broken is Ax\n ax_trial_index = self._trials_map[self.get_id(trial)]\n ax_client.log_trial_failure(ax_trial_index)\n\n # Register the unobserved trial\n self.register(trial)" ]
[ "0.6240639", "0.6103099", "0.60925263", "0.5984078", "0.5937608", "0.5916742", "0.5768528", "0.5740173", "0.5682504", "0.5681896", "0.56587714", "0.56468713", "0.5616107", "0.56043226", "0.5596143", "0.5585067", "0.558289", "0.5577875", "0.5572243", "0.55598336", "0.55593103", "0.5553036", "0.55502874", "0.55475104", "0.55386823", "0.5531397", "0.5530812", "0.5515855", "0.55136627", "0.5507623", "0.550739", "0.54982257", "0.54966134", "0.5488721", "0.5479947", "0.54741925", "0.5466911", "0.5448837", "0.54436946", "0.54401565", "0.54386264", "0.54386264", "0.54372466", "0.54372466", "0.54372466", "0.54372466", "0.5434224", "0.54189867", "0.5415367", "0.53927726", "0.5385578", "0.53579813", "0.5354959", "0.5354501", "0.53544724", "0.5354096", "0.5351389", "0.5350407", "0.5325949", "0.53218824", "0.5320331", "0.5314048", "0.5303985", "0.52993625", "0.52992094", "0.52958626", "0.5281683", "0.5277513", "0.52743834", "0.52705574", "0.5257996", "0.525552", "0.5248661", "0.52460647", "0.5241784", "0.52364534", "0.5236342", "0.52345943", "0.52313143", "0.5230924", "0.5220935", "0.52182204", "0.5216767", "0.5212994", "0.52112037", "0.52084386", "0.52081627", "0.520702", "0.52054787", "0.52034795", "0.5200228", "0.5196822", "0.5196695", "0.5184476", "0.5179184", "0.51762754", "0.5161488", "0.5160592", "0.51588976", "0.51576185" ]
0.799685
0
Does the same thing as the simulate_universes function but does not use playground.choose_trial. Instead of using trials, it makes a decision according to the real (secret) treatment survival. The obtained score is the score you would obtain if you knew all the variables and not only the trial facts
def compute_god_score(): survivals_count = 0 for _ in range(PARALLEL_UNIVERSES_COUNT): best_survival = random.uniform(MIN_DISEASE_SURVIVAL, MAX_DISEASE_SURVIVAL) for _ in range(random.randint(MIN_TREATMENTS_COUNT, MAX_TREATMENTS_COUNT)): treated_survival = random.uniform(MIN_TREATED_SURVIVAL, MAX_TREATED_SURVIVAL) if treated_survival > best_survival: best_survival = treated_survival if random.uniform(0, 1) <= best_survival: survivals_count += 1 return survivals_count / PARALLEL_UNIVERSES_COUNT
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def simulate_universe():\n\n # untreated_survival is the probability to survive if not treated\n # this is an exact law of the universe, the player will not have this information\n untreated_survival = random.uniform(MIN_DISEASE_SURVIVAL, MAX_DISEASE_SURVIVAL)\n\n trials: list[Trial] = []\n\n treated_survivals: dict[Trial, float] = {}\n\n for _ in range(random.randint(MIN_TREATMENTS_COUNT, MAX_TREATMENTS_COUNT)):\n group_size = random.randint(MIN_GROUP_SIZE, MAX_GROUP_SIZE)\n\n # treated_survival is the probability to survive if treated\n # this is an exact law of the universe, the player will not have this information\n # therefore it is stored in a separate dict and not in the given-to-player Trial object\n treated_survival = random.uniform(MIN_TREATED_SURVIVAL, MAX_TREATED_SURVIVAL)\n\n trial = Trial(group_size, untreated_survival, treated_survival)\n\n trials.append(trial)\n treated_survivals[trial] = treated_survival\n\n chosen_trial = playground.choose_trial(trials)\n\n if chosen_trial is None: # None means no treatment\n chosen_survival = untreated_survival\n else:\n chosen_survival = treated_survivals[chosen_trial]\n\n return random.uniform(0, 1) <= chosen_survival", "def simulationTwoDrugsDelayedTreatment(numTrials):\n numViruses = 100\n maxPop = 1000\n maxBirthProb = 0.1\n clearProb = 0.05\n resistances = {'guttagonol': False, 'grimpex': False}\n mutProb = 0.005\n\n first_drug = 150\n second_drug = 300\n steps = first_drug + second_drug\n total_vs = [0 for i in range(steps)]\n resis_vs = list(total_vs)\n results = list(total_vs)\n\n for trial in range(numTrials):\n viruses = []\n for i in range(numViruses):\n viruses.append(ResistantVirus(maxBirthProb, clearProb, resistances, mutProb))\n patient = TreatedPatient(viruses, maxPop)\n\n for step in range(steps):\n if step == first_drug:\n patient.addPrescription('guttagonol')\n elif step == second_drug:\n patient.addPrescription('grimpex')\n patient.update()\n total_vs[step] += patient.getTotalPop()\n resis_vs[step] += patient.getResistPop(['guttagonol'])\n resis_vs[step] += patient.getResistPop(['grimpex'])\n\n results.append(patient.getTotalPop())\n\n pylab.hist(results, 9)\n pylab.show()", "def simulationDelayedTreatment(numTrials):\n\n delays = [300,150,75,0]\n results = [[],[],[],[]]\n for place in range(0, 4):\n for trial in range(numTrials):\n viruses = []\n for num in range(100):\n viruses.append(ResistantVirus(0.1,0.05, {'guttagonol': False}, 0.005))\n patient = TreatedPatient(viruses, 1000)\n for delay in range(delays[place]):\n patient.update()\n patient.addPrescription(\"guttagonol\") \n for l in range(150):\n patient.update()\n results[place].append(patient.getTotalPop())\n pylab.hist(results[0])\n pylab.hist(results[1])\n pylab.hist(results[2])\n pylab.hist(results[3])\n pylab.show()\n for x in range(0, 10):", "def run(): \n learning_rate = 0.42\n discount_rate = 0.15\n initial_q_hat = 4\n \n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent, learning_rate, discount_rate, initial_q_hat) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of 
trials\n print \"Failed trials: \"\n print a.get_failed_trials()\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def simulationDelayedTreatment(numTrials):\n \n #Initialization\n #delayList = [300, 150, 75, 0]\n delayList = [150]\n #Patient init variables\n numViruses = 100\n maxPop = 1000\n #Virus init variables\n maxBirthProb = 0.1\n clearProb = 0.05\n #clearProb = 0.10\n resistances = { 'guttagonol': True }\n mutProb = 0.005\n \n results = {}\n \n for n in delayList:\n cured = 0\n popList = []\n for i in range(numTrials):\n pop = runTrial(n, numViruses, maxPop, maxBirthProb, clearProb, resistances, mutProb)\n popList.append(pop)\n if pop == 0:\n cured +=1\n results[n] = popList\n #print popList\n print \"Delay : %(delay)d Percentage cured %(percent)2f\" % {\"delay\" : n, \"percent\" : cured/float(numTrials) }\n \n\n drawHist(results, numTrials)", "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def simulationTwoDrugsDelayedTreatment(numTrials):\n # TODO", "def simulationDelayedTreatment(numTrials):\n numViruses = 100\n maxPop = 1000\n maxBirthProb = 0.1\n clearProb = 0.05\n resistances = {'guttagonol': False}\n mutProb = 0.005\n\n delays = [300, 150, 75, 0]\n results = []\n\n for delay in delays:\n for i in range(numTrials):\n virusList = []\n virusPop = 0\n for n in range(numViruses):\n virusList.append(ResistantVirus(maxBirthProb, clearProb, resistances, mutProb))\n my_patient = TreatedPatient(virusList, maxPop)\n\n for step in range(delay + 150):\n if step == delay:\n my_patient.addPrescription('guttagonol')\n virusPop = my_patient.update()\n results.append(virusPop)\n\n toPlot = []\n for i in range(0, len(results), numTrials):\n toPlot.append(results[i:i + numTrials])\n # print toPlot\n\n for i, _ in enumerate(delays):\n pylab.subplot(2, 2, i + 1)\n pylab.hist(toPlot[i])\n pylab.show()", "def simulationTwoDrugsDelayedTreatment(numTrials):\n #Initialization\n delayList = [300, 150, 75, 0]\n #delayList = [150]\n #Patient init variables\n numViruses = 100\n maxPop = 1000\n #Virus init variables\n maxBirthProb = 0.1\n clearProb = 0.05\n #clearProb = 0.10\n resistances = { 'guttagonol': False, 'grimpex' : False }\n #mutProb = 0.005\n mutProb = 0.010\n \n results = {}\n \n for n in delayList:\n cured = 0\n popList = []\n print \"Running trials for delay %(delay)d\" % {'delay' : n}\n for i in range(numTrials):\n #print \"Trial: \" + str(i)\n pop = runTrialTwoDrugs(n, numViruses, maxPop, maxBirthProb, clearProb, resistances, mutProb)\n popList.append(pop)\n if pop < 50:\n cured +=1\n results[n] = popList\n #print popList\n print \"Delay : %(delay)d Percentage cured %(percent)2f\" % {\"delay\" : n, \"percent\" : cured/float(numTrials) }\n \n\n drawHist(results, numTrials)", "def run(num_trials):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.1, display=True) \n # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=num_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n 
a.performace_report(num_trials)", "def test(numTrials):\n # Your Code Here\n hits = 0.0\n for i in range(numTrials):\n result = trial()\n #print result\n hits += result\n return hits / numTrials", "def simulationWithDrug(numTrials = 20, numTimeSteps = 500):\r\n random.seed()\r\n\r\n # Virus Characteristics.\r\n maxPop = 1000\r\n numViruses = 100\r\n maxBirthProb = 0.1\r\n clearProb = 0.05\r\n resistances={'guttagonol':False}\r\n mutProb= 0.005\r\n dataMatrix = numpy.zeros(shape = (numTrials, numTimeSteps)) \r\n for trial in range(numTrials): \r\n\r\n # Model a random patient with the given virus charateristics. \r\n viruses = resistantVirusCollection(numViruses, maxBirthProb, clearProb,resistances,mutProb)\r\n randPatientX = Patient(viruses, maxPop)\r\n\r\n #Use drug on patient\r\n randPatientX.addPrescription('guttagonol')\r\n\r\n # Simulate the time-steps.\r\n dataMatrix[trial][0] = numViruses\r\n for time in range(1, numTimeSteps):\r\n dataMatrix[trial][time] = randPatientX.update() \r\n \r\n # Statistical Analysis.\r\n meanData = dataMatrix.mean(0)\r\n time = numpy.arange(numTimeSteps) \r\n stdData95_CI = dataMatrix.std(0) * 2\r\n selectedTime = numpy.arange(0, numTimeSteps, 10)\r\n\r\n # Ploting.\r\n pylab.plot(time, meanData)\r\n pylab.errorbar(time[selectedTime], meanData[selectedTime], stdData95_CI[selectedTime], fmt = 'o') \r\n pylab.show()", "def run(n_trials, params):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n a.set_Qtable(params['epsilon'], params['gamma'], params['learn_rate'], params['lrconst'],\n params['learn_rate_decay'], params['epsconst'], params['epsilon_decay'])\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=n_trials) # run for a specified number of trials\n\n return a.reached_destination", "def run():\n trials = 100\n\n multipliers = [0.25, 0.3, 0.35, 0.5, 0.75, 1, 1.25, 1.45, 1.5, 1.55, 1.6] # Coefficients for learning rate\n\n mean_penalty = []\n median_penalty = []\n std_penalty = []\n\n mean_trial_time = []\n median_trial_time = []\n std_trial_time = []\n\n mean_success_rate = []\n median_success_rate = []\n std_success_rate = []\n\n for m in multipliers:\n all_penalties = [] # All penalties from trail sets\n all_average_trial_time = []\n all_success_rates = []\n\n for i in range(0, 20):\n # print \"Trial set:\", i\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n agent = e.create_agent(LearnerAgent) # create agent\n agent.mult = m\n e.set_primary_agent(agent, enforce_deadline=True) # specify agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0, display=False) # create simulator (uses pygame when display=True, if available)\n\n sim.run(n_trials=trials) # run for a specified number of trials\n\n all_penalties.append(agent.all_trails_penalties)\n all_average_trial_time.append(agent.time/float(trials))\n all_success_rates.append(float(trials-agent.aborted_trials)/trials)\n\n mean_penalty.append(np.mean(all_penalties))\n median_penalty.append(np.median(all_penalties))\n std_penalty.append(np.std(all_penalties))\n\n 
mean_trial_time.append(np.mean(all_average_trial_time))\n median_trial_time.append(np.median(all_average_trial_time))\n std_trial_time.append(np.std(all_average_trial_time))\n\n mean_success_rate.append(np.mean(all_success_rates))\n median_success_rate.append(np.median(all_success_rates))\n std_success_rate.append(np.std(all_success_rates))\n\n for i in range(0, len(multipliers)):\n print \"\"\n print \"Multiplier:\", multipliers[i]\n print \"\"\n print \"Mean penalty per {} trials:\".format(trials), mean_penalty[i]\n print \"Median penalty per {} trials:\".format(trials), median_penalty[i]\n print \"Std.Dev. penalty per {} trials:\".format(trials), std_penalty[i]\n\n print \"\"\n print \"Mean trial time:\", mean_trial_time[i]\n print \"Median trial time:\", median_trial_time[i]\n print \"Std.Dev. trial time:\", std_trial_time[i]\n\n print \"\"\n print \"Mean success rate per {} trials:\".format(trials), mean_success_rate[i]\n print \"Median success rate per {} trials:\".format(trials), median_success_rate[i]\n print \"Std.Dev. success rate per {} trials:\".format(trials), std_success_rate[i]", "def simulationDelayedTreatment(numTrials):\n \n \n results = []\n gutresults = []\n for a in range(300):\n results.append([])\n gutresults.append([])\n for b in range(numTrials):\n viruses = []\n for c in range(10000):\n resistances = {'guttagonol': False}\n vir = ResistantVirus(.1, .05, resistances, .005)\n viruses.append(vir)\n \n Mark = TreatedPatient(viruses, 1000)\n \n for d in range(150):\n pop = Mark.update()\n results[d].append(pop)\n gutpop = Mark.getResistPop(['guttagonol'])\n gutresults[d].append(gutpop)\n \n Mark.addPrescription('guttagonol')\n \n for e in range(150, 300):\n newpop = Mark.update()\n results[e].append(newpop)\n newgutpop = Mark.getResistPop(['guttagonol'])\n gutresults[e].append(newgutpop)\n \n FinalResults = results[299]\n print len(FinalResults)\n \n \n \n pylab.figure(5)\n pylab.hist(FinalResults, bins = 10)\n pylab.title('Simulation with Drugs - Frequency')\n pylab.xlabel('Virus Population')\n pylab.ylabel('Number of Trials with Population') \n pylab.legend()\n pylab.show()", "def simulationWithDrug(numTrials = 100, numTimeSteps = 300):\n random.seed()\n\n # Virus Characteristics.\n maxPop = 1000\n numViruses = 100\n maxBirthProb = 0.1\n clearProb = 0.05\n \n gutResistVirusMatrix = numpy.zeros(shape = (numTrials, numTimeSteps))\n dataMatrix = numpy.zeros(shape = (numTrials, numTimeSteps)) \n for trial in range(numTrials): \n\n # Model a random patient with the given virus charateristics. 
\n viruses = virusCollection(numViruses, maxBirthProb, clearProb, ['guttagonol'])\n randPatientX = Patient(viruses, maxPop)\n\n # Simulate the time-steps.\n dataMatrix[trial][0] = numViruses\n for time in range(1, numTimeSteps):\n if time == 150:\n randPatientX.addPrescription('guttagonol')\n dataMatrix[trial][time] = randPatientX.update()\n gutResistVirusMatrix[trial][time] = randPatientX.getResistPop(['guttagonol']) \n \n # Statistical Analysis.\n meanData = dataMatrix.mean(0)\n time = numpy.arange(numTimeSteps) \n stdData95_CI = dataMatrix.std(0) * 2\n selectedTime = numpy.arange(0, numTimeSteps, 10)\n\n meanResistVirus = gutResistVirusMatrix.mean(0)\n\n #f = pylab.figure(figsize=(15, 7))\n\n # Plotting.\n #pylab.subplot(121)\n pylab.plot(time, meanData, label='Mean Virus Population')\n pylab.errorbar(time[selectedTime], meanData[selectedTime], stdData95_CI[selectedTime], fmt = 'o', color = 'blue')\n pylab.grid() \n pylab.xlabel('Time Steps')\n pylab.ylabel('Total Virus Population')\n pylab.title('Effect of Guttagonol on Virus Population being administered\\nafter {} Timesteps over a total period of {} Timesteps'.format('150', '300'), fontsize='medium')\n\n stdDevGutVirusPop = gutResistVirusMatrix.std(0) * 2\n\n # Plotting 2nd graph\n #pylab.subplot(122)\n pylab.plot(time, meanResistVirus, label='Mean Guttagonol-resistant Virus Population', color = 'red')\n pylab.errorbar(time[selectedTime], meanResistVirus[selectedTime], stdDevGutVirusPop[selectedTime], fmt = 'o', color = 'red')\n pylab.legend(fontsize='x-small', loc='best')\n #pylab.grid()\n #pylab.xlabel('Time Steps')\n #pylab.ylabel('Total Guttagonol-Resistant Virus Population')\n #pylab.title('Total Number of Guttagonol-Resistant Virus Population after {} Timesteps\\nDrug administered after {} Timesteps'.format('300', '150'), fontsize='medium')\n pylab.show()", "def perform_study(self, trial_list=None):\n # Ideally, this type of HO is called with a list of trials for which\n # the parameter has to be identified.\n self.trial_list = trial_list\n if self.trial_list is None:\n raise Exception(\"Sorry, Hyperparameter optimization without \"\n \"training currently only works if a list of \"\n \"trials is provided.\")\n\n # TODO: For now. 
Needs some refinements later.\n if isinstance(self.trial_list[0], optuna.trial.FrozenTrial):\n trial_type = \"optuna\"\n else:\n trial_type = \"oat\"\n self.objective = ObjectiveNoTraining(self.params, self.data_handler,\n trial_type)\n self.trial_losses = [self.objective(row) for row in self.trial_list]\n\n # Return the best lost value we could achieve.\n return min(self.trial_losses)", "def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)", "def _do_trial(self, i, j, permutation_vector, energies, adaptor):\n delta = energies[i, i] - energies[j, i] + energies[j, j] - energies[i, j]\n accepted = False\n\n if delta >= 0:\n accepted = True\n else:\n metrop = math.exp(delta)\n rand = random.random()\n if rand < metrop:\n accepted = True\n\n if accepted:\n self._swap_permutation(i, j, permutation_vector)\n self._swap_energies(i, j, energies)\n adaptor.update(i, True)\n else:\n adaptor.update(i, False)", "def simulationTwoDrugsDelayedTreatment(numTrials):\n results = []\n gutresults = []\n \n for a in range(375):\n results.append([])\n gutresults.append([])\n \n for b in range(numTrials):\n viruses = []\n for c in range(100):\n resistances = {'guttagonol': False, 'grimpex': False}\n vir = ResistantVirus(.1, .05, resistances, .02)\n viruses.append(vir)\n \n Mark = TreatedPatient(viruses, 1000)\n \n for d in range(150):\n pop = Mark.update()\n results[d].append(pop)\n gutpop = Mark.getResistPop(['guttagonol'])\n gutresults[d].append(gutpop)\n \n Mark.addPrescription('guttagonol')\n \n for e in range(150, 225):\n newpop = Mark.update()\n results[e].append(newpop)\n newgutpop = Mark.getResistPop(['guttagonol'])\n gutresults[e].append(newgutpop)\n \n Mark.addPrescription('grimpex')\n \n for f in range(225, 375):\n newpop = Mark.update()\n results[f].append(newpop)\n \n \n FinalResults = results[374]\n print len(FinalResults)\n \n \n pylab.figure(6)\n pylab.hist(FinalResults, bins = 10)\n pylab.title('300 day delay')\n pylab.xlabel('Virus Population')\n pylab.ylabel('Number of Trials with Population') \n pylab.show()", "def first_stage_test(solver, sub_u_rate):\n bins_num = 100\n\n classifier = joblib.load(join(join(_result_path, solver), str(sub_u_rate)) + '/logistic.pkl')\n\n # evaluate positive set, which contains spies\n positive = np.load(\"./processed_data/train/raw/train_p.npy\")\n positive_x = positive[:, : -1]\n result_p = np.array(classifier.predict_proba(positive_x)[:, 1])\n plt.hist(result_p, bins=bins_num)\n plt.savefig(join(join(_result_path, solver), str(sub_u_rate)) + '/positive.png')\n plt.show()\n print(\"\\npositive set results: average: \" + str(np.mean(result_p)) + \" variance:\" + str(np.var(result_p)))\n print(\"max: \" + str(result_p.max()) + \" min: \" + str(result_p.min()))\n\n # evaluate spy set\n spy = np.load(_spy_path)\n spy_x = spy[:, : -1]\n result_spy = np.array(classifier.predict_proba(spy_x)[:, 1])\n plt.hist(result_spy, bins=bins_num)\n plt.savefig(join(join(_result_path, solver), str(sub_u_rate)) + '/spy.png')\n plt.show()\n print(\"\\nspy set results: average: \" + str(np.mean(result_spy)) + \" variance:\" + str(np.var(result_spy)))\n print(\"max: \" + str(result_spy.max()) + \" min: \" + str(result_spy.min()))\n\n # evaluate sub-unlabeled set\n sub_u = np.load(\"./processed_data/train/sub_u_\" + str(sub_u_rate) + \".npy\")\n sub_u_x = sub_u[:, :-1]\n result_sub_u = np.array(classifier.predict_proba(sub_u_x)[:, 1])\n plt.hist(result_sub_u, bins=bins_num)\n plt.savefig(join(join(_result_path, solver), str(sub_u_rate)) + 
'/sub-u.png')\n plt.show()\n print(\"\\nsub-unlabeled set results: average: \" + str(np.mean(result_sub_u)) + \" variance:\" + str(np.var(result_sub_u)))\n print(\"max: \" + str(result_sub_u.max()) + \" min: \" + str(result_sub_u.min()))\n\n # evaluate the whole unlabeled set\n unlabeled = np.load(\"./processed_data/train/raw/train_u.npy\")\n unlabeled_x = unlabeled[:, :-1]\n result_unlabeled = np.array(classifier.predict_proba(unlabeled_x)[:, 1])\n plt.hist(result_unlabeled, bins=bins_num)\n plt.savefig(join(join(_result_path, solver), str(sub_u_rate)) + '/unlabeled.png')\n plt.show()\n print(\"\\nunlabeled set results: average: \" + str(np.mean(result_unlabeled)) + \" variance:\" + str(\n np.var(result_unlabeled)))\n print(\"max: \" + str(result_unlabeled.max()) + \" min: \" + str(result_unlabeled.min()))", "def simulationWithDrug(numViruses, maxPop, maxBirthProb, clearProb, resistances,\n mutProb, numTrials):\n \n #create viruses list\n viruses = []\n for i in range(numViruses):\n viruses.append(ResistantVirus(maxBirthProb, clearProb, resistances, mutProb))\n \n #create test patient P1\n results = np.zeros(numTrials*300).reshape(300,numTrials)\n resultsPopResist = np.zeros(numTrials*300).reshape(300,numTrials)\n \n #runs numTrials of 300 steps, putting results in an array of 300 lines, \n # numTrials columns\n for t in range(numTrials) :\n P1 = TreatedPatient(viruses, maxPop)\n for s in range(150):\n P1.update()\n results[s][numTrials-1] += P1.getTotalPop()\n resultsPopResist[s][numTrials-1] += P1.getResistPop(['guttagonol'])\n \n P1.addPrescription('guttagonol')\n for s in range(150,300):\n P1.update()\n results[s][numTrials-1]+=P1.getTotalPop()\n resultsPopResist[s][numTrials-1] += P1.getResistPop(['guttagonol'])\n \n \n #calculating average of virus population size at each step \n yValues1 = []\n for i in range(300):\n a = sum(results[i].tolist())/len(results[i])\n yValues1.append(a)\n \n yValues2 = []\n for i in range(300):\n a = sum(resultsPopResist[i].tolist())/len(resultsPopResist[i])\n yValues2.append(a)\n\n pylab.plot(yValues1,label='pop average')\n pylab.plot(yValues2,'r--',label = 'resistant virus population')\n pylab.title('virus pop average at each step')\n pylab.legend()\n pylab.xlabel('Time Steps')\n pylab.ylabel('pop #')\n pylab.show()", "def simulationWithoutDrug(numTrials = 20, numTimeSteps = 500):\r\n random.seed()\r\n\r\n # Virus Characteristics.\r\n maxPop = 1000\r\n numViruses = 100\r\n maxBirthProb = 0.1\r\n clearProb = 0.05\r\n \r\n dataMatrix = numpy.zeros(shape = (numTrials, numTimeSteps)) \r\n for trial in range(numTrials): \r\n\r\n # Model a random patient with the given virus charateristics. 
\r\n viruses = virusCollection(numViruses, maxBirthProb, clearProb)\r\n randPatientX = SimplePatient(viruses, maxPop)\r\n\r\n # Simulate the time-steps.\r\n dataMatrix[trial][0] = numViruses\r\n for time in range(1, numTimeSteps):\r\n dataMatrix[trial][time] = randPatientX.update() \r\n \r\n # Statistical Analysis.\r\n meanData = dataMatrix.mean(0)\r\n time = numpy.arange(numTimeSteps) \r\n stdData95_CI = dataMatrix.std(0) * 2\r\n selectedTime = numpy.arange(0, numTimeSteps, 10)\r\n\r\n # Ploting.\r\n pylab.plot(time, meanData)\r\n pylab.errorbar(time[selectedTime], meanData[selectedTime], stdData95_CI[selectedTime], fmt = 'o') \r\n pylab.show()", "def objective(trial):\n # The parameters that we will calibrate the model for are shown here.\n # Optuna trial i\n BOD = trial.suggest_uniform(\"BOD\", 0, 1) #Review ranges here\n k_r = trial.suggest_uniform(\"k_r\", 0, 1) #Review Ranges here \n \n def ChLa(t):\n return 1 # Need to link to data\n\n def I(x):\n return 1 # Need to link to data\n\n K_z = 2 * 10**(-5) # p.51\n a = K_z\n k_b = 0.1 # Table 5\n th_b = 1.047 # Table 5\n k_r = 0.1 # Table 5\n YCHO2 = 0.0083 # Table 5\n th_p = 1.036 # Table 5\n th_s = 1.065 # Table 5\n th_r = 1.047 # Table 5\n\n def Temp(t):\n \"\"\"\n Function that maps time to temperature\n \"\"\"\n return 20 # Need to link to data\n\n def P_max(t):\n return 9.6 * 1.036 **(Temp(t) - 20) # Eq. 4\n\n def L_min(t):\n I = 1 # Need to link to PAR data\n K_1 = 0.687 * 1.086**(Temp(t) - 20)\n K_2 = 15\n return I * (1 + 2 * np.sqrt(K_1 / K_2)) / (I + K_1 + I**2 / K_2) # Eq. 5\n \n # f deals with sink and source terms \n def f(x, t):\n return -1 / YCHO2 * k_r * th_r**(Temp(t) - 20) * ChLa(t) + P_max(t) * L_min(t) * ChLa(t) - k_b * th_b**(Temp(t)-20) * BOD \n\n L = 200 # Length of domain\n dt = 1 / 48 # Mesh spacing in t\n F = a * dt # a * dt / dx**2\n T = 100 # Simulation time stop\n\n # Solving the PDE\n DO, x, t, _ = solver_FE_simple(I, a, f, L, dt, F, T)\n \n # Creating some bogus targets while database errors are happening\n DO_data = DO + np.random.random(len(DO))\n\n # Using mean squared error as the measure of fit, where we want\n # to minimize this number\n return ((DO - DO_data)**2).mean()", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n print 'alpha, gamma:', a.alpha, a.gamma\n print 'penalties:', a.total_penalties\n print 'total rewards:', a.total_rewards", "def simulation(self, n = 42):\n\n self.initialisation()\n i = 0\n while i < n and self.agent.vivant :\n self.step()\n i+= 1\n return self.perfGlobale", "def test(args):\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n env_info = env.reset(train_mode=True)[brain_name]\n\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n\n # dim of each action\n action_size = brain.vector_action_space_size\n print('Size of each action:', action_size)\n\n # dim of the state 
space\n states = env_info.vector_observations\n state_size = states.shape[1]\n\n agent = MADDPG(state_size, action_size, actor_layer_dim_1=args.actor_layer_dim_1,\n actor_layer_dim_2=args.actor_layer_dim_2,\n actor_layer_dim_3=args.actor_layer_dim_3,\n critic_layer_dim_1=args.critic_layer_dim_1,\n critic_layer_dim_2=args.critic_layer_dim_2,\n critic_layer_dim_3=args.critic_layer_dim_3)\n\n agent.load(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n\n test_scores = []\n for i_episode in tqdm(range(1, 1+args.test_n_run)):\n # initialize the scores\n scores = np.zeros(num_agents)\n env_info = env.reset(train_mode=True)[\n brain_name] # reset the environment\n states = env_info.vector_observations # get the current states\n dones = [False]*num_agents\n while not np.any(dones):\n actions = agent.act(states) # select actions\n # send the actions to the environment\n env_info = env.step(actions)[brain_name]\n next_states = env_info.vector_observations # get the next states\n rewards = env_info.rewards # get the rewards\n dones = env_info.local_done # see if episode has finished\n scores += rewards # update the scores\n # roll over the states to next time step\n states = next_states\n\n test_scores.append(np.max(scores))\n\n avg_score = sum(test_scores)/len(test_scores)\n print(\"Test Score: {}\".format(avg_score))\n\n return avg_score", "def test_variational():\n # iris\n #pres = \"Test pour le data set Iris (facile, classique)\"\n #test_from_func_variational(pres, 15, 10, 3, True, Iris)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func_variational(pres, 15, 10, 3, True, Breast_cancer)\n\n # digits\n # pres = \"Test pour le data set Digits (difficile, classique)\"\n # test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n # pres = \"Test pour le data set Wine (moyen, classique)\"\n # test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func_variational(pres, 25, 10, 2, True, Gaussian)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n test_from_func_variational(pres, 10, 15, 14, True, Sequence)\n\n #Quantum data\n pres = \"Test pour des données générées par ordinateur quantique (facile, quantique)\"\n print(pres)\n _, samp_train, samp_test, labels = ad_hoc_data(15, 10, 2, 0.3, True)\n sample_m, sample_p = stock_get(20, 0.3)\n\n labels_me = [-1, 1]\n samp_train_me = {-1: np.array(sample_m[:15]), 1: np.array(sample_p[:15])}\n samp_test_me = {-1: np.array(sample_m[15:]), 1: np.array(sample_p[15:])}\n print(samp_train)\n print(samp_train_me)\n print(samp_test)\n print(samp_test_me)\n\n my_impl_variational(samp_train, samp_test, labels)\n print(\"Pour autres données quantiques\")\n my_impl_variational(samp_train_me, samp_test_me, labels_me)", "def simulationTwoDrugsDelayedTreatment(numTrials):\n \n numViruses = 100\n maxPop = 1000\n maxBirthProb = 0.1\n clearProb = 0.05\n resistances = {'guttagonol': False, 'grimpex': False}\n mutProb = 0.005\n delays = [300, 150, 75, 0]\n f, axarr = pylab.subplots(2, 2)\n x_plot = []\n\n for delay in delays:\n FinalPopSize = [0.0 for x in range(numTrials)]\n for trial in range(numTrials):\n viruses = [ResistantVirus(maxBirthProb, clearProb, resistances, mutProb) for n in range(numViruses)]\n patient = 
TreatedPatient(viruses, maxPop)\n for i in range(150):\n patient.update()\n patient.addPrescription('guttagonol')\n for j in range(150, 150+delay):\n patient.update()\n patient.addPrescription('grimpex')\n for k in range(150+delay, 300+delay):\n patient.update()\n FinalPopSize[trial] = patient.getTotalPop()\n x_plot.append(FinalPopSize)\n\n axarr[0, 0].hist(x_plot[0])\n axarr[0, 1].hist(x_plot[1])\n axarr[1, 0].hist(x_plot[2])\n axarr[1, 1].hist(x_plot[3])\n pylab.show()\n return x_plot", "def run_trial():\n env = gym.make('CartPole-v0')\n obs_dim = env.observation_space.shape[0]\n n_actions = env.action_space.n\n\n qnet = QNet(obs_dim, n_actions)\n agent = Sarsa(qnet, n_actions, 0.99, 1.0, 0.05, 1e4)\n optim = torch.optim.RMSprop(qnet.parameters(), lr=0.01)\n memory = Memory()\n\n return_hist = []\n timestep = 1\n\n while timestep < 1e5:\n state = env.reset()\n done = False\n while not done:\n # Pick action and run a single environment step\n action = agent.act(state, timestep).item()\n next_state, reward, done, _ = env.step(action)\n # Add experience to memory for training\n memory.add_experience(state, action, reward, next_state, done)\n\n state = next_state\n\n # Run a single training step every 32 timesteps\n if timestep % 32 == 0:\n batch = memory.sample()\n agent.train(batch, optim)\n\n # Evaluate the current agent every 1000 agents\n if timestep % 1000 == 0:\n eval_return = evaluate(agent)\n return_hist.append(eval_return)\n\n timestep += 1\n\n return np.array(return_hist)", "def test_conjecture():\n print(\"Executing test_conjecture:\")\n\n theory=[]\n\n print(language.program_string(theory))\n for i in range(10):\n theory=conjecture.vary([theory], 0, [], steps=1)\n print(f\"Theory after {i+1} stages of variation:\")\n print(language.program_string(theory))", "def scenario3(sim):\n\n init_logging(logfile=None, debug=True)\n second = 1000.0\n duration = 10\n tau_m = 20 # ms\n cm = 1.0 # nF\n v_reset = -60\n cell_parameters = dict(\n tau_m=tau_m,\n cm=cm,\n v_rest=-70,\n e_rev_E=0,\n e_rev_I=-70,\n v_thresh=-54,\n v_reset=v_reset,\n tau_syn_E=5,\n tau_syn_I=5,\n )\n g_leak = cm / tau_m # µS\n\n w_min = 0.0 * g_leak\n w_max = 0.05 * g_leak\n\n r1 = 5.0\n r2 = 40.0\n\n sim.setup()\n pre = sim.Population(100, sim.SpikeSourcePoisson())\n post = sim.Population(10, sim.IF_cond_exp())\n\n pre.set(duration=duration * second)\n pre.set(start=0.0)\n pre[:50].set(rate=r1)\n pre[50:].set(rate=r2)\n assert_equal(pre[49].rate, r1)\n assert_equal(pre[50].rate, r2)\n post.set(**cell_parameters)\n post.initialize(v=RandomDistribution('normal', mu=v_reset, sigma=5.0))\n\n stdp = sim.STDPMechanism(\n sim.SpikePairRule(tau_plus=20.0, tau_minus=20.0,\n A_plus=0.01, A_minus=0.01),\n sim.AdditiveWeightDependence(w_min=w_min, w_max=w_max),\n #dendritic_delay_fraction=0.5))\n dendritic_delay_fraction=1)\n\n connections = sim.Projection(pre, post, sim.AllToAllConnector(),\n synapse_type=stdp,\n receptor_type='excitatory')\n\n initial_weight_distr = RandomDistribution('uniform', low=w_min, high=w_max)\n connections.randomizeWeights(initial_weight_distr)\n initial_weights = connections.get('weight', format='array', gather=False)\n # assert initial_weights.min() >= w_min\n # assert initial_weights.max() < w_max\n # assert initial_weights[0, 0] != initial_weights[1, 0]\n\n pre.record('spikes')\n post.record('spikes')\n post[0:1].record('v')\n\n sim.run(duration * second)\n\n actual_rate = pre.mean_spike_count() / duration\n expected_rate = (r1 + r2) / 2\n errmsg = \"actual rate: %g expected rate: %g\" % 
(actual_rate, expected_rate)\n assert abs(actual_rate - expected_rate) < 1, errmsg\n #assert abs(pre[:50].mean_spike_count()/duration - r1) < 1\n #assert abs(pre[50:].mean_spike_count()/duration- r2) < 1\n final_weights = connections.get('weight', format='array', gather=False)\n assert initial_weights[0, 0] != final_weights[0, 0]\n\n try:\n import scipy.stats\n except ImportError:\n raise SkipTest\n t, p = scipy.stats.ttest_ind(initial_weights[:50, :].flat, initial_weights[50:, :].flat)\n assert p > 0.05, p\n t, p = scipy.stats.ttest_ind(final_weights[:50, :].flat, final_weights[50:, :].flat)\n assert p < 0.01, p\n assert final_weights[:50, :].mean() < final_weights[50:, :].mean()\n sim.end()\n return initial_weights, final_weights, pre, post, connections", "def test_success_formula(n_meanings, n_signals, n_trials, n_runs):\r\n input_meaning_p = rand_p(n_meanings)\r\n print(input_meaning_p)\r\n print()\r\n dyad = ProductSystem.rand_dyad(n_meanings, n_signals)\r\n print(dyad.psys.matrix)\r\n print()\r\n print(dyad.rsys.matrix)\r\n print()\r\n data = tuple(cummean(tuple(dyad.randsuccess(input_meaning_p) \\\r\n for j in range(n_trials))) \\\r\n for i in range(n_runs))\r\n graph = sns.tsplot(data, err_style='unit_traces')\r\n graph.set(xlabel='Number of trials', ylabel='Success proportion', ylim=[0,1])\r\n success_p = dyad.success_p(input_meaning_p)\r\n print(success_p)\r\n sns.tsplot((success_p,) * n_trials, color=sns.color_palette()[1])", "def perform_trials(self, evolver: 'Evolver'):\r\n\r\n approach_ind = evolver.approach[0]\r\n\r\n approach_params = evolver.approach_params.copy()\r\n approach_params[self.evolve_param.name] = self.checking\r\n\r\n sens_params = self.new_sensitives.copy()\r\n sens_params[self.sensitive[1].name] = self.sens_checking\r\n\r\n trial_best = float('-inf')\r\n trial_patience = evolver.settings.trial_patience\r\n trial_epsilon = evolver.settings.trial_epsilon\r\n trial_patience_used = 0\r\n trial_index = 0\r\n\r\n if self.sensitive[1].categorical:\r\n metric_store = self.sens_sweep[self.sens_checking]\r\n else:\r\n evolver.logger.debug('sens_sweep_pts=%s, sens_sweep_len=%s, sens_checking=%s', self.sens_sweep_pts, self.sens_sweep_len, self.sens_checking)\r\n insert_ind = (\r\n np.searchsorted(self.sens_sweep_pts[:self.sens_sweep_len], self.sens_checking)\r\n if self.sens_sweep_len > 0\r\n else 0\r\n )\r\n assert isinstance(insert_ind, (int, np.int32, np.int64)), f'insert_ind={insert_ind}, type(insert_ind)={type(insert_ind)}'\r\n if insert_ind < self.sens_sweep_len:\r\n self.sens_sweep_pts[insert_ind+1:self.sens_sweep_len+1] = (\r\n self.sens_sweep_pts[insert_ind:self.sens_sweep_len])\r\n self.sens_sweep_pts[insert_ind] = self.sens_checking\r\n\r\n self.sens_sweep[insert_ind+1:self.sens_sweep_len+1] = (\r\n self.sens_sweep[insert_ind:self.sens_sweep_len])\r\n self.sens_sweep[insert_ind, :] = 0\r\n else:\r\n self.sens_sweep_pts[insert_ind] = self.sens_checking\r\n metric_store = self.sens_sweep[insert_ind]\r\n\r\n while (trial_index < evolver.settings.max_trials\r\n and trial_patience_used < trial_patience):\r\n for worker in evolver.workers:\r\n worker.job_queue.put((approach_ind, approach_params.copy(), sens_params.copy()))\r\n evolver.logger.debug('dispatched jobs')\r\n\r\n for worker in evolver.workers:\r\n while True:\r\n try:\r\n result = worker.result_queue.get()\r\n break\r\n except InterruptedError:\r\n evolver.logger.critical('result_queue.get() was interrupted')\r\n\r\n if trial_index == evolver.settings.max_trials:\r\n continue\r\n result_metric = 
result[evolver.settings.metric_name]\r\n metric_store[trial_index] = result_metric\r\n trial_index += 1\r\n\r\n if result_metric - trial_epsilon > trial_best:\r\n evolver.logger.debug('got trial metric %s (improved old: %s)', result_metric, trial_best)\r\n trial_best = result_metric\r\n if trial_patience_used < trial_patience:\r\n trial_patience_used = 0\r\n elif trial_patience_used < trial_patience:\r\n trial_patience_used += 1\r\n evolver.logger.debug('got trial metric %s, exhausted patience %s/%s',\r\n result_metric, trial_patience_used, trial_patience)\r\n else:\r\n evolver.logger.debug('got trial metric %s (worse, but already out of patience)', result_metric)", "def simulation():\n # initialize action set\n action_set = np.zeros(int((s.MAX_INSPECT - s.MIN_INSPECT) / s.DELTA) + 3)\n x, i = s.MIN_INSPECT, 1\n while x <= s.MAX_INSPECT:\n action_set[i] = x\n x += s.DELTA\n i += 1\n action_set[-1] = np.inf\n action_number = len(action_set)\n\n # initialize current state\n current_state = math.floor(np.random.rand(1) * s.NUM_STATES)\n\n # initialize action index\n if current_state == 0:\n action_index = 0\n elif current_state == s.NUM_STATES - 1:\n action_index = action_number - 1\n\n if current_state != 0 and current_state != s.NUM_STATES - 1:\n action_index = action_number - 2\n\n # initialize policy set\n greedy_policy = np.zeros(s.NUM_STATES)\n greedy_policy[-1] = np.inf\n for i in range(1, s.NUM_STATES - 1):\n greedy_policy[i] = s.MAX_INSPECT\n\n visit_times = np.zeros([s.NUM_STATES, action_number])\n\n # initialization for simulation\n falpha, Aalpha, delay_T, uni_parameter = equivalent_markov(greedy_policy)\n stable_prob, potential = stable_potential(falpha, Aalpha, uni_parameter)\n last_value = falpha + np.matmul(Aalpha, potential)\n dis_value = last_value\n # ave_vector = np.matmul(stable_prob, falpha)\n # ave_estimate = ave_vector.tolist()\n each_transit_cost, each_transit_time, total_reward = (0 for i in range(3))\n\n # initialize DQN model if selected\n dqn = DQN() if MODEL == 1 else None\n # initialize Q-table if Q-learning selected\n q_factor = ql.init_q_factor(action_number) if MODEL == 2 else None\n\n for out_step in range(s.EPOCH):\n epsilon = s.EPSILON_1 if MODEL == 1 else s.EPSILON_2\n\n for inner_step in range(s.EPOCH_LEARN):\n\n visit_times[current_state, action_index] += 1\n current_action = greedy_policy[current_state]\n\n inspect_cost = 0 if current_state == s.NUM_STATES - 1 else s.K5 * current_action\n\n flag, sojourn_T, service_T, next_state = state_transition(current_state, current_action)\n each_transit_time = s.DISCOUNT * each_transit_time + (sojourn_T - each_transit_time) / pow(\n out_step * s.EPOCH_LEARN + (inner_step + 1), s.Q_AVE_STEP)\n end_sojourn_T = math.exp(- s.ALPHA * sojourn_T)\n end_serve_T = math.exp(- s.ALPHA * service_T)\n\n if s.ALPHA == 0:\n dis_T, dis_serve_T, dis_wait_T = sojourn_T, service_T, sojourn_T - service_T\n else:\n dis_T, dis_serve_T = (1 - end_sojourn_T) / s.ALPHA, (1 - end_serve_T) / s.ALPHA\n dis_wait_T = (end_serve_T - end_sojourn_T) / s.ALPHA\n\n if flag == 0: # no processing, waiting\n cost_real = (s.K1 * (s.NUM_STATES - current_state) + s.K3) * sojourn_T + inspect_cost\n cost_purt = (s.K1 * (s.NUM_STATES - current_state) + s.K3) * dis_T + inspect_cost\n else: # no waiting, processing\n cost_real = s.K1 * (s.NUM_STATES - current_state - 1) * sojourn_T + s.K2 * service_T + s.K3 * (\n sojourn_T - service_T) + s.K4 + inspect_cost\n cost_purt = s.K1 * (s.NUM_STATES - current_state - 1) * dis_T + s.K2 * dis_serve_T + s.K3 * 
dis_wait_T \\\n + s.K4 * end_serve_T + inspect_cost\n\n each_transit_cost = s.DISCOUNT * each_transit_cost + (cost_real - each_transit_cost) / (\n pow(out_step * s.EPOCH_LEARN + (inner_step + 1), s.Q_AVE_STEP))\n\n ave_q_cost = each_transit_cost / each_transit_time\n # ave_estimate.append(ave_q_cost)\n cost_dis = cost_purt - ave_q_cost * dis_T\n\n if MODEL == 1:\n reward = - cost_dis\n dqn.store_transition(current_state, action_index, reward, next_state)\n if dqn.memory_counter >= s.MEMORY_CAPACITY:\n dqn.learn(s.EPOCH_LEARN, inner_step, PS)\n else:\n difference = cost_dis + end_sojourn_T * min(q_factor[next_state, :]) \\\n - q_factor[current_state, action_index]\n q_factor = ql.update_q_factor(q_factor, current_state, action_index, difference,\n visit_times, inner_step, PS)\n current_state = next_state # transit to next state\n\n if current_state == 0:\n action_index = 0\n elif current_state == s.NUM_STATES - 1:\n action_index = action_number - 1\n else:\n if MODEL == 1:\n action_index = int(dqn.choose_action(current_state, epsilon))\n if action_set[action_index] <= 1:\n greedy_policy[current_state] = action_set[action_index]\n else:\n greedy_policy[current_state] = 1\n else:\n if np.random.rand(1) < epsilon:\n action_index = int(np.floor(np.random.rand(1) * (action_number - 2)) + 1)\n else:\n # minimal_q_value = np.min(q_factor[current_state, :])\n action_index = np.argmin(q_factor[current_state, :])\n greedy_policy[current_state] = action_set[action_index]\n\n # store the policy learned from the iterations\n optimal_policy = greedy_policy\n\n if MODEL != 1:\n for i in range(1, s.NUM_STATES - 1):\n # minimal_q_value_temp = np.min(q_factor[i, :])\n action_index_temp = np.argmin(q_factor[i, :])\n optimal_policy[i] = action_set[action_index_temp]\n\n falpha, Aalpha, delay_T, uni_parameter = equivalent_markov(optimal_policy)\n stable_prob, potential = stable_potential(falpha, Aalpha, uni_parameter)\n\n last_value = falpha + np.matmul(Aalpha, potential)\n dis_value = np.concatenate((dis_value, last_value), axis=1)\n total_reward += - np.ndarray.item(last_value[0])\n # new_ave_cost = np.matmul(stable_prob, falpha)\n # ave_vector = np.concatenate((ave_vector, new_ave_cost))\n print(\"epoch: {} , the epoch reward is {}\".format(out_step, round(- np.ndarray.item(last_value[0]), 2)))\n\n # result = np.asarray(dis_value)\n print(\"total reward:\", total_reward)\n\n return dis_value, total_reward", "def simulationTwoDrugsDelayedTreatment():\n\n # TODO", "def run_experiment(train, order, test, future, actual, trials):\n total_mse = 0\n time = 0\n while time < trials:\n time += 1\n total_mse += mse(predict(markov_chain(train, order), test, future), actual)\n return total_mse / float(trials)", "def run(self, agent: ThompsonSampler, n_trials: int = 1000) -> Tuple[int, np.ndarray]:\n\n for i in range(n_trials):\n best_variant = agent.choose_variant()\n agent.reward = np.random.binomial(n=1, p=self.payouts[best_variant]) # mimick real behaviour\n agent.update()\n\n self.a_b_beta_prams.append([(a_i, b_i) for a_i, b_i in zip(agent.a, agent.b)])\n self.total_reward += agent.reward\n self.variants_rewards[best_variant] += agent.reward\n\n return self.total_reward, self.variants_rewards", "def simulationDelayedTreatment(numTrials):\n \n numViruses = 100\n maxPop = 1000\n maxBirthProb = 0.1\n clearProb = 0.05\n resistances = {'guttagonol': False}\n mutProb = 0.005\n delays = [300, 150, 75, 0]\n f, axarr = pylab.subplots(2, 2)\n x_plot = []\n\n for delay in delays:\n FinalPopSize = [0.0 for x in 
range(numTrials)]\n for trial in range(numTrials):\n viruses = [ResistantVirus(maxBirthProb, clearProb, resistances, mutProb) for n in range(numViruses)]\n patient = TreatedPatient(viruses, maxPop)\n for i in range(delay):\n patient.update()\n patient.addPrescription('guttagonol')\n for j in range(delay, delay+150):\n patient.update()\n FinalPopSize[trial] = patient.getTotalPop()\n x_plot.append(FinalPopSize)\n\n axarr[0, 0].hist(x_plot[0])\n axarr[0, 1].hist(x_plot[1])\n axarr[1, 0].hist(x_plot[2])\n axarr[1, 1].hist(x_plot[3])\n pylab.show()\n\n # pylab.plot(avgPopSize, label = 'avg pop size')\n # pylab.plot(avgGuttagonolResistantPop, label = 'avg pop size guttagonol-resistant')\n # pylab.xlabel(\"Time\")\n # pylab.ylabel(\"Average Population Size\")\n # pylab.title(\"Average Size of the Virus Populations\")\n # pylab.legend(loc = 'best')\n # pylab.show()", "def run(self, n_trials=10):\n # Create study object\n if self.study is None:\n self.study = optuna.create_study(\n direction=\"minimize\",\n sampler=optuna.samplers.RandomSampler(seed=123)\n )\n # Run trials\n self.study.optimize(\n lambda x: self.objective(x),\n n_trials=n_trials,\n n_jobs=-1\n )", "def run_test(d):\n\n ######### Problem Specification\n\n # Data generation parameters\n prior_mu_z = np.zeros(d, dtype=np.float32) # Prior mean\n prior_sigma_z = np.eye(d, dtype=np.float32) # Prior covariance matrix\n\n # True model parameters\n num_range = np.arange(-(d-1)/2, (d+1)/2, dtype=np.float32)\n\n t_delta = num_range / 5 \n\n if d == 1:\n t_sigma = np.ones(1)\n else: \n # Allow sigma to range from 0.1 to 1\n t_sigma = 36/(10*(d-1)**2) * num_range**2 + 0.1 \n\n ######### Variable Initialization\n\n # Initial model parameters - same across all methods\n init_delta = prior_mu_z.copy()\n init_log_sigma = 3 * np.ones(d)\n\n # Initial HVAE variational parameters\n init_T = 5.\n init_eps = 0.005 * np.ones(d)\n max_eps = params['max_eps'] * np.ones(d)\n init_logit_eps = np.log(init_eps/(max_eps - init_eps))\n init_log_T_0 = np.log(init_T - 1)\n\n # Initial NF variational parameters\n init_u_pre_reparam = scipy.stats.truncnorm.rvs(-2, 2, scale=0.1, size=d)\n init_w = scipy.stats.truncnorm.rvs(-2, 2, scale=0.1, size=d)\n init_b = 0.1\n\n # Initial VAE parameters\n init_mu_z = prior_mu_z.copy()\n init_log_sigma_z = np.ones(d)\n\n ######### Set up models\n\n HVAE_model_1 = HVAE(\n ['delta', 'log_sigma', 'logit_eps', 'log_T_0'],\n [init_delta, init_log_sigma, init_logit_eps, init_log_T_0], \n 'HVAE_1', d, params['HVAE_K_1'])\n HVAE_model_2 = HVAE(\n ['delta', 'log_sigma', 'logit_eps', 'log_T_0'],\n [init_delta, init_log_sigma, init_logit_eps, init_log_T_0], \n 'HVAE_2', d, params['HVAE_K_2'])\n\n HVAE_model_notemp_1 = HVAE(\n ['delta', 'log_sigma', 'logit_eps'],\n [init_delta, init_log_sigma, init_logit_eps], \n 'HVAE_notemp_1', d, params['HVAE_K_1'])\n HVAE_model_notemp_2 = HVAE(\n ['delta', 'log_sigma', 'logit_eps'], \n [init_delta, init_log_sigma, init_logit_eps],\n 'HVAE_notemp_2', d, params['HVAE_K_2'])\n\n NF_model_1 = NF(\n ['delta', 'log_sigma', 'u_pre_reparam', 'w', 'b'],\n [init_delta, init_log_sigma, init_u_pre_reparam, init_w, init_b],\n 'NF_1', d, params['NF_K_1'])\n NF_model_2 = NF(\n ['delta', 'log_sigma', 'u_pre_reparam', 'w', 'b'],\n [init_delta, init_log_sigma, init_u_pre_reparam, init_w, init_b],\n 'NF_2', d, params['NF_K_2'])\n\n VB_model = VB(['delta', 'log_sigma', 'mu_z', 'log_sigma_z'], \n [init_delta, init_log_sigma, init_mu_z, init_log_sigma_z], 'VB', d)\n\n model_list = [HVAE_model_1, HVAE_model_2, 
HVAE_model_notemp_1, \n HVAE_model_notemp_2, NF_model_1, NF_model_2, VB_model]\n \n ######### Generate Training Data & Save - One for each test\n\n train_data_list = []\n\n for i in range(params['n_tests']):\n z = np.random.multivariate_normal(prior_mu_z, prior_sigma_z)\n x = np.random.multivariate_normal(z + t_delta, np.diag(t_sigma**2), \n size=params['n_data'])\n train_data_list.append(x)\n\n # Folder should have already been created in the initializations\n data_path = os.path.join('save', str(d), 'train_data.p')\n pickle.dump(train_data_list, open(data_path, 'wb')) \n\n ######### Train models\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n # Store the final parameter values for all test runs in this dictionary\n final_params = {}\n\n for m in model_list:\n\n final_values = []\n\n for i in range(params['n_tests']):\n (delta, sigma) = m.train(sess, train_data_list[i], i)\n final_values.append((delta, sigma))\n\n final_params[m.model_name] = final_values.copy()\n\n ######### Test models using difference between parameters\n\n param_diffs = {}\n\n for m in model_list:\n\n diffs = []\n\n for i in range(params['n_tests']):\n delta = final_params[m.model_name][i][0]\n sigma = final_params[m.model_name][i][1]\n\n delta_diff = np.sum((delta - t_delta)**2)\n sigma_diff = np.sum((sigma - t_sigma)**2)\n\n diffs.append((delta_diff, sigma_diff))\n\n param_diffs[m.model_name] = diffs.copy()\n\n # Save parameter differences in a pickle file\n diff_path = os.path.join('save', str(d), 'all_diffs.p')\n pickle.dump(param_diffs, open(diff_path, 'wb'))", "def act(self, s, exploration, game, return_pred_opp=False):\n opponent_p = self.compute_opponent_model(s)\n # print(opponent_p)\n opponent_action = np.random.choice(\n opponent_p.size, size=1, p=opponent_p)[0]\n # agent_p = np.exp(self.Q[s][:, opponent_action])\n agent_p = self.compute_marginal_pi(s)\n if exploration and random.random() < self.episilon:\n agent_action = random.randint(0, self.action_num - 1)\n else:\n if self.verbose:\n for s in self.Q.keys():\n print('{}--------------'.format(self.id_))\n print('Q of agent {}: state {}: {}'.format(self.id_, s, str(self.Q[s])))\n # print('QAof agent {}: state {}: {}'.format(self.id_, s, str(self.Q_A[s])))\n # self.Q_A\n print('pi of agent {}: state {}: {}'.format(self.id_, s, self.pi[s]))\n # print('pi of opponent agent {}: state{}: {}'.format(self.id_, s, self.opponent_best_pi[s]))\n print('{}--------------'.format(self.id_))\n agent_action = StationaryAgent.sample(agent_p)\n if return_pred_opp:\n return agent_action, opponent_action\n else:\n return agent_action", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.00000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, 
enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.001, display=True) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def Transformer(robot1,robot2): \n #with a 50% change surgery will be triggered\n return Random_Actuation.surgery(\"Transformer\",robot1,robot2,50)", "def simulate(params,n_states,n_trials,env = \"rich\", policy=\"softmax\",\\\n D=0.5, mod = \"constant\",thresh = 0, k=1,rnd_seeds = None, V0=0.0, full=False,\n rmag = 1, lmag = 0):\n\n\tdef calc_D(state):\n\t\t\"\"\"\n\t\tcalculates D for the current trial and returns\n\t\tthe updated state tracker for D and respective betas\n\n\t\tD represents dopamine levels (equivalent of rho in OpAL)\n\t\tScales between 0 and 1, with 1 high level of DA\n\t\t\"\"\"\n\t\tif t < thresh:\n\t\t\tstate.D_g[t] = 0.5\n\t\t\tstate.D_n[t] = 0.5\n\t\telse:\n\t\t\tif mod == \"constant\":\n\t\t\t\tstate.D_g[t] = D\n\t\t\t\tstate.D_n[t] = 1-D\n\t\t\tif mod == \"value\":\n\t\t\t\t# NOTE: if rmag and lmag is 1/0, can just use V\n\t\t\t\t# average of two actions\n\t\t\t\tV = np.mean(1/2*(state.QG[t,:] - state.QN[t,:])) # state average(?) \n\t\t\t\tV = 1/(1 + np.exp(-V*k)) # translate between 0 and 1\n\t\t\t\tstate.D_g[t] = V \n\t\t\t\tstate.D_n[t] = 1 - V\n\t\treturn state\n\n\n\tdef generate_state():\n\t\t\"\"\"\n\t\tGet appropriate reward probabilities and magnitudes\n\t\tfor the specified environment type\n\t\t\"\"\"\n\n\t\tprobs = calc_probs(env)\n\t\tn_options = len(probs)\n\n\t\t# feedback for agent\n\t\tr_mag = np.zeros(n_options) + rmag\n\t\tl_mag = np.zeros(n_options) + lmag\n\n\t\tnew_state = Bogacz(n_trials, n_options, probs, r_mag, l_mag, V0=V0)\n\t\treturn new_state\n\n\n\t# learning rate, damping, decay, softmax temp\n\talpha_a, epsilon, lbda, beta = params\n\tstates = []\n\n\t# do the thing\n\tfor s in np.arange(n_states):\n\n\t\t# check if random seed provided\n\t\tif rnd_seeds is not None:\n\t\t\trandom.seed(rnd_seeds[s])\n\t\t\tnp.random.seed(rnd_seeds[s])\n\n\t\tstate = generate_state()\n\t\tfor t in range(n_trials):\n\n\t\t\tstate.idx = t\n\t\t\tstate=calc_D(state)\t\t\t\t\t# get D\n\t\t\tstate.policy_softmax(beta)\n\t\t\tstate.act(alpha_a, epsilon, lbda)\t# update \n\n\t\t\tif full:\n\t\t\t\tstate.update_other_actions(alpha_a, epsilon, lbda)\n\n\t\tstates.append(state)\t\t\t\t\t# save sim\n\n\treturn states", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.01) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit\n return [a.state_action_table, a.reward_hist]", "def run_example():\r\n num_die_sides = 6\r\n hand = (1, 1, 1, 5, 6)\r\n hand_score, hold = strategy(hand, num_die_sides)\r\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def run_example():\r\n num_die_sides = 6\r\n hand = (1, 1, 1, 5, 6)\r\n hand_score, hold = strategy(hand, num_die_sides)\r\n print \"Best strategy for hand\", 
hand, \"is to hold\", hold, \"with expected score\", hand_score", "def observe(self, trials):\n with self.get_client() as ax_client:\n for trial in trials:\n if not self.has_suggested(trial):\n _, trial_index = ax_client.attach_trial(\n AxOptimizer.transform_params(flatten(trial.params), self.space)\n )\n self._trials_map[self.get_id(trial)] = trial_index\n\n if not self.has_observed(trial):\n # Check the trial status\n trial_status = trial.status\n\n # If the trial status is `completed`\n if trial_status == \"completed\":\n # Complete it in Ax\n ax_trial_index = self._trials_map[self.get_id(trial)]\n raw_data = {\n \"objective\": trial.objective.value,\n **{\n s.name: s.value\n for s in trial.statistics\n if s.name in self.extra_objectives\n },\n **{r.name: r.value for r in trial.constraints},\n }\n ax_client.complete_trial(\n trial_index=ax_trial_index, raw_data=raw_data\n )\n\n # If the trial status is `broken`\n elif trial_status == \"broken\":\n # Set is as broken is Ax\n ax_trial_index = self._trials_map[self.get_id(trial)]\n ax_client.log_trial_failure(ax_trial_index)\n\n # Register the unobserved trial\n self.register(trial)", "def run(num, similarity_name=None, reward_name='acc', alpha=0.3):\n \n # --\n # Get subject's data\n sdata = get_behave_data(num)\n sdata.update(get_similarity_data(num))\n \n responses = np.array(sdata['resp'])\n\n rewards = None\n if reward_name == 'acc':\n rewards = np.array(sdata['acc'],dtype=np.float32)\n elif reward_name == 'gl':\n rewards = np.array(sdata['gl'],dtype=np.float32)\n\n trials = np.array(fmri.catreward.roi.data.get_trials())\n conds = list(set(trials))\n ## conds are the unqie entries in trials\n \n values = np.zeros_like(trials,dtype=np.float32)\n rpes = np.zeros_like(trials,dtype=np.float32)\n sim_rewards = np.zeros_like(trials,dtype=np.float32)\n ## Returned....\n\n # Each cond has n states, \n # matching the number of \n # responses (approx 2: {1,6}).\n #\n # Wrong button presses \n # are included, however these\n # are never rewarded so stay at 0.\n for cond in conds:\n if cond == 0: continue\n ## Drop jitter.\n\n # Create states and their rewards.\n mask = trials == cond\n states_c = responses[mask]\n rewards_c = rewards[mask] ## _c for cond...\n\n # Get the RL alg we want to run.\n # based on similarity_name\n if similarity_name == None:\n # No similarity:\n values_c, rpes_c = rl.reinforce.b_delta(\n rewards_c, states_c, alpha)\n sim_rewards_c = rewards_c\n ## To give a consistent return\n ## just map rewards to sim_rewards\n else:\n # Get the similarity data, filter it by mask, and run RL.\n similarity_c = np.array(sdata[similarity_name])[mask]\n values_c, rpes_c, sim_rewards_c = rl.reinforce.b_delta_similarity(\n rewards_c, states_c, similarity_c, alpha)\n \n # sim_rewards_c does not need to be\n # unpacked when similarity_name is None\n sim_rewards_c = rl.misc.unpack(sim_rewards_c, states_c)\n\n # Unpack values and rpes \n # based on states_c\n values_c = rl.misc.unpack(values_c, states_c)\n rpes_c = rl.misc.unpack(rpes_c, states_c)\n \n # Now use the mask to map values_c, etc,\n # into trials space\n values[mask] = values_c\n rpes[mask] = rpes_c\n sim_rewards[mask] = sim_rewards_c\n \n return values.tolist(), rpes.tolist(), sim_rewards.tolist()", "def act(self, s, exploration, game):\n agent_p = self.compute_marginal_pi(s, one_hot=False)\n if self.exploration and random.random() < self.episilon:\n agent_action = random.randint(0, self.action_num - 1)\n else:\n if self.verbose:\n for s in self.Q.keys():\n 
print('{}--------------'.format(self.id_))\n print('Q of agent {}: state {}: {}'.format(self.id_, s, str(self.Q[s])))\n # print('QAof agent {}: state {}: {}'.format(self.id_, s, str(self.Q_A[s])))\n # self.Q_A\n print('pi of agent {}: state {}: {}'.format(self.id_, s, self.pi[s]))\n # print('pi of opponent agent {}: state{}: {}'.format(self.id_, s, self.opponent_best_pi[s]))\n print('{}--------------'.format(self.id_))\n agent_action = np.argmax(agent_p)\n return agent_action", "def sweep(variables={}, trials=0, output=None, trialsParms={},\n showProgress=True):\n if not isinstance(variables, (list, dict)):\n raise ValueError(\"variables must be list or dict: {}\".format(\n variables))\n if not isinstance(trials, int):\n raise ValueError(\"trials must be int\")\n if output is not None and not isinstance(output, str):\n raise ValueError(\"output must be str or None\")\n if not isinstance(trialsParms, dict):\n raise ValueError(\"trialsParms must be a dict\")\n\n trialsParmsDefaults = { 'E': 0.1, 'eps': 1e-2, 'min': 3, 'max': 10000 }\n for k, v in trialsParms.items():\n if k not in trialsParmsDefaults:\n raise ValueError(\"Bad trialsParms key: {}\".format(k))\n\n trialsParmsDefaults[k] = v\n if k == 'min' and trials > 0:\n raise ValueError(\"trialsParms['min'] cannot be specified with \"\n \"trials > 0, as that disables trial count detection.\")\n elif k == 'max' and trials != 0:\n raise ValueError(\"trialsParms['max'] cannot be specified with \"\n \"trials != 0. Use a negative trials number instead.\")\n trialsParmsDefaults.update(trialsParms)\n if trialsParmsDefaults['E'] <= 0. and trialsParmsDefaults['eps'] <= 0.:\n raise ValueError(\"Both E and eps cannot be zero in trialsParms\")\n if trialsParmsDefaults['min'] > trialsParmsDefaults['max']:\n raise ValueError(\"min cannot be greater than max: {} / {}\".format(\n trialsParmsDefaults['min'], trialsParmsDefaults['max']))\n\n allParms = set()\n nExperiments = 0\n if isinstance(variables, list):\n nExperiments = len(variables)\n for v in variables:\n if not isinstance(v, dict):\n raise ValueError(\"If a list, variables must contain dicts\")\n if len(allParms) == 0:\n allParms.update(v.keys())\n else:\n if allParms != set(v.keys()):\n raise ValueError(\"All parameter dictionaries must have \"\n \"same keys; found {} against {}\".format(allParms,\n set(v.keys())))\n for k in v:\n allParms.add(k)\n elif isinstance(variables, dict):\n nExperiments = 1\n for k, v in variables.items():\n allParms.add(k)\n nExperiments *= len(v)\n else:\n raise NotImplementedError(variables)\n\n nTrialsMin = trialsParmsDefaults['min']\n nTrialsMax = trialsParmsDefaults['max']\n if trials != 0:\n nTrialsMax = abs(trials)\n if trials > 0:\n nTrialsMin = trials\n else:\n nTrialsMin = min(nTrialsMin, abs(trials))\n\n with Work([1]) as w:\n def _getNextParams(store):\n \"\"\"Returns the next parameter combination that needs to be tried.\n \"\"\"\n if isinstance(variables, list):\n if not hasattr(store, 'i'):\n store.i = 0\n if store.i == len(variables):\n return\n store.i += 1\n v = variables[store.i - 1]\n if not isinstance(v, dict):\n raise ValueError(\"Parameter was not dict? 
{}\".format(v))\n elif isinstance(variables, dict):\n if not hasattr(store, 'stack'):\n store.keys = list(variables.keys())\n store.stack = { name: 0 for name in store.keys }\n store.carry = 0\n\n if store.carry != 0:\n # All set, no more combinations to try\n return\n\n # Load up next set\n v = {}\n for name, vals in variables.items():\n v[name] = vals[store.stack[name]]\n\n # Increment and cascade\n store.carry = 1\n for k in store.keys:\n if store.carry == 0:\n break\n store.stack[k] += 1\n if store.stack[k] >= len(variables[k]):\n store.stack[k] = 0\n store.carry = 1\n else:\n store.carry = 0\n else:\n raise NotImplementedError(variables)\n\n if showProgress:\n sys.stderr.write(\"job_stream.baked: Starting {} of {} with {} \"\n \"trials\\n\".format(store.id, nExperiments-1,\n store.actualMin))\n\n # v is dict with parameters at the moment; give it an ID\n v = v.copy()\n v['id'] = store.id\n store.id += 1\n return (v, store.actualMin)\n\n\n def _getZc(n):\n \"\"\"For a given number of trials n, returns z_c from Driels et al.\n and whether or not there should be an extra trial due to\n uncertainty.\n \"\"\"\n # An extra trial is required for low counts, due to the fact\n # that there is higher variance in the calculated deviation.\n extra = 1\n\n vFree = n - 1\n zc = 1.96\n if vFree > 15:\n # Normal distribution, and enough that we do not need to\n # have an extra trial.\n extra = 0\n elif vFree >= 10:\n # Here and below is a t-distribution; note that this comes\n # from the 97.5% column in Table 3 of Driels et al., since\n # those coefficients don't include the tail\n zc = 2.23\n elif vFree >= 5:\n zc = 2.57\n elif vFree >= 4:\n zc = 2.78\n elif vFree >= 3:\n zc = 3.18\n elif vFree >= 2:\n zc = 4.30\n elif vFree >= 1:\n zc = 12.71\n return zc, extra\n\n\n @w.frame\n def spinUpParms(store, first):\n if not hasattr(store, 'init'):\n store.init = True\n store.id = 0\n store.results = []\n\n initial = []\n # Start as many experiment combinations as we have CPUs for,\n # with one extra to keep things busy\n nCpu = inline.getCpuCount()\n\n # Calculate the actual minimum number of trials to run\n store.actualMin = min(nTrialsMax, max(nTrialsMin,\n (nCpu // nExperiments)))\n\n while len(initial) < (nCpu // store.actualMin) + 1:\n n = _getNextParams(store)\n if n is None:\n break\n initial.append(n)\n return Multiple(initial)\n\n # Get here, we're done. 
Print results, optionally put in csv\n otherCols = { 'id', 'trials' }\n otherCols.update(allParms)\n valCols = set()\n for r in store.results:\n for k in r:\n if k in otherCols or k in valCols:\n continue\n valCols.add(k)\n cols = [ 'id', 'trials' ] + sorted(allParms) + sorted(valCols)\n df = pd.DataFrame(store.results, columns=cols)\n df.set_index('id', inplace=True)\n print(df.to_string())\n if output is not None:\n df.to_csv(output)\n\n @w.frame(emit=lambda s: s.result)\n def spinUpTrials(store, first):\n if not hasattr(store, 'init'):\n store.init = True\n store.parms = first[0]\n store.trialDone = 0\n store.trialNext = first[1]\n store.resultAvg = collections.defaultdict(float)\n store.resultVar = collections.defaultdict(float)\n return Multiple([ Args(trial=i, **store.parms)\n for i in range(store.trialNext) ])\n\n # If we get here, this trial is done; convert variances to\n # deviations\n devs = { k: v ** 0.5 for k, v in store.resultVar.items() }\n store.result = store.parms\n for k, v in [ ('trials', store.trialDone) ]:\n if k in store.result:\n raise ValueError(\"Duplicate key: {}\".format(k))\n store.result[k] = v\n for k, v in store.resultAvg.items():\n store.result[k] = v\n devk = '{}_dev'.format(k)\n if devk in store.result:\n raise ValueError(\"Duplicate key: {}\".format(devk))\n store.result[devk] = devs[k]\n\n # Calculate error region with 95% confidence\n n = store.trialDone\n zc, _ = _getZc(n)\n errk = '{}_err'.format(k)\n if errk in store.result:\n raise ValueError(\"Duplicate key: {}\".format(errk))\n store.result[errk] = zc * devs[k] / n ** 0.5\n\n yield w\n\n @w.frameEnd\n def spinDownTrials(store, result):\n if (not isinstance(result, dict)\n or store.trialDone + 1 > store.trialNext):\n raise ValueError(\"Result from sweep()'s pipeline must be a \"\n \"single dict (cannot emit Multiple either)\")\n\n store.trialDone += 1\n n = store.trialDone\n\n avgs = store.resultAvg\n devs = store.resultVar\n\n # Recurse through each result; all keys must be present in each\n # result\n for k in avgs.keys():\n if k not in result:\n raise ValueError(\"Key {} was not in a result\".format(k))\n\n for k, v in result.items():\n if k in store.parms:\n raise ValueError(\"Duplicate result key {} found in \"\n \"parameters as well\".format(k))\n if n != 1 and k not in avgs:\n raise ValueError(\"Key {} was not in a result\".format(k))\n\n # Valid for n == 1\n oldAvg = avgs[k]\n oldDev = devs[k] if not np.isnan(devs[k]) else 0.\n newAvg = oldAvg + (v - oldAvg) / n\n avgs[k] = newAvg\n if np.isnan(newAvg):\n devs[k] = newAvg\n else:\n devs[k] = max(0., oldDev + oldAvg ** 2 - newAvg ** 2 + (\n v ** 2 - oldDev - oldAvg ** 2) / n)\n\n numToSpawn = 0\n if store.trialNext >= nTrialsMax:\n # Nothing more to run\n pass\n\n totalNeeded = nTrialsMin\n if n == 1:\n # Not enough information to predict the number of trials, so\n # do not propagate more unless only one was propagated in the\n # first place.\n if store.trialNext != 1 or nTrialsMax == 1:\n # More than one propagated\n numToSpawn = 0\n else:\n # We need another to get statistical significance\n numToSpawn = 1\n else:\n # Determine if more results are needed... 
remember, we're\n # looking for 95% confidence that the true mean is +- E% of the\n # Number of free variables is the same as the number of samples\n # minus one\n zc, extra = _getZc(n)\n\n numNeeded = 0\n # Find the most needed\n for k in avgs.keys():\n avg = avgs[k]\n if np.isnan(avg):\n continue\n dev = devs[k] ** 0.5\n err = trialsParmsDefaults['E'] * abs(avg)\n err = max(err, trialsParmsDefaults['eps'])\n need = zc * dev / err\n numNeeded = max(numNeeded,\n int(math.ceil(need ** 2 + extra)))\n\n totalNeeded = numNeeded\n # Spawn a number equal to num needed minus num already spawned\n numNeeded = numNeeded - store.trialNext\n # Spawn at most two more per completed... this is quite quick\n # growth but will be bounded by the absolute number needed.\n numToSpawn = min(2, max(0, numNeeded))\n numToSpawn = max(0, min(\n store.trialDone - store.trialNext\n + inline.getCpuCount(),\n nTrialsMax - store.trialNext,\n numToSpawn))\n\n trialOld = store.trialNext\n store.trialNext += numToSpawn\n if showProgress:\n sys.stderr.write(\"job_stream.baked: {} done with trial \"\n \"{}{}{}\\n\".format(\n store.parms['id'], n-1,\n # Information on new trials started\n \"; starting {}:{}\".format(trialOld,\n store.trialNext) if numToSpawn else \"\",\n # Information on needed trials\n \"; estimated {} needed\".format(\n totalNeeded)\n if numToSpawn or n == store.trialNext\n else \"\"))\n\n return Multiple([\n Args(trial=store.trialNext - 1 - i, **store.parms)\n for i in range(numToSpawn) ])\n\n @w.frameEnd\n def spinDownParms(store, parms):\n store.results.append(parms)\n return _getNextParams(store)", "def main(num_trials, num_actions):\n\tfor i in xrange(int(num_trials)):\n\t\ttrial(i+1, int(num_actions))", "def rtest_predictoutcome():\n\n #define cohort size\n npatients = 2\n\n #init healthy patients\n simulator = AbbcEnvironment(patients=npatients)\n\n #simulate healthy patients for long term in short term increments\n nstep = int(long_term/short_term)\n\n #define action taken : -1 means patients will be simulated as healthy\n action = np.repeat(-1, npatients)\n\n #init episode list\n episode = [simulator.state]\n\n #main simulation loop to generate episodes\n for step in range(nstep):\n episode += simulator.take_action(action=action, simtime=short_term)\n\n #episode length is 1+2*nstep consisting of intit state (5xnpat) followed by\n # next state and reward (1xnpat) repeating each time step.\n #print(episode)\n #print(len(episode))\n\n #---semi gradient temporal difference (0) algorithm ---\n #init hyperparameters\n alpha = .1 #learning rate\n #init Value function model\n agent = AbbcAgent(discount=1.0)\n #loop over episodes\n for patient in range(npatients):\n #state = [nstep]\n #state += episode[0][:,patient] #get inital state\n state = np.append(episode[0][:,patient],nstep).reshape((6,1)) #get inital state\n\n print(state)\n #loop over time steps in episode\n for k in range(1,nstep+1):\n #get next state and reward\n #nextstate = [nstep-k]\n #nextstate = episode[k*2-1][:,patient]\n nextstate = np.append(episode[k*2-1][:,patient],nstep-k).reshape((6,1))\n\n reward = episode[k*2][patient]\n\n #get magnitude for forces\n magnitude = alpha * (reward + agent.discount * agent.get_value(nextstate)\n - agent.get_value(state))\n #compute forces\n forces = computeforces(agent.prednet, state, 0, \"iden\")\n\n #update model\n for layer in forces:\n index = layer[\"layer\"]\n agent.prednet[index][\"weight\"] += magnitude * layer[\"fweight\"]\n agent.prednet[index][\"bias\"] += magnitude * layer[\"fbias\"]\n\n 
state = np.copy(nextstate)\n\n\n #make predictions\n state = np.append(episode[0][:,patient],nstep).reshape((6,1)) #get inital state\n print(agent.get_value(state))\n\n #Value function approximates outcome return at time horizon.\n assert(False)\n\n ##define action taken\n #action = np.repeat(2, npatients)\n ##main simulation loop\n #for step in range(nstep):\n # _, drugy_reward[step,:] = simulator.take_action(action=action, simtime=short_term)", "def run_example():\n num_die_sides = 6\n hand = (1, 1, 1, 5, 6)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def run_example():\n num_die_sides = 6\n hand = (1, 1, 1, 5, 6)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def run_example():\n num_die_sides = 6\n hand = (1, 1, 1, 5, 6)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def simulationTwoDrugsVirusPopulations():\n #TODO", "def test_josephus_survivor(self):\n\n allure.dynamic.title(\"Testing josephus_survivor function\")\n allure.dynamic.severity(allure.severity_level.NORMAL)\n allure.dynamic.description_html('<h3>Codewars badge:</h3>'\n '<img src=\"https://www.codewars.com/users/myFirstCode'\n '/badges/large\">'\n '<h3>Test Description:</h3>'\n \"<p>In this kata you have to verify that the function \"\n \"correctly returns who is the \\\"survivor\\\", ie: the \"\n \"last element of a Josephus permutation.</p>\")\n\n test_data = [\n ((7, 3), 4),\n ((11, 19), 10),\n ((1, 300), 1),\n ((14, 2), 13),\n ((100, 1), 100)\n ]\n\n for test_data, expected in test_data:\n n = test_data[0]\n k = test_data[1]\n result = josephus_survivor(n, k)\n\n with allure.step(\"Enter test data (n: {}, k: {}) and verify \"\n \"the output ({}) vs expected ({})\".format(n,\n k,\n result,\n expected)):\n print_log(n=n,\n k=k,\n result=result,\n expected=expected)\n\n self.assertEqual(expected,\n result)", "def run_experiments():\n if False: # Change to False when done finding max_scoring_num_rolls\n six_sided_max = max_scoring_num_rolls(six_sided)\n print('Max scoring num rolls for six-sided dice:', six_sided_max)\n\n if False: # Change to True to test always_roll(8)\n print('always_roll(8) win rate:', average_win_rate(always_roll(8)))\n\n if False: # Change to True to test bacon_strategy\n print('bacon_strategy win rate:', average_win_rate(bacon_strategy))\n\n if False: # Change to True to test swap_strategy\n print('swap_strategy win rate:', average_win_rate(swap_strategy))\n\n if True: # Change to True to test final_strategy\n print('final_strategy win rate:', average_win_rate(final_strategy))\n\n \"*** You may add additional experiments as you wish ***\"", "def survival(value=t, lam=lam, f=failure):\n return sum(f * log(lam) - lam * value)", "def run_example():\n num_die_sides = 6\n hand = (1,2,5,5,5)\n hand_score, hold = strategy(hand, num_die_sides)\n print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score", "def objective(trial):\n %time\n env = gym.make('Delivery-v0')\n alpha = trial.suggest_discrete_uniform('alpha', 0.3,0.9,0.3)\n gamma = trial.suggest_discrete_uniform('gamma', 0.6, 1,0.1)\n epsilon = trial.suggest_discrete_uniform('epsilon', 0.01, 0.11, 0.04)\n episodes = 1000000\n \n # For plotting metrics\n all_epochs = []\n all_penalties = []\n 
rewards = []\n \n #Initialize Q table of 22500 x 8 size (22500 states and 8 actions) with all zeroes\n q_table = np.zeros([env.observation_space.n, env.action_space.n]) \n \n for i in range(1, episodes+1):\n state = env.reset()\n episode_rewards = []\n\n epochs, penalties, reward, = 0, 0, 0\n done = False\n\n while not done:\n if random.uniform(0, 1) < epsilon:\n action = env.action_space.sample() # Explore action space randomly\n else:\n action = np.argmax(q_table[state]) # Exploit learned values by choosing optimal values\n\n next_state, reward, done, info = env.step(action) \n\n old_value = q_table[state, action]\n next_max = np.max(q_table[next_state])\n\n new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)\n q_table[state, action] = new_value\n\n if reward == -10:\n penalties += 1\n \n\n state = next_state\n episode_rewards.append(reward)\n epochs += 1\n \n if done == True:\n break \n if epochs == 1000:\n break \n rewards.append(np.sum(episode_rewards))\n \n last_reward = np.mean(rewards)\n # trial.report(-1 * last_reward)\n\n return -1 * last_reward", "def main():\n\tresults = []\n\n\tconfig = configparser.ConfigParser()\n\tconfig.read(\"simulation.ini\")\n\tsettings = config['sim']\n\n\tcompleted_obj_hw = int(settings[\"ClientsPerCampaign\"]) * float(settings[\"CompletedPctgHW\"])\n\texceeded_obj_hw = float(settings[\"ExceededPctgHW\"])\n\tsignificance_level = float(settings[\"SignificanceLevel\"])\n\tz_val_two_tails = scipy.stats.norm.ppf(1 - (significance_level / 2))\n\n\tprint(\"Completed Target HW: \" + str(completed_obj_hw))\n\tprint(\"Exceeded Target HW: \" + str(exceeded_obj_hw))\n\n\tcompleted_vals = []\n\texceeded_vals = []\n\tdone = False\n\n\tcompleted_avg = 0\n\texceeded_avg = 0\n\tcompleted_hw = 0\n\texceeded_hw = 0\n\n\ti = 0\n\twhile not done:\n\t\tprint(\"RUN: \" + str(i + 1))\n\t\tenv = simpy.Environment()\n\t\tsim = Simulation(env, settings, i == 0)\n\t\tsim.run()\n\t\tresults.append(sim.results)\n\t\ti += 1\n\n\t\tif settings['RunOnce'] == 'yes':\n\t\t\tprint(\"RUN ONCE\")\n\t\t\tsys.exit()\n\n\t\tcompleted_vals.append(sim.results['completed_count'])\n\t\texceeded_vals.append(sim.results['exceeded_proportion'])\n\n\t\tif i < 2:\n\t\t\tprint(\"---------------\")\n\t\t\tcontinue\n\n\t\tcompleted_avg = sum(completed_vals) / len(completed_vals)\n\t\tcompleted_S = sum([(v - completed_avg) ** 2 for v in completed_vals]) / (i - 1)\n\t\tcompleted_S = math.sqrt(completed_S)\n\t\tcompleted_hw = (z_val_two_tails * completed_S) / math.sqrt(i)\n\t\tprint(\"runs: \" + str(i) + \" completed HW: \" + str(completed_hw))\n\n\t\texceeded_avg = sum(exceeded_vals) / len(exceeded_vals)\n\t\texceeded_S = math.sqrt(exceeded_avg * (1 - exceeded_avg))\n\t\texceeded_hw = (z_val_two_tails * exceeded_S) / math.sqrt(i)\n\t\tprint(\"runs: \" + str(i) + \" exceeded HW: \" + str(exceeded_hw))\n\n\t\tif completed_hw < completed_obj_hw and exceeded_hw < exceeded_obj_hw:\n\t\t\tprint(\"END ITERATIONS\")\n\t\t\tdone = True\n\n\t\tprint(\"---------------\")\n\n\n\tfilename = 'results/Results_' + settings['FileSizeGB'] + '_' + settings['TorrentThreshold'] + '_' + settings['HTTPDownThreshold'] \\\n\t\t+ '_' + settings['HTTPUp'] + '_' + str(random.randint(0,10000)) + '.xlsx'\n\n\tprint(\"Saving XLSX to: \" + filename)\n\twb = xs.Workbook(filename)\n\n\tws = wb.add_worksheet()\n\n\tws.write(0, 1, 'Exceded')\n\tws.write(0, 2, 'Completed')\n\n\ti = 1\n\tfor result in results:\n\t\tws.write(i, 0, i)\n\t\tws.write(i, 1, result['exceeded_proportion'])\n\t\tws.write(i, 2, 
result['completed_count'])\n\t\ti += 1\n\n\tws.write(i, 0, 'average')\n\tws.write(i, 1, exceeded_avg)\n\tws.write(i, 2, completed_avg)\n\ti += 1\n\tws.write(i, 0, 'half width')\n\tws.write(i, 1, exceeded_hw)\n\tws.write(i, 2, completed_hw)\n\n\twb.close()", "def run_experiments():\n if True: # Change to False when done finding max_scoring_num_rolls\n six_sided_max = max_scoring_num_rolls(six_sided)\n print('Max scoring num rolls for six-sided dice:', six_sided_max)\n four_sided_max = max_scoring_num_rolls(four_sided)\n print('Max scoring num rolls for four-sided dice:', four_sided_max)\n\n if False: # Change to True to test always_roll(8)\n print('always_roll(8) win rate:', average_win_rate(always_roll(8)))\n\n if True: # Change to True to test bacon_strategy\n print('bacon_strategy win rate:', average_win_rate(bacon_strategy))\n\n if True: # Change to True to test swap_strategy\n print('swap_strategy win rate:', average_win_rate(swap_strategy))\n\n if True:\n print('final_strategy win rate:', average_win_rate(final_strategy))\n\n \"*** You may add additional experiments as you wish ***\"", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def target_portfolio_simulation(num_of_years=30, trials=100, method='normal'):\n print(\"Running method target_portfolio_simulation()\")\n\n sim_fia_cv = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_base_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_port_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_income = pd.DataFrame(index=range(num_of_years + 1))\n\n read_income_inputs = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_model_inputs',\n index_col=[0])\n\n read_returns_est = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_assets_returns_estimates',\n index_col=[0])\n\n read_asset_weights = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='asset_weights',\n index_col=[0])\n\n # read_asset_weights.drop(read_asset_weights.index[-1], axis=0, inplace=True)\n\n # read random returns for simulation\n read_normal = pd.read_csv(src + 'median_returns_unsorted.csv', index_col=[0], parse_dates=True)\n cols = [read_normal.columns[c].split('_')[1] for c in np.arange(len(read_normal.columns))]\n read_normal.rename(columns=dict(zip(list(read_normal.columns), cols)), inplace=True)\n\n read_small = pd.read_csv(src + 'median_returns_smallest.csv', index_col=[0], 
parse_dates=True)\n read_small.rename(columns=dict(zip(list(read_small.columns), cols)), inplace=True)\n\n read_large = pd.read_csv(src + 'median_returns_largest.csv', index_col=[0], parse_dates=True)\n read_large.rename(columns=dict(zip(list(read_large.columns), cols)), inplace=True)\n\n assets_col_names = list(read_normal.columns)\n\n tickers = list(read_asset_weights.index)\n wts = np.array(read_asset_weights.loc[:, 'base'])\n\n def asset_median_returns(data, ticker):\n return data.filter(regex=ticker).median(axis=1)\n\n # dataframe for unsorted returns (normal)\n median_returns_normal = read_normal.copy()\n median_returns_normal.loc[:, 'portfolio_return'] = median_returns_normal.dot(wts)\n median_normal_fia = pd.DataFrame({'FIA': asset_median_returns(read_normal, 'FIA')})\n\n # dataframe for smallest to largest returns\n median_returns_smallest = read_small.copy()\n median_returns_smallest.loc[:, 'portfolio_return'] = median_returns_smallest.dot(wts)\n median_smallest_fia = pd.DataFrame({'FIA': asset_median_returns(read_small, 'FIA')})\n\n # dataframe for largest to smallest returns\n median_returns_largest = read_large.copy()\n median_returns_largest.loc[:, 'portfolio_return'] = median_returns_largest.dot(wts)\n median_largest_fia = pd.DataFrame({'FIA': asset_median_returns(read_large, 'FIA')})\n\n years = list(range(0, num_of_years + 1))\n income_cols = ['year', 'strategy_term', 'index_returns', 'term_ret', 'term_ret_with_par', 'term_annualize',\n 'ann_net_spread', 'term_ret_netspr', 'high_inc_benefit_base', 'rider_fee', 'eoy_income',\n 'contract_value']\n\n term = int(read_income_inputs.loc['term', 'inputs'])\n fia_ret = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Returns']\n fia_risk = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Risk']\n par_rate = float(read_income_inputs.loc['par_rate', 'inputs'])\n spread = float(read_income_inputs.loc['spread', 'inputs'])\n bonus_term = int(read_income_inputs.loc['bonus_term', 'inputs'])\n premium = float(read_income_inputs.loc['premium', 'inputs'])\n income_bonus = float(read_income_inputs.loc['income_bonus', 'inputs'])\n\n income_starts = int(read_income_inputs.loc['start_income_years', 'inputs'])\n income_growth = float(read_income_inputs.loc['income_growth', 'inputs'])\n rider_fee = float(read_income_inputs.loc['rider_fee', 'inputs'])\n inc_payout_factor = float(read_income_inputs.loc['income_payout_factor', 'inputs'])\n contract_bonus = float(read_income_inputs.loc['contract_bonus', 'inputs'])\n social = float(read_income_inputs.loc['social', 'inputs'])\n inflation = float(read_income_inputs.loc['inflation', 'inputs'])\n wtd_cpn_yield = float(read_income_inputs.loc['wtd_coupon_yld', 'inputs'])\n life_expectancy = int(read_income_inputs.loc['life_expectancy_age', 'inputs'])\n clients_age = int(read_income_inputs.loc['clients_age', 'inputs'])\n\n # ---------------INCOME MODEL--------------------------------------------\n runs = 0\n returns_dict = {}\n asset_dict = {}\n fia_dict = {}\n\n income_df = pd.DataFrame(index=years, columns=income_cols)\n income_df.loc[:, 'year'] = years\n income_df.loc[:, 'strategy_term'] = income_df.loc[:, 'year'] % term\n income_df.loc[:, 'strategy_term'] = income_df['strategy_term'].apply(lambda x: 1 if x == 0 else 0)\n\n if method == 'normal':\n income_df.loc[:, 'index_returns'] = read_normal.loc[:, 'FIA']\n\n elif method == 'smallest':\n income_df.loc[:, 'index_returns'] = read_small.loc[:, 'FIA']\n\n else:\n income_df.loc[:, 'index_returns'] = read_large.loc[:, 'FIA']\n\n # 
income_df.loc[:, 'index_returns'] = np.random.normal(fia_ret, fia_risk, size=(len(years), 1))\n\n cumprod = (1. + income_df['index_returns']).rolling(window=term).agg(lambda x: x.prod()) - 1\n income_df.loc[:, 'term_ret'] = np.where(income_df.loc[:, 'strategy_term'] == 1, cumprod, 0)\n income_df.loc[:, 'term_ret_with_par'] = income_df.loc[:, 'term_ret'] * par_rate\n income_df.loc[:, 'term_annualize'] = income_df.loc[:, 'term_ret_with_par'].apply(\n lambda x: (1 + x) ** (1 / term) - 1)\n income_df.loc[:, 'ann_net_spread'] = income_df.loc[:, 'term_annualize'] - spread\n income_df.loc[:, 'ann_net_spread'] = np.where(income_df.loc[:, 'strategy_term'] == 1,\n income_df.loc[:, 'ann_net_spread'], 0)\n income_df.loc[:, 'term_ret_netspr'] = income_df.loc[:, 'ann_net_spread'].apply(lambda x: (1 + x) ** term - 1)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'high_inc_benefit_base'] = premium * (1 + income_bonus)\n\n elif counter <= min(bonus_term, income_starts):\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base'] * \\\n (1 + income_growth)\n else:\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base']\n\n income_df.loc[:, 'rider_fee'] = income_df.loc[:, 'high_inc_benefit_base'] * rider_fee\n income_df.loc[:, 'eoy_income'] = np.where(income_df.loc[:, 'year'] > income_starts,\n income_df.loc[:, 'high_inc_benefit_base'] * inc_payout_factor, 0)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'contract_value'] = premium * (1 + contract_bonus)\n\n elif income_df.loc[counter, 'strategy_term'] == 1:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee']\n x2 = (x1 * (1 + income_df.loc[counter, 'term_ret_netspr'])) - income_df.loc[counter, 'eoy_income']\n income_df.loc[counter, 'contract_value'] = x2\n\n else:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee'] - \\\n income_df.loc[counter, 'eoy_income']\n\n income_df.loc[counter, 'contract_value'] = x1\n\n # variable stores the income number that is used in the base and fia portfolio calcs.\n\n income_from_fia = income_df.loc[income_df.index[-1], 'eoy_income']\n\n income_df.loc[:, 'contract_value'] = income_df.loc[:, 'contract_value'].apply(lambda x: 0 if x <= 0 else x)\n\n sim_fia_cv.loc[:, str(runs)] = income_df.loc[:, 'contract_value']\n\n # --------------------BASE MODEL---------------------------------------------\n base_wts = read_asset_weights.loc[:, 'base']\n base_assets = list(base_wts.index)\n base_weights = list(base_wts.values)\n base_returns = list(read_returns_est.loc[:, 'Annualized Returns'].values)\n base_std = list(read_returns_est.loc[:, 'Annualized Risk'].values)\n\n base_investment = float(read_income_inputs.loc['risky_assets', 'Base'])\n adv_fees = float(read_income_inputs.loc['advisor_fees', 'Base'])\n\n # -------------------required income----------------------------------\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n cpn_income_base = base_investment * wtd_cpn_yield\n\n # ----------------------RANDOM RETURNS--------------------------\n r_cols = base_assets\n boy_value = ['bv_{}'.format(name) for name in base_assets]\n eoy_value = ['ev_{}'.format(name) for name in base_assets]\n\n random_returns = pd.DataFrame(index=income_df.index, columns=r_cols)\n\n for c in 
range(len(r_cols)):\n ret = np.random.normal(base_returns[c], base_std[c], size=(len(random_returns.index), 1))\n\n if method == 'smallest':\n random_returns = read_small.copy()\n\n elif method == 'largest':\n random_returns = read_large.copy()\n\n else:\n random_returns = read_normal.copy()\n\n base_df = random_returns.copy()\n fia_portfolio_df = random_returns.copy()\n port_investment = float(read_income_inputs.loc['risky_assets', 'FIA'])\n cpn_income_port = port_investment * wtd_cpn_yield\n\n # -------------BASE PORTFOLIO----------------------------\n for name in boy_value:\n base_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment for c in range(len(boy_value))]\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total_net_fees'] = 0.0\n base_df.loc[counter, 'income'] = 0.0\n base_investment = base_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - base_df.loc[\n counter, 'adv_fees']\n\n # --coupon payment is invested back into the risky portfolio until the income is withdrawn----\n base_investment = base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n else:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = income_needed + social\n\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees'] - \\\n base_df.loc[counter, 'income']\n\n base_investment = base_df.loc[counter, 'total_net_fees']\n\n base_df.loc[:, 'adj_total'] = base_df.loc[:, 'total_net_fees'].apply(lambda x: x if x > 0 else 0)\n sim_base_total.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_net_fees']\n sim_base_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'income']\n\n # ----------------------------FIA PORTFOLIO----------------------------------------------\n for name in boy_value:\n fia_portfolio_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_net_fees'] = 0.0\n fia_portfolio_df.loc[counter, 'income'] = 0.0\n port_investment = fia_portfolio_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees'] + cpn_income_port\n\n else:\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = income_needed + social\n\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] - \\\n fia_portfolio_df.loc[counter, 'income']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n sim_port_total.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'income']\n\n fia_portfolio_df.loc[:, 'adj_total'] = fia_portfolio_df.loc[:, 'total_net_fees'].apply(\n lambda x: x if x > 0 else 0)\n\n # ---------income breakdown for Base portfolio----------------------------------\n base_df.to_csv(src + 'base_port_detail.csv')\n sim_base_total.to_csv(src + 'base_ending_values.csv')\n income_breakdown_base = pd.DataFrame(sim_base_total.quantile(0.5, axis=1))\n income_breakdown_base.loc[:, 'income_from_portfolio'] = sim_base_income.quantile(0.5, axis=1)\n income_breakdown_base.loc[:, 'fia_income'] = 0.0\n income_breakdown_base.loc[:, 'social_security_income'] = social\n income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n\n income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_base.loc[:, 'income_from_portfolio'][\n income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n axis=1)\n\n # ------------Block Ends-------------------------------------------------------------\n\n # ---------income breakdown for FIA portfolio----------------------------------\n fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n\n income_breakdown_port = pd.DataFrame(sim_port_total.quantile(0.5, axis=1))\n income_breakdown_port.loc[:, 'income_from_portfolio'] = sim_port_income.quantile(0.5, axis=1)\n income_breakdown_port.loc[:, 'fia_income'] = income_from_fia\n income_breakdown_port.loc[:, 'social_security_income'] = social\n income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n\n income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_port.loc[:, 'income_from_portfolio'][\n income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n axis=1)\n\n # ------------Block Ends-------------------------------------------------------------\n q_cut = [0.0, 0.05, 0.25, 0.5, 0.75, 0.95, 1.0]\n sim_base_income[sim_base_total < income_needed] = 0.0\n\n sim_port_income[sim_port_total < income_net_fia_income] = 0\n\n sim_port_income = sim_port_income + income_from_fia\n\n # base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 
0.90])\n\n # port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n\n base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile(q_cut)\n\n port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile(q_cut)\n\n # q_cut = [0.0, .05, 0.25, 0.5, 0.75, 0.95, 1.0]\n cols = ['Min', '5th', '25th', '50th', '75th', '90th', 'Max']\n\n # ----drop year 0--------\n sim_base_total = sim_base_total[1:]\n sim_port_total = sim_port_total[1:]\n\n # ----------------quantile analysis for base terminal value--------------------------\n base_qcut = pd.DataFrame(index=sim_base_total.index, columns=cols)\n for c in range(len(cols)):\n base_qcut.loc[:, cols[c]] = sim_base_total.quantile(q_cut[c], axis=1)\n\n base_qcut.clip(lower=0, inplace=True)\n\n # ----------------------quantile analysis for base income----------------------------\n base_income_qcut = pd.DataFrame(index=sim_base_income.index, columns=cols)\n for c in range(len(cols)):\n base_income_qcut.loc[:, cols[c]] = sim_base_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # base_income_qcut = base_income_qcut.loc[income_starts:]\n\n # -------------quantile analysis for portfolio terminal value ----------------\n port_qcut = pd.DataFrame(index=sim_port_total.index, columns=cols)\n for c in range(len(cols)):\n port_qcut.loc[:, cols[c]] = sim_port_total.quantile(q_cut[c], axis=1)\n\n port_qcut.clip(lower=0, inplace=True)\n\n # ---------------quantile analysis for portfolio income----------------------------\n port_income_qcut = pd.DataFrame(index=sim_port_income.index, columns=cols)\n for c in range(len(cols)):\n port_income_qcut.loc[:, cols[c]] = sim_port_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # port_income_qcut = port_income_qcut.loc[income_starts:]\n\n # ----------probability ending value will be less than 0 at the end of the horizon -----------------------\n base_legacy_risk = (sim_base_total.loc[sim_base_total.index[life_expectancy - clients_age]] < 0).sum() / (\n trials + 1)\n port_legacy_risk = (sim_port_total.loc[sim_port_total.index[life_expectancy - clients_age]] < 0).sum() / (\n trials + 1)\n\n legacy_risk = pd.DataFrame([base_legacy_risk, port_legacy_risk,\n 'Prob. 
of portfolio value less than 0 at the end of the expected life'],\n index=['base', 'fia_portfolio', 'Notes'],\n columns=['Ruin Probability'])\n\n # -----------Year-wise probability of ending value greater than 0 -----------------\n base_psuccess = sim_base_total.apply(lambda x: x > 0).sum(axis=1) / (trials + 1)\n port_psuccess = sim_port_total.apply(lambda x: x > 0).sum(axis=1) / (trials + 1)\n\n # -----------------------WRITING FILES TO EXCEL ---------------------------\n col_names = ['50th', 'age', 'comment']\n writer = pd.ExcelWriter(src + method + '_simulated_income_summary.xlsx', engine='xlsxwriter')\n read_income_inputs.to_excel(writer, sheet_name='inputs_for_income')\n\n read_returns_est.to_excel(writer, sheet_name='asset_returns_estimates')\n\n age_index = list(range(clients_age + 1, clients_age + len(base_qcut) + 1))\n base_qcut.loc[:, 'age'] = age_index\n base_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='base_ending_value_quantiles')\n\n base_income_qcut = base_income_qcut.loc[1:, :]\n base_income_qcut.loc[:, 'age'] = age_index\n base_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_income_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='base_income_quantiles')\n\n port_qcut.loc[:, 'age'] = age_index\n port_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n\n port_income_qcut = port_income_qcut.loc[1:, :]\n port_income_qcut.loc[:, 'age'] = age_index\n port_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n # port_income_qcut.loc[:, 'ending_contract_value'] = sim_fia_cv\n port_income_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='fia_port_income_quantiles')\n\n # prob_success_df = pd.concat([base_psuccess, port_psuccess], axis=1)\n # prob_success_df.rename(columns={prob_success_df.columns[0]: 'prob(ending_value>0)_base',\n # prob_success_df.columns[1]: 'prob(ending_value>0)_port'}, inplace=True)\n\n # prob_success_df.loc[:, 'age'] = age_index\n # prob_success_df.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n # prob_success_df.to_excel(writer, sheet_name='success_probability')\n\n income_breakdown_base = income_breakdown_base.loc[1:, :]\n income_breakdown_base.loc[:, 'age'] = age_index\n income_breakdown_base.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_base.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_breakdown_median')\n\n income_breakdown_port = income_breakdown_port.loc[1:, :]\n income_breakdown_port.loc[:, 'age'] = age_index\n income_breakdown_port.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_port.loc[:, 'ending_contract_value'] = income_df.loc[:, 'contract_value']\n income_breakdown_port.loc[income_starts:, :].to_excel(writer, sheet_name='fia_income_breakdown_median')\n\n if method == 'normal':\n # median_returns_normal.loc[:, 'fia_median_returns'] = median_normal_fia\n median_returns_normal.to_excel(writer, sheet_name='gr_port_median_normal')\n\n elif method == 'smallest':\n # median_returns_smallest.loc[:, 'fia_median_returns'] = median_smallest_fia\n median_returns_smallest.to_excel(writer, sheet_name='gr_port_median_asc')\n\n else:\n # median_returns_largest.loc[:, 'fia_median_returns'] = median_largest_fia\n median_returns_largest.to_excel(writer, 
sheet_name='gr_port_median_desc')\n\n terminal_val = pd.read_csv(src + 'terminal_values.csv', index_col=[0])\n ending_val = pd.read_csv(src + 'ending_values.csv', index_col=[0])\n ending_val_ror = pd.read_csv(src + 'ending_values_ror.csv', index_col=[0])\n\n terminal_val.to_excel(writer, sheet_name='terminal_values')\n ending_val.to_excel(writer, sheet_name='port_ending_values')\n ending_val_ror.to_excel(writer, sheet_name='port_annual_growth')\n\n writer.save()\n\n # -----------------Plotting charts--------------------------------------------\n base_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - Base Portfolio')\n plt.savefig(src + \"quantile_terminal_base.png\")\n plt.close('all')\n\n base_income_qcut.plot(grid=True, title='Quantile Income - Base Portfolio')\n plt.savefig(src + \"quantile_income_base.png\")\n plt.close('all')\n\n base_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - Base Portfolio')\n plt.savefig(src + \"success_probabilty_base.png\")\n plt.close('all')\n\n (1 - base_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - Base Portfolio')\n plt.savefig(src + \"ruin_probability_base.png\")\n plt.close('all')\n\n port_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - FIA Portfolio')\n plt.savefig(src + \"quantile_terminal_fia.png\")\n plt.close('all')\n\n port_income_qcut.plot(grid=True, title='Quantile Income - FIA Portfolio')\n plt.savefig(src + \"quantile_income_fia.png\")\n plt.close('all')\n\n port_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - FIA Portfolio')\n plt.savefig(src + \"success_probabilty_fia.png\")\n plt.close('all')\n\n (1 - port_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - FIA Portfolio')\n plt.savefig(src + \"ruin_probability_fia.png\")\n plt.close('all')\n\n print(\"simulation completed for {}\".format(method))", "def _gen_pert(self, count, **kwargs):\n self._check_pert(**kwargs)\n pert = FairBetaPert(**kwargs)\n rvs = pert.random_variates(count)\n return rvs", "def test():\n \n print('Loading best networks')\n env.guesser, agent.dqn = load_networks(i_episode='best')\n #env.guesser, agent.dqn = load_networks(i_episode='best', avg_reward = )\n\n # predict outcome on test data\n y_hat_test = np.zeros(len(env.y_test))\n y_hat_test_prob = np.zeros(len(env.y_test))\n \n print('Computing predictions of test data')\n n_test = len(env.X_test)\n for i in range(n_test):\n \n if i % 1000 == 0:\n print('{} / {}'.format(i, n_test))\n \n state = env.reset(mode='test', \n patient=i,\n train_guesser=False)\n mask = env.reset_mask()\n \n # run episode\n for t in range(FLAGS.episode_length):\n\n # select action from policy\n action = agent.get_action(state, eps=0, mask=mask)\n mask[action] = 0\n \n # take the action\n state, reward, done, guess = env.step(action, mode='test') \n \n if guess != -1:\n y_hat_test_prob[i] = torch.argmax(env.probs).item()\n \n if done:\n break\n y_hat_test[i] = guess\n \n C = confusion_matrix(env.y_test, y_hat_test)\n print('confusion matrix: ')\n print(C)\n\n acc = np.sum(np.diag(C)) / len(env.y_test)\n\n print('Test accuracy: ', np.round(acc, 3))", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline= True ) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # 
reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def run_experiments():\n if True: # Change to False when done finding max_scoring_num_rolls\n six_sided_max = max_scoring_num_rolls(six_sided)\n print('Max scoring num rolls for six-sided dice:', six_sided_max)\n rerolled_max = max_scoring_num_rolls(reroll(six_sided))\n print('Max scoring num rolls for re-rolled dice:', rerolled_max)\n\n if False: # Change to True to test always_roll(8)\n print('always_roll(8) win rate:', average_win_rate(always_roll(8)))\n\n if False: # Change to True to test bacon_strategy\n print('bacon_strategy win rate:', average_win_rate(bacon_strategy))\n\n if False: # Change to True to test swap_strategy\n print('swap_strategy win rate:', average_win_rate(swap_strategy))\n\n \"*** You may add additional experiments as you wish ***\"", "def run():\n\n Number_repetitions = 1\n Rate = np.zeros((Number_repetitions,1))\n Rate20 = np.zeros((Number_repetitions,1))\n Penalty20 = np.zeros((Number_repetitions, 1))\n\n # Loop to average\n for idx in np.arange(0,Number_repetitions,1):\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0000001, display=True) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n # I've edited the enviroment variable to do the plot creating an completions array\n completions = np.array(e.completions)\n rate = float(completions.sum())/float((len(completions)))\n rate20 = float(completions[-20:].sum())/20\n\n Rate[idx] = rate\n Rate20[idx] = rate20\n\n Wrong = np.array(a.wrong_moves_per_run[-20:]).mean()\n Penalty20[idx] = Wrong\n\n plt.scatter(np.arange(0,len(completions)),completions)\n plt.plot(Wrong)\n plt.xlabel('Trial')\n plt.ylabel('1 = Get in the destination, 0 = did not get')\n plt.title('Reiforcement learning progress')\n plt.legend(['Rate of completion: ' + str(rate) + '. Rate last 20: ' + str(rate20) + '.Mean penalty last 20: ' + str(Wrong)])\n plt.show()\n\n #print 'Accuracy: ' + str(Rate) + '. Mean: ' + str(np.mean(Rate))\n #print 'Mean 20: ' + str(np.mean(Rate20))#'Accuracy 20: ' + str(Rate20) + '. Mean 20: ' + str(np.mean(Rate20))\n #print 'Mean_penalty: ' + str(np.mean(Penalty20))\n\n # Print state table with actions\n #t = 0\n #for state in a.states:\n #print 'State ' + str(state) + '. 
Best action: ' + str((str(np.argmax(a.QTable[t][:]))))\n #t += 1", "def test_optimalagentfinder () :\n def valNetwork (s) : \n s = s.float()\n v = reduce(model.withReluDropout, model.v[:-1], s)\n v = model.v[-1](v)\n return v\n acrobotBases = acrobotRewardBases(np.pi / 8, np.pi / 8)\n fn = random.sample(acrobotBases, k=1).pop()\n agent = findOptimalAgent(fn)\n model = agent.model\n toExternal = lambda x, y : toExternalStateRep([x, y, 0, 0])\n valFn = reduce(compose, [float, valNetwork, torch.tensor, toExternal])\n RFn = compose(fn, toExternal)\n xRange = np.arange(-np.pi, np.pi, 0.1)\n yRange = np.arange(-np.pi, np.pi, 0.1)\n plotFunction(RFn, xRange, yRange, 'theta1', 'theta2', 'R')\n plotFunction(valFn, xRange, yRange, 'theta1', 'theta2', 'V')", "def __trial__(self, trial, practice):\n from klibs.KLEventQueue import pump\n from klibs.KLUserInterface import show_cursor, hide_cursor\n\n # At start of every trial, before setup_response_collector or trial_prep are run, retrieve\n # the values of the independent variables (factors) for that trial (as generated earlier by\n # TrialFactory) and set them as attributes of the experiment object.\n for iv, value in trial.items():\n setattr(self, iv, value)\n\n pump()\n self.setup_response_collector()\n self.trial_prep()\n tx = None\n try:\n if P.development_mode and (P.dm_trial_show_mouse or (P.eye_tracking and not P.eye_tracker_available)):\n show_cursor()\n self.evm.start_clock()\n if P.eye_tracking and not P.manual_eyelink_recording:\n self.el.start(P.trial_number)\n P.in_trial = True\n self.__log_trial__(self.trial())\n P.in_trial = False\n if P.eye_tracking and not P.manual_eyelink_recording:\n self.el.stop()\n if P.development_mode and (P.dm_trial_show_mouse or (P.eye_tracking and not P.eye_tracker_available)):\n hide_cursor()\n self.evm.stop_clock()\n self.trial_clean_up()\n except TrialException as e:\n self.trial_clean_up()\n self.evm.stop_clock()\n tx = e\n if P.eye_tracking and not P.manual_eyelink_recording:\n # todo: add a warning, here, if the recording hasn't been stopped when under manual control\n self.el.stop()\n if tx:\n raise tx", "def RandomVacuumAgent():\n return Agent(RandomAgentProgram(['Right',\n 'Left',\n 'Suck',\n 'NoOp']))", "def test_repetition_vector():\n cosimulations = [\n example.control.gauss_seidel(1., 5., 1.),\n example.control.gauss_seidel(1., 5., 1., True),\n example.control.gauss_seidel(1., 5., 1., True, True),\n example.control.gauss_seidel(1., 5., 1., False, True),\n example.control.gauss_jacobi(1., 5., 1.),\n example.control.multi_rate(1., 5., 1.),\n ]\n for cosimulation in cosimulations:\n sdfg = cs.convert_to_sdf(cosimulation)\n schedule = sdf.calculate_schedule(sdfg)\n network, hs, _, _ = cosimulation\n _, connections = network\n repetitions = cs.repetition_vector(connections, hs)\n for agent in sdfg[0]:\n assert sum(agent == executed for executed in schedule) == repetitions[agent]", "def test_prop_fluctuation(self):\n tmax = 10.0\n dt = 1.0\n\n ini_rate = 80.0\n\n nsteps = int_r(tmax/dt)\n\n tutor = SimpleNeurons(1, out_fct=lambda i: ini_rate + i*20.0/nsteps - 10.0)\n reward = MockReward(lambda _: 1.0)\n tutor_rule = ReinforcementTutorRule(tutor, reward, tau=0,\n constrain_rates=False, ini_rate=ini_rate, learning_rate=1.0,\n use_tutor_baseline=False)\n\n sim = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)\n sim.run(tmax)\n\n drates = (tutor_rule.rates - ini_rate)[:, 0]\n\n fluctuations = (np.arange(nsteps)*20.0/nsteps - 10.0)\n mask = (fluctuations > 0)\n ratio = np.mean(drates[mask] / 
fluctuations[mask])\n\n self.assertLess(np.max(np.abs(drates - ratio*fluctuations)), 1e-6)", "def demo_test():\n u_D = Expression('1 + x[0]*x[0] + 2*x[1]*x[1]')\n kappa = Expression('x[0] + x[1]')\n f = Expression('-8*x[0] - 10*x[1]')\n u = solver(kappa, f, u_D, 6, 4, 1)\n # Dump solution to file in VTK format\n file = File(\"poisson.pvd\")\n file << u\n # Plot solution and mesh\n plot(u)", "def RandomVacuumAgent():\n return Agent(RandomAgentProgram(['Right', 'Left', 'Suck', 'NoOp']))", "def user_interaction(uv, recommended_News, ranked=True):\n\n iv = recommended_News[\"topical_vector\"]\n\n product = simple_doct_product(uv, iv)\n\n epsilon = 10e-5\n\n if (product + epsilon) > 1.0:\n vui = 0.99\n else:\n vui = beta_distribution(product)\n\n # Awared preference\n ita = beta_distribution(0.98)\n pui = vui * ita\n\n rand_num = np.random.random()\n\n if rand_num < pui:\n return True\n else:\n return False", "def step(self, particles, best_state, best_fitness, run_locals):\r\n # continuous testing of inputs\r\n if self.testing_unit.testing_level > 1 and not self.testing_unit.c_test_step_inp(particles,\r\n best_state,\r\n best_fitness,\r\n run_locals):\r\n raise ValueError(\"step won't run, input's aren't valid.\")\r\n # apply the fitness function to get this generations fitness values\r\n fitness = np.empty((particles.shape[0]))\r\n #fitness = np.apply_along_axis(run_locals[\"fitness_function\"], 0, particles[:, 0, :, :]) # hopefully works\r\n for i in range(particles.shape[0]):\r\n fitness[i] = run_locals[\"fitness_function\"](particles[i, 0])\r\n\r\n # find any personal improvements\r\n better = best_fitness < fitness\r\n # set them\r\n best_fitness[better] = fitness[better]\r\n # set their states\r\n best_state[better] = particles[better, 0]\r\n\r\n # find highest of group\r\n best_of_group = np.argmax(best_fitness, axis=0)\r\n\r\n if self.verbosity > 6: # some random high verbosity outputs that were once used for debugging, might give ideas\r\n print(\"step high verb: \")\r\n print(particles[0])\r\n print(particles[:, 1].shape)\r\n print(best_state.shape)\r\n print(np.repeat(best_state[best_of_group][np.newaxis, :], particles[:, 1].shape[0], axis=0).shape)\r\n\r\n # run calculation for the velocity calculation\r\n # Maurice Clerc. Standard Particle Swarm Optimisation. 2012. 
hal-00764996\r\n particles[:, 1] = (run_locals[\"PSO_VELOCITY_WEIGHT\"] * particles[:, 1] +\r\n run_locals[\"PSO_INDIVIDUAL_WEIGHT\"] * np.random.rand(particles[:, 0].shape[0],\r\n particles[:, 0].shape[1],\r\n particles[:, 0].shape[2]) *\r\n (best_state - particles[:, 0]) +\r\n run_locals[\"PSO_GROUP_WEIGHT\"] * np.random.rand(particles[:, 0].shape[0],\r\n particles[:, 0].shape[1],\r\n particles[:, 0].shape[2]) *\r\n (best_state[best_of_group] - particles[:, 0]))\r\n\r\n # run calculation for point calculation\r\n particles[:, 0] = particles[:, 0] + particles[:, 1]\r\n #if True and ((particles[:, 0] < np.array(run_locals[\"axes\"])[:, 0]).any() or \\\r\n # (particles[:, 0] > np.array(run_locals[\"axes\"])[:, 1]).any()):\r\n #print(particles[:, 0].shape)\r\n #mask = np.logical_or(particles[:, 0] < np.array(run_locals[\"axes\"])[:, 0],\r\n # particles[:, 0] > np.array(run_locals[\"axes\"])[:, 1])\r\n #print(particles.shape)\r\n #print(np.arange(particles.shape[0]).shape)\r\n #print(np.arange(particles.shape[0])[mask])\r\n #print(particles[np.argmax(mask), 1])\r\n # clip the particles to be within the axes\r\n particles[:, 0] = np.clip(particles[:, 0],\r\n np.array(run_locals[\"axes\"])[:, 0],\r\n np.array(run_locals[\"axes\"])[:, 1])\r\n #if self.globi < 10:\r\n # self.glob[self.globi] = particles[0, 0, 0, 0]\r\n # self.guub[self.globi] = particles[0, 1, 0, 0]\r\n # self.glub[self.globi] = best_state[best_of_group][0, 0]\r\n # self.globi += 1\r\n #else:\r\n #print(self.glob[:10])\r\n #print(self.guub[:10])\r\n #print(self.glub[:10])\r\n #raise ValueError(self.glob)\r\n\r\n return particles, best_state, best_fitness", "def test_agent(AgentFactory, steps, envs, percepts):\n print ('RUN TEST AGENT')\n envs.add_thing(AgentFactory)\n #envs.run(steps)\n \n agent = AgentFactory\n agent.program(percept)\n #envs.run(steps)\n envs.runPLWumpus(agent, steps)\n #envs.runPLWumpus(steps)\n print(' ------------PLWumpus test agent KB-----------------------')\n print(agent.KB.clauses)\n #print envs.to_string()\n print('test_agent', envs)\n #print agent.KB.clauses\n return agent.performance\n\n def score(env):\n agent = AgentFactory()\n env.add_thing(agent)\n env.run(steps)\n print('test_agent' , env)\n return agent.performance\n\n #return mean(map(score, envs))\n return None", "def _objective(self, trial, X, y, weights=None, split=None):\n\n # Generate even weights if none\n if weights is None:\n weights = pd.Series(np.ones(len(y)), index=y.index)\n else:\n weights = pd.Series(weights, index=y.index)\n\n # Execute trial function\n try:\n res = eval(self.function)\n except:\n raise RuntimeError(f\"Optuna execution error: {self.function}\")\n\n # If indicator result is tuple, select the one of interest\n if isinstance(res, tuple):\n res = res[self.idx]\n\n # Ensure result is a dataframe with same index as X\n res = pd.DataFrame(res, index=X.index)\n\n # If indicator result is dataframe, select the one of interest\n if len(res.columns) > 1:\n res = pd.DataFrame(res.iloc[:, self.idx])\n\n # y may be a subset of X, so reduce result to y and convert to series\n res_y = res.reindex(y.index).iloc[:, 0].replace([np.inf, -np.inf], np.nan)\n\n # Save all trial results for pruning and reporting\n # Only the best trial will eventually be saved to limit storage requirements\n self.res_y.append(res_y) # Save results\n\n # Indicator result may be all NANs based on parameter set\n # Return FALSE and alert\n if np.isnan(res_y).sum() / len(res_y) > .95: # Most or all NANs\n self.res_y_corr.append(np.zeros(len(y)))\n if split 
is not None:\n return tuple([False] * (len(split) - 1))\n else:\n return False\n\n # Obtain correlation for entire dataset\n if self.spearman:\n corr = _weighted_spearman(np.array(y), np.array(res_y), np.array(weights))\n else:\n corr = _weighted_pearson(np.array(y), np.array(res_y), np.array(weights))\n\n # Save correlation for res_y\n self.res_y_corr.append(corr)\n\n # Multi-objective optimization\n # Obtain correlation to target for each split for Optuna to maximize\n if split is not None:\n mo = []\n for i, e in enumerate(split):\n if i == 0:\n s = e\n continue\n\n # y could be a subset of X, use index of X to filter y\n idx = X[s:e].index\n\n # Filter y based on X split\n y_se = np.array(y[y.index.isin(idx)]).astype('float64')\n\n # Filter y predictions based on X split\n res_y_se = np.array(res_y[res_y.index.isin(idx)]).astype('float64')\n\n # Filter weights based on X split\n weights_se = np.array(weights[weights.index.isin(idx)]).astype('float64')\n\n if np.isnan(res_y_se).sum() / len(res_y_se) > .95:\n return tuple([False]*(len(split)-1))\n\n if self.spearman:\n mo.append(_weighted_spearman(y_se, res_y_se, weights_se))\n else:\n mo.append(_weighted_pearson(y_se, res_y_se, weights_se))\n s = e\n return tuple(mo)\n\n # Single objective optimization return corr for entire dataset\n else:\n return corr", "def test_run(make_runner: Callable[..., TargetFunctionRunner]) -> None:\n runner = make_runner(target, use_instances=True)\n run_info = TrialInfo(config=2, instance=\"test\", seed=0, budget=0.0)\n\n # submit runs! then get the value\n runner.submit_trial(run_info)\n result = next(runner.iter_results(), None)\n\n assert result is not None\n\n run_info, run_value = result\n\n assert run_value.cost == 4\n assert run_value.status == StatusType.SUCCESS", "def run_test_cases(self):\n line = (\n '{reindeer} can fly {speed} km/s for {time} seconds'\n ', but then must rest for {rest} seconds.'\n )\n inputs = (\n line.format(reindeer='Comet', speed=14, time=10, rest=127),\n line.format(reindeer='Dancer', speed=16, time=11, rest=162),\n line.format(reindeer='Vixen', speed=18, time=12, rest=207),\n line.format(reindeer='Prancer', speed=20, time=13, rest=264),\n )\n test_cases = (\n solver.TestCase('\\n'.join(inputs[:1]), 2660, 2503),\n solver.TestCase('\\n'.join(inputs[:2]), 2660, 1564),\n solver.TestCase('\\n'.join(inputs[:3]), 2660, 1101),\n solver.TestCase('\\n'.join(inputs), 2660, 994),\n solver.TestCase('\\n'.join(inputs[1:]), 2640, 1201),\n solver.TestCase('\\n'.join(inputs[2:]), 2592, 1517),\n solver.TestCase('\\n'.join(inputs[3:]), 2540, 2503),\n )\n for test_case in test_cases:\n self._run_test_case(test_case)", "def simulate(player,environment,n_trials=1000,verbose=False):\n environment.player = player\n rewards = []\n \n for i in range(1,n_trials+1):\n \n if i % (n_trials/5) == 0:\n if verbose:\n print (\"Loading game {}\".format(i))\n try:\n result = environment.play_game()\n rewards.append(result)\n except Exception:\n tb.print_exc(file=sys.stdout)\n \n return rewards", "def run():\n step = 0\n vehicles = []\n # persons = []\n while traci.simulation.getMinExpectedNumber() > 0:\n\t\n\n # Adding the new vehicle to the list of vehicles\n if len(traci.simulation.getLoadedIDList()) > 0:\n vehicles = vehicles + traci.simulation.getLoadedIDList()\n\n # Removing from the list of vehicles those who were removed\n if len(traci.simulation.getArrivedIDList()) > 0:\n vehicles = [x for x in vehicles if x not in traci.simulation.getArrivedIDList()]", "def test_10_test_model(self, example):\n res = 
example.calc_model()\n print(example.trips_ij)\n total_trips_target = example.persons_gi.sum()\n total_trips_actual = example.trips_ij.sum()\n np.testing.assert_almost_equal(total_trips_target, total_trips_actual)", "def eval_test(self, rng_key, svi_state):\n def body_fn(i, loss_sum):\n rng_key_i = random.fold_in(rng_key, i) \n rng_key_i, rng_key_ls, rng_key_var, rng_key_sigma = random.split(rng_key_i, 4)\n \n length_i = numpyro.sample(\"length\", dist.InverseGamma(1,.1), rng_key=rng_key_ls)\n var_i = numpyro.sample(\"var\", dist.LogNormal(0,0.1), rng_key=rng_key_var)\n sigma_i = numpyro.sample(\"noise\", dist.HalfNormal(0.1), rng_key=rng_key_sigma)\n \n batch = self.gp_predictive(rng_key_i, self.x\n , ls=length_i, var=var_i, sigma=sigma_i\n )\n\n loss = self.svi.evaluate(svi_state, batch['y']) / self.batch_size\n loss_sum += loss\n return loss_sum\n\n loss = lax.fori_loop(0, self.num_test, body_fn, 0.0)\n loss = loss / self.num_test\n\n return loss", "def test_variability(self):\n # some reproducible arbitrariness\n np.random.seed(343143)\n\n n = 10\n t_max = 20.0\n dt = 0.1\n G = RandomLayer(n)\n\n M1 = simulation.EventMonitor(G)\n\n sim1 = simulation.Simulation(G, M1, dt=dt)\n sim1.run(t_max)\n \n M2 = simulation.EventMonitor(G)\n sim2 = simulation.Simulation(G, M2, dt=dt)\n sim2.run(t_max)\n\n self.assertNotEqual(len(M1.t), 0)\n self.assertNotEqual(len(M2.t), 0)\n self.assertNotEqual(M1.t, M2.t)", "def predictions_0(data):\n predictions=[]\n for _, passenger in data.iterrows():\n #Predict the survival of 'passenger'\n predictions.append(0)\n #Return our predictions\n return pd.Series(predictions)\n #make the predictions\n predictions=predictions_0(data)\n \n '''\n Question 1\n Using the RMS Titanic data,how accurate would a prediction be that none of the passengers survived?\n Hint:Run the code cell below to see the accuracy of this prediction.\n '''\n print accuracy_score(outcomes,predictions)", "def test_score():\n\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.score(testing_features, testing_classes)\n assert False # Should be unreachable\n except ValueError:\n pass", "def _run_test_case(radio, lines):\n calc_reachable_surface_and_people(radio, lines)", "def compute_pg_vars(trajs, policy, baseline, discount, gae_lambda):\n for traj in trajs:\n # Include the last observation here, in case the trajectory is not finished\n baselines = baseline.predict(np.concatenate(\n [traj[\"observations\"], [traj[\"last_observation\"]]]))\n if traj['finished']:\n # If already finished, the future cumulative rewards starting from the final state is 0\n baselines[-1] = 0.\n # This is useful when fitting baselines. 
It uses the baseline prediction of the last state value to perform\n # Bellman backup if the trajectory is not finished.\n traj['returns'] = compute_cumulative_returns(\n traj['rewards'], baselines, discount)\n traj['advantages'] = compute_advantages(\n traj['rewards'], baselines, discount, gae_lambda)\n traj['baselines'] = baselines[:-1]\n\n # First, we compute a flattened list of observations, actions, and advantages\n all_obs = np.concatenate([traj['observations'] for traj in trajs], axis=0)\n all_acts = np.concatenate([traj['actions'] for traj in trajs], axis=0)\n all_advs = np.concatenate([traj['advantages'] for traj in trajs], axis=0)\n all_dists = {\n k: np.concatenate([traj['distributions'][k] for traj in trajs], axis=0)\n for k in trajs[0]['distributions'].keys()\n }\n\n # Normalizing the advantage values can make the algorithm more robust to reward scaling\n all_advs = (all_advs - np.mean(all_advs)) / (np.std(all_advs) + 1e-8)\n\n # Form chainer variables\n all_obs = Variable(all_obs)\n all_acts = Variable(all_acts)\n all_advs = Variable(all_advs.astype(np.float32, copy=False))\n all_dists = policy.distribution.from_dict(\n {k: Variable(v) for k, v in all_dists.items()})\n\n return all_obs, all_acts, all_advs, all_dists", "def test_purity():\n psi = qt.fock(3)\n rho_test = qt.ket2dm(psi)\n test_pure = purity(rho_test)\n assert_equal(test_pure,1)", "def evaluationFunction(self, currentGameState, action):\r\n # Useful information you can extract from a GameState (pacman.py)\r\n successorGameState = currentGameState.generatePacmanSuccessor(action)\r\n newPos = successorGameState.getPacmanPosition()\r\n newFood = successorGameState.getFood()\r\n newGhostStates = successorGameState.getGhostStates()\r\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\r\n\r\n \"*** YOUR CODE HERE ***\"\r\n #evaluation function - assigning weights to important aspects of the game state(good food, ghosts, high scaredTimes of ghosts)\r\n #retVal - value to be returned after calculating the weights\r\n retVal = 0\r\n \r\n #Getting the distance of pacman from food\r\n foodDist = 'Inf'\r\n for i in newFood.asList():\r\n #Need to get the closest food \r\n foodDist = min(foodDist, manhattanDistance(i, newPos))\r\n \r\n #weight for min food dist\r\n if(foodDist != 0):\r\n retVal += 1.0/(1000*float(foodDist))\r\n else:\r\n retVal = 0\r\n \r\n #Getting the distance of pacman from ghosts\r\n ghostDist = 0\r\n for j in newGhostStates:\r\n #max dist from ghosts\r\n ghostDist = max(ghostDist, manhattanDistance(j.getPosition(), newPos))\r\n #min dist from ghosts\r\n ghostMinDist = min('Inf', manhattanDistance(j.getPosition(), newPos))\r\n \r\n #weight for min dist from ghost\r\n if ghostMinDist < 2:\r\n retVal -= 1000\r\n \r\n #Getting the scaredTimes of the ghosts and adding weights\r\n for k in newScaredTimes:\r\n retVal += k \r\n \r\n #Final retVal\r\n retVal = retVal + successorGameState.getScore()\r\n \r\n return retVal", "def runThompson(self):\n \n #Init vars, N number of user sessions, d=number of ads\n N = self.myDS.shape[0] \n d = self.myDS.shape[1] \n total_reward=0\n self.opt_selected=[]\n \n #Declare vars to count to calculate upper bounds\n number_of_rewards_1 = [0] * d\n number_of_rewards_0 = [0] * d\n \n #Calcultate confidance bounds\n for n in range(0,N):\n ad=0\n max_random=0\n for i in range (0,d):\n random_beta = random.betavariate(number_of_rewards_1[i]+1,number_of_rewards_0[i]+1)\n if random_beta>max_random:\n max_random=random_beta\n ad = i\n 
self.opt_selected.append(ad)\n reward=self.myDS.values[n,ad]\n if (reward==1):\n number_of_rewards_1[ad]=number_of_rewards_1[ad]+1\n else:\n number_of_rewards_0[ad]=number_of_rewards_0[ad]+1 \n total_reward=total_reward+reward\n \n return total_reward", "def simulate_traits(sd, num_traits=max_num, heritabilities=['0.01', '0.1', '0.25', '0.5', '0.75', '0.9', '0.99'],\n\t\t\tnum_effects=100):\n\n\tsnps = sd.getSnps()\n\tcpl = sd.getChrPosList()\n\tnum_snps = len(snps)\n\tnum_accessions = len(sd.accessions)\n\n\tsim_traits_dict = {}\n\tfor her in heritabilities:\n\t\th = float(her)\n\t\ttrait_pairs = []\n\t\tsample_indices_list = []\n\t\tsnp_effects_list = []\n\t\tnum_non_overlapping_list = []\n\t\trho_est_list = []\n\t\ttrait1_perc_var_list = []\n\t\ttrait2_perc_var_list = []\n\t\tfor i in range(num_traits):\n\t\t\tif (i + 1) % int(num_traits / 100) == 0:\n\t\t\t\tsys.stdout.write('.')\n\t\t\t\tsys.stdout.flush()\n\t\t\t\t#print (i + 1) / int(num_traits / 100)\n\t\t\t#Simulate trait pair..\n\n\t\t\tnum_non_overlapping = int(round(stats.beta.rvs(4, 2) * num_effects))\n\t\t\tnum_non_overlapping_list.append(num_non_overlapping)\n\t\t\tnum_causatives = num_effects + num_non_overlapping\n\t\t\tsample_indices = random.sample(range(num_snps), num_causatives)\n\t\t\tchosen_snps = sp.array([snps[i] for i in sample_indices])\n\t\t\tc = sp.random.random_integers(0, 1, (num_causatives, 1))\n\t\t\tchosen_snps = sp.absolute(c - chosen_snps)\n\t\t\texp_effects = stats.expon.rvs(scale=1, size=(num_causatives, 1))\n\t\t\t#exp_effects = stats.norm.rvs(scale=1, size=(num_causatives, 1))\n\t\t\tsnp_effects = chosen_snps * exp_effects\n\t\t\tsnp_effects1 = snp_effects[:num_effects]\n\t\t\tsnp_effects2 = snp_effects[-num_effects:]\n\t\t\ttrait1 = sp.sum(snp_effects1, 0)\n\t\t\ttrait2 = sp.sum(snp_effects2, 0)\n\n\t\t\tgv = sp.var(trait1, ddof=1)\n\t\t\terror = stats.norm.rvs(0, 1, size=num_accessions)\n\t\t\tev = sp.var(error, ddof=1)\n\t\t\tn_trait1 = trait1 + error * sp.sqrt(((1.0 - h) / h) * (gv / ev))\n\t\t\ttrait1_perc_var_list.append(sp.var(snp_effects1, 1) / sp.var(n_trait1))\n\t\t\tn_trait1 = (n_trait1 - sp.mean(n_trait1)) / sp.std(n_trait1)\n\t\t\tgv = sp.var(trait2, ddof=1)\n\t\t\terror = stats.norm.rvs(0, 1, size=num_accessions)\n\t\t\tev = sp.var(error, ddof=1)\n\t\t\tn_trait2 = trait2 + error * sp.sqrt(((1.0 - h) / h) * (gv / ev))\n\t\t\ttrait2_perc_var_list.append(sp.var(snp_effects2, 1) / sp.var(n_trait2))\n\t\t\tn_trait2 = (n_trait2 - sp.mean(n_trait2)) / sp.std(n_trait2)\n\t\t\ttrait_pairs.append((n_trait1, n_trait2))\n\t\t\tsample_indices_list.append(sample_indices)\n\t\t\tsnp_effects_list.append(exp_effects)\n\t\t\trho_est = sp.corrcoef(trait1, trait2)\n\t\t\trho_est_list.append(rho_est)\n\n\t\t\t#Variance contributions.\n\n\n\t\tsim_traits_dict[her] = {'trait_pairs':trait_pairs, 'sample_indices_list':sample_indices_list,\n\t\t\t\t'num_non_overlapping_list':num_non_overlapping_list, 'snp_effects_list':snp_effects_list,\n\t\t\t\t'rho_est_list':rho_est_list, 'trait1_perc_var_list':trait1_perc_var_list,\n\t\t\t\t'trait2_perc_var_list':trait2_perc_var_list}\n\treturn sim_traits_dict" ]
[ "0.7318751", "0.63951796", "0.609098", "0.593011", "0.58897114", "0.5845657", "0.583531", "0.58247733", "0.5823327", "0.5815546", "0.57771176", "0.5770173", "0.5762508", "0.57173645", "0.5677805", "0.56669015", "0.56631994", "0.5633992", "0.5622041", "0.5611543", "0.5583957", "0.5567471", "0.5557582", "0.5543354", "0.5492297", "0.54843444", "0.5466166", "0.5440442", "0.5404676", "0.53973556", "0.539403", "0.5385561", "0.53816307", "0.5363822", "0.53575706", "0.534525", "0.53286153", "0.53177077", "0.5317653", "0.5313496", "0.53088695", "0.53057826", "0.5301401", "0.5298884", "0.528677", "0.5274174", "0.5260887", "0.5246677", "0.5246677", "0.522889", "0.5223124", "0.52180237", "0.52033526", "0.52008337", "0.51975435", "0.51962626", "0.51962626", "0.51962626", "0.51924133", "0.518154", "0.51758957", "0.5168363", "0.5166757", "0.51664144", "0.51663184", "0.5152807", "0.5137563", "0.5136602", "0.5124455", "0.51213866", "0.5120832", "0.51168466", "0.51148343", "0.5104768", "0.5101608", "0.5099373", "0.509775", "0.5090001", "0.50883174", "0.50860804", "0.5084911", "0.50833404", "0.5083021", "0.5080179", "0.5077083", "0.50721586", "0.5068144", "0.50668246", "0.50638306", "0.5061995", "0.50612307", "0.5052497", "0.50508046", "0.5050178", "0.5047501", "0.5042428", "0.50386226", "0.5036409", "0.5034973", "0.503452" ]
0.5252823
47
Method gets user credentials from the storage JSON file. If credentials are not in storage or are invalid, gets new credentials. If stored credentials are expired, refreshes them.
def get_credentials(self, **kwargs):
    creds_file = os.path.join(kwargs['user_dir'], 'credentials.json')

    # Getting credentials from Storage
    store = file.Storage(creds_file)
    creds = store.get()

    # Validating or refreshing credentials, if necessary
    if creds is None or creds.invalid:
        flow = client.flow_from_clientsecrets(self.client_secret_file, self.scopes)
        creds = tools.run_flow(flow, store)
    elif creds.access_token_expired:
        creds.refresh(httplib2.Http())
    else:
        pass

    return creds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_user_credentials(self, storage):\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(\n self.client_secrets,\n scope=self.api_scopes,\n message=tools.message_if_missing(self.client_secrets))\n\n # Retrieve credentials from storage.\n # If the credentials don't exist or are invalid run through the installed\n # client flow. The storage object will ensure that if successful the good\n # credentials will get written back to file.\n\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage)\n\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'credentialv_modify.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_stored_credentials(user):\n with open(\"access.json\", \"r\") as f:\n credentials = json.load(f)\n user_creds = credentials[user]\n return user_creds[\"access_token\"], user_creds[\"access_secret\"]", "def get_credentials(self):\n home_dir = os.path.expanduser(\"~\")\n credential_dir = os.path.join(home_dir, \".credentials\")\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, \"autoto.json\")\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, self.auth_flags)\n print(\"Storing credentials to \" + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'clockwise.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatability with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'google-photos-stats.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, flags)\n print('Storing credentials to ' + credential_path)\n return credentials", "def 
get_credentials():\n\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'appsactivity-python-showtime.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n print('Storing credentials to ' + credential_path)\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'thejam_calendar.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'grader.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, tools.argparser.parse_args(args=[]))\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n credential_dir = os.path.realpath('.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path) # stores the users credentials --> TODO: put in database\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n\n credentials = tools.run_flow(flow, store, flags)\n\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n\thome_dir = os.path.expanduser('~')\n\tcredential_dir = os.path.join(home_dir, '.credentials')\n\tif not os.path.exists(credential_dir):\n\t\tos.makedirs(credential_dir)\n\tcredential_path = os.path.join(credential_dir, \n\t\t\t\t\t\t\t\t\t'facebook_updater.json')\n\t\t\t\t\t\t\t\t\t\n\tstore = oauth2client.file.Storage(credential_path)\n\tcredentials = store.get()\n\tif not credentials or credentials.invalid:\n\t\tflow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n\t\tflow.user_agent = APPLICATION_NAME\n\t\tif flags:\n\t\t\tcredentials = tools.run_flow(flow, store, flags)\n\t\tprint ('Storing credentials to ' + credential_path)\n\treturn credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir 
= os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'fb-drive.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sally.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,'drive-python-quickstart.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'admin-directory_v1-NestedGroupSync.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatability with Python 2.6\n credentials = tools.run(flow, store)\n print 'Storing credentials to' + credential_path\n return credentials", "def get_credentials():\n credentials_path = os.path.join(CREDENTIALS_DIR, CREDENTIALS_FILE)\n store = oauth2client.file.Storage(credentials_path)\n credentials = store.locked_get()\n\n if not credentials or credentials.invalid:\n client_secret_path = os.path.join(CREDENTIAL_DIR, CLIENT_SECRET_FILE)\n flow = client.flow_from_clientsecrets(client_secret_path, \n scope='https://www.googleapis.com/auth/admin.directory.resource.calendar',\n redirect_uri='urn:ietf:wg:oauth:2.0:oob')\n\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n\n print(\"Storing credentials to: \" + credentials_path)\n\n\n return 
credentials", "def get_credentials(self):\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir, self.CRED_FILENAME)\r\n \r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\r\n flow.user_agent = self.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'bis-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def get_credentials() -> client.Credentials:\n\n credential_path = os.path.join(HOME_DIR, \"google-credentials.json\")\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(os.path.join(HOME_DIR, CLIENT_SECRET_FILE), SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n # This attempts to open an authorization page in the default web browser, and asks the user\n # to grant the bot access to their data. 
If the user grants permission, the run_flow()\n # function returns new credentials.\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print(\"Storing credentials to \" + credential_path)", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'calendar-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def get_credentials(self):\n try:\n with open(self.credentials_file, 'r') as fh_credentials:\n credentials_dict = json.loads(fh_credentials.read())\n return credentials_dict\n except IOError:\n self.reset_credentials()\n with open(self.credentials_file, 'r') as fh_credentials:\n return json.loads(fh_credentials.read())", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(config['client secret file'], SCOPES)\n flow.user_agent = APPLICATION_NAME\n if args:\n credentials = tools.run_flow(flow, store, args)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n credential_dir = os.getcwd()\n credential_path = os.path.join(credential_dir,\n 'smarking_error_check.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'gmail-python-spam-filter.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser(os.getcwd())\n credential_dir = 
os.path.join(home_dir, '.credentials')\n print(credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'client_secret_OCR.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n print(\"Current folder: \" + os.getcwd())\n flow = client.flow_from_clientsecrets(\n \"../../\" + CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials(self):\n #\n # Why is this not read from the yaml file?\n path = Path(path_expand(self.credential_file)).resolve()\n if not os.path.exists(path):\n os.makedirs(path)\n\n credentials_path = (path / 'google-drive-credentials.json').resolve()\n print(credentials_path)\n\n store = Storage(credentials_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(self.client_secret_file,\n self.scopes)\n flow.user_agent = self.application_name\n #\n # SHOUDL THE FLAGS NOT BE SET IN THE YAML FILE OR DOCOPTS OFTHE COMMAND?\n #\n if self.flags:\n credentials = tools.run_flow(flow, store, self.flags)\n\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n store = Storage(CREDENTIAL_PATH)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, None)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not 
credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n #home_dir = os.path.expanduser('~')\n home_dir = os.path.expanduser('/home/pi/')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'gmail-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n 
os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_stored_credentials(user_id):\n #\n # To instantiate an OAuth2Credentials instance from a Json\n # representation, use the oauth2client.client.Credentials.new_from_json\n # class method.\n user = engine.query(User).filter(userId=user_id).first()\n if user:\n user_dict = user.__dict__\n if user_dict['credentials']:\n # credentials = Credentials.new_from_json(user['credentials'])\n credentials = json.loads(user_dict['credentials'])\n token_expiry = credentials['token_expiry']\n dexp = parser.parse(str(token_expiry))\n dexp = dexp.replace(tzinfo=None)\n dnow = datetime.now()\n\n if dexp > dnow:\n return Credentials.new_from_json(user_dict['credentials'])\n else:\n status_code, data = renew_access_token(client_id=credentials['client_id'],\n client_secret=credentials['client_secret'],\n refresh_token=credentials['refresh_token'],\n )\n if status_code == INT_OK:\n credentials['access_token'] = data['access_token']\n credentials['token_expiry'] = datetime_util(datetime.now() + timedelta(seconds=float(str(data['expires_in']))))\n credentials = Credentials.new_from_json(json_encode(credentials))\n user.update_credentials(credentials.to_json())\n user.sync()\n return credentials\n else:\n return None\n else:\n return None\n return None", "def authorize_credentials():\n credentials = STORAGE.get()\n # If the credentials doesn't exist in the storage location then run the flow\n if credentials is None or credentials.invalid:\n flow = 
flow_from_clientsecrets(CREDENTIAL_JSON, scope=SCOPE)\n http = httplib2.Http()\n credentials = run_flow(flow, STORAGE, http=http)\n return credentials", "def get_credentials():\n store = Storage(CLIENT_CREDENTIALS_FILE)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + CLIENT_CREDENTIALS_FILE)\n return credentials", "def get_credentials(account):\n credential_dir = os.path.join(HOME_DIR, META_DIR, account, \"credentials\")\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'pyDrive.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'reseller-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials(args, my_dirname):\n\n credential_dir = os.path.join(my_dirname, '.credentials')\n if not os.path.exists(credential_dir):\n os.mkdir(credential_dir)\n credential_path = os.path.join(credential_dir, 'sheets.googleapis.com-cotus-checker.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n try:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, args)\n print('Storing credentials to ' + credential_path)\n except (oauth2client.clientsecrets.InvalidClientSecretsError, json.decoder.JSONDecodeError):\n pass\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.logSheets.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n print('Storing credentials to ' + credential_path)\n return credentials", "def load_credentials(cred=\"credentials_prod.json\"):\n if isinstance(cred, dict):\n # Easy way to handle if a function was handed valid credentials\n pass\n elif 
isinstance(cred, str):\n with open(cred, 'r') as f:\n cred = json.load(f)\n else:\n raise ValueError(\"Invalid input cred={0}\".format(cred))\n\n # Check for correct entries\n cred_keys = [ \"access_token\", \"expires_in\", \"refresh_token\", \"scope\", \"token_type\"]\n for k in cred_keys:\n if k not in cred:\n raise ValueError(\"Credentials missing key {0}\".format(k))\n return cred", "def get_credentials(key):\n with open(\"credentials.json\", \"r\") as credentials_file:\n credentials_data = json.load(credentials_file)\n\n try:\n return credentials_data[key]\n except KeyError:\n raise KeyError(f\"Credential {key} was not found in file.\")", "def _load_credentials(creds_file=None):\n\n creds = None\n\n # Validate the credentials file\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit('Could not find a credentials.json file. ' \\\n 'Either pass one as argument or make sure credentials.json exists in ' \\\n 'the current directory or ' + expanduser('~'))\n\n # Creates CACHE_DIR if it does not exist\n # mode 0x777 (the default) is used because the system's umask value is masked out first\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first time.\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES)\n creds = flow.run_local_server(port=0)\n\n # Save the credentials for the next run\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n\n return creds", "def get_credentials():\n # normal, sane way of doing this that really shouldn't be changed\n #home_dir = os.path.expanduser('~')\n #credential_dir = os.path.join(home_dir, '.credentials')\n #if not os.path.exists(credential_dir):\n # os.makedirs(credential_dir)\n #credential_path = os.path.join(credential_dir,'calendar-python-quickstart.json')\n\n # stupid hacky way that I came up with to fix an issue with running this app as root\n credential_path = os.path.join('./credentials','calendar-python-quickstart.json') \n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = './ignore' #os.path.expanduser('./')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n 
flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = './ignore' #os.path.expanduser('./')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def GetAccountNameAndPassword(credential,\n credentials_path=DEFAULT_CREDENTIAL_PATH):\n if (credentials_path == DEFAULT_CREDENTIAL_PATH and not\n os.path.exists(DEFAULT_CREDENTIAL_PATH)):\n cloud_storage.GetIfChanged(\n DEFAULT_CREDENTIAL_PATH, DEFAULT_CREDENTIAL_BUCKET)\n\n with open(credentials_path, 'r') as f:\n credentials = json.load(f)\n c = credentials.get(credential)\n return c['username'], c['password']", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'drive-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'drive-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + 
credential_path)\n return credentials", "def get_credentials(credentials_filename, application_name, client_secret_file, scopes):\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n credentials_filename)\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(client_secret_file, scopes)\n flow.user_agent = application_name\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n# else: # Needed only for compatibility with Python 2.6\n# credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatability with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_creds():\n\tcredentials = None\n\tif os.path.exists('token.pickle'):\n\t\twith open('token.pickle', 'rb') as token:\n\t\t\tcredentials = pickle.load(token)\n\t# If there are no (valid) credentials available, let the user log in.\n\tif not credentials or not credentials.valid:\n\t\tif credentials and credentials.expired and credentials.refresh_token:\n\t\t\tcredentials.refresh(Request())\n\t\telse:\n\t\t\tflow = InstalledAppFlow.from_client_secrets_file('config/sa.json', SCOPES)\n\t\t\tcredentials = flow.run_local_server(port=0)\n\t\t# Save the credentials for the next run\n\t\twith open('token.pickle', 'wb') as token:\n\t\t\tpickle.dump(credentials, token)\n\treturn credentials", "def get_credentials(self):\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'homework_logger-gmail-api.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\n flow.user_agent = self.APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store)\n 
print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n credential_dir = os.path.dirname(os.path.realpath(CLIENT_SECRET_FILE))\n credential_path = os.path.join(\n credential_dir, 'sheets.googleapis.com-endosys-events.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials(self):\r\n \r\n try:\r\n import argparse\r\n #flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\r\n if self.noauth == True:\r\n flags = tools.argparser.parse_args(args=['--noauth_local_webserver'])\r\n else:\r\n flags = tools.argparser.parse_args(args=[])\r\n except ImportError:\r\n flags = None \r\n \r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,'sheets.googleapis.com-allstarbot.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n secret = Path(self.CLIENT_SECRET_FILE)\r\n if secret.exists():\r\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\r\n else:\r\n print(\"client_secret.json not found, using env vars\")\r\n if not os.environ.get('client_id') or not os.environ.get('client_secret'): \r\n print(\"env vars client_id and client_secret not found. 
canceling\")\r\n raise Exception(\"client secret error\")\r\n else:\r\n flow = OAuth2WebServerFlow(\r\n os.environ.get('client_id'),\r\n os.environ.get('client_secret'),\r\n self.SCOPES) \r\n \r\n flow.params['access_type'] = 'offline'\r\n flow.user_agent = self.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(args.clientSecretFile, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if args:\n credentials = tools.run_flow(flow, store, args)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not 
os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n #home_dir = os.path.expanduser('~')\n home_dir = (HOME_DIR)\n credential_dir = os.path.join(home_dir, '.credentials')\n print(\"Credentials folder: \",credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def load_creds(self):\n home = expanduser(\"~\")\n with open(os.path.join(home, 'creds.json')) as creds_file:\n self.creds_data = json.load(creds_file)", "def get_credentials(data_dir_path, client_secret_file_path, scopes, application_name):\n #home_dir = os.path.expanduser('~')\n #credential_dir = os.path.join(home_dir, '.credentials')\n credential_dir = os.path.join(data_dir_path, \".credentials\")\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(client_secret_file_path, scopes)\n flow.user_agent = application_name\n\n try:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\n except ImportError:\n flags = None\n\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def getCredentials(scopes):\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,'admin-directory_v1-python-quickstart.json')\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, scopes)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def _get_credentials(flags):\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'drive-python-visualizerhelptext.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not 
credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def getCredentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials(self):\n #### DONT EDIT.\n SCOPES = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\n CLIENT_SECRET_FILE = 'client_secret.json'\n APPLICATION_NAME = 'reporter'\n ####\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'reporter_creds.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store)\n return credentials", "def acquire_oauth2_credentials():\n if os.path.isfile(\"%s/cre.json\" % file_path):\n f = open(\"%s/cre.json\" % file_path, \"r\")\n credentials = client.OAuth2Credentials.from_json(f.read())\n 
f.close()\n else: \n flow = client.flow_from_clientsecrets(\n \"%s/client_secrets.json\" % file_path,\n scope='https://www.googleapis.com/auth/analytics.readonly',\n redirect_uri='urn:ietf:wg:oauth:2.0:oob')\n auth_uri = flow.step1_get_authorize_url()\n webbrowser.open(auth_uri)\n auth_code = input('Enter the authentication code: ')\n credentials = flow.step2_exchange(auth_code)\n write_credentials(\"%s/cre.json\" % file_path, credentials)\n return credentials", "def get_credential_storage(filename, client_id, user_agent, scope,\n warn_on_readonly=True):\n # Recreate the legacy key with these specific parameters\n key = {'clientId': client_id, 'userAgent': user_agent,\n 'scope': util.scopes_to_string(scope)}\n return get_credential_storage_custom_key(\n filename, key, warn_on_readonly=warn_on_readonly)", "def get_credentials():\n scope = ['https://www.googleapis.com/auth/adsense.readonly',\n 'https://www.googleapis.com/auth/analytics.readonly']\n\n #get your client secret file\n cwd = os.getcwd()\n pathToFile = os.path.join(cwd,\n 'YOURCLIENTSECRETPATH.json')\n print \"This is your client secret path:\",pathToFile\n\n #first part of the folow process\n #https://developers.google.com/api-client-library/python/guide/aaa_oauth\n flow = oauth2client.client.flow_from_clientsecrets(pathToFile,scope,redirect_uri='urn:ietf:wg:oauth:2.0:oob')#'urn:ietf:wg:oauth:2.0:oob'\n \n #check to see if you have something already\n storage = oauth2client.file.Storage('creds.dat') #this is a made up file name\n credentials = storage.get()\n \n #if they dont exist already go ahead and get them\n if not credentials or credentials.invalid:\n #get authorization url\n auth_url = flow.step1_get_authorize_url()\n #open the url to get a code\n webbrowser.open(auth_url)\n\n #enter the code to reauth\n codeStr = str(raw_input('enter code here:'))\n credentials = flow.step2_exchange(codeStr)\n #save the code to the dat\n storage = oauth2client.file.Storage('creds.dat')\n storage.put(credentials)\n \n return credentials\n\n else:\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def 
get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(\n credential_dir, 'sheets.googleapis.com-python-quickstart.json'\n )\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def getUser(self, username):\r\n if (self._credCache is None or\r\n os.path.getmtime(self.filename) > self._cacheTimestamp):\r\n self._cacheTimestamp = os.path.getmtime(self.filename)\r\n self._credCache = dict(self._loadCredentials())\r\n return self._credCache[username]", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(resource_path(CLIENT_SECRET_FILE), SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials(self):\n home_dir = os.path.expanduser('~')\n # credential_dir = os.path.join(home_dir, '.credentials')\n credential_dir = '.credentials'\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 
'drive-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(GoogleDocsConverter.CLIENT_SECRET_FILE, GoogleDocsConverter.SCOPES)\n flow.user_agent = GoogleDocsConverter.APPLICATION_NAME\n if self.flags:\n credentials = tools.run_flow(flow, store, self.flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def _get_credentials(self):\n if self.config_file:\n with open(self.config_file) as f:\n config_str = f.read()\n credentials_dict = json.loads(config_str)\n self.credentials = credentials_dict[self.account][self.auth_type]\n else:\n self.credentials = {\n \"account\": os.environ.get('SNOWSQL_ACCOUNT'),\n \"user\": os.environ.get('SNOWSQL_USER'),\n \"password\": os.environ.get('SNOWSQL_PWD')\n }", "def get_creds_file(self):\n filename = self.filename\n\n home = str(Path.home())\n filepath = home + os.sep + filename\n self.path = filepath\n if not os.path.isfile(filepath):\n return False\n\n j = json.load(open(filepath))\n self.keys = j\n return j", "def _get_credential(self):\n creds = None\n\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', self.config['SCOPES'])\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n self.service = build('drive', 'v3', credentials=creds)", "def get_stored_user():\n filename = 'nombre_usuario.json'\n\n try:\n with open(filename) as f_object:\n username = json.load(f_object)\n except FileNotFoundError:\n return None\n else:\n return username", "def get_credential(host,\n credential_file=DEFAULT_CREDENTIAL_FILE,\n globus_credential_file=DEFAULT_GLOBUS_CREDENTIAL_FILE,\n config_file=DEFAULT_CONFIG_FILE,\n requested_scope=None,\n force_scope_lookup=False,\n match_scope_tag=\"deriva-all\"):\n # load deriva credential set first\n credentials = read_credential(credential_file or DEFAULT_CREDENTIAL_FILE, create_default=True)\n creds = credentials.get(host, credentials.get(host.lower(), dict()))\n\n # load globus credentials and merge, if present\n if globus_credential_file is not None and \\\n os.path.isfile(globus_credential_file) and \\\n os.path.getsize(globus_credential_file) > 10: # Don't load empty json\n try:\n globus_client = GlobusNativeLogin(hosts=[host], config_file=config_file)\n scope_map = globus_client.hosts_to_scope_map(hosts=[host], match_scope_tag=match_scope_tag,\n force_refresh=force_scope_lookup,\n warn_on_discovery_failure=True if not creds else False)\n tokens = globus_client.is_logged_in(exclude_defaults=True)\n if tokens:\n # 1. look for the explicitly requested scope in the token store, if specified\n token = globus_client.find_access_token_for_scope(requested_scope, tokens)\n if not token:\n # 2. 
try to determine the scope to use based on host-to-scope(s) mappings\n token = globus_client.find_access_token_for_host(host,\n scope_map,\n tokens,\n match_scope_tag=match_scope_tag)\n if token:\n creds[\"bearer-token\"] = token\n except Exception as e:\n logging.warning(\"Exception while getting Globus credentials: %s\" % format_exception(e))\n\n return creds or None", "def get(self):\n self._lock.acquire()\n try:\n f = open(self._filename, 'r')\n credentials = pickle.loads(f.read())\n f.close()\n credentials.set_store(self.put)\n except:\n credentials = None\n self._lock.release()\n\n return credentials", "def _get_credentials(self):\n\n scopes = 'https://www.googleapis.com/auth/drive'\n client_secret_file = '%s/config/client_secret.json' % PROJECT_DIR\n application_name = 'Drive API Quickstart'\n\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n\n credential_path = os.path.join(credential_dir, 'drive-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(client_secret_file, scopes)\n flow.user_agent = application_name\n\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatability with Python 2.6\n credentials = tools.run(flow, store)\n\n return credentials", "def get_credentials():\n try:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\n except ImportError:\n flags = None\n\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'appsactivity-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(GoogleGsuiteAPI.CLIENT_SECRET_FILE, GoogleGsuiteAPI.SCOPES)\n flow.user_agent = GoogleGsuiteAPI.APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n return ServiceAccountCredentials.from_json_keyfile_dict(SERVICE_ACCOUNT, scopes = SCOPES)", "def get_creds(\n user=\"default\", config=None, scope=default_scope, creds_dir=None, save=True\n):\n config = config or get_config()\n try:\n if \"private_key_id\" in config:\n return SACredentials.from_service_account_info(config, scopes=scope)\n\n if not isinstance(user, str):\n raise ConfigException(\n \"Need to provide a user key as a string if not using a service account\"\n )\n\n if creds_dir is None:\n creds_dir = get_config_dir() / \"creds\"\n\n creds_file = Path(creds_dir) / user\n\n if creds_file.exists():\n # need to convert Path to string for python 2.7\n return OAuthCredentials.from_authorized_user_file(str(creds_file))\n\n flow = InstalledAppFlow.from_client_config(config, scope)\n creds = flow.run_local_server(\n host=\"localhost\",\n port=8182,\n authorization_prompt_message=\"Please visit this URL: {url}\",\n success_message=\"The auth flow is complete; you may close this window.\",\n open_browser=False,\n )\n\n if save:\n creds_data = {\n \"refresh_token\": creds.refresh_token,\n \"token_uri\": 
creds.token_uri,\n \"client_id\": creds.client_id,\n \"client_secret\": creds.client_secret,\n \"scopes\": creds.scopes,\n }\n\n ensure_path(creds_dir)\n creds_file.write_text(decode(json.dumps(creds_data)))\n\n return creds\n except Exception:\n exc_info = sys.exc_info()\n raise ConfigException(*exc_info[1:])", "def _get_credential(self, key):\n return self._data.get(key, None)", "def GetCredentials(self, credentials_path: str,\n client_secrets_path: str) -> Optional[Any]:\n scopes = ['openid', 'https://www.googleapis.com/auth/userinfo.email']\n credentials = None\n\n # Load credentials file if it exists\n if os.path.exists(credentials_path):\n try:\n credentials = Credentials.from_authorized_user_file(\n credentials_path, scopes)\n except ValueError as exception:\n msg = f'Error loading credentials: {exception!s}'\n self.ModuleError(msg, critical=True)\n # Refresh credentials using existing refresh_token\n if credentials and credentials.refresh_token:\n self.logger.debug('Found a refresh token. Requesting new id_token...')\n try:\n credentials.refresh(Request())\n except google_exceptions.RefreshError as exception:\n self.logger.debug(f'Error refreshing credentials: {exception!s}')\n else:\n # No credentials file, acquire new credentials from secrets file.\n self.logger.debug(\n 'Could not find existing credentials. Requesting new tokens.')\n try:\n appflow = flow.InstalledAppFlow.from_client_secrets_file(\n client_secrets_path, scopes)\n except FileNotFoundError as exception:\n msg = f'Client secrets file not found: {exception!s}'\n self.ModuleError(msg, critical=True)\n\n self.logger.info(\n 'Starting local HTTP server on localhost:8888 for OAUTH flow. '\n 'If running dftimewolf remotely over SSH you will need to tunnel '\n 'port 8888.')\n appflow.run_local_server(host='localhost', port=8888, open_browser=False)\n credentials = appflow.credentials\n\n # Save credentials\n if credentials:\n with open(credentials_path, 'w', encoding='utf-8') as token:\n token.write(credentials.to_json())\n\n return credentials" ]
[ "0.7427812", "0.7198125", "0.71123177", "0.7038576", "0.7036431", "0.7013569", "0.6985227", "0.69778216", "0.6926867", "0.6853255", "0.683662", "0.676247", "0.6717914", "0.6712703", "0.6711394", "0.6686284", "0.66799074", "0.66706616", "0.6668772", "0.66675913", "0.6653017", "0.6646677", "0.66466665", "0.66422653", "0.663915", "0.66196084", "0.6583781", "0.65727955", "0.6572328", "0.6565939", "0.6565939", "0.6565884", "0.65500414", "0.65500414", "0.65500414", "0.65500414", "0.65500414", "0.6513575", "0.6473456", "0.64618117", "0.6459248", "0.6453072", "0.6440553", "0.64357656", "0.6419818", "0.641764", "0.6417609", "0.64142144", "0.6408888", "0.6408888", "0.64011455", "0.6371436", "0.6369899", "0.6369899", "0.6352146", "0.63458496", "0.63162386", "0.6308073", "0.6303761", "0.62918186", "0.62878674", "0.6283258", "0.6280944", "0.62753654", "0.6274006", "0.6274006", "0.6274006", "0.6274006", "0.62542975", "0.6243345", "0.62301135", "0.62284875", "0.62281674", "0.6217159", "0.6212776", "0.6212776", "0.6188854", "0.618856", "0.61758804", "0.61756116", "0.61719835", "0.61719835", "0.61719835", "0.61719835", "0.61697745", "0.6163607", "0.61631113", "0.61068165", "0.6099544", "0.607882", "0.6069842", "0.60695654", "0.60542583", "0.605319", "0.6016519", "0.60138565", "0.6004236", "0.5994429", "0.5972194", "0.59677124" ]
0.70376235
4
Equal-width binning: take a uniform partition of the sample value range. Buckets include the right boundary and exclude the left boundary. Namely, boundaries=[0., 1., 2.] generates buckets (-inf, 0.], (0., 1.], (1., 2.], and (2., +inf)
def uniform(feature, bins):
    # equal-width interior boundaries, anchored at the feature minimum
    t = (feature.max() - feature.min()) / bins
    return [feature.min() + t * i for i in range(1, bins)]
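A minimal usage sketch (an editor's illustration, not part of the dataset record), assuming `feature` is a numpy array and using numpy's digitize with right=True to realize the (left, right] bucket convention described in the query; the sample values are made up:

import numpy as np

# Boundaries [0., 1., 2.] yield buckets (-inf, 0.], (0., 1.], (1., 2.], (2., +inf).
x = np.array([-0.5, 0.0, 0.5, 1.0, 1.5, 2.0, 3.0])
print(np.digitize(x, [0., 1., 2.], right=True))      # -> [0 0 1 1 2 2 3]

# The same convention applied to boundaries produced by uniform() above.
feature = np.array([5.0, 7.0, 9.0, 11.0, 13.0, 15.0])
boundaries = uniform(feature, bins=5)                 # equal-width boundaries: 7, 9, 11, 13
print(np.digitize(feature, boundaries, right=True))   # -> [0 0 1 2 3 4]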
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_bucket_boundaries(self):\n\n lower = self._lower_bounds[0]\n for i in xrange(1, self.total_buckets):\n upper = self._lower_bounds[i]\n yield (lower, upper)\n lower = upper\n\n yield (lower, float('Inf'))", "def bucket_boundaries(self, bucket):\n\n if bucket < 0 or bucket >= self.total_buckets:\n raise IndexError('bucket %d out of range' % bucket)\n if bucket == self.total_buckets - 1:\n return (self._lower_bounds[bucket], float('Inf'))\n return (self._lower_bounds[bucket], self._lower_bounds[bucket + 1])", "def test_bins(self):\n min_val = 0\n max_val = 1\n buckets = 10\n values_per_bucket = 10\n\n import numpy\n\n data = list(numpy.linspace(min_val, max_val, buckets * values_per_bucket))\n bins = numpy.linspace(min_val, max_val + sys.float_info.epsilon, buckets + 1)\n digitized = numpy.digitize(data, bins)\n counts = numpy.bincount(digitized)\n self.assertEqual(buckets + 1, len(counts))\n self.assertEqual(0, counts[0])\n for bucket in counts[1:]:\n self.assertEqual(values_per_bucket, bucket)", "def get_bucket_boundaries(feature):\n return np.unique(np.percentile(feature, range(0, 100))).tolist()", "def get_bins(val: List[float]) -> List[float]:\n r_min = np.min(val)\n r_max = np.max(val)\n min_bins = 2\n max_bins = 50\n # Calculate bin width using either Freedman-Diaconis or Sturges estimator\n bin_edges = np.histogram_bin_edges(val, bins=\"auto\")\n if len(bin_edges) < min_bins:\n return list(np.linspace(start=r_min, stop=r_max, num=min_bins))\n elif len(bin_edges) <= max_bins:\n return list(bin_edges)\n # Clamp to max_bins by estimating a good bin range to be more robust to outliers\n q75, q25 = np.percentile(val, [75, 25])\n iqr = q75 - q25\n width = 2 * iqr / max_bins\n start = max((q75 + q25) / 2 - iqr, r_min)\n stop = min(start + max_bins * width, r_max)\n # Take the minimum of range and 2x IQR to account for outliers\n edges = list(np.linspace(start=start, stop=stop, num=max_bins))\n prefix = [r_min] if start > r_min else []\n suffix = [r_max] if stop < r_max else []\n return prefix + edges + suffix", "def test_range_argument_ignored(self):\n bins_range = (1, 2)\n\n bin_edges, hist, _, _ = hist_w_unc(\n self.input,\n bins=self.bin_edges,\n bins_range=bins_range,\n normed=False,\n )\n\n # check if we end up with the same bin edges anyway\n np.testing.assert_array_almost_equal(self.bin_edges, bin_edges)\n np.testing.assert_array_almost_equal(self.hist, hist)", "def createBins():\n theBins = []\n startFreq = 60\n for a in range(32):\n endFreq = int(startFreq*1.12+12)\n theRange = (startFreq, endFreq)\n startFreq = endFreq\n theBins.append(theRange)\n return(theBins)", "def setup_bins(self):\n width = int((self.max - self.min) / self.bin_size)\n bins = {\n i * width + self.min: (idx, idx + self.bin_size)\n for i, idx in enumerate(range(0, len(self.nums), self.bin_size))\n }\n return bins", "def createBinsByEqualWidth(self, data, colIndex, numOfBins):\n colData = list(map(lambda x: float(x[colIndex]), data))\n minVal, maxVal = min(colData), max(colData)\n width = round(((maxVal - minVal) / numOfBins), 3)\n bins = {\"value<=\"+str(width): lambda x: x <= width}\n for i in range(1, numOfBins-1):\n bins[str(width) + '<value<=' + str(width+width)] = (lambda x: width < x <= width + width)\n width = width + width\n bins[\"value>\" + str(width)] = (lambda x: x > width)\n return bins", "def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1):\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x 
+ 1, int(x * length_bucket_step))\n return boundaries", "def _bucket_boundaries(self, max_length, min_length=8, length_bucket_step=1.1):\n assert min_length <= max_length\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries", "def binning(data, low, high):\n if len(data) == 0: return 1\n\n mask1 = (data >= low)\n mask2 = (data < high)\n mask3 = numpy.logical_and(mask1, mask2)\n data = data[mask3]\n\n if len(data) == 0: return 10\n\n data.sort()\n q1 = data[int(math.floor(0.25*len(data)))]\n q3 = data[int(math.floor(0.75*len(data)))]\n binwidth = 2. * (q3 - q1) / len(data)**(1./3.)\n if binwidth > 0.:\n return max(10, int(math.ceil((high - low)/binwidth)))\n else:\n return 10", "def generate_binned_values( lower_lim, upper_lim, chr_length, snps_per_chr, indels_per_chr, resolution ):\n\t\n\tsnp_data = []\n\tindel_data = []\n\twhile True:\n\t\tif upper_lim >= chr_length:\n\t\t\tbreak\n\t\telse:\n\t\t\tsnp_tmp = []\n\t\t\tindel_tmp = []\n\t\t\tfor SNP in snps_per_chr:\n\t\t\t\tif SNP <= upper_lim and SNP > lower_lim:\n\t\t\t\t\tsnp_tmp.append( 'X' )\n\t\t\tfor indel in indels_per_chr:\n\t\t\t\tif indel <= upper_lim and indel > lower_lim:\n\t\t\t\t\tindel_tmp.append( 'X' )\n\t\t\tsnp_data.append( len( snp_tmp ) )\n\t\t\tindel_data.append( len( indel_tmp ) )\n\t\tupper_lim += resolution\n\t\tlower_lim += resolution\n\treturn max( snp_data ), max( indel_data ), snp_data, indel_data", "def test_bins(self):\n\n for filename in ['%s/population_padang_1.asc' % TESTDATA,\n '%s/test_grid.asc' % TESTDATA]:\n\n R = read_layer(filename)\n rmin, rmax = R.get_extrema()\n\n for N in [2, 3, 5, 7, 10, 16]:\n linear_intervals = R.get_bins(N=N, quantiles=False)\n\n assert linear_intervals[0] == rmin\n assert linear_intervals[-1] == rmax\n\n d = (rmax - rmin) / N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], rmin + i * d)\n\n quantiles = R.get_bins(N=N, quantiles=True)\n A = R.get_data(nan=True).flat[:]\n\n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask)\n l2 = len(A)\n\n if filename == '%s/test_grid.asc' % TESTDATA:\n # Check that NaN's were removed\n assert l1 == 35\n assert l2 == 30\n\n # Assert that there are no NaN's\n assert not numpy.alltrue(numpy.isnan(A))\n\n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements / N\n\n # Count elements in each bin and check\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n\n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no\n # more than 1\n assert abs(count - refcount) <= 1\n assert abs(count - average_elements_per_bin) <= 3\n else:\n # The last bin is allowed vary by more\n pass\n\n i0 = i1", "def bucket_for_value(self, value):\n\n # bisect.bisect_left is wrong because the buckets are of [lower, upper) form\n return bisect.bisect(self._lower_bounds, value) - 1", "def binrange(_min, _max, stepsize, include_upper=False):\n _min = _min - _min % stepsize\n _max = _max - _max % stepsize + stepsize * (1 + include_upper)\n return np.arange(_min, _max, stepsize)", "def set_bin_(data, bin_num, scale=100.):\n temp_data = -numpy.sort(-numpy.abs(data))\n\n bin_size = len(temp_data) / bin_num * 2\n bins = numpy.array([temp_data[int(i * bin_size)] for i in range(1, int(bin_num / 2))])\n bins = numpy.sort(numpy.append(numpy.append(-bins, [0.]), bins))\n bound = 
numpy.max(numpy.abs(data)) * scale\n bins = numpy.sort(numpy.append(-bound, numpy.append(bins, bound)))\n return bins", "def test_range_argument(self):\n # we test with range from 0 to 2, with 3 bins -> [0, 0.66, 1.33, 2] exp. bins\n bins_range = (0, 2)\n bins_exp = np.array([0, 2 / 3, 1 + 1 / 3, 2])\n hist_exp = np.array([1, 2, 0])\n\n bin_edges, hist, _, _ = hist_w_unc(\n self.input,\n bins=self.n_bins,\n bins_range=bins_range,\n normed=False,\n )\n\n # check if we end up with the same bin edges anyway\n np.testing.assert_array_almost_equal(bins_exp, bin_edges)\n np.testing.assert_array_almost_equal(hist_exp, hist)", "def logbin_distribution(data, nbins = 30):\n # define the support of the distribution\n lower_bound = min(data)\n upper_bound = max(data)\n # define bin edges\n log = np.log10\n lower_bound = log(lower_bound) if lower_bound > 0 else -1\n upper_bound = log(upper_bound)\n bins = np.logspace(lower_bound, upper_bound, nbins)\n\n # compute the histogram using numpy\n y, _ = np.histogram(data, bins=bins, density=True)\n # for each bin, compute its midpoint\n x = bins[1:] - np.diff(bins) / 2.0\n # if bin is empty, drop it from the resulting list\n drop_indices = [i for i,k in enumerate(y) if k == 0.0]\n x = [k for i,k in enumerate(x) if i not in drop_indices]\n y = [k for i,k in enumerate(y) if i not in drop_indices]\n return x, y", "def test_bins(self):\n\n \n for filename in ['data/population_padang_1.asc', \n 'data/test_grid.asc']: \n \n R = read_coverage(filename)\n \n min, max = R.get_extrema() #use_numeric=True)\n \n for N in [2,3,5,7,10,16]:\n linear_intervals = R.get_bins(N=N, quantiles=False) \n \n assert linear_intervals[0] == min\n assert linear_intervals[-1] == max \n \n d = (max-min)/N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], min + i*d) \n \n \n quantiles = R.get_bins(N=N, quantiles=True)\n\n A = R.get_data(nan=True).flat[:] \n \n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask) \n l2 = len(A)\n \n if filename == 'data/test_grid.asc':\n # Check that NaN's were removed\n \n assert l1 == 35\n assert l2 == 30\n \n \n # Assert that there are no NaN's \n assert not numpy.alltrue(numpy.isnan(A))\n \n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements/N\n \n # Count elements in each bin and check\n\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n \n \n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no more than 1\n assert abs(count - refcount) <= 1 \n assert abs(count - average_elements_per_bin) <= 3\n \n \n else:\n # The last bin is allowed vary by more\n pass\n \n i0 = i1", "def buckets(self, disable_last_bucket_padding=False):\n if self.__total_count == 0:\n return\n\n # We use the minimum value for the lower bound of the first bucket.\n previous = self.__min\n for i in range(0, len(self.__counts)):\n if self.__counts[i] > 0:\n yield self.__counts[i], previous, self.__bucket_ranges[i]\n previous = self.__bucket_ranges[i]\n\n if self.__overflow == 0:\n return\n\n if not disable_last_bucket_padding:\n padding = 0.01\n else:\n padding = 0.0\n\n # We use the maximum value for the upper bound of the overflow range. 
Note, we added 0.01 to make sure the\n # boundary is exclusive to the values that fell in it.\n yield self.__overflow, self.__bucket_ranges[-1], self.__max + padding", "def bincalc(nbin=0.1,bmin=5,bmax=2000):\n\n logbmin=np.log10(bmin)\n logbmax=np.log10(bmax)\n\n logbins=np.arange(logbmin,logbmax,nbin)\n\n bins=10**logbins\n\n #bins=np.linspace(bmin,bmax,60)\n return (bins)", "def create_uniform_grid(low, high, bins=(10, 10)):\n grid = [np.linspace(low[dim], high[dim], bins[dim] + 1)[1:-1]\n for dim in range(len(bins))]\n\n return grid", "def FixedWidthBucketer(width, num_finite_buckets=100):\n return Bucketer(width=width, growth_factor=0.0,\n num_finite_buckets=num_finite_buckets)", "def from_bins(bins):\n return 0.5*(bins[1:] + bins[:-1])", "def __init__(self, bucket_ranges):\n # An array of the histogram bucket boundaries, such as 1, 10, 30, 100\n self.__bucket_ranges = list(bucket_ranges)\n last_value = None\n for i in self.__bucket_ranges:\n if last_value is not None and i < last_value:\n raise ValueError(\"The bucket_ranges argument must be sorted.\")\n else:\n last_value = i\n\n # __counts[i] holds the total number of values we have seen >= to __boundaries[i-1] and < __boundaries[i]\n self.__counts = [0] * len(bucket_ranges)\n # __overflows holds the number of values >= __boundaries[-1]\n self.__overflow = 0\n # The minimum and maximum values seen.\n self.__min = None\n self.__max = None\n # The total number of values collected.\n self.__total_count = 0\n # The sum of the values collected\n self.__total_values = 0", "def _prep_buckets(buckets, len_x):\n if isinstance(buckets, int):\n lims = np.linspace(0, len_x-1, buckets+1, dtype=int)\n else:\n lims = buckets\n buckets = len(lims)-1\n\n # Determine center of each bucket\n mids = np.rint(np.convolve(lims, np.ones(2), 'valid') / 2).astype(int)\n mids[0] = 0\n mids[-1] = len_x - 1\n\n return lims, mids", "def uniform_binning(ts, bins):\n symb = np.asarray(bins * (ts - ts.min()) / (ts.max() - ts.min() + 1e-12), dtype=int)\n return symb", "def _make_bins(start, stop, step):\n bin_edges = np.arange(start, stop + step, step)\n\n return bin_edges", "def bin_data(x, N_bins=100, xmin=0.0, xmax=1.0, density=False):\n\n hist_y, hist_edges = np.histogram(x, bins=N_bins, range=(xmin, xmax), density=density)\n hist_x = 0.5 * (hist_edges[1:] + hist_edges[:-1])\n hist_sy = np.sqrt(hist_y)\n hist_mask = hist_y > 0\n\n return hist_x, hist_y, hist_sy, hist_mask", "def _get_distribution ( bin_size, Flag = 0 ):\n \n # Get the step size\n lower = 0; upper = step = 1/bin_size\n Dist = {}\n \n # make bins\n while upper <= 1:\n Dist[Flag] = [ lower, upper ]\n Flag += 1\n lower = upper\n upper += step\n return Dist", "def hist_bin_opt (x, minbin=20, maxbin=600, spacing=10, N_trials=1):\n bin_checks = np.arange(minbin, maxbin, spacing)\n # bin_checks = np.linspace(150, 300, 16)\n costs = np.zeros(len(bin_checks))\n i = 0\n # this might be vectorizable in np\n for n_bins in bin_checks:\n # use np.histogram to do the numerical minimization\n pdf, bin_edges = np.histogram(x, n_bins)\n # calculate bin width\n # some discrepancy here but should be fine\n w_bin = np.unique(np.diff(bin_edges))\n if len(w_bin) > 1: w_bin = w_bin[0]\n # calc mean and var\n kbar = np.mean(pdf)\n kvar = np.var(pdf)\n # calc cost\n costs[i] = (2.*kbar - kvar) / (N_trials * w_bin)**2.\n i += 1\n # find the bin size corresponding to a minimization of the costs\n bin_opt_list = bin_checks[costs.min() == costs]\n bin_opt = bin_opt_list[0]\n return bin_opt", "def make_buckets(entries, 
low_bit: BitPos, cap_bit: BitPos) -> \"list[Bucket]\":\n num_bits = cap_bit - low_bit\n assert num_bits > 0\n buckets = [Bucket() for _ in range(0, 2 ** num_bits)]\n mask = (1 << num_bits) - 1\n for (codepoint, width) in entries:\n buckets[(codepoint >> low_bit) & mask].append(codepoint, width)\n return buckets", "def gen_buckets(num_buckets, data, max_val=256):\n\n default_size_of_bucket = int(len(data)/3)\n print(f\"Bucket size: {default_size_of_bucket}\")\n all_buckets = []\n for i in range(num_buckets):\n curr_buck = [0 for _ in range(max_val)]\n np.random.shuffle(data)\n curr_sample = data[0:default_size_of_bucket]\n for i in range(len(curr_sample)):\n curr_buck[curr_sample[i]] += 1\n all_buckets.append(curr_buck)\n return all_buckets", "def optimalBins(x,factor=1):\n sz = optimalBinSize(x) * factor\n return np.arange(x.min(), x.max(), sz)", "def frequency_bucket_floor(bucket_index):\n\tfraction = bucket_index / FREQUENCY_BUCKETS\n\tlog_range = [math.log(edge, 2) for edge in HEARING_RANGE]\n\tlog_floor = log_range[0] + fraction * (log_range[1] - log_range[0])\n\treturn 2 ** log_floor", "def bucket_dist(g_var, x_var, all_bins, tar_bin, label, df):\n return (\n df.groupby(g_var)[x_var]\n .value_counts(normalize=True, bins=all_bins)\n [:, tar_bin]\n .to_frame()\n .assign(Interval = label)\n )", "def make_age_bins(bin_size=1, lower=40, upper=69):\n bins = np.arange(lower, upper+1, bin_size)\n bins = np.append(bins, upper+1)\n print(bins)\n return bins", "def test_irregular(self):\n import numpy as np\n import histogrammar\n\n h = histogrammar.IrregularlyBin([0, 10, 20, 40, 100])\n h.fillnumpy([-5, 5, 5, 50, 10, 100, 1000, 50, 50])\n\n np.testing.assert_array_equal(h.bin_entries(), [1., 2., 1., 0., 3., 2.])\n np.testing.assert_array_equal(h.bin_edges(), [float('-inf'), 0., 10., 20., 40., 100., float('inf')])\n np.testing.assert_array_equal(h.bin_centers(), [float('-inf'), 5., 15., 30., 70., float('inf')])\n assert h.num_bins() == 6\n assert h.n_bins == 6\n np.testing.assert_almost_equal(h.mpv, 70.)\n\n np.testing.assert_array_equal(h.bin_entries(10, 40), [1., 0.])\n np.testing.assert_array_equal(h.bin_edges(10, 40), [10., 20., 40.])\n np.testing.assert_array_equal(h.bin_centers(10, 40), [15., 30.])\n assert h.num_bins(10, 40) == 2\n\n np.testing.assert_array_equal(h.bin_entries(5, 110), [2., 1., 0., 3., 2.])\n np.testing.assert_array_equal(h.bin_edges(5, 110), [0., 10., 20., 40., 100., float('inf')])\n np.testing.assert_array_equal(h.bin_centers(5, 110), [5., 15., 30., 70., float('inf')])\n assert h.num_bins(5, 110) == 5", "def createBinsByEntropy(self, data, structure, colName, numOfBins):\n splits = self.miningCalculator.getBestSplitsInDataByInfoGain(data, structure, colName, numOfBins-1)\n splits.sort()\n bins = {\"value<=\"+str(splits[0]): lambda x: x <= splits[0]}\n if len(splits) > 1:\n for i in range(1, numOfBins-1):\n bins[str(splits[i-1]) + '<value<=' + str(splits[i])] = (lambda x: splits[i-1] < x <= splits[i])\n bins[\"value>\" + str(splits[len(splits)-1])] = (lambda x: x > splits[len(splits)-1])\n return bins", "def getMistagBinBounds(config, mistag, mistagdistrib):\n mistag.setBins(1000, 'MistagBinBounds')\n from ROOT import RooArgSet, RooHistPdf, RooDataHist\n if (mistagdistrib.InheritsFrom('RooAbsData') and not\n mistagdistrib.InheritsFrom('RooDataHist')):\n # ok, unbinned data set, get only tagged events, and form a binned clone\n argset = RooArgSet(mistag)\n mistagdistrib = mistagdistrib.reduce(\n RooFit.SelectVars(argset), RooFit.cut('0 != qt'))\n 
ROOT.SetOwnership(mistagdistrib, True)\n dhist = RooDataHist(\n '%s_binned' % mistagdistrib.GetName(),\n '%s_binned' % mistagdistrib.GetName(),\n mistagdistrib.get(), 'MistagBinBounds')\n dhist.add(mistagdistrib)\n mistagdistrib = dhist\n if mistagdistrib.InheritsFrom('RooAbsData'):\n # convert a binned dataset to a RooHistPdf\n dhist = mistagdistrib\n mistagdistrib = RooHistPdf('%s_pdf' % dhist.GetName(),\n '%s_pdf' % dhist.GetName(), RooArgSet(mistag), dhist)\n if (mistagdistrib.InheritsFrom('RooAbsPdf')):\n # use createCdf to obtain the CDF\n cdfroofit = mistagdistrib.createCdf(\n RooArgSet(mistag), RooArgSet(mistag))\n ROOT.SetOwnership(cdfroofit, True)\n def cdf(x):\n oldval = mistag.getVal()\n mistag.setVal(x)\n retVal = cdfroofit.getVal()\n mistag.setVal(oldval)\n return retVal\n if (mistagdistrib.InheritsFrom('RooHistPdf') and\n (abs(cdf(mistag.getMin())) > 1e-9 or\n abs(cdf(mistag.getMax()) - 1.) > 1e-9)):\n # createCdf does not work properly for RooHistPdf in older ROOT\n # versions because RooHistPdf does not support integrals over\n # subranges, so we have to fake this functionality until it's\n # supported by RooFit upstream\n #\n # capture histogram bin boundaries and contents\n print 'WARNING: Your version of RooFit still has buggy analytical ' \\\n 'integrals for RooHistPdf - activating workaround.'\n binboundlist = mistagdistrib.binBoundaries(\n mistag, mistag.getMin(), mistag.getMax())\n ROOT.SetOwnership(binboundlist, True)\n binbounds = [ v for v in binboundlist ]\n del binboundlist\n bincontents = [ ]\n oldval = mistag.getVal()\n for i in xrange(0, len(binbounds) - 1):\n mistag.setVal(0.5 * (binbounds[i] + binbounds[i + 1]))\n bincontents.append(mistagdistrib.getValV(RooArgSet(mistag)))\n mistag.setVal(oldval)\n # build CDF from histogram\n def cdf(x):\n s = 0.\n for i in xrange(0, len(binbounds) - 1):\n if x < binbounds[i]:\n break\n elif x >= binbounds[i + 1]:\n s += bincontents[i]\n else:\n s += (bincontents[i] * (x - binbounds[i]) /\n (binbounds[i + 1] - binbounds[i]))\n break\n return s\n # find x for which f(x) = y by bisection\n def mybisect(y, f, lo, hi):\n initdx = abs(hi - lo)\n flo, fhi = f(lo) - y, f(hi) - y\n if 0. == flo: return lo\n elif 0. == fhi: return hi\n mid = .5 * (lo + hi)\n while (abs(hi - lo) > 1e-15 and abs(hi - lo) / initdx > 1e-15):\n fmid = f(mid) - y\n if 0. 
== fmid: break\n elif flo * fmid < 0.: hi, fhi = mid, fmid\n elif fmid * fhi < 0.: lo, flo = mid, fmid\n else: raise ValueError('no sign change in f(x) between %g and %g'\n % (lo, hi))\n mid = .5 * (lo + hi)\n return mid\n # find binning with roughly same stats by inverting the CDF by bisection\n lo, hi, binsum = mistag.getMin(), mistag.getMax(), cdf(mistag.getMax())\n retVal = [ lo ]\n for i in xrange(1, config['NMistagCategories']):\n retVal.append(mybisect(binsum *\n float(i) / float(config['NMistagCategories']), cdf, lo, hi))\n retVal.append(hi)\n print 'INFO: suggested mistag category bounds: %s' % str(retVal)\n return retVal", "def relative_position_bucket(relative_position,\n bidirectional: bool = True,\n num_buckets: int = 32,\n max_distance: int = 128):\n ret = 0\n relative_position = -relative_position\n if bidirectional:\n assert num_buckets % 2 == 0, 'When bidirectional is True, the number of buckets must be ' \\\n 'divisible by 2.'\n num_buckets //= 2\n ret = ret + (relative_position < 0).astype(np.int32) * num_buckets\n relative_position = np.abs(relative_position)\n else:\n # Clip all the negative values to 0\n relative_position = np.clip(relative_position, a_min=0, a_max=None)\n # Now, the relative_position is in the range [0, inf)\n\n # Half of the buckets deal with the exact increments,\n # i.e., 0, 1, 2, ..., max_exact - 1, where max_exact = num_buckets // 2\n max_exact = num_buckets // 2\n is_small = relative_position < max_exact\n\n # The other half of the buckets are for logarithmically bigger bins in positions up to\n # max_distance\n val_if_large = max_exact + (\n np.log(relative_position.astype(np.float32) / max_exact)\n / math.log(max_distance / max_exact) * (num_buckets - max_exact)).astype(np.int32)\n val_if_large = np.minimum(val_if_large, num_buckets - 1)\n ret = ret + np.where(is_small, relative_position, val_if_large)\n return ret", "def rangeSample(val, minLim, maxLim):\n\tif val < minLim or val > maxLim:\n\t\tval = randint(minLim, maxLim)\n\treturn val", "def boundary(gap, min_tags_in_window, average):\n\tassert min_tags_in_window >= 1;\n\ttemp = 0;\n\tfor i in range(0, min_tags_in_window): temp += poisson(i, average);\n\ttemp = pow(temp, gap+1); \n\treturn temp*temp; # start & end ", "def create_bins(start, end, n_bins):\n bins = np.linspace(start, end, n_bins)\n return bins", "def random_from_bound(bound):\n if (isinstance(bound, tuple)):\n val = np.random.uniform(low = bound[0], high = bound[1])\n else:\n val = 0.0\n return val", "def estimate_bucket_pipeline(bucket_boundaries, num_samples, safe=True):\n if len(bucket_boundaries) < 2:\n raise ValueError('Bucket boundaries must contain at least 2 values')\n\n batch_step = 8\n\n batch_sizes = []\n for boundary in bucket_boundaries:\n batch_size = num_samples / (boundary - 1)\n batch_size = np.floor(batch_size / batch_step) if safe \\\n else np.round(batch_size / batch_step)\n batch_size *= batch_step\n\n if safe and batch_size < batch_step:\n if len(batch_sizes) < 2:\n raise ValueError('Too few samples per batch')\n\n return bucket_boundaries[:len(batch_sizes) - 1], batch_sizes, bucket_boundaries[len(batch_sizes) - 1]\n\n batch_sizes.append(max(batch_step, batch_size.astype(int)))\n\n return bucket_boundaries[:-1], batch_sizes, bucket_boundaries[-1]", "def binarize(h):\n # 1 (value >= 0.5)\n # 0 (value < 0.5)\n b = F.floor(h + 0.5)\n return b", "def initializeDistribution(self):\n if (self.lowerBoundUsed == False and self.upperBoundUsed == False):\n self._distribution = 
distribution1D.BasicWeibullDistribution(self.k,self.lambdaVar,self.low)\n self.lowerBound = self.low\n self.upperBound = sys.float_info.max\n else:\n if self.lowerBoundUsed == False:\n self.lowerBound = self.low\n if self.upperBoundUsed == False:\n self.upperBound = sys.float_info.max\n self._distribution = distribution1D.BasicWeibullDistribution(self.k,self.lambdaVar,self.lowerBound,self.upperBound,self.low)", "def define_bin_intervals(unique_distances: np.ndarray) -> pd.IntervalIndex:\n bin_centers: np.ndarray = np.concatenate(([0], unique_distances))\n\n bin_edges: np.ndarray = np.concatenate(\n [\n bin_centers[:-1] + (bin_centers[1:] - bin_centers[:-1]) / 2,\n bin_centers[-1:] + (bin_centers[-1:] - bin_centers[-2:-1]) / 2,\n ]\n )\n\n return pd.IntervalIndex.from_breaks(breaks=bin_edges)", "def histogram_continuous(name,\n data,\n bucket_min=None,\n bucket_max=None,\n bucket_count=DEFAULT_BUCKET_COUNT,\n step=None,\n description=None):\n summary_metadata = metadata.create_summary_metadata(\n display_name=None, description=description)\n summary_scope = (getattr(tf.summary.experimental, 'summary_scope', None)\n or tf.summary.summary_scope)\n with summary_scope(\n name,\n 'histogram_summary',\n values=[data, bucket_min, bucket_max, bucket_count, step]) as (tag,\n _):\n with tf.name_scope('buckets'):\n data = tf.cast(tf.reshape(data, shape=[-1]), tf.float64)\n if bucket_min is None:\n bucket_min = tf.reduce_min(data)\n if bucket_max is None:\n bucket_max = tf.reduce_min(data)\n range_ = bucket_max - bucket_min\n bucket_width = range_ / tf.cast(bucket_count, tf.float64)\n offsets = data - bucket_min\n bucket_indices = tf.cast(\n tf.floor(offsets / bucket_width), dtype=tf.int32)\n clamped_indices = tf.clip_by_value(bucket_indices, 0,\n bucket_count - 1)\n one_hots = tf.one_hot(clamped_indices, depth=bucket_count)\n bucket_counts = tf.cast(\n tf.reduce_sum(input_tensor=one_hots, axis=0), dtype=tf.float64)\n edges = tf.linspace(bucket_min, bucket_max, bucket_count + 1)\n edges = tf.concat([edges[:-1], [tf.cast(bucket_max, tf.float64)]],\n 0)\n edges = tf.cast(edges, tf.float64)\n left_edges = edges[:-1]\n right_edges = edges[1:]\n tensor = tf.transpose(\n a=tf.stack([left_edges, right_edges, bucket_counts]))\n return tf.summary.write(\n tag=tag, tensor=tensor, step=step, metadata=summary_metadata)", "def create_bin_boundaries(config, epoch_df, data_type, obs_per_bin, verbose=False):\n \n edges = create_edges_set(config, epoch_df, data_type)\n \n boundaries = []\n for edge in edges:\n start, end, freq = edge\n bin_size = freq * obs_per_bin\n boundaries.append(np.arange(start, end, bin_size))\n boundaries = np.concatenate(boundaries)\n \n return boundaries", "def histogramdd(sample, bins=10, range=None, weights=None, density=False):\n if isinstance(sample, cupy.ndarray):\n # Sample is an ND-array.\n if sample.ndim == 1:\n sample = sample[:, cupy.newaxis]\n nsamples, ndim = sample.shape\n else:\n sample = cupy.stack(sample, axis=-1)\n nsamples, ndim = sample.shape\n\n nbin = numpy.empty(ndim, int)\n edges = ndim * [None]\n dedges = ndim * [None]\n if weights is not None:\n weights = cupy.asarray(weights)\n\n try:\n nbins = len(bins)\n if nbins != ndim:\n raise ValueError(\n 'The dimension of bins must be equal to the dimension of the '\n ' sample x.'\n )\n except TypeError:\n # bins is an integer\n bins = ndim * [bins]\n\n # normalize the range argument\n if range is None:\n range = (None,) * ndim\n elif len(range) != ndim:\n raise ValueError('range argument must have one entry per 
dimension')\n\n # Create edge arrays\n for i in _range(ndim):\n if cupy.ndim(bins[i]) == 0:\n if bins[i] < 1:\n raise ValueError(\n '`bins[{}]` must be positive, when an integer'.format(i)\n )\n smin, smax = _get_outer_edges(sample[:, i], range[i])\n num = int(bins[i] + 1) # synchronize!\n edges[i] = cupy.linspace(smin, smax, num)\n elif cupy.ndim(bins[i]) == 1:\n if not isinstance(bins[i], cupy.ndarray):\n raise ValueError('array-like bins not supported')\n edges[i] = bins[i]\n if (edges[i][:-1] > edges[i][1:]).any(): # synchronize!\n raise ValueError(\n '`bins[{}]` must be monotonically increasing, when an '\n 'array'.format(i)\n )\n else:\n raise ValueError(\n '`bins[{}]` must be a scalar or 1d array'.format(i)\n )\n\n nbin[i] = len(edges[i]) + 1 # includes an outlier on each end\n dedges[i] = cupy.diff(edges[i])\n\n # Compute the bin number each sample falls into.\n ncount = tuple(\n # avoid cupy.digitize to work around NumPy issue gh-11022\n cupy.searchsorted(edges[i], sample[:, i], side='right')\n for i in _range(ndim)\n )\n\n # Using digitize, values that fall on an edge are put in the right bin.\n # For the rightmost bin, we want values equal to the right edge to be\n # counted in the last bin, and not as an outlier.\n for i in _range(ndim):\n # Find which points are on the rightmost edge.\n on_edge = sample[:, i] == edges[i][-1]\n # Shift these points one bin to the left.\n ncount[i][on_edge] -= 1\n\n # Compute the sample indices in the flattened histogram matrix.\n # This raises an error if the array is too large.\n xy = cupy.ravel_multi_index(ncount, nbin)\n\n # Compute the number of repetitions in xy and assign it to the\n # flattened histmat.\n hist = cupy.bincount(xy, weights, minlength=numpy.prod(nbin))\n\n # Shape into a proper matrix\n hist = hist.reshape(nbin)\n\n # This preserves the (bad) behavior observed in NumPy gh-7845, for now.\n hist = hist.astype(float) # Note: NumPy uses casting='safe' here too\n\n # Remove outliers (indices 0 and -1 for each dimension).\n core = ndim * (slice(1, -1),)\n hist = hist[core]\n\n if density:\n # calculate the probability density function\n s = hist.sum()\n for i in _range(ndim):\n shape = [1] * ndim\n shape[i] = nbin[i] - 2\n hist = hist / dedges[i].reshape(shape)\n hist /= s\n\n if any(hist.shape != numpy.asarray(nbin) - 2):\n raise RuntimeError('Internal Shape Error')\n return hist, edges", "def user_defined_bin_classification(\n input_df,\n field_name,\n bin_values,\n cmap_diverging=None,\n cmap_sequential=None\n\n ):\n # Check if largest value is large than last bin\n max_real_value = float(input_df[field_name].max())\n min_real_value = float(input_df[field_name].min())\n\n if max_real_value > 0 and min_real_value < 0:\n min_max_plot = True\n else:\n min_max_plot = False\n\n if not min_max_plot:\n\n # If only minus values\n if max_real_value < 0: #only min values\n if min_real_value > bin_values[0]:\n # add \"higher as bin\"\n bin_values.insert(0, min_real_value)\n elif bin_values[0] < min_real_value:\n crit_append_val = False\n raise Exception(\"The minimum user defined bin smaller is larger than minimum existing value\")\n\n if not cmap_sequential:\n cmap, cmap_rgb_colors = norm_cmap(bin_values[:1], cmap='Purples') #'YlOrBr'\n else:\n cmap, cmap_rgb_colors = norm_cmap(bin_values[:1], cmap=cmap_sequential) #'YlOrBr'\n\n else: #only positive values\n if max_real_value > bin_values[-1]:\n # add \"higher as bin\"\n bin_values.append(max_real_value)\n elif bin_values[-1] > max_real_value:\n raise Exception(\"The maximum user 
defined bin value is larger than maximum min: min: {} max: {}\".format(bin_values[-1], max_real_value))\n if not cmap_sequential:\n cmap, cmap_rgb_colors = norm_cmap(bin_values[1:], cmap='Purples')\n else:\n cmap, cmap_rgb_colors = norm_cmap(bin_values[1:], cmap=cmap_sequential)\n\n # e.g. [0, 3, 6] --> generates (0, 3], and (3, 6] bin\n input_df['bin_color'] = pd.cut(\n input_df[field_name],\n bin_values,\n include_lowest=True,\n right=True,\n labels=cmap_rgb_colors)\n\n color_zero = 'grey' # default\n else:\n\n if max_real_value < bin_values[-1]:\n raise Exception(\"The maximum user defined bin value is larger than maximum value {} {}\".format(bin_values[-1], max_real_value))\n elif min_real_value > bin_values[0]:\n raise Exception(\"The minimum user defined bin smaller is larger than minimum existing value\")\n else:\n pass\n\n # Add minimum and maximum value\n bin_values.append(max_real_value)\n bin_values.insert(0, min_real_value)\n\n if not cmap_diverging:\n cmap, cmap_rgb_colors = norm_cmap(bin_values, cmap='coolwarm')\n else:\n cmap, cmap_rgb_colors = norm_cmap(bin_values, cmap=cmap_diverging)\n\n # Reclassify zero value\n positive_bin_colors = []\n minus_bin_colors = []\n minus_bins = []\n positive_bins = [0]\n\n for cnt, i in enumerate(bin_values):\n if i < 0:\n minus_bin_colors.append(cmap_rgb_colors[cnt])\n minus_bins.append(i)\n elif i == 0:\n color_zero = cmap_rgb_colors[cnt]\n else:\n positive_bin_colors.append(cmap_rgb_colors[cnt])\n positive_bins.append(i)\n minus_bins.append(0)\n\n # ----\n # Classify\n # ----\n # Classify values in dataframe and assign color value as \"bin\" column\n minus_dataframe = input_df[field_name][input_df[field_name] < 0].to_frame()\n zero_dataframe = input_df[field_name][input_df[field_name] == 0].to_frame()\n plus_dataframe = input_df[field_name][input_df[field_name] > 0].to_frame()\n\n # e.g. 
[0, 3, 6] --> generates (0, 3], and (3, 6] bin\n minus_dataframe['bin_color'] = pd.cut(\n minus_dataframe[field_name],\n minus_bins,\n include_lowest=True,\n right=True,\n labels=minus_bin_colors)\n zero_dataframe['bin_color'] = [color_zero for _ in range(len(zero_dataframe))] #create list with zero color\n plus_dataframe['bin_color'] = pd.cut(\n plus_dataframe[field_name],\n positive_bins,\n include_lowest=True,\n right=True,\n labels=positive_bin_colors)\n \n # Add bins\n input_df = minus_dataframe.append(zero_dataframe)\n input_df = input_df.append(plus_dataframe)\n\n return input_df, cmap_rgb_colors, color_zero, min_real_value, max_real_value\n '''ax = input_df.plot()\n\n # Calculate color values\n\n #uk_gdf[uk_gdf['name'] == 'E06000024'].plot(ax=ax, facecolor='green', edgecolor='black')\n #uk_gdf[uk_gdf['diff_av_max'] < 0.01].plot(ax=ax, facecolor='blue', edgecolor='black')\n # Convert dict to dataframe\n #df = pd.DataFrame.from_dict(input_df, orient='index')\n\n #df['Coordinates'] = list(zip(df.longitude, df.latitude))\n #df['Coordinates'] = df['Coordinates'].apply(Point)\n\n # Load uk shapefile\n uk_shapefile = gpd.read_file(path_shapefile)\n\n # Assign correct projection\n crs = {'init': 'epsg:27700'} #27700 == OSGB_1936_British_National_Grid\n uk_gdf = gpd.GeoDataFrame(uk_shapefile, crs=crs)\n\n # Transform\n uk_gdf = uk_gdf.to_crs({'init' :'epsg:4326'})\n\n # Plot\n ax = uk_gdf.plot(color='white', edgecolor='black')\n\n # print coordinates\n #world.plot(column='gdp_per_cap', cmap='OrRd', scheme='quantiles');\n\n plt.savefig(fig_path)'''", "def histogram(data,binwidth, xmin,xmax):\n bins = arange(xmin,xmax, binwidth)\n binsc = bins + (0.5 * binwidth)\n try: #FIXME: use updated numpy.histogram\n histo = numpyhisto(data, bins, new=False)[0]\n except:\n histo = numpyhisto(data, bins)[0]\n return binsc[:len(histo)], histo", "def weightHistogram(self, min=None, max=None, nbins=10):\n raise NotImplementedError", "def bins(self, value):\n self.num_bins = int(value)", "def bucketize(point, bucket_size):\r\n return bucket_size * math.floor(point / bucket_size)", "def discrete_uniform_sampler(upper_value):\n return int(np.random.random() * upper_value)", "def histogram_discrete(name,\n data,\n bucket_min,\n bucket_max,\n step=None,\n description=None):\n summary_metadata = metadata.create_summary_metadata(\n display_name=None, description=description)\n summary_scope = (getattr(tf.summary.experimental, 'summary_scope', None)\n or tf.summary.summary_scope)\n with summary_scope(\n name, 'histogram_summary',\n values=[data, bucket_min, bucket_max, step]) as (tag, _):\n with tf.name_scope('buckets'):\n bucket_count = bucket_max - bucket_min + 1\n data = data - bucket_min\n one_hots = tf.one_hot(\n tf.reshape(data, shape=[-1]), depth=bucket_count)\n bucket_counts = tf.cast(\n tf.reduce_sum(input_tensor=one_hots, axis=0), tf.float64)\n edge = tf.cast(tf.range(bucket_count), tf.float64)\n # histogram can not draw when left_edge == right_edge\n left_edge = edge - 1e-12\n right_edge = edge + 1e-12\n tensor = tf.transpose(\n a=tf.stack([left_edge, right_edge, bucket_counts]))\n\n return tf.summary.write(\n tag=tag, tensor=tensor, step=step, metadata=summary_metadata)", "def bucketize(signal, windowsize, overlap):\n bucket_count = len(signal) / (windowsize - overlap) -1\n buckets = numpy.zeros((bucket_count, windowsize/2 + 1))\n hamming = numpy.hamming(windowsize)\n\n step = windowsize - overlap\n for i in xrange(bucket_count):\n start = i * step\n windowed = emphasis(signal[start:start+windowsize]) * 
hamming\n buckets[i] = numpy.abs(scipy.fftpack.fft(windowed)[:windowsize/2 +1])\n\n return buckets", "def create_bin_values(self):\n values = [-float(\"inf\"), self.offset, float(\"inf\")]\n value = self.start\n while self.offset + value <= self.stop:\n values.insert(1, self.offset - value)\n values.insert(-1, self.offset + value)\n value *= self.step\n return values", "def binaryBoundedOneSample(self, low=0., hi=+1., size=1):\n\n thisSample = np.random.uniform(low=low, high=hi, size=size)\n \n return thisSample", "def binning_axis(self) -> int:\r\n return 0", "def unique_binning(t):\n diff= np.unique(t)\n diff= diff[1:] - diff[:-1]\n diff = np.min(diff)/2\n return np.digitize(t, np.hstack([np.unique(t) + diff]))", "def to_bins(reward, num_bins, out=None):\n if out is None:\n out = torch.zeros(num_bins, device=reward.device, dtype=reward.dtype)\n reward = max(0, min(reward.item(), 1))\n ind = math.floor(reward*(num_bins-1))\n out[ind] = 1\n return out", "def createBinsByGiniIndex(self, data, structure, colIndex, numOfBins):\n splits = self.miningCalculator.getListWithBestValueSplitsOfDataByGini(data, structure, colIndex, numOfBins - 1)\n splits.sort()\n bins = {\"value<=\" + str(splits[0]): lambda x: x <= splits[0]}\n if len(splits) > 1:\n for i in range(1, numOfBins - 1):\n bins[str(splits[i - 1]) + '<value<=' + str(splits[i])] = (lambda x: splits[i - 1] < x <= splits[i])\n bins[\"value>\" + str(splits[len(splits) - 1])] = (lambda x: x > splits[len(splits) - 1])\n return bins", "def binning ( self , axis , name = '' ) :\n assert isinstance ( axis , ROOT.TAxis ),\\\n 'Invalid axis %s/%s' % ( axis , type ( axis ) )\n\n ## uniform binning?\n if not axis.IsVariableBinSize() : \n return ROOT.RooFit.Binning ( axis.GetNbins() , axis.GetXmin() , axis.GetXmax() )\n ##\n xbins = axis.GetXbins().GetArray()\n rb = ROOT.RooBinning ( axis.GetNbins() , xbins , name )\n ##\n self.aux_keep.append ( rb )\n ##\n return ROOT.RooFit.Binning ( rb )", "def slice_sample_bounded_max(N, burn, logdist, xx, widths, step_out, max_attempts, bounds):\n xx = copy.deepcopy(xx)\n D = len(xx)\n samples = []\n if (not isinstance(widths, list)) or len(widths) == 1:\n widths = np.ones(D) * widths\n\n log_Px = logdist(xx)\n\n for ii in range(N + burn):\n log_uprime = np.log(random.random()) + log_Px\n for dd in random.sample(range(D), D):\n x_l = copy.deepcopy(xx)\n x_r = copy.deepcopy(xx)\n xprime = copy.deepcopy(xx)\n\n # Create a horizontal interval (x_l, x_r) enclosing xx\n rr = random.random()\n x_l[dd] = max(xx[dd] - rr*widths[dd], bounds[dd][0])\n x_r[dd] = min(xx[dd] + (1-rr)*widths[dd], bounds[dd][1])\n\n if step_out:\n while logdist(x_l) > log_uprime and x_l[dd] > bounds[dd][0]:\n\n x_l[dd] = max(x_l[dd] - widths[dd], bounds[dd][0])\n while logdist(x_r) > log_uprime and x_r[dd] < bounds[dd][1]:\n x_r[dd] = min(x_r[dd] + widths[dd], bounds[dd][1])\n\n # Propose xprimes and shrink interval until good one found\n zz = 0\n num_attempts = 0\n while True:\n zz += 1\n # print(x_l)\n xprime[dd] = random.random()*(x_r[dd] - x_l[dd]) + x_l[dd]\n \n log_Px = logdist(xx)\n if log_Px > log_uprime:\n xx[dd] = xprime[dd]\n break\n else:\n # Shrink in\n num_attempts += 1\n if num_attempts >= max_attempts:\n # print('Failed to find something')\n break\n elif xprime[dd] > xx[dd]:\n x_r[dd] = xprime[dd]\n elif xprime[dd] < xx[dd]:\n x_l[dd] = xprime[dd]\n else:\n raise Exception('Slice sampling failed to find an acceptable point')\n # Record samples\n if ii >= burn:\n samples.append(copy.deepcopy(xx))\n return samples", "def 
uniform_sample(upper, num):\n sample = []\n for i in range(num):\n value = random.randint(0, upper - 1)\n sample.append(value)\n return sample", "def generate_histogram(data, buckets):\n if not data:\n return {}\n\n minimum = min(data)\n maximum = max(data)\n if minimum == maximum:\n return {data[0]: len(data)}\n\n buckets = min(len(data), buckets)\n bucket_size = (maximum-minimum)/buckets\n out = dict((i, 0) for i in range(buckets))\n for i in data:\n out[min(int((i-minimum)/bucket_size), buckets-1)] += 1\n return dict(((k*bucket_size)+minimum, v) for k, v in out.items())", "def add_sample(self, value):\n index = None\n # Find the index of the bucket for this value, which is going to be first bucket range greater than the value.\n for i in range(0, len(self.__bucket_ranges)):\n if self.__bucket_ranges[i] > value:\n index = i\n break\n\n # Increment that histogram bucket count.\n if index is not None:\n self.__counts[index] += 1\n else:\n # Otherwise, the value must have been greater than our last boundary, so increment the overflow\n self.__overflow += 1\n\n if self.__min is None or value < self.__min:\n self.__min = value\n\n if self.__max is None or value > self.__max:\n self.__max = value\n\n self.__total_count += 1\n self.__total_values += value", "def bin_absorbers(dat, bin_attr='z', binsize=None, bin_start=None,\n bin_end=None, bins=None):\n output = []\n if not bins: # if no custom bins specified, then take equallly spaced bins\n bins = np.arange(bin_start, bin_end, binsize)\n bins = list(zip(bins, bins + binsize))\n\n for b in bins:\n output.append(\n [item for item in dat if b[0] <= getattr(item, bin_attr) < b[1]]\n )\n return output", "def createBinsByEqualDepth(self, data, colIndex, numOfBins):\n colData = list(map(lambda x: float(x[colIndex]), data))\n Depth, splittedData, index = int(((len(colData) / numOfBins) + 1)), [], 0\n for i in range(0, numOfBins):\n splittedData, index, Depth = splittedData + [colData[index:Depth]], Depth, Depth + Depth\n bins = {\"value<=\" + str(max(splittedData[0])): lambda x: x <= max(splittedData[0])}\n index = 1\n while index < numOfBins-1:\n if max(splittedData[index-1]) != max(splittedData[index]):\n val1 = max(splittedData[index-1])\n val2 = max(splittedData[index])\n bins[str(val1) + '<value<=' + str(val2)] = (lambda x: val1 < x <= val2)\n index += 1\n bins[\"value>\" + str(max(splittedData[index-1]))] = (lambda x: x > max(splittedData[index-1]))\n return bins", "def bucketize(point, bucket_size):\n return bucket_size * math.floor(point / bucket_size)", "def bucketize(point, bucket_size):\n return bucket_size * math.floor(point / bucket_size)", "def bins(self):\n\n if self.hist_x_min is None or self.hist_x_max is None or self.hist_n_bin is None:\n return None\n\n if self.x_log:\n return np.logspace(np.log10(self.hist_x_min),\n np.log10(self.hist_x_max),\n self.hist_n_bin + 1)\n else:\n return np.linspace(self.hist_x_min, self.hist_x_max,\n self.hist_n_bin + 1)", "def grid_to_bins(grid, start_bin_val, end_bin_val):\n bin_centers = (grid[1:] + grid[:-1])/2.0\n bins = np.concatenate([[start_bin_val], bin_centers, [end_bin_val]])\n return bins", "def _uniform(val_range):\r\n return np.random.uniform(val_range[0], val_range[1])", "def midpoints(min_r_value, max_r_value, bin_size):\n min_r = min_r_value + 0.5 * bin_size\n max_r = max_r_value - 0.5 * bin_size\n return np.arange(min_r, max_r, bin_size)", "def get_bins(data) :\n\tbins=np.unique(data)\n\treturn np.append(bins[~np.isnan(bins)], max(bins) +1)", "def get_histogram(self):\n\n for bin in 
range(self.bins.size):\n bin_inf = self.bins[bin]\n try: bin_sup = self.bins[bin + 1]\n except IndexError: bin_sup = self.vmax\n self.hist[bin] = np.sum(\n (self.values >= bin_inf)*(self.values < bin_sup))\n\n binned_values = np.sum(self.hist)\n if binned_values == 0: return self.hist # no binned value\n else: self.hist /= np.sum(self.hist)\n return self.hist", "def get_zenith_bins(zenith_bin_width=10):\n # Create ZenithBin namedtuple\n zenith_field_names = ['zenith_min',\n 'zenith_max',\n 'zenith_bins',\n 'zenith_midpoints',\n 'zenith_bin_widths',\n ]\n ZenithBin = namedtuple('ZenithBins', zenith_field_names)\n\n # Define zenith range for this analysis\n zenith_min = 0\n zenith_max = 30\n zenith_bins = np.arange(zenith_min,\n zenith_max + zenith_bin_width,\n zenith_bin_width)\n\n zenith_bin_widths = zenith_bins[1:] - zenith_bins[:-1]\n zenith_midpoints = (zenith_bins[1:] + zenith_bins[:-1]) / 2\n\n # Create instance of ZenithBin with appropriate binning\n zenithbins = ZenithBin(zenith_min=zenith_min,\n zenith_max=zenith_max,\n zenith_bins=zenith_bins,\n zenith_midpoints=zenith_midpoints,\n zenith_bin_widths=zenith_bin_widths)\n\n return zenithbins", "def _get_optimal_threshold(arr, num_bins=1001, num_quantized_bins=255):\n if not isinstance(arr, np.ndarray):\n raise TypeError('get_optimal_threshold only supports input type of np.ndarray,'\n ' while received type=%s' % (str(type(arr))))\n min_val = np.min(arr)\n max_val = np.max(arr)\n th = max(abs(min_val), abs(max_val))\n\n hist, hist_edges = np.histogram(arr, bins=num_bins, range=(-th, th))\n zero_bin_idx = num_bins // 2\n num_half_quantized_bins = num_quantized_bins // 2\n assert np.allclose(hist_edges[zero_bin_idx] + hist_edges[zero_bin_idx + 1],\n 0, rtol=1e-5, atol=1e-7)\n\n thresholds = np.zeros(num_bins // 2 + 1 - num_quantized_bins // 2)\n divergence = np.zeros_like(thresholds)\n quantized_bins = np.zeros(num_quantized_bins, dtype=np.int32)\n # i means the number of bins on half axis excluding the zero bin.\n for i in range(num_quantized_bins // 2,\n num_bins // 2 + 1):\n p_bin_idx_start = zero_bin_idx - i\n p_bin_idx_stop = zero_bin_idx + i + 1\n thresholds[i - num_half_quantized_bins] = hist_edges[p_bin_idx_stop]\n sliced_nd_hist = hist[p_bin_idx_start:p_bin_idx_stop]\n\n # generate reference distribution p\n p = sliced_nd_hist.copy()\n assert p.size % 2 == 1\n assert p.size >= num_quantized_bins\n # put left outlier count in p[0]\n left_outlier_count = np.sum(hist[0:p_bin_idx_start])\n p[0] += left_outlier_count\n # put right outlier count in p[-1]\n right_outlier_count = np.sum(hist[p_bin_idx_stop:])\n p[-1] += right_outlier_count\n # is_nonzeros[k] indicates whether hist[k] is nonzero\n is_nonzeros = (sliced_nd_hist != 0).astype(np.int32)\n\n # calculate how many bins should be merged to generate quantized distribution q\n num_merged_bins = p.size // num_quantized_bins\n # merge hist into num_quantized_bins bins\n for j in range(num_quantized_bins):\n start = j * num_merged_bins\n stop = start + num_merged_bins\n quantized_bins[j] = sliced_nd_hist[start:stop].sum()\n quantized_bins[-1] += sliced_nd_hist[num_quantized_bins * num_merged_bins:].sum()\n # expand quantized_bins into p.size bins\n q = np.zeros(p.size, dtype=np.float32)\n for j in range(num_quantized_bins):\n start = j * num_merged_bins\n if j == num_quantized_bins - 1:\n stop = -1\n else:\n stop = start + num_merged_bins\n norm = is_nonzeros[start:stop].sum()\n if norm != 0:\n q[start:stop] = float(quantized_bins[j]) / float(norm)\n q[sliced_nd_hist == 0] = 0\n p 
= _smooth_distribution(p)\n # There is a chance that q is an invalid probability distribution.\n try:\n q = _smooth_distribution(q)\n except ValueError:\n divergence[i - num_half_quantized_bins] = float(\"inf\")\n else:\n divergence[i - num_half_quantized_bins] = stats.entropy(p, q)\n quantized_bins[:] = 0\n\n min_divergence_idx = np.argmin(divergence)\n min_divergence = divergence[min_divergence_idx]\n opt_th = thresholds[min_divergence_idx]\n return min_val, max_val, min_divergence, opt_th", "def bin_intervals(intervals, bins, interval_range=None, smoothing_window=None,\n nan_replacement=None, zero_to_nan=False):\n if intervals is None:\n return []\n\n intervals = np.array(list(intervals))\n\n if interval_range is None:\n try:\n interval_range = (min(intervals[:, 0]), max(intervals[:, 1]))\n except (IndexError, TypeError):\n raise ValueError(\"intervals cannot be None or length 0 if not providing interval_range!\")\n\n bin_size = (interval_range[1] - interval_range[0] + 1) / bins\n logger.debug(\"Bin size: {}\".format(bin_size))\n\n return RegionBased._bin_intervals_equidist(intervals, bin_size, interval_range, bins=bins,\n smoothing_window=smoothing_window,\n nan_replacement=nan_replacement,\n zero_to_nan=zero_to_nan)", "def getBinIndex(self, x):\n\t\tb = -1\n\t\tif x == self._max_val: # final bin is [low, high], where others are [low,high)\n\t\t\tb = len(self._bins)-1\n\t\telse:\n\t\t\tb = math.floor((x-self._min_val)/self._bin_width)\n\t\treturn int(b)", "def sample_from_boundary(bounds_dict):\n area = 0 if np.random.random() < 0.5 else 1 #area 0 is half the center table, area 1 is the whole right table\n\n x = np.random.random() * bounds_dict['x_r'][area] + min(bounds_dict['x'][area])\n y = np.random.random() * bounds_dict['y_r'][area] + min(bounds_dict['y'][area])\n\n z = np.random.random() * 0.15 + 0.24\n return([x, y, z])", "def test_nonmonotonic_bins(self):\n\n with pytest.raises(ValueError) as verr:\n avg.median2D(self.testInst, np.array([0., 300., 100.]), 'longitude',\n np.array([0., 24., 13.]), 'mlt',\n ['dummy1', 'dummy2', 'dummy3'], auto_bin=False)\n\n estr = 'bins must be monotonically increasing or decreasing'\n assert str(verr).find(estr) >= 0\n\n return", "def uniform_weight_init(\n input_size: int,\n output_size: int,\n min_bounds: float = 0.0,\n max_bounds: float = 1.0,\n positive_ratio: Optional[float] = None,\n) -> t.Tensor:\n if input_size < 1:\n raise ValueError(\"input_size must be a positive integer.\")\n if output_size < 1:\n raise ValueError(\"output_size must be a positive integer.\")\n if min_bounds > max_bounds:\n raise ValueError(\"min_bounds must not be greater than max_bounds.\")\n if positive_ratio is not None:\n if positive_ratio > 1 or 0 > positive_ratio:\n raise ValueError(\n \"positive_ratio must be None, or must be between zero and one.\"\n )\n\n result = t.empty((input_size, output_size))\n uniform_(result, a=min_bounds, b=max_bounds)\n\n # TODO: test this.\n if positive_ratio is not None:\n bernoulli_distribution = Bernoulli(t.tensor([positive_ratio]))\n mask = bernoulli_distribution.sample((input_size, output_size)).squeeze().bool()\n result.abs_()\n result = result.where(mask, -result)\n\n return result", "def window_standardize(img, lower_bound, upper_bound):\n img = np.clip(img, lower_bound, upper_bound)\n # x=x*2-1: map x to [-1,1]\n img = 2 * (img - lower_bound) / (upper_bound - lower_bound) - 1\n return img", "def test_num_bins(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n 
return\n sys.stderr.write(\"\\n\")\n\n df1 = pd.DataFrame({'A': [0, 2, 4, 5, 7, 9, 11, 13, 13, 15]})\n df2 = pd.DataFrame({'A': [2, 4, 4, 6, 8, 7, 10, 14, 17, 19]})\n\n # building 1d-, 2d-, and 3d-histogram (iteratively)\n hist2 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n hist3 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n hist4 = hg.Bin(num=20, low=0.0, high=20., quantity=unit('A'))\n hist5 = hg.Bin(num=20, low=0.0, high=20., quantity=unit('A'))\n hist6 = hg.Bin(num=201, low=0.0, high=1.005)\n\n # fill them\n hist2.fill.numpy(df1)\n hist3.fill.numpy(df2)\n hist4.fill.numpy(df1)\n hist5.fill.numpy(df2)\n\n assert hist2.num_bins() == 16\n assert hist3.num_bins() == 18\n assert hist4.num_bins() == 20\n assert hist5.num_bins() == 20\n assert hist6.num_bins() == 201\n\n assert hist2.num_bins(low=10, high=25) == 15\n assert hist3.num_bins(low=10, high=25) == 15\n assert hist4.num_bins(low=10, high=25) == 10\n assert hist5.num_bins(low=10, high=25) == 10\n assert hist6.num_bins(low=0.2089, high=0.9333) == 146\n\n assert hist2.num_bins(low=-10, high=28) == 38\n assert hist3.num_bins(low=-10, high=28) == 38\n assert hist4.num_bins(low=-10, high=28) == 20\n assert hist5.num_bins(low=-10, high=28) == 20\n assert hist6.num_bins(low=0.205, high=0.935) == 146", "def to_constant_bin_number(d,\n N_bin,\n weight_pos=None,\n key=None,\n lower_bound=None,\n upper_bound=None,\n ):\n\n isdict = isinstance(d,dict)\n\n if not hasattr(d,'__len__'):\n raise TypeError(\"d must be iterable\")\n\n if not isdict and hasattr(d[0], '__len__'):\n if weight_pos is not None:\n key = lambda x: x[weight_pos]\n if key is None:\n raise ValueError(\"Must provide weight_pos or key for tuple list\")\n\n if not isdict and key:\n new_dict = {i: val for i, val in enumerate(d)}\n d = {i: key(val) for i, val in enumerate(d)}\n isdict = True\n is_tuple_list = True\n else:\n is_tuple_list = False\n\n if isdict:\n\n #get keys and values (weights)\n keys_vals = d.items()\n keys = [ k for k, v in keys_vals ]\n vals = [ v for k, v in keys_vals ]\n\n #sort weights decreasingly\n ndcs = revargsort(vals)\n\n weights = get(vals, ndcs)\n keys = get(keys, ndcs)\n\n bins = [ {} for i in range(N_bin) ]\n else:\n weights = sorted(d,key=lambda x: -x)\n bins = [ [] for i in range(N_bin) ]\n\n #find the valid indices\n if lower_bound is not None and upper_bound is not None and lower_bound<upper_bound:\n valid_ndcs = filter(lambda i: lower_bound < weights[i] < upper_bound,range(len(weights)))\n elif lower_bound is not None:\n valid_ndcs = filter(lambda i: lower_bound < weights[i],range(len(weights)))\n elif upper_bound is not None:\n valid_ndcs = filter(lambda i: weights[i] < upper_bound,range(len(weights)))\n elif lower_bound is None and upper_bound is None:\n valid_ndcs = range(len(weights))\n elif lower_bound>=upper_bound:\n raise Exception(\"lower_bound is greater or equal to upper_bound\")\n\n valid_ndcs = list(valid_ndcs)\n\n weights = get(weights, valid_ndcs)\n\n if isdict:\n keys = get(keys, valid_ndcs)\n\n #the total volume is the sum of all weights\n V_total = sum(weights)\n\n #the first estimate of the maximum bin volume is \n #the total volume divided to all bins\n V_bin_max = V_total / float(N_bin)\n\n #prepare array containing the current weight of the bins\n weight_sum = [0. 
for n in range(N_bin) ]\n\n #iterate through the weight list, starting with heaviest\n for item, weight in enumerate(weights):\n\n if isdict:\n key = keys[item]\n\n #put next value in bin with lowest weight sum\n b = argmin(weight_sum)\n\n #calculate new weight of this bin\n new_weight_sum = weight_sum[b] + weight\n\n found_bin = False\n while not found_bin:\n\n #if this weight fits in the bin\n if new_weight_sum <= V_bin_max:\n\n #...put it in \n if isdict:\n bins[b][key] = weight\n else:\n bins[b].append(weight)\n\n #increase weight sum of the bin and continue with\n #next item \n weight_sum[b] = new_weight_sum\n found_bin = True\n\n else:\n #if not, increase the max volume by the sum of\n #the rest of the bins per bin\n V_bin_max += sum(weights[item:]) / float(N_bin)\n\n if not is_tuple_list:\n return bins\n else:\n new_bins = []\n for b in range(N_bin):\n new_bins.append([])\n for key in bins[b]:\n new_bins[b].append(new_dict[key])\n return new_bins", "def findSignificantBins(return_bins):\n \n count = 0\n range_even = []\n range_odd = []\n for i in xrange(1,len(return_bins['my_list'])):\n value_range = (return_bins['my_list'][i] - return_bins['my_list'][(i-1)])\n count = count + 1\n if (count % 2 == 0) :\n range_even.append(value_range)\n else: \n range_odd.append(value_range)\n\n print \"This is the even {}\".format(range_even)\n print \"This is the odd {}\".format(range_odd)\n if (return_bins.get('start_value') == 0) : \n return range_even\n else:\n return range_odd", "def test_histogram_with_varying_number_of_bin(self):\n # this data use number of bins less than the max limit\n df1 = pd.Series([1, 2, 3, 4]).apply(str)\n profiler1 = FloatColumn(df1.name)\n profiler1.max_histogram_bin = 50\n profiler1.update(df1)\n num_bins = len(profiler1.profile['histogram']['bin_counts'])\n self.assertEqual(num_bins, 4)\n\n # this data uses large number of bins, which will be set to\n # the max limit\n df2 = pd.Series([3.195103249264023e+18, 9999995.0, 9999999.0,\n 0.0, -10 ** 10]).apply(str)\n profiler2 = FloatColumn(df2.name)\n profiler2.max_histogram_bin = 50\n profiler2.update(df2)\n num_bins = len(profiler2.profile['histogram']['bin_counts'])\n self.assertEqual(num_bins, 50)\n\n # max number of bin is increased to 10000\n profiler2 = FloatColumn(df2.name)\n profiler2.max_histogram_bin = 10000\n profiler2.update(df2)\n num_bins = len(profiler2.profile['histogram']['bin_counts'])\n self.assertEqual(num_bins, 10000)", "def bucket_intervals(alpha, bucket_sizes, sigma):\n heads = array(\"l\", [0] * sigma)\n tails = array(\"l\", [0] * sigma)\n j = 0\n for i in range(len(alpha)):\n heads[alpha[i]] = j\n j += bucket_sizes[alpha[i]]\n tails[alpha[i]] = j - 1\n\n # print_buckets(heads)\n # print_buckets(tails)\n return heads, tails", "def __call__(self, n_bins, segment, elements):\n\n # n_bins\n assert type(n_bins) is int\n assert n_bins > 0\n\n # segment\n assert type(segment) is list or type(segment) is tuple\n assert len(segment) == 2\n assert np.isscalar(segment[0]) and np.isscalar(segment[1])\n assert segment[0] < segment[1]\n\n # elements\n assert type(elements) is np.ndarray, f\"elements should be an np.ndarray, instead of {type(elements)}\"\n assert elements.dtype == np.number\n\n sorted_elements = np.sort(elements)\n\n bin_card = int(floor(elements.shape[0]/n_bins))\n\n bin_boundaries = [segment[0]]\n\n for i in range(1, n_bins):\n boundary_l = sorted_elements[i*bin_card - 1]\n boundary_r = sorted_elements[i * bin_card]\n boundary = (boundary_l+boundary_r)/2\n\n 
bin_boundaries.append(boundary)\n\n bin_boundaries.append(segment[1])\n\n return np.array(bin_boundaries)", "def init_buckets(len2freq):\n source = Counter(len2freq)\n\n if not len(source):\n raise ValueError('Empty length-to-frequency map')\n\n if not all(map(lambda x: isinstance(x, int), source.keys())):\n raise ValueError('Keys of length-to-frequency must be integers')\n\n if not all(map(lambda x: isinstance(x, int), source.values())):\n raise ValueError('Values of length-to-frequency must be integers')\n\n denominator = 8\n lengths = sorted(source.keys())\n\n buckets = []\n for lng in lengths:\n b = int(np.ceil(lng / denominator)) * denominator + 1\n if not len(buckets) or buckets[-1][0] != b:\n buckets.append((b, {}))\n buckets[-1][1][lng] = source[lng]\n\n return buckets", "def hist_bins(bins, vals):\r\n\r\n hist = zeros(len(bins))\r\n j = 0\r\n for i in vals:\r\n while bins[j] < i:\r\n j += 1\r\n hist[j] += 1\r\n\r\n return asarray(bins), hist", "def test_bin_width(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df1 = pd.DataFrame({'A': [0, 1, 2, 3, 4, 3, 2, 1, 1, 1]})\n\n # building test histograms\n hist2 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n hist3 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n hist4 = hg.Bin(num=20, low=0.0, high=10., quantity=unit('A'))\n hist5 = hg.Bin(num=20, low=0.0, high=10., quantity=unit('A'))\n\n # fill them\n hist2.fill.numpy(df1)\n hist4.fill.numpy(df1)\n\n assert hist2.bin_width() == 1.0\n assert hist3.bin_width() == 1.0\n assert hist4.bin_width() == 0.5\n assert hist5.bin_width() == 0.5", "def get_histogram(self):\n\n values_array = np.array(self.values)\n for bin0 in range(self.bins[0].size):\n bin_inf0 = self.bins[0][bin0]\n try: bin_sup0 = self.bins[0][bin0 + 1]\n except IndexError: bin_sup0 = self.vmax[0]\n values = values_array[\n (values_array[:, 0] >= bin_inf0)\n *(values_array[:, 0] < bin_sup0)][:, 1]\n for bin1 in range(self.bins[1].size):\n bin_inf1 = self.bins[1][bin1]\n try: bin_sup1 = self.bins[1][bin1 + 1]\n except IndexError: bin_sup1 = self.vmax[1]\n self.hist[bin0*self.Nbins[1] + bin1, 2] = (\n np.sum((values >= bin_inf1)*(values < bin_sup1)))\n\n if np.sum(self.hist[:, 2]) > 0: # there are binned values\n self.hist[:, 2] /= np.sum(self.hist[:, 2])\n return self.hist" ]
[ "0.6999874", "0.6858696", "0.67939454", "0.6425844", "0.64145726", "0.63430184", "0.6245302", "0.6230007", "0.6219566", "0.62046456", "0.618017", "0.61405003", "0.61358374", "0.61317116", "0.6131306", "0.6127178", "0.60743845", "0.6071009", "0.6056603", "0.6021089", "0.598096", "0.59706736", "0.5945507", "0.59254426", "0.5912764", "0.59105533", "0.5893673", "0.5886097", "0.5883621", "0.5874091", "0.58546025", "0.58392495", "0.58376336", "0.58228385", "0.5822395", "0.58152443", "0.58141077", "0.57904196", "0.57540005", "0.574958", "0.57395005", "0.57335556", "0.57257754", "0.57217026", "0.5718705", "0.56946105", "0.56918204", "0.56894994", "0.56755316", "0.5657051", "0.56440055", "0.563542", "0.56231105", "0.5621798", "0.56201506", "0.56066936", "0.55989385", "0.5591447", "0.5590117", "0.55802685", "0.55782163", "0.55672836", "0.55574095", "0.555595", "0.55465263", "0.5545217", "0.5541841", "0.55401295", "0.55370593", "0.55358577", "0.552613", "0.55213255", "0.551516", "0.55127734", "0.5479759", "0.5479759", "0.54794174", "0.5477585", "0.54763526", "0.5471263", "0.54683185", "0.5436443", "0.5435574", "0.54334587", "0.54290766", "0.54179096", "0.5417772", "0.5409831", "0.5407476", "0.5380839", "0.5379667", "0.5378237", "0.537454", "0.53741765", "0.53698176", "0.53588283", "0.5358071", "0.5347258", "0.5345467", "0.5333518" ]
0.62419814
7
Equal frequency binning: each bucket takes a uniform share of the sample size. Buckets include the right boundary and exclude the left boundary. Namely, boundaries=[0., 1., 2.] generates buckets (-inf, 0.], (0., 1.], (1., 2.], and (2., +inf).
def quantile(feature, bins):
    t = feature.sort_values().values
    w = round(len(t)/bins)
    return [t[w*i-1] for i in range(1, bins)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_bins(self):\n min_val = 0\n max_val = 1\n buckets = 10\n values_per_bucket = 10\n\n import numpy\n\n data = list(numpy.linspace(min_val, max_val, buckets * values_per_bucket))\n bins = numpy.linspace(min_val, max_val + sys.float_info.epsilon, buckets + 1)\n digitized = numpy.digitize(data, bins)\n counts = numpy.bincount(digitized)\n self.assertEqual(buckets + 1, len(counts))\n self.assertEqual(0, counts[0])\n for bucket in counts[1:]:\n self.assertEqual(values_per_bucket, bucket)", "def frequency_bucket_floor(bucket_index):\n\tfraction = bucket_index / FREQUENCY_BUCKETS\n\tlog_range = [math.log(edge, 2) for edge in HEARING_RANGE]\n\tlog_floor = log_range[0] + fraction * (log_range[1] - log_range[0])\n\treturn 2 ** log_floor", "def _get_distribution ( bin_size, Flag = 0 ):\n \n # Get the step size\n lower = 0; upper = step = 1/bin_size\n Dist = {}\n \n # make bins\n while upper <= 1:\n Dist[Flag] = [ lower, upper ]\n Flag += 1\n lower = upper\n upper += step\n return Dist", "def all_bucket_boundaries(self):\n\n lower = self._lower_bounds[0]\n for i in xrange(1, self.total_buckets):\n upper = self._lower_bounds[i]\n yield (lower, upper)\n lower = upper\n\n yield (lower, float('Inf'))", "def bucketize(signal, windowsize, overlap):\n bucket_count = len(signal) / (windowsize - overlap) -1\n buckets = numpy.zeros((bucket_count, windowsize/2 + 1))\n hamming = numpy.hamming(windowsize)\n\n step = windowsize - overlap\n for i in xrange(bucket_count):\n start = i * step\n windowed = emphasis(signal[start:start+windowsize]) * hamming\n buckets[i] = numpy.abs(scipy.fftpack.fft(windowed)[:windowsize/2 +1])\n\n return buckets", "def bucket_boundaries(self, bucket):\n\n if bucket < 0 or bucket >= self.total_buckets:\n raise IndexError('bucket %d out of range' % bucket)\n if bucket == self.total_buckets - 1:\n return (self._lower_bounds[bucket], float('Inf'))\n return (self._lower_bounds[bucket], self._lower_bounds[bucket + 1])", "def bucketize(point, bucket_size):\r\n return bucket_size * math.floor(point / bucket_size)", "def FixedWidthBucketer(width, num_finite_buckets=100):\n return Bucketer(width=width, growth_factor=0.0,\n num_finite_buckets=num_finite_buckets)", "def createBins():\n theBins = []\n startFreq = 60\n for a in range(32):\n endFreq = int(startFreq*1.12+12)\n theRange = (startFreq, endFreq)\n startFreq = endFreq\n theBins.append(theRange)\n return(theBins)", "def init_buckets(len2freq):\n source = Counter(len2freq)\n\n if not len(source):\n raise ValueError('Empty length-to-frequency map')\n\n if not all(map(lambda x: isinstance(x, int), source.keys())):\n raise ValueError('Keys of length-to-frequency must be integers')\n\n if not all(map(lambda x: isinstance(x, int), source.values())):\n raise ValueError('Values of length-to-frequency must be integers')\n\n denominator = 8\n lengths = sorted(source.keys())\n\n buckets = []\n for lng in lengths:\n b = int(np.ceil(lng / denominator)) * denominator + 1\n if not len(buckets) or buckets[-1][0] != b:\n buckets.append((b, {}))\n buckets[-1][1][lng] = source[lng]\n\n return buckets", "def test_bins(self):\n\n for filename in ['%s/population_padang_1.asc' % TESTDATA,\n '%s/test_grid.asc' % TESTDATA]:\n\n R = read_layer(filename)\n rmin, rmax = R.get_extrema()\n\n for N in [2, 3, 5, 7, 10, 16]:\n linear_intervals = R.get_bins(N=N, quantiles=False)\n\n assert linear_intervals[0] == rmin\n assert linear_intervals[-1] == rmax\n\n d = (rmax - rmin) / N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], rmin + i * d)\n\n 
quantiles = R.get_bins(N=N, quantiles=True)\n A = R.get_data(nan=True).flat[:]\n\n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask)\n l2 = len(A)\n\n if filename == '%s/test_grid.asc' % TESTDATA:\n # Check that NaN's were removed\n assert l1 == 35\n assert l2 == 30\n\n # Assert that there are no NaN's\n assert not numpy.alltrue(numpy.isnan(A))\n\n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements / N\n\n # Count elements in each bin and check\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n\n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no\n # more than 1\n assert abs(count - refcount) <= 1\n assert abs(count - average_elements_per_bin) <= 3\n else:\n # The last bin is allowed vary by more\n pass\n\n i0 = i1", "def bucketize(point, bucket_size):\n return bucket_size * math.floor(point / bucket_size)", "def bucketize(point, bucket_size):\n return bucket_size * math.floor(point / bucket_size)", "def setup_bins(self):\n width = int((self.max - self.min) / self.bin_size)\n bins = {\n i * width + self.min: (idx, idx + self.bin_size)\n for i, idx in enumerate(range(0, len(self.nums), self.bin_size))\n }\n return bins", "def get_bucket_boundaries(feature):\n return np.unique(np.percentile(feature, range(0, 100))).tolist()", "def get_bins(val: List[float]) -> List[float]:\n r_min = np.min(val)\n r_max = np.max(val)\n min_bins = 2\n max_bins = 50\n # Calculate bin width using either Freedman-Diaconis or Sturges estimator\n bin_edges = np.histogram_bin_edges(val, bins=\"auto\")\n if len(bin_edges) < min_bins:\n return list(np.linspace(start=r_min, stop=r_max, num=min_bins))\n elif len(bin_edges) <= max_bins:\n return list(bin_edges)\n # Clamp to max_bins by estimating a good bin range to be more robust to outliers\n q75, q25 = np.percentile(val, [75, 25])\n iqr = q75 - q25\n width = 2 * iqr / max_bins\n start = max((q75 + q25) / 2 - iqr, r_min)\n stop = min(start + max_bins * width, r_max)\n # Take the minimum of range and 2x IQR to account for outliers\n edges = list(np.linspace(start=start, stop=stop, num=max_bins))\n prefix = [r_min] if start > r_min else []\n suffix = [r_max] if stop < r_max else []\n return prefix + edges + suffix", "def gen_buckets(num_buckets, data, max_val=256):\n\n default_size_of_bucket = int(len(data)/3)\n print(f\"Bucket size: {default_size_of_bucket}\")\n all_buckets = []\n for i in range(num_buckets):\n curr_buck = [0 for _ in range(max_val)]\n np.random.shuffle(data)\n curr_sample = data[0:default_size_of_bucket]\n for i in range(len(curr_sample)):\n curr_buck[curr_sample[i]] += 1\n all_buckets.append(curr_buck)\n return all_buckets", "def test_bins(self):\n\n \n for filename in ['data/population_padang_1.asc', \n 'data/test_grid.asc']: \n \n R = read_coverage(filename)\n \n min, max = R.get_extrema() #use_numeric=True)\n \n for N in [2,3,5,7,10,16]:\n linear_intervals = R.get_bins(N=N, quantiles=False) \n \n assert linear_intervals[0] == min\n assert linear_intervals[-1] == max \n \n d = (max-min)/N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], min + i*d) \n \n \n quantiles = R.get_bins(N=N, quantiles=True)\n\n A = R.get_data(nan=True).flat[:] \n \n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask) \n l2 = len(A)\n \n if filename == 'data/test_grid.asc':\n # Check that NaN's were removed\n \n assert l1 == 35\n assert l2 == 30\n \n 
\n # Assert that there are no NaN's \n assert not numpy.alltrue(numpy.isnan(A))\n \n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements/N\n \n # Count elements in each bin and check\n\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n \n \n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no more than 1\n assert abs(count - refcount) <= 1 \n assert abs(count - average_elements_per_bin) <= 3\n \n \n else:\n # The last bin is allowed vary by more\n pass\n \n i0 = i1", "def set_bin_(data, bin_num, scale=100.):\n temp_data = -numpy.sort(-numpy.abs(data))\n\n bin_size = len(temp_data) / bin_num * 2\n bins = numpy.array([temp_data[int(i * bin_size)] for i in range(1, int(bin_num / 2))])\n bins = numpy.sort(numpy.append(numpy.append(-bins, [0.]), bins))\n bound = numpy.max(numpy.abs(data)) * scale\n bins = numpy.sort(numpy.append(-bound, numpy.append(bins, bound)))\n return bins", "def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1):\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries", "def waste_frac(bucket):\n if not isinstance(bucket, tuple) or len(bucket) not in {0, 2}:\n raise ValueError('Wrong bucket format')\n\n if not len(bucket):\n return 0.0\n\n boundary, len2freq = bucket\n zero_cnt = sum([(boundary - 1 - lng) * f for lng, f in len2freq.items()])\n total_freq = sum([f for _, f in len2freq.items()])\n\n return zero_cnt / (total_freq * (boundary - 1))", "def bucket_dist(g_var, x_var, all_bins, tar_bin, label, df):\n return (\n df.groupby(g_var)[x_var]\n .value_counts(normalize=True, bins=all_bins)\n [:, tar_bin]\n .to_frame()\n .assign(Interval = label)\n )", "def createBinsByEqualWidth(self, data, colIndex, numOfBins):\n colData = list(map(lambda x: float(x[colIndex]), data))\n minVal, maxVal = min(colData), max(colData)\n width = round(((maxVal - minVal) / numOfBins), 3)\n bins = {\"value<=\"+str(width): lambda x: x <= width}\n for i in range(1, numOfBins-1):\n bins[str(width) + '<value<=' + str(width+width)] = (lambda x: width < x <= width + width)\n width = width + width\n bins[\"value>\" + str(width)] = (lambda x: x > width)\n return bins", "def make_buckets(entries, low_bit: BitPos, cap_bit: BitPos) -> \"list[Bucket]\":\n num_bits = cap_bit - low_bit\n assert num_bits > 0\n buckets = [Bucket() for _ in range(0, 2 ** num_bits)]\n mask = (1 << num_bits) - 1\n for (codepoint, width) in entries:\n buckets[(codepoint >> low_bit) & mask].append(codepoint, width)\n return buckets", "def uniform_binning(ts, bins):\n symb = np.asarray(bins * (ts - ts.min()) / (ts.max() - ts.min() + 1e-12), dtype=int)\n return symb", "def _bucket_boundaries(self, max_length, min_length=8, length_bucket_step=1.1):\n assert min_length <= max_length\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries", "def uniform(feature, bins):\n t = (feature.max()-feature.min())/bins\n return [t*i for i in range(1, bins)]", "def generate_binned_values( lower_lim, upper_lim, chr_length, snps_per_chr, indels_per_chr, resolution ):\n\t\n\tsnp_data = []\n\tindel_data = []\n\twhile True:\n\t\tif upper_lim >= chr_length:\n\t\t\tbreak\n\t\telse:\n\t\t\tsnp_tmp = []\n\t\t\tindel_tmp = []\n\t\t\tfor SNP in 
snps_per_chr:\n\t\t\t\tif SNP <= upper_lim and SNP > lower_lim:\n\t\t\t\t\tsnp_tmp.append( 'X' )\n\t\t\tfor indel in indels_per_chr:\n\t\t\t\tif indel <= upper_lim and indel > lower_lim:\n\t\t\t\t\tindel_tmp.append( 'X' )\n\t\t\tsnp_data.append( len( snp_tmp ) )\n\t\t\tindel_data.append( len( indel_tmp ) )\n\t\tupper_lim += resolution\n\t\tlower_lim += resolution\n\treturn max( snp_data ), max( indel_data ), snp_data, indel_data", "def binning(data, low, high):\n if len(data) == 0: return 1\n\n mask1 = (data >= low)\n mask2 = (data < high)\n mask3 = numpy.logical_and(mask1, mask2)\n data = data[mask3]\n\n if len(data) == 0: return 10\n\n data.sort()\n q1 = data[int(math.floor(0.25*len(data)))]\n q3 = data[int(math.floor(0.75*len(data)))]\n binwidth = 2. * (q3 - q1) / len(data)**(1./3.)\n if binwidth > 0.:\n return max(10, int(math.ceil((high - low)/binwidth)))\n else:\n return 10", "def from_bins(bins):\n return 0.5*(bins[1:] + bins[:-1])", "def test_irregular(self):\n import numpy as np\n import histogrammar\n\n h = histogrammar.IrregularlyBin([0, 10, 20, 40, 100])\n h.fillnumpy([-5, 5, 5, 50, 10, 100, 1000, 50, 50])\n\n np.testing.assert_array_equal(h.bin_entries(), [1., 2., 1., 0., 3., 2.])\n np.testing.assert_array_equal(h.bin_edges(), [float('-inf'), 0., 10., 20., 40., 100., float('inf')])\n np.testing.assert_array_equal(h.bin_centers(), [float('-inf'), 5., 15., 30., 70., float('inf')])\n assert h.num_bins() == 6\n assert h.n_bins == 6\n np.testing.assert_almost_equal(h.mpv, 70.)\n\n np.testing.assert_array_equal(h.bin_entries(10, 40), [1., 0.])\n np.testing.assert_array_equal(h.bin_edges(10, 40), [10., 20., 40.])\n np.testing.assert_array_equal(h.bin_centers(10, 40), [15., 30.])\n assert h.num_bins(10, 40) == 2\n\n np.testing.assert_array_equal(h.bin_entries(5, 110), [2., 1., 0., 3., 2.])\n np.testing.assert_array_equal(h.bin_edges(5, 110), [0., 10., 20., 40., 100., float('inf')])\n np.testing.assert_array_equal(h.bin_centers(5, 110), [5., 15., 30., 70., float('inf')])\n assert h.num_bins(5, 110) == 5", "def optimalBins(x,factor=1):\n sz = optimalBinSize(x) * factor\n return np.arange(x.min(), x.max(), sz)", "def unique_binning(t):\n diff= np.unique(t)\n diff= diff[1:] - diff[:-1]\n diff = np.min(diff)/2\n return np.digitize(t, np.hstack([np.unique(t) + diff]))", "def hist_bin_opt (x, minbin=20, maxbin=600, spacing=10, N_trials=1):\n bin_checks = np.arange(minbin, maxbin, spacing)\n # bin_checks = np.linspace(150, 300, 16)\n costs = np.zeros(len(bin_checks))\n i = 0\n # this might be vectorizable in np\n for n_bins in bin_checks:\n # use np.histogram to do the numerical minimization\n pdf, bin_edges = np.histogram(x, n_bins)\n # calculate bin width\n # some discrepancy here but should be fine\n w_bin = np.unique(np.diff(bin_edges))\n if len(w_bin) > 1: w_bin = w_bin[0]\n # calc mean and var\n kbar = np.mean(pdf)\n kvar = np.var(pdf)\n # calc cost\n costs[i] = (2.*kbar - kvar) / (N_trials * w_bin)**2.\n i += 1\n # find the bin size corresponding to a minimization of the costs\n bin_opt_list = bin_checks[costs.min() == costs]\n bin_opt = bin_opt_list[0]\n return bin_opt", "def _prep_buckets(buckets, len_x):\n if isinstance(buckets, int):\n lims = np.linspace(0, len_x-1, buckets+1, dtype=int)\n else:\n lims = buckets\n buckets = len(lims)-1\n\n # Determine center of each bucket\n mids = np.rint(np.convolve(lims, np.ones(2), 'valid') / 2).astype(int)\n mids[0] = 0\n mids[-1] = len_x - 1\n\n return lims, mids", "def bincalc(nbin=0.1,bmin=5,bmax=2000):\n\n logbmin=np.log10(bmin)\n 
logbmax=np.log10(bmax)\n\n logbins=np.arange(logbmin,logbmax,nbin)\n\n bins=10**logbins\n\n #bins=np.linspace(bmin,bmax,60)\n return (bins)", "def logbin_distribution(data, nbins = 30):\n # define the support of the distribution\n lower_bound = min(data)\n upper_bound = max(data)\n # define bin edges\n log = np.log10\n lower_bound = log(lower_bound) if lower_bound > 0 else -1\n upper_bound = log(upper_bound)\n bins = np.logspace(lower_bound, upper_bound, nbins)\n\n # compute the histogram using numpy\n y, _ = np.histogram(data, bins=bins, density=True)\n # for each bin, compute its midpoint\n x = bins[1:] - np.diff(bins) / 2.0\n # if bin is empty, drop it from the resulting list\n drop_indices = [i for i,k in enumerate(y) if k == 0.0]\n x = [k for i,k in enumerate(x) if i not in drop_indices]\n y = [k for i,k in enumerate(y) if i not in drop_indices]\n return x, y", "def computeBinWidth(self):\n self.binWidth = (self.data[-1] - self.data[0]) / self.numBins\n # Fill the frequencies array with zero\n for i in range(self.numBins):\n self.frequencies.append(0)", "def fast_histogram(\n val: List[float],\n discrete: Optional[bool] = None,\n bins: Optional[List[float]] = None,\n) -> Mapping[str, float]:\n val = np.asarray(val, dtype=float)\n size = len(val)\n\n # Unique does not work on nan since nan != nan\n val = val[~np.isnan(val)]\n size_nan = size - len(val)\n discrete = is_discrete(val) if discrete is None else discrete\n\n if discrete:\n bins, counts = np.unique(val, return_counts=True)\n bin_to_count = {str(bins[i]): counts[i] for i in range(len(bins))}\n if size_nan > 0:\n bin_to_count[\"nan\"] = size_nan\n return bin_to_count\n\n # Counts nan as part of infinity bin\n val = val[~np.isinf(val)]\n size_inf = size - len(val)\n if len(val) == 0:\n return {\"+Inf\": size_inf}\n\n # Take the negative of all values to use \"le\" as the bin upper bound\n bins = bins or get_bins(val)\n counts, _ = np.histogram(-val, bins=-np.flip([bins[0]] + bins))\n counts = np.flip(counts)\n bin_to_count = dict(p for p in zip(map(str, bins), counts))\n\n # Add infinity bin last to preserve insertion order\n bin_to_count[\"+Inf\"] = size_inf\n return bin_to_count", "def make_histogram(points, bucket_size):\r\n return Counter(bucketize(point, bucket_size) for point in points)", "def buckets(self, disable_last_bucket_padding=False):\n if self.__total_count == 0:\n return\n\n # We use the minimum value for the lower bound of the first bucket.\n previous = self.__min\n for i in range(0, len(self.__counts)):\n if self.__counts[i] > 0:\n yield self.__counts[i], previous, self.__bucket_ranges[i]\n previous = self.__bucket_ranges[i]\n\n if self.__overflow == 0:\n return\n\n if not disable_last_bucket_padding:\n padding = 0.01\n else:\n padding = 0.0\n\n # We use the maximum value for the upper bound of the overflow range. 
Note, we added 0.01 to make sure the\n # boundary is exclusive to the values that fell in it.\n yield self.__overflow, self.__bucket_ranges[-1], self.__max + padding", "def _make_bins(start, stop, step):\n bin_edges = np.arange(start, stop + step, step)\n\n return bin_edges", "def test_range_argument_ignored(self):\n bins_range = (1, 2)\n\n bin_edges, hist, _, _ = hist_w_unc(\n self.input,\n bins=self.bin_edges,\n bins_range=bins_range,\n normed=False,\n )\n\n # check if we end up with the same bin edges anyway\n np.testing.assert_array_almost_equal(self.bin_edges, bin_edges)\n np.testing.assert_array_almost_equal(self.hist, hist)", "def test_histogram_with_varying_number_of_bin(self):\n # this data use number of bins less than the max limit\n df1 = pd.Series([1, 2, 3, 4]).apply(str)\n profiler1 = FloatColumn(df1.name)\n profiler1.max_histogram_bin = 50\n profiler1.update(df1)\n num_bins = len(profiler1.profile['histogram']['bin_counts'])\n self.assertEqual(num_bins, 4)\n\n # this data uses large number of bins, which will be set to\n # the max limit\n df2 = pd.Series([3.195103249264023e+18, 9999995.0, 9999999.0,\n 0.0, -10 ** 10]).apply(str)\n profiler2 = FloatColumn(df2.name)\n profiler2.max_histogram_bin = 50\n profiler2.update(df2)\n num_bins = len(profiler2.profile['histogram']['bin_counts'])\n self.assertEqual(num_bins, 50)\n\n # max number of bin is increased to 10000\n profiler2 = FloatColumn(df2.name)\n profiler2.max_histogram_bin = 10000\n profiler2.update(df2)\n num_bins = len(profiler2.profile['histogram']['bin_counts'])\n self.assertEqual(num_bins, 10000)", "def generate_histogram(data, buckets):\n if not data:\n return {}\n\n minimum = min(data)\n maximum = max(data)\n if minimum == maximum:\n return {data[0]: len(data)}\n\n buckets = min(len(data), buckets)\n bucket_size = (maximum-minimum)/buckets\n out = dict((i, 0) for i in range(buckets))\n for i in data:\n out[min(int((i-minimum)/bucket_size), buckets-1)] += 1\n return dict(((k*bucket_size)+minimum, v) for k, v in out.items())", "def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)", "def make_histogram(points, bucket_size):\n return Counter(bucketize(point, bucket_size) for point in points)", "def optimalBinSize(x):\n interquartile = np.diff(np.prctile(x, [25, 75]))\n return 2. 
* interquartile * len(x)**(-1./3)", "def getMistagBinBounds(config, mistag, mistagdistrib):\n mistag.setBins(1000, 'MistagBinBounds')\n from ROOT import RooArgSet, RooHistPdf, RooDataHist\n if (mistagdistrib.InheritsFrom('RooAbsData') and not\n mistagdistrib.InheritsFrom('RooDataHist')):\n # ok, unbinned data set, get only tagged events, and form a binned clone\n argset = RooArgSet(mistag)\n mistagdistrib = mistagdistrib.reduce(\n RooFit.SelectVars(argset), RooFit.cut('0 != qt'))\n ROOT.SetOwnership(mistagdistrib, True)\n dhist = RooDataHist(\n '%s_binned' % mistagdistrib.GetName(),\n '%s_binned' % mistagdistrib.GetName(),\n mistagdistrib.get(), 'MistagBinBounds')\n dhist.add(mistagdistrib)\n mistagdistrib = dhist\n if mistagdistrib.InheritsFrom('RooAbsData'):\n # convert a binned dataset to a RooHistPdf\n dhist = mistagdistrib\n mistagdistrib = RooHistPdf('%s_pdf' % dhist.GetName(),\n '%s_pdf' % dhist.GetName(), RooArgSet(mistag), dhist)\n if (mistagdistrib.InheritsFrom('RooAbsPdf')):\n # use createCdf to obtain the CDF\n cdfroofit = mistagdistrib.createCdf(\n RooArgSet(mistag), RooArgSet(mistag))\n ROOT.SetOwnership(cdfroofit, True)\n def cdf(x):\n oldval = mistag.getVal()\n mistag.setVal(x)\n retVal = cdfroofit.getVal()\n mistag.setVal(oldval)\n return retVal\n if (mistagdistrib.InheritsFrom('RooHistPdf') and\n (abs(cdf(mistag.getMin())) > 1e-9 or\n abs(cdf(mistag.getMax()) - 1.) > 1e-9)):\n # createCdf does not work properly for RooHistPdf in older ROOT\n # versions because RooHistPdf does not support integrals over\n # subranges, so we have to fake this functionality until it's\n # supported by RooFit upstream\n #\n # capture histogram bin boundaries and contents\n print 'WARNING: Your version of RooFit still has buggy analytical ' \\\n 'integrals for RooHistPdf - activating workaround.'\n binboundlist = mistagdistrib.binBoundaries(\n mistag, mistag.getMin(), mistag.getMax())\n ROOT.SetOwnership(binboundlist, True)\n binbounds = [ v for v in binboundlist ]\n del binboundlist\n bincontents = [ ]\n oldval = mistag.getVal()\n for i in xrange(0, len(binbounds) - 1):\n mistag.setVal(0.5 * (binbounds[i] + binbounds[i + 1]))\n bincontents.append(mistagdistrib.getValV(RooArgSet(mistag)))\n mistag.setVal(oldval)\n # build CDF from histogram\n def cdf(x):\n s = 0.\n for i in xrange(0, len(binbounds) - 1):\n if x < binbounds[i]:\n break\n elif x >= binbounds[i + 1]:\n s += bincontents[i]\n else:\n s += (bincontents[i] * (x - binbounds[i]) /\n (binbounds[i + 1] - binbounds[i]))\n break\n return s\n # find x for which f(x) = y by bisection\n def mybisect(y, f, lo, hi):\n initdx = abs(hi - lo)\n flo, fhi = f(lo) - y, f(hi) - y\n if 0. == flo: return lo\n elif 0. == fhi: return hi\n mid = .5 * (lo + hi)\n while (abs(hi - lo) > 1e-15 and abs(hi - lo) / initdx > 1e-15):\n fmid = f(mid) - y\n if 0. 
== fmid: break\n elif flo * fmid < 0.: hi, fhi = mid, fmid\n elif fmid * fhi < 0.: lo, flo = mid, fmid\n else: raise ValueError('no sign change in f(x) between %g and %g'\n % (lo, hi))\n mid = .5 * (lo + hi)\n return mid\n # find binning with roughly same stats by inverting the CDF by bisection\n lo, hi, binsum = mistag.getMin(), mistag.getMax(), cdf(mistag.getMax())\n retVal = [ lo ]\n for i in xrange(1, config['NMistagCategories']):\n retVal.append(mybisect(binsum *\n float(i) / float(config['NMistagCategories']), cdf, lo, hi))\n retVal.append(hi)\n print 'INFO: suggested mistag category bounds: %s' % str(retVal)\n return retVal", "def weightHistogram(self, min=None, max=None, nbins=10):\n raise NotImplementedError", "def createBinsByEntropy(self, data, structure, colName, numOfBins):\n splits = self.miningCalculator.getBestSplitsInDataByInfoGain(data, structure, colName, numOfBins-1)\n splits.sort()\n bins = {\"value<=\"+str(splits[0]): lambda x: x <= splits[0]}\n if len(splits) > 1:\n for i in range(1, numOfBins-1):\n bins[str(splits[i-1]) + '<value<=' + str(splits[i])] = (lambda x: splits[i-1] < x <= splits[i])\n bins[\"value>\" + str(splits[len(splits)-1])] = (lambda x: x > splits[len(splits)-1])\n return bins", "def binarize(h):\n # 1 (value >= 0.5)\n # 0 (value < 0.5)\n b = F.floor(h + 0.5)\n return b", "def bucket_for_value(self, value):\n\n # bisect.bisect_left is wrong because the buckets are of [lower, upper) form\n return bisect.bisect(self._lower_bounds, value) - 1", "def bin_data(x, N_bins=100, xmin=0.0, xmax=1.0, density=False):\n\n hist_y, hist_edges = np.histogram(x, bins=N_bins, range=(xmin, xmax), density=density)\n hist_x = 0.5 * (hist_edges[1:] + hist_edges[:-1])\n hist_sy = np.sqrt(hist_y)\n hist_mask = hist_y > 0\n\n return hist_x, hist_y, hist_sy, hist_mask", "def binning_axis(self) -> int:\r\n return 0", "def histogram_discrete(name,\n data,\n bucket_min,\n bucket_max,\n step=None,\n description=None):\n summary_metadata = metadata.create_summary_metadata(\n display_name=None, description=description)\n summary_scope = (getattr(tf.summary.experimental, 'summary_scope', None)\n or tf.summary.summary_scope)\n with summary_scope(\n name, 'histogram_summary',\n values=[data, bucket_min, bucket_max, step]) as (tag, _):\n with tf.name_scope('buckets'):\n bucket_count = bucket_max - bucket_min + 1\n data = data - bucket_min\n one_hots = tf.one_hot(\n tf.reshape(data, shape=[-1]), depth=bucket_count)\n bucket_counts = tf.cast(\n tf.reduce_sum(input_tensor=one_hots, axis=0), tf.float64)\n edge = tf.cast(tf.range(bucket_count), tf.float64)\n # histogram can not draw when left_edge == right_edge\n left_edge = edge - 1e-12\n right_edge = edge + 1e-12\n tensor = tf.transpose(\n a=tf.stack([left_edge, right_edge, bucket_counts]))\n\n return tf.summary.write(\n tag=tag, tensor=tensor, step=step, metadata=summary_metadata)", "def histogram(data,binwidth, xmin,xmax):\n bins = arange(xmin,xmax, binwidth)\n binsc = bins + (0.5 * binwidth)\n try: #FIXME: use updated numpy.histogram\n histo = numpyhisto(data, bins, new=False)[0]\n except:\n histo = numpyhisto(data, bins)[0]\n return binsc[:len(histo)], histo", "def get_histogram(self):\n\n for bin in range(self.bins.size):\n bin_inf = self.bins[bin]\n try: bin_sup = self.bins[bin + 1]\n except IndexError: bin_sup = self.vmax\n self.hist[bin] = np.sum(\n (self.values >= bin_inf)*(self.values < bin_sup))\n\n binned_values = np.sum(self.hist)\n if binned_values == 0: return self.hist # no binned value\n else: self.hist /= 
np.sum(self.hist)\n return self.hist", "def get_bins(size, n, max_value):\n bin_lims = get_bin_lims(n, max_value)\n return sort_by_rows(np.array(list(itertools.product(bin_lims, repeat=size))))", "def eqf_binning(t, n_bins):\n t_bins= []\n t= sorted(t)\n n_items= int(len(t)/n_bins)\n\n for i in range(1, n_bins):\n t_bins.append(t[int(i*n_items)])\n t_bins.append(np.max(t) + 0.01)\n t_binning= np.digitize(t, t_bins)\n return t_binning", "def create_bins(start, end, n_bins):\n bins = np.linspace(start, end, n_bins)\n return bins", "def make_age_bins(bin_size=1, lower=40, upper=69):\n bins = np.arange(lower, upper+1, bin_size)\n bins = np.append(bins, upper+1)\n print(bins)\n return bins", "def histogram_continuous(name,\n data,\n bucket_min=None,\n bucket_max=None,\n bucket_count=DEFAULT_BUCKET_COUNT,\n step=None,\n description=None):\n summary_metadata = metadata.create_summary_metadata(\n display_name=None, description=description)\n summary_scope = (getattr(tf.summary.experimental, 'summary_scope', None)\n or tf.summary.summary_scope)\n with summary_scope(\n name,\n 'histogram_summary',\n values=[data, bucket_min, bucket_max, bucket_count, step]) as (tag,\n _):\n with tf.name_scope('buckets'):\n data = tf.cast(tf.reshape(data, shape=[-1]), tf.float64)\n if bucket_min is None:\n bucket_min = tf.reduce_min(data)\n if bucket_max is None:\n bucket_max = tf.reduce_min(data)\n range_ = bucket_max - bucket_min\n bucket_width = range_ / tf.cast(bucket_count, tf.float64)\n offsets = data - bucket_min\n bucket_indices = tf.cast(\n tf.floor(offsets / bucket_width), dtype=tf.int32)\n clamped_indices = tf.clip_by_value(bucket_indices, 0,\n bucket_count - 1)\n one_hots = tf.one_hot(clamped_indices, depth=bucket_count)\n bucket_counts = tf.cast(\n tf.reduce_sum(input_tensor=one_hots, axis=0), dtype=tf.float64)\n edges = tf.linspace(bucket_min, bucket_max, bucket_count + 1)\n edges = tf.concat([edges[:-1], [tf.cast(bucket_max, tf.float64)]],\n 0)\n edges = tf.cast(edges, tf.float64)\n left_edges = edges[:-1]\n right_edges = edges[1:]\n tensor = tf.transpose(\n a=tf.stack([left_edges, right_edges, bucket_counts]))\n return tf.summary.write(\n tag=tag, tensor=tensor, step=step, metadata=summary_metadata)", "def hist_weights(p1, p2, z, zbins, n_chop=4, truncated=True):\n if not truncated:\n ixs = (z >= zbins[0]) & (z < zbins[-1])\n z = z[ixs]\n p1, p2 = p1[ixs], p2[ixs]\n\n n_zbins = len(zbins) - 1\n\n # Left closed, right open partitioning\n z0_bins = zbins\n z0_bins[-1] += 0.001\n z_ind = np.digitize(z, z0_bins)\n\n chop1 = np.linspace(min(p1), max(p1), n_chop)\n chop2 = np.linspace(min(p2), max(p2), n_chop)\n\n # CREATING A 3D DATACUBE OF WEIGHTS\n cube = np.zeros((n_zbins, n_chop - 1, n_chop - 1))\n\n for i in range(n_zbins):\n ind = (z >= zbins[i]) & (z < zbins[i + 1])\n cube[i] = np.histogram2d(p1[ind], p2[ind], bins=(chop1, chop2))[0]\n\n # Trim bins with no objects\n # Outer - parameter; Inner - redshift\n for i in range(n_chop - 1):\n for j in range(n_chop - 1):\n # Sets all bins to 0 if any one bin has no objects in it\n if 0 in cube[:, i, j]:\n cube[:, i, j] = 0\n\n cube_sum = np.sum(cube, axis=0)\n\n # A. 
NORMALIZED WEIGHTS ACROSS ALL REDSHIFTS\n p0_bins, p1_bins = chop1, chop2\n\n # <-- Required since histogram2d and digitize have different\n # binning schemes\n p0_bins[-1] += 0.001\n p1_bins[-1] += 0.001\n\n foo = np.digitize(p1, p0_bins)\n blah = np.digitize(p2, p1_bins)\n\n weight_mat = cube_sum / cube\n weight_mat[np.isnan(weight_mat)] = 0\n\n # To obtain consistent weights across all redshifts\n weight_mat /= np.linalg.norm(weight_mat, axis=(1, 2))[:, None, None]\n\n # Final histogram weights to be applied\n h_weights = weight_mat[z_ind - 1, foo - 1, blah - 1]\n\n # # To verify that the histogram rebinning has been done correctly\n # for i in range(n_zbins):\n # ind = (z >= zbins[i]) & (z < zbins[i + 1])\n # plt.figure()\n # plt.hist2d(p1[ind], p2[ind], bins=(chop1, chop2), weights=h_weights[ind], normed=True)[0]\n # plt.colorbar()\n # plt.show()\n\n return h_weights", "def bins(self, value):\n self.num_bins = int(value)", "def rebin(Data, width, mean=False, by_nbins=False) :\n \n # Input tells us whether to use mean or median.\n if mean :\n method = ma.mean\n else :\n method = ma.median\n\n if by_nbins :\n width = int(width)\n if width <= 1 :\n raise ValueError(\"Invalid number of bins to average\")\n # Get new axis parameters.\n new_cdelt = width*Data.field['CDELT1']\n nbins = int(sp.ceil(float(Data.dims[-1])/width))\n new_centre = nbins//2 + 1\n Data.calc_freq()\n Data.field['CRVAL1'] = Data.freq[int((new_centre+0.5)*width)]\n # Case where evenly divisable (much more efficient).\n if Data.dims[-1] % width == 0:\n new_data = Data.data\n new_data.shape = Data.data.shape[:-1] + (nbins, width)\n new_data = method(new_data, -1)\n else :\n # Allowcate memory for Data array.\n new_data = ma.empty(Data.dims[:3] + (nbins,))\n # Loop over new bins and rebin.\n for ii in xrange(nbins) :\n new_data[:,:,:,ii] = method(\n Data.data[:,:,:,ii*width:(ii+1)*width],3)\n Data.set_data(new_data)\n else :\n # Convert to Hertz.\n width = width*1.0e6\n new_cdelt = width * sp.sign(Data.field['CDELT1'])\n # Figure out some basics.\n Data.calc_freq()\n freq = sp.array(Data.freq)\n # Extra bit on the bandwidth is because frequency labels are channel \n # centre.\n bandwidth = abs(freq[-1] - freq[0]) + abs(Data.field['CDELT1'])\n nbins = int(bandwidth//width)\n new_centre = int((Data.field['CRPIX1']-1)\n * abs(Data.field['CDELT1'])/width)\n new_dims = Data.dims[0:-1] + (nbins, )\n # Get old data and allowcate memory for new data.\n old_data = ma.array(Data.data, copy=True)\n Data.set_data(ma.zeros(new_dims))\n new_freq = Data.field['CRVAL1'] + new_cdelt*(sp.arange(nbins)\n - new_centre)\n for ii in range(1,nbins-1) :\n inds = (sp.logical_and(\n abs(freq - new_freq[ii]) <= abs(freq - new_freq[ii+1]),\n abs(freq - new_freq[ii]) < abs(freq - new_freq[ii-1])))\n subdata = (old_data[:,:,:,inds])\n Data.data[:,:,:,ii] = method(subdata, 3)\n # Above loop breaks for end points... 
deal with them.\n inds, = sp.where(abs(freq - new_freq[0]) <= abs(freq - new_freq[1]))\n subdata = old_data[:,:,:,inds]\n Data.data[:,:,:,0] = method(subdata, 3)\n inds, = sp.where(abs(freq-new_freq[nbins-1])\n < abs(freq-new_freq[nbins-2]))\n subdata = old_data[:,:,:,inds]\n Data.data[:,:,:,nbins-1] = method(subdata, 3)\n Data.freq = new_freq\n Data.field['CRPIX1'] = sp.array(new_centre + 1, dtype=int)\n Data.field['CDELT1'] = sp.array(new_cdelt, dtype=float)", "def bin_width_doane(a):\n bad = np.isnan(a) | np.isinf(a)\n data = a[~bad]\n n = data.size\n g1 = skew(data)\n sigma_g1 = np.sqrt(6 * (n - 2) / ((n + 1) * (n + 3)))\n k = 1 + np.log2(n) + np.log2(1 + np.abs(g1) / sigma_g1)\n acc = (data.max() - data.min()) / k\n return acc", "def relative_position_bucket(relative_position,\n bidirectional: bool = True,\n num_buckets: int = 32,\n max_distance: int = 128):\n ret = 0\n relative_position = -relative_position\n if bidirectional:\n assert num_buckets % 2 == 0, 'When bidirectional is True, the number of buckets must be ' \\\n 'divisible by 2.'\n num_buckets //= 2\n ret = ret + (relative_position < 0).astype(np.int32) * num_buckets\n relative_position = np.abs(relative_position)\n else:\n # Clip all the negative values to 0\n relative_position = np.clip(relative_position, a_min=0, a_max=None)\n # Now, the relative_position is in the range [0, inf)\n\n # Half of the buckets deal with the exact increments,\n # i.e., 0, 1, 2, ..., max_exact - 1, where max_exact = num_buckets // 2\n max_exact = num_buckets // 2\n is_small = relative_position < max_exact\n\n # The other half of the buckets are for logarithmically bigger bins in positions up to\n # max_distance\n val_if_large = max_exact + (\n np.log(relative_position.astype(np.float32) / max_exact)\n / math.log(max_distance / max_exact) * (num_buckets - max_exact)).astype(np.int32)\n val_if_large = np.minimum(val_if_large, num_buckets - 1)\n ret = ret + np.where(is_small, relative_position, val_if_large)\n return ret", "def get_srr_bins(p_data):\n \n n_data = len(p_data)\n \n n_bins = np.sqrt(n_data)\n \n return int(n_bins)", "def estimate_bucket_boundaries(len2freq, min_waste=0.01, max_waste=0.1, min_aggr=0.01):\n buckets = init_buckets(len2freq)\n\n sizes = [sum(l2f.values()) for _, l2f in buckets]\n start = sizes.index(max(sizes))\n\n before = buckets[:start]\n middle = buckets[start]\n after = buckets[start + 1:]\n\n before, middle, after = group_buckets(before, middle, after, min_waste, max_waste, min_aggr)\n result = [middle]\n\n while len(before):\n middle = before[-1]\n before = before[:-1]\n before, middle, _ = group_buckets(before, middle, result + after, min_waste, max_waste, min_aggr)\n\n result = [middle] + result\n\n while len(after):\n middle = after[0]\n after = after[1:]\n _, middle, after = group_buckets(result, middle, after, min_waste, max_waste, min_aggr)\n\n result = result + [middle]\n\n original = Counter(len2freq)\n restored = sum([Counter(r[1]) for r in result], Counter())\n if not set(original.keys()) == set(restored.keys()) or not set(original.values()) == set(restored.values()):\n raise AssertionError('Estimated boundaries differs from source lengths or frequencies')\n\n return [r[0] for r in result]", "def solve_n_bins(x):\n from scipy.stats import iqr\n\n x = np.asarray(x)\n hat = 2 * iqr(x) / (len(x) ** (1 / 3))\n\n if hat == 0:\n return int(np.sqrt(len(x)))\n else:\n return int(np.ceil((x.max() - x.min()) / hat))", "def solve_n_bins(x):\n from scipy.stats import iqr\n\n x = np.asarray(x)\n hat = 2 * iqr(x) / 
(len(x) ** (1 / 3))\n\n if hat == 0:\n return int(np.sqrt(len(x)))\n else:\n return int(np.ceil((x.max() - x.min()) / hat))", "def bin_discretize(self, variables=[], bins=3,\n min_const_samples_bin_size=1.0/3):\n self.edges=np.zeros((self.arity.size,bins+1))\n for i in variables:\n un_cnt=np.unique(self.data[:,i],return_counts=True)\n constvals=un_cnt[0][un_cnt[1]>self.data.shape[0]*min_const_samples_bin_size]\n mask=np.ones(self.data.shape[0],dtype=bool)\n if constvals.size>0:\n for j,cv in enumerate(constvals):\n mask*=(self.data[:,i]!=cv)\n self.data[self.data[:,i]==cv,i]=j\n\n size=np.sum(mask)/bins\n sorted_i=np.argsort(self.data[mask,i])\n edges=[self.data[mask,i][sorted_i[int(size*num)-1]] for num in range(1,bins)]\n self.edges[i]=[self.data[mask,i][sorted_i[0]]]+edges+[self.data[mask,i][sorted_i[-1]]]\n self.data[mask,i]=np.searchsorted(edges,self.data[mask,i])+constvals.size\n self.arity[i]=len(edges)+1+constvals.size", "def binning ( self , axis , name = '' ) :\n assert isinstance ( axis , ROOT.TAxis ),\\\n 'Invalid axis %s/%s' % ( axis , type ( axis ) )\n\n ## uniform binning?\n if not axis.IsVariableBinSize() : \n return ROOT.RooFit.Binning ( axis.GetNbins() , axis.GetXmin() , axis.GetXmax() )\n ##\n xbins = axis.GetXbins().GetArray()\n rb = ROOT.RooBinning ( axis.GetNbins() , xbins , name )\n ##\n self.aux_keep.append ( rb )\n ##\n return ROOT.RooFit.Binning ( rb )", "def createBinsByEqualDepth(self, data, colIndex, numOfBins):\n colData = list(map(lambda x: float(x[colIndex]), data))\n Depth, splittedData, index = int(((len(colData) / numOfBins) + 1)), [], 0\n for i in range(0, numOfBins):\n splittedData, index, Depth = splittedData + [colData[index:Depth]], Depth, Depth + Depth\n bins = {\"value<=\" + str(max(splittedData[0])): lambda x: x <= max(splittedData[0])}\n index = 1\n while index < numOfBins-1:\n if max(splittedData[index-1]) != max(splittedData[index]):\n val1 = max(splittedData[index-1])\n val2 = max(splittedData[index])\n bins[str(val1) + '<value<=' + str(val2)] = (lambda x: val1 < x <= val2)\n index += 1\n bins[\"value>\" + str(max(splittedData[index-1]))] = (lambda x: x > max(splittedData[index-1]))\n return bins", "def test_num_bins(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df1 = pd.DataFrame({'A': [0, 2, 4, 5, 7, 9, 11, 13, 13, 15]})\n df2 = pd.DataFrame({'A': [2, 4, 4, 6, 8, 7, 10, 14, 17, 19]})\n\n # building 1d-, 2d-, and 3d-histogram (iteratively)\n hist2 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n hist3 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n hist4 = hg.Bin(num=20, low=0.0, high=20., quantity=unit('A'))\n hist5 = hg.Bin(num=20, low=0.0, high=20., quantity=unit('A'))\n hist6 = hg.Bin(num=201, low=0.0, high=1.005)\n\n # fill them\n hist2.fill.numpy(df1)\n hist3.fill.numpy(df2)\n hist4.fill.numpy(df1)\n hist5.fill.numpy(df2)\n\n assert hist2.num_bins() == 16\n assert hist3.num_bins() == 18\n assert hist4.num_bins() == 20\n assert hist5.num_bins() == 20\n assert hist6.num_bins() == 201\n\n assert hist2.num_bins(low=10, high=25) == 15\n assert hist3.num_bins(low=10, high=25) == 15\n assert hist4.num_bins(low=10, high=25) == 10\n assert hist5.num_bins(low=10, high=25) == 10\n assert hist6.num_bins(low=0.2089, high=0.9333) == 146\n\n assert hist2.num_bins(low=-10, high=28) == 38\n assert hist3.num_bins(low=-10, high=28) == 38\n assert hist4.num_bins(low=-10, high=28) == 20\n assert hist5.num_bins(low=-10, 
high=28) == 20\n assert hist6.num_bins(low=0.205, high=0.935) == 146", "def create_bin_boundaries(config, epoch_df, data_type, obs_per_bin, verbose=False):\n \n edges = create_edges_set(config, epoch_df, data_type)\n \n boundaries = []\n for edge in edges:\n start, end, freq = edge\n bin_size = freq * obs_per_bin\n boundaries.append(np.arange(start, end, bin_size))\n boundaries = np.concatenate(boundaries)\n \n return boundaries", "def bin_width(self):\n return self.bins[2] - self.bins[1]", "def gc_bin_bedfile(\n bedfile, genome, number, length=200, bins=None, random_state=None, min_bin_size=100\n):\n if bins is None:\n bins = [(0.0, 0.2), (0.8, 1.0)]\n for b in np.arange(0.2, 0.799, 0.05):\n bins.append((round(b, 2), round(b + 0.05, 2)))\n bins = sorted(bins)\n\n if number < len(bins):\n raise ValueError(\"Number of sequences requested < number of bins\")\n\n fname = os.path.join(\n CACHE_DIR, f\"{os.path.basename(genome)}.gcfreq.{min_bin_size}.feather\"\n )\n try:\n df = pd.read_feather(fname)\n except FileNotFoundError:\n if not os.path.exists(CACHE_DIR):\n os.makedirs(CACHE_DIR)\n create_gc_bin_index(genome, fname, min_bin_size=min_bin_size)\n df = pd.read_feather(fname)\n\n if length >= min_bin_size:\n col = f\"w{((length + min_bin_size // 2) // min_bin_size) * min_bin_size}\"\n else:\n logger.warning(\n f\"For regions smaller than {min_bin_size} nt, GC% will not be exact\"\n )\n col = f\"w{min_bin_size}\"\n\n if col not in df.columns:\n df[col] = (\n df.iloc[:, 3]\n .rolling(length // min_bin_size, min_periods=length // min_bin_size)\n .mean()\n )\n df[col.replace(\"w\", \"n\")] = (\n df.iloc[:, 3]\n .rolling(length // min_bin_size, min_periods=length // min_bin_size)\n .sum()\n )\n\n df = df[df[col.replace(\"w\", \"n\")] < 0.1 * length]\n n = number // len(bins)\n\n with open(bedfile, \"w\") as f:\n pass\n\n with open(bedfile, \"a\") as f:\n for b_start, b_end in bins:\n df_bin = df[(df[col] > b_start) & (df[col] <= b_end)].copy()\n df_bin[\"start\"] = df_bin[\"end\"] - length\n df_bin = df_bin[df_bin[\"start\"] > 0]\n if df_bin.shape[0] > 0:\n df_bin = df_bin.sample(n, replace=True, random_state=random_state)\n df_bin[\"bin\"] = f\"{b_start:.2f}-{b_end:.2f}\"\n df_bin[[\"chrom\", \"start\", \"end\", \"bin\"]].to_csv(\n f, sep=\"\\t\", header=False, index=False\n )", "def histogramdd(sample, bins=10, range=None, weights=None, density=False):\n if isinstance(sample, cupy.ndarray):\n # Sample is an ND-array.\n if sample.ndim == 1:\n sample = sample[:, cupy.newaxis]\n nsamples, ndim = sample.shape\n else:\n sample = cupy.stack(sample, axis=-1)\n nsamples, ndim = sample.shape\n\n nbin = numpy.empty(ndim, int)\n edges = ndim * [None]\n dedges = ndim * [None]\n if weights is not None:\n weights = cupy.asarray(weights)\n\n try:\n nbins = len(bins)\n if nbins != ndim:\n raise ValueError(\n 'The dimension of bins must be equal to the dimension of the '\n ' sample x.'\n )\n except TypeError:\n # bins is an integer\n bins = ndim * [bins]\n\n # normalize the range argument\n if range is None:\n range = (None,) * ndim\n elif len(range) != ndim:\n raise ValueError('range argument must have one entry per dimension')\n\n # Create edge arrays\n for i in _range(ndim):\n if cupy.ndim(bins[i]) == 0:\n if bins[i] < 1:\n raise ValueError(\n '`bins[{}]` must be positive, when an integer'.format(i)\n )\n smin, smax = _get_outer_edges(sample[:, i], range[i])\n num = int(bins[i] + 1) # synchronize!\n edges[i] = cupy.linspace(smin, smax, num)\n elif cupy.ndim(bins[i]) == 1:\n if not isinstance(bins[i], 
cupy.ndarray):\n raise ValueError('array-like bins not supported')\n edges[i] = bins[i]\n if (edges[i][:-1] > edges[i][1:]).any(): # synchronize!\n raise ValueError(\n '`bins[{}]` must be monotonically increasing, when an '\n 'array'.format(i)\n )\n else:\n raise ValueError(\n '`bins[{}]` must be a scalar or 1d array'.format(i)\n )\n\n nbin[i] = len(edges[i]) + 1 # includes an outlier on each end\n dedges[i] = cupy.diff(edges[i])\n\n # Compute the bin number each sample falls into.\n ncount = tuple(\n # avoid cupy.digitize to work around NumPy issue gh-11022\n cupy.searchsorted(edges[i], sample[:, i], side='right')\n for i in _range(ndim)\n )\n\n # Using digitize, values that fall on an edge are put in the right bin.\n # For the rightmost bin, we want values equal to the right edge to be\n # counted in the last bin, and not as an outlier.\n for i in _range(ndim):\n # Find which points are on the rightmost edge.\n on_edge = sample[:, i] == edges[i][-1]\n # Shift these points one bin to the left.\n ncount[i][on_edge] -= 1\n\n # Compute the sample indices in the flattened histogram matrix.\n # This raises an error if the array is too large.\n xy = cupy.ravel_multi_index(ncount, nbin)\n\n # Compute the number of repetitions in xy and assign it to the\n # flattened histmat.\n hist = cupy.bincount(xy, weights, minlength=numpy.prod(nbin))\n\n # Shape into a proper matrix\n hist = hist.reshape(nbin)\n\n # This preserves the (bad) behavior observed in NumPy gh-7845, for now.\n hist = hist.astype(float) # Note: NumPy uses casting='safe' here too\n\n # Remove outliers (indices 0 and -1 for each dimension).\n core = ndim * (slice(1, -1),)\n hist = hist[core]\n\n if density:\n # calculate the probability density function\n s = hist.sum()\n for i in _range(ndim):\n shape = [1] * ndim\n shape[i] = nbin[i] - 2\n hist = hist / dedges[i].reshape(shape)\n hist /= s\n\n if any(hist.shape != numpy.asarray(nbin) - 2):\n raise RuntimeError('Internal Shape Error')\n return hist, edges", "def _get_optimal_threshold(arr, num_bins=1001, num_quantized_bins=255):\n if not isinstance(arr, np.ndarray):\n raise TypeError('get_optimal_threshold only supports input type of np.ndarray,'\n ' while received type=%s' % (str(type(arr))))\n min_val = np.min(arr)\n max_val = np.max(arr)\n th = max(abs(min_val), abs(max_val))\n\n hist, hist_edges = np.histogram(arr, bins=num_bins, range=(-th, th))\n zero_bin_idx = num_bins // 2\n num_half_quantized_bins = num_quantized_bins // 2\n assert np.allclose(hist_edges[zero_bin_idx] + hist_edges[zero_bin_idx + 1],\n 0, rtol=1e-5, atol=1e-7)\n\n thresholds = np.zeros(num_bins // 2 + 1 - num_quantized_bins // 2)\n divergence = np.zeros_like(thresholds)\n quantized_bins = np.zeros(num_quantized_bins, dtype=np.int32)\n # i means the number of bins on half axis excluding the zero bin.\n for i in range(num_quantized_bins // 2,\n num_bins // 2 + 1):\n p_bin_idx_start = zero_bin_idx - i\n p_bin_idx_stop = zero_bin_idx + i + 1\n thresholds[i - num_half_quantized_bins] = hist_edges[p_bin_idx_stop]\n sliced_nd_hist = hist[p_bin_idx_start:p_bin_idx_stop]\n\n # generate reference distribution p\n p = sliced_nd_hist.copy()\n assert p.size % 2 == 1\n assert p.size >= num_quantized_bins\n # put left outlier count in p[0]\n left_outlier_count = np.sum(hist[0:p_bin_idx_start])\n p[0] += left_outlier_count\n # put right outlier count in p[-1]\n right_outlier_count = np.sum(hist[p_bin_idx_stop:])\n p[-1] += right_outlier_count\n # is_nonzeros[k] indicates whether hist[k] is nonzero\n is_nonzeros = 
(sliced_nd_hist != 0).astype(np.int32)\n\n # calculate how many bins should be merged to generate quantized distribution q\n num_merged_bins = p.size // num_quantized_bins\n # merge hist into num_quantized_bins bins\n for j in range(num_quantized_bins):\n start = j * num_merged_bins\n stop = start + num_merged_bins\n quantized_bins[j] = sliced_nd_hist[start:stop].sum()\n quantized_bins[-1] += sliced_nd_hist[num_quantized_bins * num_merged_bins:].sum()\n # expand quantized_bins into p.size bins\n q = np.zeros(p.size, dtype=np.float32)\n for j in range(num_quantized_bins):\n start = j * num_merged_bins\n if j == num_quantized_bins - 1:\n stop = -1\n else:\n stop = start + num_merged_bins\n norm = is_nonzeros[start:stop].sum()\n if norm != 0:\n q[start:stop] = float(quantized_bins[j]) / float(norm)\n q[sliced_nd_hist == 0] = 0\n p = _smooth_distribution(p)\n # There is a chance that q is an invalid probability distribution.\n try:\n q = _smooth_distribution(q)\n except ValueError:\n divergence[i - num_half_quantized_bins] = float(\"inf\")\n else:\n divergence[i - num_half_quantized_bins] = stats.entropy(p, q)\n quantized_bins[:] = 0\n\n min_divergence_idx = np.argmin(divergence)\n min_divergence = divergence[min_divergence_idx]\n opt_th = thresholds[min_divergence_idx]\n return min_val, max_val, min_divergence, opt_th", "def test_bin_width(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df1 = pd.DataFrame({'A': [0, 1, 2, 3, 4, 3, 2, 1, 1, 1]})\n\n # building test histograms\n hist2 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n hist3 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n hist4 = hg.Bin(num=20, low=0.0, high=10., quantity=unit('A'))\n hist5 = hg.Bin(num=20, low=0.0, high=10., quantity=unit('A'))\n\n # fill them\n hist2.fill.numpy(df1)\n hist4.fill.numpy(df1)\n\n assert hist2.bin_width() == 1.0\n assert hist3.bin_width() == 1.0\n assert hist4.bin_width() == 0.5\n assert hist5.bin_width() == 0.5", "def get_bins(data) :\n\tbins=np.unique(data)\n\treturn np.append(bins[~np.isnan(bins)], max(bins) +1)", "def hist_bins(bins, vals):\r\n\r\n hist = zeros(len(bins))\r\n j = 0\r\n for i in vals:\r\n while bins[j] < i:\r\n j += 1\r\n hist[j] += 1\r\n\r\n return asarray(bins), hist", "def test_nonmonotonic_bins(self):\n\n with pytest.raises(ValueError) as verr:\n avg.median2D(self.testInst, np.array([0., 300., 100.]), 'longitude',\n np.array([0., 24., 13.]), 'mlt',\n ['dummy1', 'dummy2', 'dummy3'], auto_bin=False)\n\n estr = 'bins must be monotonically increasing or decreasing'\n assert str(verr).find(estr) >= 0\n\n return", "def ahistogram (inarray,numbins=10,defaultlimits=None,printextras=1):\r\n inarray = N.ravel(inarray) # flatten any >1D arrays\r\n if (defaultlimits <> None):\r\n lowerreallimit = defaultlimits[0]\r\n upperreallimit = defaultlimits[1]\r\n binsize = (upperreallimit-lowerreallimit) / float(numbins)\r\n else:\r\n Min = N.minimum.reduce(inarray)\r\n Max = N.maximum.reduce(inarray)\r\n estbinwidth = float(Max - Min)/float(numbins) + 1e-6\r\n binsize = (Max-Min+estbinwidth)/float(numbins)\r\n lowerreallimit = Min - binsize/2.0 #lower real limit,1st bin\r\n bins = N.zeros(numbins)\r\n extrapoints = 0\r\n for num in inarray:\r\n try:\r\n if (num-lowerreallimit) < 0:\r\n extrapoints = extrapoints + 1\r\n else:\r\n bintoincrement = int((num-lowerreallimit) / float(binsize))\r\n bins[bintoincrement] = bins[bintoincrement] + 1\r\n except: # point outside 
lower/upper limits\r\n extrapoints = extrapoints + 1\r\n if (extrapoints > 0 and printextras == 1):\r\n print '\\nPoints outside given histogram range =',extrapoints\r\n return (bins, lowerreallimit, binsize, extrapoints)", "def bucket_intervals(alpha, bucket_sizes, sigma):\n heads = array(\"l\", [0] * sigma)\n tails = array(\"l\", [0] * sigma)\n j = 0\n for i in range(len(alpha)):\n heads[alpha[i]] = j\n j += bucket_sizes[alpha[i]]\n tails[alpha[i]] = j - 1\n\n # print_buckets(heads)\n # print_buckets(tails)\n return heads, tails", "def hist(sig, nbins, r):\n\n histsig, bin_edges = np.histogram(sig, bins=nbins[0], range=[-r[0], r[0]], density=True) #TODO:subsampling parameter\n\n # bin_edges = bin_edges[:-1]\n # bin_edges += (bin_edges[1]-bin_edges[0])/2.\n\n return tuple(histsig)", "def __init__(self, bucket_ranges):\n # An array of the histogram bucket boundaries, such as 1, 10, 30, 100\n self.__bucket_ranges = list(bucket_ranges)\n last_value = None\n for i in self.__bucket_ranges:\n if last_value is not None and i < last_value:\n raise ValueError(\"The bucket_ranges argument must be sorted.\")\n else:\n last_value = i\n\n # __counts[i] holds the total number of values we have seen >= to __boundaries[i-1] and < __boundaries[i]\n self.__counts = [0] * len(bucket_ranges)\n # __overflows holds the number of values >= __boundaries[-1]\n self.__overflow = 0\n # The minimum and maximum values seen.\n self.__min = None\n self.__max = None\n # The total number of values collected.\n self.__total_count = 0\n # The sum of the values collected\n self.__total_values = 0", "def histogram_function(r_min, a, N, nbins):\n r_min_no_selfdistance = r_min - np.identity(len(r_min)) #remove identitity matrix so own distance will be treated as 0\n histarray = np.array(r_min_no_selfdistance).flatten()\n histogram = np.histogram(histarray,bins=np.linspace(0,a,nbins))\n histogram[0][0] = histogram[0][0] - N # remove N from the first bin to prevent it counting particle distance to itself\n return histogram[0]", "def to_bins(reward, num_bins, out=None):\n if out is None:\n out = torch.zeros(num_bins, device=reward.device, dtype=reward.dtype)\n reward = max(0, min(reward.item(), 1))\n ind = math.floor(reward*(num_bins-1))\n out[ind] = 1\n return out", "def create_random_sample(idx_bins,count_array):\n idxs=[]\n for i,x in enumerate(count_array):\n if x > 0:\n idxs.extend(np.random.choice(idx_bins[i],size=x,replace=False))\n return idxs", "def binrange(_min, _max, stepsize, include_upper=False):\n _min = _min - _min % stepsize\n _max = _max - _max % stepsize + stepsize * (1 + include_upper)\n return np.arange(_min, _max, stepsize)", "def binaryBoundedOneSample(self, low=0., hi=+1., size=1):\n\n thisSample = np.random.uniform(low=low, high=hi, size=size)\n \n return thisSample", "def GeometricBucketer(growth_factor=10**0.2, num_finite_buckets=100):\n return Bucketer(width=0, growth_factor=growth_factor,\n num_finite_buckets=num_finite_buckets)", "def lhistogram (inlist,numbins=10,defaultreallimits=None,printextras=0):\r\n if (defaultreallimits <> None):\r\n if type(defaultreallimits) not in [ListType,TupleType] or len(defaultreallimits)==1: # only one limit given, assumed to be lower one & upper is calc'd\r\n lowerreallimit = defaultreallimits\r\n upperreallimit = 1.000001 * max(inlist)\r\n else: # assume both limits given\r\n lowerreallimit = defaultreallimits[0]\r\n upperreallimit = defaultreallimits[1]\r\n binsize = (upperreallimit-lowerreallimit)/float(numbins)\r\n else: # no limits given for histogram, both 
must be calc'd\r\n estbinwidth=(max(inlist)-min(inlist))/float(numbins) +1e-6 #1=>cover all\r\n binsize = ((max(inlist)-min(inlist)+estbinwidth))/float(numbins)\r\n lowerreallimit = min(inlist) - binsize/2 #lower real limit,1st bin\r\n bins = [0]*(numbins)\r\n extrapoints = 0\r\n for num in inlist:\r\n try:\r\n if (num-lowerreallimit) < 0:\r\n extrapoints = extrapoints + 1\r\n else:\r\n bintoincrement = int((num-lowerreallimit)/float(binsize))\r\n bins[bintoincrement] = bins[bintoincrement] + 1\r\n except:\r\n extrapoints = extrapoints + 1\r\n if (extrapoints > 0 and printextras == 1):\r\n print '\\nPoints outside given histogram range =',extrapoints\r\n return (bins, lowerreallimit, binsize, extrapoints)", "def get_buckets(self, first, last, num_buckets, hertz_cutoff=float(5)):\n # Pensar en la posibilidad de no aplicar PCA, permitir utilizar fft sobre una feature diferente, por ejemplo raiz-cuadrada(x2 + y2 + z2)\n if self.pca == True:\n pca = PCA(n_components=1, copy=True, whiten=True)\n numpy_data = array(self.data)\n transformed_dataset = PCA.fit_transform(pca, numpy_data)\n slice=transformed_dataset[first:last]\n else:\n slice = self.data[first:last]\n slice = [column[0] for column in slice]\n \n transformed = fft.fft(slice)\n absolute = [abs(complex) for complex in transformed]\n\n frequencies = self.get_frequencies()\n\n buckets = [0 for i in range(num_buckets)]\n width = hertz_cutoff / num_buckets\n sum_of_buckets = 0.0000001\n for i in range(1, len(absolute)):\n index = int(frequencies[i] / width)\n if index >= num_buckets:\n break\n buckets[index] += absolute[i]\n sum_of_buckets += absolute[i]\n\n #if args.normalize == 't':\n # buckets = map(lambda x: x/sum_of_buckets, buckets)\n\n return buckets", "def numpy_gw_hist(data, bins, scale):\n data = np.atleast_1d(data)\n bins = np.atleast_1d(bins)\n nbins, ndata = bins.size, data.size\n\n scale = np.zeros(ndata) + scale\n\n logsm_bin_matrix = np.repeat(\n bins, ndata).reshape((nbins, ndata)).astype('f4')\n data_matrix = np.tile(data, nbins).reshape((nbins, ndata)).astype('f4')\n smoothing_kernel_matrix = np.tile(\n scale, nbins).reshape((nbins, ndata)).astype('f4')\n\n cdf_matrix = norm.cdf(\n logsm_bin_matrix, loc=data_matrix, scale=smoothing_kernel_matrix)\n\n prob_bin_member = np.diff(cdf_matrix, axis=0) # Shape (nbins-1, ndata)\n\n total_num_bin_members = np.sum(\n prob_bin_member, axis=1) # Shape (nbins-1, )\n\n return total_num_bin_members", "def to_constant_bin_number(d,\n N_bin,\n weight_pos=None,\n key=None,\n lower_bound=None,\n upper_bound=None,\n ):\n\n isdict = isinstance(d,dict)\n\n if not hasattr(d,'__len__'):\n raise TypeError(\"d must be iterable\")\n\n if not isdict and hasattr(d[0], '__len__'):\n if weight_pos is not None:\n key = lambda x: x[weight_pos]\n if key is None:\n raise ValueError(\"Must provide weight_pos or key for tuple list\")\n\n if not isdict and key:\n new_dict = {i: val for i, val in enumerate(d)}\n d = {i: key(val) for i, val in enumerate(d)}\n isdict = True\n is_tuple_list = True\n else:\n is_tuple_list = False\n\n if isdict:\n\n #get keys and values (weights)\n keys_vals = d.items()\n keys = [ k for k, v in keys_vals ]\n vals = [ v for k, v in keys_vals ]\n\n #sort weights decreasingly\n ndcs = revargsort(vals)\n\n weights = get(vals, ndcs)\n keys = get(keys, ndcs)\n\n bins = [ {} for i in range(N_bin) ]\n else:\n weights = sorted(d,key=lambda x: -x)\n bins = [ [] for i in range(N_bin) ]\n\n #find the valid indices\n if lower_bound is not None and upper_bound is not None and 
lower_bound<upper_bound:\n valid_ndcs = filter(lambda i: lower_bound < weights[i] < upper_bound,range(len(weights)))\n elif lower_bound is not None:\n valid_ndcs = filter(lambda i: lower_bound < weights[i],range(len(weights)))\n elif upper_bound is not None:\n valid_ndcs = filter(lambda i: weights[i] < upper_bound,range(len(weights)))\n elif lower_bound is None and upper_bound is None:\n valid_ndcs = range(len(weights))\n elif lower_bound>=upper_bound:\n raise Exception(\"lower_bound is greater or equal to upper_bound\")\n\n valid_ndcs = list(valid_ndcs)\n\n weights = get(weights, valid_ndcs)\n\n if isdict:\n keys = get(keys, valid_ndcs)\n\n #the total volume is the sum of all weights\n V_total = sum(weights)\n\n #the first estimate of the maximum bin volume is \n #the total volume divided to all bins\n V_bin_max = V_total / float(N_bin)\n\n #prepare array containing the current weight of the bins\n weight_sum = [0. for n in range(N_bin) ]\n\n #iterate through the weight list, starting with heaviest\n for item, weight in enumerate(weights):\n\n if isdict:\n key = keys[item]\n\n #put next value in bin with lowest weight sum\n b = argmin(weight_sum)\n\n #calculate new weight of this bin\n new_weight_sum = weight_sum[b] + weight\n\n found_bin = False\n while not found_bin:\n\n #if this weight fits in the bin\n if new_weight_sum <= V_bin_max:\n\n #...put it in \n if isdict:\n bins[b][key] = weight\n else:\n bins[b].append(weight)\n\n #increase weight sum of the bin and continue with\n #next item \n weight_sum[b] = new_weight_sum\n found_bin = True\n\n else:\n #if not, increase the max volume by the sum of\n #the rest of the bins per bin\n V_bin_max += sum(weights[item:]) / float(N_bin)\n\n if not is_tuple_list:\n return bins\n else:\n new_bins = []\n for b in range(N_bin):\n new_bins.append([])\n for key in bins[b]:\n new_bins[b].append(new_dict[key])\n return new_bins", "def get_histogram(self):\n\n values_array = np.array(self.values)\n for bin0 in range(self.bins[0].size):\n bin_inf0 = self.bins[0][bin0]\n try: bin_sup0 = self.bins[0][bin0 + 1]\n except IndexError: bin_sup0 = self.vmax[0]\n values = values_array[\n (values_array[:, 0] >= bin_inf0)\n *(values_array[:, 0] < bin_sup0)][:, 1]\n for bin1 in range(self.bins[1].size):\n bin_inf1 = self.bins[1][bin1]\n try: bin_sup1 = self.bins[1][bin1 + 1]\n except IndexError: bin_sup1 = self.vmax[1]\n self.hist[bin0*self.Nbins[1] + bin1, 2] = (\n np.sum((values >= bin_inf1)*(values < bin_sup1)))\n\n if np.sum(self.hist[:, 2]) > 0: # there are binned values\n self.hist[:, 2] /= np.sum(self.hist[:, 2])\n return self.hist", "def test_nonmonotonic_bins(self):\n\n with pytest.raises(ValueError) as verr:\n avg.median1D(self.testInst, [0, 13, 5], self.test_label,\n self.test_data, auto_bin=False)\n\n estr = 'bins must be monotonically increasing or decreasing'\n assert str(verr).find(estr) >= 0\n\n return" ]
[ "0.68158084", "0.6405568", "0.6296729", "0.62813383", "0.62101144", "0.61591256", "0.61568004", "0.6148516", "0.61107266", "0.60635036", "0.60612935", "0.60541433", "0.60541433", "0.60535467", "0.60273194", "0.5927287", "0.5926463", "0.59147674", "0.58974785", "0.5892863", "0.58888173", "0.5884158", "0.5881197", "0.5856947", "0.58530104", "0.5840031", "0.583378", "0.58336747", "0.5806817", "0.57904476", "0.57569146", "0.57419634", "0.5740944", "0.5731742", "0.572133", "0.5711653", "0.56701857", "0.5660286", "0.5659003", "0.5658618", "0.56501055", "0.5642383", "0.5619372", "0.5615545", "0.5604408", "0.5602996", "0.5602996", "0.5599671", "0.5594199", "0.55929303", "0.5580563", "0.55745685", "0.55635136", "0.5547506", "0.5544293", "0.5540777", "0.5512641", "0.55102044", "0.5506086", "0.55060387", "0.54827666", "0.5471523", "0.54697174", "0.54673356", "0.5447625", "0.544594", "0.54446214", "0.5435442", "0.5429469", "0.5426553", "0.54236746", "0.54236746", "0.5423588", "0.54219407", "0.54213756", "0.54124504", "0.5409563", "0.53876233", "0.53827924", "0.5382048", "0.5379231", "0.53652936", "0.53612065", "0.53531325", "0.53504163", "0.53475124", "0.53463495", "0.5343429", "0.53351504", "0.5324148", "0.5321768", "0.531757", "0.5313142", "0.53129417", "0.53117573", "0.53077376", "0.5300783", "0.52823347", "0.5263851", "0.5256635", "0.5256543" ]
0.0
-1
Probability grouping of category variables
def probability_categorical(feature, label):
    assert feature.nunique()>2, 'feature category nums must be greater than 2.'
    t = pd.DataFrame({'feature':feature, 'label':label})
    cat = label.unique()
    cat = [(cat[i], cat[i+1]) for i in range(len(cat)-1)]
    prob = label.value_counts(1).to_dict()
    slope = [prob.get(i[0], 0)-prob.get(i[1], 0) for i in cat]
    slope_dict = t.feature.value_counts(1).to_dict()
    prob = t.groupby(['feature']).label.value_counts(1).to_dict()
    slope_dict = {i:{'category_rate':slope_dict[i], 'slope':[prob.get((i,j[0]), 0)-prob.get((i,j[1]), 0) for j in cat]} for i in slope_dict}
    for i in slope_dict:
        slope_dict[i]['slope_diff'] = sum([abs(slope[j]-slope_dict[i]['slope'][j]) for j in range(len(slope))])
    value1 = sorted([[[i], slope_dict[i]['slope_diff'], slope_dict[i]['category_rate']] for i in slope_dict], key=lambda x:x[1], reverse=1)
    distance = sorted([value1[i][1]-value1[i+1][1] for i in range(len(value1)-1)])
    std = pd.Series([i[1] for i in value1]).std()
    coupe = value1
    dis = distance[0]
    for k in distance:
        value = value1
        while 1:
            for i in range(len(value)-1):
                if value[i][1]-k<value[i+1][1]:
                    value[i+1][0] = value[i][0]+value[i+1][0]
                    value[i+1][1] = value[i][1]*value[i][2]/(value[i][2]+value[i+1][2])+value[i+1][1]*value[i+1][2]/(value[i][2]+value[i+1][2])
                    value[i+1][2] = value[i][2]+value[i+1][2]
                    value.remove(value[i])
                    break
            if i==len(value)-2:
                break
        if pd.Series([i[1] for i in value]).std()>std:
            coupe = value
            std = pd.Series([i[1] for i in value]).std()
            dis = k
    return {'group':{k:i for i,j in enumerate(coupe) for k in j[0]}, 'data':coupe, 'distance':dis, 'distance_index':f'{distance.index(dis)+1}/{len(distance)}', 'std':std}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ppf(self,x):\n return self.categoricalDist.ppf(x)", "def calc_priors(categories, data):\n counts = np.zeros(categories)\n for val in range(categories):\n counts[val] = np.count_nonzero(data.labels == val)\n return counts / len(data.labels)", "def categorical(pvals: np.ndarray) -> int:\n\n return sample_probabilities(pvals)() # faster than: np.argmax(np.random.multinomial(1, normalize(pvals)))", "def pdf(self,x):\n return self.categoricalDist.pdf(x)", "def grouping_cols(df, cat_percentage = 0.05, checking_itr = 10):", "def _class_distribution(y):\n unique, counts = np.unique(y, return_counts = True)\n\n percentages = counts / np.sum(counts)\n\n return unique, counts, percentages", "def purity_score(label, pred):\n \n df = pd.concat([label, pd.DataFrame(pred)], axis=1)\n df.set_axis(['label', 'pred'], axis=1, inplace=True)\n \n s = 0\n\n for x, cluster in df.groupby('pred'):\n s += cluster['label'].value_counts().iloc[0] # adding the most occuring class in a cluster\n\n return s / label.shape[0]", "def cdf(self,x):\n return self.categoricalDist.cdf(x)", "def conditional_probability(data, attr, cp_table):\n # gets class names for dataframe manipulation\n classes = attr.tail(1)['vars'].tolist()\n classlist = [classes[0][0], classes[0][1]]\n class0 = classlist[0]\n class1 = classlist[1]\n # number of instances beloning to each class\n nclass0 = cp_table.loc[0, class0].sum()\n nclass1 = cp_table.loc[0, class1].sum()\n total = nclass0 + nclass1\n # all probabilities include a laplace est of 1\n prior0 = (nclass0 + 1) / (total + 2)\n prior1 = (nclass1 + 1) / (total + 2)\n list0 = []\n list1 = []\n for index, row in cp_table.iterrows():\n numattr = len(attr.loc[index, 'vars'])\n numer0 = row[class0] + 1\n numer1 = row[class1] + 1\n denom0 = nclass0 + (1 * numattr)\n denom1 = nclass1 + (1 * numattr)\n cp0 = numer0 / denom0\n cp1 = numer1 / denom1\n list0.append(cp0)\n list1.append(cp1)\n # replacing columns in previous table with cond probs\n del cp_table[class0]\n del cp_table[class1]\n cp_table[class0] = list0\n cp_table[class1] = list1\n \n return cp_table, prior0, prior1", "def feature_prob(self, f, cat):\n if self.category_count(cat) == 0:\n return 0\n # The total number of times this feature appeared in this \n # category divided by the total number of items in this category\n pfc = self.feature_count(f, cat)\n pc = self.category_count(cat)\n return float(pfc)/pc", "def age_group_mixing():\n p = []\n for j in np.linspace(1,5,5):\n for k in np.linspace(1,5,5):\n if j == k:\n p.append(1)\n else:\n p.append(0.2**np.abs(j+1-k))\n p /= sum(p)\n return p", "def probabilities(self):\n raise NotImplementedError", "def evaluate_probabilities(self, batches):\n total_batches = batches.batches_per_epoch()\n catprobs = []\n for batch in range(total_batches):\n X_batch, y_batch = batches.get_batch()\n feed_dict = {\n self.x: X_batch,\n self.y: y_batch,\n self.keep_prob: 1.0}\n fetch_dict = {\n \"catprobs\": self.categorical_probabilities}\n result = self.session.run(fetch_dict, feed_dict)\n catprobs.append(result[\"catprobs\"])\n catprobs = np.concatenate(catprobs)\n return catprobs", "def calc_feature_probs(image_type, image_data, smoothing):\n counts = np.array([np.sum(image_data.features[image_data.labels == value], axis=0) + smoothing for value in range(image_type.categories)])\n denoms = np.array([np.count_nonzero(image_data.labels == value) + (smoothing * image_type.feature_kinds) for value in range(image_type.categories)])\n return counts / denoms[:, np.newaxis, np.newaxis]", "def 
calcProbability(self):\n for attribute in self.attributes:\n index = self.F2I[attribute]\n features = set([self.train[i][0][index] for i in range(len(self.train))])\n for feature in features:\n #all the true and false\n result_t = list(filter(lambda x: x[1]== True, self.train))\n total_t = len(result_t)\n result_f = list(filter(lambda x: x[1]== False, self.train))\n total_f= len(result_f)\n #the probability for the feature if its true or false\n t = len(list(filter(lambda x: x[0][index] == feature, result_t)))\n f = len(list(filter(lambda x: x[0][index] == feature, result_f)))\n prob_yes= t/total_t\n prob_no = f/total_f\n #assign the probabilities to the dictionaries\n self.probs_yes[(index,feature)] = prob_yes\n self.probs_no[(index,feature)] = prob_no", "def feature_prob(self, f, category): # Pr(A|B)\r\n if self.get_category_count(category) == 0:\r\n return 0\r\n fp = self.get_feature_count(f, category) / self.get_category_count(category)\r\n print \"Feature: %s | Feature count: %s | Category count: %s | Feature probability: %s\" % (f, self.get_feature_count(f, category), self.get_category_count(category), fp)\r\n return fp", "def get_probs(self, *vars):\n freqs = self.freq_counts([self.data.get_column_view(v)[0] for v in vars], [len(v.values) for v in vars])\n k = np.prod([len(v.values) for v in vars])\n return (freqs + self.alpha) / (np.sum(freqs) + self.alpha*k)", "def proportion_with_cardinals(df, PATH):\n \n df_test = df.copy()\n df_test['cardinal'] = df.title.apply(contains_cardinal)\n\n click = df_test[df_test.target == 1]\n non = df_test[df_test.target == 0]\n click = click.groupby(['cardinal']).target.count()\n non = non.groupby(['cardinal']).target.count()\n \n non = non[1]/non[0] * 100\n click = click[1]/click[0] * 100\n # plot the results\n fig, ax = plt.subplots(figsize=(12,6))\n sns.barplot(x=['Normal', \"Clickbait\"], y=[non, click], ax=ax)\n plt.title(\"Percent of Titles Containing Cardinal Numbers\", size = 24)\n plt.xlabel(\"Article Class\", size=24)\n plt.ylabel(\"Percent %\", size = 24)\n plt.ylim(0, 100)\n plt.xticks([0,1], label=[\"Normal\", \"Clickbait\"], size=24)\n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n \n return ax", "def test_probabilities_are_ok(self, seed):\n bins = defaultdict(int)\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", \"2\", \"3\", \"4\")\n categories = OrderedDict(zip(categories, probs))\n dim = Categorical(\"yolo\", categories)\n for _ in range(500):\n sample = dim.sample(seed=seed)[0]\n bins[sample] += 1\n for keys in bins.keys():\n bins[keys] /= float(500)\n for key, value in categories.items():\n assert abs(bins[key] - value) < 0.01", "def prob(self, doc, cat):\n catprob = self.category_count(cat) / self.total_count() # Pr(Category)\n docprob = self.doc_prob(doc, cat) # Pr(Document | Category)\n return docprob*Decimal(str(catprob)) # Pr(Category | Document)", "def categorical_sample(prob_n, np_random):\n prob_n = np.asarray(prob_n)\n csprob_n = np.cumsum(prob_n)\n return (csprob_n > np_random.rand()).argmax()", "def categorical_sample(prob_n, np_random):\n prob_n = np.asarray(prob_n)\n csprob_n = np.cumsum(prob_n)\n return (csprob_n > np_random.rand()).argmax()", "def _predict(self, probabilities):\n child_categories = []\n for i in range(0, self.category_level):\n child_categories.append({})\n for category_label in self.classifiers[i].classes_:\n main_category = self._get_categories(category_label)[0]\n if main_category not in child_categories[i]:\n child_categories[i][main_category] = []\n 
child_categories[i][main_category].append(category_label)\n\n # find the primary category\n max_score = -1\n primary_category_label = None\n\n for i in range(0, self.category_level):\n for category_label in self.classifiers[i].classes_:\n if probabilities[category_label] < 1e-9:\n continue\n total_score = 0\n main_category = self._get_categories(category_label)[0]\n candidates = child_categories[i][main_category]\n for actual_label in candidates:\n probability = probabilities[actual_label]\n if probability < 1e-9:\n continue\n score = self._cal_score(category_label, None, actual_label, i)\n total_score += score * probability\n if total_score > max_score:\n max_score = total_score\n primary_category_label = category_label\n\n # find the secondary category\n max_score = -1\n secondary_category_label = None\n for i in range(0, self.category_level):\n for category_label in self.classifiers[i].classes_:\n if probabilities[category_label] < 1e-9 and secondary_category_label:\n continue\n if category_label == primary_category_label:\n continue\n total_score = 0\n main_category = self._get_categories(category_label)[0]\n main_category2 = self._get_categories(primary_category_label)[0]\n candidates = list(set(child_categories[i][main_category] + child_categories[i][main_category2]))\n for actual_label in candidates:\n probability = probabilities[actual_label]\n if probability < 1e-9:\n continue\n score = self._cal_score(primary_category_label, category_label, actual_label, i)\n total_score += score * probability\n if total_score > max_score:\n max_score = total_score\n secondary_category_label = category_label\n\n return [self._get_categories(primary_category_label), self._get_categories(secondary_category_label)]", "def classify_samples(nd, sample_list, cat_spec):\n tmp = []\n\n for key in cat_spec:\n tmp.append([PSD_sym_KL(psd, cat_spec[key]) for psd in nd])\n\n KL = np.array(tmp).T\n\n # This is a confusing formula\n # amounts to: for a given sample, prob of belonging to class k is:\n # (1 / KL_k) / sum_k(KL_i) = sum_k\\i(KL_i) / sum_k(KL_i)\n prob = ((1 / KL).T / (1 / KL).sum(axis=1)).T\n\n row_masks = np.array([row == row.max() for row in prob])\n cats = [cat_spec.columns[mask][0] for mask in row_masks]\n\n items = [('label', cats)] + [('P({})'.format(lab), p) for lab, p in zip(cat_spec.columns, prob.T)]\n df = pd.DataFrame.from_items(items)\n df.index = sample_list\n\n return df", "def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs", "def getClassCounts(column, uniqueVal, decision, yes, no , total):\r\n dataDict = {} # a dictionary of labels\r\n for val in uniqueVal:\r\n label1 = val + '/Y'\r\n label2 = val + '/N'\r\n dataDict[label1] = 0; dataDict[label2] = 0\r\n for dec, at in zip(decision, column):\r\n if at == val and dec == 'No':\r\n dataDict[label2] += 1\r\n if at == val and dec == 'Yes':\r\n dataDict[label1] += 1\r\n dataDict[val] = (dataDict[label2]+ dataDict[label1])/ total\r\n dataDict[label2] = dataDict[label2] / no\r\n dataDict[label1] = dataDict[label1] / yes\r\n return dataDict", "def probability(self, xvalue):\n # Initialize key variables\n probability = {}\n bayesian = {}\n classes = self.classes()\n\n # Calculate the principal components of the individual xvalue\n p1p2 = self.pca_object.pc_of_x(xvalue, self.components)\n\n # Get probability of each class\n for cls in 
classes:\n # Initialize values for the loop\n sample_count = len(self.pca_object.xvalues(cls))\n\n # Get values for calculating gaussian parameters\n dimensions = len(p1p2)\n x_mu = p1p2 - self.meanvector(cls)\n covariance = self.covariance(cls)\n inverse_cov = np.linalg.inv(covariance)\n determinant_cov = np.linalg.det(covariance)\n\n # Work on the exponent part of the bayesian classifer\n power = -0.5 * np.dot(np.dot(x_mu, inverse_cov), x_mu.T)\n exponent = math.pow(math.e, power)\n\n # Determine the constant value\n pipart = math.pow(2 * math.pi, dimensions / 2)\n constant = pipart * math.sqrt(determinant_cov)\n\n # Determine final bayesian\n bayesian[cls] = (sample_count * exponent) / constant\n\n # Calculate bayesian probability\n denominator = bayesian[classes[0]] + bayesian[classes[1]]\n for cls in classes:\n probability[cls] = bayesian[cls] / denominator\n\n # Return\n return probability", "def learn_distributions(file_lists_by_category):\n ### TODO: Write your code here\n\n #get word frequncies in each email category\n #key:word, value: number of occurences in this email loader\n spam_dict = util.get_word_freq(file_lists_by_category[0])\n ham_dict = util.get_word_freq(file_lists_by_category[1])\n\n #get total length of each email loader\n spam_length = sum(spam_dict.values())\n ham_length = sum(ham_dict.values())\n\n #get the length of the dictionary: D\n dict_D = util.Counter()\n for key in spam_dict:\n dict_D[key] += spam_dict[key]\n for key in ham_dict:\n dict_D[key] += ham_dict[key]\n D = len(dict_D)\n\n spam_distribution = {}\n ham_distribution = {}\n #get the distributions of two email loaders\n for i in dict_D:\n spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)\n\n for i in dict_D:\n ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)\n #create the required tuple\n probabilities_by_category = (spam_distribution, ham_distribution)\n return probabilities_by_category", "def categorical_sample(prob_n, np_random = None):\n prob_n = np.asarray(prob_n)\n csprob_n = np.cumsum(prob_n)\n return (csprob_n > np.random.rand()).argmax()", "def test_categorical():\n # assert the distribution of the samples is close to the distribution of the data\n # using cstest:\n # - uniform (assert p-value > 0.05)\n # - very skewed / biased? 
(assert p-value > 0.05)\n # - inversely correlated (assert correlation < 0)", "def avarage_for_group(data: Dict[int, int]) -> float:\n values = data.values()\n summary = sum(values)\n return summary // len(data)", "def conditional_entropy(df, var, var_t):\n row_list = df \\\n .groupBy(var) \\\n .agg(count(\"*\").alias('num_entries')) \\\n .withColumn('all', lit('all')) \\\n .withColumn('total_num_entries', sql_sum('num_entries').over(Window.partitionBy('all'))) \\\n .withColumn('pcg', col('num_entries') / col('total_num_entries')) \\\n .select(var, 'pcg').collect()\n\n cat_and_weight = [(r[var], r['pcg']) for r in row_list]\n\n return sum([w * single_entropy(df=df.filter(col(var) == c), var=var_t) for (c, w) in cat_and_weight])", "def pie_chart_score(self, grouped):\n picked_scenario = self.scenario_dict[\"%d\" % (self.scenario_num-1)]\n distinct_enum_X = self.data_dict[picked_scenario[\"X\"]]['distinct_enum']\n score = 0\n if min(grouped) < 0:\n score = 0\n elif distinct_enum_X == 1:\n score = 0\n elif picked_scenario[\"Agg_func_Y\"] == \"avg\":\n score = 0\n elif distinct_enum_X >= 2 and distinct_enum_X <= 8:\n score += self.calculate_entropy(self.data_dict[picked_scenario[\"Y\"]]) / 8\n elif distinct_enum_X > 8:\n score += 4 * (self.calculate_entropy(self.data_dict[picked_scenario[\"Y\"]])) / distinct_enum_X\n if score > 3:\n score = 3\n return score", "def summarize_ancestral_prob_df(df):\n df = df.groupby(['pattern', 'allele_count_a', 'allele_count_b',\n 'anc_species_state', 'anc_pop_state',\n 'anc_species_pop']) \\\n .apply(lambda x: x['joint_prob'].sum()) \\\n .reset_index() \\\n .set_index(['pattern', 'allele_count_a', 'allele_count_b'])\n df.columns = ['anc_species_state', 'anc_pop_state',\n 'anc_species_pop', 'prob']\n return df", "def calculate_purity(D, k):\n ti = np.array(D.groupby(by=\"cluster\").count()['x1'])\n ci = np.array(D.groupby(by=\"label\").count()['x1'])\n total_observations = 0\n for i in range(k):\n total_observations += min(ti[i], ci[i])\n purity = total_observations / D.shape[0]\n return purity", "def get_pred_class_probs(self, pred_mu, pred_sigma):", "def calculate_prior_probability(y):\n unique, counts = np.unique(y, return_counts=True)\n u_c = dict(zip(unique, counts))\n instances = len(y)\n for u in u_c:\n u_c[u] = float(u_c[u] / instances)\n return u_c", "def multinomial_pmf(sample, probabilities):\r\n # TODO\r\n a=[]\r\n b=[]\r\n i=0\r\n key_list=[]\r\n value_list=[]\r\n for key,value in sample.items():\r\n key_list.append(key)\r\n value_list.append(value)\r\n b=list(sample)\r\n while i< len(b):\r\n a.append(probabilities.keys()[probabilities.values().index(value_list[i])])\r\n\r\n\r\n return a", "def purity(clusters, classes):\n\n d = defaultdict(list)\n\n # Get a list of class numbers of all examples in a cluster.\n for k, v in zip(clusters, classes):\n d[k].append(v)\n\n mayority = 0\n\n # Count the mayority class number and add it up over all clusters.\n for k in d:\n mayority += Counter(d[k]).most_common(1)[0][1]\n\n return float(mayority) / len(clusters)", "def prob(self, tple, class_counts, feature_counts):\n feats = self.dataset.input_features\n unnorm = [prod(feature_counts[i][feat(tple)][c]\n for (i,feat) in enumerate(feats))\n /(class_counts[c]**(len(feats)-1))\n for c in range(self.num_classes)]\n thesum = sum(unnorm)\n return [un/thesum for un in unnorm]", "def entropyCategorical(attr, X, y):\n uniques = X[attr].unique().tolist()\n idxLists = []\n entropies = []\n weights = []\n for u in uniques:\n idxLists.append(X.index[X[attr] == 
u].tolist())\n entropies.append(entropy(y, idxLists[-1]))\n weights.append(len(idxLists[-1]))\n\n entropies = np.array(entropies).reshape(1, -1)\n weights = np.array(weights).reshape(-1, 1).astype(np.float32)\n weights /= np.sum(weights)\n\n return (uniques, idxLists, (entropies @ weights)[0, 0])", "def lp(word, category, unique, k, name=\"category\"):\n\t\tp1 = category.count(word) + k\n\t\tp2 = len(category) + unique\n\t\tprint(word + \" in \"+name+\": \" + str((p1 * 1.0) / (p2 * 1.0)))\n\t\treturn (p1 * 1.0) / (p2 * 1.0)", "def calc_prob(data):\n total = len(data)\n frequencies = sorted(Counter(data).items())\n probabilities = OrderedDict()\n for (key, value) in frequencies:\n probabilities[key] = value / total\n return probabilities", "def add_discrete_outcome(distribution, x, p, sort=False):\n\n atoms = np.append(x, distribution.atoms)\n pmv = np.append(p, distribution.pmv * (1 - p))\n\n if sort:\n indices = np.argsort(atoms)\n atoms = atoms[indices]\n pmv = pmv[indices]\n\n return DiscreteDistribution(pmv, atoms)", "def prior_probability(x, means, dispersions, cluster_probabilities):\n K = means.shape[0] # number of clusters\n prob = 0\n for j in range(K):\n prob += cluster_probabilities[j] * normal(\n x, means[j], dispersions[j]\n )\n return prob", "def get_pertinence (cats):\n sorted_cats = sorted(cats, key=cats.__getitem__, reverse=True)\n score_to_test = cats[sorted_cats[0]]\n all_values = [cats[key] for key in sorted_cats]\n average = sum(all_values) / len(all_values)\n logged_rest = [log(abs(average - val) + 1) for val in all_values[1:]]\n \n rest_average = sum(logged_rest) / len(logged_rest)\n logged_main = log(abs(average - all_values[0])+1)\n \n importance = max(logged_main - rest_average, 0)\n \n return importance", "def count(d, group, prob):\n if d.get(group) is None:\n d[group] = {prob: 0}\n d[group][prob] = d[group].get(prob, 0) + 1", "def probability(prods, prod_dict_As, count_dict):\n for p in prods:\n if p not in prod_dict_As:\n raise Exception(\"Think we cannot make the product {}.\".format(p))\n # Argh, Python, this is a reference!\n #possible_As = prod_dict_As[prods[0]]\n possible_As = set( prod_dict_As[prods[0]] )\n for p in prods[1:]:\n possible_As &= prod_dict_As[p]\n ret = []\n for A in possible_As:\n count = 1\n for p in prods:\n count *= count_dict[(p,A)]\n ret.append((A,count))\n return ret", "def predict_category(self):\n pass", "def density(categorical_var, numerical_var):\n #print(categorical_var)\n cat_list = categorical_var.astype('category')\n for cat in cat_list:\n sns.kdeplot(numerical_var[categorical_var == cat], label=cat)#, categorical_var)\n\n plt.show()", "def probability(cpts, term, obs):\r\n \r\n \r\n # term is a list e.g., ['x_1', '0']\r\n # flip refers to the assignment either '0' false or '1' true\r\n flip = term[1]\r\n # the term itself\r\n term = term[0]\r\n # accumulator variable\r\n answer = 0\r\n # this loop locates where in the CPT we're looking\r\n for clause in range(len(cpts)):\r\n if cpts[clause][0] == term:\r\n index = clause\r\n # focus on our term\r\n cpt = cpts[index]\r\n # this loop checks if there are no preconditions\r\n # if not, then we immediately know the probability and can return\r\n for m in range(len(cpt[1])):\r\n if cpt[1][m][-2][1] == '1':\r\n if cpt[1][m][0] == [[]]:\r\n answer = cpt[1][m][-1]\r\n # list of the variables we have observed\r\n have = []\r\n if obs != []:\r\n for k in obs:\r\n have.append(k[0])\r\n # list of variables we need to know in order to calculate the probability\r\n needed = []\r\n for 
prob in range(len(cpt[1])):\r\n for j in cpt[1][prob][0]:\r\n if j != []:\r\n if j[0] not in needed:\r\n needed.append(j[0])\r\n # conditional logic based on the known variables\r\n for required in needed:\r\n if required not in have:\r\n # deep copy our observations list\r\n obs2 = []\r\n obs3 = []\r\n for observs in obs:\r\n obs2.append(observs)\r\n obs3.append(observs)\r\n # if we need to know a variable but don't have it\r\n # then we allow it to be either 0 or 1\r\n obs3.append([required,'1'])\r\n obs2.append([required,'0'])\r\n # computes probability if the unknown term is true, times \r\n # the probability that the unknown term is true, plus the\r\n # probability if the unknown term is false, times the \r\n # probability that the unknown term is false\r\n answer = (probability(cpts, [term,flip], obs3) * probability(cpts, [required,'1'], obs)) + (probability(cpts, [term,flip], obs2) * (probability(cpts, [required,'0'], obs)))\r\n # this loop looks complicated but all it's doing is finding the correct\r\n # line in the CPT\r\n if cpt[1][prob][-2][1] == '1':\r\n count = 1\r\n for i in range(len(cpt[1][prob][0])):\r\n if cpt[1][prob][0][i] in obs:\r\n count *= 1\r\n else:\r\n count = 0\r\n if count == 1:\r\n answer += cpt[1][prob][-1]\r\n\r\n\r\n # this computes the probability that the term is true, so if we asked \r\n # for the probability that it is false, just return 1 - answer\r\n if flip == '0':\r\n return 1 - answer\r\n return answer", "def _cluster_hitprobability(self, x, y):\n hm_count = np.zeros_like(y).astype(float)\n hm = np.zeros_like(y).astype(float)\n #skf = StratifiedShuffleSplit(n_splits=self.n_iter, test_size=self.shuffle_test_split, random_state=self.random_state)\n\n ind = self._cluster(x, 35)\n\n for cluster_id in np.unique(ind):\n test = np.argwhere(ind == cluster_id)[:, 0]\n train = np.argwhere(ind != cluster_id)[:, 0]\n #print test\n self.basemodel.fit(x[train, :], y[train], hyperparams_optim=False)\n hm_count[test] += 1.\n hm[test] += (self.basemodel.predict(x[test, :]) == y[test]).astype(float)\n\n proba = hm / hm_count\n if self.verbose:\n # print('H/M count:')\n # print(hm_count)\n print('Proba:')\n print(proba)\n self.basemodel.fit(x, y, hyperparams_optim=False)\n return proba", "def prob(self, feature_index, feature_value, class_):\r\n\r\n deviation = self.conditional_prob[class_][feature_index][1]\r\n mean = self.conditional_prob[class_][feature_index][0]\r\n\r\n val1 = math.pow((feature_value - mean), 2)\r\n val1 = val1/math.pow(deviation, 2)\r\n\r\n val2 = 2*math.pi*math.pow(deviation, 2)\r\n val2 = 1/(math.sqrt(val2))\r\n\r\n probability = val2 * math.exp(-val1)\r\n\r\n return probability", "def _process(self, data: np.ndarray) -> np.ndarray:\n probabilities = np.empty(data.size, dtype=object)\n\n for idx, counts_dict in enumerate(data):\n shots = sum(counts_dict.values())\n freq = counts_dict.get(self._outcome, 0)\n alpha_posterior = [freq + self._alpha_prior[0], shots - freq + self._alpha_prior[1]]\n alpha_sum = sum(alpha_posterior)\n\n p_mean = alpha_posterior[0] / alpha_sum\n p_var = p_mean * (1 - p_mean) / (alpha_sum + 1)\n\n probabilities[idx] = ufloat(nominal_value=p_mean, std_dev=np.sqrt(p_var))\n\n return probabilities", "def calculate_class_probabilities(summaries, input_vector):\n probabilities = {}\n\n for class_key, class_summary in summaries.iteritems():\n # initialize the probability for the class to 1 to\n # prevent keyerrors\n probabilities[class_key] = float(1)\n\n for (mean, stdev), input_val in zip(class_summary, input_vector):\n 
attribute_probability = calculate_probability(input_val, mean, stdev)\n probabilities[class_key] *= attribute_probability\n\n return probabilities", "def nb_decision(X,p_y,p_x_given_y):\n cases = X.shape[0]\n dims = X.shape[1]\n\n bayes_prob = np.zeros((cases, 2))\n result = np.zeros(cases)\n\n y_values = p_y.size\n\n # find the probability for each y value\n for y_value in range(y_values):\n # iterate through each case and calculate the probability\n for case in range(cases):\n prob = p_y[y_value]\n\n for dim in range(dims):\n if X[case][dim] == 1:\n prob = prob * p_x_given_y[dim][y_value]\n elif X[case][dim] == 0:\n prob = prob * (1 - p_x_given_y[dim][y_value])\n\n\n bayes_prob[case][y_value] = prob\n\n # now find out which class has the biggest probability\n for case in range(cases):\n if bayes_prob[case][0] > bayes_prob[case][1]:\n result[case] = 0\n else:\n result[case] = 1\n\n return result", "def sample_categorical(distribution):\n sample = random.random()\n for event, prob in distribution.items():\n if sample < prob:\n return event\n sample -= prob\n raise ValueError('sum of distribution less than one')", "def probit(x):\n from tensorflow_probability import distributions\n return distributions.Normal(0, 1).cdf(x)", "def class_distribution(y): \n # ===================== PLEASE WRITE HERE =====================\n \n bin_array = np.bincount(y)\n n_class1 = bin_array[1]\n n_class2 = bin_array[2]\n n_class3 = bin_array[3]\n \n # ===================== PLEASE WRITE HERE =====================\n \n print('Number of samples in class_1:', n_class1)\n print('Number of samples in class_2:', n_class2)\n print('Number of samples in class_3:', n_class3)", "def produce_labels(y, return_stats=True):\n classes, inds, inv, counts = np.unique(y,\n return_index=True,\n return_inverse=True,\n return_counts=True)\n total_counts = np.sum(counts)\n counts = counts/float(total_counts)\n class_proportions = {clss: cnt for clss, cnt in zip(inv[inds], counts)}\n orig_classes = {new: old for new, old in zip(inv[inds], classes)}\n if return_stats:\n return inv, total_counts, class_proportions, orig_classes\n else:\n return inv", "def class_probability(self, x):\n # permutation before softmax b x a x c x spatial dims --> b x c x a x spatial dims\n # as expected by PyTorch Softmax the class axis = 1 \n return self._class_prob(x.permute([0, 2, 1, 3, 4]))", "def prob_category(new_music, fit):\n\tr = robjects.r\n\t#Be careful not to include the word 'data' in the function call below, although data is a keyword\n\tpredictions = r.predict(fit,new_music,type=\"prob\")\n\treturn predictions", "def calculate_priors(trainingLabels):\r\n sum = 0\r\n priors = {}\r\n totalSamples = len(trainingLabels)\r\n classes = set(trainingLabels)\r\n for cls in classes:\r\n numCls = len(filter(lambda x: x == cls, trainingLabels))\r\n sum += numCls\r\n priors[cls] = float(numCls) / float(totalSamples)\r\n \r\n # Sanity check: valid partitioning\r\n assert(sum == totalSamples)\r\n\r\n return priors", "def probability_density(dic):\n\n var = dic['var']\n par = dic['par']\n y1 = dic['y']\n y = y1.conjugate() * y\n return dic_result(var,par,y)", "def probability(series, params):\n\n prob = 1\n\n for result in series:\n\n prob *= params[result]\n\n return prob * params[\"die\"]", "def multinomial_prob(counts, probs):\n return nCkarray(*counts.values) * (probs ** counts).prod()", "def np_categorical_dice(pred, truth, k):\n A = (pred == k).astype(np.float32)\n B = (truth == k).astype(np.float32)\n return 2 * np.sum(A * B) / (np.sum(A) + 
np.sum(B))", "def test_categorical_log_frequency():\n # assert the distribution of the samples is close to the distribution of the data\n # using cstest:\n # - uniform (assert p-value > 0.05)\n # - very skewed / biased? (assert p-value > 0.05)\n # - inversely correlated (assert correlation < 0)", "def get_pass_rates(grouped):\n pass_rates = {}\n for name, group in grouped:\n vc = group['outcome'].value_counts()\n if True not in vc:\n pass_rates[name] = 0\n else:\n pass_rates[name] = vc[True] / len(group)\n return pass_rates", "def random_pmf(nb_labels):\n random_numbers = np.random.random(nb_labels)\n return random_numbers / np.sum(random_numbers)", "def classify_new_email(filename,probabilities_by_category,prior_by_category):\n ### TODO: Write your code here\n spam_distribution = 0\n ham_distribution = 0\n word_frequency = util.get_word_freq([filename])\n for w in word_frequency:\n if w in probabilities_by_category[0]:\n spam_distribution += word_frequency[w] * np.log(probabilities_by_category[0][w])\n if w in probabilities_by_category[1]:\n ham_distribution += word_frequency[w] * np.log(probabilities_by_category[1][w])\n spam_distribution += np.log(prior_by_category[0])\n ham_distribution += np.log(prior_by_category[1])\n\n predict = \"\"\n if(spam_distribution > ham_distribution):\n predict = \"spam\"\n else:\n predict = \"ham\"\n\n word_distribution = [spam_distribution, ham_distribution]\n\n classify_result = (predict, word_distribution)\n\n return classify_result", "def entropy(group_counts):\n total = sum(group_counts)\n entro = 0\n for item_count in group_counts:\n entro += item_entropy(item_count, total)\n return entro", "def group_and_vote_fractions_with_pop():\n group_share = np.array([0, 0.2, 0.4, 0.6, 0.8, 0.9])\n vote_share = np.array([0.1, 0.3, 0.5, 0.7, 0.9, 0.9])\n populations = np.array([1000, 1000, 1000, 1000, 1000, 1])\n return group_share, vote_share, populations", "def _calculate_probs_and_entropies(self):\n self._calculate_probs_and_entropy_y()\n self._calculate_probs_and_entropy_x(self.cat_cols)", "def doc_prob(self, doc, cat):\n features = self.get_features(doc) \n # Multiply the probabilities of all the features together\n p = Decimal(1)\n for f in features:\n p *= Decimal(str(self.weighted_prob(f, cat, self.feature_prob))) \n return p", "def probability(self, samples):\n pass", "def get_gold_probdist():\n\n # Read in the dataset as a pandas dataframe.\n card_data_annot = gspd.read_in_categorised()\n\n # Based on the frequencies of each category in the data, create probability distribution and return.\n probdist_dict = gspd.freq_dist_to_prob_dist(card_data_annot)\n return probdist_dict", "def monte_carlo_sample(self):\n\t\tresult = dict()\n\t\tfor n in self.topological_sort():\n\t\t\tpvals = tuple(result[p] for p in n.parents)\n\t\t\tresult[n.name] = n.cpt.rand_result(pvals)\n\t\treturn result", "def p(n):\n def p_(x):\n return np.percentile(x, n)\n\n p_.__name__ = \"p_%s\" % n\n return p_", "def _make_category_groups(data_struct):\n groups = {}\n for cat in set(data_struct[\"Objects\"]): \n \n data_names = [\"left_x\",\"top_y\",\"width\",\"height\",\"FPS\",\"AVG_FPS\",\"Accuracy\"]\n indices = [i for i, x in enumerate(data_struct[\"Objects\"]) if x == cat]\n for dn in data_names:\n for idx in indices:\n groups[cat] = data_struct[dn][idx]\n return(groups)", "def test_most_probable_value(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df1 = pd.DataFrame(\n {'A': [0, 1, 
2, 3, 4, 3, 2, 1, 1, 1], 'C': ['f1', 'f3', 'f4', 'f3', 'f4', 'f2', 'f2', 'f1', 'f3', 'f4']})\n df2 = pd.DataFrame(\n {'A': [2, 3, 4, 5, 7, 4, 6, 5, 7, 8], 'C': ['f7', 'f3', 'f5', 'f8', 'f9', 'f2', 'f3', 'f6', 'f7', 'f7']})\n\n # building 1d-, 2d-, and 3d-histogram (iteratively)\n hist0 = hg.Categorize(unit('C'))\n hist1 = hg.Categorize(unit('C'))\n hist2 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n hist3 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n\n # fill them\n hist0.fill.numpy(df1)\n hist1.fill.numpy(df2)\n hist2.fill.numpy(df1)\n hist3.fill.numpy(df2)\n\n assert hist0.mpv == 'f3'\n assert hist1.mpv == 'f7'\n assert hist2.mpv == 1.5\n assert hist3.mpv == 4.5", "def sample_discrete(probs):\r\n q = np.random.rand()\r\n i = 0\r\n p_sum = 0.0\r\n while p_sum < q:\r\n p_sum += probs[i]\r\n i += 1\r\n return i - 1", "def _proba(y):\n N = len(y)\n _, counts = np.unique(y, return_counts=True)\n return counts / N", "def categorize_attributes():\n global attr_categories, seeds\n print \"Generating seeds...\"\n seeds = get_seeds()\n\n print \"Categorizing attributes...\"\n categorized = categorize(seeds)\n \n category_distances = {}\n attr_categories = {}\n for c in categorized:\n for (attr, score) in categorized[c]:\n attr_categories[attr] = c\n category_distances[attr] = score", "def get_transition_probs(x, classes=None):\n ### ==============================================================================================================\n ### define list_ind as the list of unique ID if not defined\n if not classes:\n classes = np.unique(x.values)\n ### ==============================================================================================================\n transitions_matrix = []\n for cl in classes:\n list_mat = []\n list_freq = []\n for yi in np.unique(x.index.year):\n ### select the year\n mat = x.ix[str(yi),].values.flatten()\n ### select the class cl index + 1\n i = np.where(mat == cl)[0] + 1\n ### make sure we clip\n i = np.delete(i, np.where(i >= len(mat)))\n ### count the total number of days following the cluster \"cl\"\n list_freq.append(len(i))\n mat = mat[i,].tolist()\n ### count the number of occurences of each of the N clusters\n list_mat.append([mat.count(c) for c in classes])\n list_mat = np.array(list_mat)\n list_mat = list_mat.sum(0) * 1.0\n list_mat = list_mat / np.array(list_freq).sum()\n transitions_matrix.append(list_mat)\n transitions_matrix = np.array(transitions_matrix)\n return classes, transitions_matrix", "def random_times(p):\n while True:\n if sum(p.values()) != 1:\n raise ValueError('Probabilities must sum to unity')\n r = random.random()\n remaining = 1\n for category, probability in p.items():\n remaining -= probability\n if remaining <= r:\n yield category\n break", "def gen_class_weights(df):\n class_counts_df = df.select(\"tumor_score\").groupBy(\"tumor_score\").count()\n class_counts = {row[\"tumor_score\"]:row[\"count\"] for row in class_counts_df.collect()}\n max_count = max(class_counts.values())\n class_weights = {k-1:max_count/v for k,v in class_counts.items()}\n return class_weights", "def probability(self):\r\n \r\n my_dict = dict()\r\n \r\n for i in self.__dtmc:\r\n \r\n sum_Pij = float(sum([self.__dtmc[i][j] for j in self.__dtmc[i]]))\r\n \r\n if sum_Pij == 0:\r\n \r\n my_dict[i] = dict()\r\n \r\n elif sum_Pij > 0:\r\n \r\n if i not in my_dict:\r\n \r\n my_dict[i] = dict()\r\n \r\n for j in self.__dtmc[i]:\r\n \r\n Pij = self.__dtmc[i][j] / sum_Pij\r\n \r\n my_dict[i][j] = Pij\r\n \r\n return my_dict", 
"def _calculate_probs_and_entropy_x(self, columns):\n #calculate x probabilities and H(Xi)\n #H(Xi) = Sum(x € Xi)(-P(Xi=x) * log(P(Xi=x)))\n for col in columns:\n self.cat_entropies[col] = 0\n xsum = 0\n for val in self.categories[col]:\n self.cat_probs[col][val] = 0\n for label in self.labels:\n self.cat_probs[col][val] += self.cat_counts[col][label][val]\n xsum += self.cat_probs[col][val]\n for val in self.categories[col]:\n self.cat_probs[col][val] /= xsum\n self.cat_entropies[col] -= self.cat_probs[col][val] * self.log(self.cat_probs[col][val])", "def selection_profiles_by_chance(true, compare):\n n_neurons, M = true.shape\n probabilities = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n n = np.count_nonzero(true[neuron])\n N = np.count_nonzero(compare[neuron])\n rv = hypergeom(M=M, n=n, N=N)\n\n overlap = np.count_nonzero(true[neuron] * compare[neuron])\n probabilities[neuron] = 1 - rv.cdf(x=overlap)\n\n return probabilities", "def ppf(self,x):\n if x > 1.0 or x < 0:\n self.raiseAnError(IOError,'Categorical distribution cannot calculate ppf for', str(x), '! Valid value should within [0,1]!')\n sortedMapping = sorted(self.mapping.items(), key=operator.itemgetter(0))\n if x == 1.0:\n return float(sortedMapping[-1][0]) if self.isFloat else sortedMapping[-1][0]\n else:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if cumulative >= x:\n return float(element[0]) if self.isFloat else element[0]", "def get_probs(self, a):\n with torch.no_grad():\n probabilities = (np.array(self.priorities) ** a) / sum(np.array(self.priorities) ** a)\n return probabilities", "def class_proportions(histogram, years, columns, year_header=None):\r\n\r\n wh = None\r\n\r\n if histogram is not None:\r\n histogram_mask = histogram[year_header].isin(years)\r\n wh = (histogram[histogram_mask].loc[:, columns].sum() /\r\n histogram[histogram_mask].loc[:, columns].sum().sum()).values\r\n return wh", "def compare_distr(adata, key, groupby = 'batch', **kwags):\n\n plt.figure(None, (8, 6), 70)\n levels = adata.obs[groupby].cat.categories\n for level in levels:\n plt.hist(adata[adata.obs[groupby] == level].obs[key], alpha = 0.5,\n label = level, density = True , **kwags)\n plt.legend()\n plt.title(key)\n plt.show()", "def distr(self,X):\r\n return {x:X.count(x) for x in set(X)}", "def predictProbabilities(self,density ='Gaussian'):\n\t\ttestingProbs = pd.DataFrame(index=self.testing.index.values,\n\t\t\t\t\t\t\t\t\tcolumns=self.trainingMeans.index.values)\n\n\t\ttesting = self.testing.copy().drop(self.classLabel,1)\n\n\t\tdef calculateGaussian(x, mean, stdev):\n\t\t\t\"\"\"\n\t\t\tReturns the density value of a Gaussian distribution\n\t\t\t\"\"\"\n\t\t\texponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2))))\n\t\t\tvalue= (1 / (math.sqrt(2*math.pi) * stdev)) * exponent\n\t\t\tif value==0:\n\t\t\t\treturn np.nan\n\t\t\telse:\n\t\t\t\treturn math.log(value)\n\n\t\tdef calculateBernoulli(x, mean, stdev):\n\t\t\t\"\"\"\n\t\t\tReturns the density value of a Bernoulli distribution\n\t\t\t\"\"\"\n\t\t\tif x:\n\t\t\t\tprob = mean\n\t\t\telse:\n\t\t\t\tprob = 1-mean\n\t\t\treturn prob\n\n\t\tdef calculateMultinoulli(x, *series):\n\t\t\t\"\"\"\n\t\t\tReturns the density value of a Multinoulli distribution\n\t\t\t\"\"\"\n\t\t\tseries= series[0]\n\t\t\treturn series.ix[x]/float(series.sum())\n\n\t\tif density=='Multinoulli':\n\t\t\t#Redefine the parameters to be conditional means\n\t\t\tfor each in self.params.columns:\n\t\t\t\tfor el in self.params.index:\n\t\t\t\t\tmultiDF = 
pd.Series(index=self.data[each].unique())\n\t\t\t\t\tcounts = self.training[self.training[self.classLabel]==el][each].value_counts()\n\t\t\t\t\tself.params.ix[el][each] = (pd.concat([multiDF,counts],1).drop(0,1),)\n\t\t\tpdf = calculateMultinoulli\n\t\telif density == 'Bernoulli':\n\t\t\tpdf =calculateBernoulli\n\t\telse:\n\t\t\tpdf = calculateGaussian\n\n\t\tprint \"Note: Assuming features follow a \"+density+\" distribution\"\n\n\t\tfor el in testingProbs.columns:\n\t\t\t#Retrieve parameters of distribution\n\t\t\tparameters = self.params.ix[el]\n\t\t\tprobabilities = self.testing.copy().drop(self.classLabel,1)\n\n\t\t\t#For each feature, compute the likelihood of class being el\n\t\t\tfor each in probabilities.columns:\n\t\t\t\t#Skip features with 0 standard deviation\n\t\t\t\tif each in self.useless_features:\n\t\t\t\t\tcontinue\n\t\t\t\tprobabilities[each] = probabilities[each].apply(lambda x: pdf(x,*parameters[each]))\n\n\t\t\t#Multiply features together with prior\n\t\t\ttestingProbs[el] = math.log(self.priors.ix[el])+probabilities.sum(1)\n\t\t\t#testingProbs[el] = self.priors.ix[el]*probabilities.prod(1)\n\t\t#Use log-sum-exp trick. We need the offsetting factor as max among classLabels\n\t\tB = testingProbs.max(1)\n\t\t#Compute log_sum = log(\\sigma_c' exp(b_c' - B)) + B\n\t\tlog_sum = testingProbs.apply(lambda t: (t-B)).applymap(lambda u: math.exp(u)).sum(1).apply(math.log)+B\n\t\tself.testingProbs = testingProbs.apply(lambda x: x-log_sum)\n\t\t#self.testingProbs = testingProbs", "def prob4():\n#raise NotImplementedError(\"Problem 4 Incomplete\")\n h = lambda x : x[0] < -1 and x[1] > 1\n f = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([0,0]),cov=np.eye(2))\n g = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([-1,1]),cov=np.eye(2))\n X = np.random.multivariate_normal(mean=np.array([-1,1]),cov=np.eye(2),size=10000)\n return 1./10000*np.sum(np.apply_along_axis(h,1,X)*np.apply_along_axis(f,1,X)/np.apply_along_axis(g,1,X))", "def _group_separability_scores(self, sep_scores: Dict) -> Dict:\n grouped_sep_scores = {}\n\n for class_pair_key, class_pair_val in sep_scores.items():\n grouped_sep_scores[class_pair_key] = {}\n for concept_key, concept_attrs in CONCEPT_GROUPING.items():\n val = sum([class_pair_val[attr] for attr in concept_attrs]) / len(concept_attrs)\n grouped_sep_scores[class_pair_key][concept_key] = val\n return grouped_sep_scores", "def conditionalDistribution(self, d, v):\n probabilities_ts = np.ones((self.n_topic_components, self.n_sentiment_components))\n firstFactor = (self.n_ds[d] + self.alphaVec) / \\\n (self.n_d[d] + np.sum(self.alphaVec))\n secondFactor = np.zeros((self.n_topic_components,self.n_sentiment_components))\n for s in range(self.n_sentiment_components):\n \n secondFactor[:,s] = ((self.n_dst[d, s, :] + self.gammaVec) / \\\n (self.n_ds[d, s] + np.sum(self.gammaVec)))\n\n thirdFactor = (self.n_vts[v,:, :] + self.beta) / \\\n (self.n_ts + self.n_vts.shape[0] * self.beta)\n\n #forthFactor = np.zeros((self.n_topic_components, self.n_sentiment_components))\n #for k in range(self.n_topic_components):\n # forthFactor[k,:] = np.exp(np.dot(self.topic_embeddings[k,:],self.word_embeddings[v,:]))/np.sum(np.exp(np.dot(self.topic_embeddings[k,:],self.word_embeddings.T)))\n \n forthFactor = np.exp(np.dot(self.topic_embeddings,self.word_embeddings[v,:]))/np.sum(np.exp(np.dot(self.topic_embeddings,self.word_embeddings.T)),-1)\n probabilities_ts *= firstFactor[:, np.newaxis]\n #probabilities_ts *= secondFactor * thirdFactor\n probabilities_ts *= 
secondFactor * ((1-self.lambda_)*thirdFactor + self.lambda_*forthFactor)\n probabilities_ts /= np.sum(probabilities_ts)\n \n return probabilities_ts", "def get_cluster_country_distr(result, var):\n return pd.crosstab(result.country, result[var]).apply(lambda x: x / x.sum()*100, 0).round(2)" ]
[ "0.6292472", "0.6147138", "0.60119295", "0.59831697", "0.5894537", "0.5834089", "0.5826", "0.57258606", "0.5708978", "0.5685781", "0.56797826", "0.5673375", "0.5663211", "0.56225026", "0.55870885", "0.5568914", "0.55597067", "0.5559275", "0.5555052", "0.5546693", "0.55196595", "0.55196595", "0.5501669", "0.5496848", "0.5495503", "0.5462183", "0.54366463", "0.54021275", "0.539517", "0.53868943", "0.53740007", "0.5369462", "0.5368941", "0.5363459", "0.5350627", "0.5336148", "0.53318197", "0.5331721", "0.53248614", "0.531276", "0.53084564", "0.5305879", "0.5270203", "0.5267816", "0.5259715", "0.5255718", "0.52509683", "0.5247198", "0.5246876", "0.5245522", "0.5244552", "0.5236277", "0.52279997", "0.5226326", "0.5221989", "0.5208001", "0.52058333", "0.5184773", "0.51779586", "0.5177642", "0.51689863", "0.51673144", "0.5157144", "0.51548743", "0.51428306", "0.51376104", "0.5132179", "0.5130982", "0.51281226", "0.51277983", "0.51251143", "0.5124136", "0.5120958", "0.51206565", "0.510789", "0.5103376", "0.50997853", "0.5085613", "0.5081558", "0.50805354", "0.50774306", "0.5073992", "0.5070037", "0.50639063", "0.5059359", "0.504921", "0.5046019", "0.50446784", "0.5042946", "0.5041078", "0.5038964", "0.50346893", "0.5032194", "0.5020203", "0.5013963", "0.50101644", "0.49978107", "0.49929065", "0.49836162", "0.49826008" ]
0.70440626
0
Convert time_offsets to gps timestamps and nanoseconds
def get_gps_timestamp(file, time_offset):
    reference_date = get_reference_datetime(file)
    absolute_date = get_absolute_datetime(reference_date, time_offset)
    timestamp, nanosecond = datetime_to_gpstimestamp_nanoseconds(absolute_date)
    return timestamp, nanosecond
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_gps_time(self):\n reason = \"[!] GPS timestamps are 10 digits\"\n ts_type = self.ts_types['gpstime']\n try:\n if not len(self.gps) == 10 or not self.gps.isdigit():\n self.in_gpstime = indiv_output = combined_output = False\n pass\n else:\n leapseconds = self.leapseconds\n gps_stamp = self.epoch_1980 + timedelta(seconds=(float(self.gps)))\n tai_convert = gps_stamp + timedelta(seconds=19)\n epoch_convert = (tai_convert - self.epoch_1970).total_seconds()\n check_date = dt.utcfromtimestamp(epoch_convert)\n for entry in leapseconds:\n check = self.date_range(leapseconds.get(entry)[0], leapseconds.get(entry)[1], check_date)\n if check is True:\n variance = entry\n else:\n variance = 0\n gps_out = check_date - timedelta(seconds=variance)\n self.in_gpstime = gps_out.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"{} {}\".format(ts_type, self.in_gpstime))\n combined_output = str(\"{}{}\\t\\t\\t{} UTC{}\".format(self.left_color, ts_type, self.in_gpstime, self.right_color))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_gpstime = indiv_output = combined_output = False\n return self.in_gpstime, indiv_output, combined_output, reason", "def to_gps_time(self):\n ts_type = self.ts_types['gpstime']\n try:\n leapseconds = self.leapseconds\n check_date = duparser.parse(self.timestamp)\n if hasattr(check_date.tzinfo, '_offset'):\n dt_tz = check_date.tzinfo._offset.total_seconds()\n check_date = duparser.parse(self.timestamp, ignoretz=True)\n else:\n dt_tz = 0\n for entry in leapseconds:\n check = self.date_range(leapseconds.get(entry)[0], leapseconds.get(entry)[1], check_date)\n if check is True:\n variance = entry\n else:\n variance = 0\n leap_correction = check_date + timedelta(seconds=variance)\n epoch_shift = leap_correction - self.epoch_1970\n gps_stamp = (dt.utcfromtimestamp(epoch_shift.total_seconds()) - self.epoch_1980).total_seconds() - 19\n gps_stamp = int(gps_stamp) - int(dt_tz)\n self.out_gpstime = str(gps_stamp)\n ts_output = str(\"{}\\t\\t\\t{}\".format(ts_type, self.out_gpstime))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_gpstime = ts_output = False\n return self.out_gpstime, ts_output", "def datetime_to_gpstimestamp_nanoseconds(date):\n timestamp = gpstime.utc_to_gps(calendar.timegm(date.utctimetuple()))\n nanosecond = date.microsecond * 1000\n\n return timestamp, nanosecond", "def ms2pts(ms, dt):\n return int(ms/dt)", "def calOffsets(self, Xi_arr, Vi_arr, hz):\n\n Wi_arr = [round(vi / hz, 6) for vi in Vi_arr] # tcptimestamps in seconds with microsecond precision\n Yi_arr = [(wi - xi) * 1000 for wi, xi in zip(Wi_arr, Xi_arr)] # offset in miliseconds\n offset_arr = [(round(x, 6), round(y, 6)) for x, y in zip(Xi_arr, Yi_arr)]\n return offset_arr", "def normalize_timestamp(timestamp_series):\n # convert datetime strings into milliseconds from epoch\n times = pd.to_datetime(timestamp_series, format='%Y-%m-%d %H:%M:%S').astype(np.int64) // int(1e6)\n return times", "def _calc_time(time_lines: list) -> np.ndarray:\n time = [time_to_fraction_hour(line.split()[1]) for line in time_lines]\n return np.array(time)", "def pcr_delta_time_ms(pcr_t1, pcr_t2, offset = 0):\n return float(pcr_t2-pcr_t1)/90000.0 + offset", "def ts_to_epoch_seconds(t) -> float:\n return t.astype(int) / 1e9", "def timestamps():\n timestamps = ( # Index\n 1459516622.1, # 0\n 
1459516622.2, # 1\n 1459516622.3, # 2\n 1459516623.0, # 3\n 1459516623.1, # 4\n 1459516623.3, # 5\n 1459516624.0, # 6\n )\n return timestamps", "def ConvertTime( self ) :\n \n # modules:\n import logging\n import datetime\n import netCDF4\n import numpy\n \n #\n # Original 'Time' units and description:\n #\n # title = \"Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC), 1 January 1970.\"\n # units = \"s\"\n #\n # Create new field 'Datetime' field with units:\n # units = \"Seconds since 1970-01-01 00:00'\n #\n # select:\n varid = self.swaths[self.component]['Geolocation Fields']['Time']\n # values:\n tvalues = varid['data']\n # extract description:\n long_name = varid['long_name'].decode('latin-1')\n # check ...\n key = 'Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC),'\n if long_name.startswith(key) :\n # remove leading description:\n time0 = long_name.replace(key,'').replace('.','').strip()\n # extract datetime object:\n t0 = datetime.datetime.strptime(time0,'%d %B %Y')\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n if 'mask' in dir(tvalues) :\n values1d = netCDF4.num2date( tvalues.data, var['units'] )\n else :\n values1d = netCDF4.num2date( tvalues , var['units'] )\n #endif\n # alternative:\n # \"Time at Start of Scan (s, TAI93)\"\n elif 'TAI' in long_name :\n # find start:\n i0 = long_name.index('TAI')\n # extract:\n year = int(long_name[i0+3:].replace(')',''))\n # convert to 4-digits if necessary:\n if year < 100 :\n if year > 50 :\n year = 1900 + year\n else :\n year = 2000 + year\n #endif\n #endif\n # reference time:\n t0 = datetime.datetime(year,1,1,0,0,0)\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n values1d = netCDF4.num2date( tvalues, var['units'] )\n else :\n self.logger.error( 'could not convert time units \"%s\"' % long_name )\n self.logger.error( 'first value : %f' % tvalues[0] )\n raise Exception\n #endif\n \n # expand to 2D:\n var['data'] = numpy.zeros( (self.ntime,self.np), values1d.dtype )\n for ip in range(self.np) :\n var['data'][:,ip] = values1d\n #endfor\n \n # set dim names:\n var['dimnames'] = ('time','pixel')\n \n # store:\n self.swaths[self.component]['Geolocation Fields']['Datetime'] = var", "def parseLabels(labels):\n\n timestamp = [(parseTimestamp(e['gameTime']), e['label']) for e in labels['annotations']]\n \n return timestamp", "def normalize_time(full_timestamps, half_timestamp):\n phases = (half_timestamp - full_timestamps[0]) / (full_timestamps[-1] - full_timestamps[0])\n return phases", "def pts2ms(pts, dt):\n return pts*dt", "def timestamp_processor(timestamps, with_tz=False, check_delta=False):\n if timestamps.count(timestamps[0]) == len(timestamps):\n unified_timestamp = timestamps[0]\n else:\n average_timestamp = sum([dt.timestamp for dt in timestamps])/len(timestamps)\n unified_timestamp = arrow.get(average_timestamp)\n\n if check_delta:\n for ts in timestamps:\n delta = unified_timestamp - arrow.get(ts)\n second_difference = abs(delta.total_seconds())\n\n if second_difference > 3600:\n # more than 1 hour difference\n raise ValueError(\"\"\"South Korea generation data is more than 1 hour apart,\n saw {} hours difference\"\"\".format(second_difference/3600))\n\n if with_tz:\n unified_timestamp = 
unified_timestamp.replace(tzinfo='Asia/Seoul')\n\n return unified_timestamp", "def GPSlatlon2XY_time(lat_u, lon_u, theta):\n\n\trho_u = np.sqrt(np.power(lon_u, 2) + np.power(lat_u, 2))\n\ttheta_new_u = np.arctan2(lat_u, lon_u) - theta\n\n\tUx, Uy = rho_u * np.cos(theta_new_u), rho_u * np.sin(theta_new_u)\n\n\treturn Ux, Uy", "def _TIME2STEPS(time):\n return int(time*1000)", "def unixTimeConv(timestamps):\n\n\tnewTime = str(datetime.datetime.fromtimestamp(int(timestamps)))\n\tyearDate,timeT = newTime.split(' ')\n\tyear,month,day = str(yearDate).split('-')\n\thour,minutes,sec = timeT.split(':')\n\tsplitTimes = (year,month,day,hour,minutes,sec,timestamps)\n\n\treturn(splitTimes)", "def transform_timestamps(time_tracker):\n def calculate_timediff(t1, t2):\n return (t2 - t1).seconds + (t2 - t1).microseconds/1000000\n\n durations = dict()\n\n durations[\"Initialization\"] \\\n = round(calculate_timediff(time_tracker[\"time_start\"],\n time_tracker[\"after_init\"]), 3)\n\n durations[\"Configuration\"] \\\n = round(calculate_timediff(time_tracker[\"after_init\"],\n time_tracker[\"after_config\"]), 3)\n\n iter_list = []\n for i, iteration in enumerate(time_tracker[\"iterations\"]):\n if i == 0:\n iter_list\\\n .append(round(calculate_timediff(time_tracker[\"after_config\"],\n iteration), 3))\n else:\n iter_list\\\n .append(round(calculate_timediff(\n time_tracker[\"iterations\"][i-1], iteration), 3))\n durations[\"Iterations\"] = iter_list\n\n durations[\"Finalization\"] \\\n = round(calculate_timediff(time_tracker[\"iterations\"][-1],\n time_tracker[\"finish\"]), 3)\n durations[\"Total\"] \\\n = round(durations[\"Initialization\"] + durations[\"Configuration\"]\n + sum(durations[\"Iterations\"]) + durations[\"Finalization\"], 3)\n\n return durations", "def time_unwrap(val_timestamps):\n a=val_timestamps.shape[0]\n val_time =val_timestamps.astype('int64')\n for i in range(a-1):\n if val_time[i+1]-val_time[i]<-1*2**25:\n val_time[i+1:]+=2**26\n\n return(val_time)", "def from_deltatime(self,\n delta_time: np.ndarray,\n epoch: str | tuple | list | np.ndarray,\n standard: str = 'UTC'\n ):\n # assert delta time is an array\n delta_time = np.atleast_1d(delta_time)\n # calculate leap seconds if specified\n if (standard.upper() == 'GPS'):\n GPS_Epoch_Time = convert_delta_time(0, epoch1=epoch,\n epoch2= _gps_epoch, scale=1.0)\n GPS_Time = convert_delta_time(delta_time, epoch1=epoch,\n epoch2=_gps_epoch, scale=1.0)\n # calculate difference in leap seconds from start of epoch\n self.leaps = count_leap_seconds(GPS_Time) - \\\n count_leap_seconds(np.atleast_1d(GPS_Epoch_Time))\n elif (standard.upper() == 'LORAN'):\n # LORAN time is ahead of GPS time by 9 seconds\n GPS_Epoch_Time = convert_delta_time(-9.0, epoch1=epoch,\n epoch2=_gps_epoch, scale=1.0)\n GPS_Time = convert_delta_time(delta_time - 9.0, epoch1=epoch,\n epoch2=_gps_epoch, scale=1.0)\n # calculate difference in leap seconds from start of epoch\n self.leaps = count_leap_seconds(GPS_Time) - \\\n count_leap_seconds(np.atleast_1d(GPS_Epoch_Time))\n elif (standard.upper() == 'TAI'):\n # TAI time is ahead of GPS time by 19 seconds\n GPS_Epoch_Time = convert_delta_time(-19.0, epoch1=epoch,\n epoch2=_gps_epoch, scale=1.0)\n GPS_Time = convert_delta_time(delta_time-19.0, epoch1=epoch,\n epoch2=_gps_epoch, scale=1.0)\n # calculate difference in leap seconds from start of epoch\n self.leaps = count_leap_seconds(GPS_Time) - \\\n count_leap_seconds(np.atleast_1d(GPS_Epoch_Time))\n else:\n self.leaps = 0.0\n # convert time to days relative to Modified Julian days 
in UTC\n self.MJD = convert_delta_time(delta_time - self.leaps,\n epoch1=epoch, epoch2=_mjd_epoch, scale=(1.0/self.day))\n return self", "def epochCalc(timestamps):\n\tsplitTimes = unixTimeConv(timestamps)\n\tepochTimes = []\n\thour=int(splitTimes[3])\n\n\tif (hour >0 and hour <=9) or hour>=23:\n\t\tepoch='night'\n\telse:\n\t\tepoch='not_night'\n\tepochTimes.append((epoch,splitTimes[6]))\n\treturn epochTimes", "def map_scan_time(time, nmap_store):\n nmap_store[\"start_time\"] = datetime.datetime.fromtimestamp(int(time.get('starttime')))\n nmap_store[\"stop_time\"] = datetime.datetime.fromtimestamp(int(time.get('endtime')))", "def from_Timestamp(timestamp):\n # type: (timestamp_pb2.Timestamp) -> float\n return timestamp.seconds + float(timestamp.nanos) / 10**9", "def get_times_from_utterance(utterance: str,\n char_offset_to_token_index: Dict[int, int],\n indices_of_approximate_words: Set[int]) -> Dict[str, List[int]]:\n\n pm_linking_dict = _time_regex_match(r'\\d+pm',\n utterance,\n char_offset_to_token_index,\n pm_map_match_to_query_value,\n indices_of_approximate_words)\n\n am_linking_dict = _time_regex_match(r'\\d+am',\n utterance,\n char_offset_to_token_index,\n am_map_match_to_query_value,\n indices_of_approximate_words)\n\n oclock_linking_dict = _time_regex_match(r\"\\d+ o'clock\",\n utterance,\n char_offset_to_token_index,\n lambda match: digit_to_query_time(match.rstrip(\" o'clock\")),\n indices_of_approximate_words)\n\n hours_linking_dict = _time_regex_match(r\"\\d+ hours\",\n utterance,\n char_offset_to_token_index,\n lambda match: [int(match.rstrip(\" hours\"))],\n indices_of_approximate_words)\n\n\n times_linking_dict: Dict[str, List[int]] = defaultdict(list)\n linking_dicts = [pm_linking_dict, am_linking_dict, oclock_linking_dict, hours_linking_dict]\n\n for linking_dict in linking_dicts:\n for key, value in linking_dict.items():\n times_linking_dict[key].extend(value)\n\n return times_linking_dict", "def _convert_timestamp_2_periodic_time(self, timestamp):\n \n l = \"\"\n\n # daily periodic\n theta = self.two_pi_by_one_day_second * (int(timestamp[0:-3]) % self.one_day_second)\n #x = 1 + np.cos(theta)\n #y = 1 + np.sin(theta)\n x = np.cos(theta)\n y = np.sin(theta)\n l += str(x) + \",\" + str(y)\n l += \",\"\n\n # weekly periodic\n theta = self.two_pi_by_seven_days_second * (int(timestamp[0:-3]) % self.seven_days_second)\n # no need plus one?\n #x = 1 + np.cos(theta)\n #y = 1 + np.sin(theta)\n x = np.cos(theta)\n y = np.sin(theta)\n l += str(x) + \",\" + str(y)\n\n return l", "def ClockUsToTimestamp(clock_us, reference_clock_us, reference_timestamp):\n\n return reference_timestamp + (clock_us - reference_clock_us) / 1.0e6", "def to_gps_time(self):\n try:\n iso_time = Time(timestamp, format='iso', scale='utc')\n iso_time.format='gps'\n self.out_gpstime = str(iso_time)\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.out_gpstime = False\n return self.out_gpstime", "def convert_time(t):\n return datetime.fromtimestamp(t / 1e7 - 11644473600)", "def get_time_sec(gpx_track):\n start_time = gpx_track.segments[0].points[0].time\n time_in_sec = np.empty([gpx_track.get_points_no(), 1])\n n = 1\n for segment in gpx_track.segments:\n for point in segment.points:\n curr_time = point.time\n time_in_sec[n] = curr_time - start_time\n n = n + 1\n\n return time_in_sec, start_time", "def shift_time_points(self, offset):\n # Note that this is different from what we are doing in\n # shift_values_by_time in the helper class.\n 
self._time = [t + offset for t in self._time]\n self._time_idx_map = {t: idx for idx, t in enumerate(self._time)}", "def _convert_sfx_timestamp(ts: int) -> float:\n return float(ts) / 1000", "def OPCtimetransformOld(data, to):\n outtimes = []\n \n times = {\n 'ms':[],\n 'SS':[],\n 'MM':[],\n 'HH':[]\n }\n for i in range(0, len(data)):\n item = data[i]\n try: \n times['HH'].append(int(item[0:2]))\n times['MM'].append(int(item[2:4]))\n times['SS'].append(int(item[4:6]))\n times['ms'].append(int(item[7:9]))\n except ValueError:\n # strange value 2319010.00 in 201129 file...\n olditem = item\n newitem = item[:4] + item[4+1:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n try:\n times['HH'].append(int(newitem[0:2]))\n times['MM'].append(int(newitem[2:4]))\n times['SS'].append(int(newitem[4:6]))\n times['ms'].append(int(newitem[7:9]))\n except ValueError:\n print(newitem)\n\n # OPC times go up to 60 minutes. This is corrected by moving one minute\n times['MM'] = [max(0,x-1) for x in times['MM']]\n times['SS'] = [max(0,x-1) for x in times['SS']]\n\n for i in range(0, len(data)):\n md = dt.datetime(1900,1,1,times['HH'][i], times['MM'][i], times['SS'][i]) \n outtimes.append( dt.datetime.strftime(md, to))\n\n return outtimes", "def hydrate_time(nanoseconds, tz=None):\n seconds, nanoseconds = map(int, divmod(nanoseconds, 1000000000))\n minutes, seconds = map(int, divmod(seconds, 60))\n hours, minutes = map(int, divmod(minutes, 60))\n seconds = (1000000000 * seconds + nanoseconds) / 1000000000\n t = Time(hours, minutes, seconds)\n if tz is None:\n return t\n tz_offset_minutes, tz_offset_seconds = divmod(tz, 60)\n zone = FixedOffset(tz_offset_minutes)\n return zone.localize(t)", "def lspe_coordinates (self, time):\n\n return (self.base_lat,\n self.base_long\n + time * 2 * np.pi * (1 + 1 / self.rev_days) / SECONDS_PER_DAY)", "def time_convert(intime):\n Nt = intime.shape[0]\n outtime = []\n for t in range(Nt):\n timestr = ''.join([intime[t,:][~intime[t,:].mask].data[i].decode('utf-8') for i in range(len(intime[t,:][~intime[t,:].mask].data))])\n outtime.append(datetime.strptime(timestr, '%Y-%m-%d_%H:%M:%S'))\n return outtime", "def OPCtimetransform(data, to):\n \n remove_times = []\n outtimes = []\n times = {'ms':[],'SS':[],'MM':[],'HH':[]}\n\n for i in range(0, len(data)):\n times['HH'] = 0\n times['MM'] = 0\n times['SS'] = 0\n times['ms'] = 0\n\n item = data[i]\n \n try:\n if len(item.split('.')[1]) < 2:\n item += '0'\n except IndexError:\n item += '.00'\n if len(item) < 9:\n item = item.zfill(9)\n if int(item[:2]) > 23:\n item = '0' + item\n \n # remove items with extra zero (2319010.00 to 231910)\n if len(item) > 9:\n olditem = item\n newitem = item[:4] + item[5:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n item = newitem\n else:\n pass\n try:\n md = dt.datetime.strptime(item, \"%H%M%S.%f\")\n \n # round off items which exceed 59 minutes or 59 seconds \n # (i.e. 
146001 to 150001.)\n except ValueError:\n \n try:\n times['HH'] = int(item[0:2])\n times['MM'] = int(item[2:4])\n times['SS'] = int(item[4:6])\n times['ms'] = int(item[7:9])\n except ValueError:\n print(i, item)\n\n if times['SS'] > 59:\n times['MM'] += 1\n times['SS'] = 0\n if times['MM'] > 59:\n times['HH'] += 1\n times['MM'] = 0\n # discard items which exceed 23 hours\n if times['HH'] > 23:\n times['HH'] = 23\n print( ('resetting value %s')%(item) )\n \n\n md = dt.datetime(1900,1,1,times['HH'], times['MM'], times['SS']) \n\n \n outtimes.append( dt.datetime.strftime(md, to) )\n\n return outtimes", "def _local_timestamps(self) -> npt.NDArray[np.int64]:\n if self.tz is None or timezones.is_utc(self.tz):\n # Avoid the copy that would be made in tzconversion\n return self.asi8\n return tz_convert_from_utc(self.asi8, self.tz, reso=self._creso)", "def get_timestamps( self, raster_pos=None ):\n if raster_pos is None:\n headers = self.time_specific_headers\n else:\n headers = self.get_raster_pos_headers( raster_pos )\n \n return [to_epoch( from_Tformat( h['DATE_OBS'] ) ) for h in headers]", "def _get_timestamps(self, time_interval: RawTimeIntervalType | None, bbox: BBox) -> list[dt.datetime]:", "def epoch2time(time):\n\tvalue = datetime.datetime.fromtimestamp(time)\n\tNormal = value.strftime('%Y-%m-%d %H:%M:%S')\n\tprint(normal)\n\treturn normal", "def normalize_times(time,dtstart,dtend):\n time=np.datetime64(time)\n dtstart=np.datetime64(dtstart)\n dtend=np.datetime64(dtend)\n time_range = (dtend - dtstart).astype('float64')\n seconds_from_t0 = (time - dtstart).astype('float64')\n return seconds_from_t0 / time_range", "def timestamp_decode(e: Encoding) -> List[int]:\n return _decode(e, Decoder)", "def timestamp(self, offset=0):\n return int(self._base_ts + offset) * 1000", "def timestamp_encode(timestamps: List[int]) -> List[int]:\n return _encode(timestamps, Encoder, Encoding)", "def build_metrics_times_data(time_metrics):\n return [{'name': name, 'latencies': latencies.get_latencies()}\n for name, latencies in iteritems(time_metrics)]", "def extract_from_date(epochseconds, offset=0):\n return time.gmtime(epochseconds+offset*3600)", "def decodeSpaceTime(self, result):\r\n if self.case == 1:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(x[0]/self.scale))),\r\n reader.morton2coordsX2D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY2D(x[1], self.offy, self.scaley, self.roundNum), x[2]], result)\r\n else:\r\n return map(lambda x: [int(x[0]/self.scale), reader.morton2coordsX2D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY2D(x[1], self.offy, self.scaley, self.roundNum), x[2]], result)\r\n elif self.case == 2:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(x[0]/self.scale))), \r\n reader.morton2coordsX3D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY3D(x[1], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ3D(x[1], self.offz, self.scalez, self.roundNum)], result)\r\n else:\r\n return map(lambda x: [int(x[0]/self.scale), reader.morton2coordsX3D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY3D(x[1], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ3D(x[1], self.offz, self.scalez, self.roundNum)], result)\r\n elif self.case == 3:\r\n if self.granularity == 'day':\r\n return map(lambda x: 
[reader.formatTime(reader.inverseDaySinceEpoch(int(reader.morton2coordst3D(x[0])/self.scale))), \r\n reader.morton2coordsY3D(x[0], self.offx, self.scalex, self.roundNum),\r\n reader.morton2coordsZ3D(x[0], self.offy, self.scaley, self.roundNum), x[1]], result)\r\n else:\r\n return map(lambda x: [int(reader.morton2coordst3D(x[0])/self.scale), \r\n reader.morton2coordsY3D(x[0], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsZ3D(x[0], self.offy, self.scaley, self.roundNum), x[1]], result)\r\n elif self.case == 4:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(reader.morton2coordst4D(x[0])/self.scale))), \r\n reader.morton2coordsX4D(x[0], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY4D(x[0], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ4D(x[0], self.offz, self.scalez, self.roundNum)], result)\r\n else:\r\n return map(lambda x: [int(reader.morton2coordst4D(x[0])/self.scale), \r\n reader.morton2coordsX4D(x[0], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY4D(x[0], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ4D(x[0], self.offz, self.scalez, self.roundNum)], result)", "def timedelta64_to_secs(self, timedelta):\n if timedelta is None:\n return np.array([])\n else:\n return timedelta / np.timedelta64(1, 's')", "def unix_time_nanos(dt):\n return timedelta_to_micros(dt - epoch)", "def to_ios_time(self):\n ts_type = self.ts_types['iostime']\n try:\n dt_obj = duparser.parse(self.timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(self.timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_iostime = str(int(((dt_obj - self.epoch_2001).total_seconds() - int(dt_tz)) * self.nano_2001))\n ts_output = str(\"{}\\t\\t\\t{}\".format(ts_type, self.out_iostime))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_iostime = ts_output = False\n return self.out_iostime, ts_output", "def get_frame_targets_from_time_targets(time_targets_ms, output_fps=24):\n return [(t * output_fps) // 1000 for t in time_targets_ms]", "def seg_times_to_trans_times(seg_times):\n trans_times = np.r_[0, np.cumsum(seg_times)]\n return trans_times", "def get_time_offset(self, event_ids, get_extra_info=True, use_cache=True):\n print \"Warning: This is only valid for data from the json files! 
Timestamps in csv are dummies\"\n if event_ids is None:\n raise ValueError('Must provide event ids ts')\n\n key = ('timestamps', tuple(event_ids), get_extra_info)\n if use_cache and key in self.cache:\n return self.cache[key].copy()\n\n # create event id references to query\n self.create_reference_ids_table(event_ids, table_name='_ref')\n\n ts_query = \"\"\"\n SELECT events.id as id, offsettimestamp, event FROM events, _ref\n WHERE events.id = _ref.id AND offsettimestamp >= 0\n \"\"\"\n\n ts = read_sql(ts_query, self.conn)\n\n # adds additional information such as user id, and session id for matching up timestamps\n if get_extra_info:\n extra_info_query = \"\"\"\n SELECT\n sessions.userid,\n events.id AS id,\n sessions.id AS sessionid\n FROM events, sessions, _ref\n WHERE events.id = _ref.id AND\n events.sessionid = sessions.id\n \"\"\"\n extra_info_df = read_sql(extra_info_query, self.conn)\n ts = ts.merge(extra_info_df, how='left', on='id')\n\n self.cache[key] = ts.copy()\n return ts", "def trans_times_to_seg_times(trans_times):\n seg_times = np.diff(trans_times)\n return seg_times", "def sfreq_to_times(gaze_array, sfreq, start_time=0):\n return np.arange(0, len(gaze_array) / sfreq, 1. / sfreq) + start_time", "def map_int_to_p_timestamps(start_timestamps,expired_timestamps):\n start_dates_list = protobuf_timestamps_to_dates(start_timestamps)\n expired_dates_list = protobuf_timestamps_to_dates(expired_timestamps)\n youngest_date = min(start_dates_list)\n # oldest_date = max(expired_dates_list)\n total_days = get_runing_days(start_dates_list,expired_dates_list)\n \n int_to_protobuf_timestamps_dict= {}\n \n for int_date in range(total_days):\n cur_date = youngest_date + datetime.timedelta(days =int_date)\n cur_date_time = datetime.datetime(cur_date.year,cur_date.month,cur_date.day)\n cur_timestamp = Timestamp()\n cur_timestamp.FromDatetime(cur_date_time)\n int_to_protobuf_timestamps_dict[int_date] = cur_timestamp.ToJsonString()\n \n return int_to_protobuf_timestamps_dict", "def to_timestamps(self):\n print ('\\nConverting Date: ' + timestamp + '\\n')\n self.to_unix_sec()\n self.to_unix_milli()\n self.to_win_64_hex()\n self.to_win_64_hexle()\n self.to_chrome()\n self.to_ad()\n self.to_unix_hex_32be()\n self.to_unix_hex_32le()\n self.to_cookie()\n self.to_ole_be()\n self.to_ole_le()\n self.to_mac()\n self.to_hfs_dec()\n self.to_hfs_be()\n self.to_hfs_le()\n self.to_msdos()\n self.to_fat()\n self.to_systime()\n self.to_filetime()\n self.to_prtime()\n self.to_ole_auto()\n self.to_ios_time()\n self.to_sym_time()\n self.to_gps_time()\n self.timestamp_output()\n print ('\\r')", "def utc_to_tt_offset(time: dt.datetime) -> float:\n if time.year < 1972:\n raise ValueError(\"dates prior to 1972 are not currently supported\")\n # this includes the horrible fractional leap seconds prior to 1972\n # and the base 32.184 s offset between TAI and TT\n offset = 42.184\n for threshold in LEAP_SECOND_THRESHOLDS:\n if time > threshold:\n offset += 1\n return offset", "def transform_to_seconds_without_intraday_gaps(timestamps: pd.DatetimeIndex, market_hours: ul.MarketHours, fake_daily_minutes_gap = 60):\n\n n_timestamps = timestamps.size\n transformed_seconds = np.zeros((n_timestamps,1))\n timestamps = ul.convert2dt(timestamps)\n for i in range(n_timestamps):\n transformed_seconds[i,0] = market_hours.to_seconds_since_origin_without_intraday_gaps(timestamps[i], fake_daily_minutes_gap)\n \n return transformed_seconds", "def np_dt_epoch_msec(value):\n return value.astype(long) / 1000", "def from_gps_time(self):\n 
try:\n gps_stamp = Time(int(gps), format='gps', scale='utc')\n gps_stamp.format='iso'\n self.in_gpstime = (duparser.parse(str(gps_stamp)).strftime('%Y-%m-%d %H:%M:%S.%f'))\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.in_gpstime = False\n return self.in_gpstime", "def event_time_1970(event_time):\n return event_time + start_time", "def _format_timestamps(self):\n epoch_pattern = \"\\d{13}\"\n iso_pattern = \"\\d{4}/\\d{2}/\\d{2}\"\n\n formatted_timestamps = []\n if re.match(epoch_pattern, self.timestamps[0]):\n for ts in self.timestamps:\n fmt_ts = pd.to_datetime(int(ts), unit=\"ms\").strftime(\"%Y/%m/%d\")\n formatted_timestamps.append(fmt_ts)\n elif re.match(iso_pattern, self.timestamps[0]):\n for ts in self.timestamps:\n y, m, d = ts.split(\"/\")\n fmt_ts = datetime(int(y), int(m), int(d)).strftime(\"%Y/%m/%d\")\n formatted_timestamps.append(fmt_ts)\n else:\n raise TimestampError\n\n return formatted_timestamps", "def parse_timestamp(row):\n ts0 = pd.Timestamp(row['raw_timestamp_part_1'], unit='s')\n ts1 = pd.Timedelta(row['raw_timestamp_part_2'], unit='microseconds')\n return ts0 + ts1", "def time_to_position(tracks, point):\n\n index1 = [index for index, track_point in enumerate(tracks[0]) if track_point == point][0]\n index2 = [index for index, track_point in enumerate(tracks[1]) if track_point == point][0]\n\n # We add one to the length of each track as 0,0 to first point is missing from the track data\n return index1 + 1 + index2 + 1", "def get_record_timestamps(records):\n if isinstance(records[0], MessageRecord):\n # UserMessageRecords generated before or after actual recording to disk,\n # or during a pause in recording, have a misleading TimeStamp of 0, so\n # instead use the DateTime for sorting MessageRecords in temporal order\n ts = np.asarray([ record.DateTime for record in records ])\n return ts\n try:\n ts = np.asarray([ record.TimeStamp for record in records ])\n except AttributeError:\n ts = np.asarray([ record['TimeStamp'] for record in records ])\n return ts", "def to_unix_milli(self):\n ts_type = self.ts_types['unix_milli']\n try:\n dt_obj = duparser.parse(self.timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(self.timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_unix_milli = str(int(((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz))*1000))\n ts_output = str(\"{}\\t\\t{}\".format(ts_type, self.out_unix_milli))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_unix_milli = ts_output = False\n return self.out_unix_milli, ts_output", "def UTC_times(times, \n trace, \n diff_thres = 30.0):\n # set times values to seconds\n \n #AUTOMATE THIS SECTION!\n #CHECK THAT THIS IS CORRECT\n times = times / trace.stats.sampling_rate\n #remove unwanted parts of times numpy array \n times = times[:,0]\n \n #remove the first instance of time because it is \n #somehow always of the wrong format!\n #times = np.delete(times, 0) \n \n event_times = []\n event = [times[0]]\n \n start_time = trace.stats.starttime\n \n #for item in times:\n # print start_time + item\n\n for i in range(1, len(times)):\n \n # check if two events in times array have a difference < diff_thres, \n #if not, run average of those times, if so append that events to a \n #new events_times list\n \n #time_diff = times[i + 1] - times[i]\n \n time_diff = 
times[i] - times[i-1]\n\n #save info until events are far enough apart! \n if time_diff < diff_thres:\n\n event.append(times[i])\n \n \n #raise conditional for if events are far enough apart! \n else:\n\n event_start = event[0] - 2 #minus 5 seconds\n event_end = max(event) + 2 #add 5 seconds\n\n event_times.append([event_start, event_end])\n \n event = [] \n \n event.append(times[i])\n\n #if event still contains something for any reason, add it to event times\n if len(event) > 0: \n event_start = event[0] - 2 #minus 5 seconds\n event_end = max(event) + 2 #add 5 seconds\n event_times.append([event_start, event_end])\n event = [] \n \n\n\n #if len(event_times) == 0 and len(event) > 0 or time_diff > diff_thres and len(event) > 0:\n \n #event_times.append(sum(event) / len(event))\n \n # event_start = event[0] - 2 #minus 5 seconds\n # event_end = event[-1] + 2 #add 5 seconds\n \n # event_times.append([event_start, event_end])\n \n # event = []\n \n #event_times.append(times[i])\n \n # else:\n # event.append(times[i])\n \n\n UTC_events = []\n\n #earthquake length threshold is 10 seconds and above!\n eq_len = 0#5.0\n\n for i in event_times:\n estart = start_time + i[0]\n eend = start_time + i[1]\n \n if eend - estart > eq_len:\n UTC_events.append([estart, eend])\n \n #UTC_events = np.unique(np.asarray(UTC_events))\n\n \n return UTC_events", "def _compute_epochs(events, rel_start_ms, rel_stop_ms, timestamps, sr):\n\n # THIS IS SO MUCH FASTER THAN NP.WHERE, CRAZY\n offsets = events.stTime.apply(lambda x: np.searchsorted(timestamps, x))\n # offsets = events.stTime.apply(lambda x: np.where(timestamps >= x)[0][0])\n rel_start_micro = int(rel_start_ms * sr / 1e3)\n rel_stop_micro = int(rel_stop_ms * sr / 1e3)\n epochs = np.array([(offset + rel_start_micro, offset + rel_stop_micro) for offset in offsets])\n return epochs", "def converting_timestamps(array):\r\n row = 0\r\n data = array\r\n month_dict = {\"Jan\": \"01\", \"Feb\": \"02\", \"Mar\": \"03\", \"Apr\": \"04\",\r\n \"May\": \"05\", \"Jun\": \"06\", \"Jul\": \"07\", \"Aug\": \"08\",\r\n \"Sept\": \"09\", \"Oct\": \"10\", \"Nov\": \"11\", \"Dec\": \"12\"}\r\n for i in array:\r\n if len(data[0][0]) <= 1:\r\n string_lst = i.split()\r\n data[row] = np.array(\"{}-{}-{} {}\".format(string_lst[5], month_dict[string_lst[1]],\r\n string_lst[2], string_lst[3]))\r\n row += 1\r\n else:\r\n string_lst = i[0].split()\r\n data[row][0] = np.array(\"{}-{}-{} {}\".format(string_lst[5],\r\n month_dict[string_lst[1]], string_lst[2], string_lst[3]))\r\n row += 1\r\n return data", "def time_pars_evt(evt):\n evtid = evt.get(_psana.EventId)\n tsec, tnsec = evtid.time()\n fid = evtid.fiducials()\n date = strftime('%Y-%m-%d', localtime(tsec))\n time = strftime('%H:%M:%S', localtime(tsec))\n return tsec, tnsec, fid, date, time", "def _get_time(\n date_sweep,\n time_sweep,\n first_angle_start,\n last_angle_stop,\n angle_step,\n nrays,\n ant_speed,\n scan_type=\"ppi\",\n):\n datetime_sweep = datetime.datetime.strptime(\n date_sweep + \" \" + time_sweep, \"%Y-%m-%d %H:%M:%S\"\n )\n sweep_start_epoch = (datetime_sweep - datetime.datetime(1970, 1, 1)).total_seconds()\n if scan_type == \"ppi\":\n if (last_angle_stop > first_angle_start) and (\n (last_angle_stop - first_angle_start) / nrays > angle_step\n ):\n sweep_duration = (last_angle_stop - first_angle_start) / ant_speed\n else:\n sweep_duration = (last_angle_stop + 360.0 - first_angle_start) / ant_speed\n else:\n if last_angle_stop > first_angle_start:\n sweep_duration = (last_angle_stop - first_angle_start) / ant_speed\n 
else:\n sweep_duration = (first_angle_start - last_angle_stop) / ant_speed\n\n time_angle = sweep_duration / nrays\n\n sweep_end_epoch = sweep_start_epoch + sweep_duration\n\n time_data = np.linspace(\n sweep_start_epoch + time_angle / 2.0,\n sweep_end_epoch - time_angle / 2.0,\n num=nrays,\n )\n\n return time_data, sweep_start_epoch", "def convert_all_timestamps(results: List[ResponseObject]) -> List[ResponseObject]:\n results = [convert_generic_timestamps(result) for result in results]\n results = [convert_observation_timestamps(result) for result in results]\n return results", "def get_timestamps(version):\n if version == '1w':\n return 1554476400, 1554562800\n elif version == '2w':\n return 1554908400, 1555081200\n elif version == '6w':\n return 1554044400, 1554649200\n else:\n raise ValueError(version)", "def add_time_delta(time_offset_value, date_time, dataset): \n \n if 'minutes' in time_offset:\n date_time_delta = [ timedelta(minutes = float(i) ) + time_offset_value for i in date_time ]\n elif 'hours' in time_offset:\n date_time_delta = [ timedelta(hours = float(i) ) + time_offset_value for i in date_time ] \n elif 'seconds' in time_offset: \n date_time_delta = [ timedelta(seconds = float(i) ) + time_offset_value for i in date_time ] \n \n \n '''\n if 'era' not in dataset:\n \n if 'minutes' in time_offset:\n date_time_delta = [ timedelta(minutes = float(i) ) + time_offset_value for i in date_time ]\n elif 'hours' in time_offset:\n date_time_delta = [ timedelta(hours = float(i) ) + time_offset_value for i in date_time ] \n elif 'seconds' in time_offset: \n date_time_delta = [ timedelta(seconds = float(i) ) + time_offset_value for i in date_time ] \n else:\n date_time = np.array( [ datetime.strptime(str(int(i)), '%Y%m%d%H') for i in date_time ] )# convert to datetime object \n ''' \n \n #else:\n # print('check if time is wrong !!!! 
(should never happen)')\n # sys.exit() \n #unique_dt = [i for i in [ time_offset_value + j for j in delta ] ] \n #unique_dt = [ i +0 ]\n date_time_delta = [ i.replace(minute=0, second=0) for i in date_time_delta ] \n \n return date_time_delta", "def epoch2datetime(t):\n return datetime.fromtimestamp(t/1000.0)", "def get_times(my_vars):\n base_time = my_vars['base_time'].getValue()\n try:\n times=my_vars['time']\n except KeyError:\n times = my_vars['time_offset']\n\n ts = []\n for time in times:\n temp = datetime.utcfromtimestamp(base_time+time)\n if (temp.minute == 0) :\n ts.append(temp)\n return ts", "def _getoffsets(isMountoffset):\n mplist = list()\n for i in range(23) :\n a = i+1\n mp = device.Carma(a).getName() + \".AntennaCommon.Drive.Point.\"\n if (isMountoffset): mp += \"mountOffset\"\n else: mp += \"offset\"\n mpaz = mp + \"Az\"\n mpel = mp + \"El\"\n mps = [mpaz, mpel]\n mplist.append(mps)\n r = queryMpValues(mplist, nothrow=True)\n if False:\n for i in range(23):\n if r[i][0] == None: astr = \" None\"\n else : astr = \"%5.2f\" %r[i][0]\n if r[i][1] == None: estr = \" None\"\n else : estr = \"%5.2f\" %r[i][1]\n print \"%2d: %s %s\" %(i+1, astr, estr)\n return r", "def totimestring(timenumbers, format, subseconds=False, t0=0.):\n\n if isinstance(t0, (numbers.Number, numpy.number)):\n t0 = float(t0)\n else:\n if subseconds:\n pytimestring, subsecs = t0.split(\".\")\n subsecs = float(\"0.\" + subsecs)\n else:\n pytimestring, subsecs = t0, 0.\n tmp = time.strptime(pytimestring, format)\n tmp = [tmp.tm_year, tmp.tm_mon, tmp.tm_mday, tmp.tm_hour, tmp.tm_min, tmp.tm_sec, tmp.tm_wday, tmp.tm_yday, tmp.tm_isdst]\n if format.find(\"%y\") == -1 and format.find(\"%Y\") == -1:\n tmp[0] = 1970\n tzoffset = 0\n if format.find(\"%Z\") == -1:\n # if time.daylight:\n # tzoffset = time.altzone\n # else:\n tzoffset = time.timezone\n t0 = time.mktime(tuple(tmp)) - tzoffset + subsecs\n\n single_value = False\n if isinstance(timenumbers, (numbers.Number, numpy.number)):\n single_value = True\n timenumbers = [timenumbers]\n \n output = []\n for timenumber in timenumbers:\n if subseconds:\n subsecs, secs = math.modf(timenumber + t0)\n ss = str(abs(subsecs))[2:]\n if ss == \"0\":\n output.append(time.strftime(format, time.gmtime(int(secs))))\n else:\n output.append(\"%s.%s\" % (time.strftime(format, time.gmtime(int(secs))), ss))\n else:\n secs = round(timenumber + t0)\n output.append(time.strftime(format, time.gmtime(int(secs))))\n\n if single_value: return output[0]\n else: return output", "def pre_timestamps(ldt_timestamps, window):\r\n dt_timeofday = dt.timedelta(hours=16)\r\n days_delta = dt.timedelta(days=(np.ceil(window*7/5)+20))\r\n dt_start = ldt_timestamps[0] - days_delta\r\n dt_end = ldt_timestamps[0] - dt.timedelta(days=1)\r\n pre_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)\r\n return pre_timestamps", "def calculate_time_stamp(input_data, idx):\n new_time_stamp = input_data.loc[idx, \"timestamp\"].split(\"T\")\n yyyymmdd = list(map(int, new_time_stamp[0].split(\"-\")))\n hhmmss = list(map(int, new_time_stamp[1].split(\":\")[:-1]))\n new_time_stamp_list = yyyymmdd + hhmmss\n return new_time_stamp_list, yyyymmdd, hhmmss, new_time_stamp", "def gps2Time(self):\n self.posting_date = Time(self.posting_gpstime, format=\"gps\")", "def make_info(days, secs, nsecs, gtids, followers=None):\n array = []\n if gtids is None:\n array.append(('Not Processed', '-'))\n return array\n for t in range(len(days)):\n total_secs = TZERO + days[t]*24*3600 + secs[t] + float(nsecs[t])/1e9\n stime = 
time.strftime(\"%Y/%m/%d %H:%M:%S \", time.localtime(total_secs))\n if followers:\n array.append((gtids[t], stime, followers[t]))\n continue\n array.append((gtids[t], stime))\n\n return array", "def to_prtime(self):\n ts_type = self.ts_types['prtime']\n try:\n dt_obj = duparser.parse(self.timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(self.timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_prtime = str(int(((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz)) * 1000000))\n ts_output = str(\"{}\\t\\t\\t{}\".format(ts_type, self.out_prtime))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_prtime = ts_output = False\n return self.out_prtime, ts_output", "def convert_time(slog_time_str):\n \n base_time = datetime.datetime(2007, 1, 1)\n delta = datetime.timedelta(0, float(slog_time_str))\n \n timestamp = base_time + delta\n taml_dtg = timestamp.strftime('%Y-%m-%dT%H:%M:%S')\n return taml_dtg", "def normalize_time(timestamp):\r\n offset = timestamp.utcoffset()\r\n if offset is None:\r\n return timestamp\r\n return timestamp.replace(tzinfo=None) - offset", "def gprmc_convert(line):\r\n gps = line.strip().split(',')\r\n #check data\r\n if gps[2] == 'V':\r\n return\r\n raw_date = gps[9]\r\n time = ''\r\n date = raw_date[0:2]\r\n month = raw_date[2:4]\r\n year = raw_date[4:]\r\n #modify year if reaches year 2100\r\n time += date + '/' + month + '/20' + year\r\n return [time]", "def to_ios_time(self):\n try:\n dt_obj = duparser.parse(timestamp)\n self.out_iostime = str(int(((dt_obj - self.epoch_2001).total_seconds()) * self.nano_2001))\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.out_iostime = False\n return self.out_iostime", "def to_prtime(self):\n try:\n dt_obj = duparser.parse(timestamp)\n self.out_prtime = str(int((dt_obj - self.epoch_1970).total_seconds() * 1000000))\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.out_prtime = False\n return self.out_prtime", "def convert_time_to_seconds(self, time_value):\n time_epoch = []\n mylog.debug('Converting %s to epoch time' % time_value)\n for value in time_value:\n try:\n pattern = ' %I:%M:%S%p'\n time_epoch_mini = int(time.mktime(time.strptime(value, pattern))) \n time_epoch.append(time_epoch_mini)\n except:\n mylog.debug('%s Does not seem to be in format with leading space' % value)\n try:\n pattern = '%I:%M:%S%p'\n time_epoch_mini = int(time.mktime(time.strptime(value, pattern))) \n time_epoch.append(time_epoch_mini)\n except:\n mylog.debug('%s Does not appear to be in format without leading space' % value)\n return time_epoch", "def to_Timestamp(time):\n # type: (Union[int, float]) -> timestamp_pb2.Timestamp\n seconds = int(time)\n nanos = int((time - seconds) * 10**9)\n return timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos)", "def LST2timeStamp(self, lst):\n if isinstance(lst, list):\n lst = np.array(lst)\n return (lst-self.lst_start)*3590.*1e6", "def to_ticks(self, *args, **kwargs):\n return _uhd_swig.time_spec_t_to_ticks(self, *args, **kwargs)", "def epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs", "def stringTimeToUnix_NEW(st):\n y, m, d, h, n, s, ms 
= stringTimeToTuple(st)\n epochSecs = mktime(map(int ,(y, m, d, h, n, s, 0, 0, 0)))-timezone\n #print \"seconds is %f\\n\" % (epochSecs + int(ms)/1000.0)\n return epochSecs + int(ms)/1000.0", "def _tzoffset2iso8601zone(seconds):\n return \"%+03d:%02d\" % divmod((seconds // 60), 60)", "def gon2dms(gon):\n return dec2dms(gon2dec(gon))", "def preprocess_time(data, metadata):\n timestamp_name = metadata[\"timestamp_name\"]\n if timestamp_name == \"\":\n timestamp_name = \"fake_ts\"\n data[timestamp_name] = data.index\n\n data[timestamp_name] = pd.to_datetime(data[timestamp_name])\n data.sort_values(by=[timestamp_name], inplace=True)\n data.set_index([timestamp_name], inplace=True)\n\n return data", "def epoch_time(start_time: float, end_time: float) -> Tuple[int, int]:\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs" ]
[ "0.6133533", "0.5981217", "0.5839412", "0.5720046", "0.5719699", "0.5716344", "0.5671996", "0.56716424", "0.56283784", "0.5613697", "0.55957836", "0.55948985", "0.55543983", "0.5532485", "0.55197525", "0.54900724", "0.54886615", "0.54784024", "0.5435569", "0.5418071", "0.54071826", "0.5405543", "0.5402045", "0.5390691", "0.5362241", "0.5346637", "0.53447425", "0.53435993", "0.5324789", "0.5324387", "0.5307583", "0.52710027", "0.52573955", "0.5256889", "0.5256787", "0.5256231", "0.52559257", "0.5250541", "0.5244904", "0.5237289", "0.52289057", "0.5224151", "0.5212016", "0.5211229", "0.5206162", "0.5205111", "0.51878273", "0.5186582", "0.51840895", "0.51750433", "0.5169251", "0.51584506", "0.5142242", "0.5140525", "0.5140372", "0.51324445", "0.51291746", "0.51261014", "0.5113889", "0.51133853", "0.5107176", "0.51020104", "0.5096293", "0.50951195", "0.5083412", "0.50816107", "0.5067911", "0.5060482", "0.5059041", "0.5057897", "0.5054644", "0.50538594", "0.5044436", "0.5042757", "0.50391734", "0.5032044", "0.5024734", "0.5022371", "0.5022192", "0.5019406", "0.50116855", "0.5007968", "0.4994435", "0.49915743", "0.4990034", "0.49889427", "0.49784064", "0.4972565", "0.49699295", "0.49695688", "0.49665782", "0.49658233", "0.4957471", "0.49524382", "0.49500725", "0.4947", "0.49448982", "0.49446338", "0.49418375", "0.49407163" ]
0.674219
0
Convert datetime objects to GPS timestamp and nanoseconds
def datetime_to_gpstimestamp_nanoseconds(date): timestamp = gpstime.utc_to_gps(calendar.timegm(date.utctimetuple())) nanosecond = date.microsecond * 1000 return timestamp, nanosecond
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_gps_time(self):\n ts_type = self.ts_types['gpstime']\n try:\n leapseconds = self.leapseconds\n check_date = duparser.parse(self.timestamp)\n if hasattr(check_date.tzinfo, '_offset'):\n dt_tz = check_date.tzinfo._offset.total_seconds()\n check_date = duparser.parse(self.timestamp, ignoretz=True)\n else:\n dt_tz = 0\n for entry in leapseconds:\n check = self.date_range(leapseconds.get(entry)[0], leapseconds.get(entry)[1], check_date)\n if check is True:\n variance = entry\n else:\n variance = 0\n leap_correction = check_date + timedelta(seconds=variance)\n epoch_shift = leap_correction - self.epoch_1970\n gps_stamp = (dt.utcfromtimestamp(epoch_shift.total_seconds()) - self.epoch_1980).total_seconds() - 19\n gps_stamp = int(gps_stamp) - int(dt_tz)\n self.out_gpstime = str(gps_stamp)\n ts_output = str(\"{}\\t\\t\\t{}\".format(ts_type, self.out_gpstime))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_gpstime = ts_output = False\n return self.out_gpstime, ts_output", "def from_gps_time(self):\n reason = \"[!] GPS timestamps are 10 digits\"\n ts_type = self.ts_types['gpstime']\n try:\n if not len(self.gps) == 10 or not self.gps.isdigit():\n self.in_gpstime = indiv_output = combined_output = False\n pass\n else:\n leapseconds = self.leapseconds\n gps_stamp = self.epoch_1980 + timedelta(seconds=(float(self.gps)))\n tai_convert = gps_stamp + timedelta(seconds=19)\n epoch_convert = (tai_convert - self.epoch_1970).total_seconds()\n check_date = dt.utcfromtimestamp(epoch_convert)\n for entry in leapseconds:\n check = self.date_range(leapseconds.get(entry)[0], leapseconds.get(entry)[1], check_date)\n if check is True:\n variance = entry\n else:\n variance = 0\n gps_out = check_date - timedelta(seconds=variance)\n self.in_gpstime = gps_out.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"{} {}\".format(ts_type, self.in_gpstime))\n combined_output = str(\"{}{}\\t\\t\\t{} UTC{}\".format(self.left_color, ts_type, self.in_gpstime, self.right_color))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_gpstime = indiv_output = combined_output = False\n return self.in_gpstime, indiv_output, combined_output, reason", "def epoch2datetime(t):\n return datetime.fromtimestamp(t/1000.0)", "def to_timestamp(date_time: datetime, unit: TimeUnit = TimeUnit.SECONDS) -> float:\n return date_time.replace(tzinfo=timezone.utc).timestamp() * (1000 ** int(unit))", "def to_avro(date_time: datetime.datetime) -> float:\n if date_time.tzinfo:\n ts = (date_time - utils.epoch).total_seconds()\n else:\n ts = (date_time - utils.epoch_naive).total_seconds()\n\n return ts * 1000", "def from_Timestamp(timestamp):\n # type: (timestamp_pb2.Timestamp) -> float\n return timestamp.seconds + float(timestamp.nanos) / 10**9", "def unix_time_nanos(dt):\n return timedelta_to_micros(dt - epoch)", "def date_to_nano(ts):\n return calendar.timegm(ts.utctimetuple()) * int(1e3)", "def get_gps_timestamp(file, time_offset):\n reference_date = get_reference_datetime(file)\n absolute_date = get_absolute_datetime(reference_date, time_offset)\n timestamp, nanosecond = datetime_to_gpstimestamp_nanoseconds(absolute_date)\n\n return timestamp, nanosecond", "def to_stamp(datetime_):\r\n try:\r\n return datetime_.timestamp()\r\n except AttributeError:\r\n return time.mktime(datetime_.timetuple()) + datetime_.microsecond / 
1e6", "def convertToEST(timestamp):\n newDateTime = datetime.datetime.fromtimestamp(timestamp/1000)\n return newDateTime.date(), newDateTime.time()", "def to_gps_time(self):\n try:\n iso_time = Time(timestamp, format='iso', scale='utc')\n iso_time.format='gps'\n self.out_gpstime = str(iso_time)\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.out_gpstime = False\n return self.out_gpstime", "def to_unix_milli(self):\n ts_type = self.ts_types['unix_milli']\n try:\n dt_obj = duparser.parse(self.timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(self.timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_unix_milli = str(int(((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz))*1000))\n ts_output = str(\"{}\\t\\t{}\".format(ts_type, self.out_unix_milli))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_unix_milli = ts_output = False\n return self.out_unix_milli, ts_output", "def datetime_to_epoch_microseconds(obj: \"datetime\") -> float:\n td = datetime_to_epoch_timedelta(obj)\n return (td.days * 86400 + td.seconds) * 10**6 + td.microseconds", "def datetime_to_timestamp(dt):\n return calendar.timegm(dt.timetuple()) * 1000", "def gps2Time(self):\n self.posting_date = Time(self.posting_gpstime, format=\"gps\")", "def convert_datetime(\n date: float | np.ndarray,\n epoch: str | tuple | list | np.datetime64 = _unix_epoch\n ):\n # convert epoch to datetime variables\n if isinstance(epoch, (tuple, list)):\n epoch = np.datetime64(datetime.datetime(*epoch))\n elif isinstance(epoch, str):\n epoch = np.datetime64(parse(epoch))\n # convert to delta time\n return (date - epoch) / np.timedelta64(1, 's')", "def convert_datetime(\n date: float | np.ndarray,\n epoch: str | tuple | list | np.datetime64 = _unix_epoch\n ):\n # convert epoch to datetime variables\n if isinstance(epoch, (tuple, list)):\n epoch = np.datetime64(datetime.datetime(*epoch))\n elif isinstance(epoch, str):\n epoch = np.datetime64(parse(epoch))\n # convert to delta time\n return (date - epoch) / np.timedelta64(1, 's')", "def ts_to_epoch_seconds(t) -> float:\n return t.astype(int) / 1e9", "def convert_time(t):\n return datetime.fromtimestamp(t / 1e7 - 11644473600)", "def to_pydatetime(self) -> npt.NDArray[np.object_]:\n return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso)", "def datetime_to_timestamp(obj: \"datetime\") -> \"Timestamp\":\n td = datetime_to_epoch_timedelta(obj)\n ts = Timestamp()\n ts.seconds = td.seconds + td.days * _SECONDS_PER_DAY\n ts.nanos = td.microseconds * _NANOS_PER_MICROSECOND\n return ts", "def to_ios_time(self):\n ts_type = self.ts_types['iostime']\n try:\n dt_obj = duparser.parse(self.timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(self.timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_iostime = str(int(((dt_obj - self.epoch_2001).total_seconds() - int(dt_tz)) * self.nano_2001))\n ts_output = str(\"{}\\t\\t\\t{}\".format(ts_type, self.out_iostime))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_iostime = ts_output = False\n return self.out_iostime, ts_output", "def hydrate_datetime(seconds, nanoseconds, tz=None):\n minutes, seconds = map(int, 
divmod(seconds, 60))\n hours, minutes = map(int, divmod(minutes, 60))\n days, hours = map(int, divmod(hours, 24))\n seconds = (1000000000 * seconds + nanoseconds) / 1000000000\n t = DateTime.combine(Date.from_ordinal(unix_epoch_date_ordinal + days), Time(hours, minutes, seconds))\n if tz is None:\n return t\n if isinstance(tz, int):\n tz_offset_minutes, tz_offset_seconds = divmod(tz, 60)\n zone = FixedOffset(tz_offset_minutes)\n else:\n zone = timezone(tz)\n return zone.localize(t)", "def normalize_timestamp(timestamp_series):\n # convert datetime strings into milliseconds from epoch\n times = pd.to_datetime(timestamp_series, format='%Y-%m-%d %H:%M:%S').astype(np.int64) // int(1e6)\n return times", "def epoch2time(time):\n\tvalue = datetime.datetime.fromtimestamp(time)\n\tNormal = value.strftime('%Y-%m-%d %H:%M:%S')\n\tprint(normal)\n\treturn normal", "def np_dt_epoch_msec(value):\n return value.astype(long) / 1000", "def to_prtime(self):\n try:\n dt_obj = duparser.parse(timestamp)\n self.out_prtime = str(int((dt_obj - self.epoch_1970).total_seconds() * 1000000))\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.out_prtime = False\n return self.out_prtime", "def _GetNormalizedTimestamp(self):\n if self._normalized_timestamp is None:\n if (self._number_of_seconds is not None and\n self.fraction_of_second is not None):\n self._normalized_timestamp = (\n decimal.Decimal(self._number_of_seconds) + self.fraction_of_second)\n\n return self._normalized_timestamp", "def _datetime_to_timestamp(self, dt):\n return time.mktime(dt.timetuple())", "def _datetime2et(time: datetime) -> float:\n if isinstance(time, float):\n return time\n if not isinstance(time, datetime):\n raise TypeError(\"Time must be a float or a datetime object.\")\n return spy.str2et(time.isoformat())", "def to_ios_time(self):\n try:\n dt_obj = duparser.parse(timestamp)\n self.out_iostime = str(int(((dt_obj - self.epoch_2001).total_seconds()) * self.nano_2001))\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.out_iostime = False\n return self.out_iostime", "def datetime2ts(dt):\n return int(time.mktime(dt.timetuple()))", "def datetime2timestamp(value, millis=False):\n if millis:\n return int(time.mktime(value.timetuple()))\n else:\n return int(round((time.mktime(value.timetuple()) +\n value.microsecond / 1E6) * 1000))", "def timestamp(self):\n # this only returns second precision, which is why we don't use it\n #now = calendar.timegm(datetime.datetime.utcnow().utctimetuple())\n\n # this returns microsecond precision\n # http://bugs.python.org/msg180110\n epoch = datetime.datetime(1970, 1, 1)\n return (self - epoch).total_seconds()", "def int2dt(ts, ts_mult=1e3):\n return datetime.datetime.utcfromtimestamp(float(ts) / ts_mult)", "def ClockUsToTimestamp(clock_us, reference_clock_us, reference_timestamp):\n\n return reference_timestamp + (clock_us - reference_clock_us) / 1.0e6", "def timestamp(self, t):\n if isinstance(t, datetime):\n t = time.mktime(t.timetuple())\n return t - 631065600", "def to_timestamps(self):\n print ('\\nConverting Date: ' + timestamp + '\\n')\n self.to_unix_sec()\n self.to_unix_milli()\n self.to_win_64_hex()\n self.to_win_64_hexle()\n self.to_chrome()\n self.to_ad()\n self.to_unix_hex_32be()\n self.to_unix_hex_32le()\n self.to_cookie()\n self.to_ole_be()\n self.to_ole_le()\n self.to_mac()\n self.to_hfs_dec()\n self.to_hfs_be()\n self.to_hfs_le()\n self.to_msdos()\n self.to_fat()\n 
self.to_systime()\n self.to_filetime()\n self.to_prtime()\n self.to_ole_auto()\n self.to_ios_time()\n self.to_sym_time()\n self.to_gps_time()\n self.timestamp_output()\n print ('\\r')", "def to_unix_milli(self):\n try:\n dt_obj = duparser.parse(timestamp)\n self.out_unix_milli = str(int((dt_obj - self.epoch_1970).total_seconds()*1000))\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.out_unix_milli = False\n return self.out_unix_milli", "def datetime_to_epoch(datetime_obj):\n return int(datetime_obj.strftime(\"%s\")) * 1000", "def datetime_to_float(t, epoch=None):\n if epoch is None:\n epoch = np.datetime64(0, \"s\")\n return (t - epoch) / np.timedelta64(1, \"s\")", "def to_epoch(datetime_obj):\n if sys.version_info[0:2] < (3, 3):\n import calendar\n\n return (\n calendar.timegm(datetime_obj.timetuple())\n + datetime_obj.microsecond / 1000000\n )\n else:\n return datetime_obj.timestamp()", "def create_naive_datetime():\n # datetime(year, month, day, hour, minute, second, microsecond, tzinfo)\n dt = datetime.datetime(2017, 1, 1, 0, 0, 0)\n print(\"year: \" + str(dt.year))\n print(\"second: \" + str(dt.second))", "def _pd_datetime_to_timestamp_proto(dtype, value) -> Timestamp:\n\n if type(value) in [np.float64, np.float32, np.int32, np.int64]:\n return Timestamp(seconds=int(value))\n if dtype.__str__() == \"datetime64[ns]\":\n # If timestamp does not contain timezone, we assume it is of local\n # timezone and adjust it to UTC\n local_timezone = datetime.now(timezone.utc).astimezone().tzinfo\n value = value.tz_localize(local_timezone).tz_convert(\"UTC\").tz_localize(None)\n return Timestamp(seconds=int(value.timestamp()))\n if dtype.__str__() == \"datetime64[ns, UTC]\":\n return Timestamp(seconds=int(value.timestamp()))\n else:\n return Timestamp(seconds=np.datetime64(value).astype(\"int64\") // 1000000)", "def _CopyFromDateTimeValues(self, date_time_values):\n year = date_time_values.get('year', 0)\n month = date_time_values.get('month', 0)\n day_of_month = date_time_values.get('day_of_month', 0)\n hours = date_time_values.get('hours', 0)\n minutes = date_time_values.get('minutes', 0)\n seconds = date_time_values.get('seconds', 0)\n microseconds = date_time_values.get('microseconds', 0)\n time_zone_offset = date_time_values.get('time_zone_offset', 0)\n\n precision_helper = precisions.PrecisionHelperFactory.CreatePrecisionHelper(\n self._precision)\n\n fraction_of_second = precision_helper.CopyMicrosecondsToFractionOfSecond(\n microseconds)\n\n self._normalized_timestamp = None\n self._number_of_seconds = self._GetNumberOfSecondsFromElements(\n year, month, day_of_month, hours, minutes, seconds, time_zone_offset)\n self._time_elements_tuple = (\n year, month, day_of_month, hours, minutes, seconds)\n self._time_zone_offset = time_zone_offset\n\n self.fraction_of_second = fraction_of_second", "def pts2ms(pts, dt):\n return pts*dt", "def hydrate_time(nanoseconds, tz=None):\n seconds, nanoseconds = map(int, divmod(nanoseconds, 1000000000))\n minutes, seconds = map(int, divmod(seconds, 60))\n hours, minutes = map(int, divmod(minutes, 60))\n seconds = (1000000000 * seconds + nanoseconds) / 1000000000\n t = Time(hours, minutes, seconds)\n if tz is None:\n return t\n tz_offset_minutes, tz_offset_seconds = divmod(tz, 60)\n zone = FixedOffset(tz_offset_minutes)\n return zone.localize(t)", "def int2dt(ts, ts_mult=1e3):\n return datetime.utcfromtimestamp(float(ts) / ts_mult)", "def system_to_ntp_time(date):\n return date + NTP_DELTA", "def 
to_datetime(self):\n # convert Modified Julian Day epoch to datetime variable\n epoch = np.datetime64(datetime.datetime(*_mjd_epoch))\n # use nanoseconds to keep as much precision as possible\n delta_time = np.atleast_1d(self.MJD*self.day*1e9).astype(np.int64)\n # return the datetime array\n return np.array(epoch + delta_time.astype('timedelta64[ns]'))", "def _GetNormalizedTimestamp(self):\n if self._normalized_timestamp is None:\n if self._number_of_seconds is not None:\n self._normalized_timestamp = (\n decimal.Decimal(self._deciseconds) /\n definitions.DECISECONDS_PER_SECOND)\n self._normalized_timestamp += decimal.Decimal(self._number_of_seconds)\n\n if self._time_zone_offset:\n self._normalized_timestamp -= self._time_zone_offset * 60\n\n return self._normalized_timestamp", "def ConvertTime( self ) :\n \n # modules:\n import logging\n import datetime\n import netCDF4\n import numpy\n \n #\n # Original 'Time' units and description:\n #\n # title = \"Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC), 1 January 1970.\"\n # units = \"s\"\n #\n # Create new field 'Datetime' field with units:\n # units = \"Seconds since 1970-01-01 00:00'\n #\n # select:\n varid = self.swaths[self.component]['Geolocation Fields']['Time']\n # values:\n tvalues = varid['data']\n # extract description:\n long_name = varid['long_name'].decode('latin-1')\n # check ...\n key = 'Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC),'\n if long_name.startswith(key) :\n # remove leading description:\n time0 = long_name.replace(key,'').replace('.','').strip()\n # extract datetime object:\n t0 = datetime.datetime.strptime(time0,'%d %B %Y')\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n if 'mask' in dir(tvalues) :\n values1d = netCDF4.num2date( tvalues.data, var['units'] )\n else :\n values1d = netCDF4.num2date( tvalues , var['units'] )\n #endif\n # alternative:\n # \"Time at Start of Scan (s, TAI93)\"\n elif 'TAI' in long_name :\n # find start:\n i0 = long_name.index('TAI')\n # extract:\n year = int(long_name[i0+3:].replace(')',''))\n # convert to 4-digits if necessary:\n if year < 100 :\n if year > 50 :\n year = 1900 + year\n else :\n year = 2000 + year\n #endif\n #endif\n # reference time:\n t0 = datetime.datetime(year,1,1,0,0,0)\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n values1d = netCDF4.num2date( tvalues, var['units'] )\n else :\n self.logger.error( 'could not convert time units \"%s\"' % long_name )\n self.logger.error( 'first value : %f' % tvalues[0] )\n raise Exception\n #endif\n \n # expand to 2D:\n var['data'] = numpy.zeros( (self.ntime,self.np), values1d.dtype )\n for ip in range(self.np) :\n var['data'][:,ip] = values1d\n #endfor\n \n # set dim names:\n var['dimnames'] = ('time','pixel')\n \n # store:\n self.swaths[self.component]['Geolocation Fields']['Datetime'] = var", "def datetime_to_epoch(datetime):\n return datetime.astype('int64') // 1e9", "def convert_object_to_timestamp(data):\n for k, value in data.items():\n if isinstance(value, (datetime, date)):\n value = \"::\".join(\n [type(value).__name__, \"%d\" % time.mktime(value.timetuple())]\n )\n data[k] = value\n return data", "def from_gps_time(self):\n try:\n gps_stamp = Time(int(gps), format='gps', scale='utc')\n 
gps_stamp.format='iso'\n self.in_gpstime = (duparser.parse(str(gps_stamp)).strftime('%Y-%m-%d %H:%M:%S.%f'))\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.in_gpstime = False\n return self.in_gpstime", "def _timestamp_to_datetime(timestamp):\n return datetime.fromtimestamp(timestamp * 0.001, tz=timezone.utc)", "def _time_ms(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n diff = dt - epoch\n return diff.total_seconds() * 1000", "def _convert_timestamp_2_periodic_time(self, timestamp):\n \n l = \"\"\n\n # daily periodic\n theta = self.two_pi_by_one_day_second * (int(timestamp[0:-3]) % self.one_day_second)\n #x = 1 + np.cos(theta)\n #y = 1 + np.sin(theta)\n x = np.cos(theta)\n y = np.sin(theta)\n l += str(x) + \",\" + str(y)\n l += \",\"\n\n # weekly periodic\n theta = self.two_pi_by_seven_days_second * (int(timestamp[0:-3]) % self.seven_days_second)\n # no need plus one?\n #x = 1 + np.cos(theta)\n #y = 1 + np.sin(theta)\n x = np.cos(theta)\n y = np.sin(theta)\n l += str(x) + \",\" + str(y)\n\n return l", "def gon2dms(gon):\n return dec2dms(gon2dec(gon))", "def to_prtime(self):\n ts_type = self.ts_types['prtime']\n try:\n dt_obj = duparser.parse(self.timestamp)\n if hasattr(dt_obj.tzinfo, '_offset'):\n dt_tz = dt_obj.tzinfo._offset.total_seconds()\n dt_obj = duparser.parse(self.timestamp, ignoretz=True)\n else:\n dt_tz = 0\n self.out_prtime = str(int(((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz)) * 1000000))\n ts_output = str(\"{}\\t\\t\\t{}\".format(ts_type, self.out_prtime))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_prtime = ts_output = False\n return self.out_prtime, ts_output", "def ldap_to_datetime(timestamp: float):\n from datetime import datetime, timedelta\n return datetime(1601, 1, 1) + timedelta(timestamp/10000000)", "def timestamps():\n timestamps = ( # Index\n 1459516622.1, # 0\n 1459516622.2, # 1\n 1459516622.3, # 2\n 1459516623.0, # 3\n 1459516623.1, # 4\n 1459516623.3, # 5\n 1459516624.0, # 6\n )\n return timestamps", "def pb_timestamp_to_datetime(timestamp_pb):\n return (\n _EPOCH +\n datetime.timedelta(\n seconds=timestamp_pb.seconds,\n microseconds=(timestamp_pb.nanos / 1000.0),\n )\n )", "def ms2pts(ms, dt):\n return int(ms/dt)", "def normalize_times(time,dtstart,dtend):\n time=np.datetime64(time)\n dtstart=np.datetime64(dtstart)\n dtend=np.datetime64(dtend)\n time_range = (dtend - dtstart).astype('float64')\n seconds_from_t0 = (time - dtstart).astype('float64')\n return seconds_from_t0 / time_range", "def timestamp2datetime(value, millis=False):\n return datetime.fromtimestamp(value / (millis and 1000. 
or 1))", "def convert_timestamp(stamp):\n date = datetime.fromtimestamp(float(stamp))\n return date.strftime(\"%m/%d/%y %I:%M:%S %p\")", "def secondsToDatum(timeint):\n return datetime.datetime.utcfromtimestamp(timeint).strftime('%Y-%m-%d')", "def _get_timestamp(self, timestamp):\n return int(timestamp * 1e6)", "def microseconds_to_datetime(ms):\n return datetime.utcfromtimestamp(ms / 1000000.0).replace(tzinfo=pytz.utc)", "def _time_to_datetime(value):\r\n assert isinstance(value, datetime.time)\r\n return datetime.datetime(1970, 1, 1,\r\n value.hour, value.minute, value.second,\r\n value.microsecond)", "def dehydrate_datetime(value):\n\n def seconds_and_nanoseconds(dt):\n if isinstance(dt, datetime):\n dt = DateTime.from_native(dt)\n zone_epoch = DateTime(1970, 1, 1, tzinfo=dt.tzinfo)\n t = dt.to_clock_time() - zone_epoch.to_clock_time()\n return t.seconds, t.nanoseconds\n\n tz = value.tzinfo\n if tz is None:\n # without time zone\n value = utc.localize(value)\n seconds, nanoseconds = seconds_and_nanoseconds(value)\n return Structure(ord(b\"d\"), seconds, nanoseconds)\n elif hasattr(tz, \"zone\") and tz.zone:\n # with named time zone\n seconds, nanoseconds = seconds_and_nanoseconds(value)\n return Structure(ord(b\"f\"), seconds, nanoseconds, tz.zone)\n else:\n # with time offset\n seconds, nanoseconds = seconds_and_nanoseconds(value)\n return Structure(ord(b\"F\"), seconds, nanoseconds, tz.utcoffset(value).seconds)", "def ntp_to_system_time(date):\n return date - NTP_DELTA", "def GPSlatlon2XY_time(lat_u, lon_u, theta):\n\n\trho_u = np.sqrt(np.power(lon_u, 2) + np.power(lat_u, 2))\n\ttheta_new_u = np.arctan2(lat_u, lon_u) - theta\n\n\tUx, Uy = rho_u * np.cos(theta_new_u), rho_u * np.sin(theta_new_u)\n\n\treturn Ux, Uy", "def _convert_sfx_timestamp(ts: int) -> float:\n return float(ts) / 1000", "def time_struct_to_datetime(struct_time_object):\n return datetime.datetime(*struct_time_object[:6])", "def testCopyFromDateTimeString(self):\n golang_time_object = golang_time.GolangTime()\n\n golang_time_object.CopyFromDateTimeString('0001-01-01')\n self.assertEqual(golang_time_object._number_of_seconds, 0)\n self.assertEqual(golang_time_object._nanoseconds, 0)\n self.assertEqual(golang_time_object._time_zone_offset, 0)\n\n golang_time_object.CopyFromDateTimeString('0001-01-01 00:01:00')\n self.assertEqual(golang_time_object._number_of_seconds, 60)\n self.assertEqual(golang_time_object._nanoseconds, 0)\n self.assertEqual(golang_time_object._time_zone_offset, 0)\n\n golang_time_object.CopyFromDateTimeString('0001-01-01 00:00:00.000001')\n self.assertEqual(golang_time_object._number_of_seconds, 0)\n self.assertEqual(golang_time_object._nanoseconds, 1000)\n self.assertEqual(golang_time_object._time_zone_offset, 0)\n\n golang_time_object.CopyFromDateTimeString('2000-01-01')\n self.assertEqual(golang_time_object._number_of_seconds, 63082281600)\n self.assertEqual(golang_time_object._nanoseconds, 0)\n self.assertEqual(golang_time_object._time_zone_offset, 0)\n\n golang_time_object.CopyFromDateTimeString('2000-01-01 12:23:45.567890')\n self.assertEqual(golang_time_object._number_of_seconds, 63082326225)\n self.assertEqual(golang_time_object._nanoseconds, 567890000)\n self.assertEqual(golang_time_object._time_zone_offset, 0)\n\n golang_time_object.CopyFromDateTimeString(\n '2000-01-01 12:23:45.567890+01:00')\n self.assertEqual(golang_time_object._number_of_seconds, 63082326225)\n self.assertEqual(golang_time_object._nanoseconds, 567890000)\n self.assertEqual(golang_time_object._time_zone_offset, 60)", 
"def to_ns(x):\n import pandas as pd\n return pd.to_datetime(x).value", "def _time_ms(self, dt):\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=pytz.utc)\n return int((dt - self._EPOCH).total_seconds() * 1000)", "def unixTime2JD(ts, tu):\n\n return date2JD(*unixTime2Date(ts, tu))", "def _GetNormalizedTimestamp(self):\n if self._normalized_timestamp is None:\n if self._number_of_seconds is not None:\n self._normalized_timestamp = decimal.Decimal(self._number_of_seconds)\n\n return self._normalized_timestamp", "def GetPlasoTimestamp(self):\n if self._number_of_seconds is None:\n return\n\n if self._microseconds is not None:\n return (self._number_of_seconds * 1000000) + self._microseconds\n\n return self._number_of_seconds * 1000000", "def convert_timeval(seconds_since_epoch):\n frac, whole = math.modf(seconds_since_epoch)\n microseconds = math.floor(frac * 1000000)\n seconds = math.floor(whole)\n return seconds, microseconds", "def from_np_datetimes(np_datetimes):\n\n # There's no easy way to extract year, month, day from numpy datetime, so\n # we start with ordinals.\n ordinals = tf.constant(np_datetimes, dtype=tf.int32) + _ORDINAL_OF_1_1_1970\n return from_ordinals(ordinals, validate=False)", "def iso_to_unix_time_nanos(iso_time: ISOTimestamp) -> int:\n base_ts, decimals, tz_part = iso_timestamp_split(iso_time)\n\n if tz_part == '':\n tz_part = '+00:00'\n\n nanos = int(_decimals_to_precision(decimals, 9, True)[1:])\n\n return int(datetime.fromisoformat(base_ts + tz_part).timestamp()) * 1000000000 + nanos", "def json_encoder(obj):\n if isinstance(obj, datetime.datetime):\n return time.mktime(obj.timetuple())\n if isinstance(obj, datetime.timedelta):\n return obj.total_seconds()", "def _create_timestamp():\n return (datetime.utcnow() - datetime(1970,1,1)).total_seconds()", "def datetime2epoch(dt):\n return int(mktime(dt.timetuple())*1000)", "def unixTime2Date(ts, tu, dt_obj=False):\n\n # Convert the UNIX timestamp to datetime object\n dt = datetime.utcfromtimestamp(float(ts) + float(tu)/1000000)\n\n\n if dt_obj:\n return dt\n\n else:\n return dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, float(tu)/1000", "def to_timestamp(dt):\n return time.mktime(dt.timetuple())", "def convert_timestamp(data):\n try:\n return datetime.datetime.fromtimestamp(float(data))\n except ValueError:\n return datetime.datetime.fromisoformat(data.decode(\"utf-8\"))", "def datetime_to_timestamp(dt):\n\n epoch = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)\n return (dt - epoch).total_seconds()", "def to_Timestamp(time):\n # type: (Union[int, float]) -> timestamp_pb2.Timestamp\n seconds = int(time)\n nanos = int((time - seconds) * 10**9)\n return timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos)", "def testCopyToDateTimeString(self):\n golang_timestamp = bytes.fromhex('010000000eafffe8d121d95050ffff')\n golang_time_object = golang_time.GolangTime(\n golang_timestamp=golang_timestamp)\n\n self.assertEqual(golang_time_object._number_of_seconds, 63082326225)\n self.assertEqual(golang_time_object._nanoseconds, 567890000)\n self.assertEqual(golang_time_object._time_zone_offset, 0)\n\n date_time_string = golang_time_object.CopyToDateTimeString()\n self.assertEqual(date_time_string, '2000-01-01 12:23:45.567890000')\n\n golang_timestamp = bytes.fromhex('010000000eafffe8d10000ddd5ffff')\n golang_time_object = golang_time.GolangTime(\n golang_timestamp=golang_timestamp)\n\n self.assertEqual(golang_time_object._number_of_seconds, 63082326225)\n self.assertEqual(golang_time_object._nanoseconds, 56789)\n 
self.assertEqual(golang_time_object._time_zone_offset, 0)\n\n date_time_string = golang_time_object.CopyToDateTimeString()\n self.assertEqual(date_time_string, '2000-01-01 12:23:45.000056789')", "def dehydrate_time(value):\n if isinstance(value, Time):\n nanoseconds = int(value.ticks * 1000000000)\n elif isinstance(value, time):\n nanoseconds = (3600000000000 * value.hour + 60000000000 * value.minute +\n 1000000000 * value.second + 1000 * value.microsecond)\n else:\n raise TypeError(\"Value must be a neotime.Time or a datetime.time\")\n if value.tzinfo:\n return Structure(ord(b\"T\"), nanoseconds, value.tzinfo.utcoffset(value).seconds)\n else:\n return Structure(ord(b\"t\"), nanoseconds)", "def timestamp_from_datetime(date):\n if getattr(date, 'tzinfo', None) is None:\n return (date - datetime.datetime(1970, 1, 1)).total_seconds()\n else:\n return (date - datetime.datetime(\n 1970, 1, 1, tzinfo=pytz.utc)).total_seconds()", "def timestamp_to_datetime(value):\n if not isinstance(value, (int, long, float)):\n raise ValueError(\n 'Expecting a number, got %s instead' % type(value).__name__)\n return EPOCH + datetime.timedelta(microseconds=value)", "def timestamp(self):\n if self._tzinfo is None:\n s = self._mktime()\n return s + self.microsecond / 1e6\n else:\n return (self - _EPOCH).total_seconds()", "def _CopyFromDateTimeValues(self, date_time_values):\n year = date_time_values.get('year', 0)\n month = date_time_values.get('month', 0)\n day_of_month = date_time_values.get('day_of_month', 0)\n hours = date_time_values.get('hours', 0)\n minutes = date_time_values.get('minutes', 0)\n seconds = date_time_values.get('seconds', 0)\n time_zone_offset = date_time_values.get('time_zone_offset', 0)\n\n self._normalized_timestamp = None\n self._number_of_seconds = self._GetNumberOfSecondsFromElements(\n year, month, day_of_month, hours, minutes, seconds, time_zone_offset)\n self._time_elements_tuple = (\n year, month, day_of_month, hours, minutes, seconds)\n self._time_zone_offset = time_zone_offset" ]
[ "0.64110595", "0.6309177", "0.6237445", "0.61667824", "0.61475044", "0.61468357", "0.6086212", "0.606989", "0.6046954", "0.60396606", "0.6038311", "0.60068566", "0.59911877", "0.5953865", "0.59505874", "0.5947502", "0.5941145", "0.5941145", "0.5921453", "0.591261", "0.58763117", "0.58726496", "0.58195144", "0.5815126", "0.5805712", "0.57957864", "0.579256", "0.57904685", "0.57684076", "0.5761309", "0.5758733", "0.5753922", "0.5746501", "0.5708094", "0.57073647", "0.56723046", "0.5663228", "0.5662688", "0.56610006", "0.5651665", "0.5643815", "0.56417584", "0.5638845", "0.56368977", "0.56350064", "0.56325364", "0.5626296", "0.56235796", "0.5619235", "0.56154686", "0.56123704", "0.5610337", "0.56073254", "0.5604302", "0.5602015", "0.5596741", "0.55944", "0.558678", "0.5582602", "0.5578301", "0.55761665", "0.557603", "0.5567313", "0.55552274", "0.55506563", "0.5537846", "0.553392", "0.5532345", "0.5521326", "0.5515715", "0.54940504", "0.54903924", "0.5479733", "0.5478004", "0.5465129", "0.54602623", "0.5453236", "0.5448258", "0.5442922", "0.5440002", "0.54358566", "0.5429021", "0.5428464", "0.54164886", "0.540156", "0.5398279", "0.53928185", "0.5380631", "0.5376501", "0.5375349", "0.5375284", "0.5373814", "0.5365464", "0.5363667", "0.5355794", "0.53479266", "0.53439075", "0.53426343", "0.5341043", "0.53318197" ]
0.7270666
0
Get the absolute time of the discharges
def get_absolute_datetime(reference, offset): absolute_datetime = reference + datetime.timedelta(seconds=offset) return absolute_datetime
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getChargeTime(self):\n return self.json_state.get(\"charging\").get(\"seconds_charging\")", "def get_ref_time(self):\n from datetime import datetime, timedelta\n\n ref_time = datetime(2010, 1, 1, 0, 0, 0)\n ref_time += timedelta(seconds=int(self.fid['/PRODUCT/time'][0]))\n return ref_time", "def get_time(self):\n return self.get_timed() / 10.0", "def pending_time(self):\n now = datetime.datetime.utcnow().replace(tzinfo=utc)\n timediff = now - self.time_requested\n return timediff", "def get_time(self) -> float:\n raise NotImplementedError()", "def get_time(self):\r\n\t\tactual_time = '{}, {} de {} del {} ({}:{} {} (UTC))'\r\n\t\tda_taim = actual_time.format(self.wday, self.day, self.mon,\r\n\t\t\t\t\t\t\t\t\t self.year, self.hour, self.min,\r\n\t\t\t\t\t\t\t\t\t self.day_or_night)\r\n\t\treturn da_taim", "def _get_remain_hd_time(self):\n return self.__remain_hd_time", "def duration(self):\r\n return self.t2 - self.t1", "def get_delta_time(self):\n return self.fid['/PRODUCT/delta_time'][0, :].astype(int)", "def get_time(self):\n return self._total_time", "def when(self):\n\n # current UTC time\n now = datetime.datetime.utcnow()\n # calculate timedelta and return\n return now - self.creation_time", "def get_time(cls):\n now = rospy.Time.now()\n return now.secs + now.nsecs*(10**-9) # time in seconds", "def discharge(self):\n return self._discharge", "def diff(self):\n return datetime.datetime.now() - self.stamp", "def gettime(self):\n return self.t", "def get_duration(self):\n return float(self.time.iloc[-1] - self.time.iloc[0])", "def ref_time(self) -> float:\n return ntp_to_system_time(self.ref_timestamp)", "def time_return(self):\n return self.time", "def duration(self):\n return (self.fcip_doc[\"latest_timestamp\"] - self.fcip_doc[\"packet_timestamps\"][0])", "def return_delay_time(self):\n return self._read(MX_RETURN_DELAY_TIME)", "def discharge_date_time(subject_id, conn):\n cur = conn.cursor()\n sql = \"\"\"SELECT ED_DEPARTURE_TIME, EDDisposition FROM DEMOGRAPHICS\n WHERE STUDYID = {}\"\"\".format(subject_id)\n cur.execute(sql)\n data = cur.fetchall()\n if data[0][0]: # subjects who were discharged from the ED\n date, time = data[0][0].split(\" \")\n dispo = data[0][1]\n return date, time, 'discharge', dispo\n else: # Subjects who were admitted\n sql = \"\"\"SELECT HOSP_ADMSN_TIME, EDDisposition FROM DEMOGRAPHICS\n WHERE STUDYID = {}\"\"\".format(subject_id)\n cur.execute(sql)\n data = cur.fetchall()\n if data[0][0]:\n date, time = data[0][0].split(\" \")\n dispo = data[0][1]\n return date, time, 'discharge', dispo", "def dep_time(self):\n return self._dep_time", "def getTime(self) -> float:\n return self.t", "def get_time(self):\n return self.time", "def time_detected(self) -> datetime:\n return datetime.fromtimestamp(\n self.properties[DBUS_ATTR_TIME_DETECTED] * 10**-6\n ).astimezone(timezone.utc)", "def get_time(self):\n return self.run_command('get_time')[0]", "def break_time(self):\n\t\ts = timedelta()\n\t\tfor i in xrange(1, len(self.toggles)-1, 2):\n\t\t\ts += self.toggles[i+1] - self.toggles[i]\n\n\t\t# If not working need to add the last period of time\n\t\tif not self.status():\n\t\t\ts += datetime.now() - self.toggles[-1]\n\t\treturn s", "def remaining_ms():", "def _getUpTime(self):\n diff = (datetime.datetime.now() - self._startTime).__str__()\n return diff[:diff.find('.')]", "def get_time(self) -> int:\n t = str(self.eval(\"pyb.RTC().datetime()\").encode(\"utf-8\"))[1:-1].split(\", \")\n return int(t[4]) * 3600 + int(t[5]) * 60 + int(t[6])", "def 
rt_dep_time(self):\n return self._rt_dep_time", "def timeSinceReport(self):\n return self.time - self.reportedTotal", "def orig_time(self) -> float:\n return ntp_to_system_time(self.orig_timestamp)", "def get_time_walking(self):\n return self.time_step_to_enqueue - self.time_entered", "def get_time() -> int:\n return store.time", "def getTimeUntilCalibrationExpire(self): \n return getI1ProTimeUntilCalibrationExpire()", "def actual_time():\n return _time.time()", "def deleted_time(self) -> str:\n return pulumi.get(self, \"deleted_time\")", "def TimeToRefill(self):\n # Get current timestamp in miliseconds from unix epoch\n now = int(time.time() * 1000)\n timeatrefile = self.status['timestamp'] + self.status['refillIn']\n\n timetorefil = timeatrefile - now + 1000 # plus one second fudge factor\n if timetorefil < 0:\n timetorefil = 0\n\n # Return value in seconds\n return timetorefil / 1000.0", "def get_duration(self, current_time):\n return current_time - self.slam.get_data(node_name=self.last_point_name)['time']", "def GAME_TIME_ADVANCE(dt):", "def getRemainingTime( self, userDN, userGroup ):\n cmd = \"SELECT TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime ) FROM `ProxyDB_Proxies`\"\n retVal = self._query( \"%s WHERE UserDN = '%s' AND UserGroup = '%s'\" % ( cmd, userDN, userGroup ) )\n if not retVal[ 'OK' ]:\n return retVal\n data = retVal[ 'Value' ]\n if not data:\n return S_OK( 0 )\n return S_OK( int( data[0][0] ) )", "def get_imeastime(self):\n return self.itime", "def calculation_time(self) -> float:\n return self.__calculation_time", "def get_duration(self):\n return (self.stop_day - self.start_day) * (24 * 60) \\\n + (self.stop_hour - self.start_hour) * 60", "def exptime(self):\n exptime = float(self.get('TRUITIME')) * int(self.get('COADDONE'))\n return exptime", "def calc_recharge_time(self, upper_battery_capacity = 4):\n # coasting_velocity = self.coast_speed(lap_length, angle) # KWh, point when to swap back to battery power\n time = ((upper_battery_capacity - self.current_capacity) / \n (self.recharge_rate))\n return time", "def calc_time(self, distance):\r\n if distance < 400:\r\n return 2*math.sqrt(distance / 1406.25)\r\n else:\r\n distance -= 400\r\n return distance / 750 + 16 / 15", "def getRefDelay (self, time):\n return self._response.getRefDelay(time)", "def _self_time(self):\r\n return self.duration() - sum([child.duration() for child in self.children])", "def delete_time(self) -> str:\n return pulumi.get(self, \"delete_time\")", "def delete_time(self) -> str:\n return pulumi.get(self, \"delete_time\")", "def get_date():\n return (datetime.now() - TIMEDELTA).isoformat()", "def acquisition_time(self):\n # Get time channels indices\n #time_channel_idx = [idx\n # for idx, channel in enumerate(self.channels)\n # if channel.lower() == 'time']\n #if len(time_channel_idx) > 1:\n # raise KeyError(\"more than one time channel in data\")\n ## Check if the time channel is available\n #elif len(time_channel_idx) == 1:\n ## # Use the event list\n # time_channel = self.channels[time_channel_idx[0]]\n # return (self[-1, time_channel] - self[0, time_channel]) \\\n # * self.time_step\n #elif (self._acquisition_start_time is not None and\n # self._acquisition_end_time is not None):\n # # Use start_time and end_time:\n # dt = (self._acquisition_end_time - self._acquisition_start_time)\n # return dt.total_seconds()\n #else:\n # return None\n \n dt = (self._acquisition_end_time - self._acquisition_start_time)\n \n return dt.total_seconds()", "def get_time(self) -> float:\n 
self.rocket.update()\n return self.rocket.time", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time", "def get_time(self):\n return ''", "def get_time(self):\n return self.block.create_time", "def get_propagation_time(self):\n return 0.0 # self.get_distance_to_gateway() / (3 * pow(10,8))", "def calculate_time(start_time):\r\n return round(time() - start_time, 2)", "def round_trip_time(self):\r\n return self.completion_time - self.launch_time", "def get_time(self):\n return self._current_time_sec", "def time():\n master = MasterTimer.getMasterTimer()\n\n if master.end_time:\n return master.end_time - master.start_time\n else:\n return time.time() - master.start_time", "def pending_time_descriptive(self):\n return get_time_descriptive(self.pending_time.seconds)", "def cadenceEventTime(self):\n return (self.raw[2] << 8) | self.raw[1]", "def ctime(self): # real signature unknown; restored from __doc__\r\n pass", "def get_time(self):\n\t\treturn time.time()", "def time_to_decision(self):\n if self.offending_date is None or self.date_of_decision is None:\n return None\n else:\n return self.date_of_decision - self.offending_date", "def response_time(self):\r\n if self.__arrival_time == INVALID_TIME:\r\n self.__logger.debug(\"Request %s missing arrival time\" % self.__id)\r\n return INVALID_TIME_DELTA\r\n completion_time = self.__arrival_time\r\n for task_id, task in self.__tasks.items():\r\n if task.completion_time == INVALID_TIME:\r\n self.__logger.debug((\"Task %s in request %s missing completion \"\r\n \"time\") % (task_id, self.__id))\r\n return INVALID_TIME_DELTA\r\n task_completion_time = task.adjusted_completion_time()\r\n #if task.scheduler_launch_time > task.node_monitor_launch_time:\r\n #self.__logger.warn((\"Task %s suggests clock skew: scheduler launch time %d, node \"\r\n # \"monitor launch time %d\") %\r\n\r\n #(task_id, task.scheduler_launch_time,\r\n # task.node_monitor_launch_time))\r\n completion_time = max(completion_time, task_completion_time)\r\n return completion_time - self.__arrival_time", "def get_timelag(self):\n self.do_query(\"SELECT EXTRACT(EPOCH FROM (now() - \"\n \"pg_last_xact_replay_timestamp()))::INT;\")\n return self.cursor.fetchone()", "def getDeliveryTime(ori, dest):\n\n start_time = time.time()\n\n routingApi = herepy.RoutingApi(os.getenv(\"HERE_KEY\"))\n gm = GoogleMaps(os.getenv(\"GOOGLE_KEY\"))\n\n try:\n response = routingApi.truck_route(ori.coords[::-1], dest.coords[::-1], [herepy.RouteMode.truck, herepy.RouteMode.fastest]).as_dict()\n distance = response.get('response').get('route')[0].get('summary').get('distance') / 1000\n except herepy.error.HEREError:\n try:\n response = gm.distance_matrix(ori.coords[::-1], dest.coords[::-1], mode=\"driving\", departure_time=dt.datetime.now(), traffic_model=\"pessimistic\")\n distance = response.get('rows')[0].get('elements')[0].get('distance').get('value') / 1000\n except Exception as e:\n capture_exception(e)\n raise e\n\n if distance < 51:\n deltime = 6\n elif distance > 50 and distance < 701:\n deltime = 24\n elif distance > 700 and distance < 1400:\n deltime = 48\n else:\n deltime = 72\n\n print('--- Tiempo de ejecucion calcDeliveryTime: {} segundos ---'.format((time.time() - start_time)))\n\n return deltime, distance", "def get_time(self):\n return self.__time", "def remaintime_hour(self):\n return self._get_time_info([\"Remain_Time_H\", \"remainTimeHour\"])", "def time_difference(time1: Time, time2: Time) -> float:\n dsec = time1.sec 
- time2.sec\n dnanosec = time1.nanosec - time2.nanosec\n dt = dsec + dnanosec/(10**9)\n return dt", "def time(self):\n return self._clock() - self._starttime", "def find_duration(discharge, enroll_date, discharge_date):\n #pass\n today = datetime.datetime.today()\n if discharge : #True\n return (discharge_date - enroll_date).days\n else:\n return (today - enroll_date).days", "def _arrival_time(self):\n \n return self.mkt_time + timedelta(0, 0, self.latency)", "def current_time(cls) -> float:", "def ctime(self):\n return \"\"", "def ctime(self):\n return \"\"", "def time(self):\n\t\treturn self._time", "def get_chime_time(self):\n actual_time = datetime(year=self.start_time.year, month=self.start_time.month, day=self.start_time.day,\n hour=self.start_time.hour, minute=0, second=0, microsecond=0)\n if self.start_time.minute > 30:\n actual_time = actual_time + timedelta(hours=1)\n return actual_time", "def free_flight_time(self):\n return self._free_flight_time", "def adoption_time(self):\n return self._adoption_time", "def get_reltriggertimes(self):\n return np.array(self.trtimes)-self.soundstarttime", "def ddm(self):\n if self.positive:\n return DDMAngle(self.degree, self.minute + (self.second/60))\n else:\n return -DDMAngle(self.degree, self.minute + (self.second/60))", "def time(self) -> int:\n return self.__droneTime", "def time(self):\n return self.raw() % (60 * 24)", "def time_for_travel(self):\n return great_circle(self.pickupcoords, self.dropoffcoords).miles * 3600 / 25", "def getSubmitTime():", "async def time(self) -> dt.time:\n now = await self.AD.sched.get_now()\n return now.astimezone(self.AD.tz).time()", "def get_answer_time(self):\n sec = (self.updated_at - self.created_at).total_seconds()\n return f'{int((sec / 60) % 60):02d}:{int(sec):02d}'", "async def getDelayTimeRemaining(self):\n delay_time_remaining = await self.director.getItemVariableValue(\n self.item_id, \"DELAY_TIME_REMAINING\"\n )\n return delay_time_remaining", "def scheduled_deletion_time(self) -> Optional[datetime.datetime]:\n if not self.temporary:\n return None\n\n delta = Project.TEMPORARY_PROJECT_LIFESPANS.get(\n self.account.name, Project.TEMPORARY_PROJECT_LIFESPANS.get(\"default\")\n )\n return self.created + delta", "def disposition_time(self) -> str:\n return pulumi.get(self, \"disposition_time\")", "def _time_delta_from_info(info):\n now = datetime.datetime.now()\n then = info.start_time\n return str(now.replace(microsecond=0) - then.replace(microsecond=0))", "def get_time(self):\n return self.widget().time()", "def getTime(self):\n return self.time", "def getTime(self):\n return self.step / (self.max_step + int(self.include))" ]
[ "0.6573679", "0.65164846", "0.6515013", "0.6418441", "0.6405775", "0.63922805", "0.6391688", "0.6354492", "0.6346032", "0.6239634", "0.62149274", "0.6191225", "0.6191107", "0.61854005", "0.6183254", "0.6182833", "0.61757195", "0.61671525", "0.6158372", "0.61397165", "0.61087644", "0.6089555", "0.60700816", "0.606942", "0.606401", "0.6058256", "0.6056167", "0.6048735", "0.6040895", "0.60173863", "0.5997816", "0.5997193", "0.5996535", "0.59955484", "0.5994824", "0.5993925", "0.59908265", "0.597944", "0.5979278", "0.5977888", "0.597065", "0.59691155", "0.5965965", "0.59659475", "0.5965313", "0.5958812", "0.5950813", "0.5948664", "0.59410655", "0.5934075", "0.5930468", "0.5930468", "0.59266603", "0.5924168", "0.5922304", "0.59218633", "0.59218633", "0.59218633", "0.59201396", "0.5915713", "0.5914593", "0.59012604", "0.5899569", "0.58956283", "0.58910614", "0.58784294", "0.58539385", "0.58538836", "0.5853747", "0.58320063", "0.58299834", "0.582889", "0.5823752", "0.5815875", "0.58153075", "0.58064646", "0.5806347", "0.5805981", "0.58048904", "0.5801696", "0.580148", "0.580148", "0.5798994", "0.5791067", "0.57898414", "0.578876", "0.57876784", "0.5786523", "0.57860255", "0.576827", "0.5767245", "0.57602775", "0.57572794", "0.5751231", "0.57486403", "0.5730405", "0.57301074", "0.57279414", "0.5726615", "0.57249284", "0.57233876" ]
0.0
-1
Get the reference datetime from the KNMI LGT file as datetime
def get_reference_datetime(file): date_string = file.root.discharge1._f_getAttr('reference_datetime')[0] ref_date = datetime.datetime.strptime(date_string, '%d-%b-%Y;%H:%M:%S.%f') return ref_date
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ref_time(self):\n from datetime import datetime, timedelta\n\n ref_time = datetime(2010, 1, 1, 0, 0, 0)\n ref_time += timedelta(seconds=int(self.fid['/PRODUCT/time'][0]))\n return ref_time", "def get_file_date(self, file: str) -> date:", "def extract_datetime(fpath):\n try:\n handle = open(fpath, 'rb')\n if hexlify(handle.read(2)) != hexlify(u'MZ'):\n handle.close()\n return\n except:\n return\n\n try:\n handle.seek(60, 0)\n offset = handle.read(4)\n offset = hexlify(offset[::-1])\n\n if offset == '':\n handle.close()\n return\n\n offset = int(offset, 16)\n handle.seek(offset+8, 0)\n dword = handle.read(4)\n handle.close()\n\n t = unpack(\">L\", dword[::-1])[0]\n except:\n return\n return datetime.datetime.fromtimestamp(t)", "def _read_antti_datetime(dt_file):\n # NOTE: genfromtxt() doesn't work with gzipped files as it should, so we\n # unzip the file ourself, and use io.BytesIO to fake out genfromtext()\n if dt_file.split('.')[-1] == 'gz':\n ff = gzip.open(dt_file, 'r')\n else:\n ff = open(dt_file, 'r')\n\n sIO = io.BytesIO(ff.read().encode())\n ff.close()\n\n ymdHMS = np.genfromtxt(sIO, comments=\"%\")\n DT = np.array([dt.datetime(*elem) for elem in ymdHMS.astype('int')])\n sIO.close()\n\n return DT", "def _get_rec_datetime(self):\n\n rec_datetime = None\n date_string = ''\n time_string = ''\n datetime_string = ''\n\n if 'notes' not in self.info:\n return None\n\n for note_line in self.info['notes'].split('\\n'):\n\n # episodic acquisition mode\n if note_line.startswith('Created on '):\n date_string = note_line.strip('Created on ')\n if note_line.startswith('Start data acquisition at '):\n time_string = note_line.strip('Start data acquisition at ')\n\n # continuous acquisition mode\n if note_line.startswith('Created : '):\n datetime_string = note_line.strip('Created : ')\n\n if date_string and time_string:\n datetime_string = ' '.join([date_string, time_string])\n\n if datetime_string:\n try:\n rec_datetime = datetime.strptime(datetime_string,\n '%a %b %d %Y %H:%M:%S')\n except ValueError:\n pass\n\n return rec_datetime", "def get_mod_time(self):\n if self.file_meta[:2] == b'bp':\n file_meta_plist = ccl_bplist.load(BytesIO(self.file_meta))\n raw_date_time = file_meta_plist['$objects'][1]['LastModified']\n converted_time = datetime.datetime.fromtimestamp(raw_date_time)\n converted_time = converted_time.timetuple()\n return converted_time\n else:\n file_meta_plist = plistlib.loads(self.file_meta)\n return file_meta_plist['modified'].timetuple()", "def extract_datetime(filename) -> datetime:\n date_part = filename[-26:-7]\n return datetime.strptime(date_part, '%Y-%m-%d_%H-%M-%S')", "def _get_time(self): \n\t\t# need to variable-ize the version ??? 
\n\t\ttime = self.root.find('.//{http://www.opengis.net/kml/2.2}when').text\n\t\t## strip off last 5 chars, ie '.135Z in '2015-08-01T00:06:29.135Z'\n\t\tutc = dateutil.tz.tzutc() \n\t\tcentral = dateutil.tz.gettz('America/Chicago')\n\t\ttime = datetime.datetime.strptime(time[:-5], '%Y-%m-%dT%H:%M:%S')\n\t\ttime = time.replace(tzinfo=utc)\n\t\tself.time = time.astimezone(central)", "def read_datetime(self):\n with GPIOTimingContentManager(self.gpio, start=self._start_tx, end=self._end_tx):\n self._write_byte(self.REG_BURST_READ)\n\n regs = list()\n for _ in range(self.REG_SIZE):\n regs.append(self._read_byte())\n\n # Decode bytes to datetime\n return datetime.datetime.strptime(\" \".join([\"{:x}\".format(x) for x in regs]), self.DT_STR_FMT)", "def get_time(\n filepath,\n year_ref=2000,\n ):\n with xr.open_dataset(filepath) as fdata:\n # load time\n if 'Time' in fdata.dims:\n if 'xtime' in fdata.data_vars:\n xtime = fdata['xtime'].astype(str)\n elif 'xtime_startMonthly' in fdata.data_vars:\n xtime = fdata['xtime_startMonthly'].astype(str)\n else:\n print('Time variable not found. Using indices instead...')\n return np.arange(fdata.dims['Time'])\n else:\n return None\n time_str = [x.strip() for x in xtime.values]\n if int(time_str[0][:4]) < 1678:\n time_str = ['{:04d}'.format(int(s[:4])+year_ref)+s[4:] for s in time_str]\n time = pd.to_datetime(time_str, format='%Y-%m-%d_%H:%M:%S')\n return time", "def ref_time(self) -> float:\n return ntp_to_system_time(self.ref_timestamp)", "def reference_time(self):\n if hasattr(self, '_reference_time') is False:\n self._reference_time = self.midtime\n\n return self._reference_time", "def _get_mrk_meas_date(mrk):\n info = get_kit_info(mrk, False)[0]\n meas_date = info.get('meas_date', None)\n if isinstance(meas_date, (tuple, list, np.ndarray)):\n meas_date = meas_date[0]\n if isinstance(meas_date, datetime):\n meas_datetime = meas_date\n elif meas_date is not None:\n meas_datetime = datetime.fromtimestamp(meas_date)\n else:\n meas_datetime = datetime.min\n return meas_datetime", "def datetime(self):\r\n if 'observation_time_rfc822' in self.data \\\r\n and self.data['observation_time_rfc822']:\r\n tstr = self.data['observation_time_rfc822']\r\n tstr = ' '.join(tstr.split(' ')[:-2])\r\n return datetime.strptime(tstr, '%a, %d %b %Y %H:%M:%S')\r\n elif 'observation_time' in self.data:\r\n return datetime.strptime(self.data['observation_time'] \\\r\n +' %s'%datetime.now().year,\r\n 'Last Updated on %b %d, %H:%M %p %Z %Y')\r\n return ''", "def get_changefile_timestamp(changefile_type, file_sequence_number):\n url = get_url(changefile_type) + \"/\"\n url = url + (\"%03i/%03i/%03i\" % (file_sequence_number / 1000000,\n file_sequence_number / 1000 % 1000,\n file_sequence_number % 1000))\n url = url + \".state.txt\"\n changefile_timestamp = None\n for result in urllib.urlopen(url):\n # get timestamp\n timestamp_p = result.find(\"timestamp=\")\n if timestamp_p != -1:\n # found timestamp line\n timestamp_p += 10 # jump over text\n result = result[timestamp_p:].replace(\"\\\\\", \"\").strip()\n changefile_timestamp = strtodatetime(result)\n\n if not changefile_timestamp:\n logging.info(\"(no timestamp)\")\n if file_sequence_number == 0:\n changefile_timestamp = datetime(1900, 1, 1)\n else:\n AssertionError(\"no timestamp for %s changefile %i.\" %\n (changefile_type, file_sequence_number))\n else:\n logging.info(\"%s, id: %i, timestamp: %s\" %\n (changefile_type, file_sequence_number,\n changefile_timestamp.isoformat()))\n return changefile_timestamp", "def 
get_source_stamp(self):", "def get_source_stamp(self):", "def import_time(self) -> str:\n return pulumi.get(self, \"import_time\")", "def get_starttime(self):\n filetime = datetime.datetime.strptime(self.filenametime,\n \"%Y%m%d_%H%M%S\")\n if self.ldat_type != 'acc':\n starttime = filetime\n else:\n starttime = filetime - datetime.timedelta(seconds=512)\n return starttime", "def file_get_mdatetime(filename):\n return datetime.datetime.utcfromtimestamp(os.path.getmtime(filename))", "def get_timestamp(file_path):\n mtime = os.stat(file_path).st_mtime\n return datetime.datetime.fromtimestamp(mtime).isoformat()", "def time(self):\r\n return conf.lib.clang_getFileTime(self)", "def _get_orbit_start_date(self, filename):\n # if your data comes from the EUMETSAT EO Portal this function can\n if self.eo_portal is True:\n filename_base = os.path.basename(filename)\n fln_spl = filename_base.split('-')[5]\n fln_datetime = fln_spl.split('.')[0]\n return datetime.strptime(fln_datetime, self.datetime_format)\n\n else:\n orbit_start_str = os.path.basename(filename)[\n self.filename_datetime_format[0]:\n self.filename_datetime_format[1]]\n return datetime.strptime(orbit_start_str,\n self.filename_datetime_format[2])", "def get_datetime_from_model(model):\n hyperparams = get_hyperparams_from_model(model)\n run_datetime = hyperparams['datetime']\n run_datetime = run_datetime.replace('-', '_').replace(' ', '_').replace(':', '_')\n return run_datetime", "def original_start(self):\n if \"originalStart\" in self._prop_dict:\n return datetime.strptime(self._prop_dict[\"originalStart\"].replace(\"Z\", \"\"), \"%Y-%m-%dT%H:%M:%S.%f\")\n else:\n return None", "def get_time_stamp():\n with open(TIME_STAMP_FILE_NAME, 'r') as f:\n s = f.readline()\n return s", "def get_datetime_start(self):\n return self.get_t_sect()['datetime_start']", "def _parse_cvcfile(self, cvcfilepath):\n cvcfilename = os.path.basename(cvcfilepath)\n (Ymd, HMS, cvcextrest) = cvcfilename.split('_', 2)\n datatype, restdat = cvcextrest[0:3], cvcextrest[3:]\n (rest, _datstr) = restdat.split('.')\n _nr512 = 512\n if datatype == 'acc':\n rest = rest.lstrip('_')\n (_nr512, nrrcus0, nrrcus1) = map(int, rest.split('x'))\n filenamedatetime = datetime.datetime.strptime(Ymd + 'T' + HMS,\n '%Y%m%dT%H%M%S')\n # NOTE: For ACC, filename is last obstime, while for XST, it is first.\n if datatype == 'acc':\n filebegindatetime = filenamedatetime - datetime.timedelta(\n seconds=_nr512)\n else:\n filebegindatetime = filenamedatetime\n return datatype, filebegindatetime", "def etime(self):\n try:\n return self['datetime_2']\n except TypeError:\n return None", "def last_modified_date_time(self):\n if \"lastModifiedDateTime\" in self._prop_dict:\n return datetime.strptime(self._prop_dict[\"lastModifiedDateTime\"].replace(\"Z\", \"\"), \"%Y-%m-%dT%H:%M:%S.%f\")\n else:\n return None", "def file_creation_date(file_path):\n # Must be a valid path string\n assert os.path.isfile(file_path) is True\n\n unix_timestamp = os.path.getctime(file_path)\n\n return datetime.fromtimestamp(unix_timestamp)", "def get_previous_upload_timestamp(nightscout_data_file_name):\r\n return int(nightscout_data_file_name.split('_')[0])", "def _filetime_to_dt(ft):\r\n # Get seconds and remainder in terms of Unix epoch\r\n s, ns100 = divmod(ft - EPOCH_AS_FILETIME, HUNDREDS_OF_NANOSECONDS)\r\n # Convert to datetime object\r\n dt = datetime.utcfromtimestamp(s)\r\n # Add remainder in as microseconds. 
Python 3.2 requires an integer\r\n dt = dt.replace(microsecond=(ns100 // 10))\r\n return dt", "def get_datetime(self, record):\n value = RecordValue(self.timestamp_attribute).render(record)\n return datetime.datetime.fromtimestamp(value)", "def get_file_modified_date(filepath):\n return datetime.datetime.fromtimestamp(os.path.getmtime(filepath))", "def _get_date_taken(path):\n return Image.open(path)._getexif()[36867]", "def extract_datetime(self):\n\n # Legutolsó frissítés dátuma: 2020.03.24. 11:15\n doc = html.document_fromstring(self.req.text)\n el = doc.xpath('.//div[contains(@class, \"view-diagrams\")]')\n if el:\n text = \"\".join(\n el[0].xpath('.//text()')\n )\n # <p>Legutolsó frissítés dátuma: 2020.03.24. 11:15 </p>\n re_dt = re.compile(r\"Legutolsó frissítés dátuma: (.*?)\\n\")\n dt_from_re = re_dt.findall(text)\n\n if not dt_from_re:\n raise Exception(\"Did not find datetime from webpage\")\n\n dt_from_re = dt_from_re[0]\n dt_from_re = dateutil.parser.parse(dt_from_re)\n self.dt = dt_from_re", "def get_start_time(self):\n # Timezone and BST not accounted for. Always gives it as GMT.\n create_time = (os.path.getmtime(self.file_path))\n start_time = create_time - len(self.amplitude) / self.fs\n return datetime.fromtimestamp(start_time)", "def getDate(path):\n utime = ftp.stat(path=path).st_mtime\n last_modified = datetime.fromtimestamp(utime)\n return last_modified", "def compile_date(self):\n result = self._dll.JLINKARM_GetCompileDateTime()\n return ctypes.cast(result, ctypes.c_char_p).value.decode()", "def getCreationDate(filename):\r\n path = cachePath(filename)\r\n \r\n pe = pefile.PE(path)\r\n return datetime.fromtimestamp(pe.FILE_HEADER.TimeDateStamp)", "def date(self) -> datetime:\n return datetime.strptime(self.snapshot['snapshot_date'], fmt.DATETIME_TRACE_STRING)", "def get_source_ctime(self):\n return self.source_file_ctime", "def getTimestamp(self, filename=None):\n if filename == None:\n return float(self.filename[:-4])\n else:\n return float(filename[:-4])", "def get_seviri_file_time(file):\n if hasattr(file, '__iter__'):\n filenames = [f.split('/')[-1] for f in file]\n date = [datetime(int(f[38:42]), int(f[42:44]),\n int(f[44:46]), int(f[46:48]),\n int(f[48:50])) for f in filenames]\n else:\n f = file.split('/')[-1]\n date = datetime(int(f[38:42]), int(f[42:44]),\n int(f[44:46]), int(f[46:48]),\n int(f[48:50]))\n return date", "def file_timestamp(binblob):\n import pdb;pdb.set_trace()\n try:\n dt = datetime.datetime(1601,1,1,0,0,0) + datetime.timedelta(microseconds=binblob/10)\n except:\n dt = \"This field is incorrectly identified as a file timestamp in the template\"\n return dt", "def get_labels(file):\n file_split = file.split('+')\n lat = float(file_split[0])\n long = float(file_split[1])\n time = file_split[2].split('.')[0]\n #dtype='datetime64'\n return lat,long,time", "def modified(filename: str) -> datetime.datetime:\n fs, relative_path = url_to_fs(filename)\n return cast(datetime.datetime, fs.modified(relative_path))", "def timestamp(self):\n return parser.get_timestamp(self)", "def get_timestamp(self):\n p = self._get_sub_text('timestamp')\n if not p:\n return None\n else:\n return xep_0082.datetime(p)", "def _get_date_modified(path):\n return str(datetime.datetime.fromtimestamp(os.path.getmtime(path)))", "def get_date(img):\n exif = img.getexif()\n date = exif.get(306)\n if date is not None:\n date = date.replace(' ', '-').replace(':', '')\n return date", "def test_get_date(self):\n d = modis.get_date(os.path.splitext(self.fname)[0])\n 
self.assertTrue(isinstance(d, datetime.datetime))\n self.assertEqual(d, datetime.datetime(2015, 9, 23))", "def creation_date(path_to_file):\n if platform.system() == 'Windows':\n print(\"last modified: %s\" % time.ctime(os.path.getmtime(path_to_file)))\n modtime = time.ctime(os.path.getmtime(path_to_file))\n \n print(\"created: %s\" % time.ctime(os.path.getctime(path_to_file)))\n modtime = datetime.datetime.strptime(modtime, \"%a %b %d %H:%M:%S %Y\")\n modtime = datetime.datetime.strftime(modtime, \"%Y-%m-%d\")\n return modtime", "def _get_datetime(date):\n return datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.000%z')", "def ref_now():\n return as_datetime(datetime.datetime.now(), REF_TZ)", "def get_last_image_date(self) -> datetime.datetime:\n\n soup = self.load_page()\n header = soup.select('.posted-on')\n data = header[0].getText()\n return datetime.datetime.strptime(data, \" %A, %B %d, %Y at %I:%M%p\")", "def _get_datetime(dt_value):\n result = None\n if result is None:\n result = _get_datetime_from_format(dt_value, \"%Y-%m-%d %H:%M:%S.%f %z\")\n if result is None:\n result = _get_datetime_from_format(dt_value, \"%Y-%m-%d %H:%M:%S.%f\")\n if result is None:\n result = _get_datetime_from_format(dt_value, \"%Y-%m-%d %H:%M:%S\")\n if result is None:\n raise RuntimeError(\n \"Failed to convert '{}' into datetime object\".format(dt_value))\n return result", "def datetime_timeplotxml(self):\n return self.date.strftime(\"%b %d %Y\") + \" \" + \"00:00:00\" + \" GMT\"", "def get_ts(c):\n date, tstamp = comment_content(c).split(\"/\")\n return tstamp.strip()", "def get_instance_stamp(instance_path):\n\n return commands.read_link(instance_path)[-40:]", "def modification_timestamp(self):\n return parse_windows_timestamp(self.unpack_qword(0xC))", "def parse_date(self) -> str:\r\n for line in self.lines:\r\n line = ''.join(line)\r\n if 'updated' in line:\r\n index = line.find('Last updated')\r\n if index != -1:\r\n substring = line[index + 10: index + 50].split('.')[0][-13:]\r\n print(substring)\r\n return pd.to_datetime(substring)\r\n if 'Scottish test n' in line:\r\n index_date = line.find('h test n')\r\n print(index_date)\r\n if index_date != -1:\r\n return pd.to_datetime(line[index_date+15:index_date+29])", "def filename2date(filename):\r\n # Find the '-SC' in the filename.\r\n dash = filename.find('-SC')\r\n if dash:\r\n return datetime.datetime.strptime(filename[dash-7:dash], '%Y%j')\r\n else:\r\n raise ValueError('Landsat filename does not conform to expected format.')", "def get_timestamp(self):\n raise NotImplementedError", "def parse_dt_from_logfile_name(key):\n ### Check file date by regular expression\n keydate = re.search(\"([0-9]{4}[0-9]{2}[0-9]{2})\", key).group(1)\n \n key_dt = datetime.strptime(keydate, '%Y%m%d')\n return key_dt", "def creation_date(path_to_file, return_datetime=True):\n if platform.system() == 'Windows':\n created_at = os.path.getctime(path_to_file)\n else:\n stat = os.stat(path_to_file)\n try:\n created_at = stat.st_birthtime\n except AttributeError:\n # We're probably on Linux. 
No easy way to get creation dates here,\n # so we'll settle for when its content was last modified.\n created_at = stat.st_mtime\n\n if return_datetime:\n return datetime.fromtimestamp(created_at)\n else:\n return created_at", "def get_time_and_date(self):\n date_str = ''\n time_str = ''\n for header in self.frd.headers:\n if header.code != 'U':\n continue\n elif header.string.startswith('DATE'):\n date_str = header.string.replace('DATE', '').strip()\n elif header.string.startswith('TIME'):\n time_str = header.string.replace('TIME', '').strip()\n return FRDParser._parse_ccx_date(date_str, time_str)", "def stime(self):\n try:\n return self['datetime_1']\n except TypeError:\n return None", "def get_last_image_date(self) -> datetime.datetime:\n\n soup = self.load_page()\n header = soup.select('#comicwrap > div.comicnav.top > div')\n\n lst = header[0].text.split(\" \")[-3:]\n lst[0] = lst[0][lst[0].find('\\n') + 1:]\n\n return datetime.datetime.strptime(\" \".join(lst), '%B %d, %Y')", "def get_file_cm_time(file_path):\n try:\n create_time = os.path.getctime(file_path)\n if create_time:\n return int(create_time)\n modify_time = os.path.getmtime(file_path)\n if modify_time:\n return int(modify_time)\n except (OSError, ValueError) as e:\n logger.error('Error while get creation time of file. ERROR: %s.',\n str(e))\n return 0", "def getchrony():\n \n filename = \"/var/log/chrony/tracking.log\"\n fileNotOK = True\n try:\n if os.path.isfile(filename):\n fileNotOK = False\n except:\n fileNotOK = True\n # if file is not OK, return default\n if fileNotOK:\n return( \"2020-02-20T02:02:02.000\", 0., 0.)\n \n #get the very last line in the filea\n line = subprocess.check_output(['tail', '-1', filename])\n parts = line.split()\n nparts = len(parts)\n\n if nparts < 10:\n return( \"\", 0., 0.)\n \n date = parts[0]\n time = parts[1]\n ip = parts[2]\n #print(\"Offset: %s\" % (parts[9]))\n offset = float(parts[6])\n offsetrms = float(parts[9])\n datestr = \"%sT%s\" % (date, time)\n return( datestr, offset, offsetrms)", "def get_file_timestamp(filename):\n plug.logger.debug(u\"Getting timestamp of {}\", filename)\n metadata = S3Conn.head_object(u(filename))\n timestamp = metadata.headers['last-modified']\n # convert timestamp to timestruct...\n timestamp = time.strptime(timestamp, HEAD_TIMESTAMP_FMT)\n # ...timestruct to float\n timestamp = time.mktime(timestamp)\n return timestamp", "def read_datetime(self, date_text):\n date_text = date_text.replace('/', '-')\n return datetime.strptime(date_text, '%Y-%m-%d %H:%M:%S')", "def datetime(self):\n return self._datetime", "def _GetRefdat(self):\n for rfile in self.refdats.keys():\n# Get times for ref.dat files with a time-stamp.\n words = rfile.replace('.','_').split('_')\n if len(words) == 6 and words[-2].count(':') == 20:\n# This file was time-stamped by the sequence. Get the\n# date and time. file name format:\n# ref_Sep_9_2007_11:28:32.dat\n rtime[rfile] = hms_to_secs(words[-2])\n for pfile in self.pfiles:\n min_difftime = 1.e20\n self.info[pfile]['refdat'] = None\n for rfile in self.refdats.keys():\n if rfile[:3] == 'ref' and 'dat' in rfile:\n# This is a reference data file. First see if the orientation is\n# appended. 
If the file has neither a time-stamp nor a plane and\n# there is more than one ref.dat, the epi reconstruction will\n# be aborted.\n rinfo = {}\n ref_file = None\n if 'sag' in rfile and self.info[pfile]['plane'] == 'sagittal':\n# self.info[pfile]['refdat'] = rfile\n ref_file = rfile\n break\n elif 'cor' in rfile and self.info[pfile]['plane'] == 'coronal':\n# self.info[pfile]['refdat'] = rfile\n ref_file = rfile\n break\n elif 'axial' in rfile and self.info[pfile]['plane'] == 'axial':\n# self.info[pfile]['refdat'] = rfile\n ref_file = rfile\n break\n elif len(self.refdats.keys()) == 1:\n# Use the only one if that is all there is.\n ref_file = rfile\n epi_time = hms_to_secs(self.info[pfile]['acqtime'].split()[-2])\n if epi_time - rtime[rfile] < min_difftime and \\\n rftime[rfile] > epi_time:\n# Use the reference file that acquired nearest to the EPI\n# but before it.\n min_difftime = epi_time - rtime[rfile]\n# self.info[pfile]['refdat'] = rfile\n ref_file = rfile\n if ref_file:\n# Found a candidate.\n if not self.info[pfile]['refdat']:\n# Haven't found one yet, use it.\n self.info[pfile]['refdat'] = ref_file\n else:\n# Found two. Choose one in the same directory.\n oldpath = os.path.dirname(self.info[pfile]['refdat'])\n newpath = os.path.dirname(ref_file)\n pfile_path = os.path.dirname(pfile)\n if oldpath == newpath:\n# Same path, use the old one.\n self.info[pfile]['refdat'] = ref_file\n elif newpath == pfile_path:\n self.info[pfile]['refdat'] = ref_file\n# else Do nothing, use existing choice.\n elif not os.path.exists(rfile):\n self.info[pfile]['refdat'] = None\n elif os.stat(rfile).st_size > 0:\n# This path is taken if no info is encoded in the file name.\n# Don't use empty ref.dat files.\n self.info[pfile]['refdat'] = rfile", "def get_file_last_modification_date(filename=None):\n with open(filename, 'r') as fp:\n for line in fp:\n if line.startswith('Modify'):\n date_line = line.split()[1]\n file_date = datetime.strptime(date_line, \"%Y-%m-%d\")\n return filename, file_date", "def findcreatedate(self):\n try:\n exifdata = subprocess.check_output(['exiftool', '-j', '-TAG', '-CreateDate', self.filename])\n except OSError as e:\n print \"exiftool may not be installed. 
Please go check.\"\n print \"Here is the error thrown: {}\".format(e)\n raise\n\n data = json.loads(exifdata)\n self.createdate = date(*[int(elt) for elt in re.split('[ :]', data[0]['CreateDate'])][0:3])\n return self.createdate", "def get_timestamp_from_path(file_path):\n return int(file_path.split('_')[1].split('.')[0])", "def get_file_modification_date() -> str:\n file_modification_date = datetime.now().strftime(\"%d.%m.%Y\")\n print(file_modification_date)\n return file_modification_date", "def image_time(self, path_name, timezone='GMT', delta_hours=0, delta_minutes=0, delta_seconds=0):\n # Open the file for reading\n f = open(path_name, 'r')\n # And extract the tags\n tags = EXIF.process_file(f)\n f.close()\n if len(tags) == 0 or 'Image DateTime' not in tags:\n return None\n capture_time = tags['Image DateTime']\n # Add the timezone the camera time is set to\n img_time = capture_time.printable + timezone\n # Parse the timestap\n cdt = datetime.strptime(img_time, '%Y:%m:%d %H:%M:%S%Z')\n # And process the offset for clock skew\n delta = timedelta(hours=delta_hours, minutes=delta_minutes, seconds=delta_seconds)\n cdt = cdt - delta\n self.files_read += 1\n return cdt", "def get_delta_time(self):\n return self.fid['/PRODUCT/delta_time'][0, :].astype(int)", "def as_ref_datetime(timespec):\n try:\n dt = as_datetime(timespec, tz=REF_TZ)\n hms_dt = dt.astimezone(REF_TZ)\n return hms_dt\n except Exception:\n raise DatetimeCoercionFailure(timespec=timespec, timezone=REF_TZ)", "def test_get_build_timestamp(self):\n pass", "def parse_data_from_file(path):\n print(path.stem)\n \n raw = path.stem.split('-')\n\n rawdate = raw[0][2:]\n print(rawdate)\n date = rawdate[6:] + \"/\" + rawdate[4:6] + '/' + rawdate[0:4]\n rawtime = raw[1]\n time = rawtime[0:2] + \"h\" + rawtime[2:4] + \"m\" + rawtime[4:6] + \"s\"\n dt = datetime.strptime(rawdate+rawtime, '%Y%m%d%H%M%S')\n print(dt)\n return dt", "def getTimestamp(self):\r\n\t\treturn self.pair.data['timestamp']", "def getKoanTime():\n if not os.path.exists(\"%s.awk\" % koanIndex[currentKoan] ):\n f = open(\"%s.awk\" % koanIndex[currentKoan] ,\"w\")\n f.write(sampleString)\n f.close()\n return os.path.getmtime(\"%s.awk\" % koanIndex[currentKoan] )", "def getlmtime(self):\n if self.islink() and self.lexists():\n st = os.lstat(self.path)\n return st.st_mtime\n return Stat.getmtime(self)", "def test_report_datetime(self):\n self.__opener.contents = '''<Report><Doc><Summary eTime=\"5-9-2017 - 14:18:23\"/></Doc></Report>'''\n self.assertEqual(datetime.datetime(2017, 9, 5, 14, 18, 23), self.__uft.datetime('url'))", "def get_date(layer):\n date = None\n type = 1\n layer_dates = layer.get_layer_dates()\n if layer_dates:\n date = layer_dates[0][0]\n type = layer_dates[0][1]\n if date is None:\n date = layer.created.date()\n # layer date > 2300 is invalid for sure\n # TODO put this logic in date miner\n if date.year > 2300:\n date = None\n if type == 0:\n type = \"Detected\"\n if type == 1:\n type = \"From Metadata\"\n return get_solr_date(date), type", "def get_release_date ():\n fname = os.path.join(\"doc\", \"changelog.txt\")\n release_date = \"unknown\"\n with open(fname) as fd:\n # the release date is on the first line\n line = fd.readline()\n mo = release_ro.search(line)\n if mo:\n release_date = mo.groups(1)\n return release_date", "def get_exp_start_time(self):\n\t\tif self.have_metadata is False:\n\t\t\tself._get_metadata()\n\t\t\tself.have_metadata = True\n\n\t\ttry:\n\t\t\tif self.keyinfo['tracking_id'].attrs['exp_start_time'].endswith('Z'):\n\t\t\t\t# 
MinKNOW >= 1.4 ISO format and UTC time\n\t\t\t\tdt = dateutil.parser.parse(self.keyinfo['tracking_id'].attrs['exp_start_time'])\n\t\t\t\ttimestamp = int(time.mktime(dt.timetuple()))\n\t\t\telse:\n\t\t\t\t# Unix time stamp from MinKNOW < 1.4\n\t\t\t\ttimestamp = int(self.keyinfo['tracking_id'].attrs['exp_start_time'])\n\t\t\treturn timestamp\n\t\texcept KeyError, e:\n\t\t\treturn None", "def read(self):\n with open(self) as f:\n return Timestamp.load(f)", "def _creation_date(path_to_file):\n if platform.system() == \"Windows\":\n return os.path.getctime(path_to_file)\n else:\n stat = os.stat(path_to_file)\n try:\n return stat.st_birthtime\n except AttributeError:\n # We're probably on Linux. No easy way to get creation dates here,\n # so we'll settle for when its content was last modified.\n return stat.st_mtime", "def getAttrDateTime(self, e, name):\n val = e.get(name)\n dateVal = datetime.strptime(val,\"%Y-%m-%dT%H:%M:%S\")\n return dateVal.strftime(\"%Y%m%d%H%M%S\")", "def get_uploaded_date_long(self):\n return self.uploaded_date_long", "def read_date(self):\n\n readval = self.__bus.read_i2c_block_data(self.__rtcaddress, 0, 7)\n date = (\"%02d-%02d-%02dT%02d:%02d:%02d\" % (self.__bcd_dec(readval[6]) +\n self.__century,\n self.__bcd_dec(readval[5]),\n self.__bcd_dec(readval[4]),\n self.__bcd_dec(readval[2]),\n self.__bcd_dec(readval[1]),\n self.__bcd_dec(readval[0])))\n return date", "def creation_date(path_to_file):\n if platform.system() == 'Windows':\n return os.path.getctime(path_to_file)\n else:\n stat = os.stat(path_to_file)\n try:\n return stat.st_birthtime\n except AttributeError:\n return stat.st_mtime", "def compute_tillage_date(scenario, row):\n wbfn = \"/i/0/wb/%s/%s/%s_%s.wb\" % (\n row[\"huc_12\"][:8],\n row[\"huc_12\"][8:],\n row[\"huc_12\"],\n row[\"fpath\"],\n )\n wbdf = read_wb(wbfn)\n wbdf2 = wbdf[\n (\n (wbdf[\"ofe\"] == 1)\n & (wbdf[\"date\"] >= APR15)\n & (wbdf[\"date\"] <= MAY30)\n )\n ]\n if scenario in THRESHOLDS:\n the_threshold = THRESHOLDS[scenario]\n else:\n the_threshold = get_threshold_bypl(scenario, row)\n wbdf3 = wbdf2[wbdf2[\"sw1\"] < the_threshold]\n if len(wbdf3.index) > 0:\n tillage_date = wbdf3.iloc[0][\"date\"]\n else:\n tillage_date = MAY30\n return tillage_date", "def getTimestamp(self):\r\n\t\treturn self.data['timestamp']" ]
[ "0.6449726", "0.6254226", "0.6224529", "0.6155637", "0.6119671", "0.61154586", "0.6110254", "0.5968073", "0.59634435", "0.5960696", "0.59445274", "0.5924622", "0.59102046", "0.5850813", "0.57510024", "0.574005", "0.574005", "0.5737356", "0.57372004", "0.5716487", "0.5684267", "0.5642409", "0.56256235", "0.5590432", "0.5586844", "0.5577186", "0.55681235", "0.55400085", "0.5530442", "0.5522788", "0.55171174", "0.55155843", "0.5510248", "0.5505324", "0.54940444", "0.54918504", "0.5477127", "0.54666734", "0.5456875", "0.5455157", "0.545036", "0.54470366", "0.5444406", "0.5441975", "0.5438168", "0.54267406", "0.5425561", "0.5423367", "0.5423132", "0.53861755", "0.53849846", "0.5380397", "0.5360583", "0.53594214", "0.53510433", "0.5349219", "0.5345545", "0.5338294", "0.5322343", "0.53139573", "0.5301795", "0.5294797", "0.52874845", "0.5285732", "0.5259226", "0.5259013", "0.52539754", "0.52483237", "0.5243637", "0.52383745", "0.5236658", "0.52228534", "0.52218914", "0.5221093", "0.5219417", "0.5215606", "0.52151245", "0.52081275", "0.52062106", "0.52040595", "0.5198187", "0.5188566", "0.5183352", "0.51787794", "0.5172507", "0.51694715", "0.51688683", "0.51663655", "0.5156054", "0.515573", "0.5153666", "0.5151156", "0.5148835", "0.51469827", "0.5144015", "0.5142521", "0.51354825", "0.5126023", "0.5119978", "0.5116111" ]
0.7556686
0
get all orders and find the time span; if an order has been prepared, the tikchen's name will be stored in the order data
def cook_order_list(request): all_orders = Order.objects.all().order_by("-id") css = CookStatus.objects.filter(cook_name=request.user) cs = None current_order = None if len(css) != 0: cs = css[0] if cs.current_order != None : current_order = cs.current_order.menu_items.all() new_orders = [] for order in all_orders: a = {} a['id'] = order.id a['status'] = order.status a['timespan'] = (datetime.datetime.utcnow().replace(tzinfo=utc) - order.timestamp_created).seconds cookofthis = CookStatus.objects.filter(current_order=order) if len(cookofthis) != 0: a['cookname'] = cookofthis[0].cook_name.username elif order.tikchen != None: a['cookname'] = order.tikchen new_orders.append(a) return render_to_response('staff/cook_order_list.html', {'all_orders':new_orders, 'user':request.user, 'current_order':current_order})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trackOrderRequest(self):\n\t\tstart_dat=datetime.today()\n\t\tstart_date = start_dat - timedelta( hours=start_dat.time().hour,minutes=start_dat.time().minute,seconds=start_dat.time().second ) \n\t\tend_date=start_dat\n\t\tans=None\n\t\t#print start_dat.time().hour\n\t\tprint end_date\n\t\tans=Order.objects.filter(date_of_order__range=(start_date,end_date))\n\t\tlst=[]\n\t\tfor b in ans:\n\t\t\towneradd=b.owner_id.address\n\t\t\tuseradd=b.userid.address\n\t\t\tusername=b.userid.email\n\t\t\townername=b.owner_id.email\n\t\t\tuserphone=b.userid.contact_no\n\t\t\townerphone=b.owner_id.contact_no\n\t\t\tbookname=b.bookid.title\n\t\t\tstatus=b.paymentid.ispending\n\t\t\tbook=b.__dict__\n\t\t\tbook['owneradd']=owneradd\n\t\t\tbook['useradd']=useradd\n\t\t\tbook['username']=username\n\t\t\tbook['ownername']=ownername\n\t\t\tbook['userphone']=userphone\n\t\t\tbook['ownerphone']=ownerphone\n\t\t\tbook['name']=bookname\n\t\t\tif status==True:\n\t\t\t\tbook['status']=\"Pending\"\n\t\t\telse:\n\t\t\t\tbook['status']=\"Delivered\"\n\t\t\tlst.append(book)\n\t\t#print ans\n\t\t\n\t\treturn lst", "def get_all_orders():", "def extractHourlyOrders(orders, fromDate, toDate=datetime.today()):\n orderTimeStamps = getTimeStampsFromMongoOrderData(orders)\n toDate = datetime.today() + timedelta(days=1)\n # Every day fromDate to toDate.\n dateRange = getDaysInDateRange(fromDate, toDate)\n\n orderDetailsForDateRange = []\n for date in dateRange:\n orderDetails = {\n \"date\": object,\n \"orders\": []\n }\n orderDetails[\"date\"] = date\n # Get the orders for this date\n ordersForDate = getOrdersForDate(date, orderTimeStamps)\n # If order number is zero just fill all hours with order amount = 0\n if len(ordersForDate) == 0:\n orderDetails[\"orders\"] = zeroFillOrdersForFullDay(date)\n orderDetailsForDateRange.append(orderDetails)\n continue\n\n for hour in hours:\n ordersAmountForHour = len(getOrdersForHour(hour, ordersForDate))\n # As each hour only contains XX:XX, it doesn't have a date.\n # Combine the current hour iteration with the current date iteration\n hour = datetime.combine(date, datetime.time(hour))\n if ordersAmountForHour == 0:\n info = {\n \"hour\": hour,\n \"amount\": 0\n }\n orderDetails[\"orders\"].append(info)\n else:\n info = {\n \"hour\": hour,\n \"amount\": ordersAmountForHour\n }\n orderDetails[\"orders\"].append(info)\n orderDetailsForDateRange.append(orderDetails)\n return orderDetailsForDateRange", "def order_report():", "def returnOrderTrades(self, order_number):", "def get_order_details(game_id: int, user_id: int, start_time: float = None, end_time: float = None):\n start_time, end_time = get_time_defaults(game_id, start_time, end_time)\n query = \"\"\"\n SELECT\n o.id as order_id,\n relevant_orders.status,\n relevant_orders.order_status_id,\n symbol,\n relevant_orders.timestamp,\n buy_or_sell,\n quantity,\n order_type,\n time_in_force,\n price,\n relevant_orders.clear_price\n FROM orders o\n INNER JOIN (\n SELECT os_full.id,\n os_full.timestamp,\n os_full.order_id,\n os_full.clear_price,\n os_full.status,\n os_relevant.order_status_id\n FROM order_status os_full\n INNER JOIN (\n SELECT os.order_id, grouped_os.max_id as order_status_id\n FROM order_status os\n INNER JOIN\n (SELECT order_id, max(id) as max_id\n FROM order_status\n GROUP BY order_id) grouped_os\n ON\n os.id = grouped_os.max_id\n WHERE os.status NOT IN ('cancelled', 'expired')\n ) os_relevant\n ON os_relevant.order_id = os_full.order_id\n ) relevant_orders\n ON relevant_orders.order_id = o.id\n WHERE game_id = %s AND 
user_id = %s AND relevant_orders.timestamp >= %s AND relevant_orders.timestamp <= %s;\"\"\"\n\n with engine.connect() as conn:\n df = pd.read_sql(query, conn, params=[game_id, user_id, start_time, end_time])\n\n df = pivot_order_details(df)\n df[\"status\"] = \"fulfilled\"\n df.loc[df[\"timestamp_fulfilled\"].isna(), \"status\"] = \"pending\"\n return df", "def get_work_order_detail(self, date_range):\n work_order_obj = self.env[\"task.line\"]\n start = datetime.strptime(date_range.get(\"date_from\"), \"%Y-%m-%d\")\n end = datetime.strptime(date_range.get(\"date_to\"), \"%Y-%m-%d\")\n step = timedelta(days=1)\n workorder_detail = []\n while start <= end:\n sdate = str(\n datetime.strptime(\n str(start.date()) + \" 00:00:00\", DEFAULT_SERVER_DATETIME_FORMAT\n )\n )\n edate = str(\n datetime.strptime(\n str(start.date()) + \" 23:59:59\", DEFAULT_SERVER_DATETIME_FORMAT\n )\n )\n work_order_ids = work_order_obj.search(\n [(\"date_issued\", \">=\", sdate), (\"date_issued\", \"<=\", edate)]\n )\n if work_order_ids:\n parts_data = {}\n parts_value = []\n for parts_line in work_order_ids:\n if (\n parts_line.fleet_service_id\n and parts_line.fleet_service_id.state == \"done\"\n ):\n parts_dict = {\n \"wo_name\": parts_line.fleet_service_id\n and parts_line.fleet_service_id.name\n or \"\",\n \"vehicle_id\": parts_line.fleet_service_id\n and parts_line.fleet_service_id.vehicle_id\n and parts_line.fleet_service_id.vehicle_id.name\n or \"\",\n \"part_no\": parts_line.product_id\n and parts_line.product_id.default_code\n or \"\",\n \"part_name\": parts_line.product_id\n and parts_line.product_id.name\n or \"\",\n \"vehicle_make\": parts_line.vehicle_make_id\n and parts_line.vehicle_make_id.name\n or \"\",\n \"qty\": parts_line.qty or 0.0,\n \"uom\": parts_line.product_uom\n and parts_line.product_uom.name\n or \"\",\n \"old_part_return\": parts_line.old_part_return\n and \"Yes\"\n or \"No\",\n \"issued_by\": parts_line.issued_by\n and parts_line.issued_by.name\n or \"\",\n \"remarks\": parts_line.fleet_service_id\n and parts_line.fleet_service_id.note\n or \"\",\n }\n parts_value.append(parts_dict)\n if parts_value:\n parts_value = sorted(parts_value, key=lambda k: k[\"wo_name\"])\n parts_data = {\"date\": start.date(), \"value\": parts_value}\n workorder_detail.append(parts_data)\n start += step\n return workorder_detail", "def get_order(self):\n #store the orders for the current cycle inside the class\n self.orders = self.firebase.get_data(\"orders\")", "def get_all_orders(): \n data = order_obj.get_all_orders()\n return data", "def test_get_orders(self):\n pass", "def getTimeStampsFromMongoOrderData(orders):\n def extractTime(current):\n return current[\"createdAt\"]\n # Get all timestamps from mongo\n return map(extractTime, orders)", "def get_orders_list(\n self\n ) -> list:\n\n response = self.session.get(\"http://automationpractice.com/index.php?controller=history\")\n\n self.HTMLParser.set_html(response.text)\n\n tbody = self.HTMLParser.find_elements_by_xpath(\"//tbody/tr\")\n\n if not len(tbody):\n raise NoOrderError()\n\n orders = list()\n\n for tr in tbody:\n\n tr = self.HTMLParser.convert_node(tr)\n tds = tr.xpath(\"//td\")\n\n orders.append({\n \"reference\": self._find_reference(tds[0]),\n \"date\": tds[1].text_content().strip(),\n \"value\": tds[2].get(\"data-value\"),\n \"payment_method\": tds[3].text_content(),\n \"status\": self._find_status(tds[4]),\n \"invoice_link\": self._find_invoice_link(tds[5]),\n \"id_order\": self._find_id(tds[5])\n })\n\n return orders", "def 
parse_orders(self):\n #save the information from the firebase for this cycle\n self.get_order()\n #Loop through all the stores\n for store_name,store_orders in self.orders.items():\n #Loop through all the orders\n for order_id,order_details in store_orders.items():\n #store order\n self.store_order(store_name,store_orders,order_id,order_details)\n pass", "def get_orders(self):\n return self.order_lst", "def place_orders(self):\n buy_orders = []\n sell_orders = []\n buy_stop_order = {}\n sell_stop_order = {}\n order_status = 0\n \"\"\"order_status参数说明\n 0: running_qty为0, 维持原样\n 1: self.running_qty > 0, 买卖都变化, 买单按照offset2, 卖单按照offset3\n 2: 买单维持不变, 卖单按照offset3\n 3: self.running_qty < 0, 买卖都变化, 买单按照offset3, 卖单按照offset2\n 4: 卖单维持不变, 买单按照offset3\n 5: 追加指定订单\n 6: 取消指定订单\n 7: self.running_qty > 0, 买单按照offset2, 卖单不变\n 8: self.running_qty < 0, 买单不变, 卖单按照offset2\n \"\"\"\n # Create orders from the outside in. This is intentional - let's say the inner order gets taken;\n # then we match orders from the outside in, ensuring the fewest number of orders are amended and only\n # a new order is created in the inside. If we did it inside-out, all orders would be amended\n # down and a new order would be created at the outside.\n position_grade = self.get_position_grade()\n avgCostPrice = self.exchange.get_position()['avgCostPrice']\n print ('position_grade: %s ' % position_grade)\n print ('running_qty: %s ' % self.running_qty)\n print ('ORDER_START_SIZE: %s ' % self.ORDER_START_SIZE)\n schedule.run_pending()\n\n if(self.countdown == True): #设置倒数计时, 60秒后delay_order_check设为True, 可以重新挂非清仓方向的价格\n self.cycleclock = self.cycleclock - 1\n if(self.cycleclock <= 0):\n if(self.check_last_price_upordown() == True):\n self.cycleclock = 5\n else:\n self.countdown = False\n self.delay_order_check = True\n\n if(self.get_ticker()['last'] > STOP_PRICE and self.buy_only_flag == False):\n self.buy_only_flag = True\n if(self.running_qty < 0):\n self.clear_position(buy_orders, sell_orders)\n return self.converge_orders(buy_orders, sell_orders, order_status)\n\n if(self.get_5th_max_MA15_defference(getmessage = 1) > 100):\n self.stop_market_maker_flag = True\n self.cancel_all_orders_flag = True\n self.buy_only_flag = False\n self.sell_only_flag = False\n tg_important_message('上涨差值超过100,暂停交易')\n\n if(self.stop_market_maker_flag == True and self.cancel_all_orders_flag == True):\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n logger.info(\"Cancel all orders\")\n elif(self.stop_market_maker_flag == True and self.clear_position_flag == True):\n if(self.running_qty != 0):\n self.clear_position(buy_orders, sell_orders)\n else:\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n logger.info(\"Market_maker has stopped. No orders, no positions now\")\n elif(self.stop_market_maker_flag == True):\n if(self.running_qty > 0):\n if avgCostPrice != None:\n sell_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice - STOP_SIZE, self.instrument['tickSize']), \"Sell\", abs(self.running_qty))\n order_status = 4\n elif(self.running_qty < 0):\n if avgCostPrice != None:\n buy_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice + STOP_SIZE, self.instrument['tickSize']), \"Buy\", abs(self.running_qty))\n order_status = 2\n elif(self.running_qty == 0 and self.last_running_qty == 0):\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n logger.info(\"Market_maker has stopped. 
No orders, no positions now\")\n\n elif(self.running_qty == 0 and self.restart_flag == False):\n if(self.check_last_price_upordown() == True):\n self.restart_flag = True\n self.countdown_restart = 5\n return\n self.ORDER_START_SIZE = self.start_XBt // 1000000 * START_SIZE_MAGNIFICATION #新算法, 每次初始交易重新设定ORDER_START_SIZE\n order_status = 0\n if not(self.sell_only_flag == True):\n buy_orders.append(self.prepare_order(-1, order_status))\n if not(self.buy_only_flag == True):\n sell_orders.append(self.prepare_order(1, order_status))\n self.countdown = False\n self.restart_flag = True\n self.countdown_restart = 30\n\n elif(self.running_qty == 0 and self.restart_flag == True):\n self.countdown_restart = self.countdown_restart - 1\n if(self.countdown_restart <= 0):\n self.restart_flag = False\n return\n\n elif(self.running_qty != 0 and self.running_qty != self.last_running_qty): #仓位变动后开始倒计时60秒, 60秒后delay_order_check为True, 可以重新挂非清仓方向的价格\n if(self.running_qty > 0):\n order_status = 2\n sell_orders.append(self.prepare_order(1, order_status))\n elif(self.running_qty < 0):\n order_status = 4\n buy_orders.append(self.prepare_order(-1, order_status))\n self.cycleclock = 60\n self.countdown = True\n self.restart_flag = False\n self.delay_order_check = False\n\n elif(self.running_qty != 0 and self.running_qty == self.last_running_qty and self.delay_order_check == True): #可以重新挂非清仓方向的价格\n i = abs(self.running_qty) // (self.ORDER_START_SIZE//4) + 1\n if(self.running_qty > 0):\n order_status = 7\n if(i <= 3):\n buy_orders.append(self.prepare_order(-i, order_status))\n if(self.running_qty < 0):\n order_status = 8\n if(i <= 3):\n sell_orders.append(self.prepare_order(i, order_status))\n self.cycleclock = 30\n self.countdown = True\n self.delay_order_check = False\n\n else:\n if(self.running_qty > 0):\n order_status = 2\n sell_orders.append(self.prepare_order(1, order_status))\n elif(self.running_qty < 0):\n order_status = 4\n buy_orders.append(self.prepare_order(-1, order_status))\n\n if(self.last_running_qty != self.running_qty):\n self.send_tg_message()\n self.last_running_qty = self.running_qty\n self.reset = False\n buy_orders = list(filter(None.__ne__, buy_orders)) #去除None\n sell_orders = list(filter(None.__ne__, sell_orders)) #去除None\n print('BXBT_MA15: %s' % self.get_BXBT_MA15())\n print(buy_orders)\n print(sell_orders)\n if((self.last_buy_orders == buy_orders and self.last_sell_orders == sell_orders) or (buy_orders == [] and sell_orders == [])):\n print('order no change, return')\n return\n else:\n self.last_buy_orders = buy_orders\n self.last_sell_orders = sell_orders\n self.converge_stop_order(buy_stop_order, sell_stop_order)\n return self.converge_orders(buy_orders, sell_orders, order_status)", "def manage_orders(self):\r\n for coin, pair_info in self.usdt_pairs.items():\r\n orders = self.kc.get_orders(pair_info[\"symbol\"], status=\"active\")\r\n self.log(coin, orders[\"totalNum\"])\r\n if orders[\"totalNum\"]:\r\n self.log(len(orders[\"items\"]))\r\n for order in orders[\"items\"]:\r\n self.log(order)\r\n\r\n self.log(mp.mpf())\r\n\r\n # ticker = current price action, bid/ask, etc\r\n ticker = self.kc.get_ticker(pair_info[\"symbol\"])\r\n self.log(ticker)\r\n return", "def test_get_order(self):\n pass", "def get_one_order():", "def get_context_data(self, **kwargs):\n start, end = self.get_start_end_dates(self.request)\n if start is not None or end is not None:\n hours = 24\n\n qs = self.get_orders_with_range(start, end)\n sorted_qs = sorted(qs, key=lambda x: x.close_date)\n\n date_range_split = 
list()\n date_range = end - start\n\n for day in range(date_range.days + 1):\n date = start + datetime.timedelta(day)\n day_split_by_hours = list()\n\n for hour in range(hours):\n time = date + datetime.timedelta(hours=hour)\n next_hour = time + datetime.timedelta(hours=1)\n orders = filter(lambda x: time <= x.close_date <= next_hour, sorted_qs)\n amount = len(orders)\n sales = sum(order.grand_total for order in orders)\n\n if amount > 0:\n day_split_by_hours.append({\n 'hour': time.strftime(\"%I:%M %p\"),\n 'amount': amount,\n 'sales': sales\n })\n total = sum(day.get('amount') for day in day_split_by_hours)\n total_sales = sum(day.get('sales') for day in day_split_by_hours)\n if total > 0:\n date_range_split.append({\n 'date': date.strftime(\"%B, %d\"),\n 'hours': day_split_by_hours,\n 'total': total,\n 'total_sales': total_sales\n })\n\n return {\n 'sorted_qs': date_range_split,\n 'start_date': start.strftime('%B %d, %Y'),\n 'end_date': end.strftime('%B %d, %Y'),\n }\n else:\n return {}", "def generate_orders(self, cr, uid, ids, context=None):\n voucher_pool = self.pool.get('account.voucher')\n payment_term_obj = self.pool.get('account.payment.term')\n account_budget_confirmation_obj = self.pool.get('account.budget.confirmation')\n period_obj = self.pool.get('account.period')\n if context is None:\n context = {}\n for order in self.browse(cr, uid, ids, context=context):\n #################################to remind\n total_fixed = total_percent = 0\n for line in order.payment_term.line_ids:\n if line.value == 'fixed':\n total_fixed += line.value_amount\n if line.value == 'procent':\n total_percent += line.value_amount\n total_fixed = (total_fixed * 100) / (order.amount or 1.0)\n if (total_fixed + total_percent) > 100:\n raise orm.except_orm(_('Error!'), _(\"Can not create the payments !\\n\\\n The related payment term is probably miss configured as it gives a computed amount greater than the total permanent payment amount. 
\\\n The latest line of your payment term must be of type 'balance' to avoid rounding issues.\"))\n # create one move line for the total and possibly adjust the other lines amount\n totlines1 = []\n for o in order.line_ids:\n totlines1 += payment_term_obj.compute(cr, uid, order.payment_term.id, o.amount, order.date or False, context=context)\n \n d = {}\n for k, v in totlines1:\n d.setdefault(k, [k]).append(v)\n totlines = map(tuple, d.values())\n\n for t in totlines :\n #to substract date from the interval number \n order_date = t[0]\n entered_date = datetime.datetime.strptime(order_date, '%Y-%m-%d')\n entered_date = entered_date.date()\n account_id = (order.partner_id.property_account_payable and order.partner_id.property_account_payable.id) or \\\n (order.journal_id.default_credit_account_id and order.journal_id.default_credit_account_id.id)\n period_id = period_obj.find(cr, uid, t[0], context=context)[0]\n\n list_confirm = [conf.id for conf in o.confirmation_ids]\n confirmations = account_budget_confirmation_obj.search(cr, uid, [('id','in', list_confirm),('period_id','=', period_id)], context=context) #('date','=',t[0]),\n\n for confirm in confirmations:\n confirm_id = confirm\n\n voucher_lines = [(0, 0, {'name':ol.name, 'account_id':ol.account_id.id, 'type':'dr',\n 'amount':t[count + 1], 'account_analytic_id':ol.account_analytic_id.id, 'budget_confirm_id': confirm_id })\n for count, ol in enumerate(order.line_ids)]\n res = voucher_pool.onchange_price(cr, uid, 0, voucher_lines, [], partner_id=order.partner_id.id, context=context).get(\"value\", {})\n voucher_dict = {\n 'partner_id' : order.partner_id.id,\n 'account_id': account_id,\n 'company_id' : order.company_id.id,\n 'journal_id' : order.journal_id.id,\n 'period_id': order.period_id.id,\n 'type':'purchase',\n 'date' : t[0],\n 'reference': order.name,\n 'payment_permanent_voucher_id': order.id,\n 'line_ids':voucher_lines,\n 'amount':res.get(\"amount\", 0.0)\n }\n voucher_pool.create(cr, uid, voucher_dict, context=context)\n return self.write(cr, uid, ids, {'state':'done'}, context=context)", "def getOrderList(self):\r\n\t\treturn self.orders", "def extractHourlyConflicts(orders, fromDate, toDate=datetime.today(), multitask=2):\n def extractReleaseDeadline(order):\n return {\n \"id\": str(order[\"_id\"]),\n \"release\": order[\"release\"],\n \"deadline\": order[\"deadline\"],\n }\n allConflicts = []\n # convert all order to tasks.\n orders = map(extractReleaseDeadline, orders)\n intervalTree = IntervalTree()\n for index, order in enumerate(orders):\n intervalTree.addi(\n begin=order[\"release\"],\n end=order[\"deadline\"],\n data=order[\"id\"]\n )\n toDate = datetime.today() + timedelta(days=1)\n dateRange = getDaysInDateRange(fromDate, toDate)\n # now get conflicts for each hour\n for date in dateRange:\n conflictsForDate = {\n \"date\": date,\n \"conflicts\" : []\n }\n year = date.year\n month = date.month\n day = date.day\n for hour in range(0, 24):\n begin = datetime(year, month, day, hour, 00)\n end = datetime(year, month, day, hour, 59)\n conflicts, nonConflicts = findConflicts(intervalTree, begin, end)\n highest = highestConflictsForHour(conflicts)\n conflictsForHour = {\n \"hour\": begin,\n \"size\": len(highest),\n \"employeesNeeded\": workersNeeded(len(highest), multitask)\n }\n conflictsForDate[\"conflicts\"].append(conflictsForHour)\n allConflicts.append(conflictsForDate)\n return allConflicts", "def get_all_orders_count(): \n data = order_obj.get_all_orders(\"1\")\n return data", "def orders(self):\n 
return(self._d_orders['trades'])", "def one_day(status, after):\n return woo.fetch_all_orders(status, after)", "def __init__(self):\n self.orders = {}", "def _get_unit_records(self, start_time):\r\n\r\n if self.optMTTF.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN \\\r\n ( \\\r\n SELECT DISTINCT MIN(fld_unit, fld_request_date), \\\r\n fld_incident_id, fld_request_date, \\\r\n fld_unit, fld_hardware_id \\\r\n FROM rtk_incident \\\r\n GROUP BY fld_unit \\\r\n ) AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n elif self.optMTBBD.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN \\\r\n ( \\\r\n SELECT fld_incident_id, fld_request_date, fld_unit, \\\r\n fld_hardware_id \\\r\n FROM rtk_incident \\\r\n GROUP BY fld_unit, fld_request_date \\\r\n ) AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n GROUP BY t2.fld_unit, t1.fld_age_at_incident \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n elif self.optMTBF.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN rtk_incident AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n (_results, _error_code, __) = self._dao.execute(_query, commit=False)\r\n\r\n return(_results, _error_code)", "def get_trades_from_order(self, order):\r\n method = self.private_endpoints['trades_from_order']['method']\r\n url = self.base_url + self.private_endpoints['trades_from_order']['url'].format(orderId=order)\r\n req = requests.request(method, url, headers=self.get_auth_headers())\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res", "def orders():\n # Megnyitjuk az adatbazis kapcsolatot\n conn = get_db()\n try:\n # Letrehozunk egy cursort az adatbazisban a navigalashoz\n cur = conn.cursor()\n try:\n # Ezzel az SQL lekerdezessel kapjuk meg az eredmenytabla megfelelo adatait\n cur.execute('SELECT order_id, description, quantity, deadline_date FROM orders')\n # Ezt a tombot fogjuk visszaadni\n results = []\n # Vegigiteralunk az eredmenytablan, es minden sort felveszunk\n # az eredmenytombbe, a megfelelo formatumban\n # A megrendeles lejarta az eredmenytablanak alapvetoen SQL.Date\n # tipusu, ezt ugy regisztraljuk mint amin nincs idopont\n # a date() metodussal, 
majd az igy kapott formatumot mar a\n # JSON-nal kompatibilisre tudjuk hozni az isoformat()-tal\n for order_id, description, quantity, deadline_date in cur:\n results.append({\"order_id\": order_id,\n \"description\": description,\n \"quantity\": quantity,\n \"deadline_date\": deadline_date.date().isoformat()})\n # Visszaadjuk az eredmenytombot\n return jsonify(orders=results)\n finally:\n # Bezarjuk a cursor-t\n cur.close()\n finally:\n # Bezarjuk a kapcsolatot-t\n conn.close()", "def get_orders(access_token,start_date,end_date,status):\r\n\r\n orders_url = 'https://api.tdameritrade.com/v1/orders'\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n #Parameters for the order\r\n params = {'accountId':TDAuth_Info.account_num,\r\n 'fromEnteredTime': start_date,\r\n 'toEnteredTime': end_date,\r\n 'status': status}\r\n\r\n #Make the get request to TD Ameritrade\r\n orders_data_json = requests.get(url=orders_url,headers=headers,params=params)\r\n return orders_data_json.json()", "async def get_all_orders(self, symbol, order_id=None, start=None, end=None, limit=500):\n uri = \"/fapi/v1/allOrders\"\n params = {\n \"symbol\": symbol,\n \"limit\": limit,\n \"timestamp\": tools.get_cur_timestamp_ms()\n }\n if order_id:\n params[\"orderId\"] = order_id\n if start:\n params[\"startTime\"] = start\n if end:\n params[\"endTime\"] = end\n success, error = await self.request(\"GET\", uri, params=params, auth=True)\n return success, error", "def getOrdersForHour(hour, orders):\n def filterHours(hourToCompare):\n HOUR_FORMAT = \"%H\"\n hourOne = datetime.strftime(hour, HOUR_FORMAT)\n hourTwo = datetime.strftime(hourToCompare, HOUR_FORMAT)\n if hourOne == hourTwo:\n return hourTwo\n return filter(filterHours, orders)", "def create_order(self, row):\n\n order_dict = {}\n #error = False\n #error_list = []\n\n #break_err= False\n #break_err_list = []\n\n order_number = row[\"Order Number\"]\n order_dict[\"order_number\"] = order_number\n\n # convert date string to datetime obj\n try:\n order_datetime = datetime.datetime.strptime(row[\"Order Date\"], \"%Y-%m-%d %H:%M:%S\" )\n order_date = order_datetime.date()\n order_time = order_datetime.time()\n except:\n order_date, order_time = None # assign none to date and time \n \n #error = True\n #datetime_conversion_err_str = \"Couldn't convert for order -> %s\" % order_dict[\"order_number\"] # detailerror in error list\n #error_list.append(datetime_conversion_err_str)\n \n # assign date,time to dict \n order_dict[\"order_date\"] = order_date \n order_dict[\"order_time\"] = order_time\n\n order_dict[\"customer_uid\"] = row[\"Customer Uid\"]\n order_dict[\"customer_name\"] = row[\"Customer Name\"]\n order_dict[\"customer_email\"] = row[\"Customer Email\"]\n\n order_dict[\"billing_name\"] = row[\"Bill To Name\"]\n order_dict[\"billing_address\"] = row[\"Bill To Address\"]\n order_dict[\"billing_district\"] = row[\"Bill To District\"]\n order_dict[\"billing_state\"] = row[\"Bill To State\"]\n order_dict[\"billing_zip_code\"] = row[\"Bill To Zip\"]\n order_dict[\"billing_country\"] = row[\"Bill To Country\"]\n order_dict[\"billing_phone_No\"] = row[\"Bill To Mobile\"]\n\n order_dict[\"shipping_name\"] = row[\"Ship To Name\"]\n order_dict[\"shipping_address\"] = row[\"Ship To Address\"]\n order_dict[\"shipping_district\"] = row[\"Ship To District\"]\n order_dict[\"shipping_state\"] = row[\"Ship To State\"]\n order_dict[\"shipping_zip_code\"] = row[\"Ship To Zip\"]\n order_dict[\"shipping_country\"] = row[\"Ship To Country\"]\n 
order_dict[\"shipping_phone_No\"] = row[\"Ship To Mobile\"]\n\n order_dict[\"order_currency\"] = row[\"Order Currency\"]\n\n # convert order total to decimal\n order_total = self.str_to_decimal( row[\"Order Total\"], \n \"Order Total\", True)\n order_dict[\"order_total\"] = order_total \n \n #convert order_taxes to decimal \n order_taxes = self.str_to_decimal(\n row[\"Order Taxes\"], \n \"Order Taxes\")\n order_dict[\"order_taxes\"] = order_taxes\n\n # convert order_discount to decimal\n order_discounts = self.str_to_decimal( \n row[\"Order Discounts\"], \n \"Order Discounts\") \n order_dict[\"order_discounts\"] = order_discounts\n\n # convert order_taxes to decimal\n order_subtotal = self.str_to_decimal(\n row[\"Order Subtotal\"], \n \"Order Subtotal\", True) \n order_dict[\"order_subtotal\"] = order_subtotal \n\n # convert shipping cost to decimal\n order_shipping_cost = self.str_to_decimal(\n row[\"Order Shipping\"], \n \"Order Shipping\", True) \n order_dict[\"order_shipping_cost\"] = order_shipping_cost \n\n # convert shipping_TBD to decimal\n order_shipping_TBD = self.str_to_decimal( \n row[\"Order Ship Tbd\"], \n \"Order Ship Tbd\") \n order_dict[\"order_ship_TBD\"] = order_shipping_TBD\n \n # convert cart_total\n order_cart_total = self.str_to_decimal(\n row[\"Order Cart Total\"], \n \"Order Cart Total\", True) \n order_dict[\"order_cart_total\"] = order_cart_total \n\n # convert Cart Taxes to decimal\n order_cart_taxes = self.str_to_decimal( \n row[\"Order Cart Taxes\"], \n \"Order Cart Taxes\") \n order_dict[\"order_cart_taxes\"] = order_cart_taxes\n \n # convert Cart Discount to decimal\n order_cart_discount = self.str_to_decimal(\n row[\"Order Cart Discounts\"], \n \"Order Cart Discounts\") \n order_dict[\"order_cart_discount\"] = order_cart_discount\n\n # convert Grand Total\n order_grand_total = self.str_to_decimal( \n row[\"Order Grand Total\"], \n \"Order Grand Total\", True) \n order_dict[\"order_grand_total\"] = order_grand_total\n\n # convert coupon value to decimal\n order_coupon_value = self.str_to_decimal(\n row[\"Order Coupon Value\"], \n \"Order Coupon Value\") \n order_dict[\"order_coupon_value\"] = order_coupon_value\n\n # convert Payment fee to decimal\n order_payment_fee = self.str_to_decimal( \n row[\"Payment Fee\"], \n \"Payment Fee\") \n order_dict[\"payment_fee\"] = order_payment_fee\n\n # convert payment amt to decimal\n order_payment_amt = self.str_to_decimal( \n row[\"Payment Amount\"], \n \"Payment Amount\") \n order_dict[\"payment_amount\"] = order_payment_amt \n\n\n order_dict[\"order_coupon_code\"] = row[\"Order Coupon Code\"] \n order_dict[\"order_status\"] = row[\"Order Status\"]\n\n order_dict[\"payment_method\"] = row[\"Payment Method\"]\n order_dict[\"payment_response\"] = row[\"Payment Response\"]\n\n # Translate Yes/NO to True/false\n if row[\"Payment Is Live\"] == \"No\":\n order_dict[\"payment_live\"] = False\n else:\n order_dict[\"payment_live\"] = True\n\n # translate {Yes/No} to True/False\n if row[\"Payment Successful\"] == \"Yes\":\n order_dict[\"payment_successful\"] = True\n else:\n order_dict[\"payment_successful\"] = False\n \n return order_dict #", "def getOrderList(self):\r\n\t\treturn self.pair.orders", "def test_retrieve_all_orders(self):\n response = self.api_test_client.get('{}/orders'.format(self.BASE_URL))\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_as_json(\n response)['orders'][0]['item_name'], self.ORDER['item_name'])\n self.assertEqual(response_as_json(\n 
response)['orders'][1]['item_name'], self.ORDER_2['item_name'])\n self.assertEqual(len(response_as_json(response)['orders']), 2)", "def check_order(self, ohlc, date, commission):\r\n\r\n op = ohlc[0]\r\n\r\n for o in order_queue:\r\n if position() != 0 and position() + o.units != 0 and len(order_queue) == 1:\r\n o.is_parents = False\r\n\r\n if o.limit_price:\r\n trading_price = o.limit_price\r\n\r\n else:\r\n trading_price = op\r\n\r\n setattr(o, 'trading_price', trading_price)\r\n setattr(o, 'trading_date', date)\r\n\r\n if o.is_long:\r\n if 1 > o.units > 0:\r\n\r\n size = int((self.execute.equity * o.units) / trading_price)\r\n setattr(o, 'units', size)\r\n\r\n if o.stop_loss:\r\n stop_loss_price = o.trading_price * (1 - o.stop_loss)\r\n setattr(o, 'stop_loss_prices', stop_loss_price)\r\n\r\n if o.stop_profit:\r\n stop_profit_price = o.trading_price * (1 + o.stop_profit)\r\n setattr(o, 'stop_profit_prices', stop_profit_price)\r\n\r\n if not o.is_parents:\r\n add_position_long_order.append(o)\r\n\r\n elif o.is_short:\r\n\r\n if -1 < o.units < 0:\r\n size = int((self.execute.equity * o.units) / trading_price)\r\n\r\n setattr(o, 'units', size)\r\n\r\n if o.stop_loss:\r\n stop_loss_price = o.trading_price * (1 + o.stop_loss)\r\n setattr(o, 'stop_loss_prices', stop_loss_price)\r\n\r\n if o.stop_profit:\r\n stop_profit_price = o.trading_price * (1 - o.stop_profit)\r\n setattr(o, 'stop_profit_prices', stop_profit_price)\r\n\r\n if not o.is_parents:\r\n add_position_short_order.append(o)\r\n\r\n order_execute.append(o)\r\n self.work(ohlc, date=date, commission=commission)\r\n\r\n order_queue.clear()\r\n\r\n self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)", "def orders_history(self): \n return(self._d_orders['history'])", "def get_dep_times(self,route,dt):\n \n day = dt.weekday()\n month = dt.month\n weekend = day > 4\n \n\n variations = self.dep_times[route]\n output = []\n for index, variation in enumerate(variations):\n pattern = variation['pattern']\n times=[]\n busIDs=[]\n for bus_number, pair in enumerate(variation['leave_times']):\n if self.runs_today(pair['schedule'],day):\n ts = pair['lt'].split(':')\n total = int(ts[0])*3600 + int(ts[1]) * 60 + int(ts[2])\n times.append(total)\n busIDs.append(bus_number)\n \n matrix = pd.DataFrame({'actualtime_arr_from':times})\n matrix['dayofweek']=day\n matrix['month'] = month\n matrix['weekend'] = weekend\n matrix['variation'] = index\n matrix['busIDs'] = busIDs\n matrix['routeid'] = route \n if matrix.shape[0] > 0:\n output.append({'pattern':pattern,'matrix':matrix})\n return output", "def query_orders(self):\n return self._call_txtrader_api('query_orders', {})", "def get(self):\n return sync.get_open_orders()", "def get_order_limit_data():\n\n chargeDB = ChargeDBHelper()\n order_limit_list = []\n\n rxcui_bundles = chargeDB.get_all_charge_bundles()\n clinic_count = clinic_cnt_for_days(chargeDB.get_days_spanned())\n for bundle in rxcui_bundles:\n order_limit_list.append(to_order_limit_row(bundle, clinic_count))\n\n\n\n chargeDB.close()\n return order_limit_list", "def on_order(self, order: OrderData):\n self.position_calculator.update_position(order)\n\n self.current_pos = self.position_calculator.pos\n self.avg_price = self.position_calculator.avg_price\n\n if order.status == Status.ALLTRADED and order.vt_orderid in (self.long_orders + self.short_orders):\n\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n if order.vt_orderid in self.short_orders:\n 
self.short_orders.remove(order.vt_orderid)\n\n self.last_filled_order = order\n\n for ids in (self.long_orders + self.short_orders + self.profit_orders):\n self.cancel_order(ids)\n\n if abs(self.position_calculator.pos) < self.fixed_size:\n return\n\n step = self.get_step()\n\n # tick 存在且仓位数量还没有达到设置的最大值.\n if self.tick and abs(self.position_calculator.pos) < self.max_pos_size * self.fixed_size:\n buy_price = order.price - step * self.grid_step\n sell_price = order.price + step * self.grid_step\n\n buy_price = min(self.tick.bid_price_1 * (1 - 0.0001), buy_price)\n sell_price = max(self.tick.ask_price_1 * (1 + 0.0001), sell_price)\n\n long_ids = self.buy(buy_price, self.fixed_size)\n short_ids = self.sell(sell_price, self.fixed_size)\n\n self.long_orders.extend(long_ids)\n self.short_orders.extend(short_ids)\n\n if order.status == Status.ALLTRADED and order.vt_orderid in self.profit_orders:\n self.profit_orders.remove(order.vt_orderid)\n if abs(self.position_calculator.pos) < self.fixed_size:\n self.cancel_all()\n\n if not order.is_active():\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.profit_orders:\n self.profit_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.stop_orders:\n self.stop_orders.remove(order.vt_orderid)\n\n self.put_event()", "def get_timed_product(fromday, endday, shop):\n queryset = OrderDetail.objects.filter(shop=shop).filter(start_time__gte=fromday).filter(start_time__lte=endday)\n return queryset", "def on_change_mission_order(self, cr, uid, ids , mission_order_id , context=None):\n if context is None:\n context = {}\n res ={}\n result = []\n \n mission_order = self.pool.get('hr.employee.mission').browse(cr,uid,mission_order_id )\n \n return {'value': { 'start_grant_date': mission_order.start_date, \n 'end_grant_date': mission_order.end_date }}", "def get_orders(request):\n close_old_connections()\n try:\n # Give all orders maded on the given date.\n return Order.objects.filter(\n date__date=request.GET['date']).order_by('-date')\n except MultiValueDictKeyError:\n # Give all orders today.\n return Order.objects.filter(\n date__date=datetime.now().date()).order_by('-date')", "def _compute_order_priorities(self, orders):\n order_priorities = {}\n times_last_created = {}\n\n if len(orders) == 0:\n return order_priorities\n\n time_last_order_created = orders[-1].time_created\n # We'll analyze all orders created in the\n # bodega_all.serializers.DEFAULT_ORDER_EXPIRATION_TIME_LIMIT window\n # before the last order that we're computing priorities for. In\n # practice that covers virtually all orders that hadn't expired. 
It's\n # not worth refactoring to move that constant somewhere else for this\n # hack, and since it's only an approximation anyway it's okay if the\n # values don't match up exactly.\n ordering_start_time = time_last_order_created + timedelta(hours=-24)\n all_orders = Order.objects.filter(\n time_created__gt=ordering_start_time,\n time_created__lte=time_last_order_created)\n for order in all_orders.order_by('time_created'):\n owner_sid = get_sid(order.owner)\n time_last_created = times_last_created.get(owner_sid, None)\n if time_last_created is None:\n # First order is not throttled.\n throttled_time_created = order.time_created\n else:\n # Hard-coded throttle amount since this is only a hack.\n # At about 200 jobs per release pipeline run, throttling them\n # at 4 minutes each will spread them evenly over ~13 hours.\n # Any orders placed by other individual users during those ~13\n # hours will be in the middle rather than the back of the\n # queue.\n throttled_time_created = max(\n time_last_created + timedelta(minutes=4),\n order.time_created)\n times_last_created[owner_sid] = throttled_time_created\n order_priorities[order.sid] = throttled_time_created.isoformat()\n\n return order_priorities", "def retrieve_recently_changed_orders(self, **kwargs):\n return self.client.execute(\"order/multi-get\", \"GET\", kwargs)", "def on_order(self, order: OrderData):\n\n if order.vt_orderid not in (self.short_orders + self.long_orders):\n return\n\n self.pos_calculator.update_position(order)\n\n self.current_pos = self.pos_calculator.pos\n self.avg_price = self.pos_calculator.avg_price\n\n if order.status == Status.ALLTRADED:\n\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n self.trade_count += 1\n\n short_price = order.price + self.step_price\n if short_price <= self.high_price:\n orders = self.short(short_price, self.order_volume)\n self.short_orders.extend(orders)\n\n if len(self.long_orders) < self.max_open_orders:\n long_price = order.price - self.step_price * self.max_open_orders\n if long_price >= self.low_price:\n orders = self.buy(long_price, self.order_volume)\n self.long_orders.extend(orders)\n\n if order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n self.trade_count += 1\n long_price = order.price - self.step_price\n if long_price >= self.low_price:\n orders = self.buy(long_price, self.order_volume)\n self.long_orders.extend(orders)\n\n if len(self.short_orders) < self.max_open_orders:\n short_price = order.price + self.step_price * self.max_open_orders\n if short_price <= self.high_price:\n orders = self.short(short_price, self.order_volume)\n self.short_orders.extend(orders)\n\n if not order.is_active():\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n self.put_event()", "def get_pulled(shop):\n ods = OrderDetail.objects.filter(assembled=1)\n ods = ods.filter(shop=shop)\n ods = ods.filter(eta__gt=datetime.datetime.now())\n pulled = 0\n for od in ods: pulled += od.plan\n\n return ods, pulled", "async def handle_get_active_orders_response(self, response: RequesterResponse\n ) -> HitbtcOrders:", "def test_get_order_items(self):\n pass", "def __handle_open_orders(self):\n portfolio = self.get_portfolio_object()\n # only take complete orders\n orders = [order for order in portfolio.orders if order.status == Status.confirmed]\n time_zone = TraderBase.get_timezone()\n now = 
datetime.datetime.now(time_zone)\n for order in orders:\n price = self.db_tool.session.query(Series)\\\n .filter(order.stock_id == Series.stock_id) \\\n .filter(Series.date.between(order.date, now)) \\\n .filter(order.price >= Series.pricehigh)\\\n .order_by(Series.date.asc()).first()\n if price:\n order.status = Status.completed\n order.date = price.date\n self.connect_related_order(order)\n else:\n diff = now - order.date.replace(tzinfo=time_zone)\n hours = diff.total_seconds() / 60\n if hours >= self.expire_in_hours:\n self.logger.info(\"Order is expired because limit {} for {} \"\n \"was not reached during the day\".\n format(order.price, order.stock_id))\n order.status = Status.expired\n portfolio.cash -= order.price_complete", "def get_entries(order):\n users_entries = {}\n for item in order.items.all():\n entries_per_order = []\n entries = Entries.objects.filter(orderItem=item.id)\n for ent in entries:\n entries_per_order.append(ent.ticket_number)\n n_order = {\n item.id: entries_per_order\n }\n users_entries.update(n_order)\n return users_entries", "def _update_order_statistics(self, price, dt):\n \n if self.price_open is None:\n if self.order_master.filled>0:\n #log.info('ok')\n self.price_open = price\n self.date_open = dt\n \n #if self.order_master.amount==self.order_master.filled:\n # self.price_open = self.data[self.symbol].price\n # \n\n #log.info(\"%s/%s\" % (bo.order_master.amount, bo.order_master.filled))\n \n self.price_close = price\n \n if self.price_diff_d>self.MFE:\n self.MFE = self.price_diff_d\n \n if self.price_diff_d<self.MAE:\n self.MAE = self.price_diff_d", "async def get_all_orders(self, symbol):\n params = {\n \"symbol\": symbol,\n \"timestamp\": tools.get_cur_timestamp_ms()\n }\n success, error = await self.request(\"GET\", \"/api/v3/allOrders\", params=params, auth=True)\n return success, error", "def get_order_detail_count(orderid): \n data = order_obj.get_order_detail(orderid,\"1\")\n return data", "def processMarketOrders(self):\n try:\n nextRound = self.currentRound+1\n resultsList = []\n master = {}\n self.genMarketStat()\n myMarketStat = self.marketStats[str(self.currentRound)]\n \n # sorted lists of market orders\n master['buyAL'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'max', True, {'value':'AL', 'min':0})\n master['buyEC'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'max', True, {'value':'EC', 'min':0})\n master['buyIA'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'max', True, {'value':'IA', 'min':0})\n master['sellAL'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'min', False, {'value':'AL', 'max':0})\n master['sellEC'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'min', False, {'value':'EC', 'max':0})\n master['sellIA'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'min', False, {'value':'IA', 'max':0})\n \n for res in ['AL', 'EC', 'IA']:\n for sellOrder in master['sell%s' % res]:\n # min sell order gets first chance to sell its product\n if sellOrder.amountUsed == sellOrder.amount:\n pass # seller has sold all he wants with this order\n else:\n i = 0\n for buyOrder in master['buy%s' % res]:\n # determine price, allow for bidding on price\n try:\n nextBuyOrder = master['buy%s' % res][i+1]\n if nextBuyOrder.max < buyOrder.max and (nextBuyOrder.max+1) >= sellOrder.min:\n price = nextBuyOrder.max + 1\n else:\n price = buyOrder.max\n except IndexError:\n price = buyOrder.max\n # max buy order gets first chance to buy sellers product\n 
resultsList.append(self.processMarketTransaction(buyOrder, sellOrder, price))\n i += 1\n \n # set the average market prices for this round\n if getattr(myMarketStat, 'volSold%s' % res) > 0:\n setattr(myMarketStat, 'avgSold%s' % res, (getattr(myMarketStat, 'sumSold%s' % res) / \n getattr(myMarketStat, 'volSold%s' % res)))\n \n # clean up market orders for next round\n for orderID in self.marketOrders.keys():\n myMarketOrder = self.marketOrders[orderID]\n myMarketOrder.cleanUp()\n if myMarketOrder.amount == 0:\n resultsList.append('cancel market Order=%s' % orderID)\n self.cancelMarketOrder(orderID)\n \n return str(resultsList)\n except:\n return 'galaxy->processMarketOrders error'", "def trackRentRequest(self):\n\t\t#start_date = timezone.now().date()\n\t\tstart_dat=datetime.today()\n\t\tstart_date = start_dat - timedelta( hours=start_dat.time().hour,minutes=start_dat.time().minute,seconds=start_dat.time().second ) \n\t\tend_date=start_dat\n\t\tans=None\n\t\t#print start_dat.time().hour\n\t\tprint end_date\n\t\tans=Rents.objects.filter(date_of_issue__range=(start_date,end_date))\n\t\tlst=[]\n\t\tfor b in ans:\n\t\t\towneradd=b.owner_id.address\n\t\t\tuseradd=b.userid.address\n\t\t\tusername=b.userid.email\n\t\t\townername=b.owner_id.email\n\t\t\tuserphone=b.userid.contact_no\n\t\t\townerphone=b.owner_id.contact_no\n\t\t\tbookname=b.bookid.title\n\t\t\tstatus=b.paymentid.ispending\n\t\t\tbook=b.__dict__\n\t\t\tbook['owneradd']=owneradd\n\t\t\tbook['useradd']=useradd\n\t\t\tbook['username']=username\n\t\t\tbook['ownername']=ownername\n\t\t\tbook['userphone']=userphone\n\t\t\tbook['ownerphone']=ownerphone\n\t\t\tbook['name']=bookname\n\t\t\tif status==True:\n\t\t\t\tbook['status']=\"Pending\"\n\t\t\telse:\n\t\t\t\tbook['status']=\"Delivered\"\n\t\t\tlst.append(book)\n\t\t#print ans\n\t\tif ans is None:\n\t\t\tprint \"not found\"\n\t\telse:\n\t\t\tprint \"found\"\n\t\treturn lst", "def order(self, request):\n is_auth = request.session.get(\"is_auth\", False)\n if not is_auth:\n return HttpResponseRedirect('/crisis')\n\n uid = request.session['uid']\n context = RUNNING_INFO.get(uid, {})\n context.update({\"is_auth\": is_auth,\n \"is_daily\": request.session.get(\"is_daily\", False),\n \"is_leader\": request.session.get(\"is_leader\", False),\n \"entity_list\": ENTITY,\n \"parts_list\": PARTS,\n \"detail_info_list\": DETAIL_INFO})\n\n if \"priority\" not in context:\n priority = {}\n for item in ARMY:\n priority.update({item: 1})\n context.update({\"priority\": priority})\n\n if context.get(\"is_run\", False):\n context.update({\"left_time\": self.utils.get_remaining_time(uid),\n \"order\": self.utils.get_current_unit_order(uid)})\n \"\"\" Context Example\n context = {\"username\": self.utils.get_user_name(uid),\n \"is_run\": False,\n \"is_auth\": is_auth,\n \"resource\": {\"money\": 100, \"food\": 200, \"fuel\": 300},\n \"entity\": {\"armor_composite\": 1, \"armor_plate\": 2, \"control_block\": 3,\n \"gun_receiver\": 4, \"kevlar_fiber\": 5, \"laser_aimer\": 6,\n \"powder_charge\": 7, \"rare_item\": 8, \"tnt_charge\": 9},\n \"parts\": {\"artillery_armor\": 1, \"artillery_chassis\": 2, \"artillery_shell\": 3, \"detonator\": 4,\n \"gunner_armor\": 5, \"gunner_gun\": 6, \"jeep_armor\": 7, \"jeep_gun\": 8, \"sniper_armor\": 9,\n \"sniper_gun\": 10, \"soldier_gun\": 11, \"tank_chassis\": 12, \"thrower_armor\": 13,\n \"thrower_gun\": 14, \"wave_emitter\": 15},\n 'order': {'soldier': 1, 'thrower': 4, 'artillery': 8, 'gunner': 2, 'base_artillery': 7, 'jeep': 6, 'artillery_emp': 9, 'base_tank': 5, 
'artillery_cassete': 0, 'sniper': 3}\n }\n \"\"\"\n\n if request.method == \"POST\":\n if \"start\" in request.POST:\n order, priority = {}, {}\n data = dict(request.POST)\n\n for item in ARMY:\n try:\n count = int(data.get(item, [''])[0])\n except:\n count = 0\n try:\n prior = int(data.get(\"%s_priority\" % item, [''])[0])\n except:\n prior = 1\n order.update({item: count})\n priority.update({item: prior})\n\n context.update({\"is_run\": True,\n \"order\": order,\n \"priority\": priority,\n \"left_time\": self.utils.get_remaining_time(uid)})\n\n RUNNING_INFO.update({uid: context})\n self.utils.start_gather(uid, context)\n elif \"stop\" in request.POST:\n uid = request.session['uid']\n context = RUNNING_INFO.get(uid, {})\n context.update({\"is_run\": False, \"left_time\": \"00:00:00\"})\n RUNNING_INFO.update({uid: context})\n self.utils.stop_gather(uid)\n\n return render_to_response(\"crisis/order.html\",\n context,\n context_instance=RequestContext(request))", "def add_orders(self, response_data):\n orders = response_data[self.DATA][self.DATA]\n for order in orders:\n self.orders.append(self.process_order_data(order))", "def get(self):\n return DAO.orders", "def _getdata(self, data):\n lines = []\n start_date = str(data['form']['start_date'])\n end_date = str(data['form']['end_date'])\n department_ids = data['form']['department_ids']\n\n vehicles_ids = self.pool.get('fleet.vehicle').search(self.cr, self.uid,\\\n [('department_id', 'in', department_ids)], context=self.context)\n\n fuel_qty_line_obj = self.pool.get('fuel.qty.line')\n\n sdate = datetime.strptime(start_date, \"%Y-%m-%d\")\n syear = sdate.year\n smonth = sdate.month\n edate = datetime.strptime(end_date, \"%Y-%m-%d\")\n eyear = edate.year\n emonth = edate.month\n\n fuel_qty_line_ids = fuel_qty_line_obj.search(self.cr, self.uid,\\\n [('vehicles_id', 'in', vehicles_ids)], context=self.context)\n\n\n\n counter = 1\n for qty_line in fuel_qty_line_obj.browse(self.cr, self.uid, \\\n fuel_qty_line_ids, context=self.context):\n current_m = int(qty_line.month)\n current_y = int(qty_line.year)\n start = current_m >= smonth and current_y >= syear\n end = current_m <= emonth and current_y <= eyear\n if start and end:\n line = {'type':str(counter)+\" : \"+\\\n qty_line.vehicles_id.type.name}\n line['vehicle_no'] = qty_line.vehicles_id.vin_sn\n line['spent'] = qty_line.spent_qty\n line['counter_no'] = str(qty_line.vehicles_id.odometer)+\" \"+\\\n qty_line.vehicles_id.odometer_unit\n line['date'] = qty_line.month+\"/\"+qty_line.year\n lines.append(line)\n counter += 1\n return lines", "def get_data(self):\n# epoch_from = 1301641200\n# epoch_to = epoch_from+60*60*24\n \"\"\"\n letting runs finish for 2 more hours\n ideally, want to make this a function of time from schedule plus some\n variation, like 1 hour just in case\n \"\"\" \n# epoch_to_adjusted = epoch_to + 7200\n conn = self.connect_to_mongo()\n db = conn.muni\n \n# print \"==== Collecting starting runs from %s to %s ====\"\\\n# % (str(time.ctime(epoch_from)), str(time.ctime(epoch_to)))\n \"\"\"\n > db.location.find({loc:{$within:{$center:[[37.80241, -122.4364],\n 0.01]}}})\n > db.location.find({loc:{$within:{$center:[[37.76048, -122.38895],\n 0.002]}}})\n \"\"\"\n bus_ids = db.location.find({'route':self.route_name}).distinct(\"bus_id\")\n for bus_id in bus_ids:\n c_start = db.location.find({\"bus_id\":bus_id,\n \"loc\":{\"$within\":{\"$center\":[[self.start_lat, self.start_lon],\n self.start_prec]}}\n }).sort(\"cur_time\", DESCENDING)\n self.massage_start_data(c_start)\n \"\"\"\n 
TODO: the end point seems to be too nice to Muni, need to tighten\n the circle a little\n \"\"\"\n c_end = db.location.find({\"bus_id\":bus_id,\n \"loc\":{\"$within\":{\"$center\":[[self.end_lat, self.end_lon],\n self.end_prec]}}\n }).sort(\"cur_time\", ASCENDING)\n self.massage_end_data(c_end)\n if self.to_log:\n print self.start_bus_ids_to_times\n print self.end_bus_ids_to_times\n \n return self.start_bus_ids_to_times, self.end_bus_ids_to_times", "def getStatVentesMois(self, in_data):\n\n try:\n date_debut = in_data['date_debut']\n dt_debut = dateutil.parser.parse(date_debut)\n date_fin = in_data['date_fin']\n dt_fin = dateutil.parser.parse(date_fin)\n except:\n out_data = {\n 'success': False\n }\n return out_data\n\n local_dt_debut = dt_debut.astimezone (pytz.timezone('Europe/Paris'))\n debut = datetime(local_dt_debut.year, local_dt_debut.month, local_dt_debut.day)\n local_dt_fin = dt_fin.astimezone (pytz.timezone('Europe/Paris'))\n fin = datetime(local_dt_fin.year, local_dt_fin.month, local_dt_fin.day) + timedelta(days=1)\n\n commandes=[]\n ventes=[]\n day = 0\n stop = False\n ca = 0\n nb_commandes = 0\n nb_souscriptions = 0\n while not stop :\n time_debut = debut + timedelta(days=day)\n timestamp = calendar.timegm(time_debut.timetuple()) * 1000\n time_fin = time_debut + timedelta(days=1)\n c_list = Commande.objects.filter(etat='PAY',date__gte=time_debut,date__lt=time_fin).distinct()\n # ch_list = CommandeHistory.objects.filter(etat='PAY',date__gte=time_debut, date__lt=time_fin)\n total_euros = 0\n total_souscriptions = 0\n total_commandes = 0\n\n for commande in c_list:\n total_euros += commande.montant\n for souscription in commande.souscription_set.all():\n total_souscriptions += souscription.quantite\n total_commandes += 1\n\n ca+=total_euros\n nb_souscriptions+=total_souscriptions\n nb_commandes+=total_commandes\n commandes.append([timestamp,total_commandes])\n ventes.append([timestamp,total_euros])\n day += 1\n if (debut + timedelta(days=day))>=fin:\n stop=True\n\n serie_list = [\n {\n 'label': \"commandes\",\n 'data': commandes,\n 'yaxis': 1\n },\n {\n 'label': \"€\",\n 'data': ventes,\n 'yaxis': 2\n }\n ]\n\n options = {\n \"series\": {\n \"lines\": {\n \"show\": True,\n \"fill\": True\n },\n \"points\": { \"show\": True }\n },\n 'axisLabels': {\n 'show': True\n },\n \"xaxis\": {\n \"mode\": \"time\",\n \"timeformat\": \"%e %b\",\n \"monthNames\": [\"jan\", \"fev\", \"mar\", \"avr\", \"mai\", \"juin\", \"juil\", \"aout\", \"sept\", \"oct\", \"nov\", \"dec\"]\n },\n \"yaxes\": [\n {\n 'axisLabel': 'commandes',\n \"tickColor\":[\"#fff\"],\n \"tickDecimals\": 0,\n \"min\":0\n },\n {\n 'axisLabel': \"CA\",\n \"position\": \"right\",\n \"tickColor\":[\"#fff\"],\n \"tickDecimals\": 0,\n \"min\":0\n }\n ],\n \"grid\": {\n \"hoverable\": True,\n \"borderWidth\": 1\n },\n \"colors\": [\"rgb(138,75,117)\", \"rgb(71,160,62)\"],\n \"tooltip\":True,\n \"tooltipOpts\": {\n \"content\": \"%x : %y %s\"\n },\n \"legend\": {\n \"show\": True,\n \"labelFormatter\": None, # null or (fn: string, series object -> string)\n #\"labelBoxBorderColor\": color,\n #noColumns: number\n #'position': \"ne\" or \"nw\" or \"se\" or \"sw\"\n #margin: number of pixels or [x margin, y margin]\n #backgroundColor: null or color\n #backgroundOpacity: number between 0 and 1\n #container: null or jQuery object/DOM element/jQuery expression\n #sorted: null/false, true, \"ascending\", \"descending\", \"reverse\", or a comparator\n }\n };\n\n\n out_data = {\n 'success': True,\n 'souscriptions': serie_list,\n 'options': 
options,\n 'ca':ca,\n 'nb_commandes':nb_commandes,\n 'nb_souscriptions':nb_souscriptions\n }\n return out_data", "def _compute_order_priorities_stats(self, orders):\n order_prices = {}\n tab_limits = {}\n tab_demands = {}\n total_fulfilled_prices = Counter()\n valid_statuses = set([Order.STATUS_OPEN, Order.STATUS_FULFILLED])\n\n for order in orders:\n if order.status not in valid_statuses:\n bodega_value_error(\n log,\n ('Order %s status %s is not valid for computing '\n 'price-based priority') % (order, order.status))\n\n order_price = 0.0\n if not order.maintenance:\n # We currently assume that each user has a single tab,\n # but this may change in the future.\n if order.tab.sid not in tab_limits:\n tab_limits[order.tab.sid] = order.tab.limit\n\n if order.tab.sid not in tab_demands:\n tab_demands[order.tab.sid] = 0.0\n\n # Compute order price as a sum of its items' prices.\n item_prices = \\\n self.item_tools.get_prices_for_items(order.items.items())\n order_price = sum(item_prices.values())\n\n if order.status == Order.STATUS_FULFILLED:\n total_fulfilled_prices[order.tab.id] += order_price\n\n tab_demands[order.tab.sid] += order_price\n\n log.debug('Order %s has a price of %s' % (order, order_price))\n order_prices[order.sid] = order_price\n\n total_tab_limit = sum(tab_limits.values())\n\n # Generate a list of tab_demands / tab_limit to compute the median\n # demand\n tab_demand_per_limit = sorted(\n [tab_demands[key] / tab_limits[key]\n for key in tab_demands])\n\n if total_tab_limit < 0:\n bodega_value_error(\n log,\n 'Total tab limit is negative: %s' % total_tab_limit)\n elif total_tab_limit == 0:\n if orders:\n bodega_value_error(\n log,\n ('Total tab limit is 0 for non-empty list of orders. '\n 'This may be due to a race condition in between the time '\n 'we collect the tab ids and fetch their limits.'))\n median_demand = None\n else:\n median_demand = statistics.median(tab_demand_per_limit)\n\n order_priority_stats = {\n 'median_demand': median_demand,\n 'order_prices': order_prices,\n 'tab_limits': tab_limits,\n 'total_fulfilled_prices': dict(total_fulfilled_prices)\n }\n\n log.debug('Order priority stats: %s' % order_priority_stats)\n return order_priority_stats", "def on_order(self, order: OrderData):\n pass", "def on_order(self, order: OrderData):\n pass", "def on_order(self, order: OrderData):\n pass", "def test_get_order_list(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(len(data), 2)", "def test_ordered_amount(self):\n self.app = self.make_app(argv = ['report', 'project_status', self.examples[\"project\"], '--debug', '-o', \"{'P001_101_index3':10, 'P001_102':20}\"],extensions=['scilifelab.pm.ext.ext_couchdb'])\n handler.register(DeliveryReportController)\n self._run_app()\n data = ast.literal_eval(self.app._output_data['debug'].getvalue())\n ordered = {x[0]:x[4] for x in data['table']}\n self.assertEqual(ordered[\"P001_101_index3\"], 10)\n self.assertEqual(ordered[\"P001_102\"], 20)", "def get_user_orders(self, loginID):\n order_details = {}\n self.cursor.execute(\"\"\"SELECT orderNumber, orderDate FROM orderlog WHERE loginID=%s \n ORDER BY orderDate DESC, orderNumber DESC\"\"\", (loginID,))\n for order in self.cursor.fetchall():\n order_details[str(order[0])] = {'title': [], 'quantity': [], 'ISBN': []}\n # this line only needs to execute once, but its easier to do it like this.\n order_details[str(order[0])]['date'] = order[1]\n self.cursor.execute(\"\"\"SELECT ISBN FROM 
orderlog O INNER JOIN productof P ON O.orderNumber = P.orderNumber\n WHERE O.orderNumber=%s\"\"\", (order[0],))\n for book in self.cursor.fetchall():\n self.cursor.execute(\"\"\"SELECT title, quantity FROM book B, productof P, orderlog O WHERE P.ISBN=%s\n AND P.orderNumber = O.orderNumber AND P.ISBN = B.ISBN AND O.orderNumber = %s\"\"\", (book[0], order[0]))\n for details in self.cursor.fetchall():\n title = details[0]\n quantity = details[1]\n order_details[str(order[0])]['title'].append(title)\n order_details[str(order[0])]['quantity'].append(quantity)\n order_details[str(order[0])]['ISBN'].append(book[0])\n return order_details", "def get_new_orders(self):\n\n # note we only get orders from the strategies with UPDATED =\n # True, i.e. only those which got new pricing information this\n # tick. Among other reasons, this is because some strategies\n # (e.g. MMStrategy) need to be fed new prices in order to\n # clear the order dictionary, so if we didn't use _if, we\n # could potentially place these orders many times.\n\n return self.stratgroup.get_orders_to_place_if(UPDATED)", "def get_open_orders(self, id):\n self.__init_client()\n open_orders = retry(lambda: self.client\n .futures_get_open_orders(symbol=self.pair)) \n open_orders = [o for o in open_orders if o[\"clientOrderId\"].startswith(id)]\n if len(open_orders) > 0:\n return open_orders\n else:\n return None", "def test_get_order_list(self):\n self._create_orders(5)\n resp = self.app.get('/orders')\n print(resp.data)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), 5)", "def orders(self):\n return self._orders", "def do_orders(self,args):\n try:\n orders = bitstamp.open_orders()\n orders = sorted(orders, key=lambda x: float(x['price']))\n buytotal,selltotal = 0,0\n numbuys,numsells = 0,0\n amtbuys,amtsells = 0,0\n buyavg,sellavg = 0,0\n numorder = 0 \n for order in orders:\n ordertype=\"Sell\" if order['type'] == 1 else \"Buy\"\n numorder += 1\n print '%s = %s | $%s @ %s BTC %s' % (numorder,ordertype,order['price'],order['amount'],order['id']) \n if order['type'] == 0:\n buytotal += D(order['price'])*D(order['amount'])\n numbuys += D('1')\n amtbuys += D(order['amount'])\n elif order['type'] == 1:\n selltotal += D(order['price'])*D(order['amount'])\n numsells += D('1')\n amtsells += D(order['amount'])\n if amtbuys:\n buyavg = D(buytotal/amtbuys).quantize(cPrec)\n if amtsells:\n sellavg = D(selltotal/amtsells).quantize(cPrec)\n print \"There are %s Buys. There are %s Sells\" % (numbuys,numsells)\n print \"Avg Buy Price: $%s. 
Avg Sell Price: $%s\" % (buyavg,sellavg)\n except Exception as e:\n print e", "def create_order():", "def getTimes():", "def getTimes():", "def getTimes():", "async def fetch_closed_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n # returns the most recent closed or canceled orders up to circa two weeks ago\n await self.load_markets()\n request = {}\n if since is not None:\n request['start'] = since\n if limit is not None:\n request['limit'] = limit # default 25, max 2500\n market = None\n response = None\n if symbol is None:\n response = await self.privatePostAuthROrdersHist(self.extend(request, params))\n else:\n market = self.market(symbol)\n request['symbol'] = market['id']\n response = await self.privatePostAuthROrdersSymbolHist(self.extend(request, params))\n #\n # [\n # [\n # 95412102131, # Order ID\n # null, # Group Order ID\n # 1653325121798, # Client Order ID\n # \"tDOGE:UST\", # Market ID\n # 1653325122000, # Created Timestamp in milliseconds\n # 1653325122000, # Updated Timestamp in milliseconds\n # -10, # Amount remaining(Positive means buy, negative means sell)\n # -10, # Original amount\n # \"EXCHANGE LIMIT\", # Order type\n # null, # Previous Order Type\n # null, # Millisecond timestamp of Time-In-Force: automatic order cancellation\n # null, # _PLACEHOLDER\n # \"4096\", # Flags, see parseOrderFlags()\n # \"POSTONLY CANCELED\", # Order Status, see parseOrderStatus()\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0.071, # Price\n # 0, # Average Price\n # 0, # Trailing Price\n # 0, # Auxiliary Limit price(for STOP LIMIT)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # 0, # Notify(0 if False, 1 if True)\n # 0, # Hidden(0 if False, 1 if True)\n # null, # Placed ID(If another order caused self order to be placed(OCO) self will be that other order's ID)\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # \"API>BFX\", # Routing, indicates origin of action: BFX, ETHFX, API>BFX, API>ETHFX\n # null, # _PLACEHOLDER\n # null, # _PLACEHOLDER\n # {\"_$F7\":1} # additional meta information about the order( _$F7 = IS_POST_ONLY(0 if False, 1 if True), _$F33 = Leverage(int))\n # ]\n # ]\n #\n return self.parse_orders(response, market, since, limit)", "def get_orders_group_by_day():\n\n try:\n initial_date = request.json.get('initial_date')\n final_date = request.json.get('final_date')\n\n order_schema = OrderSchema(many=True)\n\n all_orders = current_app.db.session.query(\\\n Order.order_date, \\\n func.sum(Order.total_value)\n )\\\n .filter(Order.status == 'close')\\\n .filter(Order.order_date >= initial_date)\\\n .filter(Order.order_date <= final_date)\\\n .group_by(\\\n Order.order_date\n ).all()\n\n\n\n \n response = generate_report_order_response(all_orders)\n\n return response, 201\n\n except Exception as e:\n response = format_standard_response(success=False,error=str(e))\n return response, 500", "def prepare_data_with_location(self,from_date,to_date,locations,all_products):\n data_dict = {}\n stock_quant_obj=self.env['stock.quant']\n for loc in locations:\n all_locations = self.get_all_locations(warehouse=False, location=loc)\n if not all_locations:\n continue\n #here we are finding the opening stock for these we are using base query\n #of inventory at date v10\n result = self.get_product_qty(all_locations,from_date)\n qty_dict = dict((x,y) for x, y in result)\n \n for product in all_products:\n last_sales = ''\n qty_purchase_in_duration = 0\n qty_sales_in_duration = 0\n 
last_purchase_date = ''\n scrap_location_qty = 0\n adjusted_qty_in_duration = 0\n warehouse_out_qty = 0\n warehouse_in_qty = 0\n# here from result of inventory at date we are seaching for specific product.\n opening_product_qty = qty_dict.get(product.id)\n\n #finding last sales qty\n last_sales = self.find_last_sales_qty(from_date,to_date,False,all_locations,product)\n #finding last purchase date of product\n last_purchase_date = self.find_last_purchase_date(from_date,to_date,all_locations,product)\n #fiding date purchase qty in duration for specific product\n qty_purchase_in_duration = self.find_purchase_qty_in_duration(from_date,to_date,all_locations,product)\n #fiding scrap qty of precific product\n scrap_location_qty = self.find_scap_location_qty(from_date,to_date,product,all_locations)\n #finding sales qty in duration\n qty_sales_in_duration = self.find_sale_qty_in_duration(from_date,to_date,False,all_locations,product)\n #fidning adjusted qty in duration\n adjusted_qty_in_duration = self.find_adjusted_qty_in_duration(from_date, to_date, product, all_locations)\n\n # dest_location_lst = self.get_other_wahouse_locations(warehouse)\n \n # if any(all_locations) and any(dest_location_lst):\n # #fidning warehouse in qty \n # warehouse_in_qty = self.find_warehouse_transer_in_qty(product, all_locations, dest_location_lst,from_date,to_date)\n # #fidning warehouse out qty for specific product.\n # warehouse_out_qty = self.find_warehouse_transer_out_qty(product, all_locations, dest_location_lst,from_date,to_date)\n \n # if warehouse_out_qty:\n # warehouse_out_qty = warehouse_out_qty and warehouse_out_qty[0][0] or ''\n # if warehouse_in_qty:\n # warehouse_in_qty = warehouse_in_qty and warehouse_in_qty[0][0] or ''\n \n if adjusted_qty_in_duration:\n adjusted_qty_in_duration = adjusted_qty_in_duration and adjusted_qty_in_duration[0][0] or '' \n if scrap_location_qty:\n scrap_location_qty = scrap_location_qty and scrap_location_qty[0][0] or ''\n \n # if qty_sales_in_duration:\n # qty_sales_in_duration = qty_sales_in_duration and qty_sales_in_duration[0][0] or ''\n # if qty_purchase_in_duration:\n # qty_purchase_in_duration = qty_purchase_in_duration or ''\n if last_sales:\n last_sales = datetime.strptime(last_sales and last_sales[0][0], '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%Y') or ''\n \n if last_purchase_date:\n last_purchase_date = datetime.strptime(last_purchase_date and last_purchase_date[0][0], '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%Y') or ''\n \n if data_dict.has_key(loc.id):\n data_lst=data_dict.get(loc.id)\n data_lst.append({'product':product,'sku':product.default_code or '','name':product.name,\n 'Cost':product.standard_price or '','sales_price':product.lst_price or '',\n 'opening_qty':opening_product_qty or 0,'last_sales':last_sales or '',\n 'last_purchase_date':last_purchase_date or '','qty_purchase_in_duration':qty_purchase_in_duration or 0,\n 'qty_sales_in_duration': qty_sales_in_duration or 0,'scrap_location_qty':scrap_location_qty or 0,\n 'adjusted_qty_in_duration':adjusted_qty_in_duration or 0\n ,'warehouse_in_qty':warehouse_in_qty or 0,\n 'warehouse_out_qty':warehouse_out_qty or 0 \n })\n data_dict.update({loc.id:data_lst})\n continue\n data_dict.update({loc.id:[{'product':product,'sku':product.default_code or '','name':product.name,\n 'Cost':product.standard_price or '','sales_price':product.lst_price or '',\n 'opening_qty':opening_product_qty or 0,\n 'last_sales':last_sales or '','last_purchase_date':last_purchase_date or '',\n 'qty_purchase_in_duration':qty_purchase_in_duration 
or 0,\n 'qty_sales_in_duration': qty_sales_in_duration or 0,\n 'scrap_location_qty':scrap_location_qty or 0,\n 'adjusted_qty_in_duration':adjusted_qty_in_duration or 0,\n 'warehouse_in_qty':warehouse_in_qty or 0,\n 'warehouse_out_qty':warehouse_out_qty or 0\n }]})\n return data_dict", "def get_data(self):\n data = list(IgnitionRow.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1]\n #two_hours = data\n num_players_data = [[max(min(elem['num_players_{}'.format(key)],50),0) for elem in two_hours] for key in self.keys]\n return num_players_data", "def prep(self, order):\n update = {}\n for col in list(set(self.numeric + self.non_numeric + self.currencies + self.columns)):\n try:\n if col in self.numeric:\n value = float(order[col])\n else:\n value = order[col]\n update[col] = value\n except:\n update[col] = 0.0\n continue\n update = pd.Series(update).fillna(0)\n update['currency_on_hold'] = order['product_id'][-3:] if order['side'] == 'buy' else order['product_id'][:3]\n update['create_time'] = pd.to_datetime(order['time'])\n update['update_time'] = pd.to_datetime(order['time'])\n update['time'] = update.update_time.to_datetime64().astype('int64')//1e9\n update['status'] = order['type']\n update['order_type'] = 'unknown' if not update['order_type'] else update['order_type']\n return update#pd.Series(update).fillna(0)", "def prepare_data_with_warehouse(self,from_date,to_date,warehouses,all_products):\n data_dict = {}\n stock_quant_obj=self.env['stock.quant']\n for warehouse in warehouses:\n all_locations = self.get_all_locations(warehouse)\n if not all_locations:\n continue\n \n #here we are finding the opening stock for these we are using base query\n #of inventory at date v10\n result = self.get_product_qty(all_locations,from_date)\n qty_dict = dict((x,y) for x, y in result)\n \n for product in all_products:\n last_sales = ''\n qty_purchase_in_duration = 0\n qty_sales_in_duration = 0\n last_purchase_date = ''\n scrap_location_qty = 0\n adjusted_qty_in_duration = 0\n warehouse_out_qty = 0\n warehouse_in_qty = 0\n# here from result of inventory at date we are seaching for specific product.\n opening_product_qty = qty_dict.get(product.id)\n\n #finding last sales qty\n last_sales = self.find_last_sales_qty(from_date,to_date,warehouse,all_locations,product)\n #finding last purchase date of product\n last_purchase_date = self.find_last_purchase_date(from_date,to_date,all_locations,product)\n #fiding date purchase qty in duration for specific product\n qty_purchase_in_duration = self.find_purchase_qty_in_duration(from_date,to_date,all_locations,product)\n #fiding scrap qty of precific product\n scrap_location_qty = self.find_scap_location_qty(from_date,to_date,product,all_locations)\n #finding sales qty in duration\n qty_sales_in_duration = self.find_sale_qty_in_duration(from_date,to_date,warehouse,all_locations,product)\n #fidning adjusted qty in duration\n adjusted_qty_in_duration = self.find_adjusted_qty_in_duration(from_date, to_date, product, all_locations)\n \n dest_location_lst = self.get_other_wahouse_locations(warehouse)\n \n if any(all_locations) and any(dest_location_lst):\n #fidning warehouse in qty \n warehouse_in_qty = self.find_warehouse_transer_in_qty(product, all_locations, dest_location_lst,from_date,to_date)\n #fidning warehouse out qty for specific product.\n warehouse_out_qty = self.find_warehouse_transer_out_qty(product, all_locations, dest_location_lst,from_date,to_date)\n \n if warehouse_out_qty:\n warehouse_out_qty = warehouse_out_qty 
and warehouse_out_qty[0][0] or ''\n if warehouse_in_qty:\n warehouse_in_qty = warehouse_in_qty and warehouse_in_qty[0][0] or ''\n \n if adjusted_qty_in_duration:\n adjusted_qty_in_duration = adjusted_qty_in_duration and adjusted_qty_in_duration[0][0] or '' \n if scrap_location_qty:\n scrap_location_qty = scrap_location_qty and scrap_location_qty[0][0] or ''\n \n # if qty_sales_in_duration:\n # qty_sales_in_duration = qty_sales_in_duration and qty_sales_in_duration[0][0] or ''\n # if qty_purchase_in_duration:\n # qty_purchase_in_duration = qty_purchase_in_duration[0][0] or ''\n if last_sales:\n last_sales = datetime.strptime(last_sales and last_sales[0][0], '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%Y') or ''\n \n if last_purchase_date:\n last_purchase_date = datetime.strptime(last_purchase_date and last_purchase_date[0][0], '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%Y') or ''\n \n if data_dict.has_key(warehouse.id):\n data_lst=data_dict.get(warehouse.id)\n data_lst.append({'product':product,'sku':product.default_code or '','name':product.name,\n 'Cost':product.standard_price or '','sales_price':product.lst_price or '',\n 'opening_qty':opening_product_qty or 0,'last_sales':last_sales or '',\n 'last_purchase_date':last_purchase_date or '','qty_purchase_in_duration':qty_purchase_in_duration or 0,\n 'qty_sales_in_duration': qty_sales_in_duration or 0,'scrap_location_qty':scrap_location_qty or 0,\n 'adjusted_qty_in_duration':adjusted_qty_in_duration or 0\n ,'warehouse_in_qty':warehouse_in_qty or 0,\n 'warehouse_out_qty':warehouse_out_qty or 0 \n })\n data_dict.update({warehouse.id:data_lst})\n continue\n data_dict.update({warehouse.id:[{'product':product,'sku':product.default_code or '','name':product.name,\n 'Cost':product.standard_price or '','sales_price':product.lst_price or '',\n 'opening_qty':opening_product_qty or 0,\n 'last_sales':last_sales or '','last_purchase_date':last_purchase_date or '',\n 'qty_purchase_in_duration':qty_purchase_in_duration or 0,\n 'qty_sales_in_duration': qty_sales_in_duration or 0,\n 'scrap_location_qty':scrap_location_qty or 0,\n 'adjusted_qty_in_duration':adjusted_qty_in_duration or 0,\n 'warehouse_in_qty':warehouse_in_qty or 0,\n 'warehouse_out_qty':warehouse_out_qty or 0\n }]})\n return data_dict", "def prepare_order(self, index, order_status):\n if(self.running_qty > 0 and index > 0):\n quantity = self.running_qty\n price = self.get_price_offset3(index)\n elif(self.running_qty < 0 and index < 0):\n quantity = abs(self.running_qty)\n price = self.get_price_offset3(index)\n else:\n quantity = self.ORDER_START_SIZE // 4\n price = self.get_price_offset2(index)\n if (price == None):\n return None\n else:\n return {'price': price, 'orderQty': quantity, 'side': \"Buy\" if index < 0 else \"Sell\"}", "def GetLiveOrders(self):\n\n return self._lorders", "async def test_retrieve_history_orders_by_time_range(self):\n history_orders = {\n 'historyOrders': [{\n 'clientId': 'TE_GBPUSD_7hyINWqAlE',\n 'currentPrice': 1.261,\n 'currentVolume': 0,\n 'doneTime': '2020-04-15T02:45:06.521Z',\n 'id': '46214692',\n 'magic': 1000,\n 'platform': 'mt5',\n 'positionId': '46214692',\n 'state': 'ORDER_STATE_FILLED',\n 'symbol': 'GBPUSD',\n 'time': '2020-04-15T02:45:06.260Z',\n 'type': 'ORDER_TYPE_BUY',\n 'volume': 0.07\n }],\n 'synchronizing': False\n }\n client.get_history_orders_by_time_range = AsyncMock(return_value=history_orders)\n start_time = datetime.now() - timedelta(seconds=1)\n end_time = datetime.now()\n actual = await api.get_history_orders_by_time_range(start_time, end_time, 1, 
100)\n assert actual == history_orders\n client.get_history_orders_by_time_range.assert_called_with('accountId', start_time, end_time, 1, 100)", "def get_week_order_detailed(date):\n query = sqla.text(\"\"\"\n SELECT \"user\".first_name, \"user\".last_name, \"user\".corridor, \"user\".room, bt.name\n FROM bread_order_date AS bod\n JOIN bread_order AS bo ON bod.id = bo.date_id\n JOIN \"user\" ON bo.user_id = \"user\".id\n JOIN bread_type bt ON bo.type_id = bt.id\n WHERE bod.id = :date\n ORDER BY \"user\".corridor, \"user\".room asc\n \"\"\")\n return db.session.execute(query, {\"date\": date.id})", "def _serialize_order_and_product_data(order_data:dict):\n\n placed_orders = []\n ordered_products = []\n\n for order in order_data:\n if order[\"financial_status\"] not in COMPLETE_ORDER_STATUSES:\n continue\n \n items = []\n products = []\n for item in order[\"line_items\"]:\n items.append(\n {\n \"ProductID\": item[\"id\"],\n \"SKU\": item[\"sku\"],\n \"ProductName\": item[\"title\"],\n \"Quantity\": item[\"quantity\"],\n \"ItemPrice\": item[\"name\"]\n }\n )\n\n products.append(\n {\n \"token\": PUBLIC_KEY,\n \"event\": \"Ordered Product\",\n \"customer_properties\": {\n \"$email\": order[\"customer\"][\"email\"],\n \"$first_name\": order[\"customer\"][\"first_name\"],\n \"$last_name\": order[\"customer\"][\"last_name\"]\n },\n \"properties\": {\n \"$event_id\": item[\"id\"],\n \"$value\": item[\"price\"],\n \"ProductID\": item[\"product_id\"],\n \"SKU\": item[\"sku\"],\n \"ProductName\": item[\"title\"],\n \"Quantity\": item[\"quantity\"]\n }\n }\n )\n \n ordered_products.append({\"order_id\":order[\"id\"], \"body\": products})\n\n placed_orders.append(\n {\n \"token\": PUBLIC_KEY,\n \"event\": \"Placed Order\",\n \"customer_properties\": {\n \"$email\": order[\"customer\"][\"email\"],\n \"$first_name\": order[\"customer\"][\"first_name\"],\n \"$last_name\": order[\"customer\"][\"last_name\"],\n \"$phone_number\": order[\"customer\"][\"phone\"],\n \"$address1\": order[\"customer\"][\"default_address\"][\"address1\"] if \"default_address\" in order[\"customer\"].keys() else None,\n \"$address2\": order[\"customer\"][\"default_address\"][\"address2\"] if \"default_address\" in order[\"customer\"].keys() else None,\n \"$city\": order[\"customer\"][\"default_address\"][\"city\"] if \"default_address\" in order[\"customer\"].keys() else None,\n \"$zip\": order[\"customer\"][\"default_address\"][\"zip\"] if \"default_address\" in order[\"customer\"].keys() else None,\n \"$region\": order[\"customer\"][\"default_address\"][\"province_code\"] if \"default_address\" in order[\"customer\"].keys() else None,\n \"$country\": order[\"customer\"][\"default_address\"][\"country_name\"] if \"default_address\" in order[\"customer\"].keys() else None,\n },\n \"properties\": {\n \"$event_id\": order[\"id\"],\n \"$value\": order[\"total_price\"],\n \"ItemNames\": [item[\"name\"] for item in order[\"line_items\"]],\n \"DiscountCode\": order[\"discount_codes\"],\n \"DiscountValue\": order[\"total_discounts\"],\n \"Items\": items,\n \"BillingAddress\": None if \"billing_address\" not in order.keys() else\n {\n \"FirstName\": order[\"billing_address\"][\"first_name\"],\n \"LastName\": order[\"billing_address\"][\"last_name\"],\n \"Company\": order[\"billing_address\"][\"company\"],\n \"Addaress1\": order[\"billing_address\"][\"address1\"],\n \"Address2\": order[\"billing_address\"][\"address2\"],\n \"City\": order[\"billing_address\"][\"city\"],\n \"Region\": order[\"billing_address\"][\"province\"],\n 
\"RegionCode\": order[\"billing_address\"][\"province_code\"],\n \"Country\": order[\"billing_address\"][\"country\"],\n \"CountryCode\": order[\"billing_address\"][\"country_code\"],\n \"Zip\": order[\"billing_address\"][\"zip\"],\n \"Phone\": order[\"billing_address\"][\"phone\"]\n },\n \"ShippingAddress\": None if \"shipping_address\" not in order.keys() else\n {\n \"FirstName\": order[\"shipping_address\"][\"first_name\"],\n \"LastName\": order[\"shipping_address\"][\"last_name\"],\n \"Company\": order[\"shipping_address\"][\"company\"],\n \"Addaress1\": order[\"shipping_address\"][\"address1\"],\n \"Address2\": order[\"shipping_address\"][\"address2\"],\n \"City\": order[\"shipping_address\"][\"city\"],\n \"Region\": order[\"shipping_address\"][\"province\"],\n \"RegionCode\": order[\"shipping_address\"][\"province_code\"],\n \"Country\": order[\"shipping_address\"][\"country\"],\n \"CountryCode\": order[\"shipping_address\"][\"country_code\"],\n \"Zip\": order[\"shipping_address\"][\"zip\"],\n \"Phone\": order[\"shipping_address\"][\"phone\"]\n }\n },\n \"time\": int(time.time())\n }\n )\n \n return placed_orders, ordered_products", "def get_timeslots(self):\n\n timeorder = self.xml().find(\".//TIME_ORDER\")\n try:\n timeslots = {\n slot.attrib[\"TIME_SLOT_ID\"]: slot.attrib[\"TIME_VALUE\"]\n for slot in timeorder.findall(\"TIME_SLOT\")\n }\n except AttributeError:\n timeslots = {}\n return timeslots", "def list_orders(self, symbol):\r\n param = {}\r\n param['symbol'] = self.__transfer_symbol(symbol)\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/processing-orders', param, self.timeout)", "def on_order(self, order: OrderData):\n # print(\"on_order\")\n # print(order)\n pass", "def OrderforInventory(request):\n modes=['manage','add','order']\n departments={}\n clearfilter=False\n for league in models.Dept.objects.all(): \n departments[league.pk]=league\n visited=False\n message=\"\"\n nonemptyAuthors = [x for x in request.POST.getlist('oAuthorName') if x!='']\n nonemptybooknames = [x for x in request.POST.getlist('obookName') if x!='']\n nonemptybookDesc = [x for x in request.POST.getlist('obookdesc') if x!='']\n nonemptyQuantities = [x for x in request.POST.getlist('oQuantity') if x!='']\n nonemptyRows = [x for x in request.POST.getlist('oRowRack') if x!='']\n nonemptyselectedDeparts = [x for x in request.POST.getlist('odepart_select') if x!='NA']\n\n \n for j,k,h,fa,z,loc in itertools.zip_longest(nonemptyAuthors,nonemptybooknames,nonemptybookDesc,nonemptyselectedDeparts,nonemptyQuantities,nonemptyRows):\n visited=True\n shortname=k[1:5] \n values=k.split(\"-\")\n if len(values)==1:\n ye=dt.today().year\n values.extend(['I',ye,'0'])\n c=loc.split(\"-\")\n if len(c)==1:\n c.extend(['0','0'])\n if len(values) >0:\n try:\n departmentDetails=models.Dept.objects.get(dpt_id=fa)\n except Exception as e:\n print(e)\n pass\n try:\n i=0\n testa = models.Atr.objects.values('a_id')\n for test in testa:\n if i>int(test['a_id']):\n i=i\n else:\n i=int(test['a_id'])\n \n varas = models.Atr.objects.values('name')\n isin=False\n for f in list(varas):\n if str(j) in f['name']:\n isin=True\n break\n if isin:\n pass\n else:\n models.Atr.objects.create(a_id=str(i+1),name=str(j),title=\"Mr.\",email=\"[email protected]\")\n except Exception as e:\n if \"does not\" in str(e):\n models.Atr.objects.create(a_id=str(i+1),name=str(j),title=\"Mr.\",email=\"[email protected]\")\n print(e)\n pass\n 
varset=None\n try:\n bookop=None\n i=0;\n testab = models.Bks.objects.values('b_id')\n for test in testab:\n if i>int(str(test['b_id']).split('_')[2]):\n i=i\n else:\n i=int(str(test['b_id']).split('_')[2])\n if (models.Bks.objects.filter(title=str(values[0])).exists()):\n try: \n if not models.Bks.objects.filter(title=str(values[0]),edition=str(values[1]),p_year=str(values[2]),pub=str(values[3])).exists():\n models.Bks.objects.create(b_id=\"IN_\"+shortname+\"_\"+str(i+1),title=str(values[0]),desc=str(h),type=\"ref\",edition=str(values[1]),p_year=str(values[2]),pub=str(values[3]),email=\"[email protected]\",a_id_id=str(i+1),dpt_id_id=str(fa))\n else:\n message=\"book with the same name already exists\"\n bookop=models.Bks.objects.filter(title=str(values[0]),edition=str(values[1]),p_year=str(values[2]),pub=str(values[3]))\n except Exception as e:\n print(e)\n else:\n if isin:\n atrobj=models.Atr.objects.get(name=str(j))\n models.Bks.objects.create(b_id=\"IN_\"+shortname+\"_\"+str(i+1),title=str(values[0]),desc=str(h),type=\"ref\",edition=str(values[1]),p_year=str(values[2]),pub=str(values[3]),email=\"[email protected]\",a_id_id=atrobj.a_id,dpt_id_id=str(fa))\n else:\n atrobj=models.Atr.objects.get(name=str(j))\n models.Bks.objects.create(b_id=\"IN_\"+shortname+\"_\"+str(i+1),title=str(values[0]),desc=str(h),type=\"ref\",edition=str(values[1]),p_year=str(values[2]),pub=str(values[3]),email=\"[email protected]\",a_id_id=atrobj.a_id,dpt_id_id=str(fa))\n\n except Exception as e:\n if \"Bks matching query does not\" in str(e):\n models.Bks.objects.create(b_id=\"IN_\"+shortname+\"_\"+str(i+1),title=str(values[0]),desc=str(h),type=\"ref\",edition=str(values[1]),p_year=str(values[2]),pub=str(values[3]),email=\"[email protected]\",a_id_id=str(i+1),dpt_id_id=str(fa))\n print(e)\n pass\n \n try:\n g=0\n bookobj =models.Bks.objects.filter(title=str(values[0]),edition=str(values[1]),p_year=str(values[2]),pub=str(values[3]))\n testba = models.Invt.objects.values('id') \n for test in testba:\n if g>int(str(test['id'])):\n g=g\n else:\n g=int(str(test['id']))\n \n Invobj=models.Invt.objects.filter(i_id_id=\"IN_\"+shortname+\"_\"+str(g+1))\n librarians=get_librarians()\n librnobj=None\n for u in librarians:\n if request.user.username.lower() == u.lower():\n librnobj=models.Librn.objects.get(lb_id=u)\n\n if len(bookobj) >= 0:\n if(len(Invobj) == 0):\n for s in bookobj:\n models.Invt.objects.create(id=str(g+1),qty=int(z),i_id_id=s.b_id,shelf=str(c[0]),rack=str(0),row=int(0))\n models.Border.objects.create(id=int(g+1),qty=int(z),status=loc,i_id_id=s.invt.id,lb_id_id=librnobj.lb_id)\n message=\"Order placed successfully\"\n else:\n for s in bookobj:\n models.Invt.objects.create(id=str(g+1),qty=int(z),i_id_id=s.b_id,shelf=str(c[0]),rack=str(0),row=int(0))\n models.Border.objects.create(id=int(g+1),qty=int(z),status=loc,i_id_id=s.invt.id,lb_id_id=librnobj.lb_id)\n message=\"Order placed successfully\"\n\n else:\n models.Invt.objects.create(id=str(g+1),qty=int(z),i_id_id=\"IN_\"+shortname+\"_\"+str(g+1),shelf=str(c[0]),rack=str(0),row=int(0))\n models.Border.objects.create(id=int(g+1),qty=int(z),status=loc,i_id_id=int(g+1),lb_id_id=librnobj.lb_id)\n message=\"Order placed successfully\"\n except Exception as e:\n try:\n if \"does not\" in str(e): \n models.Invt.objects.create(id=str(g+1),qty=int(z),i_id_id=\"IN_\"+shortname+\"_\"+str(g+1),shelf=str(c[0]),rack=str(0),row=int(0))\n else:\n t=models.Invt.objects.get(i_id_id=\"IN_\"+shortname+\"_\"+str(g+1))\n t.qty= t.qty+int(z)\n t.save()\n except Exception as e:\n 
message=\"There is already an exisiting order for this book\"\n print(e)\n \n \n else:\n message=\"the book details are not given properly\"\n pass\n \n if not visited:\n message=\"Fill the form properly and then press the SAVE \"\n return render(\n request,\n 'app/orderInv.html',\n {\n 'title':'Order Inventory',\n 'invmodes':modes,\n 'dispmode':'order',\n 'message':message,\n 'librarian':get_librarians(),\n 'le':list(range(1,2)),\n 'DepartmentList':departments.keys(),\n 'books':get_Books().values(),\n 'clearfilter':clearfilter,\n 'year':datetime.now().year,\n }\n )", "def test_addsNewOrdersFromAPIs(self):\n DataManagerUnitTest.dm.reload()\n result = DataManagerUnitTest.dm.onlineStoreDatabase.getOrders()\n self.assertEqual(len(DataManagerUnitTest.dm.getAllOrders()), len(result))", "def get_gdax_historical_data():\n \n start = None\n while not start:\n start,end,tid = getStartAndEndHistoric()\n if not start:\n time.sleep(60)\n #Todo: change this to 1min\n firsttimestamp = start\n engine = sa.create_engine(sql_address)\n products = [\"LTC-USD\",\"LTC-BTC\",\"ETH-USD\",\"ETH-BTC\",\"BTC-USD\"]\n public_client = gdax.PublicClient()\n deltat = datetime.timedelta(seconds = 200)\n timewindows = []\n while end - start > datetime.timedelta(seconds=0):\n if start + deltat > end:\n endx = end\n else:\n endx = start + deltat\n timewindows.append([start,endx])\n start += deltat\n results = []\n total = len(timewindows)\n current_idx = 0\n timeold = time.time()\n numofqueries = 0\n engine = sa.create_engine(sql_address)\n Base.metadata.bind = engine\n DBSession = sa.orm.sessionmaker()\n DBSession.bind = engine\n session = DBSession()\n for startx,endx in timewindows:\n\n current_idx += 1\n for i in products:\n repeat = True\n while repeat:\n\n #delay if ratelimts are close\n if numofqueries < 3:\n while time.time() - timeold < 1:\n time.sleep(0.05)\n \n timeold = time.time()\n numofqueries = 0\n try:\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n except:\n time.sleep(30)\n public_client = gdax.PublicClient()\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n numofqueries += 1\n\n #rate limit exceeded has 'message' as dict.\n if not 'message' in alist:\n repeat = False\n for a in alist:\n a[0] = datetime.datetime.fromtimestamp(float(a[0]))\n tmp = i.split('-')\n d = dict(coin = tmp[0], currency = tmp[1], timestamp = a[0], low=a[1], high=a[2], open=a[3], close=a[4], volume=a[5])\n results.append(d)\n lasttimestamp = a[0]\n\n #upload with batch size of 10000\n if len(results) > 10000:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n if len(results) > 0:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == 
tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n\n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n update.status='complete'\n order = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'order')).first()\n order.status='complete'\n session.commit()", "def test_order_ejection_4_hours(self):\n manager = EjectionManager(self.user)\n curr_time = self.curr_time\n\n manager.process_order_time_limit(self.order, curr_time)\n self.assertEqual(self.order.number_of_ejection_notices, 1)\n self.set_latest_order_update_time_created(\n self.order,\n curr_time)\n\n manager.process_order_time_limit(self.order,\n curr_time + timedelta(minutes=61))\n self.assertEqual(self.order.number_of_ejection_notices, 1)\n\n manager.process_order_time_limit(self.order,\n curr_time + timedelta(minutes=121))\n self.assertEqual(self.order.number_of_ejection_notices, 2)\n self.set_latest_order_update_time_created(\n self.order,\n curr_time + timedelta(minutes=121))\n\n manager.process_order_time_limit(self.order,\n curr_time + timedelta(minutes=181))\n self.assertEqual(self.order.number_of_ejection_notices, 3)\n self.set_latest_order_update_time_created(\n self.order,\n curr_time + timedelta(minutes=181))\n\n manager.process_order_time_limit(self.order,\n curr_time + timedelta(minutes=211))\n self.assertEqual(self.order.number_of_ejection_notices, 4)\n self.set_latest_order_update_time_created(\n self.order,\n curr_time + timedelta(minutes=211))\n\n manager.process_order_time_limit(self.order,\n curr_time + timedelta(minutes=226))\n self.assertEqual(self.order.number_of_ejection_notices, 5)\n self.set_latest_order_update_time_created(\n self.order,\n curr_time + timedelta(minutes=226))\n\n manager.process_order_time_limit(self.order,\n curr_time + timedelta(minutes=230))\n self.assertEqual(self.order.number_of_ejection_notices, 5)\n\n manager.process_order_time_limit(self.order,\n curr_time + timedelta(minutes=241))\n self.assertEqual(self.order.number_of_ejection_notices, 5)\n self.assertEqual(self.order.status, Order.STATUS_CLOSED)", "def killQueueOrder(self):\n # CHECK ALL QUEUE ORDERS AND CANCEL ORDER IF GREATER THAN TWO HOURS OLD\n queue_orders = self.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n two_hours_ago = datetime.strptime(datetime.strftime(\n dt_central - timedelta(hours=2), \"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\")\n\n ten_minutes_ago = datetime.strptime(datetime.strftime(\n dt_central - timedelta(minutes=10), \"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\")\n\n for order in queue_orders:\n\n order_date = order[\"Date\"]\n\n order_type = order[\"Order_Type\"]\n\n id = order[\"Order_ID\"]\n\n forbidden = [\"REJECTED\", \"CANCELED\", \"FILLED\"]\n\n if two_hours_ago > order_date and (order_type == \"BUY\" or order_type == \"BUY_TO_OPEN\") and id != None and order[\"Order_Status\"] not in forbidden:\n\n # FIRST CANCEL 
ORDER\n resp = self.tdameritrade.cancelOrder(id)\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n other = {\n \"Symbol\": order[\"Symbol\"],\n \"Order_Type\": order[\"Order_Type\"],\n \"Order_Status\": \"CANCELED\",\n \"Strategy\": order[\"Strategy\"],\n \"Account_ID\": self.account_id,\n \"Aggregation\": order[\"Aggregation\"],\n \"Trader\": self.user[\"Name\"],\n \"Date\": getDatetime()\n }\n\n if self.asset_type == \"OPTION\":\n\n other[\"Pre_Symbol\"] = order[\"Pre_Symbol\"]\n\n other[\"Exp_Date\"] = order[\"Exp_Date\"]\n\n self.other.insert_one(other)\n\n self.queue.delete_one(\n {\"Trader\": self.user[\"Name\"], \"Symbol\": order[\"Symbol\"], \"Strategy\": order[\"Strategy\"], \"Asset_Type\": self.asset_type})\n\n self.logger.INFO(\n f\"CANCELED ORDER FOR {order['Symbol']} - TRADER: {self.user['Name']}\", True)\n\n # IF QUEUE ORDER DATE GREATER THAN 10 MINUTES OLD AND ORDER ID EQUALS NONE, SEND ALERT\n if ten_minutes_ago > order_date and order[\"Order_ID\"] == None and order[\"Account_ID\"] == self.account_id:\n\n if order[\"Symbol\"] not in self.no_ids_list:\n\n self.logger.ERROR(\n \"QUEUE ORDER ID ERROR\", f\"ORDER ID FOR {order['Symbol']} NOT FOUND - TRADER: {self.user['Name']} - ACCOUNT ID: {self.account_id}\")\n\n self.no_ids_list.append(order[\"Symbol\"])\n\n else:\n\n if order[\"Symbol\"] in self.no_ids_list:\n\n self.no_ids_list.remove(order[\"Symbol\"])", "def test91_GetFilledOrders(self):\n payload = PAYLOAD()\n payload['method'] = 'get_filled_orders'\n res = requests.post( url, data=json.dumps(payload), headers=headers).json()['result']\n self.assertEqual(len(res), 3)\n self.assertEqual(res[0]['receiving_address'], 'mrUedhEhZzbmdSbmd41CxoTZuTVgrwdL7p')\n self.assertEqual(res[0]['order_id'], 'DUMMY_ORD_2')\n sorted(res, key = lambda x: x['created_at'])\n payload['params']['timestamp'] = res[0]['created_at'] + 1\n res = requests.post( url, data=json.dumps(payload), headers=headers).json()['result']\n self.assertEqual(len(res), 2)" ]
[ "0.67804915", "0.66184914", "0.60975254", "0.60764414", "0.6066185", "0.5877417", "0.58634293", "0.58248085", "0.57850134", "0.57413775", "0.5655956", "0.5629255", "0.55018866", "0.5500083", "0.54759413", "0.54736453", "0.54555213", "0.544849", "0.5419155", "0.53935933", "0.5392713", "0.53889877", "0.5381565", "0.5374848", "0.5366832", "0.53077114", "0.5305447", "0.5244529", "0.52365327", "0.5235319", "0.5233781", "0.52286303", "0.5225701", "0.52164066", "0.5206676", "0.5206588", "0.5198936", "0.5181379", "0.5167191", "0.5161119", "0.515253", "0.51447225", "0.51401097", "0.5117479", "0.5112214", "0.50974524", "0.5063973", "0.505895", "0.5042581", "0.5040908", "0.5039887", "0.50369054", "0.50227517", "0.5022266", "0.5007137", "0.5001048", "0.50006586", "0.49994692", "0.49820498", "0.49818388", "0.49632666", "0.49478716", "0.4942151", "0.4939684", "0.49349177", "0.49278924", "0.49278924", "0.49278924", "0.49138713", "0.49122468", "0.49112624", "0.49098164", "0.4900834", "0.4898298", "0.48957175", "0.48910725", "0.48908573", "0.48837197", "0.48837197", "0.48837197", "0.48794144", "0.48791948", "0.48759243", "0.48757464", "0.4875592", "0.4874194", "0.486849", "0.48666322", "0.48613364", "0.48551267", "0.48462453", "0.4844603", "0.48401466", "0.48144037", "0.48127863", "0.48113513", "0.4809855", "0.48093224", "0.47992805", "0.47963077" ]
0.5067255
46
Change the order's status to be "cooking", which is selected by the id of the order
def cook_order(request):
    order_id = request.GET.get('order_id', 0)
    cs , status = CookStatus.objects.get_or_create(cook_name=request.user)
    if cs.current_order is None:
        cs.current_order = Order.objects.get(id=order_id)
        cs.current_order.status = 'cooking'
        cs.current_order.tikchen = request.user.username
        cs.current_order.save()
        cs.save()
    return HttpResponseRedirect("/staff/cook_order_list/")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def order_ready(request):\n\tcs , status = CookStatus.objects.get_or_create(cook_name=request.user)\n\tif cs.current_order is not None:\n\t\tcs.current_order.status = 'ready-to-serve'\n\t\tcs.current_order.save()\n\t\tcs.current_order = None\n\t\tcs.save()\n\n\treturn HttpResponseRedirect(\"/staff/cook_order_list/\")", "def mark_completed(self, order_id=None):\n self.status = \"paid\"\n if order_id and not self.order_id:\n self.order_id = order_id\n print(\"Order completed\")\n self.save()", "def change_status(id):\n query = \"\"\"UPDATE parcels SET status = %s WHERE id = %s\"\"\"\n tuple =('delivered' , id)\n db.insert(query, tuple)", "def update_specific_order(self,status,order_id):\n self.query = \"UPDATE orders SET order_status=%s WHERE order_id=%s\"\n self.input = (status,order_id) #tuple to support indexing\n self.query_1 = \"SELECT order_id FROM orders ORDER BY order_id DESC LIMIT 1.\"\n self.query_2 = \"SELECT * FROM orders WHERE order_id=%s\"\n self.input_2 = (order_id,) #tuple to support indexing\n self.event = \"admin_update_specific_order\"\n self.error = \"Invalid order id\"\n self.message = \"Successfully updated the order.\"\n self.order_id = order_id\n self.db_error = None", "def put(self, order_id):\n body = request.get_json()\n order = db.session.query(models.Order).filter_by(id=order_id).first()\n if order is None:\n return 'Order id not found', 400\n borrower = body.get('borrower')\n borrower = query_user_by_name(borrower)\n if borrower is None:\n return 'User does not exit in the system', 404\n # if invalid_user(borrower.username):\n # return 'Unauthorized user, please login as a user/borrower', 401\n copy_id = body.get('copy_id')\n print(body)\n print(copy_id)\n copy = db.session.query(models.Copy).filter_by(id=copy_id).first()\n if copy is None:\n return 'Copy ID {} not found in system'.format(copy_id), 409\n elif copy.id != copy_id and copy.status == BOOK_COPY_STATUS_UNAVAILABLE:\n return 'The copy of the book is not available', 400\n copy_owner = body.get('copy_owner')\n owner = query_user_by_name(copy_owner)\n if owner is None:\n return 'Copy owner not found in the system'.format(copy_owner), 409\n # return_date = body.get('return_date')\n # if datetime.strptime(return_date, \"%y%m%d\") < datetime.strptime(datetime.utcnow().strftime(\"%Y-%m-%d\"), \"%y%m%d\"):\n # return 'Return date should be later than today', 400\n status = body.get('order_status')\n if status is not None and status < 0 or status > 4:\n return 'Status should between 0-4', 400\n order.parse_body_status(body)\n copy = db.session.query(models.Copy).filter_by(id=order.copy).first()\n if order.status == ORDER_STATUS_COMPLETED or order.status == ORDER_STATUS_DECLINED:\n copy.status = BOOK_COPY_STATUS_AVAILABLE\n else:\n copy.status = BOOK_COPY_STATUS_UNAVAILABLE\n db.session.commit()\n return order.serialize(), 200", "def test_manager_change_order_status(self):\n self.client.force_authenticate(self.user)\n cancel = \"CA\"\n url = reverse('order-set_status', args=[self.order.id])\n resp = self.client.patch(url, data={\n \"status\": cancel\n })\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n new_status = Order.objects.values(\"status\").get(pk=self.order.id)\n self.assertEqual(new_status[\"status\"], cancel)\n\n with self.subTest('customer can not change order status'):\n self.user.role = get_user_model().CUSTOMER\n self.client.force_authenticate(self.user)\n resp = self.client.patch(url, data={\n \"status\": cancel\n })\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", 
"def UpdateStatus(self,pid):\n\t\tb1=Rents.objects.filter(paymentid_id=pid).first()\n\t\tamount=Payment.objects.filter(paymentid=pid).values('amount')\n\t\tb=b1.__dict__\n\t\tquant=b['quantity']\n\t\tbookid=b['bookid_id']\n\t\tprice=amount[0]['amount']/quant\n\t\t#price=float(\"{.2f}\".format(amount[0]['amount']))/float(\"{0:.2f}\".format(quant))\n\t\tRents.objects.filter(paymentid_id=pid).update(status='r')\n\t\tBook.objects.filter(bookid=bookid).update(quantity=F('quantity')+quant)\n\t\tStatus.objects.filter(ISBN=b['ISBN'],rentprice=price).update(quantity=F('quantity')+quant)\n\t\tUpload.objects.filter(owner_id_id=b['owner_id_id'],sellprice=price).update(qtyavailable=F('qtyavailable')+quant)\n\t\tself.notifyBuyer(b['ISBN'])", "def order_update_status():\n result = order_obj.order_update_status(request.forms) \n return result", "def cook_order_list(request):\n\tall_orders = Order.objects.all().order_by(\"-id\")\n\tcss = CookStatus.objects.filter(cook_name=request.user)\n\tcs = None\n\tcurrent_order = None\n\tif len(css) != 0:\n\t\tcs = css[0]\n\t\tif cs.current_order != None :\n\t\t\tcurrent_order = cs.current_order.menu_items.all()\n\n\tnew_orders = []\n\tfor order in all_orders:\n\t\ta = {}\n\t\ta['id'] = order.id\n\t\ta['status'] = order.status\n\n\t\ta['timespan'] = (datetime.datetime.utcnow().replace(tzinfo=utc) - order.timestamp_created).seconds\n\t\tcookofthis = CookStatus.objects.filter(current_order=order)\n\t\tif len(cookofthis) != 0:\n\t\t\ta['cookname'] = cookofthis[0].cook_name.username\n\t\telif order.tikchen != None:\n\t\t\ta['cookname'] = order.tikchen\n\n\t\tnew_orders.append(a)\n\n\n\treturn render_to_response('staff/cook_order_list.html', \n\t\t{'all_orders':new_orders, 'user':request.user, 'current_order':current_order})", "async def update_order_status():\n symbol = App.config[\"symbol\"]\n\n # Get currently active order and id (if any)\n order = App.order\n order_id = order.get(\"orderId\", 0) if order else 0\n if not order_id:\n log.error(f\"Wrong state or use: check order status cannot find the order id.\")\n return None\n\n # -----\n # Retrieve order from the server\n try:\n new_order = App.client.get_order(symbol=symbol, orderId=order_id)\n except Exception as e:\n log.error(f\"Binance exception in 'get_order' {e}\")\n return\n\n # Impose and overwrite the new order information\n if new_order:\n order.update(new_order)\n else:\n return None\n\n # Now order[\"status\"] contains the latest status of the order\n return order[\"status\"]", "def Cook(self, env, customer, cooking_time_type = 'fixed', manual_cook_time = None):\n with self.resource.request() as req:\n yield req #resource를 점유 해야 함.\n now_time = round(env.now , 1)\n req.info = [customer.name, now_time]\n if cooking_time_type == 'fixed':\n cooking_time = self.order_ready_time\n elif cooking_time_type == 'random':\n cooking_time = random.randrange(1,self.order_ready_time)\n elif cooking_time_type == 'uncertainty':\n cooking_time = customer.cook_time\n else:\n cooking_time = 0.001\n print('T :{} 가게 {}, {} 분 후 주문 {} 조리 완료'.format(int(env.now),self.name,cooking_time,customer.name))\n if manual_cook_time == None:\n yield env.timeout(cooking_time)\n else:\n yield env.timeout(manual_cook_time)\n print('T :{} 가게 {} 주문 {} 완료'.format(int(env.now),self.name,customer.name))\n customer.food_ready = True\n customer.ready_time = env.now\n self.ready_order.append(customer)", "def test_updating_item_status(self):\n #test original quantity\n self.assertEqual(self.supply1.quantity, 10)\n 
self.assertEqual(self.supply2.quantity, 10)\n \n modified_po = copy.deepcopy(base_purchase_order)\n modified_po['status'] = 'Received'\n modified_po['items'][0]['id'] = 1\n modified_po['items'][0]['status'] = 'Receieved'\n \n resp = self.client.put('/api/v1/purchase-order/1/',\n format='json',\n data=modified_po)\n \n self.assertEqual(resp.status_code, 200, msg=resp)\n \n po = resp.data\n \n self.assertEqual(Supply.objects.get(pk=1).quantity, 20)", "def set_order_status(self, status, orderid=0, envtype=0):\n if int(status) not in TRADE_CN.REV_ORDER_STATUS:\n error_str = ERROR_STR_PREFIX + \"the type of status is wrong \"\n return RET_ERROR, error_str\n\n if not TRADE_CN.check_envtype_cn(envtype):\n error_str = ERROR_STR_PREFIX + \"the type of environment param is wrong \"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(SetOrderStatusCN.cn_pack_req,\n SetOrderStatusCN.cn_unpack_rsp)\n\n # the keys of kargs should be corresponding to the actual function arguments\n kargs = {'cookie': str(self.cookie), 'envtype': str(envtype), 'localid': str(0),\n 'orderid': str(orderid), 'status': str(status)}\n\n ret_code, msg, set_order_list = query_processor(**kargs)\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n col_list = ['envtype', 'orderID']\n set_order_table = pd.DataFrame(set_order_list, columns=col_list)\n\n return RET_OK, set_order_table", "def status(self, id):", "def change_status(self, status, application_id):", "def make_coffee(self, order):\n for item in order.ingredients:\n self.resources[item] -= order.ingredients[item]\n print(f\"Here is your {order.name} ☕️. Enjoy!\")", "def test_update_order(self):\n response = self.api_test_client.put('{}/orders/1'.format(\n self.BASE_URL), json={'order_status': 'accepted'})\n\n self.assertEqual(response.status_code, 201)\n self.assertTrue(\n response_as_json(response)['order']['status_updated_on'])\n self.assertEqual(\n response_as_json(response)['order']['order_status'], 'accepted')", "def make_coffee(self, order):\n for item in order.ingredients:\n self.resources[item] -= order.ingredients[item]\n print(f\"Here is your {order.name} ☕️. 
Enjoy!\")", "def set_OrderStatus(self, value):\n super(ListOrdersInputSet, self)._set_input('OrderStatus', value)", "def return_item(self,reason):\n if reason == \"defective\":\n self.status = \"defective\"\n self.price = 0\n elif reason == \"unopened\":\n self.status = \"for sale\"\n else:\n self.status = \"used\"\n self.price -= (.20 * self.price)\n return self", "def update_order():", "def update_order():", "def onOrderSelected(self, item):\n if self.lstOrders.getMultiSelectedItems() == []:\n self.clearBidData()\n else:\n self.btnCancelOrder.enable()", "def update_order(self, order):\n order.order_id = self.order_id\n order.average_price = self.avg_execution_price\n order.symbol = self.symbol\n order.side = self.side\n order.type = self.order_type\n order.amount = self.original_amount\n order.price = self.price\n order.filled = self.executed_amount\n order.remaining = self.remaining_amount\n if self.is_cancelled:\n order.status = exchanges.Order.Status.CANCELLED\n elif self.is_live:\n order.status = exchanges.Order.Status.OPEN\n else:\n order.status = exchanges.Order.Status.CLOSED\n return order", "def test_order_update_status_function(self):\n order = OrderInfo.objects.create(user=self.create_test_user())\n self.assertIsInstance(order.ordered, datetime)\n self.assertIsNone(order.cooked)\n self.assertIsNone(order.delivered)\n\n order.update_current_state()\n self.assertIsInstance(order.cooked, datetime)\n self.assertIsNone(order.delivered)\n\n order.update_current_state()\n self.assertIsInstance(order.cooked, datetime)\n self.assertIsInstance(order.delivered, datetime)", "def onChange(self, customer, pizza_id):\n \n pizza_qty = self.vars[pizza_id].get()\n customer.ChangePizzaQTY(pizza_id, pizza_qty)\n self.showOrderPrice(customer.my_order)", "def fill_order(self, order: Order) -> None:\n order = self.get_order_by_id(order.id)\n order.status = OrderStatus.FILL", "def put(self):\n request_data = json.loads(request.data)\n print(request_data)\n order_id = request_data['order_id']\n status = request_data['status']\n MM.update_order_status(ObjectId(order_id), status)\n return {\"message\": \"Order Status Updated\"}, 200", "def onSelected(self, item):\n if not item:\n self.clearBidData()\n else:\n self.enableAddOrder()", "def finish_order(self, order_id):\n request_name = \"get_order_info\"\n\n orders = self.make_request(request_name, url_id=order_id)\n if orders is None:\n print(\"Unsuccessful updating order\")\n return\n order = orders[0]\n update_dict = dict()\n for key in order:\n if str(key)[0] == \"_\":\n continue\n try:\n update_dict[key.encode('utf-8')] = order[key].encode('utf-8')\n except AttributeError:\n update_dict[key.encode('utf-8')] = order[key]\n\n update_dict['status'] = 'Complete'\n resp = self.make_request('set_inventory_order', url_id=order_id, arguments=update_dict)", "def change_to_shopping(self):\n self.ids[\"tsk_btn\"].color = 1, 1, 1, 0.5", "def set_order_done():\n data = select_data_source()\n user = data['user']\n order_id = data['id']\n \n if check_user_permission(user) : return permission_denied_return\n \n db = database.getdb()\n \n ### Check if is valid.\n \n cmd = 'select passed from orders where id==\"{0}\"'.format(order_id)\n order_valid = db.execute(cmd).fetchall()[0][0]\n if order_valid == 0 :\n return finish_invalid_return\n \n ### Check if is done.\n cmd = 'select done from orders where id==\"{0}\"'.format(order_id)\n order_done = db.execute(cmd).fetchall()[0][0]\n if order_done != 0 :\n return finish_done_return\n \n ### All check done.\n ### Set it to 
done.\n cmd = 'update orders set done=1 where id==\"{0}\"'.format(order_id)\n db.execute(cmd)\n db.commit()\n print('user sets order {0} to be done.'.format(user))\n \n return finish_complete_return", "def update_status(self, id, status):\n sql = f\"UPDATE incidences SET status = \\'{status}\\'\\\n WHERE incidences.id = {id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()", "def test_admin_change_order_status(self):\n # Test unregistered id\n # Correct format but not there\n response = self.client.put(\n 'api/v1/parcels/35420', headers=self.admin_token_dict)\n data = json.loads(response.data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n data, {'message': 'No Parcel delivery order with that id'})\n # Test invalid format id\n response = self.client.put(\n 'api/v1/parcels/35uh420', headers=self.admin_token_dict) # Incorrect id format\n data = json.loads(response.data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data, {'message': 'Wrong id format'})", "def sell(self):\n self.status = \"sold\"\n return self", "def item_status(item_id):\n\n item_completed = request.form.get(\"item_completed\", \"off\")\n list_id = request.form[\"list_id\"]\n\n item_completed = item_completed == \"on\"\n\n to_do_item = ToDoItem.query.get(item_id)\n to_do_item.completed = item_completed\n db.session.commit()\n\n return redirect(f\"/lists/{list_id}\")", "def chase_order(self, order_id, side, avg, qty=None):\n sleep(1) # takes a second for order_id to register in bitmex trade engine\n last_price = avg\n max_chase_buy = float(avg) + float(self.strategy.chase)\n max_chase_sell = float(avg) - float(self.strategy.chase)\n self.logger.info(f'Chasing {side} order, initial price: {avg}, chase: {self.strategy.chase}, '\n f'Failsafe: {self.strategy.chase_failsafe} ')\n\n while True:\n # o = self.rest_open_order(orderID=order_id)\n o = self.ws_open_order(oid=order_id)\n if o:\n if side == 'Buy':\n if self.strategy.double_check or self.ws_restarting:\n quote = self.get_quote()\n self.logger.info('Bid: {} Ask: {}'.format(quote['bidPrice'], quote['askPrice']))\n _price = quote['bidPrice']\n else:\n _price = self.ws.get_ticker()['buy']\n\n self.logger.debug(\n f'Chasing buy order {order_id}, order_price: {avg}, last_price: {last_price}, current price: '\n f'{_price} max chase: {max_chase_buy}')\n if float(_price) <= float(max_chase_buy):\n if float(last_price) < float(_price):\n self.logger.info(f'Amending order {order_id} to price {_price}')\n try:\n ret = self.client.Order.Order_amend(orderID=order_id, price=_price).result()\n except Exception as fuck:\n self.logger.info(f'Error: {fuck}')\n else:\n self.logger.debug(ret)\n finally:\n last_price = _price\n else:\n self.logger.debug(f'Sleeping, order_price: {last_price}, current price: {_price}')\n if self.strategy.double_check:\n sleep(0.5)\n\n else:\n if self.strategy.chase_failsafe:\n self.logger.info(f'Price {_price} exceeded max chase {max_chase_buy}, buying market.')\n self.client.Order.Order_cancelAll(symbol=self.strategy.symbol).result()\n self.execute_order(oq=qty, ot='market', text='Chase failsafe market long order')\n else:\n self.logger.info(f'Price {_price} exceeded max chase {max_chase_buy}, giving up.')\n break\n elif side == 'Sell':\n if self.strategy.double_check or self.ws_restarting:\n quote = self.get_quote()\n self.logger.info('Bid: {} Ask: {}'.format(quote['bidPrice'], quote['askPrice']))\n _price = quote['askPrice']\n else:\n _price = self.ws.get_ticker()['sell']\n\n 
self.logger.debug(\n f'Chasing sell order {order_id}, order_price: {avg}, last_price: {last_price}, current price: '\n f'{_price} max chase: {max_chase_sell}')\n if float(_price) >= float(max_chase_sell):\n if float(last_price) > float(_price):\n self.logger.info(f'Amending order {order_id} to price {_price} ')\n try:\n ret = self.client.Order.Order_amend(orderID=order_id, price=_price).result()\n except Exception as fuck:\n self.logger.info(f'Error: {fuck}')\n else:\n self.logger.debug(ret)\n finally:\n last_price = _price\n else:\n self.logger.debug(f'Sleeping, order_price: {last_price}, current price: {_price}')\n if self.strategy.double_check:\n sleep(0.5)\n\n else:\n if self.strategy.chase_failsafe:\n self.logger.info(f'Price {_price} exceeded max chase {max_chase_sell}, selling market.')\n self.client.Order.Order_cancelAll(symbol=self.strategy.symbol).result()\n self.execute_order(oq=qty, ot='market', text='Chase failsafe market short order')\n else:\n self.logger.info(f'Price {_price} below max chase {max_chase_sell}, giving up.')\n break\n else:\n self.logger.info('Order Filled')\n break", "def change_status(self):\n if self.status == \"Still Loaned\":\n self.status = \"Given Back\"\n else:\n self.status = \"Still Loaned\"", "def api_mark_order_ready(request, id):\n\n close_old_connections()\n \n # Not marking it as served if it isn't even ready yet.\n if not request.user.is_authenticated:\n return HttpResponseForbidden(\"You're not authenticated.\")\n \n # Get the order.\n order = Order.objects.get(id=id)\n \n # Mark the order as served and save it.\n order.ready = not order.ready\n order.save()\n\n close_old_connections()\n\n return HttpResponse('Marked as ready')", "def set_add_dispute_status(self, status):\n self.single_selection_from_kendo_dropdown(self.add_dispute_status_kendo_dropdown_locator, status)\n self.wait_for_ajax_spinner_load()", "def mark_refunded(self):\n order = self.clone()\n order.status = Order.STATUS_REFUNDED\n order.save()\n return order", "def status_update(request, id=None):\n #obj = Todo.objects.all()\n user = request.user if request.user.is_authenticated else None\n Todo.objects.filter(id=id).update(mark_done=True, answered_by= user)\n return redirect('lists:alllist')", "def order(self, id, long, qty, limit=0, stop=0, post_only=False, reduce_only=False, trailing_stop=0, activationPrice=0, when=True):\n self.__init_client()\n\n # if self.get_margin()['excessMargin'] <= 0 or qty <= 0:\n # return\n\n if not when:\n return\n\n side = \"BUY\" if long else \"SELL\"\n ord_qty = qty\n\n order = self.get_open_order(id)\n ord_id = id + ord_suffix() #if order is None else order[\"clientOrderId\"]\n\n if order is None:\n self.__new_order(ord_id, side, ord_qty, limit, stop, post_only, reduce_only, trailing_stop, activationPrice)\n else:\n self.__new_order(ord_id, side, ord_qty, limit, stop, post_only, reduce_only, trailing_stop, activationPrice)\n #self.__amend_order(ord_id, side, ord_qty, limit, stop, post_only)\n return", "def set_status(self, scenario_id, status):\n self.cur.execute(\n \"UPDATE execute_list SET status = %s WHERE id = %s\",\n (status, scenario_id),\n )", "def update_orders(comp, order, user_correct, payment_id):\n users_orders = []\n for item in order.items.all():\n users_orders.append(item.id)\n item.is_paid = True\n item.save()\n order.related_competition = comp\n order.payment_id = payment_id\n order.order_date = timezone.now()\n order.answer_correct = user_correct\n order.ordered = True\n order.save()\n return order", "def prepare_order(self, 
index, order_status):\n if(self.running_qty > 0 and index > 0):\n quantity = self.running_qty\n price = self.get_price_offset3(index)\n elif(self.running_qty < 0 and index < 0):\n quantity = abs(self.running_qty)\n price = self.get_price_offset3(index)\n else:\n quantity = self.ORDER_START_SIZE // 4\n price = self.get_price_offset2(index)\n if (price == None):\n return None\n else:\n return {'price': price, 'orderQty': quantity, 'side': \"Buy\" if index < 0 else \"Sell\"}", "def test_an_order_can_be_edited(self):\n\n\t\tnew_data = {\n\t\t\t\t\t\"owner\": \"Pemwa\",\n\t\t\t\t\t\"meal_name\": \"Burger\",\n\t\t\t\t\t\"quantity\": 8\n\t\t\t\t\t }\n\n\t\tres = self.login_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().put(\n\t\t\t'/api/v2/orders/1',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tnew_data), content_type = 'application/json')\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(response.status_code, 200)", "def mark_ready_for_review(self, user: User) -> None:\n from .exceptions import OperationForbiddenError, OrderEmptyError\n\n # If order is not in the \"CREATED\" state, raise an\n # OperationForbiddenError\n if not self.is_created:\n raise OperationForbiddenError(\n self.STATE_CHANGE_FORBIDDEN_ERROR_MSG % {\n 'current_state': Order.OrderState.get_choice_display(\n self.state\n ),\n 'new_state': Order.OrderState.PENDING.choice_display\n }\n )\n\n # If the order's item list is empty, raise an OrderEmptyError\n if not self.orderitem_set.exists():\n raise OrderEmptyError(\n self,\n 'An order should contain at least one Order item before it '\n 'can be marked as \"PENDING\".'\n )\n\n # Update the order to \"PENDING\" state\n self.update(user, state=Order.OrderState.PENDING.choice_value)", "def update_after_pick(self, item_id):\n request_name = \"get_shop_info\"\n items = self.make_request(request_name, url_id=item_id)\n update_dict = dict()\n for key in items[0]:\n if str(key)[0] == \"_\":\n continue\n try:\n update_dict[key.encode('utf-8')] = items[0][key].encode('utf-8')\n except AttributeError:\n update_dict[key.encode('utf-8')] = items[0][key]\n\n update_dict['quantity'] -= 1\n resp = self.make_request('set_shop', url_id=item_id, arguments=update_dict)", "def test_change_order_status_when_order_does_not_exist(self):\n response = self.api_test_client.put('{}/orders/1000'.format(\n self.BASE_URL), json={'order_status': 'accepted'})\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response_as_json(\n response)['message'],\n 'Order with id 1000 not found')", "def set_status(trades, status):\n acm.BeginTransaction()\n try:\n for trade in trades:\n msg = \"Changing status on trade {0} ({1}) to {2}\"\n print(msg.format(trade.Oid(), trade.Instrument().Name(), status))\n trade.Status(status)\n trade.Commit()\n acm.CommitTransaction()\n print(\"Statuses successfully changed\")\n except Exception as ex:\n print(\"Failed to change statuses on pswap trades: {0}\".format(ex))\n acm.AbortTransaction()", "def purchase_item(self):\r\n self.purchased_callback()\r\n self.status = 'purchased'\r\n self.fulfilled_time = datetime.now(pytz.utc)\r\n self.save()", "def setComicStatus(self, handles, status):\n db.execute(\"update comics 
set status=%s where handle in ('\" + \"', '\".join(handles) + \"')\", status)", "def update_order_state(self, orderid, new_state):\n\n cur.execute(\"\"\"UPDATE orders SET state = ? WHERE orderid = ?\"\"\",\n (new_state, orderid))", "def setState(self, session, state_id, order_item_id=None, batch_id=None,\n product_item_id=None):\n orders = []\n for wi in self.getWorkItems(session, order_item_id, \n batch_id, product_item_id):\n if batch_id is None and state_id == State.INPROGRESS and \\\n self.activities.get(wi.activity_id)['entity_type'] == 'batch':\n return self.setState(session, state_id, batch_id=wi.batch_id)\n wi.state_id = state_id\n orders.append(wi.order_item_id)\n session.add(wi)\n return orders", "def change_order(self, price, qty, orderid=0, envtype=0):\n if not TRADE_CN.check_envtype_cn(envtype):\n error_str = ERROR_STR_PREFIX + \"the type of environment param is wrong \"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(ChangeOrderCN.cn_pack_req,\n ChangeOrderCN.cn_unpack_rsp)\n\n # the keys of kargs should be corresponding to the actual function arguments\n kargs = {'cookie': str(self.cookie), 'envtype': str(envtype), 'localid': str(0),\n 'orderid': str(orderid), 'price': str(price), 'qty': str(qty)}\n\n ret_code, msg, change_order_list = query_processor(**kargs)\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n col_list = ['envtype', 'orderID']\n change_order_table = pd.DataFrame(change_order_list, columns=col_list)\n\n return RET_OK, change_order_table", "async def update(self, *args, **kwargs):\n if not self.__bought:\n random_stock = 1\n stock_price = self.priceindicator[random_stock].price\n if stock_price != 0:\n random_const = float(decimal.Decimal(random.randrange(-5,5))/100)\n stock_price = stock_price + stock_price*random_const\n stock_price = int(stock_price)\n await self.place_buy_order(random_stock, self.settings[\"stocks_per_company\"], stock_price, 1)\n log_message = \"StockBuyerBot(\" + self.name + \") bought \" + str(random_stock)\n print(log_message)\n else:\n log_message = \"StockBuyerBot(\" + self.name + \") bought nothing\"\n print(log_message)\n self.add_to_log(self.id, log_message)", "def update_request_status(self, requestID, ISBN, quantity, approved):\n if approved:\n self.cursor.execute(\"\"\"SELECT orderNumber FROM returnrequest WHERE requestID=%s\"\"\", (requestID,))\n orderNumber = self.cursor.fetchone()[0]\n self.cursor.execute(\"\"\"UPDATE returnrequest SET status='APPROVED' WHERE requestID=%s\"\"\", (requestID,))\n self.cursor.execute(\"\"\"SELECT quantity FROM productof WHERE orderNumber=%s AND ISBN=%s\"\"\",\n (orderNumber, ISBN))\n remaining_books_ordered = self.cursor.fetchone()[0] - int(quantity)\n if not int(remaining_books_ordered):\n self.cursor.execute(\"\"\"DELETE FROM productof WHERE orderNumber=%s AND ISBN=%s\"\"\", (orderNumber, ISBN))\n else:\n self.cursor.execute(\"\"\"UPDATE productof SET quantity=quantity-%s WHERE orderNumber=%s AND ISBN=%s\"\"\",\n (quantity, orderNumber, ISBN))\n self.cursor.execute(\"\"\"UPDATE book SET stock=stock+%s WHERE ISBN=%s\"\"\", (quantity, ISBN))\n self.db.commit()\n if self.is_empty_order(orderNumber):\n self.cursor.execute(\"\"\"DELETE FROM orderlog WHERE orderNumber=%s\"\"\", (orderNumber,))\n else:\n self.cursor.execute(\"\"\"UPDATE returnrequest SET status='DENIED' WHERE requestID=%s\"\"\", (requestID,))\n self.db.commit()", "def kitchenConfirm(self, id, eta=0):\n if(eta == 0):\n json = None\n else: \n json = {\"eta\": eta}\n return 
self.__insertOrderHistory(id, \"kitchenConfirmed\", json)", "def food_selected(self, arg):\n\t\tfood = fooditemdao.retrieve_food(self.selected_food.get())\n\t\tself.lbl_unit.config(text=food.info['unit'])", "def change_oakhaven_pswap_trades(status):\n print(\"Processing pswaps for OAKHAVEN\")\n portfolio = acm.FPhysicalPortfolio[\"PB_PSWAP_OAKHAVEN_CR\"]\n trades = [t for t in portfolio.Trades() if \"OLD\" in t.Instrument().Name()]\n set_status(trades, status)", "def updatestatus(id, status):\n username = os.getlogin()\n res = requests.put('{}update/{}/'.format(base_url, id),\n data={\"keyword_fetching_status\": status, \"user_fetched\": username})\n res = res.json()\n return res", "def ord_status(self, uid):\n # TODO: use ticker to select market orderbook\n return self.mkt.get(uid)", "def _act_task_checked(self, iden, b):\n if b:\n self.data.turn_on(iden)\n else:\n self.data.turn_off()", "def test_update_an_order(self):\n order = PizzaOrder.objects.get(id=self.response.data['id'])\n updated_order = {\"customer\": {\n \"first_name\": \"Lara\",\n \"last_name\": \"Tanbari\",\n \"address\": \"Coppistr22, 10365 Berlin\"\n },\n \"size\": \"SMALL\"\n }\n res = self.client.put(\n reverse('order_details',\n kwargs={'order_id': order.id}),\n updated_order,\n format='json'\n )\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def _set_status(self):\n result = self._get_status()\n if result and result[0]['state'] == 'aborted':\n raise Exception(\"Aborted because the status flag is set to 'aborted' in dynamodb\")\n\n # record the status\n self.status['timestamp'] = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n self.db_handler.update_item({'api_version': TsV2CatalogHandler.api_version}, self.status)", "def mark_completed(self,status):\r\n if status == \"r\":\r\n self.status = \"c\"#It is to test the mark complete function in the test_book.py, otherwise this program works fine in the main.py\r\n return True\r\n elif status == \"c\":\r\n return False", "def set_cancelled_order(self):\n self.set_values(\n start_phrase='Cancelled Orders',\n end_phrase=None,\n start_with=2,\n end_until=-1,\n prop_keys=self.cancelled_order_keys,\n prop_name='cancelled_order'\n )\n\n self.cancelled_order = map(self.del_empty_keys, self.cancelled_order)\n self.fillna_dict_with_exists(\n self.cancelled_order,\n 'time_cancelled',\n ('time_cancelled', 'spread', 'order', 'tif', 'status')\n )\n self.replace_nan(self.cancelled_order)\n\n self.convert_type(self.cancelled_order, 'time_cancelled', self.convert_datetime, 0)\n self.convert_type(self.cancelled_order, 'quantity', int, 0)\n self.convert_type(self.cancelled_order, 'strike', float, 0.0)\n self.convert_type(self.cancelled_order, 'price', float, 0.0)\n self.convert_type(self.cancelled_order, 'expire_date', str, '')", "def id_status(self, id_status):\n self._id_status = id_status", "def onCancelOrder(self, item):\n self.frame.mode.cancelIndustryOrder(self.lstOrders.getMultiSelectedItems(), self.mySystemDict['id'])", "def switch():\n\n if request.method == 'POST':\n id = request.args['id']\n\n cur = db.get_db().cursor()\n\n cur.execute(\n 'UPDATE todos SET completed=(%s)'\n 'WHERE id = (%s)',\n ('True', id))\n\n g.db.commit()\n\n cur.close()\n\n return redirect(url_for('todos.index'))", "def update_status(request_id, status):\n pass", "def kitchenComplete(self, id):\n return self.__insertOrderHistory(id, \"kitchenComplete\", {})", "def settle_self(self):\n self.state = 'completed'\n self.save()\n self.safe_post()", "def slot_user_order(self, dummy_sender, data):\r\n (price, 
volume, typ, oid, status) = data\r\n found = False\r\n removed = False # was the order removed?\r\n opened = False # did the order change from 'post-pending' to 'open'\"?\r\n voldiff = 0 # did the order volume change (full or partial fill)\r\n if \"executing\" in status:\r\n # don't need this status at all\r\n return\r\n if \"post-pending\" in status:\r\n # don't need this status at all\r\n return\r\n if \"removed\" in status:\r\n for i in range(len(self.owns)):\r\n if self.owns[i].oid == oid:\r\n order = self.owns[i]\r\n\r\n # work around MtGox strangeness:\r\n # for some reason it will send a \"completed_passive\"\r\n # immediately followed by a \"completed_active\" when a\r\n # market order is filled and removed. Since \"completed_passive\"\r\n # is meant for limit orders only we will just completely\r\n # IGNORE all \"completed_passive\" if it affects a market order,\r\n # there WILL follow a \"completed_active\" immediately after.\r\n if order.price == 0:\r\n if \"passive\" in status:\r\n # ignore it, the correct one with\r\n # \"active\" will follow soon\r\n return\r\n\r\n self.debug(\r\n \"### removing order %s \" % oid,\r\n \"price:\", self.gox.quote2str(order.price),\r\n \"type:\", order.typ)\r\n\r\n # remove it from owns...\r\n self.owns.pop(i)\r\n\r\n # ...and update own volume cache in the bids or asks\r\n self._update_level_own_volume(\r\n order.typ,\r\n order.price,\r\n self.get_own_volume_at(order.price, order.typ)\r\n )\r\n removed = True\r\n break\r\n else:\r\n for order in self.owns:\r\n if order.oid == oid:\r\n found = True\r\n self.debug(\r\n \"### updating order %s \" % oid,\r\n \"volume:\", self.gox.base2str(volume),\r\n \"status:\", status)\r\n voldiff = volume - order.volume\r\n opened = (order.status != \"open\" and status == \"open\")\r\n order.volume = volume\r\n order.status = status\r\n break\r\n\r\n if not found:\r\n # This can happen if we added the order with a different\r\n # application or the gox server sent the user_order message\r\n # before the reply to \"order/add\" (this can happen because\r\n # actually there is no guarantee which one arrives first).\r\n # We will treat this like a reply to \"order/add\"\r\n self.add_own(Order(price, volume, typ, oid, status))\r\n\r\n # The add_own() method has handled everything that was needed\r\n # for new orders and also emitted all signals already, we\r\n # can immediately return here because the job is done.\r\n return\r\n\r\n # update level own volume cache\r\n self._update_level_own_volume(\r\n typ, price, self.get_own_volume_at(price, typ))\r\n\r\n # We try to help the strategy with tracking the orders as good\r\n # as we can by sending different signals for different events.\r\n if removed:\r\n reason = self.gox.msg[\"user_order\"][\"reason\"]\r\n self.signal_own_removed(self, (order, reason))\r\n if opened:\r\n self.signal_own_opened(self, (order))\r\n if voldiff:\r\n self.signal_own_volume(self, (order, voldiff))\r\n self.signal_changed(self, None)\r\n self.signal_owns_changed(self, None)", "def change_status(self):\n if self.status == 'in progress':\n self.status = 'done'\n return self.status\n elif self.status == 'done':\n self.status = 'in progress'\n self.eisenhower_priority()\n return self.status", "def onOrderSelected(self, item):\n if self.lstOrders.getMultiSelectedItems() == []:\n self.btnCancelOrder.disable()\n else:\n self.enableButtons(self.lstOrders, [self.btnCancelOrder])", "def triggerPrintOrderStatus(self, prompt):\n if prompt is not None:\n split_prompt = prompt.split(':')\n truck_id = 
int(split_prompt[1].lstrip())\n print(f'The following status of {truck_id} orders:')\n self._printOrderStatus(truck_id)", "async def update_trade_status():\n # GET /api/v3/openOrders - get current open orders\n # GET /api/v3/allOrders - get all orders: active, canceled, or filled\n\n symbol = App.config[\"symbol\"]\n\n # -----\n try:\n open_orders = App.client.get_open_orders(symbol=symbol) # By \"open\" orders they probably mean \"NEW\" or \"PARTIALLY_FILLED\"\n # orders = App.client.get_all_orders(symbol=symbol, limit=10)\n except Exception as e:\n log.error(f\"Binance exception in 'get_open_orders' {e}\")\n return\n\n if not open_orders:\n # -----\n await update_account_balance()\n\n last_kline = App.analyzer.get_last_kline(symbol)\n last_close_price = to_decimal(last_kline[4]) # Close price of kline has index 4 in the list\n\n base_quantity = App.base_quantity # BTC\n btc_assets_in_usd = base_quantity * last_close_price # Cost of available BTC in USD\n\n usd_assets = App.quote_quantity # USD\n\n if usd_assets >= btc_assets_in_usd:\n App.status = \"SOLD\"\n else:\n App.status = \"BOUGHT\"\n\n elif len(open_orders) == 1:\n order = open_orders[0]\n if order.get(\"side\") == SIDE_SELL:\n App.status = \"SELLING\"\n elif order.get(\"side\") == SIDE_BUY:\n App.status = \"BUYING\"\n else:\n log.error(f\"Neither SELL nor BUY side of the order {order}.\")\n return None\n\n else: # Many orders\n log.error(f\"Wrong state. More than one open order. Fix manually.\")\n return None", "def completed(request):\n order_id = ''\n try:\n order_id = request.session['order_id']\n except:\n pass\n if order_id != '':\n auth = HTTPBasicAuth(klarna_un, klarna_pw)\n headers = {'content-type': 'application/json'}\n response = requests.get(\n settings.KLARNA_BASE_URL + '/checkout/v3/orders/' +\n order_id,\n auth=auth,\n headers=headers,\n )\n klarna_order = response.json()\n order = Order(\n order_id=klarna_order['order_id'],\n status=klarna_order['status'],\n given_name=klarna_order['billing_address']['given_name'],\n family_name=klarna_order['billing_address']['family_name'],\n email=klarna_order['billing_address']['email'],\n phone_number=klarna_order['billing_address']['phone'],\n country=klarna_order['billing_address']['country'],\n postcode=klarna_order['billing_address']['postal_code'],\n town_or_city=klarna_order['billing_address']['city'],\n street_address1=klarna_order['billing_address']['street_address'],\n order_total=klarna_order['order_amount'],\n klarna_line_items=klarna_order['order_lines']\n )\n order.save()\n request.session['cart'] = {}\n request.session['order_id'] = ''\n \n context = {\n 'klarna_order': klarna_order\n }\n\n return render(request, 'checkout/completed.html', context)\n else:\n return redirect(reverse(view_cart))", "def set_vendor_price_list_status(self, status_items):\n self.multiple_items_selection_from_kendo_dropdown(self.vendor_price_list_status_kendo_dropdown_locator, status_items)\n self.wait_for_ajax_spinner_load()", "async def on_order_completed(self, order_id: str):\n self._orders = list(filter(lambda order: order['id'] != order_id, self._orders))", "def checkout(request):\n\n\n auth = HTTPBasicAuth(klarna_un, klarna_pw)\n headers = {'content-type': 'application/json'}\n cart = request.session.get('cart')\n total = 0\n orderlines = []\n order_id = 0\n try:\n order_id = request.session['order_id']\n except:\n pass\n for item in cart:\n product = utils.get_product(item)\n orderlines.append({\n 'name': product[1].name,\n 'reference': product[1].id,\n 'unit_price': 
int(product[1].price * 100),\n 'quantity': int(cart[item]),\n 'tax_rate': int(00),\n 'total_amount': int(product[1].price * cart[item] * 100),\n \"total_tax_amount\": 0\n })\n total += product[1].price * cart[item] * 100\n integer_total = int(total)\n if order_id:\n response = requests.get(\n settings.KLARNA_BASE_URL + '/checkout/v3/orders/' +\n order_id,\n auth=auth,\n headers=headers,\n )\n\n klarna_order = response.json()\n if klarna_order['order_lines'] == orderlines:\n context = {\n 'klarna_order': klarna_order\n\n }\n return render(request, 'checkout/checkout.html', context)\n else:\n body = {\n \"purchase_country\": \"se\",\n \"purchase_currency\": \"eur\",\n \"locale\": \"en-GB\",\n \"order_amount\": integer_total,\n \"order_tax_amount\": 0,\n \"order_lines\": orderlines,\n \"merchant_urls\": {\n \"terms\": \"https://beerbrewing-supply.herokuapp.com\" + \"/checkout/terms\",\n \"checkout\": \"https://beerbrewing-supply.herokuapp.com\" + \"/checkout/completed\",\n \"confirmation\": \"https://beerbrewing-supply.herokuapp.com\" + \"/checkout/completed\",\n \"push\": \"https://beerbrewing-supply.herokuapp.com\" + \"/orders/register_order?sid={checkout.order.id}\"\n },\n \"shipping_options\": [\n {\n \"id\": \"free_shipping\",\n \"name\": \"Free Shipping\",\n \"description\": \"Delivers in 5-7 days\",\n \"price\": 0,\n \"tax_amount\": 0,\n \"tax_rate\": 0,\n \"preselected\": True,\n \"shipping_method\": \"Home\"\n },\n {\n \"id\": \"pick_up_store\",\n \"name\": \"Pick up at closest store\",\n \"price\": 399,\n \"tax_amount\": 0,\n \"tax_rate\": 0,\n \"preselected\": False,\n \"shipping_method\": \"PickUpStore\"\n }\n ]\n }\n data = json.dumps(body)\n response = requests.post(\n settings.KLARNA_BASE_URL + '/checkout/v3/orders/' +\n order_id,\n auth=auth,\n headers=headers,\n data=data)\n\n klarna_order = response.json()\n context = {\n 'klarna_order': klarna_order\n }\n return render(request, 'checkout/checkout.html', context)\n else:\n cart = request.session.get('cart')\n total = 0\n orderlines = []\n for item in cart:\n product = utils.get_product(item)\n orderlines.append({\n 'name': product[1].name,\n 'product_id': product[1].id,\n 'unit_price': int(product[1].price * 100),\n 'quantity': int(cart[item]),\n 'tax_rate': int(00),\n 'total_amount': int(product[1].price * cart[item] * 100),\n \"total_tax_amount\": 0\n\n })\n total += product[1].price * cart[item] * 100\n\n integer_total = int(total)\n body = {\n \"purchase_country\": \"se\",\n \"purchase_currency\": \"eur\",\n \"locale\": \"en-GB\",\n \"order_amount\": integer_total,\n \"order_tax_amount\": 0,\n \"order_lines\": orderlines,\n \"merchant_urls\": {\n \"terms\": \"https://beerbrewing-supply.herokuapp.com\" + \"/checkout/terms\",\n \"checkout\": \"https://beerbrewing-supply.herokuapp.com\" + \"/checkout/completed\",\n \"confirmation\": \"https://beerbrewing-supply.herokuapp.com\" + \"/checkout/completed\",\n \"push\": \"https://beerbrewing-supply.herokuapp.com\" + \"/orders/register_order?sid={checkout.order.id}\"\n },\n \"shipping_options\": [\n {\n \"id\": \"free_shipping\",\n \"name\": \"Free Shipping\",\n \"description\": \"Delivers in 5-7 days\",\n \"price\": 0,\n \"tax_amount\": 0,\n \"tax_rate\": 0,\n \"preselected\": True,\n \"shipping_method\": \"Home\"\n },\n {\n \"id\": \"pick_up_store\",\n \"name\": \"Pick up at closest store\",\n \"price\": 399,\n \"tax_amount\": 0,\n \"tax_rate\": 0,\n \"preselected\": False,\n \"shipping_method\": \"PickUpStore\"\n }\n ]\n }\n data = json.dumps(body)\n response = 
requests.post(\n settings.KLARNA_BASE_URL + '/checkout/v3/orders',\n auth=auth,\n headers=headers,\n data=data)\n\n klarna_order = response.json()\n context = {\n 'klarna_order': klarna_order\n\n }\n order_id = klarna_order['order_id']\n request.session['order_id'] = order_id\n\n return render(request, 'checkout/checkout.html', context)", "def checked_classified(self, order):\n assert (order.get_status() is OrderStatus.Created)\n assert (order.direction is not Direction.Cancel)\n if order.exec_type not in [Exectype.Market, Exectype.Stop]:\n assert (order.price is not None)\n check_left = np.round(order.price / self.table.tick_size, self.tick_size_decimals)\n check_right = np.round(order.price / self.table.tick_size, self.tick_size_decimals)\n assert check_left == check_right\n # Check expiration\n order = self.checked_expired(order)\n if order.get_status() is OrderStatus.Expired:\n self.debug(\"Order expired: order.reject()\")\n order.reject(self.table.get_current_time())\n else:\n if not self.table.allow_duplicated_ids and order.m_orderId in self.queue_observer.get_order_ids():\n self.debug(\"Order implies duplicated id: order.reject()\")\n order.reject(self.table.get_current_time())\n else:\n self.debug(\"Order can be accepted: order.accept()\")\n order.accept(self.table.get_current_time())\n return order", "def order_w_order_id(order_id):\n # Megnyutjuk a kapcsolatot\n conn = get_db()\n try:\n # Keszitunk egy cursort\n cur = conn.cursor()\n try:\n # Ezt a parameteres SQL lekerdezest hajtjuk vegre, mellyel megkapjuk az adott\n # order_id-ju megrendelest.\n cur.execute('SELECT description, vehicle_type, quantity, origin, destination,' +\n ' order_date, deadline_date, comment_text FROM orders WHERE' +\n ' order_id = :order_id', order_id=order_id)\n # Ebben a valtozoban lesz az eredmenytabla egyetlen\n # sora (Biztosan 1 lesz, mert az order_id egyedi)\n result = cur.fetchone()\n # Ha nem talaltunk ilyen megrendelest, szolunk a felhasznalonak\n if result is None:\n abort(404)\n else:\n # 2. 
feladat - lekerdezzuk az adott orszag valutajat\n #\n # Az origin illetve destination mezokben megkeressuk az orszag betujelet\n # Ez mindig a string vegen, ( es ) jelek kozott allo 2 betu.\n # Mivel ezek nagybetuvel irodtak at kell konvertalnunk kisbeture.\n # Ezek futtatjuk a kerest, majd a kapott eredmenyt JSON formatumra parsoljuk.\n # Ebbol kiolvassuk a valuta erteket, amit majd atadunk a kimeneti mezonknek.\n origin001 = result[3]\n origin_len = len(origin001)\n origin_tmp = origin001[origin_len-3:origin_len-1]\n origin_url = \"http://rapid.eik.bme.hu:9080/currency_ws/currencies/\" + origin_tmp.lower() + \".json\"\n r1 = requests.get(origin_url)\n var1 = r1.json()\n origin_currency = var1['currency']\n \n destination001 = result[4]\n destination_len = len(destination001)\n destination_tmp = destination001[destination_len-3:destination_len-1]\n destination_url = \"http://rapid.eik.bme.hu:9080/currency_ws/currencies/\" + destination_tmp.lower() + \".json\"\n r2 = requests.get(destination_url)\n var2 = r2.json()\n destination_currency = var2['currency']\n # Visszaterunk a JSON formatumu dictionary-vel,\n # ami mindent a megfelelo formatumban tarol\n return jsonify({\"description\": result[0],\n \"vehicle_type\": result[1],\n \"quantity\": result[2],\n \"origin\": result[3],\n \"destination\": result[4],\n \"order_date\": result[5].date().isoformat(),\n \"deadline_date\": result[6].date().isoformat(),\n \"comment_text\": result[7],\n \"origin_currency\": origin_currency,\n\"destination_currency\": destination_currency})\n finally:\n cur.close()\n finally:\n conn.close()", "def cancel_a_parcel(id):\n query = \"\"\"UPDATE parcels SET status = %s WHERE id = %s\"\"\"\n tuple =('canceled' , id)\n db.insert(query, tuple)", "def on_trade_order_check(self, orderid, envtype, status):\n if is_cntrade_order_status_finish(status):\n self._obj_order_sub.del_val(orderid=orderid, envtype=envtype)\n elif (not self._obj_order_sub.has_val(orderid, envtype)) and self._obj_order_sub.has_val(u'', envtype):\n self._obj_order_sub.add_val(orderid, envtype) # record info for subscribe order u''", "def order_id(self, order_id):\n\n self._order_id = order_id", "def order_id(self, order_id):\n\n self._order_id = order_id", "def order_id(self, order_id):\n\n self._order_id = order_id", "def anche_cliente(self, request, pk=None):\n fornitore = get_object_or_404(Entita, pk=pk)\n fornitore.is_client = True\n fornitore.save()\n serializer = ClienteSerializer(fornitore)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def activate(self, id):\n self.db.commit([\n 'UPDATE comments SET',\n ' mode=1',\n 'WHERE id=%s AND mode=2'], (id, ))", "async def on_order_updated(self, order: MetatraderOrder):\n for i in range(len(self._orders)):\n if self._orders[i]['id'] == order['id']:\n self._orders[i] = order\n break\n else:\n self._orders.append(order)", "def id_status_conta(self, id_status_conta):\n self._id_status_conta = id_status_conta", "def _update_status(self):\n self._db_update({'status': self.status})", "def descricao_status_cartao(self, descricao_status_cartao):\n self._descricao_status_cartao = descricao_status_cartao", "def update_order(request):\n order = request.session.get('order', {})\n quantity = int(request.POST.get('quantity'))\n sizeID = str(request.POST.get('sizeID'))\n colorID = str(request.POST.get('colorID'))\n productID = str(request.POST.get('productID'))\n order_item_identifier = productID + \" \" + colorID + \" \" + sizeID\n if quantity > 0:\n order[order_item_identifier] = quantity\n 
print(str(order_item_identifier))\n else:\n order.pop(order_item_identifier)\n request.session['order'] = order\n return redirect(reverse('orders'))", "def set_status(self, status):\n # TODO log to db\n self.status = status", "def make_active(self, request, queryset):\n queryset.update(is_active=True)", "def set_status(self, status):\n self.status = status" ]
[ "0.63635933", "0.586669", "0.5858225", "0.58000624", "0.56946445", "0.5581705", "0.55623066", "0.55381656", "0.5464021", "0.5398148", "0.53747994", "0.5373412", "0.53628695", "0.5324506", "0.5319295", "0.52873236", "0.5282698", "0.52594453", "0.5258599", "0.52334434", "0.52323323", "0.52323323", "0.5220004", "0.52061313", "0.5198367", "0.51858896", "0.51746446", "0.51692444", "0.51510894", "0.51437265", "0.514347", "0.5140636", "0.50827116", "0.50657225", "0.5064627", "0.50352085", "0.50342745", "0.5026687", "0.501803", "0.49929836", "0.49858505", "0.49701184", "0.4938751", "0.49357566", "0.4928911", "0.4917222", "0.49070767", "0.490578", "0.4902955", "0.4893624", "0.48909044", "0.48822966", "0.48702443", "0.4867332", "0.48661292", "0.484832", "0.48281193", "0.48269013", "0.4826091", "0.48227748", "0.48210022", "0.4811911", "0.48023936", "0.4793385", "0.4788168", "0.47870874", "0.47841012", "0.47823602", "0.47782773", "0.47704098", "0.47678402", "0.47604176", "0.4757403", "0.47571447", "0.47550118", "0.47519714", "0.47500423", "0.4749633", "0.4743691", "0.4740933", "0.47385022", "0.47365662", "0.47360435", "0.4734022", "0.47211516", "0.47147587", "0.47146857", "0.47130507", "0.47130507", "0.47130507", "0.4709882", "0.47089073", "0.47084042", "0.47025263", "0.47005513", "0.46946785", "0.4689134", "0.46858037", "0.4684825", "0.46612138" ]
0.7182565
0
change the order's status to be "readytoserve" which is selected by the id of the order
def order_ready(request):\n    cs , status = CookStatus.objects.get_or_create(cook_name=request.user)\n    if cs.current_order is not None:\n        cs.current_order.status = 'ready-to-serve'\n        cs.current_order.save()\n        cs.current_order = None\n        cs.save()\n    return HttpResponseRedirect("/staff/cook_order_list/")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def update_order_status():\n symbol = App.config[\"symbol\"]\n\n # Get currently active order and id (if any)\n order = App.order\n order_id = order.get(\"orderId\", 0) if order else 0\n if not order_id:\n log.error(f\"Wrong state or use: check order status cannot find the order id.\")\n return None\n\n # -----\n # Retrieve order from the server\n try:\n new_order = App.client.get_order(symbol=symbol, orderId=order_id)\n except Exception as e:\n log.error(f\"Binance exception in 'get_order' {e}\")\n return\n\n # Impose and overwrite the new order information\n if new_order:\n order.update(new_order)\n else:\n return None\n\n # Now order[\"status\"] contains the latest status of the order\n return order[\"status\"]", "def order_update_status():\n result = order_obj.order_update_status(request.forms) \n return result", "def api_mark_order_ready(request, id):\n\n close_old_connections()\n \n # Not marking it as served if it isn't even ready yet.\n if not request.user.is_authenticated:\n return HttpResponseForbidden(\"You're not authenticated.\")\n \n # Get the order.\n order = Order.objects.get(id=id)\n \n # Mark the order as served and save it.\n order.ready = not order.ready\n order.save()\n\n close_old_connections()\n\n return HttpResponse('Marked as ready')", "def update_specific_order(self,status,order_id):\n self.query = \"UPDATE orders SET order_status=%s WHERE order_id=%s\"\n self.input = (status,order_id) #tuple to support indexing\n self.query_1 = \"SELECT order_id FROM orders ORDER BY order_id DESC LIMIT 1.\"\n self.query_2 = \"SELECT * FROM orders WHERE order_id=%s\"\n self.input_2 = (order_id,) #tuple to support indexing\n self.event = \"admin_update_specific_order\"\n self.error = \"Invalid order id\"\n self.message = \"Successfully updated the order.\"\n self.order_id = order_id\n self.db_error = None", "def mark_completed(self, order_id=None):\n self.status = \"paid\"\n if order_id and not self.order_id:\n self.order_id = order_id\n print(\"Order completed\")\n self.save()", "def change_status(id):\n query = \"\"\"UPDATE parcels SET status = %s WHERE id = %s\"\"\"\n tuple =('delivered' , id)\n db.insert(query, tuple)", "def status(self, id):", "def test_manager_change_order_status(self):\n self.client.force_authenticate(self.user)\n cancel = \"CA\"\n url = reverse('order-set_status', args=[self.order.id])\n resp = self.client.patch(url, data={\n \"status\": cancel\n })\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n new_status = Order.objects.values(\"status\").get(pk=self.order.id)\n self.assertEqual(new_status[\"status\"], cancel)\n\n with self.subTest('customer can not change order status'):\n self.user.role = get_user_model().CUSTOMER\n self.client.force_authenticate(self.user)\n resp = self.client.patch(url, data={\n \"status\": cancel\n })\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_update_order(self):\n response = self.api_test_client.put('{}/orders/1'.format(\n self.BASE_URL), json={'order_status': 'accepted'})\n\n self.assertEqual(response.status_code, 201)\n self.assertTrue(\n response_as_json(response)['order']['status_updated_on'])\n self.assertEqual(\n response_as_json(response)['order']['order_status'], 'accepted')", "def update_order(self, order):\n order.order_id = self.order_id\n order.average_price = self.avg_execution_price\n order.symbol = self.symbol\n order.side = self.side\n order.type = self.order_type\n order.amount = self.original_amount\n order.price = self.price\n order.filled = 
self.executed_amount\n order.remaining = self.remaining_amount\n if self.is_cancelled:\n order.status = exchanges.Order.Status.CANCELLED\n elif self.is_live:\n order.status = exchanges.Order.Status.OPEN\n else:\n order.status = exchanges.Order.Status.CLOSED\n return order", "def set_add_dispute_status(self, status):\n self.single_selection_from_kendo_dropdown(self.add_dispute_status_kendo_dropdown_locator, status)\n self.wait_for_ajax_spinner_load()", "def test_admin_change_order_status(self):\n # Test unregistered id\n # Correct format but not there\n response = self.client.put(\n 'api/v1/parcels/35420', headers=self.admin_token_dict)\n data = json.loads(response.data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n data, {'message': 'No Parcel delivery order with that id'})\n # Test invalid format id\n response = self.client.put(\n 'api/v1/parcels/35uh420', headers=self.admin_token_dict) # Incorrect id format\n data = json.loads(response.data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data, {'message': 'Wrong id format'})", "def put(self):\n request_data = json.loads(request.data)\n print(request_data)\n order_id = request_data['order_id']\n status = request_data['status']\n MM.update_order_status(ObjectId(order_id), status)\n return {\"message\": \"Order Status Updated\"}, 200", "def set_order_status(self, status, orderid=0, envtype=0):\n if int(status) not in TRADE_CN.REV_ORDER_STATUS:\n error_str = ERROR_STR_PREFIX + \"the type of status is wrong \"\n return RET_ERROR, error_str\n\n if not TRADE_CN.check_envtype_cn(envtype):\n error_str = ERROR_STR_PREFIX + \"the type of environment param is wrong \"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(SetOrderStatusCN.cn_pack_req,\n SetOrderStatusCN.cn_unpack_rsp)\n\n # the keys of kargs should be corresponding to the actual function arguments\n kargs = {'cookie': str(self.cookie), 'envtype': str(envtype), 'localid': str(0),\n 'orderid': str(orderid), 'status': str(status)}\n\n ret_code, msg, set_order_list = query_processor(**kargs)\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n col_list = ['envtype', 'orderID']\n set_order_table = pd.DataFrame(set_order_list, columns=col_list)\n\n return RET_OK, set_order_table", "def UpdateStatus(self,pid):\n\t\tb1=Rents.objects.filter(paymentid_id=pid).first()\n\t\tamount=Payment.objects.filter(paymentid=pid).values('amount')\n\t\tb=b1.__dict__\n\t\tquant=b['quantity']\n\t\tbookid=b['bookid_id']\n\t\tprice=amount[0]['amount']/quant\n\t\t#price=float(\"{.2f}\".format(amount[0]['amount']))/float(\"{0:.2f}\".format(quant))\n\t\tRents.objects.filter(paymentid_id=pid).update(status='r')\n\t\tBook.objects.filter(bookid=bookid).update(quantity=F('quantity')+quant)\n\t\tStatus.objects.filter(ISBN=b['ISBN'],rentprice=price).update(quantity=F('quantity')+quant)\n\t\tUpload.objects.filter(owner_id_id=b['owner_id_id'],sellprice=price).update(qtyavailable=F('qtyavailable')+quant)\n\t\tself.notifyBuyer(b['ISBN'])", "def change_status(self, status, application_id):", "def change_status(self):\n if self.status == 'in progress':\n self.status = 'done'\n return self.status\n elif self.status == 'done':\n self.status = 'in progress'\n self.eisenhower_priority()\n return self.status", "def update_order():", "def update_order():", "def api_mark_order_served(request, id):\n\n close_old_connections()\n \n # Not allow unauthenticated users.\n if not request.user.is_authenticated:\n return HttpResponseForbidden(\"You're not authenticated.\")\n 
\n # Get the order.\n order = Order.objects.get(id=id)\n \n # Not marking it as served if it isn't even ready yet.\n if not order.ready:\n return HttpResponseForbidden(\"This order is not yet ready!\")\n \n # Mark it as served and save it.\n order.served = not order.served\n order.save()\n\n close_old_connections()\n\n return HttpResponse('Marked as served')", "def test_change_order_status_when_order_does_not_exist(self):\n response = self.api_test_client.put('{}/orders/1000'.format(\n self.BASE_URL), json={'order_status': 'accepted'})\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response_as_json(\n response)['message'],\n 'Order with id 1000 not found')", "def _update_status(self):\n self._db_update({'status': self.status})", "def set_order_done():\n data = select_data_source()\n user = data['user']\n order_id = data['id']\n \n if check_user_permission(user) : return permission_denied_return\n \n db = database.getdb()\n \n ### Check if is valid.\n \n cmd = 'select passed from orders where id==\"{0}\"'.format(order_id)\n order_valid = db.execute(cmd).fetchall()[0][0]\n if order_valid == 0 :\n return finish_invalid_return\n \n ### Check if is done.\n cmd = 'select done from orders where id==\"{0}\"'.format(order_id)\n order_done = db.execute(cmd).fetchall()[0][0]\n if order_done != 0 :\n return finish_done_return\n \n ### All check done.\n ### Set it to done.\n cmd = 'update orders set done=1 where id==\"{0}\"'.format(order_id)\n db.execute(cmd)\n db.commit()\n print('user sets order {0} to be done.'.format(user))\n \n return finish_complete_return", "def test_order_update_status_function(self):\n order = OrderInfo.objects.create(user=self.create_test_user())\n self.assertIsInstance(order.ordered, datetime)\n self.assertIsNone(order.cooked)\n self.assertIsNone(order.delivered)\n\n order.update_current_state()\n self.assertIsInstance(order.cooked, datetime)\n self.assertIsNone(order.delivered)\n\n order.update_current_state()\n self.assertIsInstance(order.cooked, datetime)\n self.assertIsInstance(order.delivered, datetime)", "def mark_ready_for_review(self, user: User) -> None:\n from .exceptions import OperationForbiddenError, OrderEmptyError\n\n # If order is not in the \"CREATED\" state, raise an\n # OperationForbiddenError\n if not self.is_created:\n raise OperationForbiddenError(\n self.STATE_CHANGE_FORBIDDEN_ERROR_MSG % {\n 'current_state': Order.OrderState.get_choice_display(\n self.state\n ),\n 'new_state': Order.OrderState.PENDING.choice_display\n }\n )\n\n # If the order's item list is empty, raise an OrderEmptyError\n if not self.orderitem_set.exists():\n raise OrderEmptyError(\n self,\n 'An order should contain at least one Order item before it '\n 'can be marked as \"PENDING\".'\n )\n\n # Update the order to \"PENDING\" state\n self.update(user, state=Order.OrderState.PENDING.choice_value)", "def request_order_status_request(self, req):\n assert req.MsgType == Fix.Tags.MsgType.Values.ORDERSTATUSREQUEST, \\\n \"Order request is not ORDERSTATUSREQUEST\"\n\n params = { \"filter\": \"{\\\"orderID\\\": \\\"%s\\\"}\" % req.OrderID.value }\n return self.api_connector.send_request(\"Order\", RestApiConnector.HTTPMethod.GET, params)", "def finish_order(self, order_id):\n request_name = \"get_order_info\"\n\n orders = self.make_request(request_name, url_id=order_id)\n if orders is None:\n print(\"Unsuccessful updating order\")\n return\n order = orders[0]\n update_dict = dict()\n for key in order:\n if str(key)[0] == \"_\":\n continue\n try:\n 
update_dict[key.encode('utf-8')] = order[key].encode('utf-8')\n except AttributeError:\n update_dict[key.encode('utf-8')] = order[key]\n\n update_dict['status'] = 'Complete'\n resp = self.make_request('set_inventory_order', url_id=order_id, arguments=update_dict)", "def fill_order(self, order: Order) -> None:\n order = self.get_order_by_id(order.id)\n order.status = OrderStatus.FILL", "async def on_order_completed(self, order_id: str):\n self._orders = list(filter(lambda order: order['id'] != order_id, self._orders))", "def updateStatus(self, status):\n pass", "async def on_order_updated(self, order: MetatraderOrder):\n for i in range(len(self._orders)):\n if self._orders[i]['id'] == order['id']:\n self._orders[i] = order\n break\n else:\n self._orders.append(order)", "def cook_order(request):\n\torder_id = request.GET.get('order_id', 0)\n\tcs , status = CookStatus.objects.get_or_create(cook_name=request.user)\n\n\tif cs.current_order is None:\n\t\tcs.current_order = Order.objects.get(id=order_id)\n\t\tcs.current_order.status = 'cooking'\n\t\tcs.current_order.tikchen = request.user.username\n\t\tcs.current_order.save()\n\t\tcs.save()\n\n\treturn HttpResponseRedirect(\"/staff/cook_order_list/\")", "def __order_status(self):\n log.debug(\"Displaying __order_status\")\n # Find the latest orders\n orders = self.session.query(db.Order) \\\n .filter(db.Order.user == self.user) \\\n .order_by(db.Order.creation_date.desc()) \\\n .limit(20) \\\n .all()\n # Ensure there is at least one order to display\n if len(orders) == 0:\n self.bot.send_message(self.chat.id, self.loc.get(\"error_no_orders\"))\n # Display the order status to the user\n for order in orders:\n self.bot.send_message(self.chat.id, order.text(w=self, session=self.session, user=True))\n # TODO: maybe add a page displayer instead of showing the latest 5 orders", "def prepare_order(self, index, order_status):\n if(self.running_qty > 0 and index > 0):\n quantity = self.running_qty\n price = self.get_price_offset3(index)\n elif(self.running_qty < 0 and index < 0):\n quantity = abs(self.running_qty)\n price = self.get_price_offset3(index)\n else:\n quantity = self.ORDER_START_SIZE // 4\n price = self.get_price_offset2(index)\n if (price == None):\n return None\n else:\n return {'price': price, 'orderQty': quantity, 'side': \"Buy\" if index < 0 else \"Sell\"}", "def set_status(self, scenario_id, status):\n self.cur.execute(\n \"UPDATE execute_list SET status = %s WHERE id = %s\",\n (status, scenario_id),\n )", "def _order_cannot_be_updated_if_not_pending(order_status):\n pecan.abort(400, u._(\"Only PENDING orders can be updated. 
Order is in the\"\n \"{0} state.\").format(order_status))", "def UpdateStatus(self, status):\r\n self.status.update(status)", "def update_status(request_id, status):\n pass", "def _set_status(self, status):\n with self.status_lock:\n if (status in _ENDING_STATUSES) or (not self.status in _ENDING_STATUSES):\n self.status = status", "def set_status(self, status):\n # TODO log to db\n self.status = status", "def reject_order(self, order: Order) -> None:\n order = self.get_order_by_id(order.id)\n order.status = OrderStatus.REJECT", "def update_status(self, id, status):\n sql = f\"UPDATE incidences SET status = \\'{status}\\'\\\n WHERE incidences.id = {id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()", "def _status_btn_clicked(root, item):\n sql_status_update = 'UPDATE job SET Job_Status = \"Complete\" WHERE Job_ID = '+str(item[0])+';'\n print (sql_status_update)\n conn = pymysql.connect(host='localhost', user='root', password='#######', db='######')\n a = conn.cursor()\n a.execute(sql_status_update)\n conn.commit()\n a.close()\n conn.close()", "def item_status(item_id):\n\n item_completed = request.form.get(\"item_completed\", \"off\")\n list_id = request.form[\"list_id\"]\n\n item_completed = item_completed == \"on\"\n\n to_do_item = ToDoItem.query.get(item_id)\n to_do_item.completed = item_completed\n db.session.commit()\n\n return redirect(f\"/lists/{list_id}\")", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "async def update_trade_status():\n # GET /api/v3/openOrders - get current open orders\n # GET /api/v3/allOrders - get all orders: active, canceled, or filled\n\n symbol = App.config[\"symbol\"]\n\n # -----\n try:\n open_orders = App.client.get_open_orders(symbol=symbol) # By \"open\" orders they probably mean \"NEW\" or \"PARTIALLY_FILLED\"\n # orders = App.client.get_all_orders(symbol=symbol, limit=10)\n except Exception as e:\n log.error(f\"Binance exception in 'get_open_orders' {e}\")\n return\n\n if not open_orders:\n # -----\n await update_account_balance()\n\n last_kline = App.analyzer.get_last_kline(symbol)\n last_close_price = to_decimal(last_kline[4]) # Close price of kline has index 4 in the list\n\n base_quantity = App.base_quantity # BTC\n btc_assets_in_usd = base_quantity * last_close_price # Cost of available BTC in USD\n\n usd_assets = App.quote_quantity # USD\n\n if usd_assets >= btc_assets_in_usd:\n App.status = \"SOLD\"\n else:\n App.status = \"BOUGHT\"\n\n elif len(open_orders) == 1:\n order = open_orders[0]\n if order.get(\"side\") == SIDE_SELL:\n App.status = \"SELLING\"\n elif order.get(\"side\") == SIDE_BUY:\n App.status = \"BUYING\"\n else:\n log.error(f\"Neither SELL nor BUY side of the order {order}.\")\n return None\n\n else: # Many orders\n log.error(f\"Wrong state. More than one open order. 
Fix manually.\")\n return None", "def test_updating_item_status(self):\n #test original quantity\n self.assertEqual(self.supply1.quantity, 10)\n self.assertEqual(self.supply2.quantity, 10)\n \n modified_po = copy.deepcopy(base_purchase_order)\n modified_po['status'] = 'Received'\n modified_po['items'][0]['id'] = 1\n modified_po['items'][0]['status'] = 'Receieved'\n \n resp = self.client.put('/api/v1/purchase-order/1/',\n format='json',\n data=modified_po)\n \n self.assertEqual(resp.status_code, 200, msg=resp)\n \n po = resp.data\n \n self.assertEqual(Supply.objects.get(pk=1).quantity, 20)", "def put(self, order_id):\n body = request.get_json()\n order = db.session.query(models.Order).filter_by(id=order_id).first()\n if order is None:\n return 'Order id not found', 400\n borrower = body.get('borrower')\n borrower = query_user_by_name(borrower)\n if borrower is None:\n return 'User does not exit in the system', 404\n # if invalid_user(borrower.username):\n # return 'Unauthorized user, please login as a user/borrower', 401\n copy_id = body.get('copy_id')\n print(body)\n print(copy_id)\n copy = db.session.query(models.Copy).filter_by(id=copy_id).first()\n if copy is None:\n return 'Copy ID {} not found in system'.format(copy_id), 409\n elif copy.id != copy_id and copy.status == BOOK_COPY_STATUS_UNAVAILABLE:\n return 'The copy of the book is not available', 400\n copy_owner = body.get('copy_owner')\n owner = query_user_by_name(copy_owner)\n if owner is None:\n return 'Copy owner not found in the system'.format(copy_owner), 409\n # return_date = body.get('return_date')\n # if datetime.strptime(return_date, \"%y%m%d\") < datetime.strptime(datetime.utcnow().strftime(\"%Y-%m-%d\"), \"%y%m%d\"):\n # return 'Return date should be later than today', 400\n status = body.get('order_status')\n if status is not None and status < 0 or status > 4:\n return 'Status should between 0-4', 400\n order.parse_body_status(body)\n copy = db.session.query(models.Copy).filter_by(id=order.copy).first()\n if order.status == ORDER_STATUS_COMPLETED or order.status == ORDER_STATUS_DECLINED:\n copy.status = BOOK_COPY_STATUS_AVAILABLE\n else:\n copy.status = BOOK_COPY_STATUS_UNAVAILABLE\n db.session.commit()\n return order.serialize(), 200", "def id_status(self, id_status):\n self._id_status = id_status", "def mark_refunded(self):\n order = self.clone()\n order.status = Order.STATUS_REFUNDED\n order.save()\n return order", "def set_vendor_price_list_status(self, status_items):\n self.multiple_items_selection_from_kendo_dropdown(self.vendor_price_list_status_kendo_dropdown_locator, status_items)\n self.wait_for_ajax_spinner_load()", "def onOrderSelected(self, item):\n if self.lstOrders.getMultiSelectedItems() == []:\n self.clearBidData()\n else:\n self.btnCancelOrder.enable()", "def received_order(self, order):\n\t\tif order.direction == ORDERDIR.IN:\n\t\t\tself.set_button_light(order.floor, OUTPUT.IN_LIGHTS, 1)\n\t\telse:\n\t\t\tself.startedOrderQueue.put(order)\n\t\tself.orderQueue.add_order(order)\n\t\tself.update_and_send_elevator_info()\n\t\tself.should_drive()", "def on_trade_order_check(self, orderid, envtype, status):\n if is_cntrade_order_status_finish(status):\n self._obj_order_sub.del_val(orderid=orderid, envtype=envtype)\n elif (not self._obj_order_sub.has_val(orderid, envtype)) and self._obj_order_sub.has_val(u'', envtype):\n self._obj_order_sub.add_val(orderid, envtype) # record info for subscribe order u''", "def onSelected(self, item):\n if not item:\n self.clearBidData()\n else:\n self.enableAddOrder()", "def 
change_approval(self, status):\r\n if status == 'approve':\r\n return self.approve()\r\n elif status == 'disapprove':\r\n return self.disapprove()", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def set_OrderStatus(self, value):\n super(ListOrdersInputSet, self)._set_input('OrderStatus', value)", "def _set_status(self):\n result = self._get_status()\n if result and result[0]['state'] == 'aborted':\n raise Exception(\"Aborted because the status flag is set to 'aborted' in dynamodb\")\n\n # record the status\n self.status['timestamp'] = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n self.db_handler.update_item({'api_version': TsV2CatalogHandler.api_version}, self.status)", "def SetStatus(self, status):\r\n self.status = status", "def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True", "def slot_user_order(self, dummy_sender, data):\r\n (price, volume, typ, oid, status) = data\r\n found = False\r\n removed = False # was the order removed?\r\n opened = False # did the order change from 'post-pending' to 'open'\"?\r\n voldiff = 0 # did the order volume change (full or partial fill)\r\n if \"executing\" in status:\r\n # don't need this status at all\r\n return\r\n if \"post-pending\" in status:\r\n # don't need this status at all\r\n return\r\n if \"removed\" in status:\r\n for i in range(len(self.owns)):\r\n if self.owns[i].oid == oid:\r\n order = self.owns[i]\r\n\r\n # work around MtGox strangeness:\r\n # for some reason it will send a \"completed_passive\"\r\n # immediately followed by a \"completed_active\" when a\r\n # market order is filled and removed. 
Since \"completed_passive\"\r\n # is meant for limit orders only we will just completely\r\n # IGNORE all \"completed_passive\" if it affects a market order,\r\n # there WILL follow a \"completed_active\" immediately after.\r\n if order.price == 0:\r\n if \"passive\" in status:\r\n # ignore it, the correct one with\r\n # \"active\" will follow soon\r\n return\r\n\r\n self.debug(\r\n \"### removing order %s \" % oid,\r\n \"price:\", self.gox.quote2str(order.price),\r\n \"type:\", order.typ)\r\n\r\n # remove it from owns...\r\n self.owns.pop(i)\r\n\r\n # ...and update own volume cache in the bids or asks\r\n self._update_level_own_volume(\r\n order.typ,\r\n order.price,\r\n self.get_own_volume_at(order.price, order.typ)\r\n )\r\n removed = True\r\n break\r\n else:\r\n for order in self.owns:\r\n if order.oid == oid:\r\n found = True\r\n self.debug(\r\n \"### updating order %s \" % oid,\r\n \"volume:\", self.gox.base2str(volume),\r\n \"status:\", status)\r\n voldiff = volume - order.volume\r\n opened = (order.status != \"open\" and status == \"open\")\r\n order.volume = volume\r\n order.status = status\r\n break\r\n\r\n if not found:\r\n # This can happen if we added the order with a different\r\n # application or the gox server sent the user_order message\r\n # before the reply to \"order/add\" (this can happen because\r\n # actually there is no guarantee which one arrives first).\r\n # We will treat this like a reply to \"order/add\"\r\n self.add_own(Order(price, volume, typ, oid, status))\r\n\r\n # The add_own() method has handled everything that was needed\r\n # for new orders and also emitted all signals already, we\r\n # can immediately return here because the job is done.\r\n return\r\n\r\n # update level own volume cache\r\n self._update_level_own_volume(\r\n typ, price, self.get_own_volume_at(price, typ))\r\n\r\n # We try to help the strategy with tracking the orders as good\r\n # as we can by sending different signals for different events.\r\n if removed:\r\n reason = self.gox.msg[\"user_order\"][\"reason\"]\r\n self.signal_own_removed(self, (order, reason))\r\n if opened:\r\n self.signal_own_opened(self, (order))\r\n if voldiff:\r\n self.signal_own_volume(self, (order, voldiff))\r\n self.signal_changed(self, None)\r\n self.signal_owns_changed(self, None)", "def change_status(self):\n if self.status == \"Still Loaned\":\n self.status = \"Given Back\"\n else:\n self.status = \"Still Loaned\"", "def update_status(request):\n tasklist = request.GET.get(\"tasklist\")\n pk = request.GET.get(\"pk\")\n status = request.GET.get(\"status\")\n qs = Todo.objects.get(pk=pk)\n qs.status = status\n if status == \"Done\":\n qs.close()\n elif status == \"Undone\":\n qs.reopen()\n elif status == \"In-Progress\":\n qs.in_progress()\n qs.save()\n return redirect(\"tasks\", tasklist=tasklist)", "def change_status(self, status):\n for option in status:\n response = requests.patch(self.base_url + 'settings', headers=self.headers, json={option: status.get(option)})\n print(f'[Mudando {option}] {response.status_code} - {response.ok}')", "def status_update(request, id=None):\n #obj = Todo.objects.all()\n user = request.user if request.user.is_authenticated else None\n Todo.objects.filter(id=id).update(mark_done=True, answered_by= user)\n return redirect('lists:alllist')", "async def status(self, ctx:utils.Context, status:str):\n\n status_o = getattr(discord.Status, status.lower())\n await self.bot.change_presence(activity=self.bot.guilds[0].me.activity, status=status_o)", "def set_status(self, status):\n if 
not status == self._status:\n self._status = status\n self.winstance.send_event('State changed to ' + self._status)\n\n self.completed = not self.parent_node.is_job or \\\n self._status == 'COMPLETED'\n\n if self.completed:\n self.publish()\n\n if not self.parent_node.is_job:\n self.failed = False\n else:\n self.failed = self.parent_node.is_job and \\\n (self._status == 'BOOT_FAIL' or\n self._status == 'CANCELLED' or\n self._status == 'FAILED' or\n self._status == 'REVOKED' or\n self._status == 'TIMEOUT')", "async def change_status(self, status: str) -> int:\n data = {'status': str(status)}\n r = await self.request.request(url='https://www.roblox.com/home/updatestatus', method='POST', data=j.dumps(data))\n return r.status_code", "def setComicStatus(self, handles, status):\n db.execute(\"update comics set status=%s where handle in ('\" + \"', '\".join(handles) + \"')\", status)", "def update(self, instance, validated_data):\n\n # If an order is cancelled or delivered, it cannot be modified.\n if instance.status == CANCELLED or instance.status == DELIVERED:\n raise exceptions.PermissionDenied('This order cannot be modified.')\n\n # If an order is already confirmed but UI/agent sends another confirmation request by mistake,\n # we deny it as each confirmation is a big operation that includes generating invoices/ledger entries.\n if instance.status == validated_data['status'] == CONFIRMED:\n raise exceptions.PermissionDenied('This order is already confirmed.')\n\n if instance.status == ACCEPTED and validated_data['status'] == CONFIRMED:\n # 1. Transition: accepted -> confirmed\n instance.status = validated_data.get('status')\n elif instance.status == CONFIRMED and validated_data['status'] in [CANCELLED, DELIVERED]:\n # 2. Transition: confirmed -> cancelled/delivered and return\n instance.status = validated_data.get('status')\n instance.save(update_fields=['status'])\n return instance\n else:\n # In case of any invalid transition, reject it.\n raise exceptions.PermissionDenied('There seems to be some discrepancy. 
Please contact your agent.')\n\n # Get exclusive lock on all relevant data rows\n orderlines = instance.orderlines.select_for_update().select_related('product').all()\n\n # Do order and product update in a single transaction\n with transaction.atomic():\n\n # Validate that order can be approved.\n self._validate_units_and_balance_in_orderlines(orderlines, instance.user)\n\n for orderline in orderlines:\n\n # Decrement product stock count by orderline(buying) requirement\n product = orderline.product\n product.units = F('units') - orderline.units\n product.save(update_fields=['units'])\n\n # Lock current standing price into the orderline, calculate sub total and lock it.\n product_price = product.price\n orderline.confirmed_price = product_price\n orderline.locked = CONFIRMED\n orderline.sub_total = product_price * F('units')\n orderline.save(update_fields=['confirmed_price', 'locked', 'sub_total'])\n\n # Mark order as confirmed.\n instance.save(update_fields=['status'])\n return instance", "def set_status(self, new_status):\n if new_status == self.status:\n return\n\n old_status = self.status\n\n if new_status not in self.available_statuses():\n raise exceptions.InvalidOrderStatus(\n _(\n \"'%(new_status)s' is not a valid status for order %(number)s\"\n \" (current status: '%(status)s')\"\n )\n % {\n \"new_status\": new_status,\n \"number\": self.number,\n \"status\": self.status,\n }\n )\n self.status = new_status\n if new_status in self.cascade:\n new_line_status = self.cascade[new_status]\n for line in self.lines.all():\n if new_line_status in line.available_statuses():\n line.status = new_line_status\n line.save()\n self.save()\n\n # Send signal for handling status changed\n order_status_changed.send(\n sender=self,\n order=self,\n old_status=old_status,\n new_status=new_status,\n )\n\n self._create_order_status_change(old_status, new_status)", "async def _check_order_update(self, *args, **kwargs):\n order_nos = list(self._orders.keys())\n if not order_nos:\n return\n for order_no in order_nos:\n success, error = await self._rest_api.get_order_status(order_no)\n if error:\n return\n await self._update_order(success[\"data\"][0])", "def on_order(self, order: OrderData):\n # print(\"on_order\")\n # print(order)\n pass", "async def get_order_status(self, symbol, order_id, client_order_id):\n params = {\n \"symbol\": symbol,\n \"orderId\": str(order_id),\n \"origClientOrderId\": client_order_id,\n \"timestamp\": tools.get_cur_timestamp_ms()\n }\n success, error = await self.request(\"GET\", \"/api/v3/order\", params=params, auth=True)\n return success, error", "def _create_order(self, order_status):\n if order_status['is_cancelled']:\n status = ORDER_STATUS.CANCELLED\n elif not order_status['is_live']:\n log.info('found executed order {}'.format(order_status))\n status = ORDER_STATUS.FILLED\n else:\n status = ORDER_STATUS.OPEN\n\n amount = float(order_status['original_amount'])\n filled = float(order_status['executed_amount'])\n\n if order_status['side'] == 'sell':\n amount = -amount\n filled = -filled\n\n price = float(order_status['price'])\n order_type = order_status['type']\n\n stop_price = None\n limit_price = None\n\n # TODO: is this comprehensive enough?\n if order_type.endswith('limit'):\n limit_price = price\n elif order_type.endswith('stop'):\n stop_price = price\n\n executed_price = float(order_status['avg_execution_price'])\n\n # TODO: bitfinex does not specify comission. 
I could calculate it but not sure if it's worth it.\n commission = None\n\n date = pd.Timestamp.utcfromtimestamp(float(order_status['timestamp']))\n date = pytz.utc.localize(date)\n order = Order(\n dt=date,\n asset=self.assets[order_status['symbol']],\n amount=amount,\n stop=stop_price,\n limit=limit_price,\n filled=filled,\n id=str(order_status['id']),\n commission=commission\n )\n order.status = status\n\n return order, executed_price", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status" ]
[ "0.67920375", "0.670818", "0.66758204", "0.6641249", "0.65196955", "0.6437769", "0.6087314", "0.6031311", "0.5999321", "0.59882295", "0.5978718", "0.5966084", "0.59375423", "0.59296644", "0.59227234", "0.5900806", "0.58614755", "0.5841006", "0.5841006", "0.58058894", "0.57958835", "0.5776419", "0.57573617", "0.5730916", "0.57189417", "0.57016826", "0.56785095", "0.5673114", "0.5636598", "0.5635252", "0.5634406", "0.56208634", "0.5580037", "0.55799913", "0.55757123", "0.5568158", "0.5561071", "0.5548472", "0.5542574", "0.55308735", "0.55049044", "0.5498065", "0.5491699", "0.5483908", "0.5475187", "0.5475187", "0.5475187", "0.54624295", "0.54574764", "0.5454165", "0.54515916", "0.5447942", "0.54468596", "0.5443191", "0.54423183", "0.54316443", "0.5424822", "0.5418616", "0.54159707", "0.54159707", "0.54159707", "0.54159707", "0.54159707", "0.54159707", "0.54159707", "0.5405097", "0.5397041", "0.53817475", "0.5344476", "0.534347", "0.53364384", "0.5319629", "0.5316878", "0.5316218", "0.5311027", "0.5302227", "0.5301443", "0.529895", "0.52963144", "0.52821606", "0.5282012", "0.5281435", "0.5281128", "0.52640784", "0.5255918", "0.5255918", "0.5255918", "0.5255918", "0.5255918", "0.5255918", "0.5255918", "0.5255918", "0.5255918", "0.5255918", "0.5255918", "0.5255918", "0.5255918", "0.5255918", "0.5255918", "0.5255918" ]
0.68117625
0
Format trajectory into a list of tuples before they are stored in memory. Trajectory is a list of (s,a,r,s,d) tuples
def formatTrajectory(self, trajectory): return self.RLModel.formatTrajectory(trajectory)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_trajectory(path: str) -> Optional[List[Dict[str, tuple]]]:\n lines = _get_lines_from_file(path)\n\n ess_file = False\n if path.split('.')[-1] != 'xyz':\n try:\n log = ess_factory(fullpath=path, check_for_errors=False)\n ess_file = True\n except (InputError, RMGInputError):\n ess_file = False\n\n if ess_file:\n if not isinstance(log, GaussianLog):\n raise NotImplementedError(f'Currently parse_trajectory only supports Gaussian files, got {type(log)}')\n traj = list()\n done = False\n i = 0\n while not done:\n if i >= len(lines) or 'Normal termination of Gaussian' in lines[i] or 'Error termination via' in lines[i]:\n done = True\n elif 'Input orientation:' in lines[i]:\n i += 5\n xyz_str = ''\n while len(lines) and '--------------------------------------------' not in lines[i]:\n splits = lines[i].split()\n xyz_str += f'{qcel.periodictable.to_E(int(splits[1]))} {splits[3]} {splits[4]} {splits[5]}\\n'\n i += 1\n traj.append(str_to_xyz(xyz_str))\n i += 1\n\n else:\n # this is not an ESS output file, probably an XYZ format file with several Cartesian coordinates\n skip_line = False\n num_of_atoms = 0\n traj, xyz_lines = list(), list()\n for line in lines:\n splits = line.strip().split()\n if len(splits) == 1 and all([c.isdigit() for c in splits[0]]):\n if len(xyz_lines):\n if len(xyz_lines) != num_of_atoms:\n raise ParserError(f'Could not parse trajectory, expected {num_of_atoms} atoms, '\n f'but got {len(xyz_lines)} for point {len(traj) + 1} in the trajectory.')\n traj.append(str_to_xyz(''.join([xyz_line for xyz_line in xyz_lines])))\n num_of_atoms = int(splits[0])\n skip_line = True\n xyz_lines = list()\n elif skip_line:\n # skip the comment line\n skip_line = False\n continue\n else:\n xyz_lines.append(line)\n\n if len(xyz_lines):\n # add the last point in the trajectory\n if len(xyz_lines) != num_of_atoms:\n raise ParserError(f'Could not parse trajectory, expected {num_of_atoms} atoms, '\n f'but got {len(xyz_lines)} for point {len(traj) + 1} in the trajectory.')\n traj.append(str_to_xyz(''.join([xyz_line for xyz_line in xyz_lines])))\n\n if not len(traj):\n logger.error(f'Could not parse trajectory from {path}')\n return None\n return traj", "def simulation_to_lines(data: List(Float))->List(Tuple(Int, Float)):\n result = []\n counter = 0\n for payoff in data:\n result = result + [(counter, payoff)]\n counter+=1\n return result\n\n #print(str(result))", "def trajectory(self):\n return Trajectory.createFromTuples(self.points)", "def _print_tisserand_lists(self, Trajectory=[]):\n\t\n\timport numpy as np\n\t\n\tn = len(Trajectory);\n\trpl = [];\n\tral = [];\n\tpl = [];\n\tvinfl = [];\n\tfor i in range(n):\n\t\tral.append(Trajectory[i][6]);\n\t\trpl.append(Trajectory[i][5]);\n\t\tpl.append(Trajectory[i][7]);\n\t\tvinfl.append(Trajectory[i][8]);\n\t\n\tprint 'list_ra_python = [',\n\tn = len(ral);\n\tfor i in range(n-1):\n\t\tprint '%f, ' % ral[i],\n\tprint '%f];' % ral[n-1];\n\t\n\tprint 'list_rp_python = [',\n\tn = len(rpl);\n\tfor i in range(n-1):\n\t\tprint '%f, ' % rpl[i],\n\tprint '%f];' % rpl[n-1];\n\t\n\tprint 'list_period_python = [',\n\tn = len(pl);\n\tfor i in range(n-1):\n\t\tprint '%f, ' % pl[i],\n\tprint '%f];' % pl[n-1];\n\t\n\tprint 'list_vinf_python = [',\n\tn = len(vinfl);\n\tfor i in range(n-1):\n\t\tif(vinfl[i] != []):\n\t\t\tprint '%f, ' % np.linalg.norm(vinfl[i]),\n\t\telse:\n\t\t\tprint '0, ',\n\tprint '%f];' % np.linalg.norm(vinfl[n-1]);\n\t\n\tprint 'list_vinf_python_x = [',\n\tn = len(vinfl);\n\tfor i in range(n-1):\n\t\tif(vinfl[i] != []):\n\t\t\tprint '%f, ' 
% vinfl[i][0],\n\t\telse:\n\t\t\tprint '0, ',\n\tprint '%f];' % vinfl[n-1][0];\n\t\n\tprint 'list_vinf_python_y = [',\n\tn = len(vinfl);\n\tfor i in range(n-1):\n\t\tif(vinfl[i] != []):\n\t\t\tprint '%f, ' % vinfl[i][1],\n\t\telse:\n\t\t\tprint '0, ',\n\tprint '%f];' % vinfl[n-1][1];\n\t\n\tprint 'list_vinf_python_z = [',\n\tn = len(vinfl);\n\tfor i in range(n-1):\n\t\tif(vinfl[i] != []):\n\t\t\tprint '%f, ' % vinfl[i][2],\n\t\telse:\n\t\t\tprint '0, ',\n\tprint '%f];' % vinfl[n-1][2];", "def format_coords(self, coordinates):\n\n coords = []\n for i in range(0, len(coordinates), 3):\n coords.append(tuple(coordinates[i:i+3]))\n\n return coords", "def convert_shapely_points_to_tuples(list_of_points) -> list:\n return [(p.x, p.y) for p in list_of_points]", "def get_trajectory(self) -> Tuple:\n # Ensure index is in an allowable range\n assert self.step <= self.num_steps\n # TODO NOTE consider edge case: what happens if this is called right\n # after self.cycle() has been called? does it still work?\n\n # ==\n # Get the trajectory up to current timestep\n o_traj = (self.obs_buffer[:self.step]\n .view(-1, *self.observation_shape)) # (T, *obs_shape)\n h_init = (self.hid_buffer[0]\n .view(-1, self.hidden_state_dim)) # (1, hidden_dim)\n d_traj = (self.don_buffer[:self.step]\n .view(-1, 1)) # (T, 1)\n a_traj = (self.act_buffer[:self.step]\n .view(-1, self.action_dim)) # (T, action_dim)\n\n # Return\n return o_traj, h_init, d_traj, a_traj", "def build(self, trajectory):\n buf = StringIO()\n\n print >>buf, str(datetime.datetime.now())\n print >>buf, '%5d' % trajectory.n_atoms\n\n linecount = 0\n for atom in range(trajectory.n_atoms):\n for dim in range(3):\n # need to convert from nm to angstroms by multiplying by ten\n fmt = '%12.7f' % (10 * trajectory.xyz[0, atom, dim])\n assert len(fmt) == 12, 'fmt overflowed writing inpcrd. 
blowup?'\n buf.write(fmt)\n linecount += 1\n if linecount >= 6:\n buf.write(os.linesep)\n linecount = 0\n\n if trajectory.unitcell_lengths != None:\n if linecount != 0:\n buf.write(os.linesep)\n box = (trajectory.unitcell_lengths[0]*10).tolist()\n box.extend(trajectory.unitcell_angles[0].tolist())\n buf.write(('%12.7f' * 6) % tuple(box))\n \n return buf.getvalue()", "def process_coordinates(void: 'ChargeSystem', steps: int) -> list:\n result = [[] for _ in void]\n\n for _ in range(steps):\n for ind, charge in enumerate(void):\n result[ind].append(list(charge.position))\n void.update()\n\n return result", "def to_numpy_trajectory(trajectory):\n tx = []\n ty = []\n tyaw = []\n for pose in trajectory:\n tx.append(pose.x)\n ty.append(pose.y)\n tyaw.append(pose.yaw)\n return numpy.asarray([tx, ty, tyaw])", "def read_frame_trajectory_file( filename ):\n file = open(filename, \"r\")\n\n timestamps = list()\n path = list()\n\n for line in file:\n # eliminate leading spaces\n line = line.strip()\n\n # ignore comments and empty lines\n if len(line) == 0 or line[0] == '#':\n continue\n\n # divide on whitespace and convert to numbers\n nums = [float(x) for x in line.split()]\n \n # separate out components and build lists\n\n timestamps.append( nums[0] )\n\n origin = list( nums[1:4] )\n unitx = list( nums[4:7] )\n unity = list( nums[7:10] )\n unitz = list( nums[10:13] )\n\n path.append( list( (origin, unitx, unity, unitz ) ) )\n\n return path, timestamps", "def get_trajectory(self, sort_by: str = \"trials\") -> tuple[list[float], list[float]]:\n raise NotImplementedError", "def dump_ue4_trajectory(name: str, trajectory: typing.Mapping[float, tf.Transform]) -> None:\n with open('unreal_trajectory_{0}.csv'.format(name), 'w') as output_file:\n output_file.write('Name,X,Y,Z,Roll,Pitch,Yaw\\n')\n for idx, timestamp in enumerate(sorted(trajectory.keys())):\n ue_pose = uetf.transform_to_unreal(trajectory[timestamp])\n output_file.write('{name},{x},{y},{z},{roll},{pitch},{yaw}\\n'.format(\n name=idx,\n x=ue_pose.location[0],\n y=ue_pose.location[1],\n z=ue_pose.location[2],\n roll=ue_pose.euler[0],\n pitch=ue_pose.euler[1],\n yaw=ue_pose.euler[2]))", "def ex_list(data):\n return tuple(data)", "def toListOfTuple(self, df:pd.core.frame.DataFrame) -> List[Tuple]: \n df['TIME_STAMP'] = df['TIME_STAMP'].astype('str')\n records = df.to_records(index=False)\n listOfTuple = list(records)\n return listOfTuple", "def get_list(self):\n if self.key == 'L':\n return array_to_list([self.key, self.timing, self.data])\n if self.key == 'T':\n return array_to_list([self.key, self.data, self.timing])\n tmp_data = copy.deepcopy(self.data)\n for i in range(len(self.data)):\n if isinstance(self.data[i], float):\n tmp_data[i] = str('%.3f' % tmp_data[i])\n if tmp_data[i].split('.')[1] == '000':\n tmp_data[i] = tmp_data[i].split('.')[0]\n return array_to_list([self.key, self.easing, self.timing, tmp_data])", "def trajectory_to_json(trajectory: Trajectory) -> str:\n # numpy arrays need to be converted to normal tuples\n return json.dumps(trajectory, cls=NumpyEncoder)", "def parseTupleList(self,string):\r\n string = string.replace(\"[\",\"\")\r\n string = string.replace(\"),\",\"*\")\r\n string = string.replace(\"(\", \"\")\r\n string = string.replace(\")\", \"\")\r\n string = string.replace(\"]\", \"\")\r\n string = string.split(\"*\")\r\n for i in xrange(len(string)):\r\n string[i] = string[i].split(\",\")\r\n for i in xrange(len(string)):\r\n for j in xrange(len(string[i])):\r\n string[i][j] = int(string[i][j])\r\n string[i] = 
tuple(string[i])\r\n return string", "def compute_trajectory():\n pass", "def __str__(self):\n out_str = \"\\n\".join(`\"%.5f, %.5f, %.1f, %s, %s\" % (point[0], point[1], point[2], point[3], point[4])` for point in self.__traectory_list)\n return \"\\'x, y, altitude, capture time, capture date'\\n\"+out_str", "def parse_1d_scan_coords(path: str) -> List[Dict[str, tuple]]:\n if not os.path.isfile(path):\n raise InputError(f'Could not find file {path}')\n software = identify_ess(path)\n traj = list()\n\n if software == 'xtb':\n scan_path = os.path.join(os.path.dirname(path), 'xtbscan.log')\n if os.path.isfile(scan_path):\n lines = _get_lines_from_file(scan_path)\n xyz_str = ''\n for line in lines:\n splits = line.split()\n if len(splits) == 1:\n if xyz_str:\n traj.append(str_to_xyz(xyz_str))\n xyz_str = ''\n continue\n if 'energy:' in line:\n continue\n xyz_str += f'{qcel.periodictable.to_E(splits[0])} {splits[1]} {splits[2]} {splits[3]}\\n'\n traj.append(str_to_xyz(xyz_str))\n return traj\n\n lines = _get_lines_from_file(path)\n log = ess_factory(fullpath=path, check_for_errors=False)\n if not isinstance(log, GaussianLog):\n raise NotImplementedError(f'Currently parse_1d_scan_coords only supports Gaussian files, got {type(log)}')\n done = False\n i = 0\n while not done:\n if i >= len(lines) or 'Normal termination of Gaussian' in lines[i] or 'Error termination via' in lines[i]:\n done = True\n elif 'Optimization completed' in lines[i]:\n while i < len(lines) + 10 and 'Input orientation:' not in lines[i] or 'Forces (Hartrees/Bohr)' in lines [i + 7]:\n i += 1\n if 'Error termination via' in lines[i]:\n return traj\n i += 5\n xyz_str, skip_traj = '', False\n while len(lines) and '--------------------------------------------' not in lines[i]:\n if 'DIIS: error' in lines[i]:\n skip_traj = True\n break\n splits = lines[i].split()\n xyz_str += f'{qcel.periodictable.to_E(int(splits[1]))} {splits[3]} {splits[4]} {splits[5]}\\n'\n i += 1\n if not skip_traj:\n traj.append(str_to_xyz(xyz_str))\n i += 1\n return traj", "def plot_trajectory(axis, trajectory: typing.Mapping[float, tf.Transform], label: str, style: str = '-') \\\n -> typing.Tuple[float, float]:\n x = []\n y = []\n z = []\n max_point = 0\n min_point = 0\n times = sorted(trajectory.keys())\n first_pose = None\n for timestamp in times:\n pose = trajectory[timestamp]\n if first_pose is None:\n first_pose = pose\n x.append(0)\n y.append(0)\n z.append(0)\n else:\n pose = first_pose.find_relative(pose)\n max_point = max(max_point, pose.location[0], pose.location[1], pose.location[2])\n min_point = min(min_point, pose.location[0], pose.location[1], pose.location[2])\n x.append(pose.location[0])\n y.append(pose.location[1])\n z.append(pose.location[2])\n axis.plot(x, y, z, style, label=label, alpha=0.7)\n return min_point, max_point", "def tup_list_maker(tup_list):\n final_list = []\n for item in tup_list:\n index = item[0]\n sentences = item[1]\n for sentence in sentences:\n pair = (index, sentence)\n final_list.append(pair)\n return final_list", "def get_tuples(self):\n pattern = list()\n for gi in self.gradual_items:\n temp = tuple([gi.attribute_col, gi.symbol])\n pattern.append(temp)\n return pattern", "def coords_to_structure(self) -> None:\n ...", "def convert_listofrollouts(paths):\n #print([p[\"observation\"] for p in paths[:2]])\n observations = np.concatenate([path[\"observation\"] for path in paths])\n actions = np.concatenate([path[\"action\"] for path in paths])\n next_observations = np.concatenate([path[\"next_observation\"] for path 
in paths])\n terminals = np.concatenate([path[\"terminal\"] for path in paths])\n concatenated_rewards = np.concatenate([path[\"reward\"] for path in paths])\n unconcatenated_rewards = [path[\"reward\"] for path in paths]\n #print(\"DONE\")\n return observations, actions, next_observations, terminals, concatenated_rewards, unconcatenated_rewards", "def toArray(parsedList):\n interpretCommand = {\n 'C' : lambda x, prevL : x[-2:], # bezier curve. Ignore the curve.\n 'L' : lambda x, prevL : x[0:2],\n 'M' : lambda x, prevL : x[0:2],\n 'Z' : lambda x, prevL : prevL[0],\n }\n\n points =[]\n for i,(c, arg) in enumerate(parsedList):\n #debug('toArray ', i, c , arg)\n newp = interpretCommand[c](arg, points)\n points.append( newp)\n a=numpy.array( points )\n\n # Some times we have points *very* close to each other\n # these do not bring any meaning full info, so we remove them\n #\n x,y, w,h = computeBox(a)\n sizeC = 0.5*(w+h)\n #deltas = numpy.zeros((len(a),2) )\n deltas = a[1:] - a[:-1] \n #deltas[-1] = a[0] - a[-1]\n deltaD = numpy.sqrt(numpy.sum( deltas**2, 1 ))\n sortedDind = numpy.argsort(deltaD)\n # expand longuest segments\n nexp = int(len(deltaD)*0.9)\n newpoints=[ None ]*len(a)\n medDelta = deltaD[sortedDind[len(deltaD)/2] ]\n for i,ind in enumerate(sortedDind):\n if deltaD[ind]/sizeC<0.005: continue\n if i>nexp:\n np = int(deltaD[ind]/medDelta)\n pL = [a[ind]]\n #print i,'=',ind,'adding ', np,' _ ', deltaD[ind], a[ind], a[ind+1]\n for j in range(np-1):\n f = float(j+1)/np\n #print '------> ', (1-f)*a[ind]+f*a[ind+1]\n pL.append( (1-f)*a[ind]+f*a[ind+1] )\n newpoints[ind] = pL\n else:\n newpoints[ind]=[a[ind]]\n if(D(a[0],a[-1])/sizeC > 0.005 ) :\n newpoints[-1]=[a[-1]]\n\n points = numpy.concatenate([p for p in newpoints if p!=None] )\n ## print ' medDelta ', medDelta, deltaD[sortedDind[-1]]\n ## print len(a) ,' ------> ', len(points)\n\n rel_norms = numpy.sqrt(numpy.sum( deltas**2, 1 )) / sizeC\n keep = numpy.concatenate([numpy.where( rel_norms >0.005 )[0],numpy.array([len(a)-1])])\n\n #return a[keep] , [ parsedList[i] for i in keep]\n #print len(a),' ',len(points)\n return points , []", "def build(self, trajectory):\n #TODO Implement?", "def transform_trajectory(self, trajectory, theta):\n origin = np.array([trajectory.s_coordinates[0],\n trajectory.d_coordinates[0]])\n\n for step in range(self.t_steps):\n point = np.array([trajectory.s_coordinates[step],\n trajectory.d_coordinates[step]])\n point = self._rotate_vector(point - origin, theta) + origin\n trajectory.s_coordinates[step] = point[0]\n trajectory.d_coordinates[step] = point[1]\n\n return trajectory", "def split(self, time: float) -> Tuple['Trajectory','Trajectory']:\n if time <= self.times[0]:\n #split before start of trajectory\n return self.constructor()([time],[self.milestones[0]]),self.constructor()([time]+self.times,[self.milestones[0]]+self.milestones)\n elif time >= self.times[-1]:\n #split after end of trajectory\n return self.constructor()(self.times+[time],self.milestones+[self.milestones[-1]]),self.constructor()([time],[self.milestones[-1]])\n i,u = self.getSegment(time)\n assert i >= 0,\"getSegment returned -1? 
something must be wrong with the times\"\n #split in middle of trajectory\n splitpt = self.interpolate_state(self.milestones[i],self.milestones[i+1],u,self.times[i+1]-self.times[i])\n front = self.constructor()(self.times[:i+1],self.milestones[:i+1])\n back = self.constructor()(self.times[i+1:],self.milestones[i+1:])\n if u > 0:\n front.times.append(time)\n front.milestones.append(splitpt)\n if u < 1:\n back.times = [time] + back.times\n back.milestones = [splitpt] + back.milestones\n return (front,back)", "def _prepare_to_convert(coordinates: str) -> tuple:\n degrees, minutes, seconds = True, True, True\n\n if coordinates == coordinates.replace(\"°\", \" \"): degrees = False\n if coordinates == coordinates.replace(\"′\", \" \"): minutes = False\n if coordinates == coordinates.replace(\"″\", \" \"): seconds = False\n\n coordinates = coordinates.replace(\"°\", \" \").replace(\"′\", \" \").replace(\"″\", \" \").split(\" \")\n del (coordinates[-1])\n\n if seconds is False: coordinates.append(0)\n if minutes is False: coordinates.insert(0, 1)\n if degrees is False: coordinates.insert(0, 0)\n\n for i in range(len(coordinates)):\n coordinates[i] = float(coordinates[i])\n return tuple(coordinates)", "def format_tuple(data):\n return \",\".join([str(item) for item in data])", "def tuple_to_list(tup):\n return [element for element in tup]", "def trajectory1(self):\r\n\r\n trackt = [] # particle trajectory,\r\n trackx = [] # particle trajectory\r\n an = [] # analitical s**2 + x**2 = t**2\r\n s1 = [] # s = 10; s = 0, light\r\n s2 = [] # s = 20;\r\n s3 = [] # s = 40;\r\n for i in range(0, len(self.dt.obs.obt_g)):\r\n trackt.append(float(i))\r\n trackx.append(self.dt.x[i])\r\n an.append(math.sqrt(float(i) ** 2 + self.dt.x[i] ** 2))\r\n s1.append(math.sqrt(1.0 ** 2 + self.dt.x[i] ** 2))\r\n s2.append(math.sqrt(2.0 ** 2 + self.dt.x[i] ** 2))\r\n s3.append(math.sqrt(4.0 ** 2 + self.dt.x[i] ** 2))\r\n\r\n # plots:\r\n\r\n (fig, ax) = plt.subplots() # figsize=(7,5)\r\n\r\n # trajectory\r\n\r\n ax.plot(\r\n trackx,\r\n trackt,\r\n marker='+',\r\n linewidth=1,\r\n linestyle='-',\r\n color='green',\r\n label='treck',\r\n )\r\n\r\n # measurement t\r\n # ax.plot(self.dt.x, self.dt.t, marker=\"+\", linestyle=\" \", color=\"blue\", label=\"result of measurement\")\r\n\r\n ax.plot(\r\n self.dt.x,\r\n self.dt.t,\r\n marker='o',\r\n linestyle=' ',\r\n color='black',\r\n label='result of measurement',\r\n )\r\n\r\n # analitical t\r\n\r\n ax.plot(self.dt.x, an, linestyle='-', color='red',\r\n label='continuum')\r\n\r\n # light trajectory\r\n\r\n ax.plot(trackx, trackx, linestyle='-', color='yellow',\r\n label='s=0 (light)')\r\n\r\n # s(x) curves\r\n\r\n ax.plot(\r\n trackx,\r\n s1,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=1.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s2,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=2.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s3,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=4.0',\r\n )\r\n\r\n # error of measurement t\r\n\r\n ax.errorbar(self.dt.x, self.dt.t, fmt='k ', yerr=self.dt.t_err)\r\n\r\n # signature on the horizontal x-axis\r\n\r\n ax.set_xlabel('x in metres')\r\n xm = -1.0\r\n for i in range(len(self.dt.x)):\r\n if self.dt.x[i] > xm:\r\n xm = self.dt.x[i]\r\n stepx = round(xm / float(len(self.dt.x)), 1)\r\n xm = round(xm + stepx, 1)\r\n ax.set_xlim([0.0, xm])\r\n\r\n # signature on vertical y axis\r\n\r\n ax.set_ylabel('t in metres of light time ')\r\n ym = -1.0\r\n for i in range(len(self.dt.t)):\r\n if self.dt.t[i] > 
ym:\r\n ym = self.dt.t[i]\r\n stepy = round(ym / float(len(self.dt.t)), 1)\r\n ym = round(ym + stepy, 1)\r\n ax.set_ylim([0.0, ym])\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on x)\r\n\r\n locatorx = matplotlib.ticker.MultipleLocator(base=stepx)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.xaxis.set_major_locator(locatorx)\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on y)\r\n\r\n locatory = matplotlib.ticker.MultipleLocator(base=stepy)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.yaxis.set_major_locator(locatory)\r\n\r\n ax.grid()\r\n\r\n # show legend\r\n\r\n ax.legend(loc='upper left')\r\n\r\n # show drawing\r\n\r\n plt.show()", "def trans_to_coordinates(T, pts):\n p = []\n for i in range(len(pts)):\n \n p_b = [pts[i][0], pts[i][1], pts[i][2], 1]\n p_a = np.matmul(T, p_b).tolist()\n p.append(p_a[0:3])\n\n return p", "def trans_to_coordinates(T, pts):\n p = []\n for i in range(len(pts)):\n \n p_b = [pts[i][0], pts[i][1], pts[i][2], 1]\n p_a = np.matmul(T, p_b).tolist()\n p.append(p_a[0:3])\n\n return p", "def progress(adjustments):\n # intialize list\n progress = []\n # split string into list of adjustments and process individual adjustments\n for adjustment in adjustments.split(','):\n # split adjustment into year and number of minutes before midnight\n year, minutes = adjustment.split()\n # append year and representation on 24-hour clock to list\n progress.append((int(year), clock(int(minutes))))\n # convert list into tuple\n return tuple(progress)", "def reshape_to_trajectory(data, trajectory_length):\n\treturn data.reshape((-1, trajectory_length*data.shape[1]))", "def build(self, trajectory):\n pass", "def tuple_arrivals(self):\n # Note that these are tuple arrival values for future cases and predictions themselves.\n # we shouldn't use them to validate queue sizes.\n if self.arrival_rate is None:\n self.arrival_rates()\n\n self.tuple_arrival = self.arrival_rate.copy()\n # the following converts the data back to tuple arrivals per minute\n self.tuple_arrival['num-tuples'] = self.arrival_rate['mean_arrival_rate'] * 60 * 1000\n self.tuple_arrival.drop([\"mean_arrival_rate\"], axis=1)\n return self.tuple_arrival", "def create_activity_list(station,data_order):\n # Generate an ASCII representation of the GPS timestamped segments of time covered by the input data\n seglist = segmentlist(data_order.keys())\n # Sort the segment list\n seglist.sort()\n # Initialise dictionary for segment information\n full_seglist = DataQualityDict()\n # Save time span for each segment in ASCII file\n with open(\"segments.txt\", \"w\") as fout:\n for seg in seglist:\n print >>fout, \"%10.9f %10.9f\" % seg\n # FIXME: Active should be masked from the sanity channel\n full_seglist[station] = DataQualityFlag(station,active=seglist.coalesce(),known=seglist.coalesce())\n return full_seglist", "def state_(state):\n return tuple( [ tuple( row ) for row in state ] )", "def _maketriples_all(self):\n nholes = self.ctrs.shape[0]\n tlist = []\n for i in range(nholes):\n for j in range(nholes):\n for k in range(nholes):\n if i < j and j < k:\n tlist.append((i, j, k))\n tarray = np.array(tlist).astype(np.int)\n if self.verbose:\n print(\"tarray\", tarray.shape, \"\\n\", tarray)\n\n tname = []\n uvlist = []\n # foreach row of 3 elts...\n for triple in tarray:\n tname.append(\"{0:d}_{1:d}_{2:d}\".format(\n triple[0], triple[1], triple[2]))\n if self.verbose:\n 
print('triple:', triple, tname[-1])\n uvlist.append((self.ctrs[triple[0]] - self.ctrs[triple[1]],\n self.ctrs[triple[1]] - self.ctrs[triple[2]]))\n #print(len(uvlist), \"uvlist\", uvlist)\n if self.verbose:\n print(tarray.shape, np.array(uvlist).shape)\n return tarray, np.array(uvlist)", "def make_taw_list(taw_tup):\n # upack\n begin_taw, end_taw, taw_step = taw_tup\n taw_list = []\n for i in range(0, ((end_taw - begin_taw) / taw_step)):\n taw = begin_taw + (i * taw_step)\n taw_list.append(taw)\n\n return taw_list", "def to_list(cls, data):\n\t\tif isinstance(data, Atom) == False:\n\t\t\traise Exception(\"data must be a class object\")\n\t\tx,y,z = (data.atom_loc)[0], (data.atom_loc)[1], (data.atom_loc)[2]\n\t\treturn [x,y,z]", "def group_tuplets (music_list, events):\n\n\n indices = []\n brackets = {}\n\n j = 0\n for (ev_chord, tuplet_elt, time_modification) in events:\n while (j < len (music_list)):\n if music_list[j] == ev_chord:\n break\n j += 1\n nr = 0\n if hasattr (tuplet_elt, 'number'):\n nr = getattr (tuplet_elt, 'number')\n if tuplet_elt.type == 'start':\n tuplet_object = musicxml_tuplet_to_lily (tuplet_elt, time_modification)\n tuplet_info = [j, None, tuplet_object]\n indices.append (tuplet_info)\n brackets[nr] = tuplet_info\n elif tuplet_elt.type == 'stop':\n bracket_info = brackets.get (nr, None)\n if bracket_info:\n bracket_info[1] = j # Set the ending position to j\n del brackets[nr]\n\n new_list = []\n last = 0\n for (i1, i2, tsm) in indices:\n if i1 > i2:\n continue\n\n new_list.extend (music_list[last:i1])\n seq = musicexp.SequentialMusic ()\n last = i2 + 1\n seq.elements = music_list[i1:last]\n\n tsm.element = seq\n\n new_list.append (tsm)\n #TODO: Handle nested tuplets!!!!\n\n new_list.extend (music_list[last:])\n return new_list", "def makeChronList(self):\n from operator import itemgetter\n ## make list of msg lists in the format accespted by reconstructLine\n self.outData_temp = [] # this will be in chronological order\n for sens in self.outData:\n if sens is not 'header':\n for meas in self.outData[sens]:\n for time in self.outData[sens][meas]:\n value = self.outData[sens][meas][time]\n thismsg = [time, sens, meas, str(value)] # leave time as float for sorting\n self.outData_temp.append(thismsg)\n self.outData_temp.sort(key=itemgetter(0)) # sort by first index\n for msg in self.outData_temp: # now we can make time a string\n msg[0] = str(msg[0])", "def convert_mtr_to_kittimot_format(data_list: List[Union[str, int, float]], frame_id: int) -> List[Union[str, int, float]]:\n annotation_list = []\n track_id = -1\n for data in data_list:\n annotation = [frame_id, -1]\n # print(\"type: \", str2id(bboxes['object_id']))\n object_type = data[0]\n truncated = -1\n occluded = -1\n alpha = -1\n bbox2d = [-1, -1, -1, -1]\n dimensions = data[1:4]\n location = data[4:7]\n rotation_y = data[7]\n\n annotation.append(object_type)\n annotation.append(truncated)\n annotation.append(occluded)\n annotation.append(alpha)\n annotation += bbox2d\n annotation += dimensions\n annotation += location\n annotation.append(rotation_y)\n annotation_list.append(annotation)\n return annotation_list\n\n\n\n \"\"\"\n convert KITTI MOTS format to AB3DMOT format\n\n \n @params:\n data_list: a list containing data in KITTI MOTs format\n \"\"\"", "def print_trajectory(self, u):\n # Get the trajectory\n s = self.STL_signal(u)\n trajectory = s[0:2,:] # x and y position\n\n print(repr(trajectory))", "def to_pose_2d(np_trajectory):\n arr = []\n for i in range(len(np_trajectory[0])):\n pose = 
msgs.Pose2D(np_trajectory[0,i], np_trajectory[1,i], 0.)\n arr.append(pose)\n return arr", "def parse_lats(lines):\n class Parser:\n def __init__(self):\n self.state = 'get_utt_id'\n self.utt_id = ''\n self.out = {}\n\n def is_line_utt_id(self, splited_line):\n return len(splited_line) == 1\n\n def new_utt(self, splited_line):\n self.utt_id = splited_line[0]\n self.out[self.utt_id] = []\n self.state = 'get_arc'\n\n def start(self):\n self.state = 'get_utt_id'\n self.utt_id = ''\n self.out = {}\n\n def add(self, line):\n splited_line = line.split()\n if self.state == 'get_utt_id':\n assert self.is_line_utt_id(splited_line), RuntimeError(\"parse_lats init error.\")\n self.new_utt(splited_line)\n return\n if self.state == 'get_arc':\n # if self.is_line_utt_id(splited_line):\n # self.new_utt(splited_line)\n # else:\n if len(splited_line) == 4:\n # classic arc\n state_from, state_to, word_id = map(int, splited_line[:3])\n weight_hclg, weight_am, ali = splited_line[3].split(',')\n weight_hclg, weight_am = float(weight_hclg), float(weight_am)\n self.out[self.utt_id].append((state_from, state_to, word_id, weight_hclg, weight_am, ali))\n elif len(splited_line) == 3:\n state_from, state_to, word_id = map(int, splited_line[:3])\n weight_hclg, weight_am, ali = 0.0, 0.0, ''\n self.out[self.utt_id].append((state_from, state_to, word_id, weight_hclg, weight_am, ali))\n elif len(splited_line) == 2:\n # eos arc\n state_from = int(splited_line[0])\n weight_hclg, weight_am, ali = splited_line[1].split(',')\n weight_hclg, weight_am = float(weight_hclg), float(weight_am)\n self.out[self.utt_id].append((state_from, weight_hclg, weight_am, ali))\n elif len(splited_line) == 1:\n state_from = int(splited_line[0])\n self.out[self.utt_id].append((state_from, 0, 0, ''))\n elif len(splited_line) == 0:\n self.state = 'get_utt_id'\n else:\n raise RuntimeError(f\"parse_lats Wrong line in {self.utt_id}: {line}\")\n return\n\n def get_out(self):\n return self.out\n\n parser = Parser()\n parser.start()\n for i, line in enumerate(lines):\n parser.add(line)\n utt2lat = parser.get_out()\n return utt2lat", "def build_tuple(self, t):\n comma = self.art_type([self.string_type(', ')],\n baseline=0,\n breakpoints=[1])\n repr_elems = self.concatenate(t, comma)\n return self.build_container(\n repr_elems, self.left_parenthesis, self.right_parenthesis)", "def get_tuples(outputs) -> list:\n return list(map(get_tuples_helper, outputs))", "def _make_list_islot_otuples_from_nodelist(self):\n if self.node_list is None:\n raise ValueError(\"`node_list` attribute undefined\")\n \n # [input sequence, LDS, prior]\n return [[('loc', 'prec')], [('prec', 'A')], [('scale',)]]", "def __init__(self, length):\n self.length = length\n self.lst = []\n for x in range(length):\n for y in range(length):\n t = ((x+1, y+1), (x+1)*(y+1))\n self.lst.append(t)", "def pretty(self,x):\n\n\t\t#1 - we 'decode' the chromosome recording the various times of flight (days) in the list T for convenience\n\t\tT = list([0]*(self.__n_legs))\n\n\t\tfor i in xrange(self.__n_legs):\n\t\t\tT[i] = (x[4+4*i]/sum(x[4::4]))*x[3]\n\n\t\t#2 - We compute the epochs and ephemerides of the planetary encounters\n\t\tt_P = list([None] * (self.__n_legs))\n\t\tr_P = list([None] * (self.__n_legs))\n\t\tv_P = list([None] * (self.__n_legs))\n\t\tDV = list([None] * (self.__n_legs))\n\t\tclose_d = list([None] * (self.__n_legs))\n\t\t\n\t\tfor i,planet in enumerate(self.seq):\n\t\t\tt_P[i] = epoch(x[0]+sum(T[:i+1]))\n\t\t\tr_P[i],v_P[i] = self.seq[i].eph(t_P[i])\n\n\t\t#3 - We start with the 
first leg: a lambert arc\n\t\ttheta = 2*pi*x[1]\n\t\tphi = acos(2*x[2]-1)-pi/2\n\t\tr = [cos(phi)*sin(theta), cos(phi)*cos(theta), sin(phi)] #phi close to zero is in the moon orbit plane injection\n\t\tr = [JR*1000*d for d in r]\n\t\t\n\t\tl = lambert_problem(r,r_P[0],T[0]*DAY2SEC,self.common_mu, False, False)\n\n\t\t#Lambert arc to reach seq[1]\n\t\tv_end_l = l.get_v2()[0]\n\t\tv_beg_l = l.get_v1()[0]\n\t\tclose_d[0] = closest_distance(r,v_beg_l, r_P[0], v_end_l, self.common_mu)[0] / JR\n\n\t\t#First DSM occuring at the very beginning (will be cancelled by the optimizer)\n\t\tDV[0] = abs(norm(v_beg_l) - 3400)\n\n\t\tprint \"\\nFirst Leg: 1000JR to \" + self.seq[0].name \n\t\tprint \"\\tDeparture: \" + str(t_P[0]) + \" (\" + str(t_P[0].mjd2000) + \" mjd2000) \" \n\t\tprint \"\\tDuration: \" + str(T[0]) + \"days\"\n\t\tprint \"\\tInitial Velocity Increment (m/s): \" + str(DV[0])\n\t\tprint \"\\tArrival relative velocity at \" + self.seq[0].name +\" (m/s): \" + str(norm([a-b for a,b in zip(v_end_l,v_P[0])]))\n\t\tprint \"\\tClosest approach distance: \" + str(close_d[0])\n\n\t\t#4 - And we proceed with each successive leg\n\t\tfor i in xrange(1,self.__n_legs):\n\t\t\t#Fly-by \n\n\t\t\tv_out = fb_prop(v_end_l,v_P[i-1],x[6+(i-1)*4]*self.seq[i-1].radius,x[5+(i-1)*4],self.seq[i-1].mu_self)\n\t\t\t#s/c propagation before the DSM\n\t\t\tr,v = propagate_lagrangian(r_P[i-1],v_out,x[7+(i-1)*4]*T[i]*DAY2SEC,self.common_mu)\n\t\t\ttmp, ra = closest_distance(r_P[i-1],v_out, r,v, self.common_mu)\n\t\t\t#Lambert arc to reach Earth during (1-nu2)*T2 (second segment)\n\t\t\tdt = (1-x[7+(i-1)*4])*T[i]*DAY2SEC\n\t\t\tl = lambert_problem(r,r_P[i],dt,self.common_mu, False, False)\n\t\t\tv_end_l = l.get_v2()[0]\n\t\t\tv_beg_l = l.get_v1()[0]\n\t\t\ttmp2, ra2 = closest_distance(r,v_beg_l, r_P[i], v_end_l, self.common_mu)\n\t\t\tif tmp < tmp2:\n\t\t\t\tclose_d[i] = tmp/JR\n\t\t\t\tra = ra/JR\n\t\t\telse:\n\t\t\t\tclose_d[i] = tmp2/JR\n\t\t\t\tra = ra2/JR\n\t\t\t#DSM occuring at time nu2*T2\n\t\t\tDV[i] = norm([a-b for a,b in zip(v_beg_l,v)])\n\n\t\t\tprint \"\\nleg no. 
\" + str(i+1) + \": \" + self.seq[i-1].name + \" to \" + self.seq[i].name \n\t\t\tprint \"\\tDuration (days): \" + str(T[i])\n\t\t\tprint \"\\tFly-by epoch: \" + str(t_P[i]) + \" (\" + str(t_P[i].mjd2000) + \" mjd2000) \" \n\t\t\tprint \"\\tFly-by altitude (km): \" + str((x[6+(i-1)*4]*self.seq[i-1].radius-self.seq[i-1].radius)/1000)\n\t\t\tprint \"\\tDSM after (days): \" + str(x[7+(i-1)*4]*T[i])\n\t\t\tprint \"\\tDSM magnitude (m/s): \" + str(DV[i]) \n\t\t\tprint \"\\tClosest approach distance: \" + str(close_d[i])\n\t\t\tprint \"\\tApoapsis at closest distance: \" + str(ra)\n\t\t\tprint \"\\tV in (m/s): \" + str(v_end_l)\n\t\t\tprint \"\\tV out (m/s): \" + str(v_out)\n\t\t\n\t\t\n\t\tprint \"\\nArrival at \" + self.seq[-1].name\n\t\tvel_inf = [a-b for a,b in zip(v_end_l,v_P[-1])]\n\t\tprint \"Arrival epoch: \" + str(t_P[-1]) + \" (\" + str(t_P[-1].mjd2000) + \" mjd2000) \" \n\t\tprint \"Arrival Vinf (m/s): \" + vel_inf.__repr__() + \" - \" + str(norm(vel_inf))\n\t\tprint \"Total mission time (days): \" + str(sum(T))\n\t\tprint \"Total DV (m/s): \" + str(sum(DV))", "def gen_4_tuples(it):\n\n return list(zip(it[0::4], it[1::4], it[2::4], it[3::4]))", "def gen_4_tuples(it):\n\n return list(zip(it[0::4], it[1::4], it[2::4], it[3::4]))", "def _covert_list_tensor_to_tuple_tensor(list_of_tensor):\n if isinstance(list_of_tensor, list):\n tuple_of_tensor = ()\n for tensor in list_of_tensor:\n tuple_of_tensor += (tensor,)\n return tuple_of_tensor\n return list_of_tensor", "def __parse_move_line(self, line):\n parts = re.sub('\\(.*?\\)', '', line).split()\n x, y = None, None\n for part in parts[:0:-1]:\n axis = part.upper()[0]\n value = float(part[1:])\n if axis in ['Z', 'F']:\n parts.remove(part)\n elif axis == 'X':\n x = value\n parts.remove(part)\n elif axis == 'Y':\n y = value\n parts.remove(part)\n if x is None or y is None:\n return None\n template = parts[0] + ' X{:.6f} Y{:.6f} ' + ' '.join(parts[1:])\n return [template, x, y]", "def triangulate(polyline):\n\timport p2t\n\tfrom ynlib.beziers import Point\n\n\t# Convert into p2t Points\n\tfor p in polyline:\n\t\tp = p2t.Point(p.x, p.y)\n\t\n\tcdt = p2t.CDT(polyline)\n\tp2ttriangles = cdt.triangulate()\n\t\n\ttriangles = []\n\tfor t in p2ttriangles:\n\t\ttriangles.append( (Point(t.a.x, t.a.y), Point(t.b.x, t.b.y), Point(t.c.x, t.c.y)) )\n\n\treturn triangles", "def convert_obs_to_state(self, observations):\n speed = observations[0]\n distance = observations[1]\n angle = observations[2]\n\n state_converted = [speed, distance, angle]\n\n return state_converted", "def coord_list_t(connected_data, t):\n coord_list = []\n for spot_id in connected_data:\n this_spot_data = connected_data[spot_id]\n row = this_spot_data[this_spot_data[:,0] == t]\n if (len(row) > 0):\n row = list(row[0])\n spot_coords = [spot_id] + row[2:5]\n coord_list.append(spot_coords)\n return coord_list", "def transpositions(self):\n a = self.cyclic_form\n res = []\n for x in a:\n nx = len(x)\n if nx == 2:\n res.append(tuple(x))\n elif nx > 2:\n first = x[0]\n for y in x[nx-1:0:-1]:\n res.append((first,y))\n return res", "def calcTrajPoints(trajectory_angles):\n \n traj_points = np.empty([trajectory_angles.shape[0], 3])\n tcp_origin = np.array([[0], [0], [0], [1]])\n \n \n for i in range(trajectory_angles.shape[0]):\n temp_our_angle = kf.SubstractOffsetToAngles(trajectory_angles[i])\n tmp_points = kf.CalculateDirectKinematicsTransformation(temp_our_angle, tcp_origin, 0, 7)\n traj_points[i,0] = tmp_points[0] \n traj_points[i,1] = tmp_points[1]\n traj_points[i,2] = tmp_points[2]\n 
\n return traj_points", "def populate_weight_tuple_list(list_object):\n tuple_list = []\n\n for i in range(len(list_object[0])):\n weight_tuple = (list_object[0][i], float(list_object[1][i]))\n tuple_list.append(weight_tuple)\n \n return tuple_list", "def readtrajectories(filelist, frames):\r\n filelen = len(filelist) - 2\r\n trajstart = filelen\r\n output = {}\r\n LtoeZ = []\r\n RtoeZ = []\r\n LeftToeCol = 29\r\n RightToeCol = 47\r\n for row in range(filelen):\r\n try:\r\n # Assign gait parameters to dictionary\r\n if filelist[row][0] == 'Trajectories':\r\n trajstart = row + 5\r\n except IndexError:\r\n continue\r\n output.update(tableread(filelist,trajstart,frames))\r\n sides = ['Left', 'Right']\r\n for side in sides:\r\n output[side+'StepTime'] = (frames[side+'End']-frames[side+'Start'])/100\r\n output[side+'FoffFraction'] = (frames[side+'Foff']-frames[side+'Start']) / output[side+'StepTime']\r\n try:\r\n output[side+'StepLen'] = abs(float(filelist[frames[side+'End']+trajstart][locals()[side+'ToeCol']+1]) - float(filelist[frames[side+'Start']+trajstart][locals()[side+'ToeCol']+1]))/1000\r\n except ValueError:\r\n output[side+'StepLen'] = 'NA'\r\n output[side+'SpeedCalc'] = output[side+'StepLen'] / output[side+'StepTime']\r\n #import pdb; pdb.set_trace()\r\n return output", "def transform_points(Points,R,t):\r\n return [transform_point(p,R,t) for p in Points]", "def _trajectory_to_multi_dof_joint_trajectory(self, p, v, a, j, s, c):\n\n msg = tm.MultiDOFJointTrajectory()\n point = tm.MultiDOFJointTrajectoryPoint()\n msg.points.append(point)\n\n #print(p)\n\n transform = gm.Transform()\n transform.translation.x = p[0]\n transform.translation.y = p[1]\n transform.translation.z = p[2]\n quaternion = tft.quaternion_from_euler(0.0, 0.0, p[3])\n transform.rotation.x = quaternion[0]\n transform.rotation.y = quaternion[1]\n transform.rotation.z = quaternion[2]\n transform.rotation.w = quaternion[3]\n point.transforms.append(transform)\n\n velocity = gm.Twist()\n velocity.linear.x = v[0]\n velocity.linear.y = v[1]\n velocity.linear.z = v[2]\n velocity.angular.z = v[3]\n point.velocities.append(velocity)\n\n acceleration = gm.Twist()\n acceleration.linear.x = a[0]\n acceleration.linear.y = a[1]\n acceleration.linear.z = a[2]\n point.accelerations.append(acceleration)\n\n return msg", "def tolists(self):\n return self._times, self._values", "def read_xyz_traj(filename):\n with open(filename, 'r') as traj_file:\n traj = traj_file.readlines()\n n_atoms = int(traj[0].strip()) # Get number of atoms from first line\n n_frames = int(len(traj) / (n_atoms + 2)) # Calculate number of frames (assuming n_atoms is constant)\n trajectory = {'atoms': np.empty((n_frames, n_atoms), dtype='U2'), # String of length 2\n 'coordinates': np.empty((n_frames, n_atoms, 3)), # Float\n 'headers': np.empty((n_frames,), dtype=object)} # Python object\n for frame in range(n_frames):\n start = frame * (n_atoms + 2) # Frame start\n end = (frame + 1) * (n_atoms + 2) # Frame end\n trajectory['coordinates'][frame] = [[float(i) for i in line.split()[1:4]] for line in traj[start + 2:end]]\n trajectory['atoms'][frame] = [line.split()[0] for line in traj[start + 2:end]]\n trajectory['headers'][frame] = (traj[start + 1].strip())\n return trajectory", "def __wrap_with_tuple(self) -> tuple:\r\n l = list()\r\n length = len(self.data)\r\n while self.idx < length:\r\n l.append(self.__parse())\r\n return tuple(l)", "def txyz_2_representation(self: Q, representation: str = \"\") -> List:\n\n symbolic = self.is_symbolic()\n\n if representation == 
\"\":\n rep = [self.t, self.x, self.y, self.z]\n\n elif representation == \"polar\":\n amplitude = (self.t ** 2 + self.x ** 2 + self.y ** 2 + self.z ** 2) ** (\n 1 / 2\n )\n\n abs_v = abs_of_vector(self).t\n\n if symbolic:\n theta = sp.atan2(abs_v, self.t)\n else:\n theta = math.atan2(abs_v, self.t)\n\n if abs_v == 0:\n theta_x, theta_y, theta_z = 0, 0, 0\n\n else:\n theta_x = theta * self.x / abs_v\n theta_y = theta * self.y / abs_v\n theta_z = theta * self.z / abs_v\n\n rep = [amplitude, theta_x, theta_y, theta_z]\n\n elif representation == \"spherical\":\n\n spherical_t = self.t\n\n spherical_r = (self.x ** 2 + self.y ** 2 + self.z ** 2) ** (1 / 2)\n\n if spherical_r == 0:\n theta = 0\n else:\n if symbolic:\n theta = sp.acos(self.z / spherical_r)\n\n else:\n theta = math.acos(self.z / spherical_r)\n\n if symbolic:\n phi = sp.atan2(self.y, self.x)\n else:\n phi = math.atan2(self.y, self.x)\n\n rep = [spherical_t, spherical_r, theta, phi]\n\n else:\n raise ValueError(f\"Oops, don't know representation: representation\")\n\n return rep", "def make_list(sv, piece):\r\n li=[tree_build(sv,x) for x in piece.split(Comma)] # process each element RECURSIVE\r\n res=(Comma, li, None) # triplet for a list: (\",\", [list of elements], None)\r\n return res", "def transforming_coordinates(self, coordinates_lists, transform): \n \n transformed_coordinates_lists = []\n for coordinates_list in coordinates_lists:\n transformed_coordinates_list = []\n for coordinate in coordinates_list:\n coordinate = tuple(coordinate)\n transformed_coordinate = list(transform(coordinate[0], coordinate[1]))\n transformed_coordinates_list.append(transformed_coordinate)\n transformed_coordinates_lists.append(transformed_coordinates_list)\n \n \n return transformed_coordinates_lists", "def db_converter(self, event):\n # Splits the arr on the semicolon to separate the pairs\n arr = str(self.input.get()).strip().split(';')\n for i, entry in enumerate(arr):\n # Splits the pairs into x and y\n new_entry = tuple(map(float, entry.strip().split(\",\")))\n # Adds the new pairs back into the array\n arr[i] = new_entry\n tuple_grapher(arr)", "def _sequence(game_record):\n seq = []\n for item in game_record.get_main_sequence():\n color, move = item.get_move()\n # color == None is entries that are not actual game play\n # move == None is a pass, which in theory we could try to\n # predict, but not yet\n if color is not None and move is not None:\n seq.append((color, move))\n return seq", "def get_point_list(self, string):\n a = re.findall('\\(\\d+\\.\\d+, \\d+\\.\\d+\\)', string)\n lst = []\n for tp in a:\n lst.append(self.get_tuple(tp))\n print lst", "def create_record_tuple(ilist):\n list_length = len(ilist)\n if list_length:\n rec_tuple = '('\n for row in list[0:list_length-1]:\n rec_tuple += str(row)\n rec_tuple += ','\n rec_tuple += str(list[list_length-1])\n rec_tuple += ')'\n else: rec_tuple = '()'\n return rec_tuple", "def tuple_from_sequence(*args):\n return tuple(args)", "def vec2tuple(x):\n return (x.x, x.y, x.z)", "def position_trajectory(self):\n return self._read(MX_POSITION_TRAJECTORY)", "def constructor(self) -> Callable[[List,List],'Trajectory']:\n return Trajectory", "def _parse_coords(self):\n\n coords = []\n\n while True:\n try:\n _, x, y = self._lines.current.split()\n coords.append((float(x), float(y)))\n except ValueError:\n break\n\n try:\n next(self._lines)\n except StopIteration:\n break\n\n return coords", "def _travel_times(self, trip_list, index=0):\n\n def distance_in_travel_time(dep_secs, arr_secs):\n t_dist 
= arr_secs - dep_secs\n if t_dist < 0:\n t_dist = self._DUMMY_SEPARATOR # min separation\n return t_dist\n\n if not trip_list:\n return []\n\n if 0 < index < len(trip_list):\n trip = trip_list[index]\n else:\n trip = trip_list[0]\n\n t_dists2 = [distance_in_travel_time(stop[3], tail[2]) for (stop, tail)\n in zip(trip.get_time_stops(), trip.get_time_stops()[1:])]\n return t_dists2", "def process_timestamps(self):\n self.conn = sqlite3.connect(':memory:')\n self.cursor = self.conn.cursor()\n self.cursor.execute('CREATE TABLE tracklog(dt PRIMARY KEY, lat REAL, lon REAL, elev REAL)')\n num_points = 0\n for d in gps.parse_gpx_iter(self.gpx_file):\n self.cursor.execute('INSERT INTO tracklog VALUES(?,?,?,?)', d)\n num_points += 1\n sys.stderr.write('The GPX file contained %d Points\\n' % num_points)\n self.conn.commit()", "def _parse_tour(self):\n\n tour = []\n\n while True:\n try:\n s = int(self._lines.current)\n if s == -1:\n return tour\n tour.append(s)\n except ValueError:\n break\n\n try:\n next(self._lines)\n except StopIteration:\n break\n\n return tour", "def make_lists(sv):\r\n \r\n mark_delayed(sv) # identify delayed objects\r\n make_pin_list(sv) # detect and initialize inputs (to false) \r\n make_old_list(sv) # create a list of used old/old \r", "def convert_trace_tuples(\n trace_tuples: Sequence[TraceTuple], export_destination: str, driver: str\n) -> List[TraceTuple]:\n convert_paths: List[TraceTuple] = []\n # Iterate over datasets and save\n trace_tuple: TraceTuple\n for trace_tuple in trace_tuples:\n convert_paths.append(\n rename_trace_tuple_paths(\n trace_tuple=trace_tuple,\n export_destination=export_destination,\n driver=driver,\n )\n )\n\n return convert_paths", "def to_tuple(waypoint):\n if isinstance(waypoint, dict):\n return (waypoint[\"lat\"], waypoint[\"lng\"])\n else:\n return waypoint", "def ReadTrajectory(trajFile):\n trajectory=[]\n with open(trajFile, \"r\") as tF:\n line = tF.readline()\n while line is not \"\":\n #first line is number of atoms\n N = int(line.strip())\n tF.readline().strip() # second line is a comment that we throw away\n\n q = []\n for i in range(N):\n line = tF.readline().strip().split(\" \")\n for c in line[1:]:\n if c is not \"\":\n q.append(float(c))\n trajectory.append(np.array(q))\n\n line = tF.readline()\n\n return trajectory, N", "def mk_lst_trans_met(self):\n\t\telem_rnge_I = [[21,30],[39,44],[46,48],[74,76],[78,80]]\n\t\telem_rnge=[]\n\t\tfor i in elem_rnge_I:\n\t\t\tel_strt=i[0]\n\t\t\tel_end=i[1]\n\t\t\trnge_sect=range(el_strt,el_end+1)\n\t\t\telem_rnge.extend(rnge_sect)\n\t\telements=[]\n\t\tfor i in elem_rnge:\n\t\t\telement=Element.from_Z(i)\t# Indice -> pymatgen element object\n\t\t\telements.append(element)\n\t\treturn elements", "def tile_coordinates(text):\n UL = (text[1]), (text[2]) # Upper Left\n UR = (text[3]), (text[2]) # Upper Right\n LR = (text[3]), (text[4]) # Lower Right\n LL = (text[1]), (text[4]) # Lower Left\n coordinates = (UL, UR, LR, LL)\n return text[0], [tuple(float(x) for x in xs) for xs in coordinates]", "def points (p, line: str) -> list:\n direction = line [0]\n steps = list (range (1, 1 + int (F.tail (line))))\n return F.map (point (p, direction)) (steps)", "def pointListForT(x, y, type):\n\n\tpointList = []\n\n\tif x < 10:\n\t\txString = \"0%d\" % x\n\telse:\n\t\txString = \"%d\" % x\n\n\tif x < 11:\n\t\txMString = \"0%d\" % (x - 1)\n\telse:\n\t\txMString = \"%d\" % (x - 1)\n\n\tif x < 9:\n\t\txPString = \"0%d\" % (x + 1)\n\telse:\n\t\txPString = \"%d\" % (x + 1)\n\n\tif y < 11:\n\t\tyMString = \"0%d\" % (y - 
1)\n\telse:\n\t\tyMString = \"%d\" % (y - 1)\n\n\tif y < 9:\n\t\tyPString = \"0%d\" % (y + 1)\n\telse:\n\t\tyPString = \"%d\" % (y + 1)\n\n\tif y < 10:\n\t\tyString = \"0%d\" % y\n\telse:\n\t\tyString = \"%d\" % y\n\n\tif type == 3:\t\t# Down\n\t\ttl = [\"%s%sTL\" % (xString, yString), 0.25, 0.25, [\t\"%s%sTR\" % (xMString, yString)]]\n\t\ttr = [\"%s%sTR\" % (xString, yString), 0.75, 0.25, [\t\"%s%sTL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\tbl = [\"%s%sBL\" % (xString, yString), 0.25, 0.75, [\t\"%s%sBR\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sTL\" % (xString, yPString)]]\n\t\tbr = [\"%s%sBR\" % (xString, yString), 0.75, 0.75, [\t\"%s%sBL\" % (xPString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\tcc = [\"%s%sCC\" % (xString, yString), 0.50, 0.50, [\t\"%s%sTL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBR\" % (xString, yString)]]\n\telif type == 4:\t\t# Left\n\t\ttl = [\"%s%sTL\" % (xString, yString), 0.25, 0.25, [\t\"%s%sTR\" % (xMString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBL\" % (xString, yString)]]\n\t\ttr = [\"%s%sTR\" % (xString, yString), 0.75, 0.25, [\t\"%s%sBR\" % (xString, yMString)]]\n\t\tbl = [\"%s%sBL\" % (xString, yString), 0.25, 0.75, [\t\"%s%sTL\" % (xString, yPString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\tbr = [\"%s%sBR\" % (xString, yString), 0.75, 0.75, [\t\"%s%sTR\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\tcc = [\"%s%sCC\" % (xString, yString), 0.50, 0.50, [\t\"%s%sTL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sTR\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBL\" % (xString, yString)]]\t\n\telif type == 5:\t\t# Up\n\t\ttl = [\"%s%sTL\" % (xString, yString), 0.25, 0.25, [\t\"%s%sTR\" % (xMString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\ttr = [\"%s%sTR\" % (xString, yString), 0.75, 0.25, [\t\"%s%sTL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBR\" % (xString, yMString)]]\n\t\tbl = [\"%s%sBL\" % (xString, yString), 0.25, 0.75, [\t\"%s%sBR\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\tbr = [\"%s%sBR\" % (xString, yString), 0.75, 0.75, [\t\"%s%sBL\" % (xPString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\tcc = [\"%s%sCC\" % (xString, yString), 0.50, 0.50, [\t\"%s%sTL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sTR\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBR\" % (xString, yString)]]\n\telse: # Type == 6\t# Right\n\t\ttl = [\"%s%sTL\" % (xString, yString), 0.25, 0.25, [\t\"%s%sBL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\ttr = [\"%s%sTR\" % (xString, yString), 0.75, 0.25, [\t\"%s%sBR\" % (xString, yMString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\tbl = [\"%s%sBL\" % (xString, yString), 0.25, 0.75, [\t\"%s%sBR\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sTL\" % (xString, yPString)]]\n\t\tbr = [\"%s%sBR\" % (xString, yString), 0.75, 0.75, [\t\"%s%sBL\" % (xPString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sTR\" % (xString, yString)]]\n\t\tcc = [\"%s%sCC\" % (xString, yString), 0.50, 0.50, [\t\"%s%sTR\" % (xString, 
yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBR\" % (xString, yString)]]\n\n\tpointList = [tl, tr, bl, br, cc]\n\n\treturn pointList", "def creaLE(venta): #Esta sección fue hecha por Ángel\n listaPGA = [] # Esto genera la lista necesaria para pasarlo al archivo\n for elemento in venta:\n listaN = elemento[0] + \",\"\n listaN += str(elemento[1]) + \"\\n\"\n listaPGA.append(listaN)\n return listaPGA", "def eventlist():\n\n infile = conf[\"run_path_derived\"] + 'LOCALIZED.txt'\n\n data = np.genfromtxt(infile, skip_header=1) \n\n mlt = cx.MAGtoMLT(data[:, 5], data[:, 0:5])\n\n # Swap mlat and mlon colums so in expected order (lat then long)\n data[:, [6,5]] = data[:, [5,6]]\n \n data = np.hstack((data, np.reshape(mlt, (mlt.shape[0], 1))))\n \n return data", "def arrange_value_tuples(reader, point_names):\n array_for_json = []\n # Gets the time stamp and value and adds it with the point name to be added to the database\n for row in reader:\n if len(row) == 1: # End of File\n break\n timestamp = datetime.strptime(row[0] + row[1], '%m/%d/%Y%H:%M:%S')\n\n for i in range(2, len(row)):\n # Currently we are simply ignoring cases of data loss\n if row[i] != \"No Data\" and row[i] != \"Data Loss\":\n array_for_json.append([point_names[i - 1], timestamp.timestamp(), row[i]])\n\n return array_for_json", "def serialize_tuple(self, obj):\n return '(' + ''.join([self.serialize(i) for i in obj]) + 't'", "def plot_trajectories_XYZ(t_start,t_stop):\n \n time, ankle_l_trajectory, ankle_r_trajectory,foot_l_contact,foot_r_contact,muscle_lh_activations, muscle_rh_activations,muscle_lh_forces,muscle_rh_forces,joint_lh_positions,joint_rh_positions = load_data()\n \n index_start = np.where(time == t_start)[0][0]\n index_end = np.where(time == t_stop)[0][0]\n \n time = time[index_start:index_end+1]\n ankle_l_trajectory = ankle_l_trajectory[index_start:index_end+1,:]\n ankle_r_trajectory = ankle_r_trajectory[index_start:index_end+1,:]\n \n #time=np.linspace(1,len(ankle_l_trajectory[:,0]),len(ankle_l_trajectory[:,0]));\n \n plt.figure('Trajectories')\n plt.subplot(311)\n plt.plot(time,ankle_l_trajectory[:,0])\n plt.plot(time,ankle_r_trajectory[:,0])\n #plt.title('Trajectory of the X component')\n plt.xlabel('Time [s]')\n plt.ylabel('X Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(312)\n plt.plot(time,ankle_l_trajectory[:,1])\n plt.plot(time,ankle_r_trajectory[:,1])\n #plt.title('Trajectory of the Y component')\n plt.xlabel('Time [s]')\n plt.ylabel('Y Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(313)\n plt.plot(time,ankle_l_trajectory[:,2])\n plt.plot(time,ankle_r_trajectory[:,2])\n #plt.title('Trajectory of the Z component')\n plt.xlabel('Time [s]')\n plt.ylabel('Z Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n# plt.suptitle('Decomposition of the trajectories of the hind feet')\n return" ]
[ "0.60951096", "0.6015128", "0.5985093", "0.5921288", "0.5903913", "0.5848871", "0.5752752", "0.5743873", "0.5734865", "0.5705948", "0.56735694", "0.5643993", "0.55531675", "0.5550036", "0.55357385", "0.5534374", "0.550818", "0.5483543", "0.5480718", "0.5441098", "0.5439881", "0.5385624", "0.5363065", "0.53434515", "0.52942", "0.5265581", "0.52618796", "0.52603185", "0.52405906", "0.5237256", "0.5232832", "0.52316844", "0.5222555", "0.5218484", "0.5208251", "0.5208251", "0.5194154", "0.5179009", "0.5162729", "0.51561755", "0.5146933", "0.51391387", "0.5134398", "0.5134146", "0.5131708", "0.5118235", "0.5113096", "0.51125914", "0.510299", "0.5097575", "0.5092759", "0.5082477", "0.5076129", "0.50754684", "0.50741446", "0.50705945", "0.5067481", "0.5067481", "0.50654376", "0.50638914", "0.5059461", "0.5057738", "0.5055144", "0.5050949", "0.50477487", "0.503841", "0.5031993", "0.5023962", "0.5020801", "0.5019163", "0.50171196", "0.5017068", "0.50142556", "0.5005651", "0.4995379", "0.49890795", "0.49878576", "0.49791577", "0.49784753", "0.49773502", "0.49733245", "0.49693894", "0.4966897", "0.49538574", "0.494839", "0.4936646", "0.49298304", "0.49288407", "0.49267125", "0.4924866", "0.49207833", "0.4918709", "0.49144992", "0.49117318", "0.4908979", "0.49046052", "0.49021062", "0.49016562", "0.4890996", "0.48901173" ]
0.6233229
0
This function will be used to train a model and save it for a given training set.
def train(X_train, y_train, save_model='model.h5'): # Hyperparameters batch_size = 32 epochs = 30 learning_rate = 0.001 # Loading model from model.py model = m(input_height=IMAGE_HEIGHT, input_width=IMAGE_WIDTH) # Plot model as image plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True) # If trained model exist already then load first for further training if tf.gfile.Exists(save_model): model.load_weights(save_model) model.compile(loss='mse', optimizer=Adam(learning_rate)) # Only save model which has best performed on validation set. # These are callbacks which are being used in "model.fit" call earlyStopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='min') mcp_save = ModelCheckpoint('model.h5', save_best_only=True, monitor='val_loss', mode='min') reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min') # Train the model model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, callbacks=[earlyStopping, mcp_save, reduce_lr_loss], validation_split=0.2, shuffle=True) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trainAndSaveModels():\n print \"\\nTraining models...\"\n\n #Use the best-performed train and test splitted data \n X_train = pickle.load(open('X_train.sav','rb'))\n X_test = pickle.load(open('X_test.sav','rb'))\n Y_train = pickle.load(open('Y_train.sav','rb'))\n \n #train models\n lassoModel = LassoPrediction(X_train, X_test, Y_train)\n forestModel = RandomForestPrediction(X_train, X_test, Y_train)\n boostingModel = GradientBoosting(X_train, X_test, Y_train)\n \n #save the modes\n pickle.dump(lassoModel,open('lasso_Model.sav','wb'))\n pickle.dump(forestModel,open('forest_Model.sav','wb'))\n pickle.dump(boostingModel,open('sgb_Model.sav','wb'))", "def fit(self, train_set, train_labels, validation_set=None, validation_labels=None, restore_previous_model=False):\n\n with tf.Session() as self.tf_session:\n self._initialize_tf_utilities_and_ops(restore_previous_model)\n self._train_model(train_set, train_labels, validation_set, validation_labels)\n self.tf_saver.save(self.tf_session, self.models_dir + self.model_name)", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def train(\n model_path=\"./trained_model/\",\n model_file_name=\"model.h5\",\n training_data_path=\"./train.csv\",\n):\n config = SConfig(training_data_path=training_data_path)\n s2s = Seq2Seq(config)\n s2s.fit()\n s2s.save_model(path_to_model=model_path, model_file_name=model_file_name)", "def fit_store(X, y):\n print(\"Fitting model to training set...\")\n model = pr.build_model.fit_model(X, y)\n pickle.dump(model, open(\"models/\" + \"model\" + \".pkl\", \"wb\"))", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def train(self):\n # setup model\n self.createModel()\n self.setGenerators()\n self.buildCallbacks()\n self.printParameters()\n \n # train model\n _ = self.model.fit_generator(\n generator = self.trainGen,\n validation_data = self.validateGen,\n steps_per_epoch = self.steps_per_epoch,\n validation_steps = self.validation_steps,\n epochs = self.epochs,\n use_multiprocessing = True,\n callbacks = self.callbacks)\n # clear save paths to avoid overwriting accidentaly\n self.saveName = None", "def train_model(self):\n if not self.is_exist(self.path_model_directory):\n # Then create the parent folder\n os.makedirs(self.path_model_directory)\n\n # Create a meta-data pickle for the model\n self.create_meta_data_pickle()\n\n # Necessary meta-data file must be created before starting the training. Check if the file exists\n if self.is_exist(self.path_model_metadata):\n\n # We do not need to train a model if there is already a best model for the same training exist\n try:\n self.model = load_model(self.path_best_model)\n return\n except:\n self.log_event('There is no best trained model found in the parent folder. Going with the training...')\n\n # Load the model meta-data\n self.load_model_metadata()\n self.encoding_vector_size = self.number_of_distinct_items\n\n # Iterate trough the split data for the training\n for split_number in range(self.k_split):\n split_path = f'split_{str(split_number)}/'\n split_directory = self.path_model_directory + split_path\n\n # Check the split directory is already created. 
If it is, then we can directly start the training by using the existing data\n if self.is_exist(split_directory):\n try:\n self.load_best_tuned_model(split_number)\n except (IndexError, FileNotFoundError):\n self.load_fold_k_data_and_fit(split_number=int(split_number))\n\n else:\n # Create a folder for the split data and prepare the data for the training\n os.makedirs(split_directory)\n\n # Create an array which will contain train features-labels and test features-labels\n train_array = np.full(4, fill_value=self.mask_value, dtype=object)\n train_index = 0\n for position, split_name in enumerate(['train_split_', 'test_split_']):\n training_features_directory = split_directory + f'{split_name}{str(split_number)}_all_training_features.data'\n training_targets_directory = split_directory + f'{split_name}{str(split_number)}_all_training_targets.data'\n fold_directory = self.path_shared_folds + f'{split_name}{str(split_number)}.fold'\n\n self.process_training_data(fold_directory=fold_directory)\n\n self.save_data_to_disk(data_to_save=self.all_features, path_to_save=training_features_directory)\n train_array[train_index] = self.all_features\n train_index += 1\n self.all_features = None # Memory Management\n\n self.save_data_to_disk(data_to_save=self.all_targets, path_to_save=training_targets_directory)\n train_array[train_index] = self.all_targets\n train_index += 1\n self.all_targets = None # Memory Management\n\n # Assign the input data to respective variables for the training\n self.train_features = train_array[0]\n self.train_targets = train_array[1]\n self.test_features = train_array[2]\n self.test_targets = train_array[3]\n del train_array\n\n self.start_hyper_parameter_tuning(split_number)\n\n self.retrieve_best_model(metric=self.hyper_parameters['metric'])", "def set_train(self):\n self.model.train()", "def train(self):\n self.log(f\"{self.cur_file_path}\\t\\tInfo: train method invoked!\")\n self.log(f\"{self.cur_file_path}\\t\\tInfo: training {self.model.__class__.__name__} model!\")\n\n self.model.fit(self.trainX, self.trainY)", "def trainModel( self, featureTrain, classTrain):", "def train_model(x_data, y_data, model_type):\n # def lr model object\n clr = None\n try:\n clr = model_list[model_type]()\n except Exception as e:\n print(e)\n # fit model\n clr.fit(x_data, y_data)\n # save model in pkl file\n try:\n joblib.dump(clr, \"model/\" + model_type + \".pkl\")\n except Exception as e:\n print(e)\n return clr", "def train_full_model(X,y_train):\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(X)\n\n #chose model\n # model = RandomForestClassifier()\n model = GradientBoostingClassifier()\n\n\n #train\n print(\"train model\")\n tic = time.time()\n model.fit(x_train,y_train)\n tac = time.time()\n print(\"elapsed time\", tac-tic)\n\n #save data\n save_data(model,scaler,x_train,y_train)", "def train_model_4(model, X_train, y_train, image_name):\n # Train the model\n model.fit(X_train, y_train)\n \n # Save the model\n model_file = os.path.join(OUTPUT_DIR,\n \"{}_model.joblib\".format(image_name))\n joblib.dump(model, model_file)\n \n return model, model", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()", "def model_train(data_dir,test=False):\r\n \r\n if not os.path.isdir(MODEL_DIR):\r\n os.mkdir(MODEL_DIR)\r\n\r\n if test:\r\n print(\"... test flag on\")\r\n print(\"...... subsetting data\")\r\n print(\"...... 
subsetting countries\")\r\n \r\n ## fetch time-series formatted data\r\n ts_data = fetch_ts(data_dir)\r\n\r\n ## train a different model for each data sets\r\n for country,df in ts_data.items():\r\n if test and country not in ['all','united_kingdom']:\r\n continue\r\n model_name = re.sub(\"\\.\",\"_\",str(MODEL_VERSION))\r\n saved_model = os.path.join(MODEL_DIR,\r\n \"sl-{}-{}.joblib\".format(country,model_name))\r\n saved_test_model = os.path.join(MODEL_DIR,\r\n \"test-{}-{}.joblib\".format(country,model_name))\r\n saved_baseline = os.path.join(BASELINE_DIR,\r\n \"b-sl-{}-{}.joblib\".format(country,model_name))\r\n saved_test_baseline = os.path.join(BASELINE_DIR,\r\n \"b-test-{}-{}.joblib\".format(country,model_name))\r\n if (test and (not os.path.isfile(saved_test_model))) or ((not test) and (not os.path.isfile(saved_model))):\r\n _model_train(df,country,test=test)\r\n if (test and (not os.path.isfile(saved_test_baseline))) or ((not test) and (not os.path.isfile(saved_baseline))):\r\n _baseline_train(df,country,test=test)", "def train_model(self):\n self.logger.info('Loading the data...')\n train_data = self.load_data(split=\"train\")\n dev_data = self.load_data(split=\"dev\")\n self.config.best_model = os.path.join(self.config.output_dir,\"best_model\")\n self.logger.info('Training the model, outputdir=%s...,best_model=%s' % (self.config.output_dir,self.config.best_model))\n\n train_params = {\n \"overwrite_output_dir\" : True,\n \"reprocess_input_data\": True,\n \"learning_rate\" : self.config.learning_rate,\n \"num_train_epochs\" : self.config.num_train_epochs,\n \"train_batch_size\" : self.config.train_batch_size,\n \"eval_batch_size\" : self.config.eval_batch_size,\n \"gradient_accumulation_steps\": self.config.gradient_accumulation_steps,\n \"use_early_stopping\" : self.config.early_stopping,\n \"fp16\" : False,\n \"classification_report\" : True,\n \"evaluate_during_training\" : True,\n \"evaluate_during_training_verbose\" : True,\n \"best_model_dir\": self.config.best_model,\n \"save_model_every_epoch\" : self.config.save_model_every_epoch,\n \"save_steps\" : self.config.save_steps,\n \"save_optimizer_and_scheduler\" : self.config.save_optimizer_and_scheduler,\n \"save_best_model\": True,\n }\n\n ## train the model \n self.model.train_model(\n train_data,\n eval_data=dev_data,\n output_dir=self.config.output_dir,\n show_running_loss=False,\n args=train_params,\n )\n\n ## backing up the config and create pointer to best model \n with open(os.path.join(self.config.best_model,\"trainer_config.json\"),'w') as mconfig:\n mconfig.write(json.dumps(self.config.__dict__))\n self.config.existing_model = self.config.best_model", "def train():\n import training\n\n # Ensure output directories exist\n os.makedirs(os.path.dirname(cfg.scaler_path), exist_ok=True)\n os.makedirs(cfg.model_path, exist_ok=True)\n os.makedirs(cfg.log_path, exist_ok=True)\n\n # Load (standardized) input data and target values\n tr_x, tr_y, _ = _load_data(cfg.training_set, is_training=True)\n val_x, val_y, _ = _load_data(cfg.validation_set)\n\n # Try to create reproducible results\n np.random.seed(cfg.initial_seed)\n\n # Save free parameters to disk\n utils.log_parameters(cfg.training, os.path.join(cfg.model_path,\n 'parameters.json'))\n\n training.train(tr_x, tr_y, val_x, val_y)", "def train(self, train_set=dt.treat_data(dt.load_data(\"data/train.csv\"))):\n if self.model is None:\n return\n my_callback = keras.callbacks.callbacks.EarlyStopping(monitor='loss', min_delta=0.0, patience=1000, verbose=2,\n mode='auto', 
baseline=None, restore_best_weights=False)\n\n train_data, train_labels = train_set\n history = self.model.fit(x=train_data, y=train_labels, epochs=100000, batch_size=45,\n callbacks=[my_callback], verbose=2, shuffle=True)\n self.model.save(\"titanic_\" + str(time.time()) + \".h5\")\n return history", "def set_model_for_train(self):\n if self.train_time is None:\n self.train_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n\n self.curr_folder = data_functions.create_path(\n self.save_path, self.train_time)\n logger.info(f\"training results will be stored in: {self.curr_folder}\")\n\n self.save_model_params()\n self.train_generator, self.val_generator = \\\n self.clarifruit_train_val_generators()\n keras_logs_path = self.set_model_checkpint()\n\n return keras_logs_path", "def set_train(self):\n for m in self.models.values():\n m.train()", "def train_model(self, *args, **kwargs):\n self.model.train(self.training, *args, **kwargs)", "def train_and_save(self, checkpoint_dir):\n dataset = self._read_dataset(self._train_dataset_path)\n features, labels = self.get_features_and_labels(dataset)\n self._model.partial_fit(features, labels, classes=self._classes)\n checkpoint_path = self._save_model(checkpoint_dir)\n return checkpoint_path", "def training(self):\n self.model.fit(self.train_x, self.train_y)", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def train(model_name, batch_size, steps_per_epoch, epochs, validation_steps, \n model_file=None, save_path=None):\n \n print(\"- Loading configuration...\")\n if model_name in models_default_params:\n default_params = models_default_params[model_name]\n else:\n print(\"Error: the model '{}' has not been implemented\".format(model_name))\n return\n custom_objects = default_params['custom_objects']\n patch_size = default_params['patch_size']\n if save_path is None:\n save_path = default_params['default_path']\n if os.path.isfile(save_path):\n print(\"Warning: {} is an existing file and will be overwritten.\".format(save_path))\n print(\"- Configuration loaded.\")\n \n print(\"- Loading datasets...\")\n train_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Training/RGB/\",\n y_directory = \"datasets/Potsdam/Training/Labels/\",\n patch_size = patch_size)\n val_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Validation/RGB/\",\n y_directory = \"datasets/Potsdam/Validation/Labels/\",\n patch_size = patch_size)\n print(\"- Data loaded.\")\n \n print(\"- Initialising model...\")\n if(model_file is not None): # Further train existing model\n model = keras.models.load_model(model_file, custom_objects=custom_objects)\n else: # Create new model\n if model_name == 'fcn':\n model = fcn.make_fcn_resnet((patch_size, patch_size, channels), nb_labels, \n use_pretraining=False, freeze_base=False)\n elif model_name == 'pspnet':\n model = pspnet.build_pspnet(nb_classes=nb_labels, resnet_layers=50,\n input_shape=patch_size)\n elif model_name == 'mobilenetv2':\n model = mobilenetv2.MobileNetv2((patch_size, patch_size, channels), nb_labels) \n\n model.compile(\n optimizer = optimizers.Adam(lr = 0.00001),\n loss = losses.categorical_crossentropy,\n metrics = [metrics.categorical_accuracy]) \n model.summary() \n print(\"- Model initialised.\")\n \n tensorboard = callbacks.TensorBoard(log_dir='./logs')\n csv_logger = callbacks.CSVLogger('logs/training.csv')\n checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n save_best_only=True)\n \n 
print(\"- Starting training.\")\n model.fit_generator(\n generator=train_gen,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n validation_data=val_gen,\n validation_steps=validation_steps,\n # callbacks=[checkpoint, csv_logger]\n )\n print(\"- Training complete.\")\n \n model.save(save_path)\n print(\"- Model saved to {}\".format(save_path))", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def train(self, dataset, model_dir):\n raise NotImplementedError", "def save_trained_model(self):\n save_keras_sequential(self.model, self.RELATIVE_DATA_DIRECTORY, self.get_name())\n logger.info(f\"DQL Trader: Saved trained model\")", "def train(\n self,\n training_set,\n validation_set=None,\n test_set=None,\n save_path=\"model\",\n return_state_dict: bool = False,\n **kwargs,\n ):\n # ====== General setup =======\n output_features = self.model.output_features\n\n # Only use signals when on the main thread to avoid issues with CherryPy\n # https://github.com/ludwig-ai/ludwig/issues/286\n if threading.current_thread() == threading.main_thread():\n # set the original sigint signal handler\n # as we want to restore it at the end of training\n self.original_sigint_handler = signal.getsignal(signal.SIGINT)\n signal.signal(signal.SIGINT, self.set_steps_to_1_or_quit)\n\n metrics_names = get_metric_names(output_features)\n\n # ====== Setup file names =======\n model_hyperparameters_path = None\n tensorboard_log_dir = None\n if self.is_coordinator():\n os.makedirs(save_path, exist_ok=True)\n model_hyperparameters_path = os.path.join(save_path, MODEL_HYPERPARAMETERS_FILE_NAME)\n tensorboard_log_dir = os.path.join(save_path, \"logs\")\n\n # Sync save_path across the workers\n save_path = self.distributed.broadcast_object(save_path or \"\")\n\n training_progress_tracker_path = None\n training_checkpoints_path = None\n if save_path:\n training_progress_tracker_path = os.path.join(save_path, TRAINING_PROGRESS_TRACKER_FILE_NAME)\n training_checkpoints_path = os.path.join(save_path, TRAINING_CHECKPOINTS_DIR_PATH)\n\n self.callback(\n lambda c: c.on_trainer_train_setup(self, save_path, self.is_coordinator()), coordinator_only=False\n )\n\n # ====== Setup session =======\n checkpoint_manager = None\n checkpoint = self.distributed.create_checkpoint_handle(\n dist_model=self.dist_model, model=self.model, optimizer=self.optimizer, scheduler=self.scheduler\n )\n checkpoint_manager = CheckpointManager(checkpoint, training_checkpoints_path, device=self.device)\n\n # ====== Setup Tensorboard writers =======\n train_summary_writer = None\n validation_summary_writer = None\n test_summary_writer = None\n if self.is_coordinator() and not self.skip_save_log and tensorboard_log_dir:\n train_summary_writer = SummaryWriter(os.path.join(tensorboard_log_dir, TRAINING))\n if validation_set is not None and validation_set.size > 0:\n validation_summary_writer = SummaryWriter(os.path.join(tensorboard_log_dir, VALIDATION))\n if test_set is not None and test_set.size > 0:\n test_summary_writer = SummaryWriter(os.path.join(tensorboard_log_dir, TEST))\n\n # ================ Resume logic ================\n self.callback(lambda c: c.on_resume_training(self.is_coordinator()))\n\n should_resume = self.resume and self.resume_files_exist(\n training_progress_tracker_path, training_checkpoints_path\n )\n # make sure all workers are on the same page about resuming.\n should_resume = 
self.distributed.broadcast_object(should_resume, name=\"should_resume\")\n\n if should_resume:\n try:\n progress_tracker = self.resume_training_progress_tracker(training_progress_tracker_path)\n self.resume_weights_and_optimizer(training_checkpoints_path, checkpoint)\n logger.info(\"Resuming training from previous run.\")\n except Exception:\n # This may happen if model training is interrupted after the progress tracker is initialized\n # but before any real training progress is made.\n progress_tracker = get_new_progress_tracker(\n batch_size=self.batch_size,\n learning_rate=self.base_learning_rate,\n best_eval_metric_value=get_initial_validation_value(self.validation_metric),\n best_increase_batch_size_eval_metric=get_initial_validation_value(\n self.increase_batch_size_eval_metric\n ),\n output_features=output_features,\n )\n logger.info(\"Failed to resume training from previous run. Creating fresh model training run.\")\n else:\n progress_tracker = get_new_progress_tracker(\n batch_size=self.batch_size,\n learning_rate=self.base_learning_rate,\n best_eval_metric_value=get_initial_validation_value(self.validation_metric),\n best_increase_batch_size_eval_metric=get_initial_validation_value(self.increase_batch_size_eval_metric),\n output_features=output_features,\n )\n logger.info(\"Creating fresh model training run.\")\n\n # Distributed: broadcast initial variable states from rank 0 to all other processes.\n # This is necessary to ensure consistent initialization of all workers when\n # training is started with random weights or restored from a checkpoint.\n self.distributed.sync_model(self.dist_model)\n self.distributed.sync_optimizer(self.optimizer)\n self.scheduler.load_state_dict(self.distributed.broadcast_object(self.scheduler.state_dict()))\n\n # For DeepSpeed, we need to set the batch size here in case it was modfied during auto-tuning\n self.distributed.set_batch_size(self.dist_model, self.batch_size)\n\n set_random_seed(self.random_seed)\n\n try:\n with training_set.initialize_batcher(\n batch_size=self.batch_size,\n should_shuffle=self.should_shuffle,\n random_seed=self.random_seed,\n distributed=self.distributed,\n ignore_last=True,\n augmentation_pipeline=self.model.get_augmentation_pipelines(),\n ) as batcher:\n # ================ Training Loop ================\n self.total_steps = get_total_steps(self.epochs, batcher.steps_per_epoch, self.train_steps)\n\n # Get the terminal steps per checkpoint.\n final_steps_per_checkpoint = get_final_steps_per_checkpoint(\n batcher.steps_per_epoch,\n self.steps_per_checkpoint,\n self.checkpoints_per_epoch,\n self.is_coordinator(),\n )\n final_steps_per_checkpoint = min(final_steps_per_checkpoint, self.total_steps)\n early_stopping_steps = final_steps_per_checkpoint * self.early_stop\n\n # Initialize the learning rate scheduler.\n self.scheduler = LRScheduler(\n self.config.learning_rate_scheduler,\n self.optimizer,\n steps_per_checkpoint=final_steps_per_checkpoint,\n total_steps=self.total_steps,\n )\n\n if self.is_coordinator():\n logger.info(\n f\"Training for {self.total_steps} step(s), approximately \"\n f\"{int(self.total_steps / batcher.steps_per_epoch)} epoch(s).\"\n )\n if self.early_stop < 0:\n logger.info(\"Early stopping policy: None\")\n else:\n logger.info(\n f\"Early stopping policy: {self.early_stop} round(s) of evaluation, or \"\n f\"{early_stopping_steps} step(s), approximately \"\n f\"{int(early_stopping_steps / batcher.steps_per_epoch)} epoch(s).\\n\"\n )\n logger.info(f\"Starting with step {progress_tracker.steps}, 
epoch: {progress_tracker.epoch}\")\n\n progress_bar_config = {\n \"desc\": \"Training\",\n \"total\": self.total_steps,\n \"disable\": is_progressbar_disabled(),\n \"file\": sys.stdout,\n }\n progress_bar = LudwigProgressBar(self.report_tqdm_to_ray, progress_bar_config, self.is_coordinator())\n\n while progress_tracker.steps < self.total_steps:\n # note that batch size may change over epochs\n batcher.set_epoch(progress_tracker.epoch, progress_tracker.batch_size)\n\n # epoch init\n start_time = time.time()\n\n # Reset the metrics at the start of the next epoch\n self.dist_model.train() # Sets model to training mode.\n self.model.reset_metrics()\n\n self.callback(lambda c: c.on_epoch_start(self, progress_tracker, save_path))\n\n # Trains over a full epoch of data.\n should_break = self._train_loop(\n batcher,\n progress_tracker,\n save_path,\n train_summary_writer,\n progress_bar,\n training_set,\n validation_set,\n test_set,\n start_time,\n validation_summary_writer,\n test_summary_writer,\n model_hyperparameters_path,\n output_features,\n metrics_names,\n checkpoint_manager,\n final_steps_per_checkpoint,\n early_stopping_steps,\n )\n\n # ================ Post Training Epoch ================\n progress_tracker.epoch += 1\n self.callback(lambda c: c.on_epoch_end(self, progress_tracker, save_path))\n\n if self.is_coordinator():\n # ========== Save training progress ==========\n logger.debug(\n f\"Epoch {progress_tracker.epoch} took: \"\n f\"{time_utils.strdelta((time.time() - start_time) * 1000.0)}.\"\n )\n if not self.skip_save_progress:\n checkpoint_manager.save(progress_tracker.steps)\n if self.is_coordinator():\n progress_tracker.save(os.path.join(save_path, TRAINING_PROGRESS_TRACKER_FILE_NAME))\n\n if not self.skip_save_model and self.skip_all_evaluation:\n # All evaluation was skipped, so save the current step as the best so far.\n checkpoint_manager.save_best(progress_tracker.steps)\n\n # Early stop if needed.\n if should_break:\n break\n finally:\n # ================ Finished Training ================\n self.callback(\n lambda c: c.on_trainer_train_teardown(self, progress_tracker, save_path, self.is_coordinator()),\n coordinator_only=False,\n )\n\n if train_summary_writer is not None:\n train_summary_writer.close()\n if validation_summary_writer is not None:\n validation_summary_writer.close()\n if test_summary_writer is not None:\n test_summary_writer.close()\n\n if not self.skip_save_model and self.skip_all_evaluation:\n # All evaluation was skipped, so save the current step as the best so far.\n checkpoint_manager.save_best(progress_tracker.steps)\n\n if not self.skip_save_progress:\n checkpoint_manager.close()\n\n # Load the best weights from saved checkpoint\n state_dict = None\n if self.distributed.is_coordinator():\n if not self.skip_save_model:\n state_dict = checkpoint_manager.get_best_checkpoint_state_for_inference(self.return_device)\n if not return_state_dict:\n if self.distributed.is_model_parallel():\n # Assume the full weights cannot fit in memory on GPU\n self.model = self.model.cpu()\n _, unexpected_keys = self.model.load_state_dict(state_dict, strict=False)\n assert unexpected_keys == [], f\"Unexpected keys found in state dict: {unexpected_keys}\"\n elif return_state_dict:\n state_dict = self.model.cpu().state_dict()\n\n # When running with Ray, we only need to return the state dict, as it's faster and cheaper to send the\n # state dict over the network than to load the model state here, serialize it back to a state dict, then\n # load it back on the head node.\n 
return_value = self.model if not return_state_dict else state_dict\n\n # restore original sigint signal handler\n if self.original_sigint_handler and threading.current_thread() == threading.main_thread():\n signal.signal(signal.SIGINT, self.original_sigint_handler)\n\n return (\n return_value,\n progress_tracker.train_metrics,\n progress_tracker.validation_metrics,\n progress_tracker.test_metrics,\n )", "def train(self, X_train, y_train):\n self.model.fit(X_train, y_train)", "def train(fitted_model_filename):\n click.echo(\"Mode: training.\\n\")\n defaults = get_defaults()\n\n fitted_model_filename = add_extension(fitted_model_filename)\n\n # derive final path for fitted model as base output path for fitted models + model filename\n fitted_model_path = os.path.join(defaults.OUTPUT.FITTED_MODELS_PATH, fitted_model_filename)\n\n new_options = [\"OUTPUT.FITTED_MODEL_PATH\", fitted_model_path]\n\n boot_data = bootstrap(new_options, mode=\"train\")\n defaults = boot_data['defaults']\n\n X_train, y_train = boot_data['data']\n fitted_model = train_model(X_train, y_train, defaults)\n\n # dump fitted model\n os.makedirs(defaults.OUTPUT.FITTED_MODELS_PATH, exist_ok=True)\n save_obj(fitted_model, defaults.OUTPUT.FITTED_MODEL_PATH)", "def train(self, x_data, y_data):\n for model in self.list_of_models:\n model.fit(x_data, y_data)\n self.trained_models.append(model)", "def fit_model(\n model,\n trainset,\n valset,\n sensor,\n target_var,\n output_models_path,\n early_stop_patience,\n epochs,\n time_stmp_str,\n out_model_name,\n history_out_name,\n):\n train_X = trainset[\"X\"]\n train_y = trainset[\"y\"]\n\n val_X = valset[\"X\"]\n val_y = valset[\"y\"]\n\n # fit network\n # que corte despues de no mejorar mucho\n early_stopping = EarlyStopping(\n monitor=\"val_loss\",\n patience=early_stop_patience,\n verbose=1,\n mode=\"min\",\n restore_best_weights=True,\n )\n # que use el mejor modelo\n checkpoint = ModelCheckpoint(\n out_model_name, save_best_only=True, monitor=\"val_loss\", mode=\"min\", verbose=2\n )\n\n history = model.fit(\n train_X,\n train_y,\n epochs=epochs,\n validation_data=(val_X, val_y),\n verbose=1,\n shuffle=False,\n callbacks=[checkpoint, early_stopping],\n )\n\n with open(history_out_name, \"wb\") as file_pi:\n pickle.dump(history, file_pi)\n\n return load_model(out_model_name)", "def train_network(self):\n if self.trainData:\n if self.verbose:\n print('Started training...')\n\n for epoch in range(135):\n pass\n # save the model\n else:\n if self.verbose:\n print('No train data available')", "def train(self, training_set):\n\n lengths = [ seq.shape[0] for seq in training_set ]\n\n #preprocessing the training set to obtain the desired shape\n concatenated_set = self.preprocessing( training_set )\n\n #fitting the model\n self.model.fit(concatenated_set, lengths)\n\n return", "def _set_train(self):\n\n if not self.model.__dict__['training']:\n self.model.train()", "def train(self, x, t):\n for i in range(self.number_model):\n curr_model = self.all_model[i]\n curr_model.fit(x, t)", "def train(self, training_set):\n self.originModel.train(training_set)\n return self", "def trainAndPredict(self):\r\n print(\"train\")\r\n filename= 'finalized_model.sav'\r\n # train the algorithm on training data and predict using the testing data\r\n model = self.svc_model.fit(self.X.T, self.Y)\r\n pickle.dump(model, open(filename, 'wb'))\r\n #model = pickle.load(open(filename, 'rb'))\r\n pred1 =model.predict(self.TestSet.T)\r\n # print the accuracy score of the model\r\n print(\"LinearSVC accuracy : \", 
accuracy_score(self.TestSetY, pred1, normalize=True))", "def train_model(self):\r\n alpha, accuracy_rate = self.select_model()\r\n # Initialize logistic regression with alpha(learning rate)\r\n lg = logisticregression(C=alpha)\r\n # Train the model.\r\n lg.fit(self.training_data, self.target_data)\r\n # Save the trained model as .pkl file.\r\n joblib.dump(value=lg, filename=self.intention_id+'.pkl', compress=1)\r\n print \"Estimated Parameters of Logistic Regression\"\r\n # Estimated parameters of logistic regression.\r\n print lg.get_params()", "def train(\n train_sets: tuple,\n test_sets: tuple,\n input_shape: tuple = (1, 128, 128, 1),\n model_version=\"1.0.0\",\n epochs: int = 100,\n classes: int = 2,\n batch_size: int = 1,\n verbose=1,\n out_dir: str = \"saved_models\"):\n (x_train, y_train), (x_test, y_test) = train_sets, test_sets\n y_train = keras.utils.to_categorical(y_train, classes)\n y_test = keras.utils.to_categorical(y_test, classes)\n m = get_model(model_version)\n if not m:\n return\n model = m.build_model(input_shape)\n model.compile(\n loss=BinaryCrossentropy(),\n optimizer=RMSprop(learning_rate=0.0001),\n metrics=['accuracy']\n )\n saver = ModelSaver(out_dir)\n csv_logger = CSVLogger(\n \"%s/%s/log.csv\" %\n (out_dir, datetime.datetime.now().date().strftime(\"%Y_%m_%d\")),\n append=True,\n separator=','\n )\n history = model.fit(\n x_train,\n y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=verbose,\n validation_data=(x_test, y_test),\n callbacks=[saver, csv_logger]\n )\n model.save(\"%s/%s/final.hd5\" %\n (out_dir, datetime.datetime.now().date().strftime(\"%Y_%m_%d\")))\n print(\"Model saved in %s as final.hd5\" % out_dir)\n plot_results(\n history,\n epochs,\n out_dir\n )", "def train_model_3(model, X_train, y_train, X_val, y_val, image_name):\n # Train the model\n model.fit(X_train, y_train, eval_set=[(X_val, y_val)], verbose=100)\n \n # Save the model\n model_file = os.path.join(OUTPUT_DIR, \"{}_model.txt\".format(image_name))\n model.booster_.save_model(model_file)\n \n return model, model", "def train():\n \n ## check for request data\n if not request.json:\n print(\"ERROR: API (train): did not receive request data\")\n return jsonify(False)\n\n ## set the test flag\n test = False\n if 'mode' in request.json and request.json['mode'] == 'test':\n test = True\n\n print(\"... training model\")\n model = model_train(test=test)\n print(\"... 
training complete\")\n\n return(jsonify(True))", "def train(train_x_df, train_y_df):\n x = list(train_x_df.columns.values)\n model = build_model(len(x))\n\n os.makedirs(\"./saved_models\", exist_ok=True)\n\n cp_callback = keras.callbacks.ModelCheckpoint(\n checkpoint_path, save_weights_only=True, save_best_only=True, verbose=1\n )\n\n # first 80 percent for training\n train_x = train_x_df[1:246005]\n train_y = train_y_df[1:246005]\n\n # other 20 percent for evaluating\n eval_x = train_x_df[246006 : len(train_x_df) - 1]\n eval_y = train_y_df[246006 : len(train_y_df) - 1]\n\n # train model\n model.fit(\n train_x,\n train_y,\n epochs=epochs,\n validation_split=0.2,\n verbose=0,\n batch_size=batch_size,\n callbacks=[cp_callback],\n )\n\n print(\"done training\")\n\n # export the tensorflow model to a onnx model file\n # for loading in tfe and secure enclave\n export_to_onnx(\n x,\n model,\n \"./house_credit_default.onnx\",\n )\n\n # evaluate the model using AUC, the metric used in the kaggle competition\n loss = model.evaluate(eval_x, eval_y, batch_size=batch_size)\n\n predictions = model.predict(eval_x, batch_size=batch_size)\n auc = tf.metrics.auc(eval_y, predictions)\n\n print(\"Evaluation Loss:\", loss[0])\n print(\"Accuracy:\", loss[1])\n print(\"AUC: \", auc[0][1])", "def train(model_conf:ModelConf, train_set, validation_set=None):\n model_conf.print_conf()\n\n images_placeholder = tf.placeholder(dtype=tf.float32, shape=[None, model_conf.HEIGHT*model_conf.WIDTH])\n labels_placeholder = tf.placeholder(dtype=tf.int64, shape=[None])\n model = DenseNet(model_conf, is_training=True, images=images_placeholder, labels=labels_placeholder)\n model.build_graph()\n\n summary_op = tf.summary.merge_all()\n saver = tf.train.Saver(tf.all_variables())\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n summary_writer = tf.summary.FileWriter(model_conf.SUMMARY_DIR, graph=sess.graph)\n\n for step in range(model_conf.NUM_STEPS):\n train_images, train_labels = train_set.next_batch()\n feed_dict = {images_placeholder: train_images,\n labels_placeholder: train_labels\n }\n start_time = time.time()\n [loss_value, acc_value, _, lr_value] = sess.run([model.loss, model.acc, model.train_op, model.learning_rate],\n feed_dict=feed_dict)\n duration = time.time() - start_time\n if step % 10 == 0:\n _print_log('training', step, loss_value, acc_value, lr_value, duration)\n\n if step % 100 == 0:\n summary_str = sess.run(summary_op, feed_dict=feed_dict)\n summary_writer.add_summary(summary_str, step)\n\n if step % 500 == 0:\n validation_images = validation_set.images\n validation_labels = validation_set.labels\n feed_dict = {\n images_placeholder: validation_images,\n labels_placeholder: validation_labels\n }\n [loss_value, acc_value] = sess.run([model.loss, model.acc], feed_dict=feed_dict)\n logger.info('(validation)loss: %f, acc: %f' % (loss_value, acc_value))\n\n if (step != 0 and step % 10000 == 0) or step + 1 == model_conf.NUM_STEPS:\n checkpoint_path = os.path.join(model_conf.MODEL_SAVER_DIR, 'model.ckpt')\n saver.save(sess, checkpoint_path, step)", "def _train_model(self):\n raise NotImplementedError()", "def train_model(self, save_folder: str = \"latest_model\"):\n dataset_folder = self.dataset_config[\"dataset_folder\"]\n if not path.exists(dataset_folder):\n raise ValueError(f\"Folder {dataset_folder} is not exists\")\n\n files_list = [path.join(dataset_folder, f) for f in listdir(dataset_folder) if path.isfile(path.join(dataset_folder, f)) and f.endswith(\".yml\")]\n\n df, _, _, 
synonym_dict = make_dataframe(files=files_list)\n\n self.synonym_dict.update(synonym_dict)\n self.config[\"model\"][\"synonym\"] = self.synonym_dict\n\n dataset = DIETClassifierDataset(dataframe=df, tokenizer=self.tokenizer, entities=self.entities[1:], intents=self.intents)\n\n trainer = DIETTrainer(model=self.model, dataset=dataset,\n train_range=self.training_config[\"train_range\"],\n num_train_epochs=self.training_config[\"num_train_epochs\"],\n per_device_train_batch_size=self.training_config[\"per_device_train_batch_size\"],\n per_device_eval_batch_size=self.training_config[\"per_device_eval_batch_size\"],\n warmup_steps=self.training_config[\"warmup_steps\"],\n weight_decay=self.training_config[\"weight_decay\"],\n logging_dir=self.training_config[\"logging_dir\"],\n early_stopping_patience=self.training_config[\"early_stopping_patience\"],\n early_stopping_threshold=self.training_config[\"early_stopping_threshold\"],\n output_dir=self.training_config[\"output_dir\"])\n\n trainer.train()\n\n self.save_pretrained(directory=save_folder)", "def trainModel(self, train_set_path, test_set_path, img_shape, output_dir, batch_size=32, num_epochs=25):\n\n train_datagen = ImageDataGenerator(\n # Random transform - Data augmenting\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\n test_datagen = ImageDataGenerator(rescale=1. / 255)\n\n training_set = train_datagen.flow_from_directory(\n train_set_path,\n target_size=(img_shape, img_shape),\n batch_size=32,\n class_mode='categorical')\n\n test_set = test_datagen.flow_from_directory(\n test_set_path,\n target_size=(img_shape, img_shape),\n batch_size=batch_size,\n class_mode='categorical')\n\n\n # Saves the model weights after each epoch if the validation loss decreased\n now = datetime.now()\n nowstr = 'CNN_Model'\n\n now = os.path.join(output_dir, nowstr)\n\n # Make the directory\n os.makedirs(now, exist_ok = True)\n\n # Create callbacks\n savepath = os.path.join(now, 'CNN_Model.h5')\n checkpointer = ModelCheckpoint(filepath=savepath, monitor='val_acc', mode='max', verbose=0, save_best_only=True)\n fout = open(os.path.join(now, 'labels.txt'), 'wt')\n\n # Write labels to file\n for key, val in training_set.class_indices.items():\n fout.write(str(key + '\\n'))\n\n self.model.fit_generator(\n training_set,\n steps_per_epoch=len(training_set.filenames) // batch_size,\n epochs=num_epochs,\n validation_data=test_set,\n validation_steps=len(test_set.filenames) // batch_size,\n callbacks=[checkpointer])", "def train_model():\n # Decode the request\n data = request.data.decode(\"utf-8\")\n\n # Write data from the request in a local csv file\n train_csv = \"train_local.csv\"\n f = open(train_csv, \"w\", encoding=\"utf-8\")\n f.write(data)\n f.close()\n\n # Load the train csv file as a DataFrame\n train_df = pd.read_csv(train_csv)\n\n # Train model\n model.train_model(train_df)\n\n return jsonify({\"success\": \"The model was trained sucessfully\"})", "def fit(self,X_train,y_train):\r\n \r\n self.X_train_data=X_train.reset_index(drop=True)\r\n self.y_train_data=y_train.reset_index(drop=True)\r\n \r\n temp_fitted_model=[]\r\n for each_model in self.model_list:\r\n each_model.fit(self.X_train_data,self.y_train_data)\r\n temp_fitted_model.append(each_model)\r\n \r\n self.fitted_model=temp_fitted_model", "def training(self):\n \n best_valid_loss = np.inf\n c = 0\n \n self.train_loader, self.test_loader = self.get_train_test_loaders()\n \n print('Training the {} model with the following 
architecture:'.format(self.model_name))\n print(summary(self.model, (3, self.image_width, self.image_height)))\n print('*'*100)\n print('Starting the training...')\n print('*'*100)\n \n # Create the model save dir if it already doesn't exist\n if not os.path.exists(self.model_save_dir):\n os.makedirs(self.model_save_dir)\n \n for epoch in range(self.n_epochs):\n\n print(f'Epoch: {epoch+1:02}')\n\n start_time = time.time()\n\n train_loss = self.train(self.train_loader)\n valid_loss = self.evaluate(self.test_loader)\n\n epoch_mins, epoch_secs = self.epoch_time(start_time, time.time())\n\n c+=1\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(self.model.state_dict(), os.path.join(self.model_save_dir, '{}_trained.pt'.format(self.model_name)))\n c=0\n\n if c>4:\n #decrease lr if loss does not decrease after 5 steps\n self.scheduler.step()\n c=0\n\n print(f'Time: {epoch_mins}m {epoch_secs}s') \n print(f'Train Loss: {train_loss:.3f}')\n print(f'Val Loss: {valid_loss:.3f}')\n print('-'*60)\n print('The best validation loss is', best_valid_loss)\n print('*'*100)", "def train_model(self,model):\r\n \r\n train_state = {'stop_early': False,\r\n 'early_stopping_step': 0,\r\n 'early_stopping_best_val': 1e8,\r\n 'learning_rate': self.lr,\r\n 'epoch_index': 0,\r\n 'train_loss': [],\r\n 'val_loss': [],\r\n 'best_model':model}\r\n \r\n dataset = self.dataset\r\n loss_fn = self.loss_fn\r\n \r\n dataset.set_split('train')\r\n print(\"Training module with \"+str(len(dataset))+\" examples\")\r\n \r\n data_loader = DataLoader(dataset,batch_size=self.batch_size,shuffle=True,\r\n drop_last=True)\r\n \r\n optimizer = optim.Adam(model.parameters(), lr=self.lr)\r\n \r\n for epoch in range(self.epochs):\r\n train_state['epoch_index'] = epoch\r\n #First step in each epoch is to train over all batches\r\n model.train()\r\n dataset.set_split('train')\r\n train_loss = 0\r\n for b_i,batch_data in enumerate(data_loader):\r\n #Step 1: zero gradients\r\n optimizer.zero_grad()\r\n #Step 2: run forward\r\n X = batch_data['x']\r\n output = model(X)\r\n #Step 3: compute loss\r\n target = batch_data['y']\r\n loss = loss_fn(output,target)\r\n #Step 4: run backward\r\n loss.backward()\r\n #Step 5: update\r\n optimizer.step()\r\n \r\n #Record accumulated loss\r\n new_loss = loss.item()\r\n train_loss += new_loss\r\n \r\n train_loss /= b_i\r\n train_state['train_loss'].append(train_loss)\r\n \r\n #After training, compute loss on validation set and check for early stop\r\n model.eval()\r\n dataset.set_split('val')\r\n val_loss = 0\r\n for b_i,batch_data in enumerate(data_loader):\r\n #Step 1: run forward\r\n X = batch_data['x']\r\n output = model(X)\r\n #Step 2: compute loss\r\n target = batch_data['y']\r\n loss = loss_fn(output,target)\r\n \r\n #Record accumulated loss\r\n new_loss = loss.item()\r\n val_loss += new_loss\r\n \r\n val_loss /= b_i\r\n train_state['val_loss'].append(val_loss)\r\n \r\n print(\"Finished epoch \"+str(epoch+1)+\". 
Train loss=\"+\\\r\n str(train_loss)+\", Val loss=\"+str(val_loss))\r\n \r\n if val_loss < train_state['early_stopping_best_val']:\r\n #new best model, reset stopping counter, store model\r\n train_state['early_stopping_step'] = 0\r\n train_state['early_stopping_best_val'] = val_loss\r\n best_model = copy.deepcopy(model)\r\n best_model.load_state_dict(model.state_dict())\r\n train_state['best_model'] = best_model\r\n else:\r\n #val loss not improved; increase early stopping counter\r\n train_state['early_stopping_step'] += 1\r\n if train_state['early_stopping_step'] >= self.early_stopping_criteria:\r\n train_state['stop_early'] = True\r\n print(\"Val loss failed to improve. Stopping early.\")\r\n break\r\n \r\n return train_state['best_model'],train_state", "def train(self, x_data, y_data):\n self.model.fit(np.array(x_data), np.array(y_data),\n batch_size=2,\n epochs=3,\n verbose=1)\n self.model.save_weights(self.model_filename)", "def train(self) -> None:\n\n # Check if in the saved model path there is already a trained model\n if self.config.TRN_HYPERP[\"save_path\"]:\n if tf.saved_model.contains_saved_model(self.config.TRN_HYPERP[\"save_path\"]):\n print(\"INFO: An existing saved model will be used for inference\\n\")\n else:\n params = {**self.config.TRN_HYPERP, **self.config.DATASET_HYPERP}\n trainer = Trainer(**params)\n\n print(f\"INFO: Starting training ... \\n\")\n start_time = time.time()\n trainer.train()\n print(f\"\\nINFO: Training completed in {round((time.time() - start_time)/60, 2)} minutes.\\n\")\n\n # Instantiate the saved translator for inference\n saved_path = self.config.TRN_HYPERP[\"save_path\"]\n self.saved_translator = tf.saved_model.load(saved_path)\n else:\n print(\"INFO: Path to save model wasn't provided in config file. Can't train the model\\n\")", "def train(self, training_data):\n pass", "def train(self, tr_set_path: str, save_path: str, va_split: float=0.1, stratified_split: bool=False, early_stopping: bool=True):\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]\n (x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)\n (x_va, y_va, _) = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]\n (x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)\n\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = self.__build_model(num_classes=len(le_encoder.categories_[0]))\n model.compile(\n loss=losses.CategoricalCrossentropy(label_smoothing=self.label_smoothing),\n #metrics=['categorical_accuracy'],\n optimizer=self.model_cfg.get('optimizer', 'adam') #default lr at 0.001\n #optimizer=optimizers.Adam(learning_rate=5e-4)\n )\n # early stopping callback using validation loss \n callback = tf.keras.callbacks.EarlyStopping(\n monitor=\"val_loss\",\n min_delta=0,\n patience=5,\n verbose=0,\n 
mode=\"auto\",\n baseline=None,\n restore_best_weights=True,\n )\n #callback = EarlyStoppingAtMaxMacroF1(\n # patience=100, # record all epochs\n # validation=(x_va, y_va)\n #)\n\n print('start training')\n history = model.fit(x_train, y_train,\n batch_size=self.model_cfg['batch_size'],\n epochs=100,\n validation_split=va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else None,\n callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(f'finished training in {len(history.history[\"loss\"])} epochs')\n save(model, le_encoder, save_path, history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n # return training history \n return history.history", "def train_model(net, model_name, max_epochs=50, patience=7, batch_size=256, overwrite=False):\n file_exists = os.path.isfile(_get_model_file(CHECKPOINT_PATH, model_name))\n if file_exists and not overwrite:\n print(\"Model file already exists. Skipping training...\")\n else:\n if file_exists:\n print(\"Model file exists, but will be overwritten...\")\n\n # Defining optimizer, loss and data loader\n optimizer = optim.SGD(net.parameters(), lr=1e-2, momentum=0.9) # Default parameters, feel free to change\n loss_module = nn.CrossEntropyLoss()\n train_loader_local = data.DataLoader(\n train_set, batch_size=batch_size, shuffle=True, drop_last=True, pin_memory=True\n )\n\n val_scores = []\n best_val_epoch = -1\n for epoch in range(max_epochs):\n ############\n # Training #\n ############\n net.train()\n true_preds, count = 0.0, 0\n for imgs, labels in tqdm(train_loader_local, desc=f\"Epoch {epoch+1}\", leave=False):\n imgs, labels = imgs.to(device), labels.to(device) # To GPU\n optimizer.zero_grad() # Zero-grad can be placed anywhere before \"loss.backward()\"\n preds = net(imgs)\n loss = loss_module(preds, labels)\n loss.backward()\n optimizer.step()\n # Record statistics during training\n true_preds += (preds.argmax(dim=-1) == labels).sum()\n count += labels.shape[0]\n train_acc = true_preds / count\n\n ##############\n # Validation #\n ##############\n val_acc = test_model(net, val_loader)\n val_scores.append(val_acc)\n print(\n f\"[Epoch {epoch+1:2i}] Training accuracy: {train_acc*100.0:05.2f}%, Validation accuracy: {val_acc*100.0:05.2f}%\"\n )\n\n if len(val_scores) == 1 or val_acc > val_scores[best_val_epoch]:\n print(\"\\t (New best performance, saving model...)\")\n save_model(net, CHECKPOINT_PATH, model_name)\n best_val_epoch = epoch\n elif best_val_epoch <= epoch - patience:\n print(f\"Early stopping due to no improvement over the last {patience} epochs\")\n break\n\n # Plot a curve of the validation accuracy\n plt.plot([i for i in range(1, len(val_scores) + 1)], val_scores)\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Validation accuracy\")\n plt.title(f\"Validation performance of {model_name}\")\n plt.show()\n plt.close()\n\n load_model(CHECKPOINT_PATH, model_name, net=net)\n test_acc = test_model(net, test_loader)\n print((f\" Test accuracy: {test_acc*100.0:4.2f}% \").center(50, \"=\") + \"\\n\")\n return test_acc", "def train(self):\n if self.retrain:\n self.states = self.get_states()\n self.transitions = self.get_transitions()\n self.matrix = self.get_matrix()\n self.save_training()\n else:\n self.load_training()", "def train_model(self, initial=False):\n if len(self.loading.intersection({'model_train', 'base_model'})) > 0:\n return\n\n train_files = []\n if not self.model_trained and not initial:\n 
train_files = list(askopenfilenames())\n if len(train_files) is 0:\n return\n\n self.queue_gui_update('model_train_status', {'visible': False})\n self.queue_gui_update('btn_train_model', {'text': 'Training...'})\n self.queue_gui_update('model_train_loading', {'visible': True})\n self.loading.add('model_train')\n else:\n self.queue_gui_update('base_model_status', {'visible': False})\n self.queue_gui_update('btn_train_model', {'text': 'Loading base model...'})\n self.queue_gui_update('base_model_loading', {'visible': True})\n self.loading.add('base_model')\n\n\n self.model, is_base = get_model(train_files)\n if is_base or is_base is None:\n self.base_model_loaded = True\n self.model_trained = False\n self.queue_gui_update('base_model_status', {'value': u'\\u2713', 'text_color': 'green', 'visible': True})\n self.queue_gui_update('base_model_loading', {'visible': False})\n\n self.queue_gui_update('model_train_status', {'value': u'\\u2717', 'text_color': 'red', 'visible': True})\n self.queue_gui_update('btn_train_model', {'text': 'Train transfer model'})\n self.queue_gui_update('model_train_loading', {'visible': False})\n\n if is_base:\n self.loading.remove('base_model')\n else:\n self.model_trained = True\n self.queue_gui_update('model_train_status', {'value': u'\\u2713', 'text_color': 'green', 'visible': True})\n self.queue_gui_update('btn_train_model', {'text': 'Reset model'})\n self.queue_gui_update('model_train_loading', {'visible': False})\n self.loading.remove('model_train')\n\n self.model._make_predict_function()", "def model_switch_to_training(self):\n pass", "def train(self, model_type, params=None):\n Model = load_model_class(model_type)\n self.model_type = model_type\n X, y = self.task.make_dataset()\n self.final_data = X.copy()\n # Save preds\n preds = np.zeros_like(y.values).astype(np.float)\n with TMPFolder():\n N = len(X)\n n = N // self.cv\n # Assign a fold to each sample\n folds = np.random.permutation(np.repeat(np.arange(self.cv), n+1)[:N])\n if self.cv == 1:\n folds[:] = 1\n folds[np.random.permutation(np.arange(N))[:int(round(0.25 * N))]] = 0\n # Iterate over folds\n for k in range(self.cv):\n print(\"Fold\", k)\n # Create model\n model = Model()\n if params is not None:\n model.set_hp(params)\n # Create sub-dataset\n X_train = X[folds != k]\n y_train = y[folds != k]\n X_test = X[folds == k]\n y_test = y[folds == k]\n # Train the model\n model.train(X_train, y_train)\n # Make predictions on test samples\n y_pred = model.predict(X_test)\n # Save the predictions\n preds[folds == k] = y_pred\n self.model_save.append(model)\n # Save folds\n self.folds = folds\n self.is_trained = True\n self.preds = preds\n self.true_labels = y", "def train(self):\n # Change directory to the code directory\n current_working_directory = os.getcwd()\n\n os.chdir(self.model_parameters[\"NN_code_directory\"])\n\n self.call_training_routine()\n\n # Come back to the original directory\n os.chdir(current_working_directory)", "def train():\n # YOUR TRAINING CODE GOES HERE", "def retrain(datapath, model_version):\n df = get_df(datapath)\n X = df.drop(columns='target')\n y = df['target']\n fitted_model = fit(RF, X, y)\n\n with open(f'trained_models/model_{model_version}.joblib', 'wb') as file:\n joblib.dump(fitted_model, file)", "def train_val_training(X_train, y_train, model):\n # set pach where trained models will be saved to \n savepath = Path('/home/kwaygo/Documents/NUS/SPH6004/P2/SPH6004_P2/models/Regression')\n checkpoint_name = os.path.join(savepath, 'Weights-{epoch:03d}--{val_loss:.5f}.hdf5' ) \n 
# define callbacks\n cp = ModelCheckpoint(checkpoint_name, monitor='val_loss', verbose = 1, save_best_only = True, mode ='auto')\n es = EarlyStopping(monitor='val_loss', patience= 4, verbose=1)\n callbacks_list = [es, cp]\n # start training\n hist = model.fit(X_train, y_train, epochs=500, batch_size=500, validation_split = 0.2, callbacks=callbacks_list) \n \n print(\"[INFO] avg. ICU LOS of train set: {}, std ICU LOS of test set: {}\".format(np.mean(y_train), np.std(y_train)))\n # plot training History \n plotHist(hist)\n return model", "def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)", "def train(self, trainset, testset, niter=1000, ntest=300, epochs=int(1e6)):\n\n print('\\n>> Training begins!\\n')\n\n def fetch_dict(datagen, keep_prob=0.5):\n \"\"\"Format model input data.\"\"\"\n bx, by, br = next(datagen)\n while not (bx.shape[0] > 0 and bx.shape[1] > 0):\n bx, by, br = next(datagen)\n\n dec_lengths = np.full((bx.shape[0], ), bx.shape[1], dtype=np.int32)\n\n feed_dict = {\n self.xs_: bx,\n self.ys_: by,\n self.dec_inputs_length_: dec_lengths,\n self.ext_context_: br,\n self.keep_prob_: keep_prob\n }\n return feed_dict\n\n # setup session\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n load_classification_parameters(\n sess, \"models/model.ckpt-1500000\", \"class\")\n load_classification_parameters(\n sess, \"models/model.ckpt-1500000\", \"feat\")\n\n # get last checkpoint\n saver = tf.train.Saver(get_collections([\"encoder\", \"decoder\", \"proj\"]))\n ckpt = tf.train.get_checkpoint_state(self.ckpt_path)\n # verify it\n if ckpt and ckpt.model_checkpoint_path:\n try:\n saver.restore(sess, ckpt.model_checkpoint_path)\n except tf.OpError:\n # graph structure changed, cannot load, restart training\n pass\n\n try:\n # start training\n for j in range(epochs):\n mean_loss = 0\n for n_it in range(niter):\n _, loss = sess.run(\n [self.train_op, self.loss],\n feed_dict=fetch_dict(trainset))\n mean_loss += loss\n print(' [{}/{}]\\r'.format(n_it, niter))\n\n print('[{}] train loss : {}'.format(j, mean_loss / niter))\n saver.save(sess, self.ckpt_path + self.model_name + '.ckpt',\n global_step=j)\n\n # evaluate\n testloss = 0\n for _ in range(ntest):\n testloss += sess.run(\n [self.loss],\n feed_dict=fetch_dict(testset, keep_prob=1.))[0]\n print('test loss : {}'.format(testloss / ntest))\n\n except KeyboardInterrupt:\n print('\\n>> Interrupted by user at iteration {}'.format(j))", "def train_model(self):\n self.best_epoch = {'auto':{}, 'coffee':{}, 'movie':{}, 'pizza':{}, 'restaurant':{}, 'uber':{} }\n self.best_f1 = {'auto':{}, 'coffee':{}, 'movie':{}, 'pizza':{}, 'restaurant':{}, 'uber':{} }\n for t in self.topic:\n if t != 'other':\n for st in self.topic2sub_topic[t].keys():\n\n print(\"Now training the classsfier for topic: \", t, \" ; intent: \", st)\n print(128 * \"=\")\n print(\"Input: str; Output: boolean(if the str contents 
the intent: \", st, \" ).\")\n print(64 * \"-\")\n X, y = self.get_data(t, st)\n print(\"data_loaded!\")\n X_train, X_dev, y_train, y_dev = self.my_train_test_split(X, y)\n best_f1 = 0\n for e in range(1,10):\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.InputLayer(input_shape=[1024, ]))\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n model.add(tf.keras.layers.Dense(1, activation='relu'))\n model.compile(loss='mean_squared_logarithmic_error', optimizer='adam', metrics=[metrics.mae, metrics.categorical_accuracy])\n model.fit(X_train, y_train, epochs=e, batch_size=128)\n print(\"f1_score on dev set: \")\n f1 = self.f1_score_model(model, X_dev, y_dev)[0]\n if f1 > best_f1:\n self.model_zoo[t][st] = model\n model.save_weights(self.trained_w_folder+\"/%s/%s.h5\" %(t,st))\n self.best_epoch[t][st] = e\n self.best_f1[t][st] = f1\n best_f1 = f1\n\n print(64*\"=\")\n print()", "def fit(self):\n \n # Open an existing model and get the training & test dataset and targets\n train_test_df, target_df = self._get_model_and_data(target=True, set_feature_def=True)\n \n # Check that the estimator is an supervised ML algorithm\n if self.model.estimator_type not in [\"classifier\", \"regressor\"]:\n err = \"Incorrect usage. The estimator specified is not a known classifier or regressor: {0}\".format(self.model.estimator)\n raise Exception(err)\n \n # Check which validation strategy is to be used, if any\n # For an explanation of cross validation in scikit-learn see: http://scikit-learn.org/stable/modules/cross_validation.html#multimetric-cross-validation\n if self.model.time_series_split > 0:\n self.model.validation = \"timeseries\"\n # Set up cross validation to be performed using TimeSeriesSplit\n self.model.cv = TimeSeriesSplit(n_splits=self.model.time_series_split, max_train_size=self.model.max_train_size)\n elif self.model.cv > 0:\n self.model.validation = \"k-fold\"\n elif self.model.test_size > 0:\n self.model.validation = \"hold-out\"\n else:\n self.model.validation = \"external\"\n\n if self.model.validation == \"hold-out\": \n # Split the data into training and testing subsets\n self.X_train, self.X_test, self.y_train, self.y_test = \\\n train_test_split(train_test_df, target_df, test_size=self.model.test_size, random_state=self.model.random_state)\n else:\n self.X_train = train_test_df\n self.y_train = target_df\n \n # Add the training and test data to the model if required\n if self.model.retain_data:\n self.model.X_train = self.X_train\n self.model.y_train = self.y_train\n \n try:\n self.model.X_test = self.X_test\n self.model.y_test = self.y_test\n except AttributeError:\n pass\n \n # Scale the targets and increase stationarity if required\n if self.model.scale_target or self.model.make_stationary:\n # Set up the target transformer\n self.model.target_transformer = TargetTransformer(scale=self.model.scale_target, make_stationary=self.model.make_stationary, stationarity_lags=self.model.stationarity_lags,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Fit the transformer to the training targets\n self.model.target_transformer = self.model.target_transformer.fit(self.y_train)\n\n # Apply the transformer to the training targets\n self.y_train = self.model.target_transformer.transform(self.y_train)\n # Drop samples where the target cannot be transformed due to insufficient lags\n self.X_train = self.X_train.iloc[len(self.X_train)-len(self.y_train):] \n 
\n # Add lag observations to the samples if required\n if self.model.lags or self.model.lag_target:\n # Check if the current sample will be included as an input, or whether we only use lag observations for predictions\n extrapolate = 1 if self.model.current_sample_as_input else 0\n # Add the lag observations\n self.X_train = self._add_lags(self.X_train, self.y_train, extrapolate=extrapolate, update_features_df=True)\n # Drop targets for samples which were dropped due to null values after adding lags.\n if len(self.y_train) > len(self.X_train):\n self.y_train = self.y_train.iloc[len(self.y_train)-len(self.X_train):]\n\n # If this is a Keras estimator, we require the preprocessing to return a data frame instead of a numpy array\n prep_return = 'df' if self.model.using_keras else 'np'\n\n # Construct the preprocessor\n prep = Preprocessor(self.model.features_df, return_type=prep_return, scale_hashed=self.model.scale_hashed, scale_vectors=self.model.scale_vectors,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Setup a list to store steps for the sklearn pipeline\n pipe_steps = [('preprocessor', prep)]\n\n if self.model.dim_reduction:\n # Construct the dimensionality reduction object\n reduction = self.decomposers[self.model.reduction](**self.model.dim_reduction_args)\n \n # Include dimensionality reduction in the pipeline steps\n pipe_steps.append(('reduction', reduction))\n self.model.estimation_step = 2\n else:\n self.model.estimation_step = 1 \n\n # If this is a Keras estimator, update the input shape and reshape the data if required\n if self.model.using_keras:\n # Update the input shape based on the final number of features after preprocessing\n self._keras_update_shape(prep)\n\n # Add the Keras build function, architecture and prediction_periods to the estimator keyword arguments\n self.model.estimator_kwargs['build_fn'] = self._keras_build_fn\n self.model.estimator_kwargs['architecture'] = self.model.architecture\n self.model.estimator_kwargs['prediction_periods'] = self.model.prediction_periods\n\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(10)\n \n # Check than an identifier has been provided for sorting data if this is a sequence prediction problem\n if self.model.lags or len(self.model.first_layer_kwargs[\"input_shape\"]) > 1:\n assert len(self.model.original_features_df[self.model.original_features_df['variable_type'].isin([\"identifier\"])]) == 1, \\\n \"An identifier is mandatory when using lags or with sequence prediction problems. 
Define this field in your feature definitions.\"\n\n # Cater for multi-step predictions\n if self.model.prediction_periods > 1:\n # Transform y to a vector of values equal to prediction_periods\n self.y_train = utils.vectorize_array(self.y_train, steps=self.model.prediction_periods)\n # Drop values from x for which we don't have sufficient y values\n self.X_train = self.X_train.iloc[:-len(self.X_train)+len(self.y_train)]\n\n # Add a pipeline step to update the input shape and reshape the data if required\n # This transform will also add lag observations if specified through the lags parameter\n # If lag_target is True, an additional feature will be created for each sample using the previous value of y \n reshape = Reshaper(first_layer_kwargs=self.model.first_layer_kwargs, logfile=self.logfile)\n pipe_steps.append(('reshape', reshape))\n self.model.estimation_step += self.model.estimation_step\n\n # Avoid tensorflow error for keras models\n # https://github.com/tensorflow/tensorflow/issues/14356\n # https://stackoverflow.com/questions/40785224/tensorflow-cannot-interpret-feed-dict-key-as-tensor\n kerasbackend.clear_session()\n \n # Try assuming the pipeline involves a grid search\n try:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Prepare the grid search using the previously set parameter grid\n grid_search = GridSearchCV(estimator=estimator, param_grid=self.model.param_grid, **self.model.grid_search_args)\n \n # Add grid search to the pipeline steps\n pipe_steps.append(('grid_search', grid_search))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to the pipeline\n if self.model.using_keras:\n # https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n\n # Get the best parameters and the cross validation results\n grid_search = self.model.pipe.named_steps['grid_search']\n self.model.best_params = grid_search.best_params_\n self.model.cv_results = grid_search.cv_results_\n\n # Get the best estimator to add to the final pipeline\n estimator = grid_search.best_estimator_\n\n # Update the pipeline with the best estimator\n self.model.pipe.steps[self.model.estimation_step] = ('estimator', estimator)\n\n except AttributeError:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Add the estimator to the pipeline steps\n pipe_steps.append(('estimator', estimator))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to the pipeline\n if self.model.using_keras:\n # 
https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n \n if self.model.validation == \"hold-out\": \n # Evaluate the model using the test data \n self.calculate_metrics(caller=\"internal\")\n \n if self.model.calc_feature_importances:\n # Select the dataset for calculating importances\n if self.model.validation == \"hold-out\":\n X = self.X_test\n y = self.y_test # Already a numpy array after calculate_metrics\n else:\n X = self.X_train\n y = self.y_train.values.ravel()\n \n # Calculate model agnostic feature importances\n self._calc_importances(X = X, y = y)\n\n # Persist the model to disk\n self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)\n \n # Update the cache to keep this model in memory\n self._update_cache()\n \n # Prepare the output\n if self.model.validation != \"external\": \n message = [[self.model.name, 'Model successfully trained, tested and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model has a score of {1:.3f} against the test data.\"\\\n .format(self.model.estimator, self.model.score), self.model.score]]\n else:\n message = [[self.model.name, 'Model successfully trained and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model score unknown as test_size was <= 0.\"\\\n .format(self.model.estimator), np.NaN]]\n \n self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp', 'score_result', 'score'])\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"fit\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response", "def fit(self,\n X_train,\n y_train, \n X_test, \n y_test):\n \n #instantiate path_model_dirs dictionary so we can know where the models are saved\n self.path_model_dirs = {}\n\n for key in self.models_dict.keys():\n \n if self.verbose >=1: print('\\n----',key,'----')\n\n #define model directory\n path_model_dir = _os.path.join(self.path_GridSearchCV_dir, key)\n self.path_model_dirs[key] = path_model_dir\n if self.verbose >=1: print('path_model_dir:',path_model_dir)\n \n model_type = type(self.models_dict[key]['model'])\n if 'sklearn' in str(model_type) or 'xgboost' in str(model_type):\n path_file = _os.path.join(path_model_dir,'model_dict.dill')\n elif 'Net' in key:\n path_file = _os.path.join(path_model_dir,'best_params_.dill')\n\n if self.retrain or _os.path.isfile(path_file)==False:\n self.models_dict[key] = self._single_model_GridSearchCV(self.models_dict[key], \n X_train, y_train, \n X_test, y_test,\n path_model_dir)\n\n else: #reload previously trained model\n if 'sklearn' in str(type(self.models_dict[key]['model'])):\n self.models_dict[key] = self.load('model_dict', 'dill', path_model_dir)\n elif 'Net' in key:\n #check kwargs for epochs\n epochs = 100\n for item in 
self.kwargs.items():\n if 'epochs' in item[0]: epochs = item[1]\n self.models_dict[key] = self.load_NeuralNet(path_model_dir, \n X_train, y_train, \n epochs)\n\n y_pred = self.models_dict[key]['best_model'].predict(X_test)\n\n if 'Net' not in key:\n self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].score(X_test, y_test)\n else:\n self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].evaluate(X_test, y_test, verbose =0)\n \n if self.verbose >=1:\n print('\\tbest_cv_score:',self.models_dict[key]['best_cv_score'])\n print('\\tbest_pred_score:',self.models_dict[key]['best_pred_score'])\n\n for metric_key in self.metrics.keys():\n if self.metrics[metric_key] !=None:\n try:\n self.models_dict[key][metric_key] = self.metrics[metric_key](y_test, y_pred)\n print('\\t',metric_key,':',self.models_dict[key][metric_key])\n except Exception as e:\n print('Exception occured for',metric_key,':',str(e))\n\n if 'sklearn' in str(type(self.models_dict[key]['model'])):\n self.save(self.models_dict[key], 'model_dict', 'dill', path_model_dir)\n elif 'Net' in key:\n model_dict_subset = self.models_dict[key].copy()\n for key in self.models_dict[key].keys():\n if key not in ['y_test','y_pred','best_pred_score'] +list(self.metrics.keys()):\n model_dict_subset.pop(key)", "def train(self, train_dataset):\n\n # check fine_tuning option\n model_path = os.path.join(self.check_point, 'model.pt')\n if self.fine_tune and not os.path.exists(model_path):\n raise Exception('Cannot find %s.' %model_path)\n elif self.fine_tune and os.path.exists(model_path):\n if self.verbose:\n pass\n self.model = torch.load(model_path)\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)\n \n # capture best model\n best_val_psnr = -1\n best_model_state = self.model.state_dict()\n\n # Train the model\n for epoch in range(self.num_epochs):\n self._epoch_step(train_dataset, epoch)\n self.scheduler.step()\n\n\n\n # capture running PSNR on train and val dataset\n train_psnr, train_ssim, _, _ = self._check_PSNR(train_dataset)\n self.hist_train_psnr.append(train_psnr)\n \n\n \n # write the model to hard-disk for testing\n if not os.path.exists(self.check_point):\n os.makedirs(self.check_point)\n model_path = os.path.join(self.check_point, 'model.pt')\n torch.save(self.model, model_path)", "def train(sess, env, Xdata, ydata, X_valid=None, y_valid=None, epochs=1,\n load=False, shuffle=True, batch_size=128, name='model'):\n if load:\n if not hasattr(env, 'saver'):\n return print('\\nError: cannot find saver op')\n print('\\nLoading saved model')\n return env.saver.restore(sess, 'model/{}'.format(name))\n\n print('\\nTrain model')\n n_sample = Xdata.shape[0]\n n_batch = int((n_sample+batch_size-1) / batch_size)\n for epoch in range(epochs):\n print('\\nEpoch {0}/{1}'.format(epoch + 1, epochs))\n\n if shuffle:\n print('\\nShuffling data')\n ind = np.arange(n_sample)\n np.random.shuffle(ind)\n Xdata = Xdata[ind]\n ydata = ydata[ind]\n\n for batch in range(n_batch):\n print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\\r')\n begin = batch * batch_size\n ending = min(n_sample, begin + batch_size)\n sess.run(env.train_op, feed_dict={env.x: Xdata[begin:ending],\n env.y: ydata[begin:ending],\n env.training: True})\n if X_valid is not None:\n evaluate(sess, env, X_valid, y_valid)\n\n if hasattr(env, 'saver'):#在此处保存模型吧?\n print('\\n Saving model')\n os.makedirs('model', exist_ok=True)\n env.saver.save(sess, 'model/{}'.format(name))", "def train_model(model, x_train, y_train, 
x_test, y_test,\n epochs=None, batch_size=None):\n\n # Training\n if batch_size is None:\n batch_size = 128\n if epochs is None:\n epochs = 20\n\n print('x_train shape:', x_train.shape)\n print('x_test shape:', x_test.shape)\n\n print('Train...')\n model.fit(x_train,\n y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test),\n use_multiprocessing=True)", "def save_model(self, epoch):\n # Set the name for the model\n gen_lungs_filename = 'gen_lungs_model_epoch_{}.h5'.format(epoch + 1)\n disc_lungs_filename = 'disc_lungs_model_epoch_{}.h5'.format(epoch + 1)\n train_summary_lungs_filename = 'train_summary_lungs_epoch_{}.csv'.format(epoch + 1)\n\n gen_organs_filename = 'gen_organs_model_epoch_{}.h5'.format(epoch + 1)\n disc_organs_filename = 'disc_organs_model_epoch_{}.h5'.format(epoch + 1)\n train_summary_organs_filename = 'train_summary_organs_epoch_{}.csv'.format(epoch + 1)\n\n # Save the model and train summary\n self.generator_lungs.save(op.join(self.model_dir, gen_lungs_filename), include_optimizer=True)\n self.disc_lungs.save(op.join(self.model_dir, disc_lungs_filename), include_optimizer=True)\n self.summary_writer_lungs.to_csv(op.join(self.train_summary_dir, train_summary_lungs_filename))\n\n self.generator_organs.save(op.join(self.model_dir, gen_organs_filename), include_optimizer=True)\n self.disc_organs.save(op.join(self.model_dir, disc_organs_filename), include_optimizer=True)\n self.summary_writer_organs.to_csv(op.join(self.train_summary_dir, train_summary_organs_filename))\n return self", "def train(cleaner, data_source, save_to=\"../models/model.pkl\"):\n df = pd.read_csv(data_source)\n df = df[pd.notnull(df['tags'])]\n print(\"Start : Pre-cleaning process . . . \")\n print(\" HTML decoding . . . done\")\n print(\" lowercase text . . . done\")\n print(\" replace [/(){}\\[\\]\\|@,;] symbols by space . . . done\")\n print(\" remove remaining symbols . . . done\")\n print(\" remove stopwords . . . done\")\n df['post'] = df['post'].apply(cleaner)\n print(\"End : Pre-cleaning process\")\n x = df.post\n y = df.tags\n # no need for split data in final training stage\n # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state = 42)\n print(\"Start : model creation process . . . 
\")\n sgd = Pipeline([('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, random_state=42, max_iter=5,\n tol=None)),\n ])\n # sgd.fit(X_train, y_train)\n sgd.fit(x, y)\n print(\"End : model creation process\")\n model = open(save_to, 'wb')\n pickle.dump(sgd, model)\n model.close()\n print(\"Trained model saved to \" + save_to)\n return sgd", "def train(self, train_set, model, num_epochs=5, resume=False, optimizer=None, dev_set=None):\n\n\t\ttorch.cuda.empty_cache()\n\t\tself.resume = resume\n\t\tif resume:\n\t\t\tlatest_checkpoint_path = Checkpoint.get_latest_epoch_checkpoint(self.load_dir)\n\t\t\tprint('resuming {} ...'.format(latest_checkpoint_path))\n\t\t\tresume_checkpoint = Checkpoint.load(latest_checkpoint_path)\n\t\t\tmodel = resume_checkpoint.model\n\t\t\tself.optimizer = resume_checkpoint.optimizer\n\n\t\t\t# check var\n\t\t\tmodel.set_var('attention_forcing', self.attention_forcing)\n\t\t\tmodel.set_var('debug_count', 0)\n\t\t\tmodel.reset_use_gpu(self.use_gpu)\n\t\t\tprint('attention forcing: {}'.format(model.attention_forcing))\n\t\t\tprint('use gpu: {}'.format(model.use_gpu))\n\t\t\tif self.use_gpu:\n\t\t\t\tmodel = model.cuda()\n\t\t\telse:\n\t\t\t\tmodel = model.cpu()\n\n\t\t\t# A walk around to set optimizing parameters properly\n\t\t\tresume_optim = self.optimizer.optimizer\n\t\t\tdefaults = resume_optim.param_groups[0]\n\t\t\tdefaults.pop('params', None)\n\t\t\tdefaults.pop('initial_lr', None)\n\t\t\tself.optimizer.optimizer = resume_optim.__class__(model.parameters(), **defaults)\n\n\t\t\tstart_epoch = resume_checkpoint.epoch\n\t\t\tstep = resume_checkpoint.step\n\n\t\telse:\n\t\t\tstart_epoch = 1\n\t\t\tstep = 0\n\n\t\t\tfor name, param in model.named_parameters():\n\t\t\t\tlog = self.logger.info('{}:{}'.format(name, param.size()))\n\t\t\t\t# check embedder init\n\t\t\t\t# if 'embedder' in name:\n\t\t\t\t# \tprint('{}:{}'.format(name, param[5]))\n\n\t\t\tif optimizer is None:\n\t\t\t\toptimizer = Optimizer(torch.optim.Adam(model.parameters(), \n\t\t\t\t\t\t\tlr=self.learning_rate), max_grad_norm=self.max_grad_norm) # 5 -> 1\n\n\t\t\t\t# set scheduler\n\t\t\t\t# optimizer.set_scheduler(torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer.optimizer, 'min'))\n\n\t\t\tself.optimizer = optimizer\n\n\t\tself.logger.info(\"Optimizer: %s, Scheduler: %s\" % (self.optimizer.optimizer, self.optimizer.scheduler))\n\n\t\tself._train_epoches(train_set, model, num_epochs, start_epoch, step, dev_set=dev_set)\n\t\t\n\t\treturn model", "def train_model(clf, dataloaders, criterion, optimizer, num_epochs=25,\n patience=10, save_model_path=None, resume=False, finetune=False):\n\n since = time.time()\n\n val_acc_history = []\n\n #best_model_wts = copy.deepcopy(clf.state_dict())\n best_acc = 0.0\n best_epoch = 0\n epoch = 0\n\n if resume:\n assert save_model_path is not None\n if save_model_path in glob(save_model_path):\n _model, _criterion, _optimizer, _epoch, _loss, _accuracy, _history = _resume_from_checkpoint(save_model_path)\n #if finetune:\n # model.set_requires_grad(_model, True)\n clf = _model\n criterion = _criterion\n optimizer = _optimizer\n epoch = _epoch + 1\n best_epoch = _epoch\n best_acc = _accuracy\n val_acc_history = _history\n else:\n raise Exception(\"No such model file in the specified path.\")\n\n if finetune:\n model.set_requires_grad(clf, True)\n\n best_model_wts = copy.deepcopy(clf.state_dict())\n test_dataloader = dataloaders.pop('test', None)\n\n clf = clf.to(device)\n\n for epoch in 
range(epoch, num_epochs):\n print('Epoch {}/{}'.format(epoch + 1, num_epochs))\n print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n clf.train() # Set model to training mode\n else:\n clf.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n for inputs, labels in tqdm(dataloaders[phase]):\n inputs = inputs.to(device)\n labels = labels.to(device).long()\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n # Get model outputs and calculate loss\n outputs = clf(inputs)\n loss = criterion(outputs, labels)\n\n _, preds = torch.max(outputs, 1)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n epoch_loss = running_loss / len(dataloaders[phase].dataset)\n epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)\n\n print('{} Loss: {:.4f}, Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))\n\n # deep copy the model\n if phase == 'val':\n val_acc_history.append(epoch_acc)\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(clf.state_dict())\n best_epoch = epoch\n if save_model_path:\n _save_checkpoint(clf, criterion, optimizer, epoch, epoch_loss, best_acc, val_acc_history, save_model_path)\n print(\"Model checkpoint saved successfully in the given path!\")\n print()\n if patience is not None:\n if epoch - best_epoch >= patience:\n break\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n # load best model weights\n clf.load_state_dict(best_model_wts)\n\n test_acc = test_model(clf, test_dataloader, criterion, optimizer)\n if not save_model_path:\n if IN_COLAB:\n save_model_path = \"/content/drive/My Drive/Audio-classification-using-multiple-attention-mechanism/best_weights.h5\"\n else:\n save_model_path = \"best_weights.h5\"\n save_model(clf, os.path.splitext(save_model_path)[0] + (\"_final_finetuned\" if finetune else \"_final\") + os.path.splitext(save_model_path)[1])\n\n return clf, val_acc_history, test_acc", "def train_model():\n return model.fit(train_images, train_labels, epochs=10, validation_data=(test_images, test_labels), shuffle='True')", "def train_on_whole_data(self):\n save_model_path = './savedModel/cnn-model'\n self.build_graph(save_model_path)\n\n tf.reset_default_graph()\n with tf.Session(graph=tf.get_default_graph()) as sess:\n try:\n graph = self.__load_graph(sess, save_model_path)\n self.__train_and_report(sess, graph, range(1, 6), save_model_path)\n except Exception as e:\n logger.error(\"Something is missing from the previous saved graph, remove it and regenerate graph\")\n shutil.rmtree(\"./savedModel\")\n exit()", "def train_model(dataset):\n\n # clear the session so that we can train more than one model\n K.clear_session()\n\n # initialize the model\n model = initalizer.init_nn()\n\n # fit the model\n model.fit(dataset, epochs=40)\n\n return model", "def train(self, trainfile):", "def save_model(self, path):\n try:\n # os.makedirs(osp.dirname(path), exist_ok=1)\n joblib.dump(self.model, path)\n except Exception as e:\n print(e)\n print(\"Couldn't save scikit learn 
model on path {}!\".format(path))", "def train():\n pass", "def train(data_dir, epochs, batch_size, lr, optimizer, categories):\n run = Run.get_context()\n\n run.log('optimizer', optimizer)\n run.log('minibatch_size', batch_size)\n run.log('learning_rate', lr)\n run.log('categories', categories)\n\n # Get model and data objects\n train_generator, validation_generator = create_dataset_generators(\n data_dir, batch_size, categories\n )\n \n model = create_model(lr=lr, classes=train_generator.num_classes, optimizer_name=optimizer)\n print(model.optimizer)\n \n os.makedirs(\"./outputs\", exist_ok=True)\n\n with open('./outputs/labels.json', 'w') as fo:\n json.dump(train_generator.class_indices, fo)\n\n aml_callback = AzureMLCallback(run)\n checkpointer = ModelCheckpoint(\n filepath=\"./outputs/weights_{epoch:02d}.hdf5\", period=25)\n\n history = model.fit_generator(\n train_generator,\n steps_per_epoch=train_generator.samples / batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=validation_generator.samples / batch_size,\n verbose=2,\n callbacks=[aml_callback, checkpointer],\n )\n\n model.save(\"./outputs/final_model.hdf5\")", "def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()", "def trainModel(model, train_raw, validation_raw):\n # compile and train the model using the generator function\n train_generator = generator(train_raw, batch_size=32)\n validation_generator = generator(validation_raw, batch_size=32)\n\n # model checkpoint\n now = datetime.datetime.now()\n datenow = now.strftime(\"%Y-%m-%d-\")\n #file_path_model = \"Model_checkpoints/\" + datenow + \"model-weights-{epoch:02d}-{val_loss:0.2f}.hdf5\"\n file_path_model = \"Model_checkpoints/\" + datenow + \"model-weights.hdf5\"\n checkpoint = ModelCheckpoint(file_path_model, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')\n callbacks_list = [checkpoint]\n model.compile(loss='mse', optimizer='adam')\n # left/center/right images, and all flipped\n ntrain = len(train_raw)*3*2\n nvalid = len(validation_raw)*3*2\n history_object = model.fit_generator(train_generator, samples_per_epoch= \\\n ntrain, validation_data=validation_generator, \\\n nb_val_samples=nvalid, nb_epoch=10, \\\n callbacks = callbacks_list, verbose=1)\n #history_object = model.fit_generator(train_generator, steps_per_epoch= ntrain, \\\n # validation_data=validation_generator, validation_steps=nvalid, \\\n # callbacks = callbacks_list, epochs=5, verbose = 1) \n return history_object", "def save_model(model):\n model.to_disk(\"../model/custom_ner_model\")", "def train_teacher (nb_teachers, teacher_id):\n # Load the dataset\n X_train, X_test, y_train, y_test = models.get_dataset()\n\n print(X_train.shape)\n print(y_train.shape)\n print(X_test.shape)\n print(y_test.shape)\n \n # Retrieve subset of data for this teacher\n data, labels = partition.partition_dataset(X_train,\n y_train,\n nb_teachers,\n teacher_id)\n\n print(\"Length of training data: \" + str(len(labels)))\n\n # Define teacher checkpoint filename and full path\n\n filename = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.hdf5'\n filename2 = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.h5'\n \n # Perform teacher training need to modify \n \n\n # Create teacher model\n model, opt = models.create_two_layer_mlp(46) # num of cols\n model.compile(loss='binary_crossentropy',\n optimizer=\"Adam\",\n metrics=['accuracy'])\n model, hist = models.training(model, data, X_test, labels, y_test,filename)\n\n #modify\n model_json = 
model.to_json()\n with open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\n model.save_weights(filename2)\n print(\"Saved model to disk\")\n return True", "def train(self, X_train, y_train):\n\n self.model_pipeline.fit(X_train, y_train)", "def train_model():\n if (not request.json \n or not 'data-format' in request.json\n or not 'ML-model' in request.json\n or not 'source-data-directory' in request.json\n or not 'source-data-repo' in request.json):\n abort(400)\n\n data_format = request.json['data-format']\n ML_model = request.json['ML-model']\n source_data_directory = request.json['source-data-directory']\n source_data_repo = request.json['source-data-repo']\n \n start = time.time()\n\n do_train(model_name=ML_model, \n repo_url=source_data_repo, \n repo_data_directory=source_data_directory, \n data_format=data_format)\n \n end = time.time()\n\n runningtime = (end - start)\n\n return f'Model {ML_model} trained using source data in directory {source_data_directory}, running time {runningtime} seconds', 201", "def setup_training(model, train_loader, valid_loader, hps):\r\n\r\n train_dir = os.path.join(hps.save_root, \"train\")\r\n if not os.path.exists(train_dir): os.makedirs(train_dir)\r\n\r\n if hps.restore_model != 'None':\r\n logger.info(\"[INFO] Restoring %s for training...\", hps.restore_model)\r\n bestmodel_file = os.path.join(train_dir, hps.restore_model)\r\n loader = ModelLoader()\r\n loader.load_pytorch(model, bestmodel_file)\r\n else:\r\n logger.info(\"[INFO] Create new model for training...\")\r\n\r\n run_training(model, train_loader, valid_loader, hps) # this is an infinite loop until interrupted\r", "def train_model(model, X_train, X_valid, Y_train, Y_valid):\n\n # Let's CreateCheckPoint so as to save our best models and model Logs after every epoch.\n # This checkPoint function will be called as callback functions after every epoch.\n\n checkpoint = ModelCheckpoint(\n 'model-{epoch:03d}.h5', monitor='val_loss', verbose=0, save_best_only=True, mode='auto')\n\n tensorboard = TensorBoard(log_dir=\"log\\{}\".format(time())) \n\n model.compile(loss='mean_squared_error', optimizer=Adam(lr=LEARNING_RATE))\n\n model.fit_generator(batch_generator(DATA_DIR, X_train, Y_train, BATCH_SIZE, True),\n SAMPLES_PER_EPOCH,\n NO_OF_EPOCHS,\n max_q_size=1,\n validation_data=batch_generator(\n DATA_DIR, X_valid, Y_valid, BATCH_SIZE, False),\n nb_val_samples=len(X_valid),\n callbacks=[checkpoint, tensorboard],\n verbose=1\n )", "def save_model(self):\n self.pred_net.save((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.save((self.save_path / \"iqn_target_net\").absolute().as_posix())", "def train_model(model, dataset, validation_set, epochs, label_name,\n batch_size=None):\n\n # Split the training set into features and label.\n features = {name:np.array(value) for name, value in dataset.items()}\n label = np.array(features.pop(label_name))\n \n # Split the validation set into features and label.\n val_features = {name:np.array(value) for name, value in validation_set.items()}\n val_label = np.array(val_features.pop(label_name))\n \n history = model.fit(x=features, y=label, batch_size=batch_size,\n epochs=epochs, shuffle=True, validation_data=(val_features, val_label)) \n\n # The list of epochs is stored separately from the rest of history.\n epochs = history.epoch\n \n # To track the progression of training, gather a snapshot\n # of the model's mean squared error at each epoch. 
\n hist = pd.DataFrame(history.history)\n rmse = hist[\"root_mean_squared_error\"]\n\n return epochs, rmse, hist", "def start_training(self):\n i = 0\n for _ in range(self.train_steps):\n print(f\"Start Training Step {i + 1}\")\n self.model.learn(total_timesteps=self.total_time_steps)\n self.model.save(self.save_path)\n print(f\"Finished Training Step {i + 1}\")\n i += 1", "def train_and_eval(model_dir, model_type, train_steps, train_data, test_data, train_embeddings_file_name, test_embeddings_file_name, positive_labels, combination_method, method):\n \n index_map, weights = wvd.load(train_embeddings_file_name)\n #Get positive labels\n positive_labels = positive_labels.split(',')\n \n print(\"reading data...\")\n train_file_name = train_data \n df_train = pd.read_table(train_file_name, dtype={'node1':str, 'node2':str})\n df_train = df_train.sample(frac=1)\n\n # remove NaN elements\n df_train = df_train.dropna(how='any', axis=0)\n \n df_train[LABEL_COLUMN] = (\n df_train[\"label\"].apply(lambda x: label_func(x, positive_labels))).astype(int)\n\n model_dir = tempfile.mkdtemp() if not model_dir else model_dir\n print(\"model directory = %s\" % model_dir)\n \n train_x, _, train_y, _ = get_input(df_train, weights, index_map, combination_method)\n \n print(\"\\nBuilding model...\")\n m = build_estimator(model_dir, model_type, weights, index_map, combination_method)\n \n print(\"\\nTraining model...\")\n if model_type == \"regressor\":\n m.fit(train_x, train_y, n_epoch=train_steps, show_metric=True, snapshot_epoch=False)\n \n print(\"\\nTesting model...\")\n index_map, weights = wvd.load(test_embeddings_file_name)\n \n print(\"reading data...\")\n test_file_name = test_data\n df_test = pd.read_table(test_file_name, dtype={'node1':str, 'node2':str})\n df_test = df_test.sample(frac=1)\n\n # remove NaN elements\n df_test = df_test.dropna(how='any', axis=0)\n \n df_test[LABEL_COLUMN] = (\n df_test[\"label\"].apply(lambda x: label_func(x, positive_labels))).astype(int)\n \n if model_type == \"regressor\":\n test_x, test_original_y, test_index_y, test_original_x = get_input(df_test, weights, index_map, combination_method, data_purpose='test')\n node_sets = get_node_sets(test_original_x, test_original_y)\n \n print(\"\\nPredicting:\")\n model_predictions = m.predict(test_x)\n model_predictions = list(model_predictions)\n #Covert back to 1 and 0\n predictions = []\n model_predictions_probs = []\n for prediction in model_predictions:\n predictions.append(prediction[1]) #non-thresholded value of positve class\n model_predictions_probs.append(prediction[1])\n \n k = int(len([i for i in test_original_y if i == 1]) * 0.3)\n do_evaluations([x for x in test_original_x], [y for y in test_original_y], [p for p in predictions], k, node_sets, \n positive_labels, model=m, weights=weights, index_map=index_map, combination_method=combination_method)\n #Uncomment to log ranked links\n #log_predictions([x for x in test_original_x], [y for y in test_original_y], [p for p in predictions], k, node_sets, \n # positive_labels, model=m, weights=weights, index_map=index_map, combination_method=combination_method,\n # outfilename=combination_method, method=method)", "def train(self, dataset, k=20, n_epochs=10, save_dir='./models', save=True, model_name='GRU4REC'):\n print(f'Training {model_name}...')\n\n for epoch in range(n_epochs):\n results = self.run_epoch(dataset, k=k, training=True)\n results = [f'{k}:{v:.3f}' for k, v in results.items()]\n print(f'epoch:{epoch+1:2d}/{\"/\".join(results)}')\n \n # Store the intermediate 
model\n if save:\n save_dir = Path(save_dir)\n if not save_dir.exists(): save_dir.mkdir()\n model_fname = f'{model_name}_{self.loss_type}_{self.optimizer_type}_{self.lr}_epoch{epoch+1:d}'\n torch.save(self.gru.state_dict(), save_dir/model_fname)" ]
[ "0.7705048", "0.74385786", "0.7426859", "0.7407801", "0.7267371", "0.7208689", "0.71939373", "0.71755517", "0.71754044", "0.7151909", "0.7151358", "0.7147768", "0.7145118", "0.7128991", "0.71117663", "0.7085029", "0.7073787", "0.7053388", "0.70399153", "0.70298266", "0.702379", "0.7020547", "0.7000431", "0.69963115", "0.69957876", "0.6975565", "0.69719315", "0.692459", "0.6905385", "0.68885994", "0.68865496", "0.6881413", "0.68598706", "0.6840358", "0.6837278", "0.6833031", "0.68308747", "0.67920214", "0.67840433", "0.677422", "0.6760922", "0.6751502", "0.67489666", "0.6743778", "0.67423356", "0.67336494", "0.6728917", "0.6725941", "0.67234224", "0.67076045", "0.67050743", "0.6691931", "0.66871285", "0.6676595", "0.6676402", "0.667454", "0.66725355", "0.66650546", "0.66611016", "0.66486365", "0.6648079", "0.6646048", "0.6639478", "0.6639115", "0.66315794", "0.66142654", "0.6609703", "0.6609204", "0.66041833", "0.65919757", "0.65910083", "0.6590335", "0.6578667", "0.65765846", "0.65764374", "0.65759283", "0.65742743", "0.65733427", "0.65660536", "0.6563976", "0.6560233", "0.6556987", "0.65524936", "0.6552144", "0.65453595", "0.65434384", "0.6540877", "0.6537877", "0.6532175", "0.65266305", "0.6524525", "0.6523608", "0.65229046", "0.65190005", "0.6514093", "0.6510024", "0.65053195", "0.65003926", "0.6494326", "0.6485995" ]
0.7325195
4
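The document in the row above walks through assembling a scikit-learn Pipeline from a preprocessor, an optional dimensionality-reduction step, and a grid-searched estimator, then promoting the best estimator back into the pipeline. The short sketch below illustrates that pattern only; the dataset, estimator, and parameter grid are assumptions chosen for brevity and are not taken from the row, and only standard scikit-learn calls are used.

# Illustrative sketch, not the row's actual implementation.
from sklearn.datasets import load_iris
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Steps mirror the structure described above: preprocessor, optional reduction,
# then the estimator wrapped in a grid search so hyper-parameters are chosen by CV.
pipe_steps = [
    ("preprocessor", StandardScaler()),
    ("reduction", PCA(n_components=2)),
    ("grid_search", GridSearchCV(SVC(), param_grid={"C": [0.1, 1.0, 10.0]}, cv=5)),
]
pipe = Pipeline(pipe_steps)
pipe.fit(X_train, y_train)

# After fitting, the grid-search step exposes the winning estimator, which the
# document above then swaps back into the pipeline in place of the grid search.
best_estimator = pipe.named_steps["grid_search"].best_estimator_
print(pipe.named_steps["grid_search"].best_params_, pipe.score(X_test, y_test))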
Base class for Python RL environments.
def __init__(self, handle_auto_reset: bool = False): self._handle_auto_reset = handle_auto_reset self._current_time_step = None common.assert_members_are_not_overridden( base_cls=PyEnvironment, instance=self, denylist=('reset', 'step') )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self): \n\t\n\t # get the environment\n\t\tself.env = env()", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)", "def __init__(self, env):\n super().__init__(env)", "def __init__(self, env):\n super().__init__(env)", "def __init__(self, env):\n super().__init__(env)", "def _run_env(self):\n raise NotImplementedError()", "def __init__(self, env):\n gym.RewardWrapper.__init__(self, env)", "def __init__(self, env, system=None):\n self._env = env\n self._system = system if system is not None else {}", "def running_environment(self) -> type:\n return OrdinaryMNLBandit", "def get_environment():\n return GenericGymEnv(id=\"real-time-gym-v1\", gym_kwargs={\"config\": CONFIG_DICT})", "def __init__(self, env):\r\n gym.Wrapper.__init__(self, env)\r\n self.lives = 0\r\n self.was_real_done = True", "def __init__( self ):\n self._env = None\n self._steps = None\n\n self._initialize( )", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n self.lives = 0\n self.was_real_done = True", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n self.lives = 0\n self.was_real_done = True", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n self.lives = 0\n self.was_real_done = True", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n self.lives = 0\n self.was_real_done = True", "def __init__(self, printer):\n super(BashParentEnvironment, self).__init__()\n self._printer = printer", "def __init__(self, template_env):\r\n self._template_env = template_env\r\n self._pythons = []", "def __init__(self, \n ns: str, \n reward_fnc: str, \n is_action_space_discrete, \n safe_dist: float = None, \n goal_radius: float = 0.1, \n max_steps_per_episode=100, \n train_mode: bool = True, \n debug: bool = False,\n task_mode: str = \"staged\",\n PATHS: dict = dict(),\n extended_eval: bool = False,\n *args, **kwargs):\n super(FlatlandEnv, self).__init__()\n\n self.ns = ns\n try:\n # given every environment enough time to initialize, if we dont put sleep,\n # the training script may crash.\n ns_int = int(ns.split(\"_\")[1])\n time.sleep(ns_int*2)\n except Exception:\n rospy.logwarn(f\"Can't not determinate the number of the environment, training script may crash!\")\n pass\n\n\n # process specific namespace in ros system\n self.ns_prefix = '' if (ns == '' or ns is None) else '/'+ns+'/'\n \n if not debug:\n if train_mode:\n rospy.init_node(f'train_env_{self.ns}', disable_signals=False)\n else:\n rospy.init_node(f'eval_env_{self.ns}', disable_signals=False)\n\n self._extended_eval = extended_eval\n self._is_train_mode = rospy.get_param(\"/train_mode\")\n self._is_action_space_discrete = is_action_space_discrete\n \n self.setup_by_configuration(PATHS['robot_setting'], PATHS['robot_as'])\n\n # set rosparam\n rospy.set_param(\"/laser_num_beams\", self._laser_num_beams)\n \n # observation collector\n self.observation_collector = ObservationCollector(\n self.ns, self._laser_num_beams, self._laser_max_range)\n self.observation_space = self.observation_collector.get_observation_space()\n\n # reward calculator\n if safe_dist is None:\n 
safe_dist = 1.6*self._robot_radius\n\n self.reward_calculator = RewardCalculator(\n robot_radius=self._robot_radius, safe_dist=1.6*self._robot_radius, goal_radius=goal_radius, \n rule=reward_fnc, extended_eval=self._extended_eval)\n\n # action agent publisher\n if self._is_train_mode:\n self.agent_action_pub = rospy.Publisher(f'{self.ns_prefix}cmd_vel', Twist, queue_size=1)\n else:\n self.agent_action_pub = rospy.Publisher(f'{self.ns_prefix}cmd_vel_pub', Twist, queue_size=1)\n\n # service clients\n if self._is_train_mode:\n self._service_name_step = f'{self.ns_prefix}step_world'\n self._sim_step_client = rospy.ServiceProxy(\n self._service_name_step, StepWorld)\n \n # instantiate task manager\n self.task = get_predefined_task(\n ns, mode=task_mode, start_stage=kwargs['curr_stage'], PATHS=PATHS)\n\n self._steps_curr_episode = 0\n self._max_steps_per_episode = max_steps_per_episode\n\n # for extended eval\n self._action_frequency = 1/rospy.get_param(\"/robot_action_rate\")\n self._last_robot_pose = None\n self._distance_travelled = 0\n self._safe_dist_counter = 0\n self._collisions = 0\n self._in_crash = False", "def base_env(*args, **kwargs):\n try:\n # regular gym\n env = gym.make(*args, **kwargs)\n except:\n try:\n # gym retro\n env = retro.make(*args, **kwargs)\n except:\n # gym-super-mario-bros\n env = gym_super_mario_bros.make(*args, **kwargs)\n env.recognized = None\n return env", "def __init__(self, printer):\n super(PowershellParentEnvironment, self).__init__()\n self._printer = printer", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n # 1st, we will have 2 attributes: self.lives and self.was_real_done\n self.lives = 0\n self.was_real_done = True", "def init():\n env = Environment(5, 5, 20, [10, 20, 10, 5])\n return env", "def standard_env():\n \"\"\"An environment with some Scheme standard procedures.\"\"\"\n env = Env()\n env.update(\n {\n \"+\": op.add,\n \"-\": op.sub,\n \"*\": op.mul,\n \"/\": op.truediv,\n \"car\": lambda x: x[0],\n \"cdr\": lambda x: x[1:] if len(x) > 2 else x[1],\n \"cons\": lambda x, y: [x] + y if isinstance(y, list) else [x, y],\n \"eq?\": op.eq,\n \"atom?\": lambda x: type(x) in AtomicTypes,\n }\n )\n return env", "def initialise(self, args, environ):", "def __init__(self, env):\n\n self.env = env\n self.env_info = env.env_info\n\n self.best_policy = None\n self.best_score = 0\n\n # Time to run the environment for during training and evaluation\n self.max_t = MAX_T\n # Maximum possible reward given the environment\n if (self.env.max_reward_per_timestep is None\n and self.env.max_reward_per_episode is None):\n raise ValueError(\"Either max_reward_per_timestep or \"\n \"max_reward_per_episode needs to be set.\")\n elif (self.env.max_reward_per_timestep is None\n and self.env.max_reward_per_episode is None):\n raise ValueError(\"Either max_reward_per_timestep or \"\n \"max_reward_per_episode needs to be None.\")\n elif self.env.max_reward_per_timestep is not None:\n self.max_reward = self.env.max_reward_per_timestep * self.max_t\n else:\n self.max_reward = self.env.max_reward_per_episode", "def __init__(self,\n env_name='blobble-world-v0'\n ):\n self._env_name = env_name\n\n # Take a timestamp. 
This will be used for any output files created in the output folder\n self._timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n\n # Create training and evaluation environments\n self._train_py_env = suite_gym.load(self._env_name)\n self._eval_py_env = suite_gym.load(self._env_name)\n\n # Convert the training and test environments to Tensors\n self._train_env = tf_py_environment.TFPyEnvironment(self._train_py_env)\n self._eval_env = tf_py_environment.TFPyEnvironment(self._eval_py_env)\n print('=====================================================')\n print('Environments created for : ', self._env_name)\n print('Training Environment')\n print(' Observation Spec:')\n print(' ', self._train_env.time_step_spec().observation)\n print(' Reward Spec:')\n print(' ', self._train_env.time_step_spec().reward)\n print(' Action Spec:')\n print(' ', self._train_env.action_spec())\n print('Evaluation Environment')\n print(' Observation Spec:')\n print(' ', self._eval_env.time_step_spec().observation)\n print(' Reward Spec:')\n print(' ', self._eval_env.time_step_spec().reward)\n print(' Action Spec:')\n print(' ', self._eval_env.action_spec())\n print('=====================================================')\n\n self._config = BlobbleConfig('blobble_config.ini')\n self._config.print_config()\n\n # Get the demonstration parameters and output folder. We don't need these just yet but it's\n # good to do now in case there is an error in the config file (exception will be thrown)\n self._output_folder = (self._config.get_output_params()['output_folder'])\n\n self._num_demo_episodes = int(self._config.get_output_params()['num_demonstration_episodes'])\n demo_video = (self._config.get_output_params()['demonstration_video'])\n if demo_video == 'True':\n self._demo_video = True\n else:\n self._demo_video = False\n\n # Get and check the advanced learning parameters\n self._learning_rate = float(self._config.get_learning_adv_params()['learning_rate'])\n self._fc_layer_params = tuple(self._config.get_learning_adv_params()['fc_layer_params'].split(','))\n\n print('Create and train a neural network agent')\n self._neural_network_agent = create_neural_network_agent(self._train_env,\n self._learning_rate,\n self._fc_layer_params)\n\n learning_params = self._config.get_learning_params()\n train_neural_network(self._neural_network_agent,\n self._train_env,\n self._eval_env,\n num_train_iterations=learning_params['training_iterations'],\n log_interval=learning_params['training_log_interval'],\n eval_interval=learning_params['eval_interval'],\n num_eval_episodes=learning_params['num_eval_episodes'],\n replay_buffer_max_length=learning_params['replay_buffer_max_length'],\n collect_steps_per_iteration=learning_params['collect_steps_per_iteration'],\n output_folder=self._output_folder,\n timestamp=self._timestamp)", "def env(self) -> Optional[Env]:\n raise NotImplementedError", "def _get_environment(cls):\n return cls.__name__.lower()", "def SetupEnvironment(self):\n pass", "def getEnvironment(self):\n pass", "def __init__(self, *args):\n self.env = os.environ.copy()\n \"\"\"Environment variables (:class:`dict`)\"\"\"\n command = \"modulecmd python \"+' '.join(args)\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n retval = p.communicate()\n self._parse(retval)", "def step_env(self):\n raise NotImplementedError\n # Not needed for this homework", "def __init__(self, max_step=-1):\n self.environment = mls.rl.common.Environment()\n self.environment.game = mls.rl.common.Game(max_step=max_step)\n 
self.environment.current_state = self.environment.game.init_state(self.environment)", "def __init__(__self__,\n resource_name: str,\n args: EnvironmentArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self):\n self._opts = {} # dict of dicts of (opt:, override:, default:)\n self._groups = {}\n self._deprecated_opts = {}\n\n self._args = None\n\n self._oparser = None\n self._namespace = None\n self._mutable_ns = None\n self._mutate_hooks = set([])\n self.__cache = {}\n self.__drivers_cache = {}\n self._config_opts = []\n self._cli_opts = collections.deque()\n self._validate_default_values = False\n self._sources = []\n self._ext_mgr = None\n # Though the env_driver is a Source, we load it by default.\n self._use_env = True\n self._env_driver = _environment.EnvironmentConfigurationSource()\n\n self.register_opt(self._config_source_opt)", "def __init__(self, model: WorldModel, real_env: ModelBasedEnv):\n super(VirtualEnv, self).__init__()\n self.action_space = real_env.action_space\n self.observation_space = real_env.observation_space\n self.reward_range = real_env.reward_range\n self.cost_fn = real_env.cost_fn\n self.model = model\n self.current_state = None\n\n self.initial_states_pool = None\n\n if real_env.spec is not None and hasattr(real_env.spec, 'max_episode_steps'):\n self.max_episode_steps = real_env.spec.max_episode_steps\n else:\n self.max_episode_steps = float('inf')", "def __MakeEnvironment(self):\n environment= os.environ.copy()\n\n for key, value in self.__context.items():\n if type(value) is str:\n name = \"QMV_\" + key.replace(\".\", \"__\")\n environment[name]= value\n\n return environment", "def __init__(self):\n # Select all the environment variables starting with 'ASH_CFG_' and strip\n # off the leading ASH_CFG_ portion to use as the name of the variable.\n self.variables = dict(\n [(x[8:], y) for x, y in os.environ.items() if x.startswith('ASH_CFG_')]\n )", "def __init__(self, env, noop_max=30):\n gym.Wrapper.__init__(self, env)\n self.noop_max = noop_max\n self.override_num_noops = None\n self.noop_action = 0", "def __init__(self, environment_variables=None, var_prefix='PLATFORM_'):\n\n self._environmentVariables = os.environ if environment_variables is None else environment_variables\n self._varPrefix = var_prefix\n\n if self['ROUTES']:\n routes = self['ROUTES']\n self._routesDef = self.decode(routes)\n if self['RELATIONSHIPS']:\n relationships = self['RELATIONSHIPS']\n self._relationshipsDef = self.decode(relationships)\n self.register_formatter('pymongo', pymongo_formatter)\n self.register_formatter('pysolr', pysolr_formatter)\n self.register_formatter('postgresql_dsn', posgresql_dsn_formatter)\n\n if self['VARIABLES']:\n variables = self['VARIABLES']\n self._variablesDef = self.decode(variables)\n if self['APPLICATION']:\n application = self['APPLICATION']\n self._applicationDef = self.decode(application)", "def __init__(self, step_runner, properties, environ, universe_view,\n engine_flags=None):\n self._step_runner = step_runner\n self._properties = properties\n self._environ = environ.copy()\n self._universe_view = universe_view\n self._clients = {client.IDENT: client for client in (\n recipe_api.DependencyManagerClient(self),\n recipe_api.PathsClient(),\n recipe_api.PropertiesClient(self),\n recipe_api.SourceManifestClient(self, properties),\n recipe_api.StepClient(self),\n )}\n self._engine_flags = engine_flags\n\n # A stack of ActiveStep objects, holding the most recently executed step at\n # each nest level (objects deeper in the 
stack have lower nest levels).\n # When we pop from this stack, we close the corresponding step stream.\n self._step_stack = []", "def __attrs_post_init__(self):\n self.path = (Path(CONFIG['conda_folder']) / 'envs' / self.name)", "def initialize():\n environment = Environment()\n environment.setup()", "def __init__(self, name=None):\n super(TTSEngine, self).__init__()", "def __init__(__self__, *,\n application_name: pulumi.Input[str],\n cname_prefix: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n environment_name: Optional[pulumi.Input[str]] = None,\n operations_role: Optional[pulumi.Input[str]] = None,\n option_settings: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentOptionSettingArgs']]]] = None,\n platform_arn: Optional[pulumi.Input[str]] = None,\n solution_stack_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentTagArgs']]]] = None,\n template_name: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input['EnvironmentTierArgs']] = None,\n version_label: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"application_name\", application_name)\n if cname_prefix is not None:\n pulumi.set(__self__, \"cname_prefix\", cname_prefix)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if environment_name is not None:\n pulumi.set(__self__, \"environment_name\", environment_name)\n if operations_role is not None:\n pulumi.set(__self__, \"operations_role\", operations_role)\n if option_settings is not None:\n pulumi.set(__self__, \"option_settings\", option_settings)\n if platform_arn is not None:\n pulumi.set(__self__, \"platform_arn\", platform_arn)\n if solution_stack_name is not None:\n pulumi.set(__self__, \"solution_stack_name\", solution_stack_name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if template_name is not None:\n pulumi.set(__self__, \"template_name\", template_name)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)\n if version_label is not None:\n pulumi.set(__self__, \"version_label\", version_label)", "def make_problem_service(config: ProblemServiceConfig):\n\n class ProblemService(rpyc.Service):\n \"\"\"Spools up a new Python interpreter and uses it to sandbox SSiPP and\n MDPSim. 
Can interact with this to train a Q-network.\"\"\"\n\n # TODO: figure out some way of ensuring that arguments and return\n # values are deep-copied, or the whole impl will end up dog slow.\n\n def exposed_extend_replay(self, get_action, n_paths):\n \"\"\"Extend the replay buffer using the given policy (represented as a\n function from flattened observation vectors to action numbenrs).\"\"\"\n n_paths = to_local(n_paths)\n return self.internal_extend_replay(get_action, n_paths)\n\n def exposed_batch_iter(self, batch_size, n_batches):\n \"\"\"Sample <batch_size> elements from internal buffer.\"\"\"\n batch_size = to_local(batch_size)\n n_batches = to_local(n_batches)\n # first convert replay buffer to a list so that we can shuffle and\n # take indices\n assert len(self.replay) > 0, 'need non-empty replay pool'\n ordered_buf = list(self.replay)\n shuffle(ordered_buf) # in-place\n gen = cycle(ordered_buf)\n for batch_num in range(n_batches):\n rich_batch = list(islice(gen, batch_size))\n yield self.flatten_batch(rich_batch)\n\n def exposed_env_reset(self):\n return self.env_wrapped.reset()\n\n def exposed_action_name(self, action_num):\n action_num = to_local(action_num)\n return self.env_raw.action_name(action_num)\n\n def exposed_env_step(self, action):\n action = to_local(action)\n return self.env_wrapped.step(action)\n\n # note to self: RPyC doesn't support @property\n\n def exposed_get_ssipp_dead_end_value(self):\n return self.p.ssipp_dead_end_value\n\n def exposed_get_meta(self):\n \"\"\"Get name, ProblemMeta and DomainMeta for the current problem.\"\"\"\n return self.problem_meta, self.domain_meta\n\n def exposed_get_replay_size(self):\n return len(self.replay)\n\n def exposed_get_env_spec(self):\n # this will have to be deep-copied to actually work (I think)\n return self.env_wrapped.spec\n\n def exposed_get_obs_dim(self):\n return self.env_wrapped.observation_space.flat_dim\n\n def exposed_get_act_dim(self):\n return self.env_wrapped.action_space.flat_dim\n\n def exposed_get_obs_dtype_name(self):\n return self.env_wrapped.observation_space.dtype.name\n\n def exposed_get_dg_extra_dim(self):\n data_gens = self.env_raw.data_gens\n return sum([g.extra_dim for g in data_gens])\n\n def exposed_get_max_len(self):\n return self.max_len\n\n def exposed_get_problem_names(self):\n # fetch a list of all problems loaded by MDPSim\n return sorted(self.p.mdpsim.get_problems().keys())\n\n def exposed_get_current_problem_name(self):\n return self.p.problem_name\n\n def on_connect(self):\n self.p = PlannerExtensions(config.pddl_files,\n config.init_problem_name)\n mdpsim_p = self.p.mdpsim_problem\n self.domain_meta = get_domain_meta(mdpsim_p.domain)\n self.problem_meta = get_problem_meta(mdpsim_p, self.domain_meta)\n self.env_raw, self.env_wrapped = create_environment(\n problem_meta=self.problem_meta,\n planner_extensions=self.p,\n heuristic_name=config.heuristic_name,\n use_lm_cuts=config.use_lm_cuts,\n dump_table_path=config.dump_table_path,\n dump_table_interval=config.dump_table_interval)\n self.planner = Planner(self.p, 'lrtdp', config.teacher_heur)\n\n # maximum length of a trace to gather\n self.max_len = config.max_len\n # will hold (state, action) pairs to train on\n self.replay = set() # type: Set[StateQVs]\n\n @lru_cache(None)\n def opt_pol_envelope(self, obs: FPGObservation) -> List[StateQVs]:\n \"\"\"Get (s, a) pairs for optimal policy from given state.\"\"\"\n return planner_trace(self.planner, self.problem_meta,\n self.p.mdpsim_problem, obs)\n\n def internal_extend_replay(self, 
get_action: Any, n_paths: int) \\\n -> float:\n \"\"\"Extend the supervision buffer with some new paths. Can probably make\n this more sophisticated by turning it into a least-recently-visited\n cache or something.\"\"\"\n paths, succ_rate = collect_paths(\n rllab_env=self.env_wrapped,\n prob_meta=self.problem_meta,\n get_action=get_action,\n n_paths=n_paths,\n max_len=self.max_len)\n new_pairs = set()\n for path in paths:\n # ignore original action\n for obs, _ in path:\n # I used to hard negative mine (only add to training set if\n # net gets it wrong), but now I don't bother\n new_pairs.update(self.opt_pol_envelope(obs))\n self.replay.update(new_pairs)\n return succ_rate\n\n def flatten_batch(self, rich_batch: List[StateQVs]) \\\n -> Tuple[np.ndarray, np.ndarray]:\n rich_obs, rich_qvs = zip(*rich_batch)\n obs_tensor = self.env_raw.observation_space.flatten_n(rich_obs)\n qv_lists = []\n for qv_pairs in rich_qvs:\n qv_dict = dict(qv_pairs)\n qv_list = [\n qv_dict[ba] for ba in self.problem_meta.bound_acts_ordered\n ]\n qv_lists.append(qv_list)\n qv_tensor = np.array(qv_lists)\n return obs_tensor, qv_tensor\n\n return ProblemService", "def set_pyenv_cfg(self): # noqa: D205\n super().set_pyenv_cfg()\n self.pyenv_cfg[\"base-prefix\"] = self.interpreter.system_prefix\n self.pyenv_cfg[\"base-exec-prefix\"] = self.interpreter.system_exec_prefix\n self.pyenv_cfg[\"base-executable\"] = self.interpreter.system_executable", "def cli(self, env):\n raise NotImplementedError", "def __init__(self):\n # The current information for the environment\n self.state_space = ()\n self.action_space = ()\n self.state = None\n self.reward = 0\n self.terminal = False\n self.truncated = False\n self.info = None", "def make_env():\n return {\n 'init': init,\n 'step': step,\n 'is_terminal': is_terminal,\n 'state_as_example': state_as_example,\n }", "def __init__(self, installation, language, version, parent_shell):\n super(LanguageBase, self).__init__(installation)\n self._language = language\n self._version = version\n self._installation = self._container_dir\n self._parent_shell = parent_shell", "def __init__(self, parms, body, env):\n self.parms = parms\n self.body = body\n self.env = env", "def __init__(self, evaluation_only=False, seed=0):\n os = platform.system()\n if os == 'Darwin':\n file_name = 'Soccer.app'\n elif os == 'Linux':\n file_name = 'Soccer_Linux/Soccer.x86_64'\n self.env = UnityEnvironment(file_name='unity_envs/' + file_name, seed=seed)\n self.brain_names = self.env.brain_names\n self.evaluation_only = evaluation_only", "def scope(self): # noqa: ANN201", "def __init__(self):\n self.shell = get_ipython()\n if self.shell is not None:\n self.pt_app = get_app()\n self.session = get_session()\n else:\n from prompt_toolkit import VERSION as ptk_version\n\n # Gonna need a ptk version check\n if ptk_version > 3:\n from prompt_toolkit.application.current import get_app_or_none\n\n self.pt_app = get_app_or_none()\n self.session = self.pt_app.session\n # todo:\n # else:", "def __init__(self, engine: str = \"sfdp\"):\n self.engine = engine", "def get_environment() -> Environment:\n return Environment(\n media_url=get_endpoint(\"MEDIA\"),\n datastore_reader_url=get_endpoint(\"DATASTORE_READER\"),\n datastore_writer_url=get_endpoint(\"DATASTORE_WRITER\"),\n vote_url=get_endpoint(\"VOTE\"),\n )", "def __init__(self, env):\n super().__init__(env)\n self.lives = 0\n self.was_real_done = True", "def __init__(self, runtime_dir=\"/tmp/tbots\"):\n\n # inputs to full_system\n self.robot_status_sender = 
ThreadedUnixSender(runtime_dir + ROBOT_STATUS_PATH)\n self.ssl_wrapper_sender = ThreadedUnixSender(runtime_dir + SSL_WRAPPER_PATH)\n self.ssl_referee_sender = ThreadedUnixSender(runtime_dir + SSL_REFEREE_PATH)\n self.sensor_proto_sender = ThreadedUnixSender(runtime_dir + SENSOR_PROTO_PATH)\n\n # outputs from full_system\n self.world_listener = ThreadedUnixListener(runtime_dir + WORLD_PATH, World)\n self.primitive_listener = ThreadedUnixListener(\n runtime_dir + PRIMITIVE_PATH, PrimitiveSet\n )\n\n # override the tactic\n self.tactic_override = ThreadedUnixSender(runtime_dir + TACTIC_OVERRIDE_PATH)\n\n # TODO (#2510) rename to full_system\n self.full_system_process = Popen([\"software/unix_full_system\"])", "def __init__():", "def __init__(self):\n BuildSystemBase.__init__(self, \"makefile\")", "def __init__(self, env: gym.Env, eval_episodes: int, render_freq: int, \n fps: int, verbose=0):\n super().__init__(verbose=verbose)\n self.env = env\n self.eval_episodes = eval_episodes\n self.render_freq = render_freq\n self.fps = fps", "def _base(self):\n pass", "def __init__(self, env):\r\n gym.Wrapper.__init__(self, env)\r\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE'\r\n assert len(env.unwrapped.get_action_meanings()) >= 3", "def get_py(self):\n pass", "def __init__(self, environ, stdin):\n\n # Request-Line\n # ============\n\n method = environ['REQUEST_METHOD']\n\n path = environ.get('PATH_INFO', '/')\n query = environ.get('QUERY_STRING', '')\n if query:\n query = '?' + query\n raw_uri = environ.get('SCRIPT_NAME', '') + path + query\n uri = urlparse.urlparse(raw_uri)\n keys = ( 'scheme'\n , 'netloc'\n , 'path'\n , 'parameters'\n , 'query'\n , 'fragment'\n )\n _uri = {}\n for i in range(len(uri)):\n k = keys[i]\n v = uri[i]\n _uri[k] = v\n uri = _uri\n\n http_version = environ['SERVER_PROTOCOL']\n\n raw_line = \"%s %s %s\\r\\n\" % (method, raw_uri, http_version)\n\n\n # Headers\n # =======\n\n headers = []\n for k, v in environ.iteritems():\n k = k.lower()\n if k.startswith('http_'):\n as_string = \"%s: %s\" % (k[5:].replace('_','-'), v)\n headers.append(as_string)\n raw_headers = '\\r\\n'.join(headers)\n raw_headers += '\\r\\n'\n headers = message_from_string(raw_headers)\n\n\n # Body\n # ====\n\n raw_body = stdin.read()\n\n\n # Save the API we want.\n # =====================\n\n raw = raw_line + raw_headers + raw_body\n\n self.raw = raw\n self.raw_line = raw_line\n self.raw_headers = raw_headers\n self.raw_body = raw_body\n\n self.method = method\n self.uri = uri\n self.path = path\n self.headers = headers", "def __init__(self):\n self.__verbose=False\n self.__fake=False\n self.__all=False\n self.__detector=''\n self.__authpath='.'\n self.__connect=''\n self.__registry={}\n self.__basedir=''\n self.__dbconfig=''", "def main(cls):\n raise NotImplementedError", "def register_env_creator(self):\n raise NotImplementedError(\"Subclasses should implement this to call ray.tune.registry.register_env\")", "def standard_env():\n env = Env()\n\n env.update(vars(math)) # gives us sin, cos, sqrt, pi\n\n env.update({\n '+': op.add, '-': op.sub, '*':op.mul, '/': op.truediv,\n '>': op.gt, '<': op.lt, '>=': op.ge, '<=': op.le, '=': op.eq,\n 'begin': lambda *x: x[-1],\n 'or': op.or_,\n 'even?': lambda x: x % 2 == 0\n })\n return env", "def __init__(self, env, key):\n gym.ObservationWrapper.__init__(self, env)\n self.key = key", "def __init__(self, env, key):\n gym.ObservationWrapper.__init__(self, env)\n self.key = key", "def __init__(self, source_dir, strict=True, interpreter=None, 
install_dir=None):\r\n self._source_dir = source_dir\r\n self._install_tmp = install_dir or safe_mkdtemp()\r\n self._installed = None\r\n self._strict = strict\r\n self._interpreter = interpreter or PythonInterpreter.get()\r\n if not self._interpreter.satisfies(self.capability) and strict:\r\n raise self.IncapableInterpreter('Interpreter %s not capable of running %s' % (\r\n self._interpreter, self.__class__.__name__))", "def __init__(self, name=\"ProRobot\"):\n super().__init__(name)", "def environment_variables(self, alias):\n raise NotImplementedError", "def __init__(self, *args, **kwargs):\n super(PythonTaskWrapper, self).__init__(*args, **kwargs)\n\n self.setOption(\n 'executableName',\n os.environ.get(\n 'KOMBI_PYTHON3_EXECUTABLE',\n 'python3'\n )\n )", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE'\n assert len(env.unwrapped.get_action_meanings()) >= 3", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE'\n assert len(env.unwrapped.get_action_meanings()) >= 3", "def Setup(self):\n raise NotImplementedError(\n 'No runtime setup defined for %s' % self.__class__.__name__)", "def __init__(self):\n self.parser_model_dir = None\n self.parser_options = {}\n self.reranker_model = None\n self.unified_model_dir = None", "def base():", "def _create_extra_environment(self):\n return {}", "def __init__(self):\n # env = 'Hexapod-v1'\n env = 'HexapodFewerStatesStandardRL-v1'\n alg = 'ppo2'\n purpose = \"standard-rl\"\n num_timesteps = 1e7\n if_load = False\n play = False\n\n base_path = \"/home/shiqing/gc-batch-rl-locomotion/\"\n date_str = datetime.strftime(datetime.now(), '%Y-%m-%d_%H-%M-%S')\n storage_path = os.path.join(base_path, \"logs\", date_str + \"_\" + env + \"_\" + purpose + \"_\" + alg + \"_\" + str(\"%e\"%num_timesteps))\n if play: storage_path = storage_path + \"_\" + \"play\"\n save_path = os.path.join(storage_path, \"save\")\n log_path = os.path.join(storage_path, \"log\")\n os.mkdir(storage_path)\n os.mkdir(save_path)\n os.mkdir(log_path)\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--env', help='environment ID', type=str, default=env)\n parser.add_argument('--env_type', help='type of environment, used when the environment type cannot be automatically determined', type=str)\n parser.add_argument('--seed', help='RNG seed', type=int, default=0)\n parser.add_argument('--alg', help='Algorithm', type=str, default=alg)\n parser.add_argument('--num_timesteps', type=float, default=num_timesteps),\n parser.add_argument('--network', help='network type (mlp, cnn, lstm, cnn_lstm, conv_only)', default='mlp')\n parser.add_argument('--gamestate', help='game state to load (so far only used in retro games)', default=None)\n parser.add_argument('--num_env', help='Number of environment copies being run in parallel. When not specified, set to number of cpus for Atari, and to 1 for Mujoco', default=None, type=int)\n parser.add_argument('--reward_scale', help='Reward scale factor. 
Default: 1.0', default=1.0, type=float)\n parser.add_argument('--save_path', help='Path to save trained model to', default=save_path+'/'+ 'model_weights', type=str)\n parser.add_argument('--load_path', help='Path to load trained model from', default='/home/shiqing/gc-batch-rl-locomotion/logs/2020-10-29_00-58-19_HexapodFewerStatesStandardRL-v1_standard-rl_ppo2_4.000000e+06/save/model_weights', type=str)\n parser.add_argument('--save_video_interval', help='Save video every x steps (0 = disabled)', default=0, type=int)\n parser.add_argument('--save_video_length', help='Length of recorded video. Default: 200', default=200, type=int)\n parser.add_argument('--log_path', help='Directory to save learning curve data.', default=log_path, type=str)\n parser.add_argument('--play', default=play)\n parser.add_argument('--if_load', default=if_load)\n\n self.args = parser.parse_args()\n\n #########Extra args (defaults)########\n # extra_args= dict(\n # nsteps=2048,\n # nminibatches=32,\n # lam=0.95,\n # gamma=0.99,\n # noptepochs=10,\n # log_interval=1,\n # ent_coef=0.0,\n # lr=lambda f: 3e-4 * f,\n # cliprange=0.2,\n # value_network='copy')", "def __init__(self, herbstclient='herbstclient'):\n self.herbstclient_path = herbstclient\n self.env = None\n self.proc = None", "def __init__(self, conf, python, requirements, tagged_env_vars):\n self._env_dir = conf.env_dir\n self._repo_subdir = conf.repo_subdir\n self._install_timeout = conf.install_timeout # gh-391\n self._default_benchmark_timeout = conf.default_benchmark_timeout # gh-973\n self._tagged_env_vars = tagged_env_vars\n self._path = os.path.abspath(os.path.join(\n self._env_dir, self.dir_name))\n self._project = conf.project\n\n self._is_setup = False\n\n self._cache = build_cache.BuildCache(conf, self._path)\n self._build_root = os.path.abspath(os.path.join(self._path, 'project'))\n\n self._requirements = requirements\n # These are needed for asv to build and run the project, not part of\n # benchmark name mangling\n self._base_requirements = {}\n # gh-1314\n asv_runner_path = os.getenv(\"ASV_RUNNER_PATH\", \"\")\n module_path = Path(asv_runner_path) / \"asv_runner\"\n\n # Check if the path points to a directory containing the \"asv_runner\" module\n if module_path.is_dir() and (module_path / \"__init__.py\").is_file():\n spec = importlib.util.spec_from_file_location(\"asv_runner\",\n module_path / \"__init__.py\")\n # Attempt to load the module\n asv_runner_module = importlib.util.module_from_spec(spec)\n try:\n spec.loader.exec_module(asv_runner_module)\n self._base_requirements[\"pip+asv_runner\"] = asv_runner_path\n except Exception as e:\n self._base_requirements[\"pip+asv_runner\"] = \"\"\n log.warning(f\"Failed to load module from ASV_RUNNER_PATH: {e}\")\n else:\n self._base_requirements[\"pip+asv_runner\"] = \"\"\n if asv_runner_path:\n log.warning(\"ASV_RUNNER_PATH does not point\"\n \"to a directory containing the 'asv_runner' module\")\n if not util.ON_PYPY:\n # XXX: What if pypy installed asv tries to benchmark a cpython\n # python?\n self._base_requirements[\"pip+pympler\"] = \"\"\n if (Path.cwd() / \"poetry.lock\").exists():\n self._base_requirements[\"poetry-core\"] = \"\"\n\n if (Path.cwd() / \"pdm.lock\").exists():\n self._base_requirements[\"pdm\"] = \"\"\n\n # Update the _base_requirements if needed\n for key in list(self._requirements.keys()):\n if key in self._base_requirements:\n self._base_requirements[key] = self._requirements[key]\n del self._requirements[key]\n\n self._build_command = conf.build_command\n 
self._install_command = conf.install_command\n self._uninstall_command = conf.uninstall_command\n\n self._global_env_vars = {}\n self._global_env_vars['ASV'] = 'true'\n self._global_env_vars['ASV_PROJECT'] = conf.project\n self._global_env_vars['ASV_CONF_DIR'] = os.path.abspath(os.getcwd())\n self._global_env_vars['ASV_ENV_NAME'] = self.name\n self._global_env_vars['ASV_ENV_DIR'] = self._path\n self._global_env_vars['ASV_ENV_TYPE'] = self.tool_name\n\n installed_commit_hash = self._get_installed_commit_hash()\n self._set_commit_hash(installed_commit_hash)", "def __init__(self, env):\n self._env = env\n self._routes = []\n self._proto = None\n self._port = None\n self._state = False\n self._key = None", "def _env_setup(self, initial_qpos):\n raise NotImplementedError()", "def user_env(self):\n\n # FIXME I think the JPY_ variables have been deprecated in JupyterHub\n # since 0.7.2, we should replace them. Can we figure this out?\n\n env = super(EC2Spawner, self).get_env()\n env.update(dict(\n JUPYTERHUB_PREFIX=self.hub.server.base_url,\n Name='Jupyter',\n PATH=self.path\n ))\n\n if self.notebook_dir:\n env['NOTEBOOK_DIR'] = self.notebook_dir\n\n hub_api_url = self.hub.api_url\n if self.hub_api_url != '':\n hub_api_url = self.hub_api_url\n\n env['JPY_HUB_API_URL'] = hub_api_url\n env['JUPYTERHUB_API_URL'] = hub_api_url\n\n self.log.debug(\"Env built: {}\".format(env))\n return env", "def get_empty_env():\n return EvalEnvironment(namespaces={})", "def get_empty_env():\n return EvalEnvironment(namespaces={})", "def __init__(self, agent_id=\"default\", experiment_id=\"default\"):\n self.runtime = runtime()\n self.agent_id = agent_id\n self.experiment_id = experiment_id", "def __init__(self):\n\n self.config = load_config()\n self.set_env_var()", "def __init__(object):" ]
[ "0.68557495", "0.66981584", "0.66981584", "0.66636485", "0.66636485", "0.66636485", "0.6464115", "0.616451", "0.6071239", "0.5907203", "0.589397", "0.58922243", "0.58735794", "0.58711684", "0.58711684", "0.58711684", "0.58711684", "0.58711684", "0.58711684", "0.5841212", "0.5841212", "0.5832843", "0.5832843", "0.58098954", "0.57577604", "0.56992954", "0.5657971", "0.5650119", "0.56304014", "0.5618274", "0.56020564", "0.5596296", "0.55683327", "0.55449563", "0.5544772", "0.55331326", "0.55248123", "0.55241865", "0.5516979", "0.55163074", "0.5468055", "0.54613525", "0.5453816", "0.5452291", "0.5451629", "0.54513377", "0.54403", "0.54392517", "0.54213804", "0.542035", "0.54173666", "0.5410413", "0.5403248", "0.5387881", "0.5384381", "0.537894", "0.5376007", "0.5373222", "0.5358999", "0.53305006", "0.5328333", "0.5323135", "0.5319307", "0.53130233", "0.53086436", "0.53054", "0.53033555", "0.52972734", "0.5296905", "0.52761036", "0.5268875", "0.52669555", "0.5257173", "0.5256486", "0.5236418", "0.5234417", "0.5225822", "0.5219782", "0.5215224", "0.5215224", "0.52119434", "0.52106225", "0.5200898", "0.5197508", "0.5196801", "0.5196801", "0.5186255", "0.51714736", "0.516971", "0.51684445", "0.51646864", "0.51578754", "0.51563543", "0.5151843", "0.51499724", "0.5146501", "0.5140478", "0.5140478", "0.5136346", "0.51337147", "0.51314086" ]
0.0
-1
Whether the environment is batched or not. If the environment supports batched observations and actions, then overwrite this property to True. A batched environment takes in a batched set of actions and returns a batched set of observations. This means for all numpy arrays in the input and output nested structures, the first dimension is the batch size. When batched, the leftmost dimension is not part of the action_spec or the observation_spec and corresponds to the batch dimension. When batched and handle_auto_reset, it checks `np.all(steps.is_last())`.
def batched(self) -> bool:
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_size(self) -> Optional[int]:\n if self.batched:\n raise RuntimeError(\n 'Environment %s marked itself as batched but did not override the '\n 'batch_size property'\n % type(self)\n )\n return None", "def is_batch():\n\n pass", "def _global_batch_size(self):\n return True", "def has_full_batch(self) -> bool:", "def should_handle_all_batches(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"should_handle_all_batches\")", "def batching_enabled(self) -> bool:\n ...", "def has_batch(self) -> bool:\n return self._buffer and (self._batch_size is None or sum(\n BlockAccessor.for_block(b).num_rows()\n for b in self._buffer) >= self._batch_size)", "def dispatch_one_batch(self, iterator):\n\n if self._aborting:\n return False\n\n batch_size = self._get_batch_size()\n\n with self._lock:\n # to ensure an even distribution of the workload between workers,\n # we look ahead in the original iterators more than batch_size\n # tasks - However, we keep consuming only one batch at each\n # dispatch_one_batch call. The extra tasks are stored in a local\n # queue, _ready_batches, that is looked-up prior to re-consuming\n # tasks from the origal iterator.\n try:\n tasks = self._ready_batches.get(block=False)\n except queue.Empty:\n # slice the iterator n_jobs * batchsize items at a time. If the\n # slice returns less than that, then the current batchsize puts\n # too much weight on a subset of workers, while other may end\n # up starving. So in this case, re-scale the batch size\n # accordingly to distribute evenly the last items between all\n # workers.\n n_jobs = self._cached_effective_n_jobs\n big_batch_size = batch_size * n_jobs\n\n islice = list(itertools.islice(iterator, big_batch_size))\n if len(islice) == 0:\n return False\n elif (iterator is self._original_iterator and\n len(islice) < big_batch_size):\n # We reached the end of the original iterator (unless\n # iterator is the ``pre_dispatch``-long initial slice of\n # the original iterator) -- decrease the batch size to\n # account for potential variance in the batches running\n # time.\n final_batch_size = max(1, len(islice) // (10 * n_jobs))\n else:\n final_batch_size = max(1, len(islice) // n_jobs)\n\n # enqueue n_jobs batches in a local queue\n for i in range(0, len(islice), final_batch_size):\n tasks = BatchedCalls(islice[i:i + final_batch_size],\n self._backend.get_nested_backend(),\n self._reducer_callback,\n self._pickle_cache)\n self._ready_batches.put(tasks)\n\n # finally, get one task.\n tasks = self._ready_batches.get(block=False)\n if len(tasks) == 0:\n # No more tasks available in the iterator: tell caller to stop.\n return False\n else:\n self._dispatch(tasks)\n return True", "def batch_size(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"batch_size\")", "def isFull(self):\n if len(self.batch) == self.__batch_size:\n return True\n return False", "def get_evaluation_batch_size():\n return 1", "def batch(self):\n return self._batch", "def _batch(self, batch_size):\n transform_or_spec = self._specs.get(\n 'transform_or_spec', self.transform_or_spec)\n if hasattr(transform_or_spec, '_batch'):\n transform_or_spec = transform_or_spec._batch(batch_size)\n return _DeferredTensorSpec(\n self._get_batched_input_spec(batch_size),\n transform_or_spec=transform_or_spec,\n dtype=self.dtype,\n shape=(None if self.shape is None\n else tf.TensorShape([batch_size]).concatenate(self.shape)),\n name=self.name,\n also_track_spec=self._also_track_spec)", "def _implements_predict_batch_hooks(self):\n return not 
is_default(self.on_predict_batch_begin) or not is_default(\n self.on_predict_batch_end\n )", "def batch_size(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"batch_size\")", "def _implements_train_batch_hooks(self):\n return not is_default(self.on_train_batch_begin) or not is_default(\n self.on_train_batch_end\n )", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def compute_batch(self, duplicate_manager=None,context_manager=None):\n from ...acquisitions import AcquisitionTS\n assert isinstance(self.acquisition, AcquisitionTS)\n \n X_batch,_ = self.acquisition.optimize()\n k=1\n \n # --- GET the remaining elements\n while k<self.batch_size:\n new_sample,_ = self.acquisition.optimize()\n X_batch = np.vstack((X_batch,new_sample))\n k +=1\n \n return X_batch", "def batch_size(self) -> int:\n ...", "def batch_size(self):\n return self._batch_size", "def batch_size(self):\n return self._batch_size", "def batch_size(self):\n return self._batch_size", "def batch_size(self):\n return self._batch_size", "def batch_size(self):\n if self._batch_size is not None:\n return self._batch_size # custom batch size defined\n if self.task == 'objdet':\n return 8\n annos_per_img = self._annos_per_img[self.dataset]\n if self.task in {'predcls', 'sgcls'}:\n annos_per_img = annos_per_img['pairs']\n elif self.task == 'objcls':\n annos_per_img = annos_per_img['objects']\n elif self.task == 'preddet' and self.filter_multiple_preds:\n annos_per_img = annos_per_img['predicates_filtered']\n elif self.task == 'preddet' and self.filter_duplicate_rels:\n annos_per_img = annos_per_img['duplicates_filtered']\n elif self.task in {'preddet', 'sggen'}:\n annos_per_img = annos_per_img['relations']\n batch_size = ceil(self._annotations_per_batch / annos_per_img)\n return max(batch_size, 2)", "def _get_batch_size(self):\n if self.batch_size == 'auto':\n return self._backend.compute_batch_size()\n else:\n # Fixed batch size strategy\n return self.batch_size", "def _assert_is_batched(self, *arrays):\n shape_list = []\n for array in arrays:\n if isinstance(array, tf.Tensor):\n shape_list.append(array.shape.as_list())\n else:\n shape_list.append(np.shape(array))\n # All arrays should have at least two dimensions.\n assert all([len(shape) >= 2 for shape in shape_list])\n # All arrays should have the same batch size.\n assert len(set([shape[0] for shape in shape_list])) == 1", "def batch_size(self):\n return self.size", "def process_state_batch(self, batch):\n # batch = np.squeeze(batch, axis=1)\n batch = np.array([np.concatenate(obs, axis=-1) for obs in batch])\n return batch", "def _implements_test_batch_hooks(self):\n return not is_default(self.on_test_batch_begin) or not is_default(\n self.on_test_batch_end\n )", "def compute_actions(\n self,\n observations: TensorStructType,\n state: Optional[List[TensorStructType]] = None,\n *,\n prev_action: Optional[TensorStructType] = None,\n prev_reward: Optional[TensorStructType] = None,\n info: Optional[EnvInfoDict] = None,\n policy_id: PolicyID = DEFAULT_POLICY_ID,\n full_fetch: bool = False,\n explore: Optional[bool] = None,\n timestep: Optional[int] = None,\n episodes: Optional[List[Episode]] = None,\n unsquash_actions: Optional[bool] 
= None,\n clip_actions: Optional[bool] = None,\n **kwargs,\n ):\n # `unsquash_actions` is None: Use value of config['normalize_actions'].\n if unsquash_actions is None:\n unsquash_actions = self.config.normalize_actions\n # `clip_actions` is None: Use value of config['clip_actions'].\n elif clip_actions is None:\n clip_actions = self.config.clip_actions\n\n # Preprocess obs and states.\n state_defined = state is not None\n policy = self.get_policy(policy_id)\n filtered_obs, filtered_state = [], []\n for agent_id, ob in observations.items():\n worker = self.workers.local_worker()\n preprocessed = worker.preprocessors[policy_id].transform(ob)\n filtered = worker.filters[policy_id](preprocessed, update=False)\n filtered_obs.append(filtered)\n if state is None:\n continue\n elif agent_id in state:\n filtered_state.append(state[agent_id])\n else:\n filtered_state.append(policy.get_initial_state())\n\n # Batch obs and states\n obs_batch = np.stack(filtered_obs)\n if state is None:\n state = []\n else:\n state = list(zip(*filtered_state))\n state = [np.stack(s) for s in state]\n\n input_dict = {SampleBatch.OBS: obs_batch}\n\n # prev_action and prev_reward can be None, np.ndarray, or tensor-like structure.\n # Explicitly check for None here to avoid the error message \"The truth value of\n # an array with more than one element is ambiguous.\", when np arrays are passed\n # as arguments.\n if prev_action is not None:\n input_dict[SampleBatch.PREV_ACTIONS] = prev_action\n if prev_reward is not None:\n input_dict[SampleBatch.PREV_REWARDS] = prev_reward\n if info:\n input_dict[SampleBatch.INFOS] = info\n for i, s in enumerate(state):\n input_dict[f\"state_in_{i}\"] = s\n\n # Batch compute actions\n actions, states, infos = policy.compute_actions_from_input_dict(\n input_dict=input_dict,\n explore=explore,\n timestep=timestep,\n episodes=episodes,\n )\n\n # Unbatch actions for the environment into a multi-agent dict.\n single_actions = space_utils.unbatch(actions)\n actions = {}\n for key, a in zip(observations, single_actions):\n # If we work in normalized action space (normalize_actions=True),\n # we re-translate here into the env's action space.\n if unsquash_actions:\n a = space_utils.unsquash_action(a, policy.action_space_struct)\n # Clip, according to env's action space.\n elif clip_actions:\n a = space_utils.clip_action(a, policy.action_space_struct)\n actions[key] = a\n\n # Unbatch states into a multi-agent dict.\n unbatched_states = {}\n for idx, agent_id in enumerate(observations):\n unbatched_states[agent_id] = [s[idx] for s in states]\n\n # Return only actions or full tuple\n if state_defined or full_fetch:\n return actions, unbatched_states, infos\n else:\n return actions", "def batch_shape(self) -> torch.Size:\n self._check_if_fitted()\n return torch.Size([self.num_mcmc_samples])", "def get_batch_size():\n return get_global_variable(GraphKeys.BATCH_SIZE)", "def batch_split(self) -> np.array:\n pass", "def get_batch(self):\n return self.batch", "def enable_batched_operations(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"enable_batched_operations\")", "def batch_actions(self):\n return self.resource.model.batch_actions", "def num_batches(self):\n\t\t\n\t\treturn len(self.batch_stats)", "def BatchConfig(**kwargs):\n return _validate(_BatchConfig(**kwargs))", "def has_next_batch(self):\n return self.current_index + self.batch_size <= self.count", "def hook_store_batch_size(module):\n if self._batch_size == {}:\n batch_axis = 0\n batch_size = 
module.input0.shape[batch_axis]\n\n for group in param_groups:\n group_id = id(group)\n\n if self._verbose:\n print(f\"Group {group_id}: Store 'batch_size'\")\n\n self._batch_size[group_id] = batch_size", "def backend_is_buffered(cls):\n return bool(cls._buffer_context)", "def batch_request_size(self):\n return self._batch_request_size", "def process_batch(self, batch):\n # shapes are [time, ...original dims...]\n v_global = np.stack(batch[:,0]) # [time, agents, l_state_one_agent]\n # note that *_local objects have shape\n # [time, agents, ...original dim...]\n obs_others = np.stack(batch[:,1]) # [time,agents,h,w,c] or [time, agents, obs_others]\n v_local = np.stack(batch[:,2]) # [time,agents,l]\n actions = np.stack(batch[:,3]) # [time,agents]\n reward = np.stack(batch[:,4]) # [time]\n reward_local = np.stack(batch[:,5]) # [time,agents]\n v_global_next = np.stack(batch[:,6]) # [time, agents, l_state_one_agent]\n obs_others_next = np.stack(batch[:,7]) # [time,agents,h,w,c]\n v_local_next = np.stack(batch[:,8]) # [time,agents,l]\n done = np.stack(batch[:,9]) # [time]\n goals = np.stack(batch[:,10]) # [time, agents, l_goal]\n\n batch = None\n \n n_steps = v_global.shape[0]\n \n # For all global quantities, for each time step,\n # duplicate values <n_agents> times for\n # batch processing of all agents\n reward = np.repeat(reward, self.n_agents, axis=0)\n\n # In-place reshape for *_local quantities,\n # so that one time step for one agent is considered\n # one batch entry\n if self.experiment == 'sumo':\n obs_others.shape = (n_steps*self.n_agents, self.h_obs,\n self.w_obs, self.c_obs)\n obs_others_next.shape = (n_steps*self.n_agents, self.h_obs,\n self.w_obs, self.c_obs)\n elif self.experiment == 'particle':\n obs_others.shape = (n_steps*self.n_agents, self.l_obs_others)\n obs_others_next.shape = (n_steps*self.n_agents, self.l_obs_others)\n v_local.shape = (n_steps*self.n_agents, self.l_obs)\n reward_local.shape = (n_steps*self.n_agents)\n v_local_next.shape = (n_steps*self.n_agents, self.l_obs)\n\n actions_1hot, actions_others_1hot = self.process_actions(n_steps, actions)\n \n return n_steps, v_global, obs_others, v_local, actions_1hot, actions_others_1hot, reward, reward_local, v_global_next, obs_others_next, v_local_next, done, goals", "def get_batch(self) -> CBInput:\n sample_batch_idx, batch_arms = self.gen_arms_per_batch()\n context_arm_features = self.gen_features_batch(batch_idx=sample_batch_idx)\n assert context_arm_features.ndim == 3\n rewards_all_arms = self.features_to_rewards(\n inp_feature=context_arm_features, sample_batch_idx=sample_batch_idx\n )\n batch = CBInput(\n context_arm_features=context_arm_features,\n arms=batch_arms, # ads of batch_size campaigns\n rewards_all_arms=rewards_all_arms,\n )\n return batch", "def process_state_batch(self, batch):\n return np.squeeze(batch, axis=1)", "def batch_size(self):\n return self._first_rgb.shape[0]", "def batch_size(self):\n self.validate_shape_and_dtype()\n return self.rgb.shape[0]", "def get_batch_actions(self, states, greedy=False):\n with self._policy.G._session.as_default():\n return self._policy.policy.predict(\n self._policy.get_session(),\n states, greedy)", "def __len__(self):\n return len(self.batches)", "def eval_batch_size(sess=None):\n return eval_global_variable(GraphKeys.BATCH_SIZE, sess)", "def batch_size(self) -> ConfigNodePropertyInteger:\n return self._batch_size", "def get_batch_runs(self):\n return self.batch_runs", "def record_batch_size(self):\n return 10000", "def observe_np(\n state: np.ndarray,\n impulse: 
np.ndarray,\n):\n state_shape = state.shape\n if len(state_shape) < 3:\n raise ValueError(\n \"State must be at least 3D (`[width, height, channel]`) but got {}\"\n \"\".format(state.shape))\n\n state_shape = [-1 if dim is None else dim for dim in state_shape]\n\n # State now has single batch dimension, has shape\n # `[batch_size, height, width, channel]`.\n state = np.reshape(state, [-1] + state_shape[-3:])\n\n state = numpy_convolution.convolve_2d(state, impulse, padding=\"SAME\")\n\n # Retrieve original `batch_dimensions`.\n return np.reshape(state, state_shape[:-1] + [impulse.shape[-1]])", "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def _update_num_batches(self):\n # maximum possible number of batches is equal to number of whole times\n # batch_size divides in to the number of data points which can be\n # found using integer division\n possible_num_batches = self.inputs.shape[0] // self.batch_size\n if self.max_num_batches == -1:\n self.num_batches = possible_num_batches\n else:\n self.num_batches = min(self.max_num_batches, possible_num_batches)", "def active_observation_shape(self):\n # if isinstance(self._env.observation_space, spaces.Box) and len(self._env.observation_space.shape) > 1:\n # return np.prod(self._env.observation_space.spaces[key].shape)\n\n if not isinstance(self._env.observation_space, spaces.Dict):\n return super(GymAdapter, self).active_observation_shape\n\n observation_keys = (\n self.observation_keys\n or list(self._env.observation_space.spaces.keys()))\n\n active_size = sum(\n np.prod(self._env.observation_space.spaces[key].shape)\n for key in observation_keys)\n\n active_observation_shape = (active_size, )\n return active_observation_shape", "def _maybe_do_batch(\n self,\n accumulator: _CombinerStatsGeneratorsCombineFnAcc,\n force: bool = False) -> None:\n if self._should_do_batch(accumulator, force):\n self._combine_batch_size.update(accumulator.curr_batch_size)\n self._combine_byte_size.update(accumulator.curr_byte_size)\n if len(accumulator.input_record_batches) == 1:\n record_batch = accumulator.input_record_batches[0]\n else:\n record_batch = table_util.MergeRecordBatches(\n accumulator.input_record_batches)\n accumulator.partial_accumulators = self._for_each_generator(\n lambda gen, gen_acc: gen.add_input(gen_acc, record_batch),\n accumulator.partial_accumulators)\n del accumulator.input_record_batches[:]\n accumulator.curr_batch_size = 0\n accumulator.curr_byte_size = 0", "def on_batch_mode(self, event):\n if self.parent is not None:\n wx.PostEvent(self.parent,\n NewBatchEvent(enable=True))", "def batch(ctx, config, job_number, total_jobs, executor, force_overwrite):\n # Imports inside CLI for speed\n from yatsm.utils import distribute_jobs\n\n # TODO: remove when not debugging\n import dask\n dask.set_options(get=dask.async.get_sync)\n\n\n # TODO: Better define how authoritative reader when using multiple datasets\n # and choosing block shape (in config?)\n # TODO: Allow user to specify block shape in config (?)\n block_windows = config.primary_reader.block_windows\n job_idx = distribute_jobs(job_number, total_jobs, len(block_windows))\n\n logger.debug('Working on {} of {} block windows'\n .format(len(job_idx), len(block_windows)))\n\n block_windows = [block_windows[i] for i in job_idx]\n\n force_overwrite = (force_overwrite or\n config['pipeline'].get('overwrite', False))\n\n # TODO: iterate over block_windows assigned to ``job_id``\n futures = {}\n for idx, window in block_windows:\n future = 
executor.submit(batch_block,\n config=config,\n readers=config.readers,\n window=window,\n overwrite=force_overwrite)\n futures[future] = window\n\n n_good, n_skip, n_fail = 0, 0, 0\n for future in executor.as_completed(futures):\n window = futures[future]\n try:\n result = future.result()\n if isinstance(result, str):\n logger.info(\"Wrote to: %s\" % result)\n n_good += 1\n else:\n n_skip += 1\n time.sleep(1)\n except KeyboardInterrupt:\n logger.critical('Interrupting and shutting down')\n executor.shutdown()\n raise click.Abort()\n except Exception:\n logger.exception(\"Exception for window: {}\".format(window))\n n_fail += 1\n raise # TODO: remove and log?\n\n logger.info('Complete: %s' % n_good)\n logger.info('Skipped: %s' % n_skip)\n logger.info('Failed: %s' % n_fail)", "def get_eval_batch(self) -> jnp.ndarray:\n\n return self.dataset[-self.eval_batch_size:, ...]", "def get_batch(self, batch_size, batch_length):\n with tf.device(self._device):\n min_size = tf.reduce_min(self._current_size)\n tf.Assert(min_size >= batch_length, [\n \"Not all environments has enough data. The smallest data \"\n \"size is: \", min_size, \"Try storing more data before \"\n \"calling get_batch\"\n ])\n\n batch_size_per_env = batch_size // self._num_envs\n remaining = batch_size % self._num_envs\n if batch_size_per_env > 0:\n env_ids = tf.tile(\n tf.range(self._num_envs, dtype=tf.int64),\n [batch_size_per_env])\n else:\n env_ids = tf.zeros((0, ), tf.int64)\n if remaining > 0:\n eids = tf.range(self._num_envs, dtype=tf.int64)\n eids = tf.random.shuffle(eids)[:remaining]\n env_ids = tf.concat([env_ids, eids], axis=0)\n\n r = tf.random.uniform(tf.shape(env_ids))\n num_positions = self._current_size - batch_length + 1\n num_positions = tf.gather(num_positions, env_ids)\n pos = tf.cast(r * tf.cast(num_positions, tf.float32), tf.int64)\n pos += tf.gather(self._current_pos - self._current_size, env_ids)\n pos = tf.reshape(pos, [-1, 1]) # [B, 1]\n pos = pos + tf.expand_dims(\n tf.range(batch_length, dtype=tf.int64), axis=0) # [B, T]\n pos = pos % self._max_length\n pos = tf.expand_dims(pos, -1) # [B, T, 1]\n env_ids = tf.reshape(env_ids, [-1, 1]) # [B, 1]\n env_ids = tf.tile(env_ids, [1, batch_length]) # [B, T]\n env_ids = tf.reshape(env_ids, [-1, batch_length, 1]) # [B, T, 1]\n indices = tf.concat([env_ids, pos], axis=-1) # [B, T, 2]\n return tf.nest.map_structure(\n lambda buffer: tf.gather_nd(buffer, indices), self._buffer)", "def add(\n self,\n batch: RolloutBatchProtocol,\n buffer_ids: Optional[Union[np.ndarray, list[int]]] = None,\n ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n # preprocess batch\n new_batch = Batch()\n for key in set(self._reserved_keys).intersection(batch.keys()):\n new_batch.__dict__[key] = batch[key]\n batch = new_batch\n batch.__dict__[\"done\"] = np.logical_or(batch.terminated, batch.truncated)\n assert {\"obs\", \"act\", \"rew\", \"terminated\", \"truncated\", \"done\"}.issubset(batch.keys())\n if self._save_only_last_obs:\n batch.obs = batch.obs[:, -1]\n if not self._save_obs_next:\n batch.pop(\"obs_next\", None)\n elif self._save_only_last_obs:\n batch.obs_next = batch.obs_next[:, -1]\n # get index\n if buffer_ids is None:\n buffer_ids = np.arange(self.buffer_num)\n ptrs, ep_lens, ep_rews, ep_idxs = [], [], [], []\n for batch_idx, buffer_id in enumerate(buffer_ids):\n ptr, ep_rew, ep_len, ep_idx = self.buffers[buffer_id]._add_index(\n batch.rew[batch_idx],\n batch.done[batch_idx],\n )\n ptrs.append(ptr + self._offset[buffer_id])\n ep_lens.append(ep_len)\n 
ep_rews.append(ep_rew)\n ep_idxs.append(ep_idx + self._offset[buffer_id])\n self.last_index[buffer_id] = ptr + self._offset[buffer_id]\n self._lengths[buffer_id] = len(self.buffers[buffer_id])\n ptrs = np.array(ptrs)\n try:\n self._meta[ptrs] = batch\n except ValueError:\n batch.rew = batch.rew.astype(float)\n batch.done = batch.done.astype(bool)\n batch.terminated = batch.terminated.astype(bool)\n batch.truncated = batch.truncated.astype(bool)\n if self._meta.is_empty():\n self._meta = create_value(batch, self.maxsize, stack=False) # type: ignore\n else: # dynamic key pops up in batch\n alloc_by_keys_diff(self._meta, batch, self.maxsize, False)\n self._set_batch_for_children()\n self._meta[ptrs] = batch\n return ptrs, np.array(ep_rews), np.array(ep_lens), np.array(ep_idxs)", "def can_sample(self, batch_size):\n return batch_size + 1 <= self.num_in_buffer", "def get_global_batch_size(self) -> int:\n return self._global_batch_size", "def is_ready_update(self):\n size_of_buffer = len(self.training_buffer.update_buffer['actions'])\n return size_of_buffer > max(int(self.trainer_parameters['buffer_size'] / self.policy.sequence_length), 1)", "def batch_len(batch):\n flatlist, _ = tree_util.tree_flatten(batch)\n if len(flatlist) < 1:\n return 0\n b = flatlist[0].shape[0]\n assert all(\n arr.shape[0] == b for arr in flatlist if th.is_tensor(arr)\n ), \"Not all arrays have same batchsize!\"\n return b", "def _create_batch_list(self):\n return [None] * self.bufsize", "def init_batch(self):\n pass", "def collect_batch(self, observation):\n observations, actions, rewards, dones = [observation], [], [], []\n for _ in range(self.rollout_len):\n action = self.act(observation)\n observation, reward, done = self.env_pool.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n return observations, actions, rewards, dones", "def _is_buffered(self):\n return self.buffered or type(self)._buffer_context", "def _get_batched_input_spec(self, batch_size):\n if isinstance(self._input_spec, type_spec.BatchableTypeSpec):\n return self._input_spec._batch(batch_size) # pylint: disable=protected-access\n if isinstance(self._input_spec, resource_variable_ops.VariableSpec):\n return resource_variable_ops.VariableSpec(\n shape=tf.TensorShape([batch_size]).concatenate(\n self._input_spec.shape),\n dtype=self._input_spec.dtype,\n trainable=self._input_spec.trainable)\n raise NotImplementedError(\n f'`{self.value_type.__name__}`s `TypeSpec` is not supported for '\n f'inputs of type {type(self._input_spec)}.')", "def gather_all(self):\n size = tf.reduce_min(self._current_size)\n max_size = tf.reduce_max(self._current_size)\n tf.Assert(size == max_size, [\n \"Not all environment have the same size. 
min_size:\", size,\n \"max_size:\", max_size\n ])\n\n if size == self._max_length:\n return tf.nest.map_structure(lambda buf: buf.value(), self._buffer)\n else:\n return tf.nest.map_structure(lambda buf: buf[:, :size, ...],\n self._buffer)", "def sample_batch(models, env_constructor, device, config):\n\n # Initialize envs.\n envs = [SubProcessWrapper(env_constructor) for _ in range(config[\"SAMPLE_PARALLEL_ENVS\"])]\n\n player_ids = list(models.keys())\n\n # EpisodeData for in-progress episodes.\n # ep_datas[i][p_id] references the EpisodeData for player p_id in the i'th env.\n ep_datas = [{p_id: None for p_id in player_ids} for _ in envs]\n\n # actions[i][p_id] references the action for player p_id in the i'th env.\n actions = [{p_id: None for p_id in player_ids} for _ in envs]\n\n num_steps = {p_id: 0 for p_id in player_ids}\n\n # final_ep_datas[p_id][i] references the EpisodeData for the i'th episode collected for player p_id.\n final_ep_datas = {p_id: [] for p_id in player_ids}\n\n # While at least one player is below SAMPLE_MIN_NUM_STEPS.\n while np.any(np.array([n for n in num_steps.values()]) < config[\"SAMPLE_MIN_NUM_STEPS\"]):\n # 1. Step all envs asynchronously.\n\n # Keep a record of which envs were 'reset' and which were 'stepped' so that we\n # know what return values to expect when we receive the results asynchronously.\n env_was_reset = []\n for i_env, env in enumerate(envs):\n if not env.call_sync(\"is_in_progress\"):\n env_was_reset.append(True)\n for p_id in player_ids:\n ep_data = ep_datas[i_env][p_id]\n # If this is not the very first iteration, then save the episode.\n if ep_data is not None:\n # Drop the last observation, as we never acted on it.\n ep_data.observations = ep_data.observations[:len(ep_data.rewards)]\n final_ep_datas[p_id].append(ep_data)\n num_steps[p_id] += len(ep_data.rewards)\n ep_datas[i_env] = {p_id: EpisodeData() for p_id in player_ids}\n env.call_async(\"reset\")\n else:\n env_was_reset.append(False)\n actions = {p_id: ep_datas[i_env][p_id].actions[-1] for p_id in player_ids}\n env.call_async(\"step\", actions)\n\n # 2. Receive results from async env steps.\n\n for i_env, env in enumerate(envs):\n if env_was_reset[i_env]:\n obs = env.get_result()\n for p_id in player_ids:\n ep_datas[i_env][p_id].observations.append(obs[p_id])\n else:\n obs, rewards, dones, step_infos = env.get_result()\n for p_id in player_ids:\n ep_data = ep_datas[i_env][p_id]\n ep_data.observations.append(obs[p_id])\n ep_data.rewards.append(rewards[p_id])\n # step_infos entry should already exist for this step.\n ep_data.step_info[-1].update(step_infos[p_id])\n\n # 3. 
Sample actions.\n\n player_id_to_state_batch = {p_id: [] for p_id in player_ids}\n for i_env, env in enumerate(envs):\n for p_id in player_ids:\n player_id_to_state_batch[p_id].append(ep_datas[i_env][p_id].observations[-1])\n\n for p_id in player_ids:\n model = models[p_id]\n with torch.no_grad():\n state_batch = np.array(player_id_to_state_batch[p_id])\n state_batch = torch.Tensor(state_batch)\n state_batch = state_batch.to(device)\n ship_act_logits, shipyard_act_logits, value_preds = model(state_batch)\n\n ship_action_dist, shipyard_action_dist = model.get_action_distribution(\n ship_act_logits, shipyard_act_logits, state_batch)\n\n ship_action = ship_action_dist.sample()\n shipyard_action = shipyard_action_dist.sample()\n ship_act_entropy = ship_action_dist.entropy()\n shipyard_act_entropy = shipyard_action_dist.entropy()\n\n action_log_prob = model.action_log_prob(\n ship_action_dist,\n shipyard_action_dist,\n ship_action,\n shipyard_action,\n )\n\n ship_action = ship_action.cpu().detach().numpy()\n shipyard_action = shipyard_action.cpu().detach().numpy()\n action_log_prob = action_log_prob.cpu().detach().numpy()\n value_preds = value_preds.cpu().detach().numpy()\n ship_act_entropy = ship_act_entropy.cpu().detach().numpy()\n shipyard_act_entropy = shipyard_act_entropy.cpu().detach().numpy()\n\n for i_env, env in enumerate(envs):\n if env.call_sync(\"is_in_progress\"):\n ep_data = ep_datas[i_env][p_id]\n ep_data.actions.append((\n ship_action[i_env, ...],\n shipyard_action[i_env, ...],\n ))\n ep_data.act_log_probs.append(action_log_prob[i_env])\n ep_data.value_preds.append(value_preds[i_env])\n # Create step_info entry with info for step that hasn't happend (in env) yet.\n ep_data.step_info.append(\n {\n \"ship_action_dist_entropy\": ship_act_entropy[i_env],\n \"shipyard_action_dist_entropy\": shipyard_act_entropy[i_env],\n }\n )\n\n # Close all envs\n for e in envs:\n e.close()\n\n return final_ep_datas", "def train_batch(self, batch_info: BatchInfo) -> None:\n # Each DQN batch is\n # 1. Roll out environment and store out experience in the buffer\n self.model.eval()\n\n # Helper variables for rollouts\n episode_information = []\n frames = 0\n\n with torch.no_grad():\n if not self.env_roller.is_ready_for_sampling():\n while not self.env_roller.is_ready_for_sampling():\n rollout = self.env_roller.rollout(batch_info, self.model)\n\n episode_information.extend(rollout.episode_information())\n frames += rollout.frames()\n else:\n for i in range(self.settings.batch_rollout_rounds):\n rollout = self.env_roller.rollout(batch_info, self.model)\n\n episode_information.extend(rollout.episode_information())\n frames += rollout.frames()\n\n batch_info['frames'] = frames\n batch_info['episode_infos'] = episode_information\n\n # 2. 
Sample the buffer and train the algo on sample batch\n self.model.train()\n\n # Algo will aggregate data into this list:\n batch_info['sub_batch_data'] = []\n\n for i in range(self.settings.batch_training_rounds):\n sampled_rollout = self.env_roller.sample(batch_info, self.model)\n\n batch_result = self.algo.optimizer_step(\n batch_info=batch_info,\n device=self.device,\n model=self.model,\n rollout=sampled_rollout\n )\n\n self.env_roller.update(rollout=sampled_rollout, batch_info=batch_result)\n\n batch_info['sub_batch_data'].append(batch_result)\n\n batch_info.aggregate_key('sub_batch_data')", "def acq_batch_size(self):\n return self.batch_size * self.batches_per_acquisition", "def run_batch(self):\n\n print(\"Running experiment with batch_size {}...\".format(self.batch_size))\n\n errors = self.W.new_tensor([])\n energies = self.W.new_tensor([])\n\n x = (2 * self.W.new(self.N, self.batch_size).random_(2) - 1).float() # Initialize the x vector\n rands = self.W.new(self.t_max, self.batch_size).uniform_() # The random values which will be compared to the acceptance probabilities\n idxs = self.W.new(self.t_max, self.batch_size).random_(self.N).long() # The indices which will be flipped in x at each iteration\n\n energy, wx = utils.compute_energy_batch(x, self.W, self.Y) # Compute the initial value of the energy\n\n for iteration in range(self.t_max):\n self.beta_scheduler.step(energies) # Update the value of beta according to the cooling strategy\n\n x, energy, wx = self.chain.step_batch(x, self.W, self.Y, self.beta_scheduler.beta, energy, wx, idxs[iteration], rands[iteration])\n energies = torch.cat((energies, energy.unsqueeze(0)))\n\n e = utils.compute_reconstruction_error_batch(x, self.X) # Compute the current reconstruction error\n errors = torch.cat((errors, e.unsqueeze(0)))\n\n return errors, energies, x", "def application_enabled(namespace):\n client = get_mgmt_service_client(BatchManagementClient)\n acc = client.batch_account.get(namespace.resource_group, namespace.account_name)\n if not acc:\n raise ValueError(\"Batch account '{}' not found.\".format(namespace.account_name))\n if not acc.auto_storage or not acc.auto_storage.storage_account_id: # pylint: disable=no-member\n raise ValueError(\"Batch account '{}' needs auto-storage enabled.\".\n format(namespace.account_name))", "def batch_compute_environment(self):\n return self._get_output(\"BatchComputeEnvironmentArn\")", "def is_ready_update(self):\n size_of_buffer = len(self.training_buffer.update_buffer[\"actions\"])\n return size_of_buffer > max(\n int(self.trainer_parameters[\"buffer_size\"] / self.policy.sequence_length), 1\n )", "def expand(self, batch_shape):\n batch_shape = list(batch_shape)\n if len(batch_shape) < len(self.batch_shape):\n raise ValueError(\"Expected len(batch_shape) >= len(self.batch_shape), \"\n \"actual {} vs {}\".format(len(batch_shape), len(self.batch_shape)))\n # check sizes of existing dims\n for dim in range(-1, -1 - len(self.batch_shape), -1):\n if batch_shape[dim] == -1:\n batch_shape[dim] = self.batch_shape[dim]\n elif batch_shape[dim] != self.batch_shape[dim]:\n if self.batch_shape[dim] != 1:\n raise ValueError(\"Cannot broadcast dim {} of size {} to size {}\".format(\n dim, self.batch_shape[dim], batch_shape[dim]))\n else:\n raise NotImplementedError(\"https://github.com/uber/pyro/issues/1119\")\n sample_shape = batch_shape[:len(batch_shape) - len(self.batch_shape)]\n return self.expand_by(sample_shape)", "def batch_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, 
\"batch_node_count\")", "def ExpectedMaxBatchSizes(self, run_params):\n return self.max_batch_sizes", "def obs_action_shape(env):\n obs_space = env.observation_space\n return np.append(np.ravel(obs_space.high)+1, [env.action_space.n])", "def process_batch(self, batch):\n # extend with current batch\n self._extend(batch)\n\n # unpack and compute bounds\n length = len(self.obs)\n c = self.c\n\n # normally we cannot compute samples for the last c elements, but\n # in the terminal case, we halluciante values where necessary\n end = length if batch.terminal else length - c\n\n # collect samples to return in a FeudalBatch\n feudal_batch = FeudalBatch()\n for t in range(c, end):\n\n # state difference\n s_diff = self.s[t + c] - self.s[t]\n\n # intrinsic reward\n ri = 0\n # note that this for loop considers s and g values\n # 1 timestep to c timesteps (inclusively) ago\n for i in range(1, c + 1):\n ri_s_diff = self.s[t] - self.s[t - i]\n if np.linalg.norm(ri_s_diff) != 0:\n ri += cosine_similarity(ri_s_diff, self.g[t - i])\n ri /= c\n\n # sum of g values used to derive w, input to the linear transform\n gsum = np.zeros_like(self.g[t - c])\n for i in range(t - c, t + 1):\n gsum += self.g[i]\n\n # add to the batch\n feudal_batch.add(self.obs[t], self.a[t], self.returns[t], s_diff,\n ri, gsum, self.features[t])\n\n # in the terminal case, set reset flag\n if batch.terminal:\n self.last_terminal = True\n # in the general case, forget all but the last 2 * c elements\n # reason being that the first c of those we have already computed\n # a batch for, and the second c need those first c\n else:\n twoc = 2 * self.c\n self.obs = self.obs[-twoc:]\n self.a = self.a[-twoc:]\n self.returns = self.returns[-twoc:]\n self.s = self.s[-twoc:]\n self.g = self.g[-twoc:]\n self.features = self.features[-twoc:]\n\n return feudal_batch.get_batch()", "def is_buffer_empty(self): \n if self.buffer.shape == (0, 5):\n return True\n else:\n return False", "def valid_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index", "def observationsMatchingBatchDim(self):\n ret = []\n for inp in range(len(self._observations)):\n all_obs = self._observations[inp].getSlice(0)\n processed = all_obs\n # If we have more than 1 observation per state\n if self._batch_dimensions[inp][0] > 1 and len(all_obs) > 0:\n obs_per_state = self._batch_dimensions[inp][0]\n processed = np.zeros((len(all_obs), obs_per_state, ) + all_obs.shape[1:])\n # for every observation, we create a state\n for i in range(all_obs.shape[0]):\n state = np.zeros((obs_per_state,) + all_obs.shape[1:])\n # everything before state_start_idx is all_obs[0]\n state_start_idx = 0\n\n # start index in all_obs\n start_idx = i - obs_per_state\n\n # if we're in the first obs_per_state observations, we need to fill the first\n # -start_idx elements with all_obs[0]\n if start_idx < 0:\n n_to_fill = -start_idx\n state[0:n_to_fill] = np.repeat(all_obs[0][None, :, :], n_to_fill, axis=0)\n\n # start of where to fill the rest\n state_start_idx = n_to_fill\n\n # new start_idx for\n start_idx = 0\n state[state_start_idx:] = all_obs[start_idx+1:i+1]\n processed[i] = state\n\n ret.append(processed)\n return ret", "def evaluate_agent(self, batch_size=None):\n if batch_size is None:\n batch_size = self.minibatch_size\n\n i_test = 0\n i_comp = 0\n test_scores = []\n batch_scores = [0]*batch_size\n\n 
test_envs = np.array([None]*batch_size)\n obs_batch = []\n\n for i in range(len(self.test_envs)):\n test_env = self.test_envs[i]\n obs = test_env.reset()\n score = 0.0\n while True:\n action = self.predict(torch.FloatTensor(obs).to(self.device),\n test_env.reversible_spins)\n obs, rew, done, info = test_env.step(action)\n score += rew\n if done:\n test_scores.append(score)\n break\n\n '''\n while i_comp < self.test_episodes:\n\n for i, env in enumerate(test_envs):\n if env is None and i_test < self.test_episodes:\n test_env, testing_in_reversible_spin_env = self.get_random_env(self.test_envs)\n obs = test_env.reset()\n test_env = deepcopy(test_env)\n\n test_envs[i] = test_env\n obs_batch.append(obs)\n\n i_test += 1\n\n actions = self.predict(torch.FloatTensor(np.array(obs_batch)).to(self.device),\n testing_in_reversible_spin_env)\n\n obs_batch = []\n\n i = 0\n for env, action in zip(test_envs, actions):\n\n if env is not None:\n obs, rew, done, info = env.step(action)\n\n if self.test_metric == TestMetric.CUMULATIVE_REWARD:\n batch_scores[i] += rew\n\n if done:\n if self.test_metric == TestMetric.BEST_ENERGY:\n batch_scores[i] = env.best_energy\n elif self.test_metric == TestMetric.ENERGY_ERROR:\n batch_scores[i] = abs(env.best_energy - env.calculate_best()[0])\n elif self.test_metric == TestMetric.MAX_CUT:\n batch_scores[i] = env.get_best_cut()\n elif self.test_metric == TestMetric.FINAL_CUT:\n batch_scores[i] = env.calculate_cut()\n\n test_scores.append(batch_scores[i])\n\n if self.test_metric == TestMetric.CUMULATIVE_REWARD:\n batch_scores[i] = 0\n\n i_comp += 1\n test_envs[i] = None\n else:\n obs_batch.append(obs)\n\n i += 1\n\n if self.test_metric == TestMetric.ENERGY_ERROR:\n print(\"\\n{}/{} graphs solved optimally\".format(np.count_nonzero(np.array(test_scores)==0),self.test_episodes), end=\"\")\n '''\n print(test_scores)\n return np.mean(test_scores)", "def on_predict_batch_begin(self, step, logs=None):", "def action_dimension(self):\n return self.env.action_spec[0].shape[0]", "def test_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index", "def next_batch(self):\n # Whether an epoch is done.\n done = False\n samples = []\n for _ in range(self.batch_size):\n # Indeed, `>` will not occur.\n if self.ptr >= self.dataset_size:\n done = True\n break\n else:\n self.ptr += 1\n sample = self.enqueuer.queue.get()\n samples.append(sample)\n # print 'queue size: {}'.format(self.enqueuer.queue.qsize())\n # Indeed, `>` will not occur.\n if self.ptr >= self.dataset_size:\n done = True\n return samples, done", "def max_batching_window(self) -> typing.Optional[aws_cdk.core.Duration]:\n return self._values.get('max_batching_window')", "def max_batching_window(self) -> typing.Optional[aws_cdk.core.Duration]:\n return self._values.get('max_batching_window')", "def max_batching_window(self) -> typing.Optional[aws_cdk.core.Duration]:\n return self._values.get('max_batching_window')", "def train_dynamic(batch_size=10):\n \n return" ]
[ "0.6829763", "0.63745177", "0.6135504", "0.6097213", "0.60270363", "0.59971696", "0.5886409", "0.5588769", "0.5547144", "0.55365115", "0.5511294", "0.55018145", "0.5477585", "0.5467263", "0.54394144", "0.5351245", "0.53225625", "0.53225625", "0.53225625", "0.53225625", "0.52915865", "0.52565306", "0.5244268", "0.5244268", "0.5244268", "0.5244268", "0.52200854", "0.5215194", "0.52132463", "0.51738775", "0.5172817", "0.51261413", "0.5093524", "0.50768274", "0.5042808", "0.50413835", "0.50240433", "0.50051385", "0.49522966", "0.4952097", "0.4942096", "0.49359027", "0.49339613", "0.4918338", "0.4907964", "0.49072936", "0.48932785", "0.48847315", "0.48774704", "0.48510036", "0.48433387", "0.4833988", "0.48336107", "0.48329565", "0.4825394", "0.48071757", "0.47850147", "0.47840536", "0.4751383", "0.47476903", "0.47459322", "0.47064692", "0.4699173", "0.46897465", "0.46751386", "0.46708438", "0.46666625", "0.46599618", "0.46551463", "0.4653388", "0.4647359", "0.46340147", "0.46253866", "0.4624071", "0.46209487", "0.4618165", "0.46176532", "0.46144202", "0.46121776", "0.46116048", "0.46086174", "0.45974356", "0.4595725", "0.45943585", "0.45940033", "0.45853534", "0.45806214", "0.45797914", "0.45789543", "0.45763037", "0.45753714", "0.4557641", "0.45504916", "0.45494097", "0.45491627", "0.45418888", "0.45379227", "0.45379227", "0.45379227", "0.4537518" ]
0.6470478
1
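An illustrative sketch follows (not part of the original record; the class name, the absence of a real py_environment base class, and the observation layout are assumptions) of how a subclass could opt in to the batched contract described in the query above by overriding both properties:

import numpy as np

class TiledPyEnvironment:  # assumed stand-in for a py_environment-style base class
    """Steps num_envs copies at once; arrays carry a leading batch dimension."""

    def __init__(self, num_envs: int):
        self._num_envs = num_envs

    @property
    def batched(self) -> bool:
        # Overridden to True because reset/step consume and produce arrays
        # whose first dimension is the batch size.
        return True

    @property
    def batch_size(self) -> int:
        return self._num_envs

    def reset(self) -> np.ndarray:
        # The leftmost dimension is the batch dimension and is not part of
        # the per-environment observation_spec.
        return np.zeros((self._num_envs, 4), dtype=np.float32)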
The batch size of the environment.
def batch_size(self) -> Optional[int]:
    if self.batched:
        raise RuntimeError(
            'Environment %s marked itself as batched but did not override the '
            'batch_size property' % type(self)
        )
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_size(self):\n return self.size", "def batch_size(self):\n return self._batch_size", "def batch_size(self):\n return self._batch_size", "def batch_size(self):\n return self._batch_size", "def batch_size(self):\n return self._batch_size", "def get_batch_size():\n return get_global_variable(GraphKeys.BATCH_SIZE)", "def batch_size(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"batch_size\")", "def batch_size(self) -> ConfigNodePropertyInteger:\n return self._batch_size", "def batch_size(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"batch_size\")", "def batch_request_size(self):\n return self._batch_request_size", "def batch_size(self) -> int:\n ...", "def get_global_batch_size(self) -> int:\n return self._global_batch_size", "def _get_batch_size(self):\n if self.batch_size == 'auto':\n return self._backend.compute_batch_size()\n else:\n # Fixed batch size strategy\n return self.batch_size", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def batch_shape(self) -> torch.Size:\n self._check_if_fitted()\n return torch.Size([self.num_mcmc_samples])", "def get_evaluation_batch_size():\n return 1", "def get_num_batches(self,batch_size):\r\n \r\n return len(self) // batch_size", "def get_generator_batch_size(self):\n\n return self.generator_batch_size", "def batch_size(self):\n self.validate_shape_and_dtype()\n return self.rgb.shape[0]", "def record_batch_size(self):\n return 10000", "def batch_size(self):\n if self._batch_size is not None:\n return self._batch_size # custom batch size defined\n if self.task == 'objdet':\n return 8\n annos_per_img = self._annos_per_img[self.dataset]\n if self.task in {'predcls', 'sgcls'}:\n annos_per_img = annos_per_img['pairs']\n elif self.task == 'objcls':\n annos_per_img = annos_per_img['objects']\n elif self.task == 'preddet' and self.filter_multiple_preds:\n annos_per_img = annos_per_img['predicates_filtered']\n elif self.task == 'preddet' and self.filter_duplicate_rels:\n annos_per_img = annos_per_img['duplicates_filtered']\n elif self.task in {'preddet', 'sggen'}:\n annos_per_img = annos_per_img['relations']\n batch_size = ceil(self._annotations_per_batch / annos_per_img)\n return max(batch_size, 2)", "def batch_size(self):\n return self._first_rgb.shape[0]", "def get_batch_size(self, recip_count: int) -> int:\n Logger.debug(f'In get_batch_size.', TAG)\n yesterdays_count = self._yesterdays_bet_count.get()\n if yesterdays_count < 1:\n yesterdays_count = 1\n size = (DIST_DURATION_PARAM * recip_count // yesterdays_count)\n if size < TX_MIN_BATCH_SIZE:\n size = TX_MIN_BATCH_SIZE\n if size > TX_MAX_BATCH_SIZE:\n size = TX_MAX_BATCH_SIZE\n Logger.debug(f'Returning batch size of {size}', TAG)\n return size", "def batch_size(x):\n return tf.to_float(tf.shape(x)[0], name='get_batch_size_in_fl32')", "def get_per_slot_batch_size(self) -> int:\n return self._per_slot_batch_size", "def number_of_batches(self):\n return int(np.floor(len(self.file_paths_list) / self.batch_size))", "def batch_size(features, labels):\n return extract_batch_length(features)", "def input_size(self):\n return self.env.input_size", "def ExpectedMaxBatchSizes(self, run_params):\n return self.max_batch_sizes", 
"def acq_batch_size(self):\n return self.batch_size * self.batches_per_acquisition", "def batch_size_percentage(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"batch_size_percentage\")", "def num_batches(self):\n\t\t\n\t\treturn len(self.batch_stats)", "def _global_batch_size(self):\n return True", "def eval_batch_size(sess=None):\n return eval_global_variable(GraphKeys.BATCH_SIZE, sess)", "def max_num_batches(self):\n return self._max_num_batches", "def batch_size(self, batch_size: ConfigNodePropertyInteger):\n\n self._batch_size = batch_size", "def size():\n return int(os.environ['WORLD_SIZE'])", "def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")", "def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")", "def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")", "def get_size(self):\n return self.get_params().shape[0]", "def train_size(self) -> int:\n return int(self.data_size * self.__train_fraction)", "def batch_size(input_tensor):\n return input_tensor.get_shape().as_list()[0]", "def size(self):\n return int(misc.intprod(self.shape))", "def __len__(self):\n return math.ceil(self.number_of_images / self.batch_size)", "def batch_size_percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"batch_size_percentage\")", "def batch_size_percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"batch_size_percentage\")", "def total_train_batches(self) -> int:\n return self.trainer.num_training_batches", "def get_batch_seq_size(self, sentence):\n batch_size, seq_length, _ = sentence.size()\n self.seq_length = seq_length\n self.batch_size = batch_size\n batch_seq_size = {'sentence_len': self.seq_length,\n 'batch_size': self.batch_size,\n 'is_multi_root': self.is_multi_root,\n 'max_dependency_len': self.max_dependency_len,\n 'use_gpu': self.use_gpu,\n 'length_constraint_on_root': self.length_constraint_on_root}\n\n return batch_seq_size", "def size(self, batch):\n x,y,m = batch \n return sum([mm.sum() for mm in m])", "def chunksize(self):\n\n return self._chunksize", "def dataset_size(self):\n return self.dataset.size", "def get_step_size(total_items, batch_size):\n return np.ceil(total_items / batch_size)", "def total_predict_batches(self) -> int:\n return sum(self.trainer.num_predict_batches)", "def batch_size_percentage(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"batch_size_percentage\")", "def batch_size_percentage(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"batch_size_percentage\")", "def size(self):\r\n return len(self._train_datas)", "def get_train_data_size(self):\n return len(self.pipeline.data['train'])", "def chunk_size(self):\r\n return int(self.frame_length * self.sample_rate)", "def batch_len(batch):\n flatlist, _ = tree_util.tree_flatten(batch)\n if len(flatlist) < 1:\n return 0\n b = flatlist[0].shape[0]\n assert all(\n arr.shape[0] == b for arr in flatlist if th.is_tensor(arr)\n ), \"Not all arrays have same batchsize!\"\n return b", "def size(self):\n\t\treturn self.dims", "def chunk_size(self) -> global___Expression:", "def dimension_size(self):\n return self._dim", "def __len__(self):\n return int(np.ceil(len(self.ids) / self.batch_size))", "def chunksize(self):\n\n return self.data.chunksize", "def size(self):\n return self.N", "def size_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_gb\")", "def size_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_gb\")", 
"def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))", "def get_ent_span_feature_size(self):\n return self.ent_output_size", "def getSize(self):\n return self.bf.memory()", "def __len__(self):\n return len(self.batches)", "def __len__(self):\n return int(np.ceil(self.max_index / float(self.batch_size)))", "def size(self):\n return numpy.prod(self.shape)", "def __len__(self) -> int:\n num_batches, remainder = divmod(len(self.mapped_triples), self.batch_size)\n if remainder and not self.drop_last:\n num_batches += 1\n return num_batches", "def dim(self) -> int:\n return self._n_dim", "def window_size(self):\n return self.__train_prop[\"window_size\"]", "def size_in_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"size_in_gb\")", "def size_in_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"size_in_gb\")", "def training_dataset_size(self):\n\n if not self.cs_learning and hasattr(FLAGS, 'orig_size'):\n return FLAGS.orig_size\n\n traindata_size_dir = os.path.join(self.cache_path, 'ds_sizes')\n ensure_dir(traindata_size_dir)\n if not hasattr(FLAGS, 'train_split'):\n setattr(FLAGS, 'train_split', 'train')\n\n size_cache_file = os.path.join(traindata_size_dir, '{}_{}'.format(FLAGS.dataset.lower(), FLAGS.train_split))\n\n if os.path.exists(size_cache_file):\n with open(size_cache_file) as f:\n ds_size = int(f.readline().strip())\n else:\n ds = load_ds() # Loads the dataset.\n [data_X, _, _] = ds.load()\n ds_size = len(data_X)\n with open(size_cache_file, 'w') as f:\n f.write(str(ds_size))\n\n return ds_size", "def __len__(self):\n return int(np.floor(len(self.dataset_df) / self.batch_size))", "def dimension(self):\n return self.__N", "def _update_num_batches(self):\n # maximum possible number of batches is equal to number of whole times\n # batch_size divides in to the number of data points which can be\n # found using integer division\n possible_num_batches = self.inputs.shape[0] // self.batch_size\n if self.max_num_batches == -1:\n self.num_batches = possible_num_batches\n else:\n self.num_batches = min(self.max_num_batches, possible_num_batches)", "def __len__(self):\n return int(np.ceil(len(self.image_filenames) / (self.batch_size)))", "def dataset_size(self):\n if not self._dataset_size:\n # pylint: disable=attribute-defined-outside-init\n self._dataset_size = count_file_lines(\n self._hparams.source_dataset.files)\n return self._dataset_size", "def array_size(self):\n return self._array_size", "def batch_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"batch_node_count\")", "def size(self):\n return self.size_number", "def __len__(self):\n return int(np.floor(len(self.ids) / self.batch_size))", "def size(self) -> int:\n\n return self.sizes.sum()", "def _get_batch_size(self, tensor, expected_shape, expected_size):\n size = self._size(tensor)\n if self._ndim(tensor) > len(expected_shape) or size > expected_size:\n return size // expected_size\n\n return None", "def getGlobalSize(self):\n return self._get_global_size( )", "def state_size(self):\n\t\treturn (\n\t\t\ttf.TensorShape([self.args[\"kb_node_max_len\"], self.args[\"mp_state_width\"]]),\n\t\t)", "def GlobalSize(self):\n return _hypre.HypreParVector_GlobalSize(self)", "def __len__(self):\n return math.ceil(len(self._sampler) / self._batch_size)", "def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n return tmpsize", "def 
__len__(self):\n return len(self.indexes) // self.batch_size" ]
[ "0.8748392", "0.8526226", "0.8526226", "0.8526226", "0.8526226", "0.8332826", "0.82787216", "0.82280636", "0.8090263", "0.8073407", "0.80665404", "0.80136347", "0.8010651", "0.8002175", "0.8002175", "0.8002175", "0.8002175", "0.7821338", "0.7717743", "0.76162803", "0.75835013", "0.7577369", "0.7482849", "0.7477492", "0.7456697", "0.7443691", "0.7434761", "0.7410707", "0.73818415", "0.7373639", "0.73424715", "0.7314066", "0.72372675", "0.72368157", "0.72245526", "0.7199826", "0.7173232", "0.71658796", "0.71607846", "0.7119457", "0.7093072", "0.7093072", "0.7093072", "0.70770085", "0.7076817", "0.70643765", "0.7060099", "0.70474166", "0.70411474", "0.70411474", "0.70146674", "0.70055145", "0.6984045", "0.6981086", "0.6947", "0.6933843", "0.69150734", "0.68957824", "0.68957824", "0.6889237", "0.6873225", "0.68623924", "0.6857491", "0.6838837", "0.6819186", "0.68133605", "0.68131876", "0.68085223", "0.680837", "0.6800605", "0.6800605", "0.679935", "0.6785485", "0.6785051", "0.6780219", "0.6775504", "0.6770444", "0.67590845", "0.6751887", "0.67443293", "0.6725365", "0.6725365", "0.6704503", "0.669706", "0.666763", "0.66591716", "0.665645", "0.66532", "0.6649343", "0.6631341", "0.66248304", "0.6624744", "0.6611511", "0.6609779", "0.66059047", "0.66003615", "0.6600167", "0.6595153", "0.65661263", "0.6564034" ]
0.7726753
18
Whether the Environment should reset given the current timestep. By default it only resets when all time_steps are `LAST`.
def should_reset(self, current_time_step: ts.TimeStep) -> bool:
    handle_auto_reset = getattr(self, '_handle_auto_reset', False)
    return handle_auto_reset and np.all(current_time_step.is_last())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_values(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"reset_values\")", "def reset():\n return True", "def reset_values(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"reset_values\")", "def reset(self, **kwargs):\n if self._backend_agent:\n self._backend_agent._on_gym_reset_begin(self, **kwargs)\n\n result = self.env.reset(**kwargs)\n if self.steps_done_in_episode > 0 and not self.is_episode_done:\n self.episodes_done += 1\n self.total.episodes_done_inc()\n self.is_episode_done = False\n self.steps_done_in_episode = 0\n\n if self._backend_agent:\n self._backend_agent._on_gym_reset_end(self, result, **kwargs)\n return result", "def is_reset(self):\n return self._tag == 'reset'", "def is_reset_task_states(self, task_state):\r\n return all(self.is_initial_child_state(child) for child in task_state)", "def reset(self) -> ts.TimeStep:\n self._current_time_step = self._reset()\n return self._current_time_step", "def should_reset(self):\n # type: () -> bool\n if not self._is_cache_enabled():\n return False\n elapsed = time.time() - self._last_ts\n return elapsed > self._refresh_interval_sec", "def last_reset(self):\n native_implementation = getattr(self._feature, \"last_reset\", None)\n\n return native_implementation or super().last_reset", "def test_reset(self, scml_system):\n scml_system._t = 12\n scml_system._k = 33\n state_space = scml_system.state_space\n state_positions = scml_system.state_positions\n initial_state = scml_system.reset()\n target = np.array([0, 0, 0, 0, 0, 0, 560]) / scml_system.limits\n assert np.all(initial_state == target), 'Initial states of the system are incorrect'\n assert scml_system._t == 0, 'Time of the system was not set to zero after reset'\n assert scml_system._k == 0, 'Episode step of the system was not set to zero after reset'\n assert scml_system.converter.reset_counter == scml_system.electrical_motor.reset_counter \\\n == scml_system.mechanical_load.reset_counter == scml_system.supply.reset_counter,\\\n 'The reset was not passed to all components of the SCMLSystem'\n assert scml_system._ode_solver.t == 0, 'The ode solver was not reset correctly'\n assert all(scml_system._ode_solver.y == np.zeros_like(\n scml_system.mechanical_load.state_names + scml_system.electrical_motor.CURRENTS, dtype=float\n )), ' The ode solver was not reset correctly'", "def IsLocalRerun(self):\n return self.prev_test_run_key is not None", "def reset(self):\n self._timestep = np.array([0])", "def reset_next_step(self):\n return self._reset_next_step", "def get_is_reset(self, timeout = 0):\n response = self.send_command_to_shouter(BP_TOOL.IS_RESET)\n if response == BP_TOOL.ACK:\n return False\n elif response == BP_TOOL.IS_RESET:\n return True\n else:\n return False", "def __bool__(self) -> bool:\n if self.initial_value == 1 and self.number_of_steps == 0:\n return True\n return False", "def reset(self):\n self.success = False\n self.i = 0\n if self.monitor:\n self.env = gym.wrappers.Monitor(self.env, \"./mountaincar-monitor\", force=True)\n state = self.env.reset()\n state = self.preprocess_state(state)\n state = np.concatenate([state] * self.action_repeat)\n return state", "def IsRerun(self):\n return self.prev_test_context is not None", "def reset(self):\n self.clock.reset()\n self.microgrid.reset();\n self.steps_beyond_done = None\n self.updateState();\n return self.state", "def is_done(self):\n\n # Robosuite envs always rollout to fixed horizon.\n return False", "def reset(self, **kwargs):\n if self.was_real_done:\n obs = 
self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs", "def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs", "def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs", "def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs", "def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs", "def reset(self, **kwargs):\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs", "def is_dry_run(self):\n try:\n v = environment.get(\"Run\")\n return v.lower() == \"dry\"\n except KeyError:\n return False", "def is_restarting(self) -> bool:\r\n return False", "def reset(self):\n # Initialize the timestep\n self.timestep = 0\n self.state = self.starting_state\n\n if self.from_data:\n self.episode_num += 1\n\n\n return self.starting_state", "def reset(self, **kwargs):\r\n if self.was_real_done:\r\n obs = self.env.reset(**kwargs)\r\n else:\r\n # no-op step to advance from terminal/lost life state\r\n obs, _, _, _ = self.env.step(0)\r\n self.lives = self.env.unwrapped.ale.lives()\r\n return obs", "def get_reset(self):\n resetcond = \"\"\n # OnConditions in dynamics\n dyn = self.dynamics\n for ev in dyn.event_handlers:\n if(type(ev) is lems.OnCondition):\n for sa in ev.actions:\n if(type(sa) is lems.StateAssignment):\n resetcond+=sa.variable + \" = \" + sa.value + \"\\n\"\n\n rgs = self.dynamics.regimes\n for r in rgs:\n if(r.initial==True): main_regime = r\n elif(r.initial==False): refractory_regime = r\n # OnConditions in main regimes\n roc = main_regime.event_handlers\n for oc in roc:\n if(type(oc) is lems.OnCondition):\n for sa in oc.actions:\n if(type(sa) is lems.StateAssignment):\n resetcond+=sa.variable + \" = \" + sa.value + \"\\n\"\n # OnEntry in refractory regime\n roe = refractory_regime.event_handlers\n for oe in roe:\n if(type(oe) is lems.OnEntry):\n for sa in oe.actions:\n if(type(sa) is lems.StateAssignment):\n resetcond+=sa.variable + \" = \" + sa.value + \"\\n\"\n\n return resetcond", "def update_shuttle_state(self):\n if len(self.steps) > self.current_step >= 0:\n step = self.steps[self.current_step]\n if step.is_fulfilled():\n step.end(True)", "def is_forced_run(self):\n try:\n v = environment.get(\"Run\")\n return v.lower() == \"force\"\n except KeyError:\n return False", "def _reset(self) -> ts.TimeStep:", "def reset(self):\n\n return bool(APIConsumer.post(\"/reset\"))", "def IsFinal(self):\n return self.state in FINAL_TEST_RUN_STATES", "def reset(self):\n self._reset_next_step = False\n self.step_count = 0\n \n self._state = self.state_initializer()\n self._meta_state = 
self._meta_state_initializer()\n self.task.reset(self._state, self._meta_state)\n self.physics.reset(self._state)\n self.action_space.reset(self._state)\n for rule in self.game_rules:\n rule.reset(self._state, self._meta_state)\n rule.step(self._state, self._meta_state)\n \n return dm_env.restart(self.observation())", "def _on_step(self) -> bool:\n\t\t#self.model.get_env().env_method(\"set_model_reference\", self.model.get_parameters())\n\t\tself.env.set_model_reference(self.model.get_parameters())\n\t\tprint(\"current timestep\", self.num_timesteps)\n\t\treturn True", "def reset(self):\n return self.env.reset()", "def _ShouldStop(self, task_global_step):\n if task_global_step >= self._task_params.train.max_steps:\n tf.logging.info('ShouldStop: step:%6d params.train.max_steps:%6d',\n task_global_step, self._task_params.train.max_steps)\n return True\n\n return False", "def _ShouldStop(self, task_global_step):\n if task_global_step >= self._task_params.train.max_steps:\n tf.logging.info('ShouldStop: step:%6d params.train.max_steps:%6d',\n task_global_step, self._task_params.train.max_steps)\n return True\n\n return False", "def check_allow_reset(self):\r\n if not self.ready_to_reset:\r\n if self.current_task_number > 0:\r\n last_response_data = self.get_last_response(self.current_task_number - 1)\r\n current_response_data = self.get_current_attributes(self.current_task_number)\r\n\r\n if (current_response_data['min_score_to_attempt'] > last_response_data['score']\r\n or current_response_data['max_score_to_attempt'] < last_response_data['score']):\r\n self.state = self.DONE\r\n self.ready_to_reset = True\r\n\r\n return self.ready_to_reset", "def reset_fsm_preempted(self):\n self._fsm_recent_goal_preempted = False", "def _allow_reset(self):\r\n return (self.child_state == self.DONE and self.child_attempts < self.max_attempts)", "def is_restarting(self) -> bool:\r\n restart_flag = self._restart_flag\r\n # Reset the flag\r\n self._restart_flag = False\r\n\r\n return restart_flag", "def is_restarting(self) -> bool:\r\n restart_flag = self._restart_flag\r\n # Reset the flag\r\n self._restart_flag = False\r\n\r\n return restart_flag", "def stored_reset(self):\r\n\t\tself.stored_reward = np.zeros((self.num_timesteps - self.first_considered_reward_step,))\r\n\t\tself.stored_optimum = np.zeros_like(self.stored_reward)", "def reset(self):\n \n self.env.reset()\n obs, _, done, _ = self.env.step(1)\n if done: \n self.env.reset()\n obs, _, done, _ = self.env.step(2)\n if done: \n self.env.reset()\n \n return obs", "def test_reset_temporal_axis(PM_ds_control_3d_full):\r\n smooth = 10\r\n tsmooth_kws = {\"time\": smooth}\r\n first_ori = PM_ds_control_3d_full.time[0].values\r\n first_actual = _reset_temporal_axis(\r\n PM_ds_control_3d_full, tsmooth_kws=tsmooth_kws, dim=\"time\"\r\n ).time.values[0]\r\n first_expected = f\"{first_ori}-{first_ori+smooth*1-1}\"\r\n assert first_actual == first_expected", "def is_done(self, agent, world) -> bool:\n if self.steps_from_last_reset / self.num_agents > self.episode_length:\n return True\n return False", "def check_early_stop(self) -> bool:\n if self.args.early_stopping_steps == -1:\n return False\n return self._steps_since_new_prefix >= self.args.early_stopping_steps", "def set_first_machine_time_step(self, first_machine_time_step):", "def _hard_reset(self):\n self._reset_specific_envs(np.ones_like(self.episodes_done))\n self._update_other_info()", "def hasReset(self, p_int): # real signature unknown; restored from __doc__\n return False", "def 
isSetDefaultTerm(self):\n return _libsbml.Transition_isSetDefaultTerm(self)", "def reset(self):\n if self.reset_tracker >= self.reset_interval:\n instance = self.sampling_function()\n self.env.use_next_instance(instance=instance)\n return self.env.reset()", "def should_trigger_for_step(self, step):\n if self._last_triggered_step == step:\n return False\n\n if self._every_steps is not None:\n if step >= self._last_triggered_step + self._every_steps:\n return True\n return False", "def must_run(self):\r\n self.current_time = datetime.now()\r\n return all([self._minute(), self._hour(), self._day_of_month(), self._month(), self._day_of_week()])", "def reinitialize_conditions(self):\n if self._plumb is not None and self.current_step is not None:\n time = self._plumb.time\n pressures = self._plumb.current_pressures()\n state = {'time': time, 'pressures': pressures}\n\n for condition, _ in self.current_step.conditions:\n condition.reinitialize(state)", "def _should_reinitialize_check_run(self, payload):\n state = payload.get('state')\n context = payload.get('context')\n logger.debug(f'status context: {context}, state: {state}')\n return context == CI_STATUS_CONTEXT and state != 'success'", "def reset_env(self):\n return self.env.reset()", "def reset_continued(self): \n self._recent_goal_continued = False\n self._update_action = False\n self._update_action_without_pause = False", "def is_steady_state(self):\n if isinstance(self.problem, TimeMixin):\n return self.problem.steady_state\n return True", "def requires_restart(self):\n options = InvenTreeSetting.SETTINGS.get(self.key, None)\n\n if options:\n return options.get('requires_restart', False)\n else:\n return False", "def reset(self):\n while not self._check_episode_start_condition():\n self._simulate()\n self.state, _ = self._extract_state()\n return self.state", "def is_sim_end(self):\n\n return self.cur_round == self.num_rounds + 1", "def auto_renew(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_renew\")", "def auto_renew(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_renew\")", "def reset_world(self, world):\n self.steps_from_last_reset = 0", "def trigger(self):\n if self.timer is None or time.time() - self.last_try > self.min_sec * 2:\n self.timer = time.time()\n self.last_try = time.time()\n return False\n elif time.time() - self.timer > self.min_sec:\n self.reset()\n return True\n else:\n self.last_try = time.time()\n return False", "def test_issue_reset_time(self):\n pass", "def reset(self, **kwargs):\n return self.env.reset(**kwargs)", "def finished(self, step: t.Optional[int] = None) -> bool:\n if step is None:\n return self.status() == self.expectedstatus()\n return self.status() & (1 << step)", "def test_correctly_sets_halt_on_next(self, get_pipe_manager):\n\n # Establish manager and perform initial control assertions.\n pm = get_pipe_manager(name=\"TestPM\")\n pm.stop_after = \"step2\"\n assert not pm.halt_on_next\n\n # Make non-halt-status-altering checkpointed timestamp call and\n # verify that we're still running and that we're not scheduled to halt.\n pm.timestamp(checkpoint=\"step1\")\n assert not pm.halt_on_next\n\n # Make halt-status-altering checkpointed timestamp call and verify\n # that we're still running and that we've now been scheduled to halt.\n pm.timestamp(checkpoint=\"step2\")\n assert pm.halt_on_next", "def reset(self):\n\n for value in self.__dict__.itervalues():\n if isinstance(value, EnvParm):\n value._value = 'use_env'\n getattr(value, 'value')", "def 
reset(self):\n if self._draw_new_turn_on_reset:\n turn_params = self._draw_random_turn_params()\n config = AisleTurnEnvParams(turn_params=turn_params, env_params=self._env_params)\n self._env = AisleTurnEnv(config)\n\n return self._env.reset()", "def __check_mode_change(self):\n if self.mode[\"auto_mode\"] != self.mode[\"last_mode\"]:\n self.mode[\"last_mode\"] = self.mode[\"auto_mode\"]\n return True\n return False", "def test_env_reset_and_step(self):\n create_env = CreateEnv()\n env = create_env.env\n\n # Assert that the total number of agents matches the sum of the 'n_agents'\n # configuration and the number of planners (1 in this case)\n num_planners = 1\n self.assertEqual(\n len(env.all_agents), create_env.env_config[\"n_agents\"] + num_planners\n )\n\n # Assert that the number of agents created in the world\n # matches the configuration specification\n self.assertEqual(len(env.world.agents), create_env.env_config[\"n_agents\"])\n\n # Assert that the planner's index in the world is 'p'\n self.assertEqual(env.world.planner.idx, \"p\")\n\n obs = env.reset()\n\n # Test whether the observation dictionary keys are created as expected\n self.assertEqual(\n sorted(list(obs.keys())),\n [str(i) for i in range(create_env.env_config[\"n_agents\"])] + [\"p\"],\n )\n\n obs, reward, done, info = env.step({})\n\n # Check that the observation, reward and info keys match\n self.assertEqual(obs.keys(), reward.keys())\n self.assertEqual(obs.keys(), info.keys())\n\n # Assert that __all__ is in done\n assert \"__all__\" in done", "def reset(self, **kwargs):\n self.env.reset(**kwargs)\n if self.override_num_noops is not None:\n noops = self.override_num_noops\n else:\n noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101\n assert noops > 0\n obs = None\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self.noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs", "def test_reset_tenant_token_later(self):\n new_token, orig_token = self._check_reset_token(invalidate=False)\n self.assertEqual(new_token.previous, orig_token.valid)", "def reset(dut):\n\n dut.reset_n <= 1\n yield ClockCycles(dut.clock, 10)\n dut.reset_n <= 0\n yield ClockCycles(dut.clock, 10)\n dut.reset_n <= 1", "def _restart_environment_episode(self, force_environment_reset=False) -> None:\n raise NotImplementedError(\"\")", "def reset(self, **kwargs):\n self.env.reset(**kwargs)\n if self.override_num_noops is not None:\n noops = self.override_num_noops\n else:\n noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) # pylint: disable=E1101\n assert noops > 0\n obs = None\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self.noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs", "def auto_renew(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_renew\")", "def test_reset_computer(self):\n computer1 = computer.Computer(1)\n computer1.reset_computer()\n res = computer1.greediness == 7 and computer1.rolls == 0\n self.assertTrue(res)", "def recurrent(self):\n return False", "def auto_renew(self) -> Optional[bool]:\n return pulumi.get(self, \"auto_renew\")", "def auto_renew(self) -> Optional[bool]:\n return pulumi.get(self, \"auto_renew\")", "def reset(self, **kwargs):\n self.env.reset(**kwargs)\n noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)\n obs = None\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self.noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs", "def epidemic_finish(states, 
iteration):\n return np.sum(states) == 0 and iteration > 10", "def _compute_is_terminal(self):\n # by default the episode will terminate when all samples are labelled\n done = LalEnv._compute_is_terminal(self)\n # it also terminates when self.n_horizon datapoints were labelled\n if np.size(self.indeces_known) == self.n_horizon:\n done = True\n return done", "def reset_camera(i, cam):\n\ttry:\n\t\tresult = True\n\n\t\tprint('*** RESETTING CAMERA SETTINGS ***')\n\n\t\tif cam.GetUniqueID() == serial_1:\t\t\n\t\t\tresult &= auto_exposure_mode(i, cam, 'Continuous')\n\t\t\tresult &= auto_gain_mode(i, cam, 'Continuous')\n\n\t\t\tcam.V3_3Enable.SetValue(False)\n\t\t\tcam.LineSelector.SetValue(PySpin.LineSelector_Line0)\n\t\t\tcam.TriggerMode.SetValue(PySpin.TriggerMode_Off)\n\n\t\tif cam.GetUniqueID() == serial_2:\t\t\n\t\t\tresult &= auto_exposure_mode(i, cam, 'Continuous')\n\t\t\tresult &= auto_gain_mode(i, cam, 'Continuous')\n\t\t\tcam.TriggerMode.SetValue(PySpin.TriggerMode_Off)\n\t\tprint('\\n')\n\n\texcept PySpin.SpinnakerException as ex:\n\t\tprint('Error: %s' % ex)\n\n\treturn result", "def _verify_agent_reset(self):\n if self._ia_client is None:\n return\n\n state = self._ia_client.get_agent_state()\n if state != ResourceAgentState.UNINITIALIZED:\n cmd = AgentCommand(command=ResourceAgentEvent.RESET)\n retval = self._ia_client.execute_agent(cmd)", "def until_reset(self) -> int:\n return int((self.resets_at - datetime.now()).total_seconds())", "def if_end(self, **kwargs):\n\n index = self.get('_index')\n\n if index and index >= len(self.steps)-1:\n return True # all steps have been used\n\n return False", "def reset(self, time):\n for key in self.data['step']:\n self.data['step'][key] = None\n\n self.time = time", "def __init__(self, handle_auto_reset: bool = False):\n self._handle_auto_reset = handle_auto_reset\n self._current_time_step = None\n common.assert_members_are_not_overridden(\n base_cls=PyEnvironment, instance=self, denylist=('reset', 'step')\n )", "def is_last(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_last\")", "def _is_valid_reset(self, reset):\n if reset.ResetType() in ('Single', 'Compound', 'Weighted'):\n reset_date = reset.Day()\n if reset_date >= self.start_date and reset_date <= self.end_date:\n return True\n\n return False", "def _set(env_var: str) -> bool:\n return os.getenv(env_var) not in [None, \"0\"]", "def reset_target(scope) -> None:\n if globals.cw_platform == \"CW303\" or globals.cw_platform == \"CWLITEXMEGA\":\n scope.io.pdic = 'low'\n time.sleep(0.1)\n scope.io.pdic = 'high_z' #XMEGA doesn't like pdic driven high\n time.sleep(0.1) #xmega needs more startup time\n else: \n scope.io.nrst = 'low'\n time.sleep(0.05)\n scope.io.nrst = 'high_z'\n time.sleep(0.05)" ]
[ "0.6065845", "0.5846722", "0.5738438", "0.57173324", "0.5702698", "0.5700601", "0.56152225", "0.5585178", "0.55754685", "0.5555264", "0.5542493", "0.55404925", "0.5514392", "0.5394026", "0.5376499", "0.5355567", "0.53485197", "0.53262925", "0.5297183", "0.52819175", "0.52819175", "0.52819175", "0.52819175", "0.52819175", "0.52819175", "0.5280383", "0.5276791", "0.5252488", "0.52435327", "0.5237724", "0.522949", "0.5220523", "0.5218894", "0.52003044", "0.51928765", "0.5183386", "0.5178595", "0.5167659", "0.5134019", "0.5134019", "0.5133045", "0.5131466", "0.5120896", "0.51201564", "0.51201564", "0.511805", "0.5103139", "0.5096017", "0.5093936", "0.5081916", "0.5075817", "0.50745475", "0.5063044", "0.5061533", "0.50506365", "0.5037881", "0.50338906", "0.5029084", "0.502597", "0.5015943", "0.5007547", "0.5006716", "0.50061625", "0.4996792", "0.49965736", "0.4993423", "0.4993423", "0.49897972", "0.49767748", "0.49767298", "0.49757093", "0.49491033", "0.49458483", "0.4945029", "0.4944715", "0.49375892", "0.4926302", "0.49120924", "0.49114105", "0.49087328", "0.49086422", "0.4904369", "0.49017486", "0.48981065", "0.48958537", "0.48942623", "0.48942623", "0.48902318", "0.48876482", "0.48868796", "0.4871556", "0.48685995", "0.48630232", "0.4862149", "0.48573598", "0.48565975", "0.48551467", "0.48538715", "0.48445508", "0.48395744" ]
0.8290577
0